pax_global_header00006660000000000000000000000064147772320770014532gustar00rootroot0000000000000052 comment=04151f01f59f902ab932a51e0ca0ebce3883fc51 py_ecc-8.0.0/000077500000000000000000000000001477723207700130015ustar00rootroot00000000000000py_ecc-8.0.0/.circleci/000077500000000000000000000000001477723207700146345ustar00rootroot00000000000000py_ecc-8.0.0/.circleci/config.yml000066400000000000000000000205431477723207700166300ustar00rootroot00000000000000version: 2.1 # heavily inspired by https://raw.githubusercontent.com/pinax/pinax-wiki/6bd2a99ab6f702e300d708532a6d1d9aa638b9f8/.circleci/config.yml common: &common working_directory: ~/repo steps: - checkout - run: name: merge pull request base command: ./.circleci/merge_pr.sh - run: name: merge pull request base (2nd try) command: ./.circleci/merge_pr.sh when: on_fail - run: name: merge pull request base (3rd try) command: ./.circleci/merge_pr.sh when: on_fail - restore_cache: keys: - cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} - run: name: install dependencies command: | python -m pip install --upgrade pip python -m pip install tox - run: name: run tox command: python -m tox run -r - save_cache: paths: - .hypothesis - .tox - ~/.cache/pip - ~/.local key: cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} orbs: win: circleci/windows@5.0.0 windows-wheel-steps: windows-wheel-setup: &windows-wheel-setup executor: name: win/default shell: bash.exe working_directory: C:\Users\circleci\project\py_ecc environment: TOXENV: windows-wheel restore-cache-step: &restore-cache-step restore_cache: keys: - cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} install-pyenv-step: &install-pyenv-step run: name: install pyenv command: | pip install pyenv-win --target $HOME/.pyenv echo 'export PYENV="$HOME/.pyenv/pyenv-win/"' >> $BASH_ENV echo 'export PYENV_ROOT="$HOME/.pyenv/pyenv-win/"' >> $BASH_ENV echo 'export PYENV_USERPROFILE="$HOME/.pyenv/pyenv-win/"' >> $BASH_ENV echo 'export PATH="$PATH:$HOME/.pyenv/pyenv-win/bin"' >> $BASH_ENV echo 'export PATH="$PATH:$HOME/.pyenv/pyenv-win/shims"' >> $BASH_ENV source $BASH_ENV pyenv update install-latest-python-step: &install-latest-python-step run: name: install latest python version and tox command: | LATEST_VERSION=$(pyenv install --list | grep -E "${MINOR_VERSION}\.[0-9]+$" | tail -1) echo "installing python version $LATEST_VERSION" pyenv install $LATEST_VERSION pyenv global $LATEST_VERSION python3 -m pip install --upgrade pip python3 -m pip install tox run-tox-step: &run-tox-step run: name: run tox command: | echo 'running tox with' $(python3 --version) python3 -m tox run -r save-cache-step: &save-cache-step save_cache: paths: - .tox key: cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} docs: &docs working_directory: ~/repo steps: - checkout - restore_cache: keys: - cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} - run: name: install dependencies command: | python -m pip install --upgrade pip python -m pip install tox - run: name: run tox command: python -m tox run -r - store_artifacts: path: /home/circleci/repo/docs/_build - save_cache: paths: - .tox - ~/.cache/pip - ~/.local key: cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} resource_class: xlarge jobs: docs: <<: *docs docker: - image: 
cimg/python:3.10 environment: TOXENV: docs py38-core: <<: *common docker: - image: cimg/python:3.8 environment: TOXENV: py38-core py39-core: <<: *common docker: - image: cimg/python:3.9 environment: TOXENV: py39-core py310-core: <<: *common docker: - image: cimg/python:3.10 environment: TOXENV: py310-core py311-core: <<: *common docker: - image: cimg/python:3.11 environment: TOXENV: py311-core py312-core: <<: *common docker: - image: cimg/python:3.12 environment: TOXENV: py312-core py313-core: <<: *common docker: - image: cimg/python:3.13 environment: TOXENV: py313-core py38-lint: <<: *common docker: - image: cimg/python:3.8 environment: TOXENV: py38-lint py39-lint: <<: *common docker: - image: cimg/python:3.9 environment: TOXENV: py39-lint py310-lint: <<: *common docker: - image: cimg/python:3.10 environment: TOXENV: py310-lint py311-lint: <<: *common docker: - image: cimg/python:3.11 environment: TOXENV: py311-lint py312-lint: <<: *common docker: - image: cimg/python:3.12 environment: TOXENV: py312-lint py313-lint: <<: *common docker: - image: cimg/python:3.13 environment: TOXENV: py313-lint py38-wheel: <<: *common docker: - image: cimg/python:3.8 environment: TOXENV: py38-wheel py39-wheel: <<: *common docker: - image: cimg/python:3.9 environment: TOXENV: py39-wheel py310-wheel: <<: *common docker: - image: cimg/python:3.10 environment: TOXENV: py310-wheel py311-wheel: <<: *common docker: - image: cimg/python:3.11 environment: TOXENV: py311-wheel py312-wheel: <<: *common docker: - image: cimg/python:3.12 environment: TOXENV: py312-wheel py313-wheel: <<: *common docker: - image: cimg/python:3.13 environment: TOXENV: py313-wheel py311-windows-wheel: <<: *windows-wheel-setup steps: - checkout - <<: *restore-cache-step - <<: *install-pyenv-step - run: name: set minor version command: echo "export MINOR_VERSION='3.11'" >> $BASH_ENV - <<: *install-latest-python-step - <<: *run-tox-step - <<: *save-cache-step py312-windows-wheel: <<: *windows-wheel-setup steps: - checkout - <<: *restore-cache-step - <<: *install-pyenv-step - run: name: set minor version command: echo "export MINOR_VERSION='3.12'" >> $BASH_ENV - <<: *install-latest-python-step - <<: *run-tox-step - <<: *save-cache-step py313-windows-wheel: <<: *windows-wheel-setup steps: - checkout - <<: *restore-cache-step - <<: *install-pyenv-step - run: name: set minor version command: echo "export MINOR_VERSION='3.13'" >> $BASH_ENV - <<: *install-latest-python-step - <<: *run-tox-step - <<: *save-cache-step py38-bls: <<: *common docker: - image: cimg/python:3.8 environment: TOXENV: py38-bls py39-bls: <<: *common docker: - image: cimg/python:3.9 environment: TOXENV: py39-bls py310-bls: <<: *common docker: - image: cimg/python:3.10 environment: TOXENV: py310-bls py311-bls: <<: *common docker: - image: cimg/python:3.11 environment: TOXENV: py311-bls py312-bls: <<: *common docker: - image: cimg/python:3.12 environment: TOXENV: py312-bls py313-bls: <<: *common docker: - image: cimg/python:3.13 environment: TOXENV: py313-bls define: &all_jobs - docs - py38-core - py39-core - py310-core - py311-core - py312-core - py313-core - py38-lint - py39-lint - py310-lint - py311-lint - py312-lint - py313-lint - py38-wheel - py39-wheel - py310-wheel - py311-wheel - py312-wheel - py313-wheel - py311-windows-wheel - py312-windows-wheel - py313-windows-wheel - py38-bls - py39-bls - py310-bls - py311-bls - py312-bls - py313-bls workflows: version: 2 test: jobs: *all_jobs nightly: triggers: - schedule: # Weekdays 12:00p UTC cron: "0 12 * * 1,2,3,4,5" filters: 
branches: only: - main jobs: *all_jobs py_ecc-8.0.0/.circleci/merge_pr.sh000077500000000000000000000011271477723207700167740ustar00rootroot00000000000000#!/usr/bin/env bash if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then PR_INFO_URL=https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$CIRCLE_PR_NUMBER PR_BASE_BRANCH=$(curl -L "$PR_INFO_URL" | python -c 'import json, sys; obj = json.load(sys.stdin); sys.stdout.write(obj["base"]["ref"])') git fetch origin +"$PR_BASE_BRANCH":circleci/pr-base # We need these config values or git complains when creating the # merge commit git config --global user.name "Circle CI" git config --global user.email "circleci@example.com" git merge --no-edit circleci/pr-base fi py_ecc-8.0.0/.github/000077500000000000000000000000001477723207700143415ustar00rootroot00000000000000py_ecc-8.0.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001477723207700165245ustar00rootroot00000000000000py_ecc-8.0.0/.github/ISSUE_TEMPLATE/bug_report.yml000066400000000000000000000035421477723207700214230ustar00rootroot00000000000000name: Bug Report description: File a bug report labels: ["bug"] body: - type: markdown attributes: value: "## What was wrong" - type: textarea id: what-happened attributes: label: What happened? description: Also tell us what you expected to happen validations: required: true - type: textarea id: code-that-caused attributes: label: Code that produced the error description: Formats to Python, no backticks needed render: python validations: required: false - type: textarea id: error-output attributes: label: Full error output description: Formats to shell, no backticks needed render: shell validations: required: false - type: markdown attributes: value: "## Potential Solutions" - type: textarea id: how-to-fix attributes: label: Fill this section in if you know how this could or should be fixed description: Include any relevant examples or reference material validations: required: false - type: input id: lib-version attributes: label: py-ecc Version description: Which version of py-ecc are you using? placeholder: x.x.x validations: required: false - type: input id: py-version attributes: label: Python Version description: Which version of Python are you using? placeholder: x.x.x validations: required: false - type: input id: os attributes: label: Operating System description: Which operating system are you using? placeholder: osx/linux/win validations: required: false - type: textarea id: pip-freeze attributes: label: Output from `pip freeze` description: Run `python -m pip freeze` and paste the output below render: shell validations: required: false py_ecc-8.0.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000003241477723207700205130ustar00rootroot00000000000000blank_issues_enabled: true contact_links: - name: Questions about using py_ecc? url: https://discord.gg/GHryRvPB84 about: You can ask and answer usage questions on the Ethereum Python Community Discord py_ecc-8.0.0/.github/ISSUE_TEMPLATE/feature_request.yml000066400000000000000000000004601477723207700224520ustar00rootroot00000000000000name: Feature Request description: Request a new feature labels: ["feature_request"] body: - type: textarea id: feature-description attributes: label: What feature should we add? description: Include any relevant examples or reference material validations: required: true py_ecc-8.0.0/.github/pull_request_template.md000066400000000000000000000005721477723207700213060ustar00rootroot00000000000000### What was wrong? 
Related to Issue #
Closes #

### How was it fixed?

### Todo:

- [ ] Clean up commit history
- [ ] Add or update documentation related to these changes
- [ ] Add entry to the [release notes](https://github.com/ethereum/py_ecc/blob/main/newsfragments/README.md)

#### Cute Animal Picture

![Put a link to a cute animal picture inside the parenthesis-->](<>)

py_ecc-8.0.0/.gitignore
*.py[cod]

# C extensions
*.so

# Packages
*.egg
*.egg-info
dist
build
.build
eggs
.eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
pip-wheel-metadata
venv*
.venv*

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox
nosetests.xml

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Complexity
output/*.html
output/*/index.html

# Sphinx
docs/_build
docs/modules.rst
docs/*.internal.rst
docs/*.utils.rst
docs/*._utils.*

# Blockchain
chains

# Hypothesis Property base testing
.hypothesis

# tox/pytest cache
.cache
.pytest_cache

# pycache
__pycache__/

# Test output logs
logs

# VIM temp files
*.sw[op]

# mypy
.mypy_cache

# macOS
.DS_Store

# pyenv
.python-version

# vs-code
.vscode

# jupyter notebook files
*.ipynb

# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# For a more precise, explicit template, see:
# https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

## General
.idea/*
.idea_modules/*

## File-based project format:
*.iws

## IntelliJ
out/

## Plugin-specific files:

### JIRA plugin
atlassian-ide-plugin.xml

### Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# END JetBrains section

py_ecc-8.0.0/.pre-commit-config.yaml
exclude: '.project-template|docs/conf.py'
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-yaml
      - id: check-toml
      - id: end-of-file-fixer
      - id: trailing-whitespace
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.15.0
    hooks:
      - id: pyupgrade
        args: [--py38-plus]
  - repo: https://github.com/psf/black
    rev: 23.9.1
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/flake8
    rev: 6.1.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-bugbear==23.9.16
        exclude: setup.py
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.2.1
    hooks:
      - id: autoflake
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/pycqa/pydocstyle
    rev: 6.3.0
    hooks:
      - id: pydocstyle
        additional_dependencies:
          - tomli  # required until >= python311
  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.17
    hooks:
      - id: mdformat
        additional_dependencies:
          - mdformat-gfm
  - repo: local
    hooks:
      - id: mypy-local
        name: run mypy with all dev dependencies present
        entry: python -m mypy -p py_ecc
        language: system
        always_run: true
        pass_filenames: false
  - repo: https://github.com/PrincetonUniversity/blocklint
    rev: v0.2.5
    hooks:
      - id: blocklint
        exclude: 'docs/Makefile|docs/release_notes.rst|tox.ini'
  - repo: local
    hooks:
      - id: check-rst-files
        name: Check for .rst files in the top-level directory
        entry: sh -c 'ls *.rst 1>/dev/null 2>&1 && { echo "found .rst file in top-level folder"; exit 1; } || exit 0'
        language: system
        always_run: true
        pass_filenames: false
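# Usage sketch (not part of the upstream config file): these hooks are invoked
# with the commands the repository itself documents in its Makefile and
# contributing guide:
#
#   pre-commit install            # register the hooks so they run on every commit
#   pre-commit run --all-files    # run all hooks across the repo (what `make lint` wraps)
#   git commit --no-verify        # bypass the hooks for a single commit if needed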
py_ecc-8.0.0/.project-template/000077500000000000000000000000001477723207700163365ustar00rootroot00000000000000py_ecc-8.0.0/.project-template/fill_template_vars.py000066400000000000000000000044151477723207700225700ustar00rootroot00000000000000#!/usr/bin/env python3 import os import sys import re from pathlib import Path def _find_files(project_root): path_exclude_pattern = r"\.git($|\/)|venv|_build" file_exclude_pattern = r"fill_template_vars\.py|\.swp$" filepaths = [] for dir_path, _dir_names, file_names in os.walk(project_root): if not re.search(path_exclude_pattern, dir_path): for file in file_names: if not re.search(file_exclude_pattern, file): filepaths.append(str(Path(dir_path, file))) return filepaths def _replace(pattern, replacement, project_root): print(f"Replacing values: {pattern}") for file in _find_files(project_root): try: with open(file) as f: content = f.read() content = re.sub(pattern, replacement, content) with open(file, "w") as f: f.write(content) except UnicodeDecodeError: pass def main(): project_root = Path(os.path.realpath(sys.argv[0])).parent.parent module_name = input("What is your python module name? ") pypi_input = input(f"What is your pypi package name? (default: {module_name}) ") pypi_name = pypi_input or module_name repo_input = input(f"What is your github project name? (default: {pypi_name}) ") repo_name = repo_input or pypi_name rtd_input = input( f"What is your readthedocs.org project name? (default: {pypi_name}) " ) rtd_name = rtd_input or pypi_name project_input = input( f"What is your project name (ex: at the top of the README)? (default: {repo_name}) " ) project_name = project_input or repo_name short_description = input("What is a one-liner describing the project? ") _replace("", module_name, project_root) _replace("", pypi_name, project_root) _replace("", repo_name, project_root) _replace("", rtd_name, project_root) _replace("", project_name, project_root) _replace("", short_description, project_root) os.makedirs(project_root / module_name, exist_ok=True) Path(project_root / module_name / "__init__.py").touch() Path(project_root / module_name / "py.typed").touch() if __name__ == "__main__": main() py_ecc-8.0.0/.project-template/refill_template_vars.py000066400000000000000000000015771477723207700231250ustar00rootroot00000000000000#!/usr/bin/env python3 import os import sys from pathlib import Path import subprocess def main(): template_dir = Path(os.path.dirname(sys.argv[0])) template_vars_file = template_dir / "template_vars.txt" fill_template_vars_script = template_dir / "fill_template_vars.py" with open(template_vars_file, "r") as input_file: content_lines = input_file.readlines() process = subprocess.Popen( [sys.executable, str(fill_template_vars_script)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, ) for line in content_lines: process.stdin.write(line) process.stdin.flush() stdout, stderr = process.communicate() if process.returncode != 0: print(f"Error occurred: {stderr}") sys.exit(1) print(stdout) if __name__ == "__main__": main() py_ecc-8.0.0/.project-template/template_vars.txt000066400000000000000000000001611477723207700217430ustar00rootroot00000000000000py_ecc py-ecc py_ecc py-ecc py_ecc Elliptic curve crypto in python including secp256k1, alt_bn128, and bls12_381 py_ecc-8.0.0/.readthedocs.yaml000066400000000000000000000005621477723207700162330ustar00rootroot00000000000000version: 2 build: os: ubuntu-22.04 tools: python: "3.10" sphinx: configuration: docs/conf.py fail_on_warning: true python: install: - 
method: pip path: . extra_requirements: - docs # Build all formats for RTD Downloads - htmlzip, pdf, epub # formats: all # Turn off pdf builds for now, not working formats: - epub - htmlzip py_ecc-8.0.0/LICENSE000066400000000000000000000020731477723207700140100ustar00rootroot00000000000000 The MIT License (MIT) Copyright (c) 2015 Vitalik Buterin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. py_ecc-8.0.0/MANIFEST.in000066400000000000000000000003041477723207700145340ustar00rootroot00000000000000include LICENSE include README.md recursive-include scripts * recursive-include tests * global-include *.pyi recursive-exclude * __pycache__ recursive-exclude * *.py[co] prune .tox prune venv* py_ecc-8.0.0/Makefile000066400000000000000000000066061477723207700144510ustar00rootroot00000000000000CURRENT_SIGN_SETTING := $(shell git config commit.gpgSign) .PHONY: clean-pyc clean-build docs help: @echo "clean-build - remove build artifacts" @echo "clean-pyc - remove Python file artifacts" @echo "clean - run clean-build and clean-pyc" @echo "dist - build package and cat contents of the dist directory" @echo "lint - fix linting issues with pre-commit" @echo "test - run tests quickly with the default Python" @echo "docs - generate docs and open in browser (linux-docs for version on linux)" @echo "autobuild-docs - live update docs when changes are saved" @echo "package-test - build package and install it in a venv for manual testing" @echo "notes - consume towncrier newsfragments and update release notes in docs - requires bump to be set" @echo "release - package and upload a release (does not run notes target) - requires bump to be set" clean-build: rm -fr build/ rm -fr dist/ rm -fr *.egg-info clean-pyc: find . -name '*.pyc' -exec rm -f {} + find . -name '*.pyo' -exec rm -f {} + find . -name '*~' -exec rm -f {} + find . -name '__pycache__' -exec rm -rf {} + clean: clean-build clean-pyc dist: clean python -m build ls -l dist lint: @pre-commit run --all-files --show-diff-on-failure || ( \ echo "\n\n\n * pre-commit should have fixed the errors above. Running again to make sure everything is good..." 
\ && pre-commit run --all-files --show-diff-on-failure \ ) test: python -m pytest tests # docs commands docs: check-docs open docs/_build/html/index.html linux-docs: check-docs xdg-open docs/_build/html/index.html autobuild-docs: sphinx-autobuild --open-browser docs docs/_build/html # docs helpers validate-newsfragments: python ./newsfragments/validate_files.py towncrier build --draft --version preview check-docs: build-docs validate-newsfragments build-docs: sphinx-apidoc -o docs/ . setup.py "*conftest*" $(MAKE) -C docs clean $(MAKE) -C docs html $(MAKE) -C docs doctest check-docs-ci: build-docs build-docs-ci validate-newsfragments build-docs-ci: $(MAKE) -C docs epub # release commands package-test: clean python -m build python scripts/release/test_package.py notes: check-bump # Let UPCOMING_VERSION be the version that is used for the current bump $(eval UPCOMING_VERSION=$(shell bump-my-version bump --dry-run $(bump) -v | awk -F"'" '/New version will be / {print $$2}')) # Now generate the release notes to have them included in the release commit towncrier build --yes --version $(UPCOMING_VERSION) # Before we bump the version, make sure that the towncrier-generated docs will build make docs git commit -m "Compile release notes" release: check-bump check-git clean # verify that notes command ran correctly ./newsfragments/validate_files.py is-empty CURRENT_SIGN_SETTING=$(git config commit.gpgSign) git config commit.gpgSign true bump-my-version bump $(bump) python -m build git config commit.gpgSign "$(CURRENT_SIGN_SETTING)" git push upstream && git push upstream --tags twine upload dist/* # release helpers check-bump: ifndef bump $(error bump must be set, typically: major, minor, patch, or devnum) endif check-git: # require that upstream is configured for ethereum/py_ecc @if ! git remote -v | grep "upstream[[:space:]]git@github.com:ethereum/py_ecc.git (push)\|upstream[[:space:]]https://github.com/ethereum/py_ecc (push)"; then \ echo "Error: You must have a remote named 'upstream' that points to 'py_ecc'"; \ exit 1; \ fi py_ecc-8.0.0/README.md000066400000000000000000000015421477723207700142620ustar00rootroot00000000000000# py_ecc [![Join the conversation on Discord](https://img.shields.io/discord/809793915578089484?color=blue&label=chat&logo=discord&logoColor=white)](https://discord.gg/GHryRvPB84) [![Build Status](https://circleci.com/gh/ethereum/py_ecc.svg?style=shield)](https://circleci.com/gh/ethereum/py_ecc) [![PyPI version](https://badge.fury.io/py/py-ecc.svg)](https://badge.fury.io/py/py-ecc) [![Python versions](https://img.shields.io/pypi/pyversions/py-ecc.svg)](https://pypi.python.org/pypi/py-ecc) Elliptic curve crypto in python including secp256k1, alt_bn128, and bls12_381. > **Warning**: This library contains some experimental code and has **NOT** been audited. Read the [documentation](https://py-ecc.readthedocs.io/). View the [change log](https://py-ecc.readthedocs.io/en/latest/release_notes.html). ## Installation ```sh python -m pip install py_ecc ``` py_ecc-8.0.0/docs/000077500000000000000000000000001477723207700137315ustar00rootroot00000000000000py_ecc-8.0.0/docs/Makefile000066400000000000000000000152061477723207700153750ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." py_ecc-8.0.0/docs/code_of_conduct.rst000066400000000000000000000063011477723207700176000ustar00rootroot00000000000000Code of Conduct --------------- Our Pledge ~~~~~~~~~~ In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
Our Standards ~~~~~~~~~~~~~ Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting Our Responsibilities ~~~~~~~~~~~~~~~~~~~~ Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. Scope ~~~~~ This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. Enforcement ~~~~~~~~~~~ Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at snakecharmers@ethereum.org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. Attribution ~~~~~~~~~~~ This Code of Conduct is adapted from the `Contributor Covenant `_, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html py_ecc-8.0.0/docs/conf.py000066400000000000000000000220061477723207700152300ustar00rootroot00000000000000# py_ecc documentation build configuration file, created by # sphinx-quickstart on Thu Oct 16 20:43:24 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# sys.path.insert(0, os.path.abspath('.')) import os DIR = os.path.dirname("__file__") with open(os.path.join(DIR, "../setup.py"), "r") as f: for line in f: if "version=" in line: setup_version = line.split('"')[1] break # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx_rtd_theme", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "py_ecc" copyright = "2019-2023, The Ethereum Foundation" __version__ = setup_version # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = ".".join(__version__.split(".")[:2]) # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ "_build", "modules.rst", ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = "py_eccdocs" # -- Options for LaTeX output --------------------------------------------- latex_engine = "xelatex" latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( "index", "py_ecc.tex", "py_ecc Documentation", "The Ethereum Foundation", "manual", ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( "index", "py_ecc", "py_ecc Documentation", ["The Ethereum Foundation"], 1, ) ] # If true, show URL addresses after external links. 
# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( "index", "py_ecc", "py_ecc Documentation", "The Ethereum Foundation", "py_ecc", "Elliptic curve crypto in python including secp256k1, alt_bn128, and bls12_381", "Miscellaneous", ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Intersphinx configuration ------------------------------------------------ intersphinx_mapping = { "python": ("https://docs.python.org/3.10", None), } # -- Doctest configuration ---------------------------------------- import doctest doctest_default_flags = ( 0 | doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL | doctest.NORMALIZE_WHITESPACE ) py_ecc-8.0.0/docs/contributing.rst000066400000000000000000000116711477723207700172000ustar00rootroot00000000000000Contributing ------------ Thank you for your interest in contributing! We welcome all contributions no matter their size. Please read along to learn how to get started. If you get stuck, feel free to ask for help in `Ethereum Python Discord server `_. Setting the stage ~~~~~~~~~~~~~~~~~ To get started, fork the repository to your own github account, then clone it to your development machine: .. code:: sh git clone git@github.com:your-github-username/py_ecc.git Next, install the development dependencies. We recommend using a virtual environment, such as `virtualenv `_. .. code:: sh cd py_ecc virtualenv -p python venv . venv/bin/activate python -m pip install -e ".[dev]" pre-commit install Running the tests ~~~~~~~~~~~~~~~~~ A great way to explore the code base is to run the tests. We can run all tests with: .. code:: sh pytest tests Code Style ~~~~~~~~~~ We use `pre-commit `_ to enforce a consistent code style across the library. This tool runs automatically with every commit, but you can also run it manually with: .. code:: sh make lint If you need to make a commit that skips the ``pre-commit`` checks, you can do so with ``git commit --no-verify``. This library uses type hints, which are enforced by the ``mypy`` tool (part of the ``pre-commit`` checks). All new code is required to land with type hints, with the exception of code within the ``tests`` directory. Documentation ~~~~~~~~~~~~~ Good documentation will lead to quicker adoption and happier users. Please check out our guide on `how to create documentation for the Python Ethereum ecosystem `_. Pull Requests ~~~~~~~~~~~~~ It's a good idea to make pull requests early on. A pull request represents the start of a discussion, and doesn't necessarily need to be the final, finished submission. GitHub's documentation for working on pull requests is `available here `_. Once you've made a pull request, take a look at the Circle CI build status in the GitHub interface and make sure all tests are passing. In general pull requests that do not pass the CI build yet won't get reviewed unless explicitly requested. If the pull request introduces changes that should be reflected in the release notes, please add a `newsfragment` file as explained `here `_. 
If possible, the change to the release notes file should be included in the commit that introduces the feature or bugfix. Releasing ~~~~~~~~~ Releases are typically done from the ``main`` branch, except when releasing a beta (in which case the beta is released from ``main``, and the previous stable branch is released from said branch). Final test before each release ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Before releasing a new version, build and test the package that will be released: .. code:: sh git checkout main && git pull make package-test This will build the package and install it in a temporary virtual environment. Follow the instructions to activate the venv and test whatever you think is important. You can also preview the release notes: .. code:: sh towncrier --draft Build the release notes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Before bumping the version number, build the release notes. You must include the part of the version to bump (see below), which changes how the version number will show in the release notes. .. code:: sh make notes bump=$$VERSION_PART_TO_BUMP$$ If there are any errors, be sure to re-run make notes until it works. Push the release to github & pypi ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ After confirming that the release package looks okay, release a new version: .. code:: sh make release bump=$$VERSION_PART_TO_BUMP$$ This command will: - Bump the version number as specified in ``.pyproject.toml`` and ``setup.py``. - Create a git commit and tag for the new version. - Build the package. - Push the commit and tag to github. - Push the new package files to pypi. Which version part to bump ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``$$VERSION_PART_TO_BUMP$$`` must be one of: ``major``, ``minor``, ``patch``, ``stage``, or ``devnum``. The version format for this repo is ``{major}.{minor}.{patch}`` for stable, and ``{major}.{minor}.{patch}-{stage}.{devnum}`` for unstable (``stage`` can be alpha or beta). If you are in a beta version, ``make release bump=stage`` will switch to a stable. To issue an unstable version when the current version is stable, specify the new version explicitly, like ``make release bump="--new-version 4.0.0-alpha.1"`` You can see what the result of bumping any particular version part would be with ``bump-my-version show-bump`` py_ecc-8.0.0/docs/index.rst000066400000000000000000000006001477723207700155660ustar00rootroot00000000000000py_ecc ============================== Elliptic curve crypto in python including secp256k1, alt_bn128, and bls12_381 Installation ------------ .. code-block:: bash python -m pip install py_ecc .. toctree:: :maxdepth: 1 :caption: General Usage release_notes .. toctree:: :maxdepth: 1 :caption: Community contributing code_of_conduct py_ecc-8.0.0/docs/py_ecc.bls.rst000066400000000000000000000023741477723207700165120ustar00rootroot00000000000000py\_ecc.bls package =================== Submodules ---------- py\_ecc.bls.ciphersuites module ------------------------------- .. automodule:: py_ecc.bls.ciphersuites :members: :undoc-members: :show-inheritance: py\_ecc.bls.constants module ---------------------------- .. automodule:: py_ecc.bls.constants :members: :undoc-members: :show-inheritance: py\_ecc.bls.g2\_primitives module --------------------------------- .. automodule:: py_ecc.bls.g2_primitives :members: :undoc-members: :show-inheritance: py\_ecc.bls.hash module ----------------------- .. 
automodule:: py_ecc.bls.hash :members: :undoc-members: :show-inheritance: py\_ecc.bls.hash\_to\_curve module ---------------------------------- .. automodule:: py_ecc.bls.hash_to_curve :members: :undoc-members: :show-inheritance: py\_ecc.bls.point\_compression module ------------------------------------- .. automodule:: py_ecc.bls.point_compression :members: :undoc-members: :show-inheritance: py\_ecc.bls.typing module ------------------------- .. automodule:: py_ecc.bls.typing :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.bls :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.bls12_381.rst000066400000000000000000000011161477723207700172410ustar00rootroot00000000000000py\_ecc.bls12\_381 package ========================== Submodules ---------- py\_ecc.bls12\_381.bls12\_381\_curve module ------------------------------------------- .. automodule:: py_ecc.bls12_381.bls12_381_curve :members: :undoc-members: :show-inheritance: py\_ecc.bls12\_381.bls12\_381\_pairing module --------------------------------------------- .. automodule:: py_ecc.bls12_381.bls12_381_pairing :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.bls12_381 :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.bn128.rst000066400000000000000000000010101477723207700165460ustar00rootroot00000000000000py\_ecc.bn128 package ===================== Submodules ---------- py\_ecc.bn128.bn128\_curve module --------------------------------- .. automodule:: py_ecc.bn128.bn128_curve :members: :undoc-members: :show-inheritance: py\_ecc.bn128.bn128\_pairing module ----------------------------------- .. automodule:: py_ecc.bn128.bn128_pairing :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.bn128 :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.fields.rst000066400000000000000000000013641477723207700171760ustar00rootroot00000000000000py\_ecc.fields package ====================== Submodules ---------- py\_ecc.fields.field\_elements module ------------------------------------- .. automodule:: py_ecc.fields.field_elements :members: :undoc-members: :show-inheritance: py\_ecc.fields.field\_properties module --------------------------------------- .. automodule:: py_ecc.fields.field_properties :members: :undoc-members: :show-inheritance: py\_ecc.fields.optimized\_field\_elements module ------------------------------------------------ .. automodule:: py_ecc.fields.optimized_field_elements :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.fields :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.optimized_bls12_381.rst000066400000000000000000000025121477723207700213260ustar00rootroot00000000000000py\_ecc.optimized\_bls12\_381 package ===================================== Submodules ---------- py\_ecc.optimized\_bls12\_381.constants module ---------------------------------------------- .. automodule:: py_ecc.optimized_bls12_381.constants :members: :undoc-members: :show-inheritance: py\_ecc.optimized\_bls12\_381.optimized\_clear\_cofactor module --------------------------------------------------------------- .. automodule:: py_ecc.optimized_bls12_381.optimized_clear_cofactor :members: :undoc-members: :show-inheritance: py\_ecc.optimized\_bls12\_381.optimized\_curve module ----------------------------------------------------- .. 
automodule:: py_ecc.optimized_bls12_381.optimized_curve :members: :undoc-members: :show-inheritance: py\_ecc.optimized\_bls12\_381.optimized\_pairing module ------------------------------------------------------- .. automodule:: py_ecc.optimized_bls12_381.optimized_pairing :members: :undoc-members: :show-inheritance: py\_ecc.optimized\_bls12\_381.optimized\_swu module --------------------------------------------------- .. automodule:: py_ecc.optimized_bls12_381.optimized_swu :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.optimized_bls12_381 :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.optimized_bn128.rst000066400000000000000000000012001477723207700206330ustar00rootroot00000000000000py\_ecc.optimized\_bn128 package ================================ Submodules ---------- py\_ecc.optimized\_bn128.optimized\_curve module ------------------------------------------------ .. automodule:: py_ecc.optimized_bn128.optimized_curve :members: :undoc-members: :show-inheritance: py\_ecc.optimized\_bn128.optimized\_pairing module -------------------------------------------------- .. automodule:: py_ecc.optimized_bn128.optimized_pairing :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.optimized_bn128 :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/py_ecc.rst000066400000000000000000000045271477723207700157350ustar00rootroot00000000000000py\_ecc ======= .. warning:: This library contains some experimental code and has not been audited. BLS Signatures -------------- ``py_ecc`` implements the `IETF BLS draft standard v4 `_ as per the inter-blockchain standardization agreement. The BLS standards specify `different ciphersuites `_ which each have different functionality to accommodate various use cases. The following ciphersuites are available from this library: - ``G2Basic`` also known as ``BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_`` - ``G2MessageAugmentation`` also known as ``BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_AUG_`` - ``G2ProofOfPossession`` also known as ``BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_`` Basic Usage ----------- .. code-block:: python from py_ecc.bls import G2ProofOfPossession as bls_pop private_key = 5566 public_key = bls_pop.SkToPk(private_key) message = b'\xab' * 32 # The message to be signed # Signing signature = bls_pop.Sign(private_key, message) # Verifying assert bls_pop.Verify(public_key, message, signature) Aggregating Signatures ---------------------- .. code-block:: python private_keys = [3, 14, 159] public_keys = [bls_pop.SkToPk(key) for key in private_keys] signatures = [bls_pop.Sign(key, message) for key in private_keys] # Aggregating agg_sig = bls_pop.Aggregate(signatures) # Verifying signatures over the same message. # Note this is only safe if Proofs of Possession have been verified for each of the public keys beforehand. # See the BLS standards for why this is the case. assert bls_pop.FastAggregateVerify(public_keys, message, agg_sig) Multiple Aggregation -------------------- .. code-block:: python messages = [b'\xaa' * 42, b'\xbb' * 32, b'\xcc' * 64] signatures = [bls_pop.Sign(key, message) for key, message in zip(private_keys, messages)] agg_sig = bls_pop.Aggregate(signatures) # Verify aggregate signature with different messages assert bls_pop.AggregateVerify(public_keys, messages, agg_sig) py_ecc package -------------- .. 
toctree:: :maxdepth: 4 py_ecc.bls py_ecc.bls12_381 py_ecc.bn128 py_ecc.optimized_bn128 py_ecc.secp256k1 py_ecc.fields py_ecc.optimized_bls12_381 py_ecc-8.0.0/docs/py_ecc.secp256k1.rst000066400000000000000000000005551477723207700173540ustar00rootroot00000000000000py\_ecc.secp256k1 package ========================= Submodules ---------- py\_ecc.secp256k1.secp256k1 module ---------------------------------- .. automodule:: py_ecc.secp256k1.secp256k1 :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: py_ecc.secp256k1 :members: :undoc-members: :show-inheritance: py_ecc-8.0.0/docs/release_notes.rst000066400000000000000000000130141477723207700173120ustar00rootroot00000000000000Release notes ============= .. towncrier release notes start py_ecc v8.0.0 (2025-04-14) -------------------------- No significant changes. py_ecc v8.0.0-beta.2 (2025-01-22) --------------------------------- Internal Changes - for py_ecc Contributors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Add support for 3.13 in CI and lib metadata. (`#148 `__) - Merge template updates, notably moving from ``bumpversion`` to ``bump-my-version`` and moving docs from the ``README`` to ReadTheDocs (`#149 `__) - Remove unused ``cached-property`` dependency. (`#152 `__) - Reenable ``from py_ecc import *`` post-lazyloading. (`#153 `__) py_ecc v8.0.0-beta.1 (2024-10-22) --------------------------------- Breaking Changes ~~~~~~~~~~~~~~~~ - Updated typing across the library (`#143 `__) - Set ``ecdsa_raw_recover`` to only accept ``v`` values of 27 or 28 (`#145 `__) Improved Documentation ~~~~~~~~~~~~~~~~~~~~~~ - Add docstrings to ``secp256k1`` (`#141 `__) Features ~~~~~~~~ - Added ``__lt__`` to ``FQ`` classes (`#143 `__) - Add hash-to-curve functions for the G1 curve (`#146 `__) Internal Changes - for py_ecc Contributors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Replace non-test instances of ``assert`` statments with better validation (`#142 `__) Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Lazy-load submodules to reduce initial import time (`#135 `__) py_ecc v7.0.1 (2024-04-23) -------------------------- Internal Changes - for py_ecc Contributors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Template updates, notably adding python 3.12 support (`#138 `__) Miscellaneous Changes ~~~~~~~~~~~~~~~~~~~~~ - `#139 `__ py_ecc v7.0.0 (2023-12-06) -------------------------- Breaking Changes ~~~~~~~~~~~~~~~~ - Drop support for python 3.6 and 3.7 (`#130 `__) Features ~~~~~~~~ - Add support for python 3.11 (`#130 `__) Internal Changes - for py_ecc Contributors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Merge changes from python project template, including using pre-commit for linting and change the name of the ``master`` branch to ``main`` (`#130 `__) 6.0.0 ----- 2021-12-16 * Fix x1 point check (https://github.com/ethereum/py_ecc/pull/121) * Bump eth-typing dependency requirement (https://github.com/ethereum/py_ecc/pull/123) * Bump eth-utils dependency requirement (https://github.com/ethereum/py_ecc/pull/123) * Drop support for Python 3.5 (https://github.com/ethereum/py_ecc/pull/123) * Add support for Python 3.9 and 3.10 (https://github.com/ethereum/py_ecc/pull/123) 5.2.0 ----- 2021-03-09 * Fix prime_field_inv edge case (https://github.com/ethereum/py_ecc/pull/114) * Extract `subgroup_check` from `signature_to_G2` (https://github.com/ethereum/py_ecc/pull/116) * Add G1 and G2 point value check (https://github.com/ethereum/py_ecc/pull/117) * Fix README example (https://github.com/ethereum/py_ecc/pull/115) 
5.1.0 ----- 2020-11-16 * Fix BLS G1 and G2 deserialization https://github.com/ethereum/py_ecc/pull/110 * Fix to follow IETF BLS draft 04 point at infinity checking procedure https://github.com/ethereum/py_ecc/pull/107 5.0.0 ----- 2020-10-01 * Implement IETF BLS draft 04 (https://github.com/ethereum/py_ecc/pull/103) 4.1.0 ----- 2020-09-23 * Implement IETF BLS draft 03 (https://github.com/ethereum/py_ecc/pull/102) * Optimize BLS-12-381: Miller loop is now 33% faster (https://github.com/ethereum/py_ecc/pull/100) * Improve final exponentiation efficiency (https://github.com/ethereum/py_ecc/pull/101) 4.0.0 ----- 2020-05-13 * Implement IETF hash-to-curve draft 07 (https://github.com/ethereum/py_ecc/pull/94) 3.1.0 ----- 2020-05-12 * Fix optimized_swu + update error messages (https://github.com/ethereum/py_ecc/pull/97) 3.0.0 ----- 2020-05-12 * Implement IETF BLS signature draft 02 + hash-to-curve draft 06 (https://github.com/ethereum/py_ecc/pull/87) * Fixes Typing errors for points at infinity (NoneTypes) (https://github.com/ethereum/py_ecc/pull/89) 2.0.0 ----- 2020-01-08 * Implement [IETF BLS signature draft 00](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-00) 1.7.1 ----- 2019-07-12 * Introduce the `Domain` type as an alias for `bytes` of size 8 in BLS package. 1.7.0 ----- 2019-05-23 * Update hash function for BLS Signatures https://github.com/ethereum/py_ecc/pull/70 1.6.0 ----- 2019-03-14 * Add BLS Signature APIs https://github.com/ethereum/py_ecc/pull/52 * Maintenance: several refactors to reduce duplicated code * https://github.com/ethereum/py_ecc/pull/41 * https://github.com/ethereum/py_ecc/pull/61 * https://github.com/ethereum/py_ecc/pull/56 * https://github.com/ethereum/py_ecc/pull/63 1.4.8 ----- 2019-02-14 * Bugfix Optimized FQP curves to run modulus on integers during initialization * Bugfix check against elliptic curve points at infinity * Testing tool upgrades 1.4.2 ----- * Bugfix for `safe_ord` helper function. py_ecc-8.0.0/newsfragments/000077500000000000000000000000001477723207700156645ustar00rootroot00000000000000py_ecc-8.0.0/newsfragments/README.md000066400000000000000000000020221477723207700171370ustar00rootroot00000000000000This directory collects "newsfragments": short files that each contain a snippet of ReST-formatted text that will be added to the next release notes. This should be a description of aspects of the change (if any) that are relevant to users. (This contrasts with the commit message and PR description, which are a description of the change as relevant to people working on the code itself.) Each file should be named like `..rst`, where `` is an issue number, and `` is one of: - `breaking` - `bugfix` - `deprecation` - `docs` - `feature` - `internal` - `misc` - `performance` - `removal` So for example: `123.feature.rst`, `456.bugfix.rst` If the PR fixes an issue, use that number here. If there is no issue, then open up the PR first and use the PR number for the newsfragment. Note that the `towncrier` tool will automatically reflow your text, so don't try to do any fancy formatting. Run `towncrier build --draft` to get a preview of what the release notes entry will look like in the final release notes. py_ecc-8.0.0/newsfragments/validate_files.py000077500000000000000000000021621477723207700212150ustar00rootroot00000000000000#!/usr/bin/env python3 # Towncrier silently ignores files that do not match the expected ending. # We use this script to ensure we catch these as errors in CI. 
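# Usage (inferred from the argument handling below): run with no arguments to
# check that every newsfragment file name ends in one of the allowed suffixes;
# run with the single argument "is-empty" to assert that no newsfragments
# remain besides README.md and this script.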
import pathlib import sys ALLOWED_EXTENSIONS = { ".breaking.rst", ".bugfix.rst", ".deprecation.rst", ".docs.rst", ".feature.rst", ".internal.rst", ".misc.rst", ".performance.rst", ".removal.rst", } ALLOWED_FILES = { "validate_files.py", "README.md", } THIS_DIR = pathlib.Path(__file__).parent num_args = len(sys.argv) - 1 assert num_args in {0, 1} if num_args == 1: assert sys.argv[1] in ("is-empty",) for fragment_file in THIS_DIR.iterdir(): if fragment_file.name in ALLOWED_FILES: continue elif num_args == 0: full_extension = "".join(fragment_file.suffixes) if full_extension not in ALLOWED_EXTENSIONS: raise Exception(f"Unexpected file: {fragment_file}") elif sys.argv[1] == "is-empty": raise Exception(f"Unexpected file: {fragment_file}") else: raise RuntimeError( f"Strange: arguments {sys.argv} were validated, but not found" ) py_ecc-8.0.0/py_ecc/000077500000000000000000000000001477723207700142435ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/__init__.py000066400000000000000000000017311477723207700163560ustar00rootroot00000000000000import importlib from importlib.metadata import ( version as __version, ) import sys as _sys from types import ( ModuleType, ) from typing import ( List, ) _sys.setrecursionlimit(max(100000, _sys.getrecursionlimit())) __version__ = __version("py_ecc") _lazy_imports = { "bls": "py_ecc.bls", "bls12_381": "py_ecc.bls12_381", "bn128": "py_ecc.bn128", "optimized_bls12_381": "py_ecc.optimized_bls12_381", "optimized_bn128": "py_ecc.optimized_bn128", "secp256k1": "py_ecc.secp256k1", } __all__ = list(_lazy_imports.keys()) def _import_module(name: str) -> ModuleType: module = importlib.import_module(_lazy_imports[name]) globals()[name] = module return module def __getattr__(name: str) -> ModuleType: if name in _lazy_imports: return _import_module(name) raise AttributeError(f"module 'py_ecc' has no attribute '{name}'") def __dir__() -> List[str]: return list(_lazy_imports.keys()) + list(globals().keys()) py_ecc-8.0.0/py_ecc/bls/000077500000000000000000000000001477723207700150235ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/bls/__init__.py000066400000000000000000000001371477723207700171350ustar00rootroot00000000000000from .ciphersuites import ( G2Basic, G2MessageAugmentation, G2ProofOfPossession, ) py_ecc-8.0.0/py_ecc/bls/ciphersuites.py000066400000000000000000000271721477723207700201150ustar00rootroot00000000000000from abc import ( ABC, abstractmethod, ) from hashlib import ( sha256, ) from math import ( ceil, log2, ) from typing import ( Sequence, ) from eth_typing import ( BLSPubkey, BLSSignature, ) from eth_utils import ( ValidationError, ) from py_ecc.fields import ( optimized_bls12_381_FQ12 as FQ12, ) from py_ecc.optimized_bls12_381 import ( G1, Z1, Z2, add, curve_order, final_exponentiate, multiply, neg, pairing, ) from .g2_primitives import ( G1_to_pubkey, G2_to_signature, is_inf, pubkey_to_G1, signature_to_G2, subgroup_check, ) from .hash import ( hkdf_expand, hkdf_extract, i2osp, os2ip, ) from .hash_to_curve import ( hash_to_G2, ) class BaseG2Ciphersuite(ABC): DST = b"" xmd_hash_function = sha256 # # Input validation helpers # @staticmethod def _is_valid_privkey(privkey: int) -> bool: return isinstance(privkey, int) and privkey > 0 and privkey < curve_order @staticmethod def _is_valid_pubkey(pubkey: bytes) -> bool: # SV: minimal-pubkey-size return isinstance(pubkey, bytes) and len(pubkey) == 48 @staticmethod def _is_valid_message(message: bytes) -> bool: return isinstance(message, bytes) @staticmethod def _is_valid_signature(signature: bytes) -> bool: # SV: 
minimal-pubkey-size return isinstance(signature, bytes) and len(signature) == 96 # # APIs # @classmethod def SkToPk(cls, privkey: int) -> BLSPubkey: """ The SkToPk algorithm takes a secret key SK and outputs the corresponding public key PK. Raise `ValidationError` when there is input validation error. """ if not cls._is_valid_privkey(privkey): raise ValidationError("Invalid private key") # Procedure return G1_to_pubkey(multiply(G1, privkey)) @classmethod def KeyGen(cls, IKM: bytes, key_info: bytes = b"") -> int: salt = b"BLS-SIG-KEYGEN-SALT-" SK = 0 while SK == 0: salt = cls.xmd_hash_function(salt).digest() prk = hkdf_extract(salt, IKM + b"\x00") l = ceil((1.5 * ceil(log2(curve_order))) / 8) # noqa: E741 okm = hkdf_expand(prk, key_info + i2osp(l, 2), l) SK = os2ip(okm) % curve_order return SK @staticmethod def KeyValidate(PK: BLSPubkey) -> bool: try: pubkey_point = pubkey_to_G1(PK) except (ValidationError, ValueError, AssertionError): return False if is_inf(pubkey_point): return False if not subgroup_check(pubkey_point): return False return True @classmethod def _CoreSign(cls, SK: int, message: bytes, DST: bytes) -> BLSSignature: """ The CoreSign algorithm computes a signature from SK, a secret key, and message, an octet string. Raise `ValidationError` when there is input validation error. """ # Inputs validation if not cls._is_valid_privkey(SK): raise ValidationError("Invalid secret key") if not cls._is_valid_message(message): raise ValidationError("Invalid message") # Procedure message_point = hash_to_G2(message, DST, cls.xmd_hash_function) signature_point = multiply(message_point, SK) return G2_to_signature(signature_point) @classmethod def _CoreVerify( cls, PK: BLSPubkey, message: bytes, signature: BLSSignature, DST: bytes ) -> bool: try: # Inputs validation if not cls._is_valid_pubkey(PK): raise ValidationError("Invalid public key") if not cls._is_valid_message(message): raise ValidationError("Invalid message") if not cls._is_valid_signature(signature): raise ValidationError("Invalid signature") # Procedure if not cls.KeyValidate(PK): raise ValidationError("Invalid public key") signature_point = signature_to_G2(signature) if not subgroup_check(signature_point): return False final_exponentiation = final_exponentiate( pairing( signature_point, G1, final_exponentiate=False, ) * pairing( hash_to_G2(message, DST, cls.xmd_hash_function), neg(pubkey_to_G1(PK)), final_exponentiate=False, ) ) return final_exponentiation == FQ12.one() except (ValidationError, ValueError, AssertionError): return False @classmethod def Aggregate(cls, signatures: Sequence[BLSSignature]) -> BLSSignature: """ The Aggregate algorithm aggregates multiple signatures into one. Raise `ValidationError` when there is input validation error. """ # Preconditions if len(signatures) < 1: raise ValidationError("Insufficient number of signatures. 
(n < 1)") # Inputs validation for signature in signatures: if not cls._is_valid_signature(signature): raise ValidationError("Invalid signature") # Procedure aggregate = Z2 # Seed with the point at infinity for signature in signatures: signature_point = signature_to_G2(signature) aggregate = add(aggregate, signature_point) return G2_to_signature(aggregate) @classmethod def _CoreAggregateVerify( cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature, DST: bytes, ) -> bool: try: # Inputs validation for pk in PKs: if not cls._is_valid_pubkey(pk): raise ValidationError("Invalid public key") for message in messages: if not cls._is_valid_message(message): raise ValidationError("Invalid message") if not len(PKs) == len(messages): raise ValidationError("Inconsistent number of PKs and messages") if not cls._is_valid_signature(signature): raise ValidationError("Invalid signature") # Preconditions if len(PKs) < 1: raise ValidationError("Insufficient number of PKs. (n < 1)") # Procedure signature_point = signature_to_G2(signature) if not subgroup_check(signature_point): return False aggregate = FQ12.one() for pk, message in zip(PKs, messages): if not cls.KeyValidate(pk): raise ValidationError("Invalid public key") pubkey_point = pubkey_to_G1(pk) message_point = hash_to_G2(message, DST, cls.xmd_hash_function) aggregate *= pairing( message_point, pubkey_point, final_exponentiate=False ) aggregate *= pairing(signature_point, neg(G1), final_exponentiate=False) return final_exponentiate(aggregate) == FQ12.one() except (ValidationError, ValueError, AssertionError): return False @classmethod def Sign(cls, SK: int, message: bytes) -> BLSSignature: return cls._CoreSign(SK, message, cls.DST) @classmethod def Verify(cls, PK: BLSPubkey, message: bytes, signature: BLSSignature) -> bool: return cls._CoreVerify(PK, message, signature, cls.DST) @classmethod @abstractmethod def AggregateVerify( cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature, ) -> bool: ... class G2Basic(BaseG2Ciphersuite): DST = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_" @classmethod def AggregateVerify( cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature, ) -> bool: if len(messages) != len(set(messages)): # Messages are not unique return False return cls._CoreAggregateVerify(PKs, messages, signature, cls.DST) class G2MessageAugmentation(BaseG2Ciphersuite): DST = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_AUG_" @classmethod def Sign(cls, SK: int, message: bytes) -> BLSSignature: PK = cls.SkToPk(SK) return cls._CoreSign(SK, PK + message, cls.DST) @classmethod def Verify(cls, PK: BLSPubkey, message: bytes, signature: BLSSignature) -> bool: return cls._CoreVerify(PK, PK + message, signature, cls.DST) @classmethod def AggregateVerify( cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature, ) -> bool: if len(PKs) != len(messages): return False messages = [pk + msg for pk, msg in zip(PKs, messages)] return cls._CoreAggregateVerify(PKs, messages, signature, cls.DST) class G2ProofOfPossession(BaseG2Ciphersuite): DST = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_" POP_TAG = b"BLS_POP_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_" @classmethod def _is_valid_pubkey(cls, pubkey: bytes) -> bool: """ Note: PopVerify is a precondition for -Verify APIs However, it's difficult to verify it with the API interface in runtime. To ensure KeyValidate has been checked, we check it in the input validation. 
See https://github.com/cfrg/draft-irtf-cfrg-bls-signature/issues/27 for the discussion. """ if not super()._is_valid_pubkey(pubkey): return False return cls.KeyValidate(BLSPubkey(pubkey)) @classmethod def AggregateVerify( cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature, ) -> bool: return cls._CoreAggregateVerify(PKs, messages, signature, cls.DST) @classmethod def PopProve(cls, SK: int) -> BLSSignature: pubkey = cls.SkToPk(SK) return cls._CoreSign(SK, pubkey, cls.POP_TAG) @classmethod def PopVerify(cls, PK: BLSPubkey, proof: BLSSignature) -> bool: return cls._CoreVerify(PK, PK, proof, cls.POP_TAG) @staticmethod def _AggregatePKs(PKs: Sequence[BLSPubkey]) -> BLSPubkey: """ Aggregate the public keys. Raise `ValidationError` when there is input validation error. """ if len(PKs) < 1: raise ValidationError("Insufficient number of PKs. (n < 1)") aggregate = Z1 # Seed with the point at infinity for pk in PKs: pubkey_point = pubkey_to_G1(pk) aggregate = add(aggregate, pubkey_point) return G1_to_pubkey(aggregate) @classmethod def FastAggregateVerify( cls, PKs: Sequence[BLSPubkey], message: bytes, signature: BLSSignature ) -> bool: try: # Inputs validation for pk in PKs: if not cls._is_valid_pubkey(pk): raise ValidationError("Invalid public key") if not cls._is_valid_message(message): raise ValidationError("Invalid message") if not cls._is_valid_signature(signature): raise ValidationError("Invalid signature") # Preconditions if len(PKs) < 1: raise ValidationError("Insufficient number of PKs. (n < 1)") # Procedure aggregate_pubkey = cls._AggregatePKs(PKs) except (ValidationError, AssertionError): return False else: return cls.Verify(aggregate_pubkey, message, signature) py_ecc-8.0.0/py_ecc/bls/constants.py000066400000000000000000000012171477723207700174120ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bls12_381_FQ2 as FQ2, ) from py_ecc.optimized_bls12_381 import ( field_modulus as q, ) G2_COFACTOR = 305502333931268344200999753193121504214466019254188142667664032982267604182971884026507427359259977847832272839041616661285803823378372096355777062779109 # noqa: E501 FQ2_ORDER = q**2 - 1 EIGHTH_ROOTS_OF_UNITY = tuple(FQ2([1, 1]) ** ((FQ2_ORDER * k) // 8) for k in range(8)) POW_2_381 = 2**381 POW_2_382 = 2**382 POW_2_383 = 2**383 POW_2_384 = 2**384 # Parameters for hashing to the field as specified in: # https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-8.8.1 HASH_TO_FIELD_L = 64 py_ecc-8.0.0/py_ecc/bls/g2_primitives.py000066400000000000000000000022131477723207700201560ustar00rootroot00000000000000from eth_typing import ( BLSPubkey, BLSSignature, ) from py_ecc.optimized_bls12_381 import ( curve_order, is_inf, multiply, ) from py_ecc.typing import ( Optimized_Field, Optimized_Point3D, ) from .hash import ( i2osp, os2ip, ) from .point_compression import ( compress_G1, compress_G2, decompress_G1, decompress_G2, ) from .typing import ( G1Compressed, G1Uncompressed, G2Compressed, G2Uncompressed, ) def subgroup_check(P: Optimized_Point3D[Optimized_Field]) -> bool: return is_inf(multiply(P, curve_order)) def G2_to_signature(pt: G2Uncompressed) -> BLSSignature: z1, z2 = compress_G2(pt) return BLSSignature(i2osp(z1, 48) + i2osp(z2, 48)) def signature_to_G2(signature: BLSSignature) -> G2Uncompressed: p = G2Compressed((os2ip(signature[:48]), os2ip(signature[48:]))) signature_point = decompress_G2(p) return signature_point def G1_to_pubkey(pt: G1Uncompressed) -> BLSPubkey: z = compress_G1(pt) return BLSPubkey(i2osp(z, 48)) def 
pubkey_to_G1(pubkey: BLSPubkey) -> G1Uncompressed: z = os2ip(pubkey) return decompress_G1(G1Compressed(z)) py_ecc-8.0.0/py_ecc/bls/hash.py000066400000000000000000000047311477723207700163250ustar00rootroot00000000000000import hashlib import hmac import math from typing import ( Union, ) from _hashlib import ( HASH, ) def hkdf_extract(salt: Union[bytes, bytearray], ikm: Union[bytes, bytearray]) -> bytes: """ HKDF-Extract https://tools.ietf.org/html/rfc5869 """ return hmac.new(salt, ikm, hashlib.sha256).digest() def hkdf_expand( prk: Union[bytes, bytearray], info: Union[bytes, bytearray], length: int ) -> bytes: """ HKDF-Expand https://tools.ietf.org/html/rfc5869 """ n = math.ceil(length / 32) # okm = T(1) || T(2) || T(3) || ... || T(n) okm = bytearray(0) previous = bytearray(0) for i in range(0, n): # Concatenate (T(i) || info || i) text = previous + info + bytes([i + 1]) # T(i + 1) = HMAC(T(i) || info || i) previous = bytearray(hmac.new(prk, text, hashlib.sha256).digest()) okm.extend(previous) # Return first `length` bytes. return okm[:length] def i2osp(x: int, xlen: int) -> bytes: """ Convert a nonnegative integer `x` to an octet string of a specified length `xlen`. https://tools.ietf.org/html/rfc8017#section-4.1 """ return x.to_bytes(xlen, byteorder="big", signed=False) def os2ip(x: bytes) -> int: """ Convert an octet string `x` to a nonnegative integer. https://tools.ietf.org/html/rfc8017#section-4.2 """ return int.from_bytes(x, byteorder="big", signed=False) def sha256(x: bytes) -> bytes: return hashlib.sha256(x).digest() def xor(a: bytes, b: bytes) -> bytes: return bytes(_a ^ _b for _a, _b in zip(a, b)) def expand_message_xmd( msg: bytes, DST: bytes, len_in_bytes: int, hash_function: HASH ) -> bytes: b_in_bytes = hash_function().digest_size r_in_bytes = hash_function().block_size if len(DST) > 255: raise ValueError("DST must be <= 255 bytes") ell = math.ceil(len_in_bytes / b_in_bytes) if ell > 255: raise ValueError("invalid len in bytes for hash function") DST_prime = DST + i2osp( len(DST), 1 ) # Append the length of the DST as a single byte Z_pad = b"\x00" * r_in_bytes l_i_b_str = i2osp(len_in_bytes, 2) b_0 = hash_function(Z_pad + msg + l_i_b_str + b"\x00" + DST_prime).digest() b = [hash_function(b_0 + b"\x01" + DST_prime).digest()] for i in range(2, ell + 1): b.append(hash_function(xor(b_0, b[i - 2]) + i2osp(i, 1) + DST_prime).digest()) pseudo_random_bytes = b"".join(b) return pseudo_random_bytes[:len_in_bytes] py_ecc-8.0.0/py_ecc/bls/hash_to_curve.py000066400000000000000000000114161477723207700202310ustar00rootroot00000000000000from typing import ( Tuple, ) from _hashlib import ( HASH, ) from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, ) from py_ecc.optimized_bls12_381 import ( add, field_modulus, iso_map_G1, iso_map_G2, multiply_clear_cofactor_G1, multiply_clear_cofactor_G2, optimized_swu_G1, optimized_swu_G2, ) from .constants import ( HASH_TO_FIELD_L, ) from .hash import ( expand_message_xmd, os2ip, ) from .typing import ( G1Uncompressed, G2Uncompressed, ) # Hash to G2 def hash_to_G2(message: bytes, DST: bytes, hash_function: HASH) -> G2Uncompressed: """ Convert a message to a point on G2 as defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-6.6.3 The idea is to first hash into FQ2 and then use SSWU to map the result into G2. 
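    Concretely, the steps below are: ``hash_to_field_FQ2`` derives two field
    elements from the message, each is mapped to a curve point with
    ``map_to_curve_G2`` (simplified SWU followed by the 3-isogeny map), the two
    points are added, and the cofactor is cleared with ``clear_cofactor_G2``.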
Contents and inputs follow the ciphersuite ``BLS12381G2_XMD:SHA-256_SSWU_RO_`` defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-8.8.2 """ u0, u1 = hash_to_field_FQ2(message, 2, DST, hash_function) q0 = map_to_curve_G2(u0) q1 = map_to_curve_G2(u1) r = add(q0, q1) p = clear_cofactor_G2(r) return p def hash_to_field_FQ2( message: bytes, count: int, DST: bytes, hash_function: HASH ) -> Tuple[FQ2, ...]: """ Hash To Base Field for FQ2 Convert a message to a point in the finite field as defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-5.3 """ M = 2 # m is the extension degree of FQ2 len_in_bytes = count * M * HASH_TO_FIELD_L pseudo_random_bytes = expand_message_xmd(message, DST, len_in_bytes, hash_function) u = [] for i in range(0, count): e = [] for j in range(0, M): elem_offset = HASH_TO_FIELD_L * (j + i * M) tv = pseudo_random_bytes[elem_offset : elem_offset + HASH_TO_FIELD_L] e.append(os2ip(tv) % field_modulus) u.append(FQ2(e)) return tuple(u) def map_to_curve_G2(u: FQ2) -> G2Uncompressed: """ Map To Curve for G2 First, convert FQ2 point to a point on the 3-Isogeny curve. SWU Map: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-6.6.3 Second, map 3-Isogeny curve to BLS12-381-G2 curve. 3-Isogeny Map: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#appendix-C.3 """ (x, y, z) = optimized_swu_G2(u) return iso_map_G2(x, y, z) def clear_cofactor_G2(p: G2Uncompressed) -> G2Uncompressed: """ Clear Cofactor via Multiplication Ensure a point falls in the correct sub group of the curve. """ return multiply_clear_cofactor_G2(p) # --- G1 --- def hash_to_G1(message: bytes, DST: bytes, hash_function: HASH) -> G1Uncompressed: """ Convert a message to a point on G1 as defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-6.6.3 The idea is to first hash into FQ and then use SSWU to map the result into G1. Contents and inputs follow the ciphersuite ``BLS12381G1_XMD:SHA-256_SSWU_RO_`` defined here: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-09#section-8.8.1 """ u0, u1 = hash_to_field_FQ(message, 2, DST, hash_function) q0 = map_to_curve_G1(u0) q1 = map_to_curve_G1(u1) r = add(q0, q1) p = clear_cofactor_G1(r) return p def hash_to_field_FQ( message: bytes, count: int, DST: bytes, hash_function: HASH ) -> Tuple[FQ, ...]: """ Hash To Base Field for FQ Convert a message to a point in the finite field as defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-5.3 """ M = 1 # m is the extension degree of FQ len_in_bytes = count * M * HASH_TO_FIELD_L pseudo_random_bytes = expand_message_xmd(message, DST, len_in_bytes, hash_function) u = [] for i in range(0, count): elem_offset = HASH_TO_FIELD_L * (i * M) tv = pseudo_random_bytes[elem_offset : elem_offset + HASH_TO_FIELD_L] u.append(FQ(os2ip(tv) % field_modulus)) return tuple(u) def map_to_curve_G1(u: FQ) -> G1Uncompressed: """ Map To Curve for G1 First, convert FQ point to a point on the 11-Isogeny curve. SWU Map: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-6.6.3 Second, map 11-Isogeny curve to BLS12-381-G1 curve. 11-Isogeny Map: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-09#name-11-isogeny-map-for-bls12-38 """ (x, y, z) = optimized_swu_G1(u) return iso_map_G1(x, y, z) def clear_cofactor_G1(p: G1Uncompressed) -> G1Uncompressed: """ Clear Cofactor via Multiplication Ensure a point falls in the correct subgroup of the curve. 
""" return multiply_clear_cofactor_G1(p) py_ecc-8.0.0/py_ecc/bls/point_compression.py000066400000000000000000000157131477723207700211560ustar00rootroot00000000000000from typing import ( Optional, Tuple, ) from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, ) from py_ecc.optimized_bls12_381 import ( Z1, Z2, b, b2, field_modulus as q, is_inf, is_on_curve, normalize, ) from .constants import ( EIGHTH_ROOTS_OF_UNITY, FQ2_ORDER, POW_2_381, POW_2_382, POW_2_383, ) from .typing import ( G1Compressed, G1Uncompressed, G2Compressed, G2Uncompressed, ) # # The most-significant three bits of a G1 or G2 encoding should be masked away before # the coordinate(s) are interpreted. # These bits are used to unambiguously represent the underlying element # The format: (c_flag, b_flag, a_flag, x) # https://github.com/zcash/librustzcash/blob/6e0364cd42a2b3d2b958a54771ef51a8db79dd29/pairing/src/bls12_381/README.md#bls12-381-instantiation # noqa: E501 # def get_flags(z: int) -> Tuple[bool, bool, bool]: c_flag = bool((z >> 383) & 1) # The most significant bit. b_flag = bool((z >> 382) & 1) # The second-most significant bit. a_flag = bool((z >> 381) & 1) # The third-most significant bit. return c_flag, b_flag, a_flag def is_point_at_infinity(z1: int, z2: Optional[int] = None) -> bool: """ If z2 is None, the given z1 is a G1 point. Else, (z1, z2) is a G2 point. """ return (z1 % POW_2_381 == 0) and (z2 is None or z2 == 0) # # G1 # def compress_G1(pt: G1Uncompressed) -> G1Compressed: """ A compressed point is a 384-bit integer with the bit order (c_flag, b_flag, a_flag, x), where the c_flag bit is always set to 1, the b_flag bit indicates infinity when set to 1, the a_flag bit helps determine the y-coordinate when decompressing, and the 381-bit integer x is the x-coordinate of the point. """ if is_inf(pt): # Set c_flag = 1 and b_flag = 1. leave a_flag = x = 0 return G1Compressed(POW_2_383 + POW_2_382) else: x, y = normalize(pt) # Record y's leftmost bit to the a_flag a_flag = (y.n * 2) // q # Set c_flag = 1 and b_flag = 0 return G1Compressed(x.n + a_flag * POW_2_381 + POW_2_383) def decompress_G1(z: G1Compressed) -> G1Uncompressed: """ Recovers x and y coordinates from the compressed point. """ c_flag, b_flag, a_flag = get_flags(z) # c_flag == 1 indicates the compressed form # MSB should be 1 if not c_flag: raise ValueError("c_flag should be 1") is_inf_pt = is_point_at_infinity(z) if b_flag != is_inf_pt: raise ValueError(f"b_flag should be {int(is_inf_pt)}") if is_inf_pt: # 3 MSBs should be 110 if a_flag: raise ValueError("a point at infinity should have a_flag == 0") return Z1 # Else, not point at infinity # 3 MSBs should be 100 or 101 x = z % POW_2_381 if x >= q: raise ValueError(f"Point value should be less than field modulus. Got {x}") # Try solving y coordinate from the equation Y^2 = X^3 + b # using quadratic residue y = pow((x**3 + b.n) % q, (q + 1) // 4, q) if pow(y, 2, q) != (x**3 + b.n) % q: raise ValueError("The given point is not on G1: y**2 = x**3 + b") # Choose the y whose leftmost bit is equal to the a_flag if (y * 2) // q != int(a_flag): y = q - y return (FQ(x), FQ(y), FQ(1)) # # G2 # def modular_squareroot_in_FQ2(value: FQ2) -> Optional[FQ2]: """ Given value=``x``, returns the value ``y`` such that ``y**2 % q == x``, and None if this is not possible. In cases where there are two solutions, the value with higher imaginary component is favored; if both solutions have equal imaginary component the value with higher real component is favored. 
""" candidate_squareroot = value ** ((FQ2_ORDER + 8) // 16) check = candidate_squareroot**2 / value if check in EIGHTH_ROOTS_OF_UNITY[::2]: x1 = ( candidate_squareroot / EIGHTH_ROOTS_OF_UNITY[EIGHTH_ROOTS_OF_UNITY.index(check) // 2] ) x2 = -x1 x1_re, x1_im = x1.coeffs x2_re, x2_im = x2.coeffs return x1 if (x1_im > x2_im or (x1_im == x2_im and x1_re > x2_re)) else x2 return None def compress_G2(pt: G2Uncompressed) -> G2Compressed: """ The compressed point (z1, z2) has the bit order: z1: (c_flag1, b_flag1, a_flag1, x1) z2: (c_flag2, b_flag2, a_flag2, x2) where - c_flag1 is always set to 1 - b_flag1 indicates infinity when set to 1 - a_flag1 helps determine the y-coordinate when decompressing, - a_flag2, b_flag2, and c_flag2 are always set to 0 """ if not is_on_curve(pt, b2): raise ValueError("The given point is not on the twisted curve over FQ**2") if is_inf(pt): return G2Compressed((POW_2_383 + POW_2_382, 0)) x, y = normalize(pt) x_re, x_im = x.coeffs y_re, y_im = y.coeffs # Record the leftmost bit of y_im to the a_flag1 # If y_im happens to be zero, then use the bit of y_re a_flag1 = (int(y_im) * 2) // q if y_im > 0 else (int(y_re) * 2) // q # Imaginary part of x goes to z1, real part goes to z2 # c_flag1 = 1, b_flag1 = 0 z1 = x_im + a_flag1 * POW_2_381 + POW_2_383 # a_flag2 = b_flag2 = c_flag2 = 0 z2 = x_re return G2Compressed((int(z1), int(z2))) def decompress_G2(p: G2Compressed) -> G2Uncompressed: """ Recovers x and y coordinates from the compressed point (z1, z2). """ z1, z2 = p c_flag1, b_flag1, a_flag1 = get_flags(z1) # c_flag == 1 indicates the compressed form # MSB should be 1 if not c_flag1: raise ValueError("c_flag should be 1") is_inf_pt = is_point_at_infinity(z1, z2) if b_flag1 != is_inf_pt: raise ValueError(f"b_flag should be {int(is_inf_pt)}") if is_inf_pt: # 3 MSBs should be 110 if a_flag1: raise ValueError("a point at infinity should have a_flag == 0") return Z2 # Else, not point at infinity # 3 MSBs should be 100 or 101 x1 = z1 % POW_2_381 # Ensure that x1 is less than the field modulus. if x1 >= q: raise ValueError(f"x1 value should be less than field modulus. Got {x1}") # Ensure that z2 is less than the field modulus. if z2 >= q: raise ValueError(f"z2 point value should be less than field modulus. 
Got {z2}") x2 = z2 # x1 is the imaginary part, x2 is the real part x = FQ2([x2, x1]) y = modular_squareroot_in_FQ2(x**3 + b2) if y is None: raise ValueError("Failed to find a modular squareroot") # Choose the y whose leftmost bit of the imaginary part is equal to the a_flag1 # If y_im happens to be zero, then use the bit of y_re y_re, y_im = y.coeffs if (y_im > 0 and (int(y_im) * 2) // q != int(a_flag1)) or ( y_im == 0 and (int(y_re) * 2) // q != int(a_flag1) ): y = FQ2((y * -1).coeffs) if not is_on_curve((x, y, FQ2([1, 0])), b2): raise ValueError("The given point is not on the twisted curve over FQ**2") return (x, y, FQ2([1, 0])) py_ecc-8.0.0/py_ecc/bls/typing.py000066400000000000000000000006311477723207700167070ustar00rootroot00000000000000from typing import ( NewType, Tuple, ) from py_ecc.fields import ( optimized_bls12_381_FQ, optimized_bls12_381_FQ2, ) from py_ecc.typing import ( Optimized_Point3D, ) G1Uncompressed = Optimized_Point3D[optimized_bls12_381_FQ] G1Compressed = NewType("G1Compressed", int) G2Uncompressed = Optimized_Point3D[optimized_bls12_381_FQ2] G2Compressed = NewType("G2Compressed", Tuple[int, int]) py_ecc-8.0.0/py_ecc/bls12_381/000077500000000000000000000000001477723207700155615ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/bls12_381/__init__.py000066400000000000000000000006611477723207700176750ustar00rootroot00000000000000from py_ecc.fields import ( bls12_381_FQ as FQ, bls12_381_FQ2 as FQ2, bls12_381_FQ12 as FQ12, bls12_381_FQP as FQP, ) from .bls12_381_curve import ( G1, G2, G12, Z1, Z2, add, b, b2, b12, curve_order, double, eq, field_modulus, is_inf, is_on_curve, multiply, neg, twist, ) from .bls12_381_pairing import ( final_exponentiate, pairing, ) py_ecc-8.0.0/py_ecc/bls12_381/bls12_381_curve.py000066400000000000000000000111411477723207700206530ustar00rootroot00000000000000from py_ecc.fields import ( bls12_381_FQ as FQ, bls12_381_FQ2 as FQ2, bls12_381_FQ12 as FQ12, bls12_381_FQP as FQP, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Field, GeneralPoint, Point2D, ) field_modulus = field_properties["bls12_381"]["field_modulus"] curve_order = ( 52435875175126190479447740508185965837690552500527637822603658699938581184513 ) # Curve order should be prime if not pow(2, curve_order, curve_order) == 2: raise ValueError("Curve order is not prime") # Curve order should be a factor of field_modulus**12 - 1 if not (field_modulus**12 - 1) % curve_order == 0: raise ValueError("Curve order is not a factor of field_modulus**12 - 1") # Curve is y**2 = x**3 + 4 b = FQ(4) # Twisted curve over FQ**2 b2 = FQ2((4, 4)) # Extension curve over FQ**12; same b value as over FQ b12 = FQ12((4,) + (0,) * 11) # Generator for curve over FQ G1 = ( FQ( 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 # noqa: E501 ), # noqa: E501 FQ( 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 # noqa: E501 ), # noqa: E501 ) # Generator for twisted curve over FQ2 G2 = ( FQ2( [ 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160, # noqa: E501 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758, # noqa: E501 ] ), FQ2( [ 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905, # noqa: E501 
927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582, # noqa: E501 ] ), ) # Point at infinity over FQ Z1 = None # Point at infinity for twisted curve over FQ2 Z2 = None # Check if a point is the point at infinity def is_inf(pt: GeneralPoint[Field]) -> bool: return pt is None # Check that a point is on the curve defined by y**2 == x**3 + b def is_on_curve(pt: Point2D[Field], b: Field) -> bool: if is_inf(pt) or pt is None: return True x, y = pt return y**2 - x**3 == b if not is_on_curve(G1, b): raise ValueError("Generator is not on curve") if not is_on_curve(G2, b2): raise ValueError("Generator is not on twisted curve") # Elliptic curve doubling def double(pt: Point2D[Field]) -> Point2D[Field]: if is_inf(pt) or pt is None: return pt x, y = pt m = 3 * x**2 / (2 * y) newx = m**2 - 2 * x newy = -m * newx + m * x - y return (newx, newy) # Elliptic curve addition def add(p1: Point2D[Field], p2: Point2D[Field]) -> Point2D[Field]: if p1 is None or p2 is None: return p1 if p2 is None else p2 x1, y1 = p1 x2, y2 = p2 if x2 == x1 and y2 == y1: return double(p1) elif x2 == x1: return None else: m = (y2 - y1) / (x2 - x1) newx = m**2 - x1 - x2 newy = -m * newx + m * x1 - y1 if not newy == (-m * newx + m * x2 - y2): raise ValueError("Point addition is incorrect") return (newx, newy) # Elliptic curve point multiplication def multiply(pt: Point2D[Field], n: int) -> Point2D[Field]: if n == 0: return None elif n == 1: return pt elif not n % 2: return multiply(double(pt), n // 2) else: return add(multiply(double(pt), int(n // 2)), pt) def eq(p1: GeneralPoint[Field], p2: GeneralPoint[Field]) -> bool: return p1 == p2 # "Twist" a point in E(FQ2) into a point in E(FQ12) w = FQ12([0, 1] + [0] * 10) # Convert P => -P def neg(pt: Point2D[Field]) -> Point2D[Field]: if pt is None: return None x, y = pt return (x, -y) def twist(pt: Point2D[FQP]) -> Point2D[FQ12]: if pt is None: return None _x, _y = pt # Field isomorphism from Z[p] / x**2 to Z[p] / x**2 - 2*x + 2 xcoeffs = [_x.coeffs[0] - _x.coeffs[1], _x.coeffs[1]] ycoeffs = [_y.coeffs[0] - _y.coeffs[1], _y.coeffs[1]] # Isomorphism into subfield of Z[p] / w**12 - 2 * w**6 + 2, # where w**6 = x nx = FQ12([xcoeffs[0]] + [0] * 5 + [xcoeffs[1]] + [0] * 5) ny = FQ12([ycoeffs[0]] + [0] * 5 + [ycoeffs[1]] + [0] * 5) # Divide x coord by w**2 and y coord by w**3 return (nx / w**2, ny / w**3) G12 = twist(G2) # Check that the twist creates a point that is on the curve if not is_on_curve(G12, b12): raise ValueError("Twist creates a point not on curve") py_ecc-8.0.0/py_ecc/bls12_381/bls12_381_pairing.py000066400000000000000000000063541477723207700211720ustar00rootroot00000000000000from py_ecc.fields import ( bls12_381_FQ as FQ, bls12_381_FQ2 as FQ2, bls12_381_FQ12 as FQ12, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Field, Point2D, ) from .bls12_381_curve import ( G1, add, b, b2, curve_order, double, is_on_curve, multiply, twist, ) field_modulus = field_properties["bls12_381"]["field_modulus"] ate_loop_count = 15132376222941642752 log_ate_loop_count = 62 # Create a function representing the line between P1 and P2, # and evaluate it at T def linefunc(P1: Point2D[Field], P2: Point2D[Field], T: Point2D[Field]) -> Field: if P1 is None or P2 is None or T is None: # No points-at-infinity allowed, sorry raise ValueError("Invalid input - no points-at-infinity allowed") x1, y1 = P1 x2, y2 = P2 xt, yt = T if x1 != x2: m = (y2 - y1) / (x2 - x1) return m * (xt - x1) - (yt - y1) elif 
y1 == y2: m = 3 * x1**2 / (2 * y1) return m * (xt - x1) - (yt - y1) else: return xt - x1 def cast_point_to_fq12(pt: Point2D[FQ]) -> Point2D[FQ12]: if pt is None: return None x, y = pt return (FQ12([x.n] + [0] * 11), FQ12([y.n] + [0] * 11)) # Check consistency of the "line function" one, two, three = G1, double(G1), multiply(G1, 3) negone, negtwo, negthree = ( multiply(G1, curve_order - 1), multiply(G1, curve_order - 2), multiply(G1, curve_order - 3), ) conditions = [ linefunc(one, two, one) == FQ(0), linefunc(one, two, two) == FQ(0), linefunc(one, two, three) != FQ(0), linefunc(one, two, negthree) == FQ(0), linefunc(one, negone, one) == FQ(0), linefunc(one, negone, negone) == FQ(0), linefunc(one, negone, two) != FQ(0), linefunc(one, one, one) == FQ(0), linefunc(one, one, two) != FQ(0), linefunc(one, one, negtwo) == FQ(0), ] if not all(conditions): raise ValueError("Line function is inconsistent") # Main miller loop def miller_loop(Q: Point2D[FQ12], P: Point2D[FQ12]) -> FQ12: if Q is None or P is None: return FQ12.one() R: Point2D[FQ12] = Q f = FQ12.one() for i in range(log_ate_loop_count, -1, -1): f = f * f * linefunc(R, R, P) R = double(R) if ate_loop_count & (2**i): f = f * linefunc(R, Q, P) R = add(R, Q) # assert R == multiply(Q, ate_loop_count) # Q1 = (Q[0] ** field_modulus, Q[1] ** field_modulus) # assert is_on_curve(Q1, b12) # nQ2 = (Q1[0] ** field_modulus, -Q1[1] ** field_modulus) # assert is_on_curve(nQ2, b12) # f = f * linefunc(R, Q1, P) # R = add(R, Q1) # f = f * linefunc(R, nQ2, P) # R = add(R, nQ2) This line is in many specifications but technically does nothing return f ** ((field_modulus**12 - 1) // curve_order) # Pairing computation def pairing(Q: Point2D[FQ2], P: Point2D[FQ]) -> FQ12: if not is_on_curve(Q, b2): raise ValueError("Invalid input - point Q is not on the correct curve") if not is_on_curve(P, b): raise ValueError("Invalid input - point P is not on the correct curves") return miller_loop(twist(Q), cast_point_to_fq12(P)) def final_exponentiate(p: Field) -> Field: return p ** ((field_modulus**12 - 1) // curve_order) py_ecc-8.0.0/py_ecc/bn128/000077500000000000000000000000001477723207700150755ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/bn128/__init__.py000066400000000000000000000006311477723207700172060ustar00rootroot00000000000000from py_ecc.fields import ( bn128_FQ as FQ, bn128_FQ2 as FQ2, bn128_FQ12 as FQ12, bn128_FQP as FQP, ) from .bn128_curve import ( G1, G2, G12, Z1, Z2, add, b, b2, b12, curve_order, double, eq, field_modulus, is_inf, is_on_curve, multiply, neg, twist, ) from .bn128_pairing import ( final_exponentiate, pairing, ) py_ecc-8.0.0/py_ecc/bn128/bn128_curve.py000066400000000000000000000102201477723207700175000ustar00rootroot00000000000000from py_ecc.fields import ( bn128_FQ as FQ, bn128_FQ2 as FQ2, bn128_FQ12 as FQ12, bn128_FQP as FQP, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Field, GeneralPoint, Point2D, ) field_modulus = field_properties["bn128"]["field_modulus"] curve_order = ( 21888242871839275222246405745257275088548364400416034343698204186575808495617 ) # Curve order should be prime if not pow(2, curve_order, curve_order) == 2: raise ValueError("Curve order is not prime") # Curve order should be a factor of field_modulus**12 - 1 if not (field_modulus**12 - 1) % curve_order == 0: raise ValueError("Curve order is not a factor of field_modulus**12 - 1") # Curve is y**2 = x**3 + 3 b = FQ(3) # Twisted curve over FQ**2 b2 = FQ2([3, 0]) / FQ2([9, 1]) # Extension curve over FQ**12; same b value 
as over FQ b12 = FQ12([3] + [0] * 11) # Generator for curve over FQ G1 = (FQ(1), FQ(2)) # Generator for twisted curve over FQ2 G2 = ( FQ2( [ 10857046999023057135944570762232829481370756359578518086990519993285655852781, # noqa: E501 11559732032986387107991004021392285783925812861821192530917403151452391805634, # noqa: E501 ] ), FQ2( [ 8495653923123431417604973247489272438418190587263600148770280649306958101930, # noqa: E501 4082367875863433681332203403145435568316851327593401208105741076214120093531, # noqa: E501 ] ), ) # Point at infinity over FQ Z1 = None # Point at infinity for twisted curve over FQ2 Z2 = None # Check if a point is the point at infinity def is_inf(pt: GeneralPoint[Field]) -> bool: return pt is None # Check that a point is on the curve defined by y**2 == x**3 + b def is_on_curve(pt: Point2D[Field], b: Field) -> bool: if is_inf(pt) or pt is None: return True x, y = pt return y**2 - x**3 == b if not is_on_curve(G1, b): raise ValueError("G1 is not on the curve") if not is_on_curve(G2, b2): raise ValueError("G2 is not on the curve") # Elliptic curve doubling def double(pt: Point2D[Field]) -> Point2D[Field]: if is_inf(pt) or pt is None: return pt x, y = pt m = 3 * x**2 / (2 * y) newx = m**2 - 2 * x newy = -m * newx + m * x - y return (newx, newy) # Elliptic curve addition def add(p1: Point2D[Field], p2: Point2D[Field]) -> Point2D[Field]: if p1 is None or p2 is None: return p1 if p2 is None else p2 x1, y1 = p1 x2, y2 = p2 if x2 == x1 and y2 == y1: return double(p1) elif x2 == x1: return None else: m = (y2 - y1) / (x2 - x1) newx = m**2 - x1 - x2 newy = -m * newx + m * x1 - y1 if not newy == (-m * newx + m * x2 - y2): raise ValueError("Point addition is incorrect") return (newx, newy) # Elliptic curve point multiplication def multiply(pt: Point2D[Field], n: int) -> Point2D[Field]: if n == 0: return None elif n == 1: return pt elif not n % 2: return multiply(double(pt), n // 2) else: return add(multiply(double(pt), int(n // 2)), pt) def eq(p1: GeneralPoint[Field], p2: GeneralPoint[Field]) -> bool: return p1 == p2 # "Twist" a point in E(FQ2) into a point in E(FQ12) w = FQ12([0, 1] + [0] * 10) # Convert P => -P def neg(pt: Point2D[Field]) -> Point2D[Field]: if pt is None: return None x, y = pt return (x, -y) def twist(pt: Point2D[FQP]) -> Point2D[FQ12]: if pt is None: return None _x, _y = pt # Field isomorphism from Z[p] / x**2 to Z[p] / x**2 - 18*x + 82 xcoeffs = [_x.coeffs[0] - _x.coeffs[1] * 9, _x.coeffs[1]] ycoeffs = [_y.coeffs[0] - _y.coeffs[1] * 9, _y.coeffs[1]] # Isomorphism into subfield of Z[p] / w**12 - 18 * w**6 + 82, # where w**6 = x nx = FQ12([int(xcoeffs[0])] + [0] * 5 + [int(xcoeffs[1])] + [0] * 5) ny = FQ12([int(ycoeffs[0])] + [0] * 5 + [int(ycoeffs[1])] + [0] * 5) # Divide x coord by w**2 and y coord by w**3 return (nx * w**2, ny * w**3) G12 = twist(G2) # Check that the twist creates a point that is on the curve if not is_on_curve(G12, b12): raise ValueError("Twist creates a point not on curve") py_ecc-8.0.0/py_ecc/bn128/bn128_pairing.py000066400000000000000000000063521477723207700200200ustar00rootroot00000000000000from py_ecc.fields import ( bn128_FQ as FQ, bn128_FQ2 as FQ2, bn128_FQ12 as FQ12, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Field, Point2D, ) from .bn128_curve import ( G1, add, b, b2, curve_order, double, is_on_curve, multiply, twist, ) field_modulus = field_properties["bn128"]["field_modulus"] ate_loop_count = 29793968203157093288 log_ate_loop_count = 63 # Create a function representing the line 
between P1 and P2, # and evaluate it at T def linefunc(P1: Point2D[Field], P2: Point2D[Field], T: Point2D[Field]) -> Field: if P1 is None or P2 is None or T is None: # No points-at-infinity allowed, sorry raise ValueError("Invalid input - no points-at-infinity allowed") x1, y1 = P1 x2, y2 = P2 xt, yt = T if x1 != x2: m = (y2 - y1) / (x2 - x1) return m * (xt - x1) - (yt - y1) elif y1 == y2: m = 3 * x1**2 / (2 * y1) return m * (xt - x1) - (yt - y1) else: return xt - x1 def cast_point_to_fq12(pt: Point2D[FQ]) -> Point2D[FQ12]: if pt is None: return None x, y = pt fq12_point = (FQ12([x.n] + [0] * 11), FQ12([y.n] + [0] * 11)) return fq12_point # Check consistency of the "line function" one, two, three = G1, double(G1), multiply(G1, 3) negone, negtwo, negthree = ( multiply(G1, curve_order - 1), multiply(G1, curve_order - 2), multiply(G1, curve_order - 3), ) conditions = [ linefunc(one, two, one) == FQ(0), linefunc(one, two, two) == FQ(0), linefunc(one, two, three) != FQ(0), linefunc(one, two, negthree) == FQ(0), linefunc(one, negone, one) == FQ(0), linefunc(one, negone, negone) == FQ(0), linefunc(one, negone, two) != FQ(0), linefunc(one, one, one) == FQ(0), linefunc(one, one, two) != FQ(0), linefunc(one, one, negtwo) == FQ(0), ] if not all(conditions): raise ValueError("Line function is inconsistent") # Main miller loop def miller_loop(Q: Point2D[FQ12], P: Point2D[FQ12]) -> FQ12: if Q is None or P is None: return FQ12.one() R: Point2D[FQ12] = Q f = FQ12.one() for i in range(log_ate_loop_count, -1, -1): f = f * f * linefunc(R, R, P) R = double(R) if ate_loop_count & (2**i): f = f * linefunc(R, Q, P) R = add(R, Q) # assert R == multiply(Q, ate_loop_count) Q1 = (Q[0] ** field_modulus, Q[1] ** field_modulus) # assert is_on_curve(Q1, b12) nQ2 = (Q1[0] ** field_modulus, -Q1[1] ** field_modulus) # assert is_on_curve(nQ2, b12) f = f * linefunc(R, Q1, P) R = add(R, Q1) f = f * linefunc(R, nQ2, P) # R = add(R, nQ2) This line is in many specifications but technically does nothing return f ** ((field_modulus**12 - 1) // curve_order) # Pairing computation def pairing(Q: Point2D[FQ2], P: Point2D[FQ]) -> FQ12: if not is_on_curve(Q, b2): raise ValueError("Invalid input - point Q is not on the correct curve") if not is_on_curve(P, b): raise ValueError("Invalid input - point P is not on the correct curves") return miller_loop(twist(Q), cast_point_to_fq12(P)) def final_exponentiate(p: Field) -> Field: return p ** ((field_modulus**12 - 1) // curve_order) py_ecc-8.0.0/py_ecc/fields/000077500000000000000000000000001477723207700155115ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/fields/__init__.py000066400000000000000000000053201477723207700176220ustar00rootroot00000000000000from .field_elements import ( FQ, FQ2, FQ12, FQP, ) from .field_properties import ( field_properties, ) from .optimized_field_elements import ( FQ as optimized_FQ, FQ2 as optimized_FQ2, FQ12 as optimized_FQ12, FQP as optimized_FQP, ) # # bn128 curve fields # class bn128_FQ(FQ): field_modulus = field_properties["bn128"]["field_modulus"] class bn128_FQP(FQP): field_modulus = field_properties["bn128"]["field_modulus"] class bn128_FQ2(FQ2, bn128_FQP): field_modulus = field_properties["bn128"]["field_modulus"] FQ2_MODULUS_COEFFS = field_properties["bn128"]["fq2_modulus_coeffs"] class bn128_FQ12(FQ12, bn128_FQP): field_modulus = field_properties["bn128"]["field_modulus"] FQ12_MODULUS_COEFFS = field_properties["bn128"]["fq12_modulus_coeffs"] # # bls12_381 curve fields # class bls12_381_FQ(FQ): field_modulus = 
field_properties["bls12_381"]["field_modulus"] class bls12_381_FQP(FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] class bls12_381_FQ2(FQ2, bls12_381_FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] FQ2_MODULUS_COEFFS = field_properties["bls12_381"]["fq2_modulus_coeffs"] class bls12_381_FQ12(FQ12, bls12_381_FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] FQ12_MODULUS_COEFFS = field_properties["bls12_381"]["fq12_modulus_coeffs"] # # optimized_bn128 curve fields # class optimized_bn128_FQ(optimized_FQ): field_modulus = field_properties["bn128"]["field_modulus"] class optimized_bn128_FQP(optimized_FQP): field_modulus = field_properties["bn128"]["field_modulus"] class optimized_bn128_FQ2(optimized_FQ2, optimized_bn128_FQP): field_modulus = field_properties["bn128"]["field_modulus"] FQ2_MODULUS_COEFFS = field_properties["bn128"]["fq2_modulus_coeffs"] class optimized_bn128_FQ12(optimized_FQ12, optimized_bn128_FQP): field_modulus = field_properties["bn128"]["field_modulus"] FQ12_MODULUS_COEFFS = field_properties["bn128"]["fq12_modulus_coeffs"] # # optimized_bls12_381 curve fields # class optimized_bls12_381_FQ(optimized_FQ): field_modulus = field_properties["bls12_381"]["field_modulus"] class optimized_bls12_381_FQP(optimized_FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] class optimized_bls12_381_FQ2(optimized_FQ2, optimized_bls12_381_FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] FQ2_MODULUS_COEFFS = field_properties["bls12_381"]["fq2_modulus_coeffs"] class optimized_bls12_381_FQ12(optimized_FQ12, optimized_bls12_381_FQP): field_modulus = field_properties["bls12_381"]["field_modulus"] FQ12_MODULUS_COEFFS = field_properties["bls12_381"]["fq12_modulus_coeffs"] py_ecc-8.0.0/py_ecc/fields/field_elements.py000066400000000000000000000303511477723207700210440ustar00rootroot00000000000000from functools import ( total_ordering, ) from typing import ( TYPE_CHECKING, Any, List, Sequence, Tuple, Type, TypeVar, Union, cast, ) from py_ecc.utils import ( deg, poly_rounded_div, prime_field_inv, ) if TYPE_CHECKING: from py_ecc.typing import ( FQ2_modulus_coeffs_type, FQ12_modulus_coeffs_type, ) # These new TypeVars are needed because these classes are kind of base classes and # we need the output type to correspond to the type of the inherited class T_FQ = TypeVar("T_FQ", bound="FQ") T_FQP = TypeVar("T_FQP", bound="FQP") T_FQ2 = TypeVar("T_FQ2", bound="FQ2") T_FQ12 = TypeVar("T_FQ12", bound="FQ12") IntOrFQ = Union[int, "FQ"] @total_ordering class FQ: """ A class for field elements in FQ. Wrap a number in this class, and it becomes a field element. 
""" n: int field_modulus: int def __init__(self: T_FQ, val: IntOrFQ) -> None: if not hasattr(self, "field_modulus"): raise AttributeError("Field Modulus hasn't been specified") if isinstance(val, FQ): self.n = val.n elif isinstance(val, int): self.n = val % self.field_modulus else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(val)}" ) def __add__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n + on) % self.field_modulus) def __mul__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n * on) % self.field_modulus) def __rmul__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self * other def __radd__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self + other def __rsub__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((on - self.n) % self.field_modulus) def __sub__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n - on) % self.field_modulus) def __div__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)( self.n * prime_field_inv(on, self.field_modulus) % self.field_modulus ) def __truediv__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self.__div__(other) def __rdiv__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)( prime_field_inv(self.n, self.field_modulus) * on % self.field_modulus ) def __rtruediv__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self.__rdiv__(other) def __pow__(self: T_FQ, other: int) -> T_FQ: if other == 0: return type(self)(1) elif other == 1: return type(self)(self.n) elif other % 2 == 0: return (self * self) ** (other // 2) else: return ((self * self) ** int(other // 2)) * self def __eq__(self: T_FQ, other: Any) -> bool: if isinstance(other, FQ): return self.n == other.n elif isinstance(other, int): return self.n == other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) def __ne__(self: T_FQ, other: Any) -> bool: return not self == other def __neg__(self: T_FQ) -> T_FQ: return type(self)(-self.n) def __repr__(self: T_FQ) -> str: return repr(self.n) def __int__(self: T_FQ) -> int: return self.n def __lt__(self: T_FQ, other: IntOrFQ) -> bool: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return self.n < on @classmethod def one(cls: Type[T_FQ]) -> T_FQ: return cls(1) @classmethod def zero(cls: Type[T_FQ]) -> T_FQ: return cls(0) int_types_or_FQ = (int, FQ) class FQP: """ A class for elements in 
polynomial extension fields """ degree: int = 0 field_modulus: int def __init__( self, coeffs: Sequence[IntOrFQ], modulus_coeffs: Sequence[IntOrFQ] = () ) -> None: if not hasattr(self, "field_modulus"): raise AttributeError("Field Modulus hasn't been specified") if len(coeffs) != len(modulus_coeffs): raise Exception("coeffs and modulus_coeffs aren't of the same length") # Encoding all coefficients in the corresponding type FQ self.FQP_corresponding_FQ_class = type( "FQP_corresponding_FQ_class", (FQ,), {"field_modulus": self.field_modulus} ) self.coeffs: Tuple[IntOrFQ, ...] = tuple( self.FQP_corresponding_FQ_class(c) for c in coeffs ) # The coefficients of the modulus, without the leading [1] self.modulus_coeffs: Tuple[IntOrFQ, ...] = tuple(modulus_coeffs) # The degree of the extension field self.degree = len(self.modulus_coeffs) def __add__(self: T_FQP, other: T_FQP) -> T_FQP: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) return type(self)([x + y for x, y in zip(self.coeffs, other.coeffs)]) def __sub__(self: T_FQP, other: T_FQP) -> T_FQP: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) return type(self)([x - y for x, y in zip(self.coeffs, other.coeffs)]) def __mul__(self: T_FQP, other: Union[int, T_FQ, T_FQP]) -> T_FQP: if isinstance(other, int_types_or_FQ): return type(self)([c * other for c in self.coeffs]) elif isinstance(other, FQP): b = [self.FQP_corresponding_FQ_class(0) for i in range(self.degree * 2 - 1)] for i in range(self.degree): for j in range(self.degree): b[i + j] += self.coeffs[i] * other.coeffs[j] while len(b) > self.degree: exp, top = len(b) - self.degree - 1, b.pop() for i in range(self.degree): b[exp + i] -= top * self.FQP_corresponding_FQ_class( self.modulus_coeffs[i] ) return type(self)(b) else: raise TypeError( "Expected an int or FQ object or FQP object, " f"but got object of type {type(other)}" ) def __rmul__(self: T_FQP, other: Union[int, T_FQ, T_FQP]) -> T_FQP: return self * other def __div__(self: T_FQP, other: Union[int, T_FQ, T_FQP]) -> T_FQP: if isinstance(other, int_types_or_FQ): return type(self)( [ c / other if isinstance(c, FQ) else c // int(other) for c in self.coeffs ] ) elif isinstance(other, type(self)): return self * other.inv() else: raise TypeError( "Expected an int or FQ object or FQP object, " f"but got object of type {type(other)}" ) def __truediv__(self: T_FQP, other: Union[int, T_FQ, T_FQP]) -> T_FQP: return self.__div__(other) def __pow__(self: T_FQP, other: int) -> T_FQP: if other == 0: return type(self)([1] + [0] * (self.degree - 1)) elif other == 1: return type(self)(self.coeffs) elif other % 2 == 0: return (self * self) ** (other // 2) else: return ((self * self) ** int(other // 2)) * self # Extended euclidean algorithm used to find the modular inverse def inv(self: T_FQP) -> T_FQP: lm, hm = ( [1] + [0] * self.degree, [0] * (self.degree + 1), ) low, high = ( cast(List[IntOrFQ], list(self.coeffs + (0,))), cast(List[IntOrFQ], list(self.modulus_coeffs + (1,))), ) while deg(low): r = cast(List[IntOrFQ], list(poly_rounded_div(high, low))) r += [0] * (self.degree + 1 - len(r)) nm = [x for x in hm] new = [x for x in high] if len(lm) != self.degree + 1: raise Exception(f"Length of lm is not {self.degree + 1}") elif len(hm) != self.degree + 1: raise Exception(f"Length of hm is not {self.degree + 1}") elif len(nm) != self.degree + 1: raise Exception(f"Length of nm is not {self.degree + 1}") elif 
len(low) != self.degree + 1: raise Exception(f"Length of low is not {self.degree + 1}") elif len(high) != self.degree + 1: raise Exception(f"Length of high is not {self.degree + 1}") elif len(new) != self.degree + 1: raise Exception(f"Length of new is not {self.degree + 1}") for i in range(self.degree + 1): for j in range(self.degree + 1 - i): nm[i + j] -= lm[i] * int(r[j]) new[i + j] -= low[i] * int(r[j]) lm, low, hm, high = nm, new, lm, low return type(self)(lm[: self.degree]) / int(low[0]) def __repr__(self: T_FQP) -> str: return repr(self.coeffs) def __eq__(self: T_FQP, other: Any) -> bool: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) for c1, c2 in zip(self.coeffs, other.coeffs): if c1 != c2: return False return True def __ne__(self: T_FQP, other: Any) -> bool: return not self == other def __neg__(self: T_FQP) -> T_FQP: return type(self)([-c for c in self.coeffs]) @classmethod def one(cls: Type[T_FQP]) -> T_FQP: return cls([1] + [0] * (cls.degree - 1)) @classmethod def zero(cls: Type[T_FQP]) -> T_FQP: return cls([0] * cls.degree) class FQ2(FQP): """ The quadratic extension field """ degree: int = 2 FQ2_MODULUS_COEFFS: "FQ2_modulus_coeffs_type" def __init__(self, coeffs: Sequence[IntOrFQ]) -> None: if not hasattr(self, "FQ2_MODULUS_COEFFS"): raise AttributeError("FQ2 Modulus Coeffs haven't been specified") super().__init__(coeffs, self.FQ2_MODULUS_COEFFS) class FQ12(FQP): """ The 12th-degree extension field """ degree: int = 12 FQ12_MODULUS_COEFFS: "FQ12_modulus_coeffs_type" def __init__(self, coeffs: Sequence[IntOrFQ]) -> None: if not hasattr(self, "FQ12_MODULUS_COEFFS"): raise AttributeError("FQ12 Modulus Coeffs haven't been specified") super().__init__(coeffs, self.FQ12_MODULUS_COEFFS) py_ecc-8.0.0/py_ecc/fields/field_properties.py000066400000000000000000000020061477723207700214200ustar00rootroot00000000000000from typing import ( TYPE_CHECKING, Dict, TypedDict, ) if TYPE_CHECKING: from py_ecc.typing import ( FQ2_modulus_coeffs_type, FQ12_modulus_coeffs_type, ) class Curve_Field_Properties(TypedDict): field_modulus: int fq2_modulus_coeffs: "FQ2_modulus_coeffs_type" fq12_modulus_coeffs: "FQ12_modulus_coeffs_type" Field_Properties = Dict[str, Curve_Field_Properties] field_properties: Field_Properties = { "bn128": { "field_modulus": 21888242871839275222246405745257275088696311157297823662689037894645226208583, # noqa: E501 "fq2_modulus_coeffs": (1, 0), "fq12_modulus_coeffs": (82, 0, 0, 0, 0, 0, -18, 0, 0, 0, 0, 0), # Implied + [1] }, "bls12_381": { "field_modulus": 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787, # noqa: E501 "fq2_modulus_coeffs": (1, 0), "fq12_modulus_coeffs": (2, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0), # Implied + [1] }, } py_ecc-8.0.0/py_ecc/fields/optimized_field_elements.py000066400000000000000000000344431477723207700231360ustar00rootroot00000000000000from functools import ( cached_property, total_ordering, ) from typing import ( TYPE_CHECKING, Any, List, Sequence, Tuple, Type, TypeVar, Union, cast, ) from py_ecc.utils import ( deg, prime_field_inv, ) if TYPE_CHECKING: from py_ecc.typing import ( FQ2_modulus_coeffs_type, FQ12_modulus_coeffs_type, ) # These new TypeVars are needed because these classes are kind of base classes and # we need the output type to correspond to the type of the inherited class T_FQ = TypeVar("T_FQ", bound="FQ") T_FQP = TypeVar("T_FQP", bound="FQP") T_FQ2 = TypeVar("T_FQ2", bound="FQ2") T_FQ12 = 
TypeVar("T_FQ12", bound="FQ12") IntOrFQ = Union[int, "FQ"] def mod_int(x: IntOrFQ, n: int) -> int: if isinstance(x, int): return x % n elif isinstance(x, FQ): return x.n % n else: raise TypeError(f"Only int and T_FQ types are accepted: got {type(x)}") @total_ordering class FQ: """ A class for field elements in FQ. Wrap a number in this class, and it becomes a field element. """ n: int field_modulus: int def __init__(self: T_FQ, val: IntOrFQ) -> None: if not hasattr(self, "field_modulus"): raise AttributeError("Field Modulus hasn't been specified") if isinstance(val, FQ): self.n = val.n elif isinstance(val, int): self.n = val % self.field_modulus else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(val)}" ) def __add__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n + on) % self.field_modulus) def __mul__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n * on) % self.field_modulus) def __rmul__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self * other def __radd__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self + other def __rsub__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((on - self.n) % self.field_modulus) def __sub__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)((self.n - on) % self.field_modulus) def __mod__(self: T_FQ, other: IntOrFQ) -> T_FQ: raise NotImplementedError("Modulo Operation not yet supported by fields") def __div__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)( self.n * prime_field_inv(on, self.field_modulus) % self.field_modulus ) def __truediv__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self.__div__(other) def __rdiv__(self: T_FQ, other: IntOrFQ) -> T_FQ: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return type(self)( prime_field_inv(self.n, self.field_modulus) * on % self.field_modulus ) def __rtruediv__(self: T_FQ, other: IntOrFQ) -> T_FQ: return self.__rdiv__(other) def __pow__(self: T_FQ, other: int) -> T_FQ: if other == 0: return type(self)(1) elif other == 1: return type(self)(self.n) elif other % 2 == 0: return (self * self) ** (other // 2) else: return ((self * self) ** int(other // 2)) * self def __eq__(self: T_FQ, other: Any) -> bool: if isinstance(other, FQ): return self.n == other.n elif isinstance(other, int): return self.n == other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) def __ne__(self: T_FQ, other: Any) -> bool: return not self == other def __neg__(self: T_FQ) -> T_FQ: return type(self)(-self.n) def __repr__(self: T_FQ) -> str: 
return repr(self.n) def __int__(self: T_FQ) -> int: return self.n def __lt__(self: T_FQ, other: IntOrFQ) -> bool: if isinstance(other, FQ): on = other.n elif isinstance(other, int): on = other else: raise TypeError( f"Expected an int or FQ object, but got object of type {type(other)}" ) return self.n < on @cached_property def sgn0(self: T_FQ) -> int: """ Calculates the sign of a value. sgn0(x) = 1 when x is 'negative'; otherwise, sg0(x) = 0 Note this is an optimized variant for m = 1 Defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-4.1 """ return self.n % 2 @classmethod def one(cls: Type[T_FQ]) -> T_FQ: return cls(1) @classmethod def zero(cls: Type[T_FQ]) -> T_FQ: return cls(0) class FQP: """ A class for elements in polynomial extension fields """ degree: int = 0 field_modulus: int mc_tuples: List[Tuple[int, int]] def __init__( self, coeffs: Sequence[IntOrFQ], modulus_coeffs: Sequence[IntOrFQ] = () ) -> None: if not hasattr(self, "field_modulus"): raise AttributeError("Field Modulus hasn't been specified") if len(coeffs) != len(modulus_coeffs): raise Exception("coeffs and modulus_coeffs aren't of the same length") # Not converting coeffs to FQ or explicitly making them integers # for performance reasons if isinstance(coeffs[0], int): self.coeffs: Tuple[IntOrFQ, ...] = tuple( coeff % self.field_modulus for coeff in coeffs ) else: self.coeffs = tuple(coeffs) # The coefficients of the modulus, without the leading [1] self.modulus_coeffs: Tuple[IntOrFQ, ...] = tuple(modulus_coeffs) # The degree of the extension field self.degree = len(self.modulus_coeffs) def __add__(self: T_FQP, other: T_FQP) -> T_FQP: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) return type(self)( [int(x + y) % self.field_modulus for x, y in zip(self.coeffs, other.coeffs)] ) def __sub__(self: T_FQP, other: T_FQP) -> T_FQP: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) return type(self)( [int(x - y) % self.field_modulus for x, y in zip(self.coeffs, other.coeffs)] ) def __mod__(self: T_FQP, other: Union[int, T_FQP]) -> T_FQP: raise NotImplementedError("Modulo Operation not yet supported by fields") def __mul__(self: T_FQP, other: Union[int, T_FQP]) -> T_FQP: if isinstance(other, int): return type(self)( [int(c) * other % self.field_modulus for c in self.coeffs] ) elif isinstance(other, FQP): b = [0] * (self.degree * 2 - 1) inner_enumerate = list(enumerate(other.coeffs)) for i, eli in enumerate(self.coeffs): for j, elj in inner_enumerate: b[i + j] += int(eli * elj) # MID = len(self.coeffs) // 2 for exp in range(self.degree - 2, -1, -1): top = b.pop() for i, c in self.mc_tuples: b[exp + i] -= top * c return type(self)([x % self.field_modulus for x in b]) else: raise TypeError( f"Expected an int or FQP object, but got object of type {type(other)}" ) def __rmul__(self: T_FQP, other: Union[int, T_FQP]) -> T_FQP: return self * other def __div__(self: T_FQP, other: Union[int, T_FQP]) -> T_FQP: if isinstance(other, int): return type(self)( [ int(c) * prime_field_inv(other, self.field_modulus) % self.field_modulus for c in self.coeffs ] ) elif isinstance(other, type(self)): return self * other.inv() else: raise TypeError( f"Expected an int or FQP object, but got object of type {type(other)}" ) def __truediv__(self: T_FQP, other: Union[int, T_FQP]) -> T_FQP: return self.__div__(other) def __pow__(self: T_FQP, other: int) -> T_FQP: o = type(self)([1] + [0] 
* (self.degree - 1)) t = self while other > 0: if other & 1: o = o * t other >>= 1 t = t * t return o def optimized_poly_rounded_div( self, a: Sequence[IntOrFQ], b: Sequence[IntOrFQ] ) -> Sequence[IntOrFQ]: dega = deg(a) degb = deg(b) temp = [x for x in a] o = [0 for x in a] for i in range(dega - degb, -1, -1): o[i] = int( o[i] + temp[degb + i] * prime_field_inv(int(b[degb]), self.field_modulus) ) for c in range(degb + 1): temp[c + i] = temp[c + i] - o[c] return [x % self.field_modulus for x in o[: deg(o) + 1]] # Extended euclidean algorithm used to find the modular inverse def inv(self: T_FQP) -> T_FQP: lm, hm = [1] + [0] * self.degree, [0] * (self.degree + 1) low, high = ( cast(List[IntOrFQ], list(self.coeffs + (0,))), cast(List[IntOrFQ], list(self.modulus_coeffs + (1,))), ) while deg(low): r = cast(List[IntOrFQ], list(self.optimized_poly_rounded_div(high, low))) r += [0] * (self.degree + 1 - len(r)) nm = [x for x in hm] new = [x for x in high] # assert len(lm) == len(hm) == len(low) == len(high) == len(nm) == len(new) == self.degree + 1 # noqa: E501 for i in range(self.degree + 1): for j in range(self.degree + 1 - i): nm[i + j] -= lm[i] * int(r[j]) new[i + j] -= low[i] * r[j] nm = [x % self.field_modulus for x in nm] new = [int(x) % self.field_modulus for x in new] lm, low, hm, high = nm, new, lm, low return type(self)(lm[: self.degree]) / int(low[0]) def __repr__(self) -> str: return repr(self.coeffs) def __eq__(self: T_FQP, other: Any) -> bool: if not isinstance(other, type(self)): raise TypeError( f"Expected an FQP object, but got object of type {type(other)}" ) for c1, c2 in zip(self.coeffs, other.coeffs): if c1 != c2: return False return True def __ne__(self: T_FQP, other: Any) -> bool: return not self == other def __neg__(self: T_FQP) -> T_FQP: return type(self)([-c for c in self.coeffs]) @cached_property def sgn0(self: T_FQP) -> int: """ Calculates the sign of a value. sgn0(x) = 1 when x is 'negative'; otherwise, sg0(x) = 0 Defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-4.1 """ sign = 0 zero = 1 for x_i in self.coeffs: sign_i = mod_int(x_i, 2) zero_i = x_i == 0 sign = sign or (zero and sign_i) zero = zero and zero_i return sign @classmethod def one(cls: Type[T_FQP]) -> T_FQP: return cls([1] + [0] * (cls.degree - 1)) @classmethod def zero(cls: Type[T_FQP]) -> T_FQP: return cls([0] * cls.degree) class FQ2(FQP): """ The quadratic extension field """ degree: int = 2 FQ2_MODULUS_COEFFS: "FQ2_modulus_coeffs_type" def __init__(self, coeffs: Sequence[IntOrFQ]) -> None: if not hasattr(self, "FQ2_MODULUS_COEFFS"): raise AttributeError("FQ2 Modulus Coeffs haven't been specified") self.mc_tuples = [(i, c) for i, c in enumerate(self.FQ2_MODULUS_COEFFS) if c] super().__init__(coeffs, self.FQ2_MODULUS_COEFFS) @cached_property def sgn0(self: T_FQP) -> int: """ Calculates the sign of a value. 
sgn0(x) = 1 when x is 'negative'; otherwise, sg0(x) = 0 Note this is an optimized variant for m = 2 Defined here: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-4.1 """ x_0, x_1 = self.coeffs sign_0 = mod_int(x_0, 2) zero_0 = x_0 == 0 sign_1 = mod_int(x_1, 2) return sign_0 or (zero_0 and sign_1) class FQ12(FQP): """ The 12th-degree extension field """ degree: int = 12 FQ12_MODULUS_COEFFS: "FQ12_modulus_coeffs_type" def __init__(self, coeffs: Sequence[IntOrFQ]) -> None: if not hasattr(self, "FQ12_MODULUS_COEFFS"): raise AttributeError("FQ12 Modulus Coeffs haven't been specified") self.mc_tuples = [(i, c) for i, c in enumerate(self.FQ12_MODULUS_COEFFS) if c] super().__init__(coeffs, self.FQ12_MODULUS_COEFFS) py_ecc-8.0.0/py_ecc/optimized_bls12_381/000077500000000000000000000000001477723207700176455ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/optimized_bls12_381/__init__.py000066400000000000000000000012751477723207700217630ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, optimized_bls12_381_FQ12 as FQ12, optimized_bls12_381_FQP as FQP, ) from .optimized_clear_cofactor import ( multiply_clear_cofactor_G1, multiply_clear_cofactor_G2, ) from .optimized_curve import ( G1, G2, G12, Z1, Z2, add, b, b2, b12, curve_order, double, eq, field_modulus, is_inf, is_on_curve, multiply, neg, normalize, twist, ) from .optimized_pairing import ( final_exponentiate, pairing, ) from .optimized_swu import ( iso_map_G1, iso_map_G2, optimized_swu_G1, optimized_swu_G2, ) py_ecc-8.0.0/py_ecc/optimized_bls12_381/constants.py000066400000000000000000000313571477723207700222440ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, ) # # Ciphersuite BLS12381G2-SHA256-SSWU-RO parameters # ISO_3_A = FQ2([0, 240]) ISO_3_B = FQ2([1012, 1012]) ISO_3_Z = FQ2([-2, -1]) P_MINUS_9_DIV_16 = 1001205140483106588246484290269935788605945006208159541241399033561623546780709821462541004956387089373434649096260670658193992783731681621012512651314777238193313314641988297376025498093520728838658813979860931248214124593092835 # noqa: E501 EV1 = 1015919005498129635886032702454337503112659152043614931979881174103627376789972962005013361970813319613593700736144 # noqa: E501 EV2 = 1244231661155348484223428017511856347821538750986231559855759541903146219579071812422210818684355842447591283616181 # noqa: E501 EV3 = 1646015993121829755895883253076789309308090876275172350194834453434199515639474951814226234213676147507404483718679 # noqa: E501 EV4 = 1637752706019426886789797193293828301565549384974986623510918743054325021588194075665960171838131772227885159387073 # noqa: E501 ETAS = [FQ2([EV1, EV2]), FQ2([-EV2, EV1]), FQ2([EV3, EV4]), FQ2([-EV4, EV3])] RV1 = 1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257 # noqa: E501 POSITIVE_EIGHTH_ROOTS_OF_UNITY = ( FQ2([1, 0]), FQ2([0, 1]), FQ2([RV1, RV1]), FQ2([RV1, -RV1]), ) # X Numerator ISO_3_K_1_0_VAL = 889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542 # noqa: E501 ISO_3_K_1_0 = FQ2([ISO_3_K_1_0_VAL, ISO_3_K_1_0_VAL]) # noqa: E501 ISO_3_K_1_1 = FQ2( [ 0, 2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706522, # noqa: E501 ] ) ISO_3_K_1_2 = FQ2( [ 
2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706526, # noqa: E501 1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853261, # noqa: E501 ] ) ISO_3_K_1_3 = FQ2( [ 3557697382419259905260257622876359250272784728834673675850718343221361467102966990615722337003569479144794908942033, # noqa: E501 0, ] ) ISO_3_X_NUMERATOR = (ISO_3_K_1_0, ISO_3_K_1_1, ISO_3_K_1_2, ISO_3_K_1_3) # X Denominator ISO_3_K_2_0 = FQ2( [ 0, 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559715, # noqa: E501 ] ) ISO_3_K_2_1 = FQ2( [ 12, 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559775, # noqa: E501 ] ) ISO_3_K_2_2 = FQ2.one() ISO_3_K_2_3 = FQ2.zero() ISO_3_X_DENOMINATOR = (ISO_3_K_2_0, ISO_3_K_2_1, ISO_3_K_2_2, ISO_3_K_2_3) # Y Numerator ISO_3_K_3_0_VAL = 3261222600550988246488569487636662646083386001431784202863158481286248011511053074731078808919938689216061999863558 # noqa: E501 ISO_3_K_3_0 = FQ2([ISO_3_K_3_0_VAL, ISO_3_K_3_0_VAL]) # noqa: E501 ISO_3_K_3_1 = FQ2( [ 0, 889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235518, # noqa: E501 ] ) ISO_3_K_3_2 = FQ2( [ 2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706524, # noqa: E501 1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853263, # noqa: E501 ] ) ISO_3_K_3_3 = FQ2( [ 2816510427748580758331037284777117739799287910327449993381818688383577828123182200904113516794492504322962636245776, # noqa: E501 0, ] ) ISO_3_Y_NUMERATOR = (ISO_3_K_3_0, ISO_3_K_3_1, ISO_3_K_3_2, ISO_3_K_3_3) # Y Denominator ISO_3_K_4_0_VAL = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559355 # noqa: E501 ISO_3_K_4_0 = FQ2([ISO_3_K_4_0_VAL, ISO_3_K_4_0_VAL]) # noqa: E501 ISO_3_K_4_1 = FQ2( [ 0, 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559571, # noqa: E501 ] ) ISO_3_K_4_2 = FQ2( [ 18, 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559769, # noqa: E501 ] ) ISO_3_K_4_3 = FQ2.one() ISO_3_Y_DENOMINATOR = (ISO_3_K_4_0, ISO_3_K_4_1, ISO_3_K_4_2, ISO_3_K_4_3) ISO_3_MAP_COEFFICIENTS = ( ISO_3_X_NUMERATOR, ISO_3_X_DENOMINATOR, ISO_3_Y_NUMERATOR, ISO_3_Y_DENOMINATOR, ) # from https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-09#section-8.8.2 # noqa: E501 H_EFF_G2 = 209869847837335686905080341498658477663839067235703451875306851526599783796572738804459333109033834234622528588876978987822447936461846631641690358257586228683615991308971558879306463436166481 # noqa: E501 # G1 P_MINUS_3_DIV_4 = (FQ.field_modulus - 3) // 4 SQRT_MINUS_11_CUBED = FQ( 0x3D689D1E0E762CEF9F2BEC6130316806B4C80EDA6FC10CE77AE83EAB1EA8B8B8A407C9C6DB195E06F2DBEABC2BAEFF5 # noqa: E501 ) ISO_11_Z = FQ(11) ISO_11_A = FQ( 0x144698A3B8E9433D693A02C96D4982B0EA985383EE66A8D8E8981AEFD881AC98936F8DA0E0F97F5CF428082D584C1D # noqa: E501 ) ISO_11_B = FQ( 0x12E2908D11688030018B12E8753EEE3B2016C1F0F24F4070A0B9C14FCEF35EF55A23215A316CEAA5D1CC48E98E172BE0 # noqa: E501 ) ISO_11_X_NUMERATOR = ( FQ( 0x11A05F2B1E833340B809101DD99815856B303E88A2D7005FF2627B56CDB4E2C85610C2D5F2E62D6EAEAC1662734649B7 # noqa: E501 ), FQ( 
0x17294ED3E943AB2F0588BAB22147A81C7C17E75B2F6A8417F565E33C70D1E86B4838F2A6F318C356E834EEF1B3CB83BB # noqa: E501 ), FQ( 0xD54005DB97678EC1D1048C5D10A9A1BCE032473295983E56878E501EC68E25C958C3E3D2A09729FE0179F9DAC9EDCB0 # noqa: E501 ), FQ( 0x1778E7166FCC6DB74E0609D307E55412D7F5E4656A8DBF25F1B33289F1B330835336E25CE3107193C5B388641D9B6861 # noqa: E501 ), FQ( 0xE99726A3199F4436642B4B3E4118E5499DB995A1257FB3F086EEB65982FAC18985A286F301E77C451154CE9AC8895D9 # noqa: E501 ), FQ( 0x1630C3250D7313FF01D1201BF7A74AB5DB3CB17DD952799B9ED3AB9097E68F90A0870D2DCAE73D19CD13C1C66F652983 # noqa: E501 ), FQ( 0xD6ED6553FE44D296A3726C38AE652BFB11586264F0F8CE19008E218F9C86B2A8DA25128C1052ECADDD7F225A139ED84 # noqa: E501 ), FQ( 0x17B81E7701ABDBE2E8743884D1117E53356DE5AB275B4DB1A682C62EF0F2753339B7C8F8C8F475AF9CCB5618E3F0C88E # noqa: E501 ), FQ( 0x80D3CF1F9A78FC47B90B33563BE990DC43B756CE79F5574A2C596C928C5D1DE4FA295F296B74E956D71986A8497E317 # noqa: E501 ), FQ( 0x169B1F8E1BCFA7C42E0C37515D138F22DD2ECB803A0C5C99676314BAF4BB1B7FA3190B2EDC0327797F241067BE390C9E # noqa: E501 ), FQ( 0x10321DA079CE07E272D8EC09D2565B0DFA7DCCDDE6787F96D50AF36003B14866F69B771F8C285DECCA67DF3F1605FB7B # noqa: E501 ), FQ( 0x6E08C248E260E70BD1E962381EDEE3D31D79D7E22C837BC23C0BF1BC24C6B68C24B1B80B64D391FA9C8BA2E8BA2D229 # noqa: E501 ), ) ISO_11_X_DENOMINATOR = ( FQ( 0x8CA8D548CFF19AE18B2E62F4BD3FA6F01D5EF4BA35B48BA9C9588617FC8AC62B558D681BE343DF8993CF9FA40D21B1C # noqa: E501 ), FQ( 0x12561A5DEB559C4348B4711298E536367041E8CA0CF0800C0126C2588C48BF5713DAA8846CB026E9E5C8276EC82B3BFF # noqa: E501 ), FQ( 0xB2962FE57A3225E8137E629BFF2991F6F89416F5A718CD1FCA64E00B11ACEACD6A3D0967C94FEDCFCC239BA5CB83E19 # noqa: E501 ), FQ( 0x3425581A58AE2FEC83AAFEF7C40EB545B08243F16B1655154CCA8ABC28D6FD04976D5243EECF5C4130DE8938DC62CD8 # noqa: E501 ), FQ( 0x13A8E162022914A80A6F1D5F43E7A07DFFDFC759A12062BB8D6B44E833B306DA9BD29BA81F35781D539D395B3532A21E # noqa: E501 ), FQ( 0xE7355F8E4E667B955390F7F0506C6E9395735E9CE9CAD4D0A43BCEF24B8982F7400D24BC4228F11C02DF9A29F6304A5 # noqa: E501 ), FQ( 0x772CAACF16936190F3E0C63E0596721570F5799AF53A1894E2E073062AEDE9CEA73B3538F0DE06CEC2574496EE84A3A # noqa: E501 ), FQ( 0x14A7AC2A9D64A8B230B3F5B074CF01996E7F63C21BCA68A81996E1CDF9822C580FA5B9489D11E2D311F7D99BBDCC5A5E # noqa: E501 ), FQ( 0xA10ECF6ADA54F825E920B3DAFC7A3CCE07F8D1D7161366B74100DA67F39883503826692ABBA43704776EC3A79A1D641 # noqa: E501 ), FQ( 0x95FC13AB9E92AD4476D6E3EB3A56680F682B4EE96F7D03776DF533978F31C1593174E4B4B7865002D6384D168ECDD0A # noqa: E501 ), FQ(1), ) ISO_11_Y_NUMERATOR = ( FQ( 0x90D97C81BA24EE0259D1F094980DCFA11AD138E48A869522B52AF6C956543D3CD0C7AEE9B3BA3C2BE9845719707BB33 # noqa: E501 ), FQ( 0x134996A104EE5811D51036D776FB46831223E96C254F383D0F906343EB67AD34D6C56711962FA8BFE097E75A2E41C696 # noqa: E501 ), FQ( 0xCC786BAA966E66F4A384C86A3B49942552E2D658A31CE2C344BE4B91400DA7D26D521628B00523B8DFE240C72DE1F6 # noqa: E501 ), FQ( 0x1F86376E8981C217898751AD8746757D42AA7B90EEB791C09E4A3EC03251CF9DE405ABA9EC61DECA6355C77B0E5F4CB # noqa: E501 ), FQ( 0x8CC03FDEFE0FF135CAF4FE2A21529C4195536FBE3CE50B879833FD221351ADC2EE7F8DC099040A841B6DAECF2E8FEDB # noqa: E501 ), FQ( 0x16603FCA40634B6A2211E11DB8F0A6A074A7D0D4AFADB7BD76505C3D3AD5544E203F6326C95A807299B23AB13633A5F0 # noqa: E501 ), FQ( 0x4AB0B9BCFAC1BBCB2C977D027796B3CE75BB8CA2BE184CB5231413C4D634F3747A87AC2460F415EC961F8855FE9D6F2 # noqa: E501 ), FQ( 0x987C8D5333AB86FDE9926BD2CA6C674170A05BFE3BDD81FFD038DA6C26C842642F64550FEDFE935A15E4CA31870FB29 # noqa: E501 ), FQ( 
0x9FC4018BD96684BE88C9E221E4DA1BB8F3ABD16679DC26C1E8B6E6A1F20CABE69D65201C78607A360370E577BDBA587 # noqa: E501 ), FQ( 0xE1BBA7A1186BDB5223ABDE7ADA14A23C42A0CA7915AF6FE06985E7ED1E4D43B9B3F7055DD4EBA6F2BAFAAEBCA731C30 # noqa: E501 ), FQ( 0x19713E47937CD1BE0DFD0B8F1D43FB93CD2FCBCB6CAF493FD1183E416389E61031BF3A5CCE3FBAFCE813711AD011C132 # noqa: E501 ), FQ( 0x18B46A908F36F6DEB918C143FED2EDCC523559B8AAF0C2462E6BFE7F911F643249D9CDF41B44D606CE07C8A4D0074D8E # noqa: E501 ), FQ( 0xB182CAC101B9399D155096004F53F447AA7B12A3426B08EC02710E807B4633F06C851C1919211F20D4C04F00B971EF8 # noqa: E501 ), FQ( 0x245A394AD1ECA9B72FC00AE7BE315DC757B3B080D4C158013E6632D3C40659CC6CF90AD1C232A6442D9D3F5DB980133 # noqa: E501 ), FQ( 0x5C129645E44CF1102A159F748C4A3FC5E673D81D7E86568D9AB0F5D396A7CE46BA1049B6579AFB7866B1E715475224B # noqa: E501 ), FQ( 0x15E6BE4E990F03CE4EA50B3B42DF2EB5CB181D8F84965A3957ADD4FA95AF01B2B665027EFEC01C7704B456BE69C8B604 # noqa: E501 ), ) ISO_11_Y_DENOMINATOR = ( FQ( 0x16112C4C3A9C98B252181140FAD0EAE9601A6DE578980BE6EEC3232B5BE72E7A07F3688EF60C206D01479253B03663C1 # noqa: E501 ), FQ( 0x1962D75C2381201E1A0CBD6C43C348B885C84FF731C4D59CA4A10356F453E01F78A4260763529E3532F6102C2E49A03D # noqa: E501 ), FQ( 0x58DF3306640DA276FAAAE7D6E8EB15778C4855551AE7F310C35A5DD279CD2ECA6757CD636F96F891E2538B53DBF67F2 # noqa: E501 ), FQ( 0x16B7D288798E5395F20D23BF89EDB4D1D115C5DBDDBCD30E123DA489E726AF41727364F2C28297ADA8D26D98445F5416 # noqa: E501 ), FQ( 0xBE0E079545F43E4B00CC912F8228DDCC6D19C9F0F69BBB0542EDA0FC9DEC916A20B15DC0FD2EDEDDA39142311A5001D # noqa: E501 ), FQ( 0x8D9E5297186DB2D9FB266EAAC783182B70152C65550D881C5ECD87B6F0F5A6449F38DB9DFA9CCE202C6477FAAF9B7AC # noqa: E501 ), FQ( 0x166007C08A99DB2FC3BA8734ACE9824B5EECFDFA8D0CF8EF5DD365BC400A0051D5FA9C01A58B1FB93D1A1399126A775C # noqa: E501 ), FQ( 0x16A3EF08BE3EA7EA03BCDDFABBA6FF6EE5A4375EFA1F4FD7FEB34FD206357132B920F5B00801DEE460EE415A15812ED9 # noqa: E501 ), FQ( 0x1866C8ED336C61231A1BE54FD1D74CC4F9FB0CE4C6AF5920ABC5750C4BF39B4852CFE2F7BB9248836B233D9D55535D4A # noqa: E501 ), FQ( 0x167A55CDA70A6E1CEA820597D94A84903216F763E13D87BB5308592E7EA7D4FBC7385EA3D529B35E346EF48BB8913F55 # noqa: E501 ), FQ( 0x4D2F259EEA405BD48F010A01AD2911D9C6DD039BB61A6290E591B36E636A5C871A5C29F4F83060400F8B49CBA8F6AA8 # noqa: E501 ), FQ( 0xACCBB67481D033FF5852C1E48C50C477F94FF8AEFCE42D28C0F9A88CEA7913516F968986F7EBBEA9684B529E2561092 # noqa: E501 ), FQ( 0xAD6B9514C767FE3C3613144B45F1496543346D98ADF02267D5CEEF9A00D9B8693000763E3B90AC11E99B138573345CC # noqa: E501 ), FQ( 0x2660400EB2E4F3B628BDD0D53CD76F2BF565B94E72927C1CB748DF27942480E420517BD8714CC80D1FADC1326ED06F7 # noqa: E501 ), FQ( 0xE0FA1D816DDC03E6B24255E0D7819C171C40F65E273B853324EFCD6356CAA205CA2F570F13497804415473A1D634B8F # noqa: E501 ), FQ(1), ) ISO_11_MAP_COEFFICIENTS = ( ISO_11_X_NUMERATOR, ISO_11_X_DENOMINATOR, ISO_11_Y_NUMERATOR, ISO_11_Y_DENOMINATOR, ) # from https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-09#section-8.8.1 # noqa: E501 H_EFF_G1 = 0xD201000000010001 py_ecc-8.0.0/py_ecc/optimized_bls12_381/optimized_clear_cofactor.py000066400000000000000000000012701477723207700252510ustar00rootroot00000000000000from py_ecc.typing import ( Optimized_Field, Optimized_Point3D, ) from .constants import ( H_EFF_G1, H_EFF_G2, ) from .optimized_curve import ( multiply, ) def multiply_clear_cofactor_G1( p: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: return multiply(p, H_EFF_G1) # Cofactor Clearing Method by Multiplication # There is an optimization based on this Section 4.1 of 
https://eprint.iacr.org/2017/419 # However there is a patent `US patent 7110538` so I'm not sure if it can be used. def multiply_clear_cofactor_G2( p: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: return multiply(p, H_EFF_G2) py_ecc-8.0.0/py_ecc/optimized_bls12_381/optimized_curve.py000066400000000000000000000127641477723207700234410ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, optimized_bls12_381_FQ12 as FQ12, optimized_bls12_381_FQP as FQP, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Optimized_Field, Optimized_Point2D, Optimized_Point3D, ) field_modulus = field_properties["bls12_381"]["field_modulus"] curve_order = ( 52435875175126190479447740508185965837690552500527637822603658699938581184513 ) # Curve order should be prime if not pow(2, curve_order, curve_order) == 2: raise ValueError("Curve order is not prime") # Curve order should be a factor of field_modulus**12 - 1 if not (field_modulus**12 - 1) % curve_order == 0: raise ValueError("Curve order is not a factor of field_modulus**12 - 1") # Curve is y**2 = x**3 + 4 b = FQ(4) # Twisted curve over FQ**2 b2 = FQ2((4, 4)) # Extension curve over FQ**12; same b value as over FQ b12 = FQ12((4,) + (0,) * 11) # Generator for curve over FQ G1 = ( FQ( 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 # noqa: E501 ), FQ( 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 # noqa: E501 ), FQ(1), ) # Generator for twisted curve over FQ2 G2 = ( FQ2( ( 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160, # noqa: E501 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758, # noqa: E501 ) ), FQ2( ( 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905, # noqa: E501 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582, # noqa: E501 ) ), FQ2.one(), ) # Point at infinity over FQ Z1 = (FQ.one(), FQ.one(), FQ.zero()) # Point at infinity for twisted curve over FQ2 Z2 = (FQ2.one(), FQ2.one(), FQ2.zero()) # Check if a point is the point at infinity def is_inf(pt: Optimized_Point3D[Optimized_Field]) -> bool: return pt[-1] == pt[-1].__class__.zero() # Check that a point is on the curve defined by y**2 == x**3 + b def is_on_curve(pt: Optimized_Point3D[Optimized_Field], b: Optimized_Field) -> bool: if is_inf(pt): return True x, y, z = pt return y**2 * z - x**3 == b * z**3 if not is_on_curve(G1, b): raise ValueError("Generator is not on curve") if not is_on_curve(G2, b2): raise ValueError("Generator is not on twisted curve") # Elliptic curve doubling def double( pt: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: x, y, z = pt W = 3 * x * x S = y * z B = x * y * S H = W * W - 8 * B S_squared = S * S newx = 2 * H * S newy = W * (4 * B - H) - 8 * y * y * S_squared newz = 8 * S * S_squared return (newx, newy, newz) # Elliptic curve addition def add( p1: Optimized_Point3D[Optimized_Field], p2: Optimized_Point3D[Optimized_Field] ) -> Optimized_Point3D[Optimized_Field]: one, zero = p1[0].one(), p1[0].zero() if p1[2] == zero or p2[2] == zero: return p1 if p2[2] == zero else p2 x1, y1, z1 = p1 x2, y2, z2 = p2 U1 = y2 * z1 
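    # The points are in projective coordinates (X, Y, Z), representing the
    # affine point (X/Z, Y/Z). The cross-products U1, U2 and V1, V2 compare
    # y1/z1 with y2/z2 and x1/z1 with x2/z2 without any field inversions:
    # V1 == V2 and U1 == U2 means p1 == p2 (fall back to doubling), while
    # V1 == V2 alone means p1 == -p2, so the sum is the point at infinity.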
U2 = y1 * z2 V1 = x2 * z1 V2 = x1 * z2 if V1 == V2 and U1 == U2: return double(p1) elif V1 == V2: return (one, one, zero) U = U1 - U2 V = V1 - V2 V_squared = V * V V_squared_times_V2 = V_squared * V2 V_cubed = V * V_squared W = z1 * z2 A = U * U * W - V_cubed - 2 * V_squared_times_V2 newx = V * A newy = U * (V_squared_times_V2 - A) - V_cubed * U2 newz = V_cubed * W return (newx, newy, newz) # Elliptic curve point multiplication def multiply( pt: Optimized_Point3D[Optimized_Field], n: int ) -> Optimized_Point3D[Optimized_Field]: if n == 0: return (pt[0].one(), pt[0].one(), pt[0].zero()) elif n == 1: return pt elif not n % 2: return multiply(double(pt), n // 2) else: return add(multiply(double(pt), int(n // 2)), pt) def eq( p1: Optimized_Point3D[Optimized_Field], p2: Optimized_Point3D[Optimized_Field] ) -> bool: x1, y1, z1 = p1 x2, y2, z2 = p2 return x1 * z2 == x2 * z1 and y1 * z2 == y2 * z1 def normalize( pt: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point2D[Optimized_Field]: x, y, z = pt return (x / z, y / z) # "Twist" a point in E(FQ2) into a point in E(FQ12) w = FQ12([0, 1] + [0] * 10) # Convert P => -P def neg(pt: Optimized_Point3D[Optimized_Field]) -> Optimized_Point3D[Optimized_Field]: x, y, z = pt return (x, -y, z) def twist(pt: Optimized_Point3D[FQP]) -> Optimized_Point3D[FQ12]: _x, _y, _z = pt # Field isomorphism from Z[p] / x**2 to Z[p] / x**2 - 2*x + 2 xcoeffs = [_x.coeffs[0] - _x.coeffs[1], _x.coeffs[1]] ycoeffs = [_y.coeffs[0] - _y.coeffs[1], _y.coeffs[1]] zcoeffs = [_z.coeffs[0] - _z.coeffs[1], _z.coeffs[1]] nx = FQ12([0] + [xcoeffs[0]] + [0] * 5 + [xcoeffs[1]] + [0] * 4) ny = FQ12([ycoeffs[0]] + [0] * 5 + [ycoeffs[1]] + [0] * 5) nz = FQ12([0] * 3 + [zcoeffs[0]] + [0] * 5 + [zcoeffs[1]] + [0] * 2) return (nx, ny, nz) # Check that the twist creates a point that is on the curve G12 = twist(G2) if not is_on_curve(G12, b12): raise ValueError("Twist creates a point not on curve") py_ecc-8.0.0/py_ecc/optimized_bls12_381/optimized_pairing.py000066400000000000000000000136771477723207700237520ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, optimized_bls12_381_FQ12 as FQ12, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Optimized_Field, Optimized_Point2D, Optimized_Point3D, ) from .optimized_curve import ( G1, add, b, b2, curve_order, double, is_on_curve, multiply, normalize, twist, ) field_modulus = field_properties["bls12_381"]["field_modulus"] ate_loop_count = 15132376222941642752 log_ate_loop_count = 62 pseudo_binary_encoding = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, ] if not ( sum([e * 2**i for i, e in enumerate(pseudo_binary_encoding)]) == ate_loop_count ): raise ValueError("Pseudo binary encoding is incorrect") def normalize1( p: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: x, y = normalize(p) return x, y, x.one() # Create a function representing the line between P1 and P2, # and evaluate it at T. 
Returns a numerator and a denominator # to avoid unneeded divisions def linefunc( P1: Optimized_Point3D[Optimized_Field], P2: Optimized_Point3D[Optimized_Field], T: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point2D[Optimized_Field]: zero = P1[0].zero() x1, y1, z1 = P1 x2, y2, z2 = P2 xt, yt, zt = T # points in projective coords: (x / z, y / z) # hence, m = (y2/z2 - y1/z1) / (x2/z2 - x1/z1) # multiply numerator and denominator by z1z2 to get values below m_numerator = y2 * z1 - y1 * z2 m_denominator = x2 * z1 - x1 * z2 if m_denominator != zero: # m * ((xt/zt) - (x1/z1)) - ((yt/zt) - (y1/z1)) return ( m_numerator * (xt * z1 - x1 * zt) - m_denominator * (yt * z1 - y1 * zt), m_denominator * zt * z1, ) elif m_numerator == zero: # m = 3(x/z)^2 / 2(y/z), multiply num and den by z**2 m_numerator = 3 * x1 * x1 m_denominator = 2 * y1 * z1 return ( m_numerator * (xt * z1 - x1 * zt) - m_denominator * (yt * z1 - y1 * zt), m_denominator * zt * z1, ) else: return xt * z1 - x1 * zt, z1 * zt def cast_point_to_fq12(pt: Optimized_Point3D[FQ]) -> Optimized_Point3D[FQ12]: if pt is None: return None x, y, z = pt return (FQ12([x.n] + [0] * 11), FQ12([y.n] + [0] * 11), FQ12([z.n] + [0] * 11)) # Check consistency of the "line function" one, two, three = G1, double(G1), multiply(G1, 3) negone, negtwo, negthree = ( multiply(G1, curve_order - 1), multiply(G1, curve_order - 2), multiply(G1, curve_order - 3), ) conditions = [ linefunc(one, two, one)[0] == FQ(0), linefunc(one, two, two)[0] == FQ(0), linefunc(one, two, three)[0] != FQ(0), linefunc(one, two, negthree)[0] == FQ(0), linefunc(one, negone, one)[0] == FQ(0), linefunc(one, negone, negone)[0] == FQ(0), linefunc(one, negone, two)[0] != FQ(0), linefunc(one, one, one)[0] == FQ(0), linefunc(one, one, two)[0] != FQ(0), linefunc(one, one, negtwo)[0] == FQ(0), ] if not all(conditions): raise ValueError("Line function is inconsistent") # Main miller loop def miller_loop( Q: Optimized_Point3D[FQ2], P: Optimized_Point3D[FQ], final_exponentiate: bool = True ) -> FQ12: if Q is None or P is None: return FQ12.one() cast_P = cast_point_to_fq12(P) twist_R = twist_Q = twist(Q) R: Optimized_Point3D[FQ2] = Q f_num, f_den = FQ12.one(), FQ12.one() # for i in range(log_ate_loop_count, -1, -1): for v in pseudo_binary_encoding[62::-1]: _n, _d = linefunc(twist_R, twist_R, cast_P) f_num = f_num * f_num * _n f_den = f_den * f_den * _d R = double(R) twist_R = twist(R) if v == 1: _n, _d = linefunc(twist_R, twist_Q, cast_P) f_num = f_num * _n f_den = f_den * _d R = add(R, Q) twist_R = twist(R) # assert R == multiply(Q, ate_loop_count) # Q1 = (Q[0] ** field_modulus, Q[1] ** field_modulus, Q[2] ** field_modulus) # assert is_on_curve(Q1, b12) # nQ2 = (Q1[0] ** field_modulus, -Q1[1] ** field_modulus, Q1[2] ** field_modulus) # assert is_on_curve(nQ2, b12) # _n1, _d1 = linefunc(R, Q1, P) # R = add(R, Q1) # _n2, _d2 = linefunc(R, nQ2, P) # f = f_num * _n1 * _n2 / (f_den * _d1 * _d2) f = f_num / f_den # R = add(R, nQ2) This line is in many specifications but technically does nothing if final_exponentiate: return f ** ((field_modulus**12 - 1) // curve_order) else: return f # Pairing computation def pairing( Q: Optimized_Point3D[FQ2], P: Optimized_Point3D[FQ], final_exponentiate: bool = True ) -> FQ12: if not is_on_curve(Q, b2): raise ValueError("Invalid input - point Q is not on the correct curve") if not is_on_curve(P, b): raise ValueError("Invalid input - point P is not on the correct curves") if P[-1] == (P[-1].zero()) or Q[-1] == (Q[-1].zero()): return FQ12.one() return miller_loop(Q, P, 
final_exponentiate=final_exponentiate) exptable = [FQ12([0] * i + [1] + [0] * (11 - i)) ** field_modulus for i in range(12)] def exp_by_p(x: FQ12) -> FQ12: return sum( (table_entry * int(coeff) for table_entry, coeff in zip(exptable, x.coeffs)), FQ12.zero(), ) def final_exponentiate(p: FQ12) -> FQ12: cofactor = (field_modulus**4 - field_modulus**2 + 1) // curve_order p2 = exp_by_p(exp_by_p(p)) * p p3 = exp_by_p(exp_by_p(exp_by_p(exp_by_p(exp_by_p(exp_by_p(p2)))))) / p2 return p3**cofactor py_ecc-8.0.0/py_ecc/optimized_bls12_381/optimized_swu.py000066400000000000000000000141541477723207700231260ustar00rootroot00000000000000from typing import ( Tuple, ) from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, ) from py_ecc.typing import ( Optimized_Point3D, ) from .constants import ( ETAS, ISO_3_A, ISO_3_B, ISO_3_MAP_COEFFICIENTS, ISO_3_Z, ISO_11_A, ISO_11_B, ISO_11_MAP_COEFFICIENTS, ISO_11_Z, P_MINUS_3_DIV_4, P_MINUS_9_DIV_16, POSITIVE_EIGHTH_ROOTS_OF_UNITY, SQRT_MINUS_11_CUBED, ) # Optimized SWU Map - FQ to G1' # Found in Section 4 of https://eprint.iacr.org/2019/403 def optimized_swu_G1(t: FQ) -> Tuple[FQ, FQ, FQ]: t2 = t**2 iso_11_z_t2 = ISO_11_Z * t2 temp = iso_11_z_t2 + iso_11_z_t2**2 denominator = -(ISO_11_A * temp) # -a(Z * t^2 + Z^2 * t^4) temp = temp + FQ.one() numerator = ISO_11_B * temp # b(Z * t^2 + Z^2 * t^4 + 1) # Exceptional case if denominator == FQ.zero(): denominator = ISO_11_Z * ISO_11_A # v = D^3 v = denominator**3 # u = N^3 + a * N * D^2 + b* D^3 u = (numerator**3) + (ISO_11_A * numerator * (denominator**2)) + (ISO_11_B * v) # Attempt y = sqrt(u / v) (is_root, y) = sqrt_division_FQ(u, v) if not is_root: y = y * t**3 * SQRT_MINUS_11_CUBED numerator = numerator * iso_11_z_t2 if t.sgn0 != y.sgn0: y = -y y = y * denominator return numerator, y, denominator # Optimized SWU Map - FQ2 to G2': y^2 = x^3 + 240i * x + 1012 + 1012i # Found in Section 4 of https://eprint.iacr.org/2019/403 def optimized_swu_G2(t: FQ2) -> Tuple[FQ2, FQ2, FQ2]: t2 = t**2 iso_3_z_t2 = ISO_3_Z * t2 temp = iso_3_z_t2 + iso_3_z_t2**2 denominator = -(ISO_3_A * temp) # -a(Z * t^2 + Z^2 * t^4) temp = temp + FQ2.one() numerator = ISO_3_B * temp # b(Z * t^2 + Z^2 * t^4 + 1) # Exceptional case if denominator == FQ2.zero(): denominator = ISO_3_Z * ISO_3_A # v = D^3 v = denominator**3 # u = N^3 + a * N * D^2 + b* D^3 u = (numerator**3) + (ISO_3_A * numerator * (denominator**2)) + (ISO_3_B * v) # Attempt y = sqrt(u / v) (success, sqrt_candidate) = sqrt_division_FQ2(u, v) y = sqrt_candidate # Handle case where (u / v) is not square # sqrt_candidate(x1) = sqrt_candidate(x0) * t^3 sqrt_candidate = sqrt_candidate * t**3 # u(x1) = Z^3 * t^6 * u(x0) u = (iso_3_z_t2) ** 3 * u success_2 = False etas = ETAS for eta in etas: # Valid solution if (eta * sqrt_candidate(x1)) ** 2 * v - u == 0 eta_sqrt_candidate = eta * sqrt_candidate temp1 = eta_sqrt_candidate**2 * v - u if temp1 == FQ2.zero() and not success and not success_2: y = eta_sqrt_candidate success_2 = True if not success and not success_2: # Unreachable raise Exception("Hash to Curve - Optimized SWU failure") if not success: numerator = numerator * iso_3_z_t2 if t.sgn0 != y.sgn0: y = -y y = y * denominator return (numerator, y, denominator) def sqrt_division_FQ(u: FQ, v: FQ) -> Tuple[bool, FQ]: temp = u * v result = temp * ((temp * v**2) ** P_MINUS_3_DIV_4) is_valid_root = (result**2 * v - u) == FQ.zero() return (is_valid_root, result) # Square Root Division # Return: uv^7 * (uv^15)^((p^2 - 9) / 16) * root of unity # If valid square 
root is found return true, else false def sqrt_division_FQ2(u: FQ2, v: FQ2) -> Tuple[bool, FQ2]: temp1 = u * v**7 temp2 = temp1 * v**8 # gamma = uv^7 * (uv^15)^((p^2 - 9) / 16) gamma = temp2**P_MINUS_9_DIV_16 gamma = gamma * temp1 # Verify there is a valid root is_valid_root = False result = gamma roots = POSITIVE_EIGHTH_ROOTS_OF_UNITY for root in roots: # Valid if (root * gamma)^2 * v - u == 0 sqrt_candidate = root * gamma temp2 = sqrt_candidate**2 * v - u if temp2 == FQ2.zero() and not is_valid_root: is_valid_root = True result = sqrt_candidate return (is_valid_root, result) # Optimal Map from 3-Isogenous Curve to G2 def iso_map_G2(x: FQ2, y: FQ2, z: FQ2) -> Optimized_Point3D[FQ2]: # x-numerator, x-denominator, y-numerator, y-denominator mapped_values = [FQ2.zero(), FQ2.zero(), FQ2.zero(), FQ2.zero()] z_powers = [z, z**2, z**3] # Horner Polynomial Evaluation for i, k_i in enumerate(ISO_3_MAP_COEFFICIENTS): mapped_values[i] = k_i[-1:][0] for j, k_i_j in enumerate(reversed(k_i[:-1])): mapped_values[i] = mapped_values[i] * x + z_powers[j] * k_i_j mapped_values[2] = mapped_values[2] * y # y-numerator * y mapped_values[3] = mapped_values[3] * z # y-denominator * z z_G2 = mapped_values[1] * mapped_values[3] # x-denominator * y-denominator x_G2 = mapped_values[0] * mapped_values[3] # x-numerator * y-denominator y_G2 = mapped_values[1] * mapped_values[2] # y-numerator * x-denominator return (x_G2, y_G2, z_G2) # Optimal Map from 11-Isogenous Curve to G1 def iso_map_G1(x: FQ, y: FQ, z: FQ) -> Optimized_Point3D[FQ]: # x-numerator, x-denominator, y-numerator, y-denominator mapped_values = [FQ.zero(), FQ.zero(), FQ.zero(), FQ.zero()] z_powers = [ z, z**2, z**3, z**4, z**5, z**6, z**7, z**8, z**9, z**10, z**11, z**12, z**13, z**14, z**15, ] # Horner Polynomial Evaluation for i, k_i in enumerate(ISO_11_MAP_COEFFICIENTS): mapped_values[i] = k_i[-1:][0] for j, k_i_j in enumerate(reversed(k_i[:-1])): mapped_values[i] = mapped_values[i] * x + z_powers[j] * k_i_j # Correct for x-denominator polynomial being 1-order lower than # x-numerator polynomial mapped_values[1] = mapped_values[1] * z # x-denominator * z mapped_values[2] = mapped_values[2] * y # y-numerator * y mapped_values[3] = mapped_values[3] * z # y-denominator * z z_G1 = mapped_values[1] * mapped_values[3] # x-denominator * y-denominator x_G1 = mapped_values[0] * mapped_values[3] # x-numerator * y-denominator y_G1 = mapped_values[1] * mapped_values[2] # y-numerator * x-denominator return (x_G1, y_G1, z_G1) py_ecc-8.0.0/py_ecc/optimized_bn128/000077500000000000000000000000001477723207700171615ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/optimized_bn128/__init__.py000066400000000000000000000007301477723207700212720ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bn128_FQ as FQ, optimized_bn128_FQ2 as FQ2, optimized_bn128_FQ12 as FQ12, optimized_bn128_FQP as FQP, ) from .optimized_curve import ( G1, G2, G12, Z1, Z2, add, b, b2, b12, curve_order, double, eq, field_modulus, is_inf, is_on_curve, multiply, neg, normalize, twist, ) from .optimized_pairing import ( final_exponentiate, pairing, ) py_ecc-8.0.0/py_ecc/optimized_bn128/optimized_curve.py000066400000000000000000000120531477723207700227440ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bn128_FQ as FQ, optimized_bn128_FQ2 as FQ2, optimized_bn128_FQ12 as FQ12, optimized_bn128_FQP as FQP, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Optimized_Field, Optimized_Point2D, Optimized_Point3D, ) field_modulus = 
field_properties["bn128"]["field_modulus"] curve_order = ( 21888242871839275222246405745257275088548364400416034343698204186575808495617 ) # Curve order should be prime if not pow(2, curve_order, curve_order) == 2: raise ValueError("Curve order is not prime") # Curve order should be a factor of field_modulus**12 - 1 if not (field_modulus**12 - 1) % curve_order == 0: raise ValueError("Curve order is not a factor of field_modulus**12 - 1") # Curve is y**2 = x**3 + 3 b = FQ(3) # Twisted curve over FQ**2 b2 = FQ2([3, 0]) / FQ2([9, 1]) # Extension curve over FQ**12; same b value as over FQ b12 = FQ12([3] + [0] * 11) # Generator for curve over FQ G1 = (FQ(1), FQ(2), FQ(1)) # Generator for twisted curve over FQ2 G2 = ( FQ2( [ 10857046999023057135944570762232829481370756359578518086990519993285655852781, # noqa: E501 11559732032986387107991004021392285783925812861821192530917403151452391805634, # noqa: E501 ] ), FQ2( [ 8495653923123431417604973247489272438418190587263600148770280649306958101930, # noqa: E501 4082367875863433681332203403145435568316851327593401208105741076214120093531, # noqa: E501 ] ), FQ2.one(), ) # Point at infinity over FQ Z1 = (FQ.one(), FQ.one(), FQ.zero()) # Point at infinity for twisted curve over FQ2 Z2 = (FQ2.one(), FQ2.one(), FQ2.zero()) # Check if a point is the point at infinity def is_inf(pt: Optimized_Point3D[Optimized_Field]) -> bool: return pt[-1] == pt[-1].zero() # Check that a point is on the curve defined by y**2 == x**3 + b def is_on_curve(pt: Optimized_Point3D[Optimized_Field], b: Optimized_Field) -> bool: if is_inf(pt): return True x, y, z = pt return y**2 * z - x**3 == b * z**3 if not is_on_curve(G1, b): raise ValueError("Generator is not on curve") if not is_on_curve(G2, b2): raise ValueError("Generator is not on twisted curve") # Elliptic curve doubling def double( pt: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: x, y, z = pt W = 3 * x * x S = y * z B = x * y * S H = W * W - 8 * B S_squared = S * S newx = 2 * H * S newy = W * (4 * B - H) - 8 * y * y * S_squared newz = 8 * S * S_squared return (newx, newy, newz) # Elliptic curve addition def add( p1: Optimized_Point3D[Optimized_Field], p2: Optimized_Point3D[Optimized_Field] ) -> Optimized_Point3D[Optimized_Field]: one, zero = p1[0].one(), p1[0].zero() if p1[2] == zero or p2[2] == zero: return p1 if p2[2] == zero else p2 x1, y1, z1 = p1 x2, y2, z2 = p2 U1 = y2 * z1 U2 = y1 * z2 V1 = x2 * z1 V2 = x1 * z2 if V1 == V2 and U1 == U2: return double(p1) elif V1 == V2: return (one, one, zero) U = U1 - U2 V = V1 - V2 V_squared = V * V V_squared_times_V2 = V_squared * V2 V_cubed = V * V_squared W = z1 * z2 A = U * U * W - V_cubed - 2 * V_squared_times_V2 newx = V * A newy = U * (V_squared_times_V2 - A) - V_cubed * U2 newz = V_cubed * W return (newx, newy, newz) # Elliptic curve point multiplication def multiply( pt: Optimized_Point3D[Optimized_Field], n: int ) -> Optimized_Point3D[Optimized_Field]: if n == 0: return (pt[0].one(), pt[0].one(), pt[0].zero()) elif n == 1: return pt elif not n % 2: return multiply(double(pt), n // 2) else: return add(multiply(double(pt), int(n // 2)), pt) def eq( p1: Optimized_Point3D[Optimized_Field], p2: Optimized_Point3D[Optimized_Field] ) -> bool: x1, y1, z1 = p1 x2, y2, z2 = p2 return x1 * z2 == x2 * z1 and y1 * z2 == y2 * z1 def normalize( pt: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point2D[Optimized_Field]: x, y, z = pt return (x / z, y / z) # "Twist" a point in E(FQ2) into a point in E(FQ12) w = FQ12([0, 1] + [0] * 10) # Convert P => -P 
def neg(pt: Optimized_Point3D[Optimized_Field]) -> Optimized_Point3D[Optimized_Field]: x, y, z = pt return (x, -y, z) def twist(pt: Optimized_Point3D[FQP]) -> Optimized_Point3D[FQ12]: _x, _y, _z = pt # Field isomorphism from Z[p] / x**2 to Z[p] / x**2 - 18*x + 82 xcoeffs = [_x.coeffs[0] - _x.coeffs[1] * 9, _x.coeffs[1]] ycoeffs = [_y.coeffs[0] - _y.coeffs[1] * 9, _y.coeffs[1]] zcoeffs = [_z.coeffs[0] - _z.coeffs[1] * 9, _z.coeffs[1]] nx = FQ12([xcoeffs[0]] + [0] * 5 + [xcoeffs[1]] + [0] * 5) ny = FQ12([ycoeffs[0]] + [0] * 5 + [ycoeffs[1]] + [0] * 5) nz = FQ12([zcoeffs[0]] + [0] * 5 + [zcoeffs[1]] + [0] * 5) return (nx * w**2, ny * w**3, nz) # Check that the twist creates a point that is on the curve G12 = twist(G2) if not is_on_curve(G12, b12): raise ValueError("Twist creates a point not on curve") py_ecc-8.0.0/py_ecc/optimized_bn128/optimized_pairing.py000066400000000000000000000132241477723207700232520ustar00rootroot00000000000000from py_ecc.fields import ( optimized_bn128_FQ as FQ, optimized_bn128_FQ2 as FQ2, optimized_bn128_FQ12 as FQ12, ) from py_ecc.fields.field_properties import ( field_properties, ) from py_ecc.typing import ( Optimized_Field, Optimized_Point2D, Optimized_Point3D, ) from .optimized_curve import ( G1, add, b, b2, curve_order, double, is_on_curve, multiply, neg, normalize, twist, ) field_modulus = field_properties["bn128"]["field_modulus"] ate_loop_count = 29793968203157093288 log_ate_loop_count = 63 pseudo_binary_encoding = [ 0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1, ] if not ( sum([e * 2**i for i, e in enumerate(pseudo_binary_encoding)]) == ate_loop_count ): raise ValueError("Pseudo binary encoding is incorrect") def normalize1( p: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point3D[Optimized_Field]: x, y = normalize(p) return x, y, x.one() # Create a function representing the line between P1 and P2, # and evaluate it at T. 
Returns a numerator and a denominator # to avoid unneeded divisions def linefunc( P1: Optimized_Point3D[Optimized_Field], P2: Optimized_Point3D[Optimized_Field], T: Optimized_Point3D[Optimized_Field], ) -> Optimized_Point2D[Optimized_Field]: zero = P1[0].zero() x1, y1, z1 = P1 x2, y2, z2 = P2 xt, yt, zt = T # points in projective coords: (x / z, y / z) # hence, m = (y2/z2 - y1/z1) / (x2/z2 - x1/z1) # multiply numerator and denominator by z1z2 to get values below m_numerator = y2 * z1 - y1 * z2 m_denominator = x2 * z1 - x1 * z2 if m_denominator != zero: # m * ((xt/zt) - (x1/z1)) - ((yt/zt) - (y1/z1)) return ( m_numerator * (xt * z1 - x1 * zt) - m_denominator * (yt * z1 - y1 * zt), m_denominator * zt * z1, ) elif m_numerator == zero: # m = 3(x/z)^2 / 2(y/z), multiply num and den by z**2 m_numerator = 3 * x1 * x1 m_denominator = 2 * y1 * z1 return ( m_numerator * (xt * z1 - x1 * zt) - m_denominator * (yt * z1 - y1 * zt), m_denominator * zt * z1, ) else: return xt * z1 - x1 * zt, z1 * zt def cast_point_to_fq12(pt: Optimized_Point3D[FQ]) -> Optimized_Point3D[FQ12]: if pt is None: return None x, y, z = pt return (FQ12([x.n] + [0] * 11), FQ12([y.n] + [0] * 11), FQ12([z.n] + [0] * 11)) # Check consistency of the "line function" one, two, three = G1, double(G1), multiply(G1, 3) negone, negtwo, negthree = ( multiply(G1, curve_order - 1), multiply(G1, curve_order - 2), multiply(G1, curve_order - 3), ) conditions = [ linefunc(one, two, one)[0] == FQ(0), linefunc(one, two, two)[0] == FQ(0), linefunc(one, two, three)[0] != FQ(0), linefunc(one, two, negthree)[0] == FQ(0), linefunc(one, negone, one)[0] == FQ(0), linefunc(one, negone, negone)[0] == FQ(0), linefunc(one, negone, two)[0] != FQ(0), linefunc(one, one, one)[0] == FQ(0), linefunc(one, one, two)[0] != FQ(0), linefunc(one, one, negtwo)[0] == FQ(0), ] if not all(conditions): raise ValueError("Line function is inconsistent") # Main miller loop def miller_loop( Q: Optimized_Point3D[FQ12], P: Optimized_Point3D[FQ12], final_exponentiate: bool = True, ) -> FQ12: if Q is None or P is None: return FQ12.one() R: Optimized_Point3D[FQ12] = Q f_num, f_den = FQ12.one(), FQ12.one() # for i in range(log_ate_loop_count, -1, -1): for v in pseudo_binary_encoding[63::-1]: _n, _d = linefunc(R, R, P) f_num = f_num * f_num * _n f_den = f_den * f_den * _d R = double(R) # if ate_loop_count & (2**i): if v == 1: _n, _d = linefunc(R, Q, P) f_num = f_num * _n f_den = f_den * _d R = add(R, Q) elif v == -1: nQ = neg(Q) _n, _d = linefunc(R, nQ, P) f_num = f_num * _n f_den = f_den * _d R = add(R, nQ) # assert R == multiply(Q, ate_loop_count) Q1 = (Q[0] ** field_modulus, Q[1] ** field_modulus, Q[2] ** field_modulus) # assert is_on_curve(Q1, b12) nQ2 = (Q1[0] ** field_modulus, -Q1[1] ** field_modulus, Q1[2] ** field_modulus) # assert is_on_curve(nQ2, b12) _n1, _d1 = linefunc(R, Q1, P) R = add(R, Q1) _n2, _d2 = linefunc(R, nQ2, P) f = f_num * _n1 * _n2 / (f_den * _d1 * _d2) # R = add(R, nQ2) This line is in many specifications but technically does nothing if final_exponentiate: return f ** ((field_modulus**12 - 1) // curve_order) else: return f # Pairing computation def pairing( Q: Optimized_Point3D[FQ2], P: Optimized_Point3D[FQ], final_exponentiate: bool = True ) -> FQ12: if not is_on_curve(Q, b2): raise ValueError("Invalid input - point Q is not on the correct curve") if not is_on_curve(P, b): raise ValueError("Invalid input - point P is not on the correct curves") if P[-1] == (P[-1].zero()) or Q[-1] == (Q[-1].zero()): return FQ12.one() return miller_loop( twist(Q), 
cast_point_to_fq12(P), final_exponentiate=final_exponentiate ) def final_exponentiate(p: Optimized_Field) -> Optimized_Field: return p ** ((field_modulus**12 - 1) // curve_order) py_ecc-8.0.0/py_ecc/py.typed000066400000000000000000000000001477723207700157300ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/secp256k1/000077500000000000000000000000001477723207700156665ustar00rootroot00000000000000py_ecc-8.0.0/py_ecc/secp256k1/__init__.py000066400000000000000000000001521477723207700177750ustar00rootroot00000000000000from .secp256k1 import ( G, N, P, ecdsa_raw_recover, ecdsa_raw_sign, privtopub, ) py_ecc-8.0.0/py_ecc/secp256k1/secp256k1.py000066400000000000000000000202021477723207700176570ustar00rootroot00000000000000import hashlib import hmac from typing import ( TYPE_CHECKING, Any, Tuple, cast, ) if TYPE_CHECKING: from py_ecc.typing import ( PlainPoint2D, PlainPoint3D, ) def safe_ord(value: Any) -> int: if isinstance(value, int): return value else: return ord(value) # Elliptic curve parameters (secp256k1) P = 2**256 - 2**32 - 977 N = 115792089237316195423570985008687907852837564279074904382605163141518161494337 A = 0 B = 7 Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 G = cast("PlainPoint2D", (Gx, Gy)) def bytes_to_int(x: bytes) -> int: o = 0 for b in x: o = (o << 8) + safe_ord(b) return o # Extended Euclidean Algorithm def inv(a: int, n: int) -> int: if a == 0: return 0 lm, hm = 1, 0 low, high = a % n, n while low > 1: r = high // low nm, new = hm - lm * r, high - low * r lm, low, hm, high = nm, new, lm, low return lm % n def to_jacobian(p: "PlainPoint2D") -> "PlainPoint3D": """ Convert a 2D point to its corresponding Jacobian point representation. :param p: the point to convert :type p: PlainPoint2D :return: the Jacobian point representation :rtype: PlainPoint3D """ o = (p[0], p[1], 1) return cast("PlainPoint3D", o) def jacobian_double(p: "PlainPoint3D") -> "PlainPoint3D": """ Double a point in Jacobian coordinates and return the result. :param p: the point to double :type p: PlainPoint3D :return: the resulting Jacobian point :rtype: PlainPoint3D """ if not p[1]: return cast("PlainPoint3D", (0, 0, 0)) ysq = (p[1] ** 2) % P S = (4 * p[0] * ysq) % P M = (3 * p[0] ** 2 + A * p[2] ** 4) % P nx = (M**2 - 2 * S) % P ny = (M * (S - nx) - 8 * ysq**2) % P nz = (2 * p[1] * p[2]) % P return cast("PlainPoint3D", (nx, ny, nz)) def jacobian_add(p: "PlainPoint3D", q: "PlainPoint3D") -> "PlainPoint3D": """ Add two points in Jacobian coordinates and return the result. :param p: the first point to add :type p: PlainPoint3D :param q: the second point to add :type q: PlainPoint3D :return: the resulting Jacobian point :rtype: PlainPoint3D """ if not p[1]: return q if not q[1]: return p U1 = (p[0] * q[2] ** 2) % P U2 = (q[0] * p[2] ** 2) % P S1 = (p[1] * q[2] ** 3) % P S2 = (q[1] * p[2] ** 3) % P if U1 == U2: if S1 != S2: return cast("PlainPoint3D", (0, 0, 1)) return jacobian_double(p) H = U2 - U1 R = S2 - S1 H2 = (H * H) % P H3 = (H * H2) % P U1H2 = (U1 * H2) % P nx = (R**2 - H3 - 2 * U1H2) % P ny = (R * (U1H2 - nx) - S1 * H3) % P nz = (H * p[2] * q[2]) % P return cast("PlainPoint3D", (nx, ny, nz)) def from_jacobian(p: "PlainPoint3D") -> "PlainPoint2D": """ Convert a Jacobian point back to its corresponding 2D point representation. 
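    The affine coordinates are recovered as x = X * Z**-2 and y = Y * Z**-3 (mod P),
    with the single inversion of Z computed by the extended-Euclidean `inv` helper.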
:param p: the point to convert :type p: PlainPoint3D :return: the 2D point representation :rtype: PlainPoint2D """ z = inv(p[2], P) return cast("PlainPoint2D", ((p[0] * z**2) % P, (p[1] * z**3) % P)) def jacobian_multiply(a: "PlainPoint3D", n: int) -> "PlainPoint3D": """ Multiply a point in Jacobian coordinates by an integer and return the result. :param a: the point to multiply :type a: PlainPoint3D :param n: the integer to multiply the point by :type n: int :return: the resulting Jacobian point :rtype: PlainPoint3D """ if a[1] == 0 or n == 0: return cast("PlainPoint3D", (0, 0, 1)) if n == 1: return a if n < 0 or n >= N: return jacobian_multiply(a, n % N) if (n % 2) == 0: return jacobian_double(jacobian_multiply(a, n // 2)) if (n % 2) == 1: return jacobian_add(jacobian_double(jacobian_multiply(a, n // 2)), a) raise ValueError("Unexpected case in jacobian_multiply: This should never happen.") def multiply(a: "PlainPoint2D", n: int) -> "PlainPoint2D": """ Multiply a 2D point a by an integer n using elliptic curve point multiplication, and return the resulting 2D point in plain coordinates. :param a: a 2D point on the elliptic curve :type a: PlainPoint2D :param n: an integer used for point multiplication :type n: int :return: the resulting 2D point in plain coordinates :rtype: PlainPoint2D """ return from_jacobian(jacobian_multiply(to_jacobian(a), n)) def add(a: "PlainPoint2D", b: "PlainPoint2D") -> "PlainPoint2D": """ Add two 2D points a and b using elliptic curve point addition, and return the resulting 2D point in plain coordinates. :param a: a 2D point on the elliptic curve :type a: PlainPoint2D :param b: another 2D point on the elliptic curve :type b: PlainPoint2D :return: the resulting 2D point in plain coordinates :rtype: PlainPoint2D """ return from_jacobian(jacobian_add(to_jacobian(a), to_jacobian(b))) # bytes32 def privtopub(privkey: bytes) -> "PlainPoint2D": return multiply(G, bytes_to_int(privkey)) def deterministic_generate_k(msghash: bytes, priv: bytes) -> int: """ Generate a deterministic value `k` for use in ECDSA signature generation, as described in RFC 6979. The generated `k` value is intended to provide protection against weak random number generation. https://datatracker.ietf.org/doc/html/rfc6979 :param msghash: The hash of the message to be signed. :type msghash: bytes :param priv: The private key to be used in the signature. :type priv: bytes :return: A deterministic value k (as an int) that can be used as the ephemeral private key in the signature generation process. :rtype: int """ v = b"\x01" * 32 k = b"\x00" * 32 k = hmac.new(k, v + b"\x00" + priv + msghash, hashlib.sha256).digest() v = hmac.new(k, v, hashlib.sha256).digest() k = hmac.new(k, v + b"\x01" + priv + msghash, hashlib.sha256).digest() v = hmac.new(k, v, hashlib.sha256).digest() return bytes_to_int(hmac.new(k, v, hashlib.sha256).digest()) # bytes32, bytes32 -> v, r, s (as numbers) def ecdsa_raw_sign(msghash: bytes, priv: bytes) -> Tuple[int, int, int]: """ Return a raw ECDSA signature of the provided `data`, using the provided `private_key`. 
:param msghash: the data to sign :type msghash: bytes :param priv: the private key to use for signing :type priv: bytes :return: a tuple of integers `(v, r, s)`, representing the raw ECDSA signature :rtype: Tuple[int, int, int] """ z = bytes_to_int(msghash) k = deterministic_generate_k(msghash, priv) r, y = multiply(G, k) s = inv(k, N) * (z + r * bytes_to_int(priv)) % N v, r, s = 27 + ((y % 2) ^ (0 if s * 2 < N else 1)), r, s if s * 2 < N else N - s return v, r, s def ecdsa_raw_recover(msghash: bytes, vrs: Tuple[int, int, int]) -> "PlainPoint2D": """ Recover the public key from the signature and message hash. :param msghash: the hash of the message to be signed :type msghash: bytes :param vrs: the signature generated by the `ecdsa_raw_sign` function :type vrs: Tuple[int, int, int] :return: the recovered public key :rtype: PlainPoint2D """ v, r, s = vrs if v not in (27, 28): raise ValueError(f"value of v was {v}, must be either 27 or 28") x = r xcubedaxb = (x * x * x + A * x + B) % P beta = pow(xcubedaxb, (P + 1) // 4, P) y = beta if v % 2 ^ beta % 2 else (P - beta) # If xcubedaxb is not a quadratic residue, then r cannot be the x coord # for a point on the curve, and so the sig is invalid if (xcubedaxb - y * y) % P != 0 or not (r % N) or not (s % N): raise ValueError( f"sig is invalid, {r} cannot be the x coord for point on curve" ) z = bytes_to_int(msghash) Gz = jacobian_multiply(cast("PlainPoint3D", (Gx, Gy, 1)), (N - z) % N) XY = jacobian_multiply(cast("PlainPoint3D", (x, y, 1)), s) Qr = jacobian_add(Gz, XY) Q = jacobian_multiply(Qr, inv(r, N)) Q_jacobian = from_jacobian(Q) return Q_jacobian py_ecc-8.0.0/py_ecc/typing.py000066400000000000000000000041751477723207700161360ustar00rootroot00000000000000from typing import ( Optional, Tuple, TypeVar, Union, ) from py_ecc.fields import ( bls12_381_FQ, bls12_381_FQ2, bls12_381_FQ12, bls12_381_FQP, bn128_FQ, bn128_FQ2, bn128_FQ12, bn128_FQP, optimized_bls12_381_FQ, optimized_bls12_381_FQ2, optimized_bls12_381_FQ12, optimized_bls12_381_FQP, optimized_bn128_FQ, optimized_bn128_FQ2, optimized_bn128_FQ12, optimized_bn128_FQP, ) from py_ecc.fields.field_elements import ( FQ, FQ2, FQ12, FQP, ) from py_ecc.fields.optimized_field_elements import ( FQ as Optimized_FQ, FQ2 as Optimized_FQ2, FQ12 as Optimized_FQ12, FQP as Optimized_FQP, ) # # These types are wrt Normal Integers # PlainPoint2D = Tuple[int, int] PlainPoint3D = Tuple[int, int, int] # # Types for the normal curves and fields # Field = TypeVar( "Field", # General FQ, FQP, FQ2, FQ12, # bn128 bn128_FQ, bn128_FQP, bn128_FQ2, bn128_FQ12, # bls12_381 bls12_381_FQ, bls12_381_FQP, bls12_381_FQ2, bls12_381_FQ12, ) Point2D = Optional[Tuple[Field, Field]] # Point at infinity is encoded as a None Point3D = Optional[Tuple[Field, Field, Field]] # Point at infinity is encoded as a None GeneralPoint = Union[Point2D[Field], Point3D[Field]] # # Types For optimized curves and fields # Optimized_Field = TypeVar( "Optimized_Field", # General Optimized_FQ, Optimized_FQP, Optimized_FQ2, Optimized_FQ12, # bn128 optimized_bn128_FQ, optimized_bn128_FQP, optimized_bn128_FQ2, optimized_bn128_FQ12, # bls12_381 optimized_bls12_381_FQ, optimized_bls12_381_FQP, optimized_bls12_381_FQ2, optimized_bls12_381_FQ12, ) Optimized_Point2D = Tuple[Optimized_Field, Optimized_Field] Optimized_Point3D = Tuple[Optimized_Field, Optimized_Field, Optimized_Field] Optimized_GeneralPoint = Union[ Optimized_Point2D[Optimized_Field], Optimized_Point3D[Optimized_Field], ] # # Miscellaneous types # FQ2_modulus_coeffs_type = Tuple[int, int] 
FQ12_modulus_coeffs_type = Tuple[ int, int, int, int, int, int, int, int, int, int, int, int ] py_ecc-8.0.0/py_ecc/utils.py000066400000000000000000000026751477723207700157670ustar00rootroot00000000000000from typing import ( TYPE_CHECKING, Sequence, Tuple, Union, cast, ) if TYPE_CHECKING: from py_ecc.fields.field_elements import ( FQ, ) from py_ecc.fields.optimized_field_elements import ( FQ as optimized_FQ, ) IntOrFQ = Union[int, "FQ"] def prime_field_inv(a: int, n: int) -> int: """ Extended euclidean algorithm to find modular inverses for integers """ # To address a == n edge case. # https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-4 # inv0(x): This function returns the multiplicative inverse of x in # F, extended to all of F by fixing inv0(0) == 0. a %= n if a == 0: return 0 lm, hm = 1, 0 low, high = a % n, n while low > 1: r = high // low nm, new = hm - lm * r, high - low * r lm, low, hm, high = nm, new, lm, low return lm % n # Utility methods for polynomial math def deg(p: Sequence[Union[int, "FQ", "optimized_FQ"]]) -> int: d = len(p) - 1 while p[d] == 0 and d: d -= 1 return d def poly_rounded_div(a: Sequence[IntOrFQ], b: Sequence[IntOrFQ]) -> Tuple[IntOrFQ, ...]: dega = deg(a) degb = deg(b) temp = [x for x in a] o = [0 for x in a] for i in range(dega - degb, -1, -1): o[i] += int(temp[degb + i] / b[degb]) for c in range(degb + 1): temp[c + i] -= o[c] return cast(Tuple[IntOrFQ, ...], tuple(o[: deg(o) + 1])) py_ecc-8.0.0/pyproject.toml000066400000000000000000000103541477723207700157200ustar00rootroot00000000000000[tool.autoflake] exclude = "__init__.py" remove_all_unused_imports = true [tool.isort] combine_as_imports = true extra_standard_library = "pytest" force_grid_wrap = 1 force_sort_within_sections = true force_to_top = "pytest" honor_noqa = true known_first_party = "py_ecc" known_third_party = "hypothesis" multi_line_output = 3 profile = "black" use_parentheses = true [tool.mypy] check_untyped_defs = true disallow_any_generics = true disallow_incomplete_defs = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_decorators = true disallow_untyped_defs = true ignore_missing_imports = true strict_equality = true strict_optional = true warn_redundant_casts = true warn_return_any = true warn_unused_configs = true warn_unused_ignores = true [tool.pydocstyle] # All error codes found here: # http://www.pydocstyle.org/en/3.0.0/error_codes.html # # Ignored: # D1 - Missing docstring error codes # # Selected: # D2 - Whitespace error codes # D3 - Quote error codes # D4 - Content related error codes select = "D2,D3,D4" # Extra ignores: # D200 - One-line docstring should fit on one line with quotes # D203 - 1 blank line required before class docstring # D204 - 1 blank line required after class docstring # D205 - 1 blank line required between summary line and description # D212 - Multi-line docstring summary should start at the first line # D302 - Use u""" for Unicode docstrings # D400 - First line should end with a period # D401 - First line should be in imperative mood # D412 - No blank lines allowed between a section header and its content # D415 - First line should end with a period, question mark, or exclamation point add-ignore = "D200,D203,D204,D205,D212,D302,D400,D401,D412,D415" # Explanation: # D400 - Enabling this error code seems to make it a requirement that the first # sentence in a docstring is not split across two lines. 
It also makes it a # requirement that no docstring can have a multi-sentence description without a # summary line. Neither one of those requirements seem appropriate. [tool.pytest.ini_options] addopts = "-v --showlocals --durations 10" log_date_format = "%m-%d %H:%M:%S" log_format = "%(levelname)8s %(asctime)s %(filename)20s %(message)s" xfail_strict = true [tool.towncrier] # Read https://github.com/ethereum/py_ecc/blob/main/newsfragments/README.md for instructions directory = "newsfragments" filename = "docs/release_notes.rst" issue_format = "`#{issue} `__" package = "py_ecc" title_format = "py_ecc v{version} ({project_date})" underlines = ["-", "~", "^"] [[tool.towncrier.type]] directory = "breaking" name = "Breaking Changes" showcontent = true [[tool.towncrier.type]] directory = "bugfix" name = "Bugfixes" showcontent = true [[tool.towncrier.type]] directory = "deprecation" name = "Deprecations" showcontent = true [[tool.towncrier.type]] directory = "docs" name = "Improved Documentation" showcontent = true [[tool.towncrier.type]] directory = "feature" name = "Features" showcontent = true [[tool.towncrier.type]] directory = "internal" name = "Internal Changes - for py_ecc Contributors" showcontent = true [[tool.towncrier.type]] directory = "misc" name = "Miscellaneous Changes" showcontent = false [[tool.towncrier.type]] directory = "performance" name = "Performance Improvements" showcontent = true [[tool.towncrier.type]] directory = "removal" name = "Removals" showcontent = true [tool.bumpversion] current_version = "8.0.0" parse = """ (?P\\d+) \\.(?P\\d+) \\.(?P\\d+) (- (?P[^.]*) \\.(?P\\d+) )? """ serialize = [ "{major}.{minor}.{patch}-{stage}.{devnum}", "{major}.{minor}.{patch}", ] search = "{current_version}" replace = "{new_version}" regex = false ignore_missing_version = false tag = true sign_tags = true tag_name = "v{new_version}" tag_message = "Bump version: {current_version} → {new_version}" allow_dirty = false commit = true message = "Bump version: {current_version} → {new_version}" [tool.bumpversion.parts.stage] optional_value = "stable" first_value = "stable" values = [ "alpha", "beta", "stable", ] [tool.bumpversion.part.devnum] [[tool.bumpversion.files]] filename = "setup.py" search = "version=\"{current_version}\"" replace = "version=\"{new_version}\"" py_ecc-8.0.0/scripts/000077500000000000000000000000001477723207700144705ustar00rootroot00000000000000py_ecc-8.0.0/scripts/release/000077500000000000000000000000001477723207700161105ustar00rootroot00000000000000py_ecc-8.0.0/scripts/release/test_package.py000066400000000000000000000025071477723207700211200ustar00rootroot00000000000000from pathlib import ( Path, ) import subprocess from tempfile import ( TemporaryDirectory, ) import venv def create_venv(parent_path: Path) -> Path: venv_path = parent_path / "package-smoke-test" venv.create(venv_path, with_pip=True) subprocess.run( [venv_path / "bin" / "pip", "install", "-U", "pip", "setuptools"], check=True ) return venv_path def find_wheel(project_path: Path) -> Path: wheels = list(project_path.glob("dist/*.whl")) if len(wheels) != 1: raise Exception( f"Expected one wheel. 
Instead found: {wheels} " f"in project {project_path.absolute()}" ) return wheels[0] def install_wheel(venv_path: Path, wheel_path: Path) -> None: subprocess.run( [venv_path / "bin" / "pip", "install", f"{wheel_path}"], check=True, ) def test_install_local_wheel() -> None: with TemporaryDirectory() as tmpdir: venv_path = create_venv(Path(tmpdir)) wheel_path = find_wheel(Path(".")) install_wheel(venv_path, wheel_path) print("Installed", wheel_path.absolute(), "to", venv_path) print(f"Activate with `source {venv_path}/bin/activate`") input("Press enter when the test has completed. The directory will be deleted.") if __name__ == "__main__": test_install_local_wheel() py_ecc-8.0.0/setup.py000066400000000000000000000041351477723207700145160ustar00rootroot00000000000000#!/usr/bin/env python from setuptools import ( find_packages, setup, ) extras_require = { "dev": [ "build>=0.9.0", "bump_my_version>=0.19.0", "ipython", "mypy==1.10.0", "pre-commit>=3.4.0", "tox>=4.0.0", "twine", "wheel", ], "docs": [ "sphinx>=6.0.0", "sphinx-autobuild>=2021.3.14", "sphinx_rtd_theme>=1.0.0", "towncrier>=24,<25", ], "test": [ "pytest>=7.0.0", "pytest-xdist>=2.4.0", ], } extras_require["dev"] = ( extras_require["dev"] + extras_require["docs"] + extras_require["test"] ) with open("./README.md") as readme: long_description = readme.read() setup( name="py-ecc", # *IMPORTANT*: Don't manually change the version here. Use `make bump`, as described in readme version="8.0.0", description="""py-ecc: Elliptic curve crypto in python including secp256k1, alt_bn128, and bls12_381""", long_description=long_description, long_description_content_type="text/markdown", author="The Ethereum Foundation", author_email="snakecharmers@ethereum.org", url="https://github.com/ethereum/py_ecc", include_package_data=True, install_requires=[ "eth-typing>=3.0.0", "eth-utils>=2.0.0", ], python_requires=">=3.8, <4", extras_require=extras_require, py_modules=["py_ecc"], license="MIT", zip_safe=False, keywords="ethereum", packages=find_packages(exclude=["scripts", "scripts.*", "tests", "tests.*"]), package_data={"py_ecc": ["py.typed"]}, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ], ) py_ecc-8.0.0/tests/000077500000000000000000000000001477723207700141435ustar00rootroot00000000000000py_ecc-8.0.0/tests/bls/000077500000000000000000000000001477723207700147235ustar00rootroot00000000000000py_ecc-8.0.0/tests/bls/ciphersuites/000077500000000000000000000000001477723207700174325ustar00rootroot00000000000000py_ecc-8.0.0/tests/bls/ciphersuites/test_g2_basic.py000066400000000000000000000066231477723207700225230ustar00rootroot00000000000000import pytest from eth_utils import ( ValidationError, ) from py_ecc.bls import ( G2Basic, ) from py_ecc.bls.g2_primitives import ( G1_to_pubkey, G2_to_signature, ) from py_ecc.optimized_bls12_381 import ( Z1, Z2, ) Z1_PUBKEY = G1_to_pubkey(Z1) Z2_SIGNATURE = G2_to_signature(Z2) @pytest.mark.parametrize( "SKs,messages,result", [ (list(range(1, 11)), list(range(1, 11)), True), ( list(range(1, 4)), (b"42", b"69", b"42"), False, ), # Test duplicate messages fail ], ) def test_aggregate_verify(SKs, messages, result): PKs = 
[G2Basic.SkToPk(SK) for SK in SKs] messages = [bytes(msg) for msg in messages] signatures = [G2Basic.Sign(SK, msg) for SK, msg in zip(SKs, messages)] aggregate_signature = G2Basic.Aggregate(signatures) assert G2Basic.AggregateVerify(PKs, messages, aggregate_signature) == result @pytest.mark.parametrize( "privkey, success", [ (1, True), (0, False), ("hello", False), # wrong type ], ) def test_sk_to_pk(privkey, success): if success: G2Basic.SkToPk(privkey) else: with pytest.raises(ValidationError): G2Basic.SkToPk(privkey) @pytest.mark.parametrize( "privkey, message, success", [ (1, b"message", True), (0, b"message", False), ("hello", b"message", False), # wrong type privkey (1, 123, False), # wrong type message ], ) def test_sign(privkey, message, success): if success: G2Basic.Sign(privkey, message) else: with pytest.raises(ValidationError): G2Basic.Sign(privkey, message) @pytest.mark.parametrize( "signatures, success", [ ([G2Basic.Sign(1, b"helloworld")], True), ([G2Basic.Sign(1, b"helloworld"), G2Basic.Sign(2, b"helloworld")], True), ([Z2_SIGNATURE], True), (["hello"], False), ([], False), ], ) def test_aggregate(signatures, success): if success: G2Basic.Aggregate(signatures) else: with pytest.raises(ValidationError): G2Basic.Aggregate(signatures) SAMPLE_MESSAGE = b"helloworld" @pytest.mark.parametrize( "pubkey, message, signature, result", [ (G2Basic.SkToPk(1), SAMPLE_MESSAGE, G2Basic.Sign(1, SAMPLE_MESSAGE), True), (G2Basic.SkToPk(2), SAMPLE_MESSAGE, G2Basic.Sign(1, SAMPLE_MESSAGE), False), (G2Basic.SkToPk(1), SAMPLE_MESSAGE, Z2_SIGNATURE, False), (Z1_PUBKEY, SAMPLE_MESSAGE, G2Basic.Sign(1, SAMPLE_MESSAGE), False), (Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE, False), ( b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # noqa: E501 SAMPLE_MESSAGE, b"\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", # noqa: E501 False, ), ], ) def test_verify(pubkey, message, signature, result): assert G2Basic.Verify(pubkey, message, signature) == result py_ecc-8.0.0/tests/bls/ciphersuites/test_g2_message_augmentation.py000066400000000000000000000017051477723207700256350ustar00rootroot00000000000000import pytest from py_ecc.bls import ( G2MessageAugmentation, ) @pytest.mark.parametrize( "privkey", [ (1), (5), (124), (735), (127409812145), (90768492698215092512159), ], ) def test_sign_verify(privkey): msg = str(privkey).encode("utf-8") pub = G2MessageAugmentation.SkToPk(privkey) sig = G2MessageAugmentation.Sign(privkey, msg) assert G2MessageAugmentation.Verify(pub, msg, sig) @pytest.mark.parametrize("SKs,messages", [(list(range(1, 11)), list(range(1, 11)))]) def test_aggregate_verify(SKs, messages): PKs = [G2MessageAugmentation.SkToPk(SK) for SK in SKs] messages = [bytes(msg) + PK for msg, PK in zip(messages, PKs)] signatures = [G2MessageAugmentation.Sign(SK, msg) for SK, msg in zip(SKs, messages)] aggregate_signature = G2MessageAugmentation.Aggregate(signatures) assert G2MessageAugmentation.AggregateVerify(PKs, messages, aggregate_signature) 
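The ciphersuite tests above exercise the high-level signing API end to end. As a minimal usage sketch (not part of the package) of the same flow, assuming only the G2Basic calls shown in these tests (SkToPk, Sign, Aggregate, Verify, AggregateVerify), the round trip looks roughly like this:

from py_ecc.bls import G2Basic

# Toy private keys and distinct messages; the Basic scheme rejects duplicate
# messages, as the duplicate-message case in test_aggregate_verify above shows.
secret_keys = [3, 14, 159]
messages = [b"msg-0", b"msg-1", b"msg-2"]

public_keys = [G2Basic.SkToPk(sk) for sk in secret_keys]
signatures = [G2Basic.Sign(sk, msg) for sk, msg in zip(secret_keys, messages)]
aggregate_signature = G2Basic.Aggregate(signatures)

# Single verification and aggregate verification of the same data.
assert G2Basic.Verify(public_keys[0], messages[0], signatures[0])
assert G2Basic.AggregateVerify(public_keys, messages, aggregate_signature)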
py_ecc-8.0.0/tests/bls/ciphersuites/test_g2_pop.py000066400000000000000000000067221477723207700222400ustar00rootroot00000000000000import pytest from py_ecc.bls import ( G2ProofOfPossession, ) from py_ecc.bls.g2_primitives import ( G1_to_pubkey, G2_to_signature, ) from py_ecc.optimized_bls12_381 import ( G1, Z1, Z2, multiply, ) sample_message = b"\x12" * 32 Z1_PUBKEY = G1_to_pubkey(Z1) Z2_SIGNATURE = G2_to_signature(Z2) def compute_aggregate_signature(SKs, message): PKs = [G2ProofOfPossession.SkToPk(sk) for sk in SKs] signatures = [G2ProofOfPossession.Sign(sk, message) for sk in SKs] aggregate_signature = G2ProofOfPossession.Aggregate(signatures) return (PKs, aggregate_signature) @pytest.mark.parametrize( "sk", [ 42, 69, 31415926, ], ) def test_pop(sk): pk = G2ProofOfPossession.SkToPk(sk) proof = G2ProofOfPossession.PopProve(sk) assert G2ProofOfPossession.PopVerify(pk, proof) @pytest.mark.parametrize( "signature_points,result_point", [ ([multiply(G1, 2), multiply(G1, 3)], multiply(G1, 2 + 3)), ([multiply(G1, 42), multiply(G1, 69)], multiply(G1, 42 + 69)), ], ) def test_aggregate_pks(signature_points, result_point): signatures = [G1_to_pubkey(pt) for pt in signature_points] result_signature = G1_to_pubkey(result_point) assert G2ProofOfPossession._AggregatePKs(signatures) == result_signature @pytest.mark.parametrize( "PK, message, signature, result", [ ( G2ProofOfPossession.SkToPk(1), sample_message, G2ProofOfPossession.Sign(1, sample_message), True, ), (None, sample_message, Z2_SIGNATURE, False), # wrong type (Z1_PUBKEY, sample_message, Z2_SIGNATURE, False), ], ) def test_verify(PK, message, signature, result): assert G2ProofOfPossession.Verify(PK, message, signature) == result @pytest.mark.parametrize( "PKs, aggregate_signature, message, result", [ ( *compute_aggregate_signature(SKs=[1], message=sample_message), sample_message, True, ), ( *compute_aggregate_signature( SKs=tuple(range(1, 5)), message=sample_message ), sample_message, True, ), ([], Z2_SIGNATURE, sample_message, False), ( [G2ProofOfPossession.SkToPk(1), Z1_PUBKEY], G2ProofOfPossession.Sign(1, sample_message), sample_message, False, ), ], ) def test_aggregate_verify(PKs, aggregate_signature, message, result): assert ( G2ProofOfPossession.AggregateVerify( PKs, (message,) * len(PKs), aggregate_signature ) == result ) @pytest.mark.parametrize( "PKs, aggregate_signature, message, result", [ ( *compute_aggregate_signature(SKs=[1], message=sample_message), sample_message, True, ), ( *compute_aggregate_signature( SKs=tuple(range(1, 5)), message=sample_message ), sample_message, True, ), ([], Z2_SIGNATURE, sample_message, False), ( [G2ProofOfPossession.SkToPk(1), Z1_PUBKEY], G2ProofOfPossession.Sign(1, sample_message), sample_message, False, ), ], ) def test_fast_aggregate_verify(PKs, aggregate_signature, message, result): assert ( G2ProofOfPossession.FastAggregateVerify(PKs, message, aggregate_signature) == result ) py_ecc-8.0.0/tests/bls/test_expand_message_xmd.py000066400000000000000000000113351477723207700221720ustar00rootroot00000000000000import pytest from hashlib import ( sha256, ) from py_ecc.bls.hash import ( expand_message_xmd, ) # The test vectors from # https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#appendix-I.1 DST = b"QUUX-V01-CS02-with-expander" @pytest.mark.parametrize( "msg, len_in_bytes, uniform_bytes", [ ( b"", 0x20, bytes.fromhex( "f659819a6473c1835b25ea59e3d38914c98b374f0970b7e4c92181df928fca88" ), ), ( b"abc", 0x20, bytes.fromhex( "1c38f7c211ef233367b2420d04798fa4698080a8901021a795a1151775fe4da7" ), 
), ( b"abcdef0123456789", 0x20, bytes.fromhex( "8f7e7b66791f0da0dbb5ec7c22ec637f79758c0a48170bfb7c4611bd304ece89" ), ), ( b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", # noqa: E501 0x20, bytes.fromhex( "72d5aa5ec810370d1f0013c0df2f1d65699494ee2a39f72e1716b1b964e1c642" ), ), ( b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # noqa: E501 0x20, bytes.fromhex( "3b8e704fc48336aca4c2a12195b720882f2162a4b7b13a9c350db46f429b771b" ), ), ( b"", 0x80, bytes.fromhex( "8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f89580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c760861c0cde2005afc2c114042ee7b5848f5303f0611cf297f" # noqa: E501 ), ), ( b"abc", 0x80, bytes.fromhex( "fe994ec51bdaa821598047b3121c149b364b178606d5e72bfbb713933acc29c186f316baecf7ea22212f2496ef3f785a27e84a40d8b299cec56032763eceeff4c61bd1fe65ed81decafff4a31d0198619c0aa0c6c51fca15520789925e813dcfd318b542f8799441271f4db9ee3b8092a7a2e8d5b75b73e28fb1ab6b4573c192" # noqa: E501 ), ), ( b"abcdef0123456789", 0x80, bytes.fromhex( "c9ec7941811b1e19ce98e21db28d22259354d4d0643e301175e2f474e030d32694e9dd5520dde93f3600d8edad94e5c364903088a7228cc9eff685d7eaac50d5a5a8229d083b51de4ccc3733917f4b9535a819b445814890b7029b5de805bf62b33a4dc7e24acdf2c924e9fe50d55a6b832c8c84c7f82474b34e48c6d43867be" # noqa: E501 ), ), ( b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", # noqa: E501 0x80, bytes.fromhex( "48e256ddba722053ba462b2b93351fc966026e6d6db493189798181c5f3feea377b5a6f1d8368d7453faef715f9aecb078cd402cbd548c0e179c4ed1e4c7e5b048e0a39d31817b5b24f50db58bb3720fe96ba53db947842120a068816ac05c159bb5266c63658b4f000cbf87b1209a225def8ef1dca917bcda79a1e42acd8069" # noqa: E501 ), ), ( b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # noqa: E501 0x80, bytes.fromhex( "396962db47f749ec3b5042ce2452b619607f27fd3939ece2746a7614fb83a1d097f554df3927b084e55de92c7871430d6b95c2a13896d8a33bc48587b1f66d21b128a1a8240d5b0c26dfe795a1a842a0807bb148b77c2ef82ed4b6c9f7fcb732e7f94466c8b51e52bf378fba044a31f5cb44583a892f5969dcd73b3fa128816e" # noqa: E501 ), ), ], ) def test_expand_message_xmd_sha256(msg, len_in_bytes, uniform_bytes): assert ( expand_message_xmd( msg=msg, DST=DST, len_in_bytes=len_in_bytes, hash_function=sha256, ) == uniform_bytes ) py_ecc-8.0.0/tests/bls/test_g2_core.py000066400000000000000000000033241477723207700176560ustar00rootroot00000000000000import pytest from 
py_ecc.bls import ( G2Basic, ) from py_ecc.bls.g2_primitives import ( G2_to_signature, ) from py_ecc.optimized_bls12_381 import ( G2, multiply, ) @pytest.mark.parametrize( "pubkey,success", [ (G2Basic.SkToPk(42), True), (b"\x11" * 48, False), ], ) def test_key_validate(pubkey, success): assert G2Basic.KeyValidate(pubkey) == success @pytest.mark.parametrize( "privkey", [ (1), (5), (124), (735), (127409812145), (90768492698215092512159), ], ) def test_sign_verify(privkey): msg = str(privkey).encode("utf-8") pub = G2Basic.SkToPk(privkey) sig = G2Basic._CoreSign(privkey, msg, G2Basic.DST) assert G2Basic._CoreVerify(pub, msg, sig, G2Basic.DST) @pytest.mark.parametrize( "signature_points,result_point", [ ([multiply(G2, 2), multiply(G2, 3)], multiply(G2, 2 + 3)), ([multiply(G2, 42), multiply(G2, 69)], multiply(G2, 42 + 69)), ], ) def test_aggregate(signature_points, result_point): signatures = [G2_to_signature(pt) for pt in signature_points] result_signature = G2_to_signature(result_point) assert G2Basic.Aggregate(signatures) == result_signature @pytest.mark.parametrize( "SKs,messages", [ (list(range(1, 6)), list(range(1, 6))), ], ) def test_core_aggregate_verify(SKs, messages): PKs = [G2Basic.SkToPk(sk) for sk in SKs] messages = [bytes(msg) for msg in messages] signatures = [ G2Basic._CoreSign(sk, msg, G2Basic.DST) for sk, msg in zip(SKs, messages) ] aggregate_signature = G2Basic.Aggregate(signatures) assert G2Basic._CoreAggregateVerify(PKs, messages, aggregate_signature, G2Basic.DST) py_ecc-8.0.0/tests/bls/test_g2_primatives.py000066400000000000000000000014041477723207700211060ustar00rootroot00000000000000import pytest from py_ecc.bls.g2_primitives import ( G1_to_pubkey, G2_to_signature, pubkey_to_G1, signature_to_G2, ) from py_ecc.optimized_bls12_381 import ( G1, G2, multiply, normalize, ) def test_decompress_G2_with_no_modular_square_root_found(): with pytest.raises(ValueError, match="Failed to find a modular squareroot"): signature_to_G2(b"\xA0" + b"\x11" * 95) def test_G2_signature_encode_decode(): G2_point = multiply(G2, 42) signature = G2_to_signature(G2_point) assert normalize(signature_to_G2(signature)) == normalize(G2_point) def test_G1_pubkey_encode_decode(): G1_point = multiply(G1, 42) pubkey = G1_to_pubkey(G1_point) assert normalize(pubkey_to_G1(pubkey)) == normalize(G1_point) py_ecc-8.0.0/tests/bls/test_hash_to_curve.py000066400000000000000000000346731477723207700212020ustar00rootroot00000000000000""" These are temporary tests to check the functionality of helper functions in `hash_to_G2` They should be removed and replaced with a final version when hash to curve is complete. 
""" import pytest from hashlib import ( sha256, ) from py_ecc.bls.hash_to_curve import ( hash_to_G1, hash_to_G2, ) from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, optimized_bls12_381_FQ12 as FQ12, optimized_bls12_381_FQP as FQP, ) from py_ecc.optimized_bls12_381 import ( b, b2, is_on_curve, iso_map_G2, ) DST_G1 = b"QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_" DST_G2 = b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_RO_" @pytest.mark.parametrize( "iso_x,iso_y,iso_z,g2_x,g2_y", [ ( FQ2( [ int( "0888F3832AD680917A71A1816C939290473474982C647B0B196BA0EDF62A0BC1A15D3E87CF6A287137B16C057E1AC808", # noqa: E501 16, ), int( "0B3D6E7A20275C100B460A900B23F2D8D5E9A53C3E59066E8D968D07AB0787940C0AC8A6C8C118FAD9068A2ECF00ADD7", # noqa: E501 16, ), ] ), # Iso-x0 FQ2( [ int( "08696DF8BAF8C488B7CFCA14CB984D0B78C998C3431E41700B493AAF921F779AA7F3660B1F5D6AC3BA4EBC85A1132CF3", # noqa: E501 16, ), int( "053003D3ED23019E585CF255A58634CEDA4C362B2E1D75E2AE85F4D1EF9C400786256D4AEE443DD1C900DD72E4089F73", # noqa: E501 16, ), ] ), # Iso-y0 FQ2( [ int( "108F7DF15439154BF32D7E4D1B6FEFC4BEF7C39A16AACA469D249770AD7B9F4AD3EA3CE58333A3194177C2D14B5CD2BC", # noqa: E501 16, ), int( "09E2E891E7A7AB58D5BF93864000ADBF0B6C31A8E35AB6AEC3B0820C2E536D6F0D170840B0AAFB470A9FD9B2F7DE3C27", # noqa: E501 16, ), ] ), # Iso-z0 FQ2( [ int( "168A912067A8F06CEB1F5F59DCEC69CE47F5A2B1696DFD5E67F1CF675587AD3A19831842D2543957BEE44FE29592996E", # noqa: E501 16, ), int( "116F36861307AA38251CAA73AA44FA359732DD92A15CDC70B21E3F7B2A332F73F86801789C469FE3FBB24DEB18AD5F0C", # noqa: E501 16, ), ] ), # G2-x0 FQ2( [ int( "0D4976CD99F4AD7204BC5983F6CE590766852DB93E5BE6CAB4C28591013E132BC6100D42022D5B66CE68A64A6B2A9C24", # noqa: E501 16, ), int( "0C6BA0E076144119F2B272718EC04C3FB037C9AA2C4074E64BE233AB27C0397BE175B9FDA277DCE8841669F787161AD2", # noqa: E501 16, ), ] ), ), # G2-y0 ( FQ2( [ int( "039C33A34D97134F01D334F13C76BD5BB803B853BE4221A826026BFC93B5CA39E74B51A15D00BF88DF4F655915553027", # noqa: E501 16, ), int( "08DA2162E554A644AECC1F904F2B140D0296B7AC85B4EE59313DCEDE58B375C2E677160BC97CF8114361ABBE7D4672CD", # noqa: E501 16, ), ] ), # Iso-x1 FQ2( [ int( "1201968136C60428FB9DF8004C4915DC5E502D20D32F9DD87BC38163A52E2729289490030235E61EAEA098B0E8D63BF8", # noqa: E501 16, ), int( "116524863E40B6437BBAB965CDB84614F2346F1AD40300E9B15C3BDDE498E1FC1F76346452D3CF25553E2A3B89D9C5B1", # noqa: E501 16, ), ] ), # Iso-y1 FQ2( [ int( "08C3BCEBE1FC7F9987AE406A78C3FC898AE0C8A2FF0139A523E3CE91263EAA617519FC1A1158AF39BBA705316C9C2678", # noqa: E501 16, ), int( "0C9E92BB5509704DA0B6825A3AA36BA68A877875258F17C315FEA1527A82C7975E8439E91644616DABFD28E1DB43C1D9", # noqa: E501 16, ), ] ), # Iso-z1 FQ2( [ int( "1990072F0029639467E5C5EF9F65B31F194C31586D56141A7906DE6EE2B40803E06A301F9EEE9C8B04FA6AF8C5950F64", # noqa: E501 16, ), int( "0910709BEC8515357CB68AE88EA0B7EC6D54190773CC82EDDA68180D62BA214737DC708A5DA815E8B872D3C5B31E5A00", # noqa: E501 16, ), ] ), # G2-x1 FQ2( [ int( "12416C8B9159A047D5F92A6A4E941156E29E2A489B671D2FC3D8ED60FFA5F53FE846ECFB0090211197EF3BA4C07424F9", # noqa: E501 16, ), int( "089977D619CEA9D6D11F7148E1CB7622E46153BF1B4D81944603AA72AEFA6CE7CF07550CB6B582D17440F5949D1214FA", # noqa: E501 16, ), ] ), ), # G2-y1 ], ) def test_iso_map_G2(iso_x, iso_y, iso_z, g2_x, g2_y): (result_x, result_y, result_z) = iso_map_G2(iso_x, iso_y, iso_z) result_x = result_x / result_z result_y = result_y / result_z assert g2_x == result_x assert g2_y == result_y # Tests taken from: 
https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/blob/master/draft-irtf-cfrg-hash-to-curve.md#bls12381g2_xmdsha-256_sswu_ro_ # blocklint: URL pragma # noqa: E501 @pytest.mark.parametrize("H", [sha256]) @pytest.mark.parametrize( "msg,x,y", [ ( b"", FQ2( [ 0x0141EBFBDCA40EB85B87142E130AB689C673CF60F1A3E98D69335266F30D9B8D4AC44C1038E9DCDD5393FAF5C41FB78A, # noqa: E501 0x05CB8437535E20ECFFAEF7752BADDF98034139C38452458BAEEFAB379BA13DFF5BF5DD71B72418717047F5B0F37DA03D, # noqa: E501 ] ), FQ2( [ 0x0503921D7F6A12805E72940B963C0CF3471C7B2A524950CA195D11062EE75EC076DAF2D4BC358C4B190C0C98064FDD92, # noqa: E501 0x12424AC32561493F3FE3C260708A12B7C620E7BE00099A974E259DDC7D1F6395C3C811CDD19F1E8DBF3E9ECFDCBAB8D6, # noqa: E501 ] ), ), ( b"abc", FQ2( [ 0x02C2D18E033B960562AAE3CAB37A27CE00D80CCD5BA4B7FE0E7A210245129DBEC7780CCC7954725F4168AFF2787776E6, # noqa: E501 0x139CDDBCCDC5E91B9623EFD38C49F81A6F83F175E80B06FC374DE9EB4B41DFE4CA3A230ED250FBE3A2ACF73A41177FD8, # noqa: E501 ] ), FQ2( [ 0x1787327B68159716A37440985269CF584BCB1E621D3A7202BE6EA05C4CFE244AEB197642555A0645FB87BF7466B2BA48, # noqa: E501 0x00AA65DAE3C8D732D10ECD2C50F8A1BAF3001578F71C694E03866E9F3D49AC1E1CE70DD94A733534F106D4CEC0EDDD16, # noqa: E501 ] ), ), ( b"abcdef0123456789", FQ2( [ 0x121982811D2491FDE9BA7ED31EF9CA474F0E1501297F68C298E9F4C0028ADD35AEA8BB83D53C08CFC007C1E005723CD0, # noqa: E501 0x190D119345B94FBD15497BCBA94ECF7DB2CBFD1E1FE7DA034D26CBBA169FB3968288B3FAFB265F9EBD380512A71C3F2C, # noqa: E501 ] ), FQ2( [ 0x05571A0F8D3C08D094576981F4A3B8EDA0A8E771FCDCC8ECCEAF1356A6ACF17574518ACB506E435B639353C2E14827C8, # noqa: E501 0x0BB5E7572275C567462D91807DE765611490205A941A5A6AF3B1691BFE596C31225D3AABDF15FAFF860CB4EF17C7C3BE, # noqa: E501 ] ), ), ( b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", # noqa: E501 FQ2( [ 0x19A84DD7248A1066F737CC34502EE5555BD3C19F2ECDB3C7D9E24DC65D4E25E50D83F0F77105E955D78F4762D33C17DA, # noqa: E501 0x0934ABA516A52D8AE479939A91998299C76D39CC0C035CD18813BEC433F587E2D7A4FEF038260EEF0CEF4D02AAE3EB91, # noqa: E501 ] ), FQ2( [ 0x14F81CD421617428BC3B9FE25AFBB751D934A00493524BC4E065635B0555084DD54679DF1536101B2C979C0152D09192, # noqa: E501 0x09BCCCFA036B4847C9950780733633F13619994394C23FF0B32FA6B795844F4A0673E20282D07BC69641CEE04F5E5662, # noqa: E501 ] ), ), ( b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # noqa: E501 FQ2( [ 0x01A6BA2F9A11FA5598B2D8ACE0FBE0A0EACB65DECEB476FBBCB64FD24557C2F4B18ECFC5663E54AE16A84F5AB7F62534, # noqa: E501 0x11FCA2FF525572795A801EED17EB12785887C7B63FB77A42BE46CE4A34131D71F7A73E95FEE3F812AEA3DE78B4D01569, # noqa: E501 ] ), FQ2( [ 0x0B6798718C8AED24BC19CB27F866F1C9EFFCDBF92397AD6448B5C9DB90D2B9DA6CBABF48ADC1ADF59A1A28344E79D57E, # noqa: E501 0x03A47F8E6D1763BA0CAD63D6114C0ACCBEF65707825A511B251A660A9B3994249AE4E63FAC38B23DA0C398689EE2AB52, # noqa: E501 ] ), ), ], ) def test_hash_to_G2(msg, x, y, H): point = hash_to_G2(msg, DST_G2, H) assert is_on_curve(point, b2) # Affine result_x = point[0] / point[2] # X / Z result_y = point[1] / point[2] 
# Y / Z assert x == result_x assert y == result_y @pytest.mark.parametrize( "degree", [ (1), (2), (12), ], ) @pytest.mark.parametrize( "value, expected", [ (0x00, 0), (0x10, 0), (0x01, 1), ], ) def test_FQ_sgn0(degree, value, expected): if degree == 1: x = FQ(value) elif degree == 2: x = FQ2([value, 0]) elif degree == 12: x = FQ12([value] + [0] * 11) assert x.sgn0 == expected if value != 0: assert x.sgn0 != (-x).sgn0 @pytest.mark.parametrize( "value, expected", [ ([0x00, 0x00], 0), ([0x10, 0x00], 0), ([0x01, 0x00], 1), ([0x01, 0x01], 1), ([0x10, 0x10], 0), ], ) def test_FQ2_sgn0(value, expected): x = FQ2(value) y = FQP(value, modulus_coeffs=FQ2.FQ2_MODULUS_COEFFS) assert x.sgn0 == y.sgn0 == expected # --- G1 --- # https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/blob/main/draft-irtf-cfrg-hash-to-curve.md#bls12381g1_xmdsha-256_sswu_ro_ # noqa: E501 @pytest.mark.parametrize("H", [sha256]) @pytest.mark.parametrize( "msg,x,y", [ ( b"", FQ( 0x052926ADD2207B76CA4FA57A8734416C8DC95E24501772C814278700EED6D1E4E8CF62D9C09DB0FAC349612B759E79A1 # noqa: E501 ), FQ( 0x8BA738453BFED09CB546DBB0783DBB3A5F1F566ED67BB6BE0E8C67E2E81A4CC68EE29813BB7994998F3EAE0C9C6A265 # noqa: E501 ), ), ( b"abc", FQ( 0x03567BC5EF9C690C2AB2ECDF6A96EF1C139CC0B2F284DCA0A9A7943388A49A3AEE664BA5379A7655D3C68900BE2F6903 # noqa: E501 ), FQ( 0x0B9C15F3FE6E5CF4211F346271D7B01C8F3B28BE689C8429C85B67AF215533311F0B8DFAAA154FA6B88176C229F2885D # noqa: E501 ), ), ( b"abcdef0123456789", FQ( 0x11E0B079DEA29A68F0383EE94FED1B940995272407E3BB916BBF268C263DDD57A6A27200A784CBC248E84F357CE82D98 # noqa: E501 ), FQ( 0x03A87AE2CAF14E8EE52E51FA2ED8EEFE80F02457004BA4D486D6AA1F517C0889501DC7413753F9599B099EBCBBD2D709 # noqa: E501 ), ), ( b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", # noqa: E501 FQ( 0x15F68EAA693B95CCB85215DC65FA81038D69629F70AEEE0D0F677CF22285E7BF58D7CB86EEFE8F2E9BC3F8CB84FAC488 # noqa: E501 ), FQ( 0x1807A1D50C29F430B8CAFC4F8638DFEEADF51211E1602A5F184443076715F91BB90A48BA1E370EDCE6AE1062F5E6DD38 # noqa: E501 ), ), ( b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # noqa: E501 FQ( 0x082AABAE8B7DEDB0E78AEB619AD3BFD9277A2F77BA7FAD20EF6AABDC6C31D19BA5A6D12283553294C1825C4B3CA2DCFE # noqa: E501 ), FQ( 0x05B84AE5A942248EEA39E1D91030458C40153F3B654AB7872D779AD1E942856A20C438E8D99BC8ABFBF74729CE1F7AC8 # noqa: E501 ), ), ], ) def test_hash_to_G1(msg, x, y, H): point = hash_to_G1(msg, DST_G1, H) assert is_on_curve(point, b) # Affine result_x = point[0] / point[2] # X / Z result_y = point[1] / point[2] # Y / Z assert x == result_x assert y == result_y py_ecc-8.0.0/tests/bls/test_hkdf.py000066400000000000000000000047011477723207700172520ustar00rootroot00000000000000import pytest from py_ecc.bls.hash import ( hkdf_expand, hkdf_extract, ) # From https://tools.ietf.org/html/rfc5869 @pytest.mark.parametrize( "salt,ikm,prk", [ ( "000102030405060708090a0b0c", "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", "077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5", ), ( 
"606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", # noqa: E501 "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", # noqa: E501 "06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c15fc244", ), ( "", "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", "19ef24a32c717b167f33a91d6f648bdf96596776afdb6377ac434c1c293ccb04", ), ], ) def test_hkdf_extract(salt, ikm, prk): result = hkdf_extract(bytes.fromhex(salt), bytes.fromhex(ikm)) prk = bytes.fromhex(prk) assert prk == result # From https://tools.ietf.org/html/rfc5869 @pytest.mark.parametrize( "prk,info,length,okm", [ ( "077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5", "f0f1f2f3f4f5f6f7f8f9", 42, "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", # noqa: E501 ), ( "06a6b88c5853361a06104c9ceb35b45cef760014904671014a193f40c15fc244", "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", # noqa: E501 82, "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87", # noqa: E501 ), ( "19ef24a32c717b167f33a91d6f648bdf96596776afdb6377ac434c1c293ccb04", "", 42, "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", # noqa: E501 ), ], ) def test_hkdf_expand(prk, info, length, okm): result = hkdf_expand(bytes.fromhex(prk), bytes.fromhex(info), length) assert bytes.fromhex(okm) == result py_ecc-8.0.0/tests/bls/test_point_compression.py000066400000000000000000000131131477723207700221050ustar00rootroot00000000000000import pytest from py_ecc.bls.constants import ( POW_2_381, POW_2_382, POW_2_383, POW_2_384, ) from py_ecc.bls.point_compression import ( compress_G1, compress_G2, decompress_G1, decompress_G2, ) from py_ecc.fields import ( optimized_bls12_381_FQ as FQ, optimized_bls12_381_FQ2 as FQ2, ) from py_ecc.optimized_bls12_381 import ( G1, G2, Z1, Z2, b, field_modulus as q, is_on_curve, multiply, normalize, ) @pytest.mark.parametrize( "pt,on_curve,is_infinity", [ # On curve points (G1, True, False), (multiply(G1, 5), True, False), # Infinity point but still on curve (Z1, True, True), # Not on curve ((FQ(5566), FQ(5566), FQ.one()), False, None), ], ) def test_G1_compress_and_decompress_flags(pt, on_curve, is_infinity): assert on_curve == is_on_curve(pt, b) z = compress_G1(pt) if on_curve: x = z % POW_2_381 c_flag = (z % POW_2_384) // POW_2_383 b_flag = (z % POW_2_383) // POW_2_382 a_flag = (z % POW_2_382) // POW_2_381 assert x < q assert c_flag == 1 if is_infinity: assert b_flag == 1 assert a_flag == x == 0 else: assert b_flag == 0 pt_x, pt_y = normalize(pt) assert a_flag == (pt_y.n * 2) // q assert x == pt_x.n # Correct flags should decompress correct x, y assert normalize(decompress_G1(z)) == normalize(pt) else: with pytest.raises(ValueError): decompress_G1(z) compressed_g1 = compress_G1(G1) compressed_z1 = compress_G1(Z1) @pytest.mark.parametrize( "z, error_message", [ (compressed_g1, None), # baseline (compressed_g1 & ~(1 << 383), "c_flag should be 1"), # set c_flag to 0 (compressed_g1 | (1 << 382), "b_flag should be 0"), # set b_flag to 1 (compressed_z1 & ~(1 << 382), "b_flag should be 1"), # set b_flag to 0 ( compressed_z1 | (1 
<< 381), "a point at infinity should have a_flag == 0", ), # set a_flag to 1 ( q | (1 << 383), "Point value should be less than field modulus.", ), # field modulus and c_flag ], ) def test_decompress_G1_edge_case(z, error_message): if error_message is None: decompress_G1(z) else: with pytest.raises(ValueError, match=error_message): decompress_G1(z) @pytest.mark.parametrize( "pt,on_curve,is_infinity", [ # On curve points (G2, True, False), (multiply(G2, 5), True, False), # Infinity point but still on curve (Z2, True, True), # Not on curve ((FQ2([5566, 5566]), FQ2([5566, 5566]), FQ2.one()), False, None), ], ) def test_G2_compress_and_decompress_flags(pt, on_curve, is_infinity): if on_curve: z1, z2 = compress_G2(pt) x1 = z1 % POW_2_381 c_flag1 = (z1 % POW_2_384) // POW_2_383 b_flag1 = (z1 % POW_2_383) // POW_2_382 a_flag1 = (z1 % POW_2_382) // POW_2_381 x2 = z2 % POW_2_381 c_flag2 = (z2 % POW_2_384) // POW_2_383 b_flag2 = (z2 % POW_2_383) // POW_2_382 a_flag2 = (z2 % POW_2_382) // POW_2_381 assert x1 < q assert x2 < q assert c_flag2 == b_flag2 == a_flag2 == 0 assert c_flag1 == 1 if is_infinity: assert b_flag1 == 1 assert a_flag1 == x1 == x2 == 0 else: assert b_flag1 == 0 _, y = normalize(pt) _, y_im = y.coeffs # TODO: need a case for y_im == 0 assert a_flag1 == (y_im * 2) // q # Correct flags should decompress correct x, y assert normalize(decompress_G2((z1, z2))) == normalize(pt) else: with pytest.raises(ValueError): compress_G2(pt) compressed_g2 = compress_G2(G2) compressed_z2 = compress_G2(Z2) @pytest.mark.parametrize( "z, error_message", [ (compressed_g2, None), # baseline ( (compressed_g2[0] & ~(1 << 383), compressed_g2[1]), "c_flag should be 1", ), # set c_flag1 to 0 ( (compressed_g2[0] | (1 << 382), compressed_g2[1]), "b_flag should be 0", ), # set b_flag1 to 1 ( (compressed_z2[0] & ~(1 << 382), compressed_z2[1]), "b_flag should be 1", ), # set b_flag1 to 0 ( (q | (1 << 383), compressed_z2[1]), "x1 value should be less than field modulus.", ), # x1 == q ( (compressed_z2[0] | (1 << 381), compressed_z2[1]), "a point at infinity should have a_flag == 0", ), # set a_flag1 to 1 ( (compressed_g2[0], compressed_z2[1] | (1 << 383)), "z2 point value should be less than field modulus.", ), # set c_flag2 to 1 ( (compressed_g2[0], compressed_z2[1] | (1 << 382)), "z2 point value should be less than field modulus.", ), # set b_flag2 to 1 ( (compressed_g2[0], compressed_z2[1] | (1 << 381)), "z2 point value should be less than field modulus.", ), # set a_flag2 to 1 ( (compressed_g2[0], compressed_g2[1] + q), "z2 point value should be less than field modulus.", ), # z2 value >= field modulus ], ) def test_decompress_G2_edge_case(z, error_message): if error_message is None: decompress_G2(z) else: with pytest.raises(ValueError, match=error_message): decompress_G2(z) py_ecc-8.0.0/tests/core/000077500000000000000000000000001477723207700150735ustar00rootroot00000000000000py_ecc-8.0.0/tests/core/test_backward_compatibility.py000066400000000000000000000015301477723207700232120ustar00rootroot00000000000000def test_backward_compatibility_imports(): from py_ecc.bls12_381 import ( # noqa: F401 FQ, FQ2, FQ12, FQP, field_modulus, ) from py_ecc.bn128 import ( # noqa: F401 FQ, FQ2, FQ12, FQP, field_modulus, ) from py_ecc.optimized_bls12_381 import ( # noqa: F401 FQ, FQ2, FQ12, FQP, field_modulus, ) from py_ecc.optimized_bn128 import ( # noqa: F401 FQ, FQ2, FQ12, FQP, field_modulus, ) def test_backward_compatibility_py_evm(): from py_ecc import ( optimized_bn128 as bn128, ) from py_ecc.optimized_bn128 import ( # noqa: 
F401 FQ2, FQP, ) FQ = bn128.FQ p1 = (FQ(0), FQ(0), FQ(1)) bn128.is_on_curve(p1, bn128.b) py_ecc-8.0.0/tests/core/test_bn128_and_bls12_381.py000066400000000000000000000254441477723207700215670ustar00rootroot00000000000000import pytest from py_ecc import ( bls12_381, bn128, optimized_bls12_381, optimized_bn128, ) from py_ecc.fields import ( bls12_381_FQ, bls12_381_FQ2, bls12_381_FQ12, bn128_FQ, bn128_FQ2, bn128_FQ12, optimized_bls12_381_FQ, optimized_bls12_381_FQ2, optimized_bls12_381_FQ12, optimized_bn128_FQ, optimized_bn128_FQ2, optimized_bn128_FQ12, ) from py_ecc.fields.field_properties import ( field_properties, ) @pytest.fixture(params=[bn128, optimized_bn128, bls12_381, optimized_bls12_381]) def lib(request): return request.param @pytest.fixture def FQ(lib): if lib == bn128: return bn128_FQ elif lib == optimized_bn128: return optimized_bn128_FQ elif lib == bls12_381: return bls12_381_FQ elif lib == optimized_bls12_381: return optimized_bls12_381_FQ else: raise Exception("Library Not Found") @pytest.fixture def FQ2(lib): if lib == bn128: return bn128_FQ2 elif lib == optimized_bn128: return optimized_bn128_FQ2 elif lib == bls12_381: return bls12_381_FQ2 elif lib == optimized_bls12_381: return optimized_bls12_381_FQ2 else: raise Exception("Library Not Found") @pytest.fixture def FQ12(lib): if lib == bn128: return bn128_FQ12 elif lib == optimized_bn128: return optimized_bn128_FQ12 elif lib == bls12_381: return bls12_381_FQ12 elif lib == optimized_bls12_381: return optimized_bls12_381_FQ12 else: raise Exception("Library Not Found") @pytest.fixture def field_modulus(lib): if lib == bn128 or lib == optimized_bn128: return field_properties["bn128"]["field_modulus"] elif lib == bls12_381 or lib == optimized_bls12_381: return field_properties["bls12_381"]["field_modulus"] else: raise Exception("Library Not Found") @pytest.fixture def G1(lib): return lib.G1 @pytest.fixture def G2(lib): return lib.G2 @pytest.fixture def G12(lib): return lib.G12 @pytest.fixture def Z1(lib): return lib.Z1 @pytest.fixture def Z2(lib): return lib.Z2 @pytest.fixture def b(lib): return lib.b @pytest.fixture def b2(lib): return lib.b2 @pytest.fixture def b12(lib): return lib.b12 @pytest.fixture def is_inf(lib): return lib.is_inf @pytest.fixture def is_on_curve(lib): return lib.is_on_curve @pytest.fixture def eq(lib): return lib.eq @pytest.fixture def add(lib): return lib.add @pytest.fixture def double(lib): return lib.double @pytest.fixture def curve_order(lib): return lib.curve_order @pytest.fixture def multiply(lib): return lib.multiply @pytest.fixture def pairing(lib): return lib.pairing @pytest.fixture def neg(lib): return lib.neg @pytest.fixture def twist(lib): return lib.twist def test_FQ_object(FQ, field_modulus): assert FQ(2) * FQ(2) == FQ(4) assert FQ(2) / FQ(7) + FQ(9) / FQ(7) == FQ(11) / FQ(7) assert FQ(2) * FQ(7) + FQ(9) * FQ(7) == FQ(11) * FQ(7) assert FQ(9) ** field_modulus == FQ(9) assert FQ(-1).n > 0 def test_FQ2_object(FQ2, field_modulus): x = FQ2([1, 0]) f = FQ2([1, 2]) fpx = FQ2([2, 2]) one = FQ2.one() z1, z2 = FQ2([-1, -1]).coeffs assert x + f == fpx assert f / f == one assert one / f + x / f == (one + x) / f assert one * f + x * f == (one + x) * f assert x ** (field_modulus**2 - 1) == one if isinstance(z1, int): assert z1 > 0 assert z2 > 0 else: assert z1.n > 0 assert z2.n > 0 def test_FQ12_object(FQ12, field_modulus): x = FQ12([1] + [0] * 11) f = FQ12([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) fpx = FQ12([2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) one = FQ12.one() zs = FQ12([-1] * 12).coeffs assert x + f 
== fpx assert f / f == one assert one / f + x / f == (one + x) / f assert one * f + x * f == (one + x) * f if isinstance(zs[0], int): assert all(z > 0 for z in zs) else: assert all(z.n > 0 for z in zs) # This check takes too long # assert x ** (field_modulus ** 12 - 1) == one def test_G1_object(G1, eq, double, add, multiply, curve_order, is_inf): assert eq(add(add(double(G1), G1), G1), double(double(G1))) assert not eq(double(G1), G1) assert eq( add(multiply(G1, 9), multiply(G1, 5)), add(multiply(G1, 12), multiply(G1, 2)) ) assert is_inf(multiply(G1, curve_order)) def test_G2_object( G2, b2, eq, add, double, multiply, is_inf, curve_order, field_modulus, is_on_curve ): assert eq(add(add(double(G2), G2), G2), double(double(G2))) assert not eq(double(G2), G2) assert eq( add(multiply(G2, 9), multiply(G2, 5)), add(multiply(G2, 12), multiply(G2, 2)) ) assert is_inf(multiply(G2, curve_order)) assert not is_inf(multiply(G2, 2 * field_modulus - curve_order)) assert is_on_curve(multiply(G2, 9), b2) def test_G12_object( G12, b12, eq, add, double, multiply, is_on_curve, is_inf, curve_order ): assert eq(add(add(double(G12), G12), G12), double(double(G12))) assert not eq(double(G12), G12) assert eq( add(multiply(G12, 9), multiply(G12, 5)), add(multiply(G12, 12), multiply(G12, 2)), ) assert is_on_curve(multiply(G12, 9), b12) assert is_inf(multiply(G12, curve_order)) def test_Z1_object(add, eq, double, FQ, G1, is_inf, multiply, neg, twist, Z1): assert eq(G1, add(G1, Z1)) assert eq(Z1, double(Z1)) assert eq(Z1, multiply(Z1, 0)) assert eq(Z1, multiply(Z1, 1)) assert eq(Z1, multiply(Z1, 2)) assert eq(Z1, multiply(Z1, 3)) assert is_inf(neg(Z1)) def test_Z2_object(add, eq, double, FQ2, G2, is_inf, multiply, neg, twist, Z2): assert eq(G2, add(G2, Z2)) assert eq(Z2, double(Z2)) assert eq(Z2, multiply(Z2, 0)) assert eq(Z2, multiply(Z2, 1)) assert eq(Z2, multiply(Z2, 2)) assert eq(Z2, multiply(Z2, 3)) assert is_inf(neg(Z2)) assert is_inf(twist(Z2)) def test_none_point(lib, neg, twist): if lib not in [optimized_bn128, optimized_bls12_381]: pytest.skip() with pytest.raises( expected_exception=TypeError, match="cannot unpack non-iterable NoneType object" ): neg(None) with pytest.raises( expected_exception=TypeError, match="cannot unpack non-iterable NoneType object" ): twist(None) def test_pairing_negative_G1(pairing, G1, G2, FQ12, curve_order, multiply, neg): p1 = pairing(G2, G1) pn1 = pairing(G2, neg(G1)) assert p1 * pn1 == FQ12.one() def test_pairing_negative_G2(pairing, G1, G2, FQ12, curve_order, multiply, neg): p1 = pairing(G2, G1) pn1 = pairing(G2, neg(G1)) np1 = pairing(neg(G2), G1) assert p1 * np1 == FQ12.one() assert pn1 == np1 def test_pairing_output_order(G1, G2, FQ12, pairing, curve_order): p1 = pairing(G2, G1) assert p1**curve_order == FQ12.one() def test_pairing_bilinearity_on_G1(G1, G2, neg, multiply, pairing): p1 = pairing(G2, G1) p2 = pairing(G2, multiply(G1, 2)) pairing(neg(G2), G1) assert p1 * p1 == p2 def test_pairing_is_non_degenerate(G1, G2, neg, pairing, multiply): p1 = pairing(G2, G1) p2 = pairing(G2, multiply(G1, 2)) np1 = pairing(neg(G2), G1) assert p1 != p2 and p1 != np1 and p2 != np1 def test_pairing_bilinearity_on_G2(G1, G2, pairing, multiply): p1 = pairing(G2, G1) po2 = pairing(multiply(G2, 2), G1) assert p1 * p1 == po2 def test_pairing_composit_check(G1, G2, multiply, pairing): p3 = pairing(multiply(G2, 27), multiply(G1, 37)) po3 = pairing(G2, multiply(G1, 999)) assert p3 == po3 r""" for lib in (bn128, optimized_bn128): FQ, FQ2, FQ12, field_modulus = lib.FQ, lib.FQ2, lib.FQ12, 
lib.field_modulus assert FQ(2) * FQ(2) == FQ(4) assert FQ(2) / FQ(7) + FQ(9) / FQ(7) == FQ(11) / FQ(7) assert FQ(2) * FQ(7) + FQ(9) * FQ(7) == FQ(11) * FQ(7) assert FQ(9) ** field_modulus == FQ(9) print('FQ works fine') x = FQ2([1, 0]) f = FQ2([1, 2]) fpx = FQ2([2, 2]) one = FQ2.one() assert x + f == fpx assert f / f == one assert one / f + x / f == (one + x) / f assert one * f + x * f == (one + x) * f assert x ** (field_modulus ** 2 - 1) == one print('FQ2 works fine') x = FQ12([1] + [0] * 11) f = FQ12([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) fpx = FQ12([2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) one = FQ12.one() assert x + f == fpx assert f / f == one assert one / f + x / f == (one + x) / f assert one * f + x * f == (one + x) * f # This check takes too long # assert x ** (field_modulus ** 12 - 1) == one print('FQ12 works fine') G1, G2, G12, b, b2, b12, is_inf, is_on_curve, eq, add, double, curve_order, multiply = \ # noqa: E501 lib.G1, lib.G2, lib.G12, lib.b, lib.b2, lib.b12, lib.is_inf, lib.is_on_curve, lib.eq, lib.add, lib.double, lib.curve_order, lib.multiply # noqa: E501 assert eq(add(add(double(G1), G1), G1), double(double(G1))) assert not eq(double(G1), G1) assert eq(add(multiply(G1, 9), multiply(G1, 5)), add(multiply(G1, 12), multiply(G1, 2))) # noqa: E501 assert is_inf(multiply(G1, curve_order)) print('G1 works fine') assert eq(add(add(double(G2), G2), G2), double(double(G2))) assert not eq(double(G2), G2) assert eq(add(multiply(G2, 9), multiply(G2, 5)), add(multiply(G2, 12), multiply(G2, 2))) # noqa: E501 assert is_inf(multiply(G2, curve_order)) assert not is_inf(multiply(G2, 2 * field_modulus - curve_order)) assert is_on_curve(multiply(G2, 9), b2) print('G2 works fine') assert eq(add(add(double(G12), G12), G12), double(double(G12))) assert not eq(double(G12), G12) assert eq(add(multiply(G12, 9), multiply(G12, 5)), add(multiply(G12, 12), multiply(G12, 2))) # noqa: E501 assert is_on_curve(multiply(G12, 9), b12) assert is_inf(multiply(G12, curve_order)) print('G12 works fine') pairing, neg = lib.pairing, lib.neg print('Starting pairing tests') a = time.time() p1 = pairing(G2, G1) pn1 = pairing(G2, neg(G1)) assert p1 * pn1 == FQ12.one() print('Pairing check against negative in G1 passed') np1 = pairing(neg(G2), G1) assert p1 * np1 == FQ12.one() assert pn1 == np1 print('Pairing check against negative in G2 passed') assert p1 ** curve_order == FQ12.one() print('Pairing output has correct order') p2 = pairing(G2, multiply(G1, 2)) assert p1 * p1 == p2 print('Pairing bilinearity in G1 passed') assert p1 != p2 and p1 != np1 and p2 != np1 print('Pairing is non-degenerate') po2 = pairing(multiply(G2, 2), G1) assert p1 * p1 == po2 print('Pairing bilinearity in G2 passed') p3 = pairing(multiply(G2, 27), multiply(G1, 37)) po3 = pairing(G2, multiply(G1, 999)) assert p3 == po3 print('Composite check passed') print('Total time for pairings: %.3f' % (time.time() - a)) """ py_ecc-8.0.0/tests/core/test_import_and_version.py000066400000000000000000000005241477723207700224060ustar00rootroot00000000000000import importlib from py_ecc import ( _lazy_imports, ) def test_import_and_version(): import py_ecc assert isinstance(py_ecc.__version__, str) def test_import_all_submodules(): # test import of all submodules due to use of lazy imports for submod in _lazy_imports.values(): importlib.import_module(submod) py_ecc-8.0.0/tests/core/test_secp256k1.py000066400000000000000000000016601477723207700201320ustar00rootroot00000000000000import binascii from py_ecc.secp256k1 import ( ecdsa_raw_recover, ecdsa_raw_sign, 
privtopub, ) priv = binascii.unhexlify( "792eca682b890b31356247f2b04662bff448b6bb19ea1c8ab48da222c894ef9b" ) pub = ( 20033694065814990006010338153307081985267967222430278129327181081381512401190, 72089573118161052907088366229362685603474623289048716349537937839432544970413, ) def test_privtopub(): assert privtopub(priv) == pub def test_ecdsa_raw_sign(): v, r, s = ecdsa_raw_sign(b"\x35" * 32, priv) assert ecdsa_raw_recover(b"\x35" * 32, (v, r, s)) == pub def test_issue_4_bug(): unsigned_message = ( "6a74f15f29c3227c5d1d2e27894da58d417a484ef53bc7aa57ee323b42ded656" ) v = 28 r = int("5897c2c7c7412b0a555fb6f053ddb6047c59666bbebc6f5573134e074992d841", 16) s = int("1c71d1c62b74caff8695a186e2a24dd701070ba9946748318135e3ac0950b1d4", 16) ecdsa_raw_recover(unsigned_message, (v, r, s)) py_ecc-8.0.0/tests/core/test_utils.py000066400000000000000000000004421477723207700176440ustar00rootroot00000000000000import pytest from py_ecc.utils import ( prime_field_inv, ) @pytest.mark.parametrize( "a,n,result", [ (0, 7, 0), (7, 7, 0), (2, 7, 4), (10, 7, 5), ], ) def test_prime_field_inv(a, n, result): assert prime_field_inv(a, n) % n == result py_ecc-8.0.0/tox.ini000066400000000000000000000032151477723207700143150ustar00rootroot00000000000000[tox] envlist= py{38,39,310,311,312,313}-core py{38,39,310,311,312,313}-lint py{38,39,310,311,312,313}-wheel py{38,39,310,311,312,313}-bls windows-wheel docs [flake8] exclude=venv*,.tox,docs,build extend-ignore=E203 max-line-length=88 per-file-ignores= __init__.py:F401 test_backward_compatibility.py:F811 [blocklint] max_issue_threshold=1 [testenv] usedevelop=True commands= core: pytest {posargs:tests/core} docs: make check-docs-ci bls: pytest {posargs:tests/bls} basepython= docs: python windows-wheel: python py38: python3.8 py39: python3.9 py310: python3.10 py311: python3.11 py312: python3.12 py313: python3.13 extras= test docs allowlist_externals=make,pre-commit [testenv:py{38,39,310,311,312,313}-lint] deps=pre-commit extras=dev commands= pre-commit install pre-commit run --all-files --show-diff-on-failure [testenv:py{38,39,310,311,312,313}-wheel] deps= wheel build[virtualenv] allowlist_externals= /bin/rm /bin/bash commands= python -m pip install --upgrade pip /bin/rm -rf build dist python -m build /bin/bash -c 'python -m pip install --upgrade "$(ls dist/py_ecc-*-py3-none-any.whl)" --progress-bar off' python -c "import py_ecc" skip_install=true [testenv:windows-wheel] deps= wheel build[virtualenv] allowlist_externals= bash.exe commands= python --version python -m pip install --upgrade pip bash.exe -c "rm -rf build dist" python -m build bash.exe -c 'python -m pip install --upgrade "$(ls dist/py_ecc-*-py3-none-any.whl)" --progress-bar off' python -c "import py_ecc" skip_install=true
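As a closing illustration (not part of the release), the secp256k1 helpers exercised in tests/core/test_secp256k1.py above support a plain sign-and-recover round trip. This is a minimal sketch assuming only the exported privtopub, ecdsa_raw_sign, and ecdsa_raw_recover functions, with a throwaway key and message:

from hashlib import sha256

from py_ecc.secp256k1 import ecdsa_raw_recover, ecdsa_raw_sign, privtopub

# Throwaway 32-byte private key and a 32-byte message hash, for illustration only.
private_key = b"\x01" * 32
message_hash = sha256(b"hello py_ecc").digest()

public_key = privtopub(private_key)
v, r, s = ecdsa_raw_sign(message_hash, private_key)

# Recovering the signer from (v, r, s) should yield the same public key point.
assert ecdsa_raw_recover(message_hash, (v, r, s)) == public_key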