pax_global_header00006660000000000000000000000064150521603420014510gustar00rootroot0000000000000052 comment=d84b79966583f4131e307809e6dd5590e7cba66f python-einx-0.3.0/000077500000000000000000000000001505216034200137725ustar00rootroot00000000000000python-einx-0.3.0/.github/000077500000000000000000000000001505216034200153325ustar00rootroot00000000000000python-einx-0.3.0/.github/workflows/000077500000000000000000000000001505216034200173675ustar00rootroot00000000000000python-einx-0.3.0/.github/workflows/publish_pypi.yml000066400000000000000000000011321505216034200226160ustar00rootroot00000000000000name: Publish package on PyPI on: release: types: [published] jobs: deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Python 3.10 uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install dependencies run: | python -m pip install --upgrade pip pip install build - name: Build package run: python -m build - name: Publish package uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} python-einx-0.3.0/.github/workflows/run_pytest.yml000066400000000000000000000026161505216034200223330ustar00rootroot00000000000000name: Test with pytest on: push: branches: [ "master" ] pull_request: branches: [ "master" ] permissions: contents: read jobs: test_py38: runs-on: ubuntu-latest steps: - name: Set up Python 3.8 uses: actions/setup-python@v3 with: python-version: "3.8" - name: Install optional dependencies run: | python -m pip install --upgrade pip pip install pytest "jax[cpu]" flax torch tensorflow einops mlx dask tinygrad scipy - uses: actions/checkout@v3 - name: Test with pytest run: | pip install . EINX_FILTER_TRACEBACK=0 pytest test_py310: runs-on: ubuntu-latest steps: - name: Set up Python 3.10 uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install optional dependencies run: | python -m pip install --upgrade pip pip install pytest "jax[cpu]" flax dm-haiku torch tensorflow einops equinox mlx dask tinygrad scipy pip install --upgrade keras - uses: actions/checkout@v3 - name: Test with pytest run: | pip install . EINX_FILTER_TRACEBACK=0 pytest pip install --upgrade "torch==2.2.0" EINX_FILTER_TRACEBACK=0 pytest pip install --upgrade "torch==2.1.0" EINX_FILTER_TRACEBACK=0 pytest pip install --upgrade "torch==2.0.0" EINX_FILTER_TRACEBACK=0 pytestpython-einx-0.3.0/.gitignore000066400000000000000000000001111505216034200157530ustar00rootroot00000000000000*.egg-info docs/build examples/cifar10 dist/* act __pycache__ /*.pypython-einx-0.3.0/.readthedocs.yml000066400000000000000000000005671505216034200170700ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-20.04 tools: python: "3.9" sphinx: configuration: docs/source/conf.py python: install: - requirements: docs/requirements.txtpython-einx-0.3.0/CHANGELOG.md000066400000000000000000000156301505216034200156100ustar00rootroot00000000000000# Changelog ## [0.3.0] ### Added - Add partial support for [tinygrad](https://github.com/tinygrad/tinygrad). 
- Supported: - `einx.rearrange` - `einx.{elementwise|add|multiply|where|...}` - `einx.{reduce|sum|mean|...}` - `einx.{vmap_with_axis|flip|softmax|...}` - `einx.dot` - Not supported: - `einx.vmap` (no `vmap` in tinygrad) - `einx.{index|get_at|set_at|...}` (due to relying on `einx.vmap`) ### Changed - Use `tf.gather_nd` instead of `x[y]` to implement `einx.get_at` for Tensorflow. ### Fixed - Allow empty tuples and lists as constraints for ellipsis parameters. - Fix shorthand notation in `einx.dot`. ## [0.2.2] ### Added - Add [`einx.experimental.shard`](https://einx.readthedocs.io/en/latest/api.html#einx.experimental.shard). ### Fixed - Fix bug when calling einx from multiple threads. (Run unit tests also in multi-threaded context.) ## [0.2.1] ### Changed - **Remove einx dependency in compiled code:** The code for a traced function now directly imports and uses the namespace of the backend (e.g. `import torch`). For example: ```python >>> print(einx.dot("b q (h c), b k (h c) -> b q k h", x, y, h=16, graph=True)) import torch def op0(i0, i1): x0 = torch.reshape(i0, (16, 768, 16, 64)) x1 = torch.reshape(i1, (16, 768, 16, 64)) x2 = torch.einsum("abcd,aecd->abec", x0, x1) return x2 ``` In most cases, compiled functions now contain no reference to other einx code. - **Improve handling of Python scalars:** (see https://github.com/fferflo/einx/issues/7) einx now only converts `int`, `float` and `bool` to tensor objects (e.g. via `torch.asarray`) if the backend function that is called does not support Python scalars (previously all inputs were converted to tensor objects). When using PyTorch, the `device` argument will be used to place the constructed tensor on the correct device.
For example, `torch.add` supports Python scalars ```python >>> print(einx.add("a,", x, 1, graph=True)) import torch def op0(i0, i1): x0 = torch.add(i0, i1) return x0 ``` while `torch.maximum` does not: ```python >>> print(einx.maximum("a,", x, 1, graph=True)) import torch def op0(i0, i1): x0 = torch.asarray(i1, device=i0.device) x1 = torch.maximum(i0, x0) return x1 ``` - Run unit tests for PyTorch and Jax also on the GPU (if it is available). - Run unit tests also with `jax.jit` and `torch.compile`. ### Fixed - Add workarounds for issues with `torch.compile`: https://github.com/pytorch/pytorch/issues/94674 and https://github.com/pytorch/pytorch/issues/124269 ## [0.2.0] ### Added - Add partial support for Apple's [mlx](https://github.com/ml-explore/mlx). - Supported: - `einx.rearrange` - `einx.{elementwise|add|multiply|where|...}` - `einx.{reduce|sum|mean|...}` - `einx.{vmap_with_axis|flip|softmax|...}` - Not supported yet: - `einx.dot` (`mx.einsum` is not implemented yet) - `einx.vmap` (`mx.vmap` does not fully support all primitives yet) - `einx.{index|get_at|set_at|...}` (due to relying on `einx.vmap`) - Add partial support for [dask.array](https://docs.dask.org/en/stable/array.html). - Supported: - `einx.rearrange` - `einx.{elementwise|add|multiply|where|...}` - `einx.{reduce|sum|mean|...}` - `einx.{vmap_with_axis|flip|softmax|...}` - `einx.dot` - Not supported: - `einx.vmap` (`vmap` not implemented in dask) - `einx.{index|get_at|set_at|...}` (due to relying on `einx.vmap`) - Add environment variable `EINX_WARN_ON_RETRACE` to warn when excessive retracing is detected. ### Changed - Allow `->` and `,` to be composed with other operators. (This deprecates the existing `[|]` notation which should instead be implemented with composable `->`. The feature is still maintained for backwards compatibility). For example: - `einx.dot("b [c1->c2]", ...)` expands to `einx.dot("b [c1] -> b [c2]", ...)` - `einx.get_at("b p [i,->]", ...)` expands to `einx.get_at("b p [i], b p -> b p", ...)` - Allow `einx.{set_at|add_at|...}` to be called with zero-sized updates or coordinates (in which case the input tensor is returned as-is). - Remove `backend.dot` which was not used anywhere but in the unit tests. - Improve error reporting: - Drop internal stack frames when raising exceptions. - Better error when passing invalid shape constraints to einx functions. - Reduce overhead of einx when using the PyTorch backend. ### Fixed - Fix compatibility of `einx.nn.torch.Norm` with PyTorch 2.2. - Fix parameters in `einn.param` being ignored. - Fix bug when using concatenations in `einx.rearrange`. See: https://github.com/fferflo/einx/issues/6 - Fix broadcasting new axes in `einx.vmap_with_axis`. - Disable `torch.compile` during graph construction using [torch.compiler.disable](https://pytorch.org/docs/stable/generated/torch.compiler.disable.html). ## [0.1.3] ### Added - Add option to install einx via `pip install einx[torch]` or `pip install einx[keras]` to enforce version requirements on PyTorch or Keras. ### Changed - Fail gracefully and report error when run with incompatible version of PyTorch and Keras. ### Fixed - Fix compatibility with 2.0 <= PyTorch < 2.1. ## [0.1.2] ### Added - Add type annotations to public API. - Allow passing multiple coordinate tensors in `einx.{get_at|set_at|...}`. - Allow implicit output shape in `einx.{set_at|add_at|...}`. - Allow passing backend with string argument to `einx.nn.norm`. - Make backends accessible as `einx.backend.{NAME}` once they are loaded. 
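For example, with the new support for multiple coordinate tensors, `einx.{get_at|set_at|...}` accepts either a single tensor with a trailing coordinate axis or one coordinate tensor per indexed axis. A small sketch with hypothetical shapes (the single-tensor form is taken from the README; the per-axis form is shown here as an illustration):

```python
import numpy as np
import einx

x = np.random.rand(4, 32, 32, 3)           # b h w c
i = np.random.randint(0, 32, (4, 100, 2))  # b p [2]
ih, iw = i[..., 0], i[..., 1]              # b p, b p

# Single coordinate tensor with a trailing coordinate axis:
a = einx.get_at("b [h w] c, b p [2] -> b p c", x, i)
# Equivalent call with one coordinate tensor per indexed axis:
b = einx.get_at("b [h w] c, b p, b p -> b p c", x, ih, iw)
```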
### Changed - Refactor tracing: - Trace vmapped functions (previously kept a pointer to an untraced function). - Add shape assertion when calling unsafe functions. - Add comments for better inspection. - Remove `pass_backend` argument from `einx.vmap`. - Cache different functions for different backends. - Don't call `backend.to_tensor` if input already has correct type. For example, tracing `einx.get_at` now gives the following jit-compiled code: ```python >>> print(einx.get_at("b [h w] c, b p [2] -> b p c", x, y, graph=True)) # backend: einx.backend.numpy def op1(i0, i1): x1 = i1[:, 0] x2 = i1[:, 1] x0 = backend.get_at(i0, (x1, x2)) return (x0,) def op0(i0, i1, op1=op1): op2 = backend.vmap(op1, in_axes=(0, 0), out_axes=(0,)) op3 = backend.vmap(op2, in_axes=(3, None), out_axes=(2,)) x0 = op3(i0, i1) return x0[0] ``` ### Fixed - Fix bug when using "1" as coordinate axis in einx.index. - Add workaround for scalar indexing operations with torch.vmap (see https://github.com/pytorch/functorch/issues/747). - Fix support for list/ tuple arguments as tensors with non-trivial shape. - Change einx.reduce to accept only single tensors as arguments (API allowed multiple arguments, but was not implemented). - Don't trace and jit functions if EINX_CACHE_SIZE=0. - Fix bug where some static code analysis tools fail to recognize function specializations.python-einx-0.3.0/LICENSE000066400000000000000000000021061505216034200147760ustar00rootroot00000000000000MIT License Copyright (c) 2023- Florian Fervers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. python-einx-0.3.0/README.md000066400000000000000000000141201505216034200152470ustar00rootroot00000000000000# *einx* - Universal Tensor Operations in Einstein-Inspired Notation [![pytest](https://github.com/fferflo/einx/actions/workflows/run_pytest.yml/badge.svg)](https://github.com/fferflo/einx/actions/workflows/run_pytest.yml) [![Documentation](https://img.shields.io/badge/documentation-link-blue.svg)](https://einx.readthedocs.io) [![PyPI version](https://badge.fury.io/py/einx.svg)](https://badge.fury.io/py/einx) [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/release/python-380/) einx is a Python library that provides a universal interface to formulate tensor operations in frameworks such as Numpy, PyTorch, Jax and Tensorflow. The design is based on the following principles: 1. **Provide a set of elementary tensor operations** following Numpy-like naming: `einx.{sum|max|where|add|dot|flip|get_at|...}` 2. 
**Use einx notation to express vectorization of the elementary operations.** einx notation is inspired by [einops](https://github.com/arogozhnikov/einops), but introduces several novel concepts such as `[]`-bracket notation and full composability that allow using it as a universal language for tensor operations. einx can be integrated and mixed with existing code seamlessly. All operations are [just-in-time compiled](https://einx.readthedocs.io/en/latest/more/jit.html) into regular Python functions using Python's [exec()](https://docs.python.org/3/library/functions.html#exec) and invoke operations from the respective framework. **Getting started:** * [Tutorial](https://einx.readthedocs.io/en/latest/gettingstarted/tutorial_overview.html) * [Example: GPT-2 with einx](https://einx.readthedocs.io/en/latest/gettingstarted/gpt2.html) * [How is einx different from einops?](https://einx.readthedocs.io/en/latest/faq/einops.html) * [How is einx notation universal?](https://einx.readthedocs.io/en/latest/faq/universal.html) * [API reference](https://einx.readthedocs.io/en/latest/api.html) ## Installation ```python pip install einx ``` See [Installation](https://einx.readthedocs.io/en/latest/gettingstarted/installation.html) for more information. ## What does einx look like? #### Tensor manipulation ```python import einx x = {np.asarray|torch.as_tensor|jnp.asarray|...}(...) # Create some tensor einx.sum("a [b]", x) # Sum-reduction along second axis einx.flip("... (g [c])", x, c=2) # Flip pairs of values along the last axis einx.mean("b [s...] c", x) # Spatial mean-pooling einx.sum("b (s [s2])... c", x, s2=2) # Sum-pooling with kernel_size=stride=2 einx.add("a, b -> a b", x, y) # Outer sum einx.get_at("b [h w] c, b i [2] -> b i c", x, y) # Gather values at coordinates einx.rearrange("b (q + k) -> b q, b k", x, q=2) # Split einx.rearrange("b c, 1 -> b (c + 1)", x, [42]) # Append number to each channel # Apply custom operations: einx.vmap("b [s...] c -> b c", x, op=np.mean) # Spatial mean-pooling einx.vmap("a [b], [b] c -> a c", x, y, op=np.dot) # Matmul ``` All einx functions simply forward computation to the respective backend, e.g. by internally calling `np.reshape`, `np.transpose`, `np.sum` with the appropriate arguments. #### Common neural network operations ```python # Layer normalization mean = einx.mean("b... [c]", x, keepdims=True) var = einx.var("b... [c]", x, keepdims=True) x = (x - mean) * torch.rsqrt(var + epsilon) # Prepend class token einx.rearrange("b s... c, c -> b (1 + (s...)) c", x, cls_token) # Multi-head attention attn = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=8) attn = einx.softmax("b q [k] h", attn) x = einx.dot("b q k h, b k (h c) -> b q (h c)", attn, v) # Matmul in linear layers einx.dot("b... [c1->c2]", x, w) # - Regular einx.dot("b... (g [c1->c2])", x, w) # - Grouped: Same weights per group einx.dot("b... ([g c1->g c2])", x, w) # - Grouped: Different weights per group einx.dot("b [s...->s2] c", x, w) # - Spatial mixing as in MLP-mixer ``` See [Common neural network ops](https://einx.readthedocs.io/en/latest/gettingstarted/commonnnops.html) for more examples. #### Optional: Deep learning modules ```python import einx.nn.{torch|flax|haiku|equinox|keras} as einn batchnorm = einn.Norm("[b...] c", decay_rate=0.9) layernorm = einn.Norm("b... [c]") # as used in transformers instancenorm = einn.Norm("b [s...] c") groupnorm = einn.Norm("b [s...] (g [c])", g=8) rmsnorm = einn.Norm("b... [c]", mean=False, bias=False) channel_mix = einn.Linear("b... 
[c1->c2]", c2=64) spatial_mix1 = einn.Linear("b [s...->s2] c", s2=64) spatial_mix2 = einn.Linear("b [s2->s...] c", s=(64, 64)) patch_embed = einn.Linear("b (s [s2->])... [c1->c2]", s2=4, c2=64) dropout = einn.Dropout("[...]", drop_rate=0.2) spatial_dropout = einn.Dropout("[b] ... [c]", drop_rate=0.2) droppath = einn.Dropout("[b] ...", drop_rate=0.2) ``` See `examples/train_{torch|flax|haiku|equinox|keras}.py` for example trainings on CIFAR10, [GPT-2](https://einx.readthedocs.io/en/latest/gettingstarted/gpt2.html) and [Mamba](https://github.com/fferflo/weightbridge/blob/master/examples/mamba2flax.py) for working example implementations of language models using einx, and [Tutorial: Neural networks](https://einx.readthedocs.io/en/latest/gettingstarted/tutorial_neuralnetworks.html) for more details. #### Just-in-time compilation einx traces the required backend operations for a given call into graph representation and just-in-time compiles them into a regular Python function using Python's [`exec()`](https://docs.python.org/3/library/functions.html#exec). This reduces overhead to a single cache lookup and allows inspecting the generated function. For example: ```python >>> x = np.zeros((3, 10, 10)) >>> graph = einx.sum("... (g [c])", x, g=2, graph=True) >>> print(graph) import numpy as np def op0(i0): x0 = np.reshape(i0, (3, 10, 2, 5)) x1 = np.sum(x0, axis=3) return x1 ``` See [Just-in-time compilation](https://einx.readthedocs.io/en/latest/more/jit.html) for more details.python-einx-0.3.0/docs/000077500000000000000000000000001505216034200147225ustar00rootroot00000000000000python-einx-0.3.0/docs/Makefile000066400000000000000000000011761505216034200163670ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) python-einx-0.3.0/docs/make.bat000066400000000000000000000014441505216034200163320ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd python-einx-0.3.0/docs/requirements.txt000066400000000000000000000001261505216034200202050ustar00rootroot00000000000000sphinx>=6.0.0 sphinx-autodoc-typehints sphinx-book-theme>=1.0.1 . 
dm-haiku flax torchpython-einx-0.3.0/docs/source/000077500000000000000000000000001505216034200162225ustar00rootroot00000000000000python-einx-0.3.0/docs/source/api.rst000066400000000000000000000052011505216034200175230ustar00rootroot00000000000000######## einx API ######## Main ---- .. autofunction:: einx.rearrange .. autofunction:: einx.vmap_with_axis .. autofunction:: einx.vmap .. autofunction:: einx.reduce .. autofunction:: einx.elementwise .. autofunction:: einx.index Reduction operations -------------------- .. autofunction:: einx.sum .. autofunction:: einx.mean .. autofunction:: einx.var .. autofunction:: einx.std .. autofunction:: einx.prod .. autofunction:: einx.count_nonzero .. autofunction:: einx.any .. autofunction:: einx.all .. autofunction:: einx.max .. autofunction:: einx.min .. autofunction:: einx.logsumexp Element-by-element operations ----------------------------- .. autofunction:: einx.add .. autofunction:: einx.subtract .. autofunction:: einx.multiply .. autofunction:: einx.true_divide .. autofunction:: einx.floor_divide .. autofunction:: einx.divide .. autofunction:: einx.logical_and .. autofunction:: einx.logical_or .. autofunction:: einx.where .. autofunction:: einx.less .. autofunction:: einx.less_equal .. autofunction:: einx.greater .. autofunction:: einx.greater_equal .. autofunction:: einx.equal .. autofunction:: einx.not_equal .. autofunction:: einx.maximum .. autofunction:: einx.minimum Indexing operations ------------------- .. autofunction:: einx.get_at .. autofunction:: einx.set_at .. autofunction:: einx.add_at .. autofunction:: einx.subtract_at Miscellaneous operations ------------------------ .. autofunction:: einx.flip .. autofunction:: einx.roll .. autofunction:: einx.softmax .. autofunction:: einx.log_softmax .. autofunction:: einx.arange General dot-product ------------------- .. autofunction:: einx.dot Deep Learning Modules ===================== Haiku ----- .. autoclass:: einx.nn.haiku.Linear .. autoclass:: einx.nn.haiku.Norm .. autoclass:: einx.nn.haiku.Dropout .. autofunction:: einx.nn.haiku.param Flax ---- .. autofunction:: einx.nn.flax.Linear .. autofunction:: einx.nn.flax.Norm .. autofunction:: einx.nn.flax.Dropout .. autofunction:: einx.nn.flax.param Torch ----- .. autoclass:: einx.nn.torch.Linear .. autoclass:: einx.nn.torch.Norm .. autoclass:: einx.nn.torch.Dropout .. autofunction:: einx.nn.torch.param Equinox ------- .. autoclass:: einx.nn.equinox.Linear .. autoclass:: einx.nn.equinox.Norm .. autoclass:: einx.nn.equinox.Dropout .. autofunction:: einx.nn.equinox.param Keras ----- .. autoclass:: einx.nn.keras.Linear .. autoclass:: einx.nn.keras.Norm .. autoclass:: einx.nn.keras.Dropout .. autofunction:: einx.nn.keras.param Experimental ============ .. autofunction:: einx.experimental.shardpython-einx-0.3.0/docs/source/conf.py000066400000000000000000000024021505216034200175170ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. 
# # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "einx" copyright = "2024, Florian Fervers" author = 'Florian Fervers' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinx_autodoc_typehints", ] templates_path = [] exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = "sphinx_book_theme" html_theme_options = { "show_toc_level": 2, "repository_url": "https://github.com/fferflo/einx", "use_repository_button": True, } html_static_path = [] python-einx-0.3.0/docs/source/faq/000077500000000000000000000000001505216034200167715ustar00rootroot00000000000000python-einx-0.3.0/docs/source/faq/backend.rst000066400000000000000000000025311505216034200211130ustar00rootroot00000000000000How does einx support different tensor frameworks? ################################################## einx provides interfaces for tensor frameworks in the ``einx.backend.*`` namespace. einx functions accept a ``backend`` argument that defines which backend to use for the computation. For ``backend=None`` (the default case), the backend is implicitly determined from the type of the input tensors. .. code:: python x = np.ones((2, 3)) einx.sum("a [b]", x, backend=einx.backend.get("numpy")) # Uses numpy backend einx.sum("a [b]", x) # Implicitly uses numpy backend Numpy tensors can be mixed with other frameworks in the same operation, in which case the latter backend is used for computations. Frameworks other than Numpy cannot be mixed in the same operation. .. code:: python x = np.zeros((10, 20)) y = np.zeros((20, 30)) einx.dot("a [c1->c2]", x, torch.from_numpy(y)) # Uses torch einx.dot("a [c1->c2]", x, jnp.asarray(y)) # Uses jax einx.dot("a [c1->c2]", torch.from_numpy(x), jnp.asarray(y)) # Raises exception Unkown tensor objects and python sequences are converted to tensors using calls from the respective backend if possible (e.g. ``np.asarray``, ``torch.asarray``). .. code:: python x = np.zeros((10, 20)) einx.add("a b, 1", x, [42.0])python-einx-0.3.0/docs/source/faq/einops.rst000066400000000000000000000103251505216034200210210ustar00rootroot00000000000000How is einx different from einops? ################################## einx uses Einstein-inspired notation that is based on and compatible with the notation used in `einops `_, but introduces several novel concepts that allow using it as a universal language for tensor operations: * Introduction of ``[]``-notation to express vectorization of elementary operations (see :ref:`Bracket notation `). * Ellipses repeat the preceding expression rather than an anonymous axis. This allows expressing multi-dimensional operations more concisely (e.g. ``(a b)...`` or ``b (s [ds])... c``) * Full composability of expressions: Axis lists, compositions, ellipses, brackets and concatenations can be nested arbitrarily (e.g. ``(a b)...`` or ``b (1 + (s...)) c``). 
* Introduction of concatenations as first-class expressions. The library provides the following additional features based on the einx notation: * Support for many more tensor operations, for example: .. code:: einx.flip("... (g [c])", x, c=2) # Flip pairs of values einx.add("a, b -> a b", x, y) # Outer sum einx.get_at("b [h w] c, b i [2] -> b i c", x, indices) # Gather values einx.softmax("b q [k] h", attn) # Part of attention operation * Simpler notation for existing tensor operations: .. code:: einx.sum("a [b]", x) # same op as einops.reduce(x, "a b -> a", reduction="sum") einx.mean("b (s [ds])... c", x, ds=2) # einops does not support named ellipses. Alternative for 2D case: einops.reduce(x, "b (h h2) (w w2) c -> b h w c", reduction="mean", h2=2, w2=2) * Full support for rearranging expressions in all operations (see :doc:`How does einx handle input and output tensors? `). .. code:: einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=16) # Axis composition not supported e.g. in einops.einsum. * ``einx.vmap`` and ``einx.vmap_with_axis`` allow applying arbitrary operations using einx notation. * Several generalized deep learning modules in the ``einx.nn.*`` namespace (see :doc:`Tutorial: Neural networks `). * Support for inspecting the backend calls made by einx in index-based notation (see :doc:`Just-in-time compilation `). A non-exhaustive comparison of operations expressed in einx-notation and einops-notation: .. list-table:: :widths: 50 60 :header-rows: 0 * - **einx** - **einops** * - .. code-block:: python einx.mean("b [...] c", x) - .. code-block:: python einops.reduce(x, "b ... c -> b c", reduction="mean") * - .. code-block:: python einx.mean("b [...] c", x, keepdims=True) - .. code-block:: python # For 2D case: einops.reduce(x, "b h w c -> b 1 1 c", reduction="mean") * - .. code-block:: python einx.mean("b (s [s2])... c", x, s2=2) - .. code-block:: python # For 2D case: einops.reduce(x, "b (h h2) (w w2) c -> b h w c", reduction="mean", h2=2, w2=2) * - .. code-block:: python einx.dot("... [c1->c2]", x, w) - .. code-block:: python einops.einsum(x, w, "... c1, c1 c2 -> ... c2") * - .. code-block:: python einx.rearrange("h a, h -> h (a + 1)", x, y) - .. code-block:: python einops.pack([x, y], "h *") * - .. code-block:: python einx.rearrange("h (a + 1) -> h a, h 1 ", x) - .. code-block:: python einops.unpack(x, [[3], [1]], "h *") * - .. code-block:: python einx.rearrange("a c, 1 -> a (c + 1)", x, [42]) - Rearranging and broadcasting not supported in ``einops.pack`` * - .. code-block:: python einx.dot("... (g [c1->c2])", x, w) - Shape rearrangement not supported in ``einops.einsum`` * - .. code-block:: python einx.add("... [c]", x, b) - Elementwise operations not supported * - .. code-block:: python einx.rearrange("(a b) c -> c (a b)", x) - Fails, since values for ``a`` and ``b`` cannot be determined * - .. code-block:: python einx.vmap("b [...] c -> b c", x, op=my_func) - vmap not supported python-einx-0.3.0/docs/source/faq/flatten.rst000066400000000000000000000042621505216034200211640ustar00rootroot00000000000000How does einx handle input and output tensors? ############################################## einx functions accept an operation string that specifies einx expressions for the input and output tensors. The expressions potentially contain nested compositions and concatenations that prevent the backend functions from directly accessing the required axes. To resolve this, einx first flattens the input tensors in each operation such that they contain only a flat list of axes. 
After the backend operation is applied, the resulting tensors are unflattened to match the requested output expressions. Compositions are flattened by applying a `reshape` operation: .. code:: einx.rearrange("(a b) -> a b", x, a=10, b=20) # same as np.reshape(x, (10, 20)) Concatenations are flattened by splitting the input tensor into multiple tensors along the concatenated axis: .. code:: einx.rearrange("(a + b) -> a, b", x, a=10, b=20) # same as np.split(x, [10], axis=0) After the operation is applied to the flattened tensors, the results are reshaped and concatenated and missing axes are inserted and broadcasted to match the requested output expressions. When multiple input and output tensors are specified, einx tries to find a valid assignment between inputs and outputs for the given axis names. This can sometimes lead to ambiguous assignments: .. code:: # Broadcast and stack x and y along the last axis. x or y first? einx.rearrange("a, b -> a b (1 + 1)", x, y) To find an assignment, einx iterates over the outputs in the order they appear in the operation string, and for each output tries to find the first input expression that allows for a successful assignment. In most cases, this leads to input and output expressions being assigned in the same order: .. code:: einx.rearrange("a, b -> a b (1 + 1)", x, y) # same as np.stack([x, y], axis=-1) The function :func:`einx.rearrange` can be used to perform flattening and unflattening of the input tensors as described in the operation string. Other functions such as :func:`einx.vmap` and :func:`einx.dot` perform the same flattening and unflattening, in addition to applying an operation to the flattened tensors. python-einx-0.3.0/docs/source/faq/solver.rst000066400000000000000000000156401505216034200210430ustar00rootroot00000000000000How does einx parse expressions? ################################ Overview -------- einx functions accept a operation string that specifies the shapes of input and output tensors and the requested operation in einx notation. For example: .. code:: einx.mean("b (s [r])... c -> b s... c", x, r=4) # Mean-pooling with stride 4 To identify the backend operations that are required to execute this statement, einx first parses the operation string and determines an *expression tree* for each input and output tensor. The tree represents a full description of the tensor's shape and axes marked with brackets. The nodes represent different types of subexpressions such as axis lists, compositions, ellipses and concatenations. The leaves of the tree are the named and unnamed axes of the tensor. The expression trees are used to determine the required rearranging steps and axes along which backend operations are applied. einx uses a multi-step process to convert expression strings into expression trees: * **Stage 0**: Split the operation string into separate expression strings for each tensor. * **Stage 1**: Parse the expression string for each tensor and return a (stage-1) tree of nodes representing the nested subexpressions. * **Stage 2**: Expand all ellipses by repeating the respective subexpression, resulting in a stage-2 tree. * **Stage 3**: Determine a value for each axis (i.e. the axis length) using the provided constraints, resulting in a stage-3 tree, i.e. the final expression tree. For a given operation string and signature of input arguments, the required backend operations are traced into graph representation and just-in-time compiled using Python's `exec() `_. 
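For example, the jit-compiled function for a given call can be inspected by passing ``graph=True`` (reproducing the example from the README; the exact generated code depends on the backend and input shapes):

.. code::

    >>> x = np.zeros((3, 10, 10))
    >>> graph = einx.sum("... (g [c])", x, g=2, graph=True)
    >>> print(graph)
    import numpy as np
    def op0(i0):
        x0 = np.reshape(i0, (3, 10, 2, 5))
        x1 = np.sum(x0, axis=3)
        return x1
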
Every subsequent call with the same signature will reuse the cached function and therefore incur no additional overhead other than for cache lookup (see :doc:`Just-in-time compilation `). Stage 0: Splitting the operation string --------------------------------------- The operation string is first split into separate expression strings for each tensor. In the above example, this results in ``b (s [r])... c`` and ``b s... c`` for the input and output tensor, respectively. Inputs and outputs are separated by ``->``, and multiple tensors on each side are separated by ``,``. The order of the tensors matches the order of the parameters and return values of the einx function. Most functions also accept shorthand operation strings to avoid redundancy and facilitate more concise expressions. For example, in ``einx.mean`` the output expression can be implicitly determined from the input expression by removing marked axes, and can therefore be omitted (see :func:`einx.reduce`): .. code:: einx.mean("b (s [r])... c -> b s... c", x, r=4) # same as einx.mean("b (s [r])... c", x, r=4) Another example of shorthand notation in :func:`einx.dot`: .. code:: einx.dot("a b, b c -> a c", x, y) # same as einx.dot("a [b] -> a [c]", x, y) # same as einx.dot("a [b->c]", x, y) See :doc:`Tutorial: Operations ` and the documentation of the respective functions for allowed shorthand notation. Stage 1: Parsing the expression string -------------------------------------- The expression string for each tensor is parsed into a (stage-1) expression tree using a simple lexer and parser. The tree is a nested structure of nodes that represent the different types of subexpressions: .. figure:: /images/stage1-tree.png :width: 300 :align: center Stage-1 tree for ``b (s [r])... c``. This includes semantic checks, e.g. to ensure that axis names do not appear more than once per expression. Stage 2: Expanding ellipses --------------------------- To expand the ellipses in a stage-1 expression, einx first determines the *depth* of every axis, i.e. the number of ellipses that the axis is nested in. In the above expression, ``b`` and ``c`` have depth 0, while ``s`` and ``r`` have depth 1. einx ensures that the depth of axes is consistent over different expressions: E.g. an operation ``b s... c -> b s c`` would raise an exception. In a second step, the *expansion* of all ellipses, i.e. the number of repetitions, is determined using the constraints provided by the input tensors. For example, given a tensor with rank 4, the ellipsis in ``b (s [r])... c`` has an expansion of 2. einx ensures that the expansion of all axes is consistent over different expressions: E.g. an operation ``s..., s... -> s...`` would raise an exception if the two input tensors have different rank. The expression ``b (s [r])... c`` is expanded to ``b (s.0 [r.0]) (s.1 [r.1]) c`` for a 4D input tensor: .. figure:: /images/stage2-tree.png :height: 240 :align: center Stage-2 tree for ``b (s [r])... c`` on input tensor with rank 4. Parameters that are passed as additional constraints to the einx function, such as ``r=4`` in .. code:: einx.mean("b (s [r])... c -> b s... c", x, r=4) are included when solving for the depth and expansion of all expressions. Unlike the root expressions describing the input tensors, these parameters can be given both in expanded (``r=(4, 4)``) and unexpanded form (``r=4``). In the first case, the values of ``r.0`` and ``r.1`` are defined explicitly and an additional constraint for the expansion of ``r`` is included. 
In the second case, the same value is used for the repetitions ``r.0`` and ``r.1``. This extends to nested ellipses with depth > 1 analogously. Stage 3: Determining axis values -------------------------------- In the last step, the values of all axes (i.e. their lengths) are determined using the constraints provided by the input tensors and additional parameters. For example, the above expression with an input tensor of shape ``(2, 4, 8, 3)`` and additional constraint ``r=4`` results in the following final expression tree: .. figure:: /images/stage3-tree.png :height: 240 :align: center Stage-3 tree for ``b (s [r])... c`` for tensor with shape ``(2, 4, 8, 3)`` and constraint ``r=4``. The value of axis lists and axis concatenations is determined as the product and sum of their children's values, respectively. An unnamed axis (i.e. a number in the expression string such as ``1``, ``16``) is treated as an axis with a new unique name and an additional constraint specifying its value. Solver ------ einx uses a `SymPy `_-based solver to determine the depth and expansion of all expressions in stage 2, and the values of all axes in stage 3 by providing equations representing the respective constraints. Instead of directly applying the solver to these equations, einx first determines *equivalence classes* of axes that are known to have the same value (from equations like ``a = b`` and ``a = 1``) and for each equivalence class passes a single variable to `SymPy `_. This speeds up the solver and allows raising more expressive exceptions when conflicting constraints are found. python-einx-0.3.0/docs/source/faq/universal.rst000066400000000000000000000127161505216034200215420ustar00rootroot00000000000000How is einx notation universal? ############################### To address this question, let's first look at how tensor operations are commonly expressed in existing tensor frameworks. Classical notation ------------------ Tensor operations can be dissected into two distinct components: 1. An **elementary operation** that is performed. * Example: ``np.sum`` computes a sum-reduction. 2. A division of the input tensor into sub-tensors. The elementary operation is applied to each sub-tensor independently. We refer to this as **vectorization**. * Example: Sub-tensors in ``np.sum`` span the dimensions specified by the ``axis`` parameter. The sum-reduction is vectorized over all other dimensions. In common tensor frameworks like Numpy, PyTorch, Tensorflow or Jax, different elementary operations are implemented with different vectorization rules. For example, to express vectorization * ``np.sum`` uses the ``axis`` parameter, * ``np.add`` follows `implicit broadcasting rules `_ (e.g. in combination with ``np.newaxis``), and * ``np.matmul`` provides `an implicit and custom set of rules `_. Furthermore, an elementary operation is sometimes implemented in multiple APIs in order to offer vectorization rules for different use cases. For example, the retrieve-at-index operation can be implemented in PyTorch using ``tensor[coords]``, ``torch.gather``, ``torch.index_select``, ``torch.take``, ``torch.take_along_dim``, which conceptually apply the same low-level operation, but follow different vectorization rules (see below). Still, these interfaces sometimes do not cover all desirable use cases. einx notation ------------- einx provides an interface to tensor operations where vectorization is expressed entirely using einx notation, and each elementary operation is represented by exactly one API. 
The einx notation is: * **Consistent**: The same type of notation is used for all elementary operations. Each elementary operation is represented by exactly one API. * **Complete**: Any operation that can be expressed with existing vectorization tools such as `jax.vmap `_ can also be expressed in einx notation. The following tables show examples of classical API calls that can be expressed using universal einx operations. .. list-table:: Example: ``einx.get_at`` :widths: 42 58 :header-rows: 1 * - Classical API - einx API * - | ``torch.gather(x, 0, y)`` | ``torch.take_along_dim(x, y, dim=0)`` - ``einx.get_at("[_] b c, i b c -> i b c", x, y)`` * - | ``torch.gather(x, 1, y)`` | ``torch.take_along_dim(x, y, dim=1)`` - ``einx.get_at("a [_] c, a i c -> a i c", x, y)`` * - | ``torch.index_select(x, 0, y)`` | ``tf.gather(x, y, axis=0)`` - ``einx.get_at("[_] b c, i -> i b c", x, y)`` * - | ``torch.index_select(x, 1, y)`` | ``tf.gather(x, y, axis=1)`` - ``einx.get_at("a [_] c, i -> a i c", x, y)`` * - ``tf.gather(x, y, axis=1, batch_dims=1)`` - ``einx.get_at("a [_] c, a i -> a i c", x, y)`` * - ``torch.take(x, y)`` - ``einx.get_at("[_], ... -> ...", x, y)`` * - ``tf.gather_nd(x, y)`` - ``einx.get_at("[...], b [i] -> b", x, y)`` * - | ``tf.gather_nd(x, y, batch_dims=1)`` | ``x[y[..., 0], y[..., 1]]`` - ``einx.get_at("a [...], a b [i] -> a b", x, y)`` .. list-table:: Example: ``einx.dot`` (similar to einsum) :widths: 42 58 :header-rows: 1 * - Classical API - einx API * - ``np.matmul(x, y)`` - | ``einx.dot("... a [b], ... [b] c -> ... a c", x, y)`` | ``einx.dot("... [a], [a] -> ...", x, y)`` * - ``np.dot(x, y)`` - | ``einx.dot("x... [a], y... [a] b -> x... y... b", x, y)`` | ``einx.dot("... [a], [a] -> ...", x, y)`` * - ``np.tensordot(x, y, axes=1)`` - ``einx.dot("a [b], [b] c -> a c", x, y)`` * - ``np.tensordot(x, y, axes=([2], [1]))`` - ``einx.dot("a b [c], d [c] e -> a b d e", x, y)`` * - ``np.inner(x, y)`` - ``einx.dot("x... [a], y... [a] -> x... y...", x, y)`` .. list-table:: Example: ``einx.multiply`` :widths: 42 58 :header-rows: 1 * - Classical API - einx API * - | ``np.multiply(x, y[:, np.newaxis])`` | ``x * y[:, np.newaxis]`` - ``einx.multiply("a b, a -> a b", x, y)`` * - ``np.outer(x, y)`` - ``einx.multiply("a, b -> a b", x, y)`` * - ``np.kron(x, y)`` - ``einx.multiply("a..., b... -> (a b)...", x, y),`` * - ``scipy.linalg.khatri_rao(x, y)`` - ``einx.multiply("a c, b c -> (a b) c", x, y)`` .. list-table:: Example: ``einx.flip`` :widths: 42 58 :header-rows: 1 * - Classical API - einx API * - | ``np.flip(x, y, axis=0)`` | ``np.flipud(x, y)`` - ``einx.flip("[a] b", x)`` * - ``np.fliplr(x, y)`` - ``einx.flip("a [b]", x)`` .. * - ``einx.rearrange`` - ``np.reshape`` ``np.transpose`` ``np.squeeze`` ``np.expand_dims`` ``tensor[np.newaxis]`` ``np.stack`` ``np.hstack`` ``np.concatenate`` While elementary operations and vectorization are decoupled conceptually to provide a universal API, the implementation of the operations in the respective backend do not necessarily follow the same decoupling. 
For example, a matrix multiplication is represented as a vectorized dot-product in einx (using ``einx.dot``), but still invokes an efficient matmul operation on the backend instead of a vectorized evaluation of the dot product.python-einx-0.3.0/docs/source/gettingstarted/000077500000000000000000000000001505216034200212525ustar00rootroot00000000000000python-einx-0.3.0/docs/source/gettingstarted/commonnnops.rst000066400000000000000000000110751505216034200243560ustar00rootroot00000000000000Example: Common neural network operations ######################################### einx allows formulating many common operations of deep learning models as concise expressions. This page provides a few examples. .. code-block:: python import einx import einx.nn.{torch|flax|haiku|equinox|keras} as einn LayerScale ---------- Multiply the input tensor ``x`` with a learnable parameter per channel that is initialized with a small value: .. code-block:: python x = einx.multiply("... [c]", x, einn.param(init=1e-5)) Reference: `LayerScale explained `_ Prepend class-token ------------------- Flatten the spatial axes of an n-dimensional input tensor ``x`` and prepend a learnable class token: .. code-block:: python x = einx.rearrange("b s... c, c -> b (1 + (s...)) c", x, einn.param(name="class_token")) Reference: `Classification token in Vision Transformer `_ Positional embedding -------------------- Add a learnable positional embedding onto all tokens of the input ``x``. Works with n-dimensional inputs (text, image, video, ...): .. code-block:: python x = einx.add("b [s... c]", x, einn.param(name="pos_embed", init=nn.initializers.normal(stddev=0.02))) Reference: `Position embeddings in Vision Transformer `_ Word embedding -------------- Retrieve a learnable embedding vector for each token in the input sequence ``x``: .. code-block:: python x = einx.get_at("[v] c, b t -> b t c", einn.param(name="vocab_embed"), x, v=50257, c=1024) Reference: `Torch tutorial on word embeddings `_ Layer normalization ------------------- Compute the mean and variance along the channel axis, and normalize the tensor by subtracting the mean and dividing by the standard deviation. Apply learnable scale and bias: .. code-block:: python mean = einx.mean("... [c]", x, keepdims=True) var = einx.var("... [c]", x, keepdims=True) x = (x - mean) * torch.rsqrt(var + epsilon) x = einx.add("... [c]", x, einn.param(name="bias")) x = einx.multiply("... [c]", x, einn.param(name="scale")) This can similarly be achieved using the ``einn.Norm`` layer: .. code-block:: python import einx.nn.{torch|flax|haiku|...} as einn x = einn.Norm("... [c]")(x) Reference: `Layer normalization explained `_ Multihead attention ------------------- Compute multihead attention for the queries ``q``, keys ``k`` and values ``v`` with ``h = 8`` heads: .. code-block:: python a = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=8) a = einx.softmax("b q [k] h", a) x = einx.dot("b q k h, b k (h c) -> b q (h c)", a, v) Reference: `Multi-Head Attention `_ Shifted window attention ------------------------ Shift and partition the input tensor ``x`` into windows with sidelength ``w``, compute self-attention in each window, and unshift and merge windows again. Works with n-dimensional inputs (text, image, video, ...): .. code-block:: python # Compute axis values so we don't have to specify s and w manually later consts = einx.solve("b (s w)... c", x, w=16) # Shift and partition windows x = einx.roll("b [...] c", x, shift=-shift) x = einx.rearrange("b (s w)... c -> (b s...) (w...) 
c", x, **consts) # Compute attention ... # Unshift and merge windows x = einx.rearrange("(b s...) (w...) c -> b (s w)... c", x, **consts) x = einx.roll("b [...] c", x, shift=shift) Reference: `Swin Transformer `_ Multilayer Perceptron along spatial axes (MLP-Mixer) ---------------------------------------------------- Apply a weight matrix multiplication along the spatial axes of the input tensor: .. code-block:: python x = einx.dot("b [s...->s2] c", x, einn.param(name="weight1")) ... x = einx.dot("b [s2->s...] c", x, einn.param(name="weight2"), s=(256, 256)) Or with the ``einn.Linear`` layer that includes a bias term: .. code-block:: python x = einn.Linear("b [s...->s2] c")(x) ... x = einn.Linear("b [s2->s...] c", s=(256, 256))(x) Reference: `MLP-Mixer `_ The following page provides an example implementation of GPT-2 with ``einx`` and ``einn`` using many of these operations and validates their correctness by loading pretrained weights and generating some example text.python-einx-0.3.0/docs/source/gettingstarted/gpt2.rst000066400000000000000000000166071505216034200226720ustar00rootroot00000000000000Example: GPT-2 ############## We succeeded in taking that picture, and, if you look at it, you see a dot. That's here. That's home. That's us. On it, *we wrote, "We are the people."* -- Carl Sagan & GPT-2 In this example, we will reimplement the GPT-2 architecture using einx and the deep learning framework `Haiku `_, load pretrained weights from Hugging Face and validate the model by generating some text. .. code-block:: python import haiku as hk import jax, einx from functools import partial import einx.nn.haiku as einn import numpy as np # Define some layer types we will use. # 1. Use channels-last layout # 2. Use layer normalization, and an epsilon of 1e-5 as in the original implementation Linear = partial(einn.Linear, "... [_->channels]") Norm = partial(einn.Norm, "... [c]", epsilon=1e-5) The main building block of GPT-2 consists of multi-head self-attention and a multi-layer perceptron (MLP). Each sub-block uses a residual connection and layer normalization at the beginning of the residual block: .. code-block:: python class Block(hk.Module): heads: int = 25 mlp_ratio: int = 4 def __call__(self, x): # ########### Attention block ########### x0 = x x = Norm()(x) # Predict queries, keys and values x = Linear(channels=3 * x.shape[-1])(x) q, k, v = jnp.split(x, 3, axis=-1) # Compute attention matrix over h heads q = q * ((q.shape[-1] // self.heads) ** -0.5) attn = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=self.heads) # Apply causal mask mask = jnp.tril(jnp.ones((q.shape[1], q.shape[1]), dtype=bool)) attn = einx.where("q k, b q k h,", mask, attn, -jnp.inf) # Apply softmax and compute weighted average over the input tokens attn = einx.softmax("b q [k] h", attn) x = einx.dot("b q k h, b k (h c) -> b q (h c)", attn, v) # Output projection x = Linear(channels=x.shape[-1])(x) x = x + x0 # ########### MLP block ########### x0 = x x = Norm()(x) x = Linear(channels=x.shape[-1] * self.mlp_ratio)(x) x = jax.nn.gelu(x) x = Linear(channels=x0.shape[-1])(x) x = x + x0 return x The multi-head attention requires no additional statements to split the channel axis into multiple heads or merge the heads back into a single axis. We instead just specify the channels axis as an :ref:`axis composition ` of ``h`` heads and ``c`` channels per head: .. code-block:: python attn = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=self.heads) ... 
x = einx.dot("b q k h, b k (h c) -> b q (h c)", attn, v) We can verify the correctness of these operations by inspecting the jit-compiled function: >>> graph = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=self.heads, graph=True) >>> print(graph) import jax.numpy as jnp def op0(i0, i1): x0 = jnp.reshape(i0, (1, 1024, 25, 64)) x1 = jnp.reshape(i1, (1, 1024, 25, 64)) x2 = jnp.einsum("abcd,aecd->abec", x0, x1) return x2 The final GPT-2 model first embeds the input tokens and adds positional embeddings. It then applies a number of main blocks and maps the output onto next token logits using a linear layer: .. code-block:: python class GPT2(hk.Module): channels: int = 1600 depth: int = 48 vocab_size: int = 50257 block_size: int = 1024 def __call__(self, x): # Word embedding: Retrieve embedding for each token from the word_embed table x = einx.get_at("[v] c, b t -> b t c", einn.param(name="word_embed"), x, v=self.vocab_size, c=self.channels) # Positional embedding x = einx.add("b [t c]", x, einn.param(name="pos_embed", init=hk.initializers.RandomNormal(stddev=0.02))) # Blocks for i in range(self.depth): x = Block(name=f"block{i}")(x) x = Norm()(x) # Classifier x = Linear(channels=self.vocab_size, bias=False)(x) return x We use tensor factories with ``einn.param`` to construct the word and positional embeddings (see :doc:`Tutorial: Neural networks `). With this, we're done with the model definition. Next, we'll define some input data that the model will be applied to and encode it to token representation: .. code-block:: python text = ("We succeeded in taking that picture, and, if you look at it, you see a dot." "That's here. That's home. That's us. On it,") print(f"Input: {text}") # Encode text to tokens import tiktoken encoder = tiktoken.get_encoding("gpt2") tokens = np.asarray(encoder.encode_ordinary(text)) n = len(tokens) # Pad tokens to input block size tokens = np.pad(tokens, (0, GPT2.block_size - n), constant_values=0) The model is initialized using a dummy batch (see `Haiku Basics `_): .. code-block:: python import time rng = jax.random.PRNGKey(int(time.time() * 1000)) model = hk.transform(lambda x: GPT2()(x)) params = model.init(rng, tokens[np.newaxis]) # Add batch axis to tokens using np.newaxis At this point, ``params`` contains only randomly initialized weights. We download the original model weights for the XL variant of GPT-2 from `Hugging Face `_ and load them into our model using the `weightbridge 🌉 `_ library: .. code-block:: python # Download original weights import transformers # only used to download weights pretrained_params = {k: np.asarray(v) for k, v in transformers.GPT2LMHeadModel.from_pretrained(f"gpt2-xl").state_dict().items()} pretrained_params["lm_head.weight"] = np.transpose(pretrained_params["lm_head.weight"], (1, 0)) pretrained_params = {k: v for k, v in pretrained_params.items() if not k.endswith(".attn.bias") and not k.endswith(".attn.masked_bias")} # Map weights to our model implementation import weightbridge params = weightbridge.adapt(pretrained_params, params, hints=[("norm_1", "ln_2")]) Finally, we can run several forward passes to predict next tokens: .. 
code-block:: python apply = jax.jit(model.apply) # Just-in-time compile the forward pass temperature = 0.3 for _ in range(10): # Predict 10 next tokens logits = apply(params, rng, tokens[np.newaxis])[0] logits = logits[n - 1] # Get logits for next token tokens[n] = jax.random.categorical(rng, logits / temperature) # Sample next token n += 1 print(f"Prediction: {encoder.decode(tokens[:n])}") Input: We succeeded in taking that picture, and, if you look at it, you see a dot. That's here. That's home. That's us. On it, Prediction: We succeeded in taking that picture, and, if you look at it, you see a dot. That's here. That's home. That's us. On it, we wrote, "We are the people." The `full example script can be found here `_, and a similar example script for the `Mamba language model using Flax can be found here `_.python-einx-0.3.0/docs/source/gettingstarted/installation.rst000066400000000000000000000013011505216034200245000ustar00rootroot00000000000000Installation ############ einx can be installed as follows: .. code:: pip install einx If you want to install the latest version from GitHub, you can do so using: .. code:: pip install git+https://github.com/fferflo/einx.git einx automatically detects backends like PyTorch when it is run, but does not include hard dependencies for the corresponding packages. If you plan to use einx with a specific backend, you can also install it as follows: .. code:: pip install einx[torch] This will add a dependency for PyTorch and enforce the version requirements of einx (i.e. PyTorch >= 2.0.0). This is currently only supported for PyTorch (``einx[torch]``) and Keras (``einx[keras]``).python-einx-0.3.0/docs/source/gettingstarted/introduction.rst000066400000000000000000000021771505216034200245340ustar00rootroot00000000000000.. toctree:: :caption: Introduction :maxdepth: 3 Introduction ############ einx is a Python library that provides a universal interface to formulate tensor operations in frameworks such as Numpy, PyTorch, Jax and Tensorflow. The design is based on the following principles: 1. **Provide a set of elementary tensor operations** following Numpy-like naming: ``einx.{sum|max|where|add|dot|flip|get_at|...}`` 2. **Use einx notation to express vectorization of the elementary operations.** The notation is inspired by `einops `_, but introduces several novel concepts such as ``[]``-bracket notation and full composability that allow using it as a universal language for tensor operations. einx can be integrated and mixed with existing code seamlessly. All operations are :doc:`just-in-time compiled ` into regular Python functions using Python's `exec() `_ and invoke operations from the respective framework. **Next steps:** - :doc:`Installation ` - :doc:`Tutorial ` python-einx-0.3.0/docs/source/gettingstarted/tutorial_neuralnetworks.rst000066400000000000000000000353771505216034200270310ustar00rootroot00000000000000Tutorial: Neural networks ######################### einx provides several neural network layer types for deep learning frameworks (`PyTorch `_, `Flax `_, `Haiku `_, `Equinox `_, `Keras `_) in the ``einx.nn.*`` namespace based on the functions in ``einx.*``. These layers provide abstractions that can implement a wide variety of deep learning operations using einx notation. The ``einx.nn.*`` namespace is entirely optional, and is imported as follows: .. 
code:: import einx.nn.{torch|flax|haiku|equinox|keras} as einn Motivation ---------- The main idea for implementing layers in einx is to exploit :ref:`tensor factories ` to initialize the weights of a layer. For example, consider the following linear layer: .. code:: x = einx.dot("... [c1->c2]", x, w) # x * w x = einx.add("... [c2]", x, b) # x + b The arguments ``w`` and ``b`` represent the layer weights. Instead of determining the shapes of ``w`` and ``b`` in advance to create the weights manually, we define ``w`` and ``b`` as tensor factories that are called inside the einx functions once the shapes are determined. For example, in the Haiku framework ``hk.get_parameter`` is used to create new weights in the current module and can be defined as a tensor factory as follows: .. code:: import haiku as hk class Linear(hk.Module): def __call__(self, x): w = lambda shape: hk.get_parameter(name="weight", shape=shape, dtype="float32", init=hk.initializers.VarianceScaling(1.0, "fan_in", "truncated_normal")) b = lambda shape: hk.get_parameter(name="bias", shape=shape, dtype="float32", init=hk.initializers.Constant(0.0)) x = einx.dot("b... [c1->c2]", x, w, c2=64) x = einx.add("b... [c2]", x, b) return x Unlike a tensor, the tensor factory does not provide shape constraints to the expression solver and requires that we define the missing axes (``c2``) manually. Here, this corresponds to specifying the number of output channels of the linear layer. All other axis values are determined implicitly from the input shapes. The weights are created once a layer is run on the first input batch. This is common practice in jax-based frameworks like Flax and Haiku where a model is typically first invoked with a dummy batch to instantiate all weights. In PyTorch, we rely on `lazy modules `_ by creating weights as ``torch.nn.parameter.UninitializedParameter`` in the constructor and calling their ``materialize`` method on the first input batch. This is handled automatically by einx (see below). Parameter definition with ``einn.param`` ---------------------------------------- einx provides the function ``einn.param`` to create *parameter factories* for the respective deep learning framework. ``einn.param`` is simply a convenience wrapper for the ``lambda shape: ...`` syntax that is used in the example above: .. code:: python # w1 and w2 give the same result when used as tensor factories in einx functions: w1 = lambda shape: hk.get_parameter(name="weight", shape=shape, dtype="float32", init=...) w2 = einn.param(name="weight", dtype="float32", init=...) The utility of ``einn.param`` comes from providing several useful default arguments that simplify the definition of parameters: * **Default argument for** ``init`` The type of (random) initialization that is used for a parameter in neural networks typically depends on the operation that the parameter is used in. For example: * A bias parameter is used in an ``add`` operation and often initialized with zeros. * A weight parameter in linear layers is used in a ``dot`` operation and initialized e.g. using `Lecun normal initialization `_ based on the fan-in or fan-out of the layer. * A scale parameter is used in a ``multiply`` operation and e.g. initialized with ones in normalization layers. To allow ``einn.param`` to use a default initialization method based on the operation that it is used in, einx functions like :func:`einx.dot` and :func:`einx.add` forward their name as optional arguments to tensor factories. 
``einn.param`` then defines a corresponding initializer in the respective framework and uses it as a default argument for ``init``. E.g. in Flax: .. code:: python from flax import linen as nn if init == "get_at" or init == "rearrange": init = nn.initializers.normal(stddev=0.02) elif init == "add": init = nn.initializers.zeros_init() elif init == "multiply": init = nn.initializers.ones_init() elif init == "dot": init = nn.initializers.lecun_normal(kwargs["in_axis"], kwargs["out_axis"], kwargs["batch_axis"]) :func:`einx.dot` additionally determines ``in_axis``, ``out_axis`` and ``batch_axis`` from the einx expression and forwards them as optional arguments to tensor factories. In this case, they allow ``nn.initializers.lecun_normal`` to determine the fan-in of the layer and choose the initialization accordingly. * **Default argument for** ``name`` A default name is determined implicitly from the operation that the parameter is used in, for example: .. list-table:: :widths: 30 30 :header-rows: 0 * - Operation - Name * - :func:`einx.add` - ``bias`` * - :func:`einx.multiply` - ``scale`` * - :func:`einx.dot` - ``weight`` * - :func:`einx.get_at` - ``embedding`` * - :func:`einx.rearrange` - ``embedding`` * **Default argument for** ``dtype`` The default data type of the parameter is determined from the ``dtype`` member variable of the respective module if it exists, and chosen as ``float32`` otherwise. Any default argument in ``einn.param`` can be overridden by simply passing the respective argument explicitly: .. code:: # Initialize bias with non-zero values einx.add("b... [c]", x, einn.param(init=nn.initializers.normal(stddev=0.02))) # Initialize layerscale with small value einx.multiply("b... [c]", x, einn.param(init=1e-5, name="layerscale")) If no default argument can be determined (e.g. because there is no default initialization for an operation, or the module does not have a ``dtype`` member) and the argument is not specified explicitly in ``einn.param``, an exception is raised. Example layer using ``einn.param`` ---------------------------------- Our definition of a linear layer above that used the ``lambda shape: ...`` syntax can be simplified using ``einn.param`` as shown below. **Haiku** .. code:: python import haiku as hk class Linear(hk.Module): dtype: str = "float32" def __call__(self, x): x = einx.dot("... [c1->c2]", x, einn.param(), c2=64) x = einx.add("... [c2]", x, einn.param()) return x In Haiku, ``hk.get_parameter`` and ``hk.get_state`` can be passed as the first parameter of ``einn.param`` to determine whether to create a parameter or state variable: .. code:: python einx.add("... [c]", x, einn.param(hk.get_parameter)) # calls einn.param(hk.get_parameter) einx.add("... [c]", x, einn.param()) # calls einn.param(hk.get_parameter) einx.add("... [c]", x, hk.get_parameter) # calls einn.param(hk.get_parameter) einx.add("... [c]", x, einn.param(hk.get_state)) # calls einn.param(hk.get_state) einx.add("... [c]", x, hk.get_state) # calls einn.param(hk.get_state) **Flax** .. code:: python from flax import linen as nn class Linear(nn.Module): dtype: str = "float32" def __call__(self, x): x = einx.dot("... [c1->c2]", x, einn.param(self), c2=64) x = einx.add("... [c2]", x, einn.param(self)) return x In Flax, parameters are created by calling the ``self.param`` or ``self.variable`` method of the current module. For convenience, einx provides several options to determine which one is used: .. code:: python einx.add("... 
[c]", x, einn.param(self.param)) # calls einn.param(self.param) einx.add("... [c]", x, einn.param(self)) # calls einn.param(self.param) einx.add("... [c]", x, self.param) # calls einn.param(self.param) einx.add("... [c]", x, self) # calls einn.param(self.param) einx.add("... [c]", x, einn.param(self.variable, col="stats")) # calls einn.param(self.variable, col="stats") **PyTorch** .. code:: import torch.nn as nn class Linear(nn.Module): def __init__(self): super().__init__() self.w = nn.parameter.UninitializedParameter(dtype=torch.float32) self.b = nn.parameter.UninitializedParameter(dtype=torch.float32) def forward(self, x): x = einx.dot("b... [c1->c2]", x, self.w, c2=64) x = einx.add("b... [c2]", x, self.b) return x In PyTorch, parameters have to be created in the constructor of the module as ``nn.parameter.UninitializedParameter`` and ``nn.parameter.UninitializedBuffer`` (see `lazy modules `_). They can be passed to einx functions directly, or by using ``einn.param`` (e.g. to specify additional arguments): .. code:: python einx.add("... [c]", x, einn.param(self.w)) # calls einn.param(self.w) einx.add("... [c]", x, self.w) # calls einn.param(self.w) For PyTorch, ``einn.param`` does not support a ``dtype`` and ``name`` argument since these are specified in the constructor. **Equinox** .. code:: import equinox as eqx class Linear(eqx.Module): w: jax.Array b: jax.Array dtype: str = "float32" def __init__(self): self.w = None self.b = None def forward(self, x, rng=None): x = einx.dot("b... [c1->c2]", x, einn.param(self, name="weight", rng=rng), c2=64) x = einx.add("b... [c2]", x, einn.param(self, name="bias", rng=rng)) return x In Equinox, parameters have to be specified as dataclass member variables of the module. In einx, these variables are set to ``None`` in the constructor and initialized in the ``__call__`` method instead by passing the module and member variable name to ``einn.param``. This initializes the parameter and stores it in the respective member variable, such that the module can be used as a regular Equinox module. When a parameter is initialized randomly, it also requires passing a random key ``rng`` to ``einn.param`` on the first call: .. code:: python einx.add("... [c]", x, einn.param(self, rng=rng)) Stateful layers are currently not supported for Equinox, since they require the shape of the state variable to be known in the constructor. **Keras** .. code:: class Linear(einn.Layer): def call(self, x): x = einx.dot("b... [c1->c2]", x, einn.param(self, name="weight"), c2=64) x = einx.add("b... [c2]", x, einn.param(self, name="bias")) return x In Keras, parameters can be created in a layer's ``build`` method instead of the ``__init__`` method, which gives access to the shapes of the layer's input arguments. The regular forward-pass is defined in the ``call`` method. einx provides the base class ``einn.Layer`` which simply implements the ``build`` method to call the layer's ``call`` method with dummy arguments and thereby initialize the layer parameters. .. code:: python einx.add("... [c]", x, einn.param(self)) Layers ------ einx provides the layer types ``einn.{Linear|Norm|Dropout}`` that are implemented as outlined above. **einn.Norm** implements a normalization layer with optional exponential moving average (EMA) over the computed statistics. The first parameter is an einx expression for the axes along which the statistics for normalization are computed. 
The second parameter is an einx expression for the axes corresponding to the bias and scale terms, and defaults to ``b... [c]``. The different sub-steps can be toggled by passing ``True`` or ``False`` for the ``mean``, ``var``, ``scale`` and ``bias`` parameters. The EMA is used only if ``decay_rate`` is passed. A variety of normalization layers can be implemented using this abstraction: .. code:: layernorm = einn.Norm("b... [c]") instancenorm = einn.Norm("b [s...] c") groupnorm = einn.Norm("b [s...] (g [c])", g=8) batchnorm = einn.Norm("[b...] c", decay_rate=0.9) rmsnorm = einn.Norm("b... [c]", mean=False, bias=False) **einn.Linear** implements a linear layer with optional bias term. The first parameter is an operation string that is forwarded to :func:`einx.dot` to multiply the weight matrix. A bias is added corresponding to the marked output expressions, and is disabled by passing ``bias=False``. .. code:: channel_mix = einn.Linear("b... [c1->c2]", c2=64) spatial_mix1 = einn.Linear("b [s...->s2] c", s2=64) spatial_mix2 = einn.Linear("b [s2->s...] c", s=(64, 64)) patch_embed = einn.Linear("b (s [s2->])... [c1->c2]", s2=4, c2=64) **einn.Dropout** implements a stochastic dropout. The first parameter specifies the shape of the mask in einx notation that is applied to the input tensor. .. code:: dropout = einn.Dropout("[...]", drop_rate=0.2) spatial_dropout = einn.Dropout("[b] ... [c]", drop_rate=0.2) droppath = einn.Dropout("[b] ...", drop_rate=0.2) The following is an example of a simple fully-connected network for image classification using ``einn`` in Flax: .. code:: from flax import linen as nn import einx.nn.flax as einn class Net(nn.Module): @nn.compact def __call__(self, x, training): for c in [1024, 512, 256]: x = einn.Linear("b [...->c]", c=c)(x) x = einn.Norm("[b] c", decay_rate=0.99)(x, training=training) x = nn.gelu(x) x = einn.Dropout("[...]", drop_rate=0.2)(x, training=training) x = einn.Linear("b [...->c]", c=10)(x) # 10 classes return x Example trainings on CIFAR10 are provided in ``examples/train_{torch|flax|haiku|equinox|keras}.py`` for models implemented using ``einn``. ``einn`` layers can be combined with other layers or used as submodules in the respective framework seamlessly. The following page provides examples of common operations in neural networks using ``einx`` and ``einn`` notation.python-einx-0.3.0/docs/source/gettingstarted/tutorial_notation.rst000066400000000000000000000314731505216034200255720ustar00rootroot00000000000000Tutorial: Notation ####################### This tutorial introduces the Einstein-inspired notation that is used in einx. It is based on and compatible with the notation used in `einops `_, but introduces several new concepts such as ``[]``-bracket notation, composable ellipses and axis concatenations. See :doc:`How is einx different from einops? ` for a complete list of differences. Introduction ------------ An einx expression provides a description of the axes of a given tensor. In the simplest case, each dimension is given a unique name (``a``, ``b``, ``c``), and the names are listed to form an einx expression: >>> x = np.ones((2, 3, 4)) >>> einx.matches("a b c", x) # Check whether expression matches the tensor's shape True >>> einx.matches("a b", x) False einx expressions are used to formulate tensor operations such as reshaping and permuting axes in an intuitive way. 
Instead of defining an operation in classical index-based notation >>> y = np.transpose(x, (0, 2, 1)) >>> y.shape (2, 4, 3) we instead provide the input and output expressions in einx notation and let einx determine the necessary operations: >>> y = einx.rearrange("a b c -> a c b", x) >>> y.shape (2, 4, 3) The purpose of :func:`einx.rearrange` is to map tensors between different einx expressions. It does not perform any computation itself, but rather forwards the computation to the respective backend, e.g. Numpy. To verify that the correct backend calls are made, the just-in-time compiled function that einx invokes for this expression can be printed using ``graph=True``: >>> graph = einx.rearrange("a b c -> a c b", x, graph=True) >>> print(graph) import numpy as np def op0(i0): x0 = np.transpose(i0, (0, 2, 1)) return x0 The function shows that einx performs the expected call to ``np.transpose``. .. note:: einx traces the backend calls made for a given operation and just-in-time compiles them into a regular Python function using Python's `exec() `_. When the function is called with the same signature of arguments, the compiled function is reused and therefore incurs no additional overhead other than for cache lookup (see :doc:`Just-in-time compilation `). .. _axiscomposition: Axis composition ---------------- Multiple axes can be wrapped in parentheses to indicate that they represent an *axis composition*. >>> x = np.ones((6, 4)) >>> einx.matches("(a b) c", x) True The composition ``(a b)`` is an axis itself and comprises the subaxes ``a`` and ``b`` which are laid out in `row-major order `_. This corresponds to ``a`` chunks of ``b`` elements each. The length of the composed axis is the product of the subaxis lengths. We can use :func:`einx.rearrange` to compose and decompose axes in a tensor by passing the respective einx expressions: >>> # Stack 2 chunks of 3 elements into a single dimension with length 6 >>> x = np.ones((2, 3, 4)) >>> einx.rearrange("a b c -> (a b) c", x).shape (6, 4) >>> # Divide a dimension of length 6 into 2 chunks of 3 elements each >>> x = np.ones((6, 4)) >>> einx.rearrange("(a b) c -> a b c", x, a=2).shape (2, 3, 4) Since the decomposition is ambiguous w.r.t. the values of ``a`` and ``b`` (for example ``a=2 b=3`` and ``a=1 b=6`` would be valid), additional constraints have to be passed to find unique axis values, e.g. ``a=2`` as in the example above. Composing and decomposing axes is a cheap operation and is, for example, preferred over calling ``np.split``. The graphs of these operations show that they use a single `np.reshape `_ call with the requested shape: >>> print(einx.rearrange("(a b) c -> a b c", x, a=2, graph=True)) import numpy as np def op0(i0): x0 = np.reshape(i0, (2, 3, 4)) return x0 >>> print(einx.rearrange("a b c -> (a b) c", x, graph=True)) import numpy as np def op0(i0): x0 = np.reshape(i0, (6, 4)) return x0 .. note:: See `this great einops tutorial `_ for hands-on illustrations of axis composition using a batch of images. Axis compositions are used for example to divide the channels of a tensor into equally sized groups (as in multi-headed attention), or to divide an image into patches by decomposing the spatial dimensions (if the image resolution is evenly divisible by the patch size).
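As a quick illustration of the multi-headed grouping mentioned above (the axis names and the head count ``h=8`` are chosen only for this example): >>> # Split the channels of a token sequence into 8 heads with 16 channels each >>> x = np.ones((4, 10, 128)) >>> einx.rearrange("b t (h c) -> b h t c", x, h=8).shape (4, 8, 10, 16) The inverse expression ``"b h t c -> b t (h c)"`` merges the heads back into a single channel axis.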
Ellipsis -------- An *ellipsis* repeats the expression that appears directly in front of it: >>> x = np.ones((2, 3, 4)) >>> einx.matches("a b...", x) # Expands to "a b.0 b.1" True The number of repetitions is determined from the rank of the input tensors: >>> x = np.ones((2, 3, 4, 5)) >>> einx.matches("a b...", x) # Expands to "a b.0 b.1 b.2" True Using ellipses, e.g. for spatial dimensions, often results in simpler and more readable expressions, and allows using the same expression for tensors with different dimensionality: >>> # Divide an image into a list of patches with size p=8 >>> x = np.ones((256, 256, 3), dtype="uint8") >>> einx.rearrange("(s p)... c -> (s...) p... c", x, p=8).shape (1024, 8, 8, 3) >>> # Divide a volume into a list of cubes with size p=8 >>> x = np.ones((256, 256, 256, 3), dtype="uint8") >>> einx.rearrange("(s p)... c -> (s...) p... c", x, p=8).shape (32768, 8, 8, 8, 3) This operation requires multiple backend calls in index-based notation that might be difficult to understand at first glance. The einx call, on the other hand, clearly conveys the intent of the operation and requires less code: >>> print(einx.rearrange("(s p)... c -> (s...) p... c", x, p=8, graph=True)) import numpy as np def op0(i0): x0 = np.reshape(i0, (32, 8, 32, 8, 3)) x1 = np.transpose(x0, (0, 2, 1, 3, 4)) x2 = np.reshape(x1, (1024, 8, 8, 3)) return x2 In einops-style notation, an ellipsis always appears at root-level and is anonymous, i.e. does not have a preceding expression. To be fully compatible with einops notation, einx implicitly converts anonymous ellipses by adding an axis in front: .. code:: einx.rearrange("b ... -> ... b", x) # same as einx.rearrange("b _anonymous_ellipsis_axis... -> _anonymous_ellipsis_axis... b", x) Unnamed axes ------------ An *unnamed axis* is a number in the einx expression and is similar to using a new unique axis name with an additional constraint specifying its length: >>> x = np.ones((2, 3, 4)) >>> einx.matches("2 b c", x) True >>> einx.matches("a b c", x, a=2) True >>> einx.matches("a 1 c", x) False Unnamed axes are used, for example, as an alternative to ``np.expand_dims``, ``np.squeeze``, ``np.newaxis``, ``np.broadcast_to``: >>> x = np.ones((2, 1, 3)) >>> einx.rearrange("a 1 b -> 1 1 a b 1 5 6", x).shape (1, 1, 2, 3, 1, 5, 6) Since each unnamed axis is given a unique name, multiple unnamed axes do not refer to the same underlying tensor dimension. This can lead to unexpected behavior: >>> x = np.ones((2, 3, 4)) >>> einx.rearrange("a b c -> a c b", x).shape (2, 4, 3) >>> einx.rearrange("2 b c -> 2 c b", x).shape # Raises an exception Concatenation ------------- A *concatenation* represents an axis in einx notation along which two or more subtensors are concatenated. Using axis concatenations, we can describe operations such as `np.concatenate `_, `np.split `_, `np.stack `_, `einops.pack and einops.unpack `_ in pure einx notation. A concatenation axis is marked with ``+`` and wrapped in parentheses, and its length is the sum of the subaxis lengths.
>>> x = np.ones((5, 4)) >>> einx.matches("(a + b) c", x) True This is used for example to concatenate tensors that do not have compatible dimensions: >>> x = np.ones((256, 256, 3)) >>> y = np.ones((256, 256)) >>> einx.rearrange("h w c, h w -> h w (c + 1)", x, y).shape (256, 256, 4) The graph shows that einx first reshapes ``y`` by adding a channel dimension, and then concatenates the tensors along that axis: >>> print(einx.rearrange("h w c, h w -> h w (c + 1)", x, y, graph=True)) import numpy as np def op0(i0, i1): x0 = np.reshape(i1, (256, 256, 1)) x1 = np.concatenate([i0, x0], axis=2) return x1 Splitting is supported analogously: >>> z = np.ones((256, 256, 4)) >>> x, y = einx.rearrange("h w (c + 1) -> h w c, h w", z) >>> x.shape, y.shape ((256, 256, 3), (256, 256)) Unlike the index-based `np.concatenate `_, einx also broadcasts subtensors if required: >>> # Append a number to all channels >>> x = np.ones((256, 256, 3)) >>> einx.rearrange("... c, 1 -> ... (c + 1)", x, [42]).shape (256, 256, 4) Additional constraints ---------------------- einx uses a `SymPy `_-based solver to determine the values of named axes in Einstein expressions (see :doc:`How does einx parse expressions? `). In many cases, the shapes of the input tensors provide enough constraints to determine the values of all named axes in the solver. For other cases, einx functions accept ``**parameters`` that are used to specify the values of some or all named axes and provide additional constraints to the solver: .. code:: x = np.zeros((10,)) einx.rearrange("(a b) -> a b", x) # Fails: Values of a and b cannot be determined einx.rearrange("(a b) -> a b", x, a=5) # Succeeds: b determined by solver einx.rearrange("(a b) -> a b", x, b=2) # Succeeds: a determined by solver einx.rearrange("(a b) -> a b", x, a=5, b=2) # Succeeds einx.rearrange("(a b) -> a b", x, a=5, b=5) # Fails: Conflicting constraints .. _bracketnotation: Bracket notation ---------------- einx introduces the ``[]``-notation to denote axes that an operation is applied to. This corresponds to the ``axis`` argument in index-based notation: .. code:: einx.sum("a [b]", x) # same as np.sum(x, axis=1) einx.sum("a [...]", x) # same as np.sum(x, axis=tuple(range(1, x.ndim))) In general, brackets define which sub-tensors the given elementary operation is applied to. For example, the expression ``"a [b c] d"`` indicates that the elementary operation ``einx.sum`` is applied to sub-tensors with shape ``b c`` and vectorized over axes ``a`` and ``d``: .. code:: einx.sum ("a [b c] d", x) # ^^^^^^^^ ^ ^^^^^ ^ # elementary operation vectorized axis sub-tensor axes vectorized axis Some other examples: .. code:: einx.flip("a [b]", x, c=2) # Flip pairs of values einx.add("... [c]", x, b) # Add bias einx.get_at("b [h w] c, b i [2] -> b i c", x, indices) # Gather values einx.softmax("b q [k] h", attn) # Part of attention operation Bracket notation is fully compatible with expression rearranging and can therefore be placed anywhere inside a nested einx expression: >>> # Compute sum over pairs of values along the last axis >>> x = np.ones((2, 2, 16)) >>> einx.sum("... (g [c])", x, c=2).shape (2, 2, 8) >>> # Mean-pooling with stride 4 (if evenly divisible) >>> x = np.ones((4, 256, 256, 3)) >>> einx.mean("b (s [ds])... c", x, ds=4).shape (4, 64, 64, 3) >>> print(einx.mean("b (s [ds])... c", x, ds=4, graph=True)) import numpy as np def op0(i0): x0 = np.reshape(i0, (4, 64, 4, 64, 4, 3)) x1 = np.mean(x0, axis=(2, 4)) return x1 .. note:: See :doc:`How does einx handle input and output tensors? 
` for details on how operations are applied to tensors with nested einx expressions. Operations are sensitive to the positioning of brackets, e.g. allowing for flexible ``keepdims=True`` behavior out-of-the-box: >>> x = np.ones((16, 4)) >>> einx.sum("b [c]", x).shape (16,) >>> einx.sum("b ([c])", x).shape (16, 1) >>> einx.sum("b [c]", x, keepdims=True).shape (16, 1) In the second example, ``c`` is reduced within the composition ``(c)``, resulting in an empty composition ``()``, i.e. a trivial axis with size 1. Composability of ``->`` and ``,`` --------------------------------- The operators ``->`` and ``,`` that delimit input and output expressions in an operation can optionally be composed with the einx expressions themselves. If they appear within a nested expression, the expression is expanded such that ``->`` and ``,`` appear only at the root of the expression tree. For example: .. code:: einx.{...}("a [b -> c]", x) # expands to einx.{...}("a [b] -> a [c]", x) einx.{...}("b p [i,->]", x, y) # expands to einx.{...}("b p [i], b p -> b p", x, y) einx provides a wide range of elementary tensor operations that accept arguments in einx notation as described in this document. The following tutorial gives an overview of these functions and their usage. python-einx-0.3.0/docs/source/gettingstarted/tutorial_ops.rst000066400000000000000000000325121505216034200245330ustar00rootroot00000000000000Tutorial: Operations #################### einx represents tensor operations using a set of elementary operations that are vectorized according to the given einx expressions. Internally, einx does not implement the operations from scratch, but forwards computation to the respective backend, e.g. by calling `np.reshape `_, `np.transpose `_ or `np.sum `_ with the appropriate arguments. This tutorial gives an overview of these operations and their usage. For a complete list of provided functions, see the :doc:`API reference `. Rearranging ----------- The function :func:`einx.rearrange` transforms tensors between einx expressions by determining and applying the required backend operations. For example: >>> x = np.ones((4, 256, 17)) >>> y, z = einx.rearrange("b (s p) (c + 1) -> (b s) p c, (b p) s 1", x, p=8) >>> y.shape, z.shape ((128, 8, 16), (32, 32, 1)) Conceptually, this corresponds with a vectorized identity mapping. Using :func:`einx.rearrange` often produces more readable and concise code than specifying backend operations in index-based notation directly. The index-based calls can be inspected using the just-in-time compiled function that einx creates for this expression (see :doc:`Just-in-time compilation `): >>> print(einx.rearrange("b (s p) (c + 1) -> (b s) p c, (b p) s 1", x, p=8, graph=True)) import numpy as np def op0(i0): x0 = np.reshape(i0, (4, 32, 8, 17)) x1 = np.reshape(x0[:, :, :, 0:16], (128, 8, 16)) x2 = np.reshape(x0[:, :, :, 16:17], (4, 32, 8)) x3 = np.transpose(x2, (0, 2, 1)) x4 = np.reshape(x3, (32, 32, 1)) return [x1, x4] Reduction --------- einx provides a family of elementary operations that reduce tensors along one or more axes. For example: .. code:: einx.sum("a [b]", x) # same as np.sum(x, axis=1) einx.mean("a [...]", x) # same as np.mean(x, axis=tuple(range(1, x.ndim))) These functions are specializations of :func:`einx.reduce` and use backend operations like `np.sum `_, `np.prod `_ or `np.any `_ as the ``op`` argument: .. 
code:: einx.reduce("a [b]", x, op=np.sum) # same as einx.sum("a [b]", x) In ``einx.sum``, the respective backend is determined implicitly from the input tensor (see :doc:`How does einx support different tensor frameworks? `). Generally, the operation string represents both input and output expressions, and marks reduced axes using brackets: >>> x = np.ones((16, 8, 4)) >>> einx.sum("a [b] c -> a c", x).shape (16, 4) Since the output of the elementary reduction operation is a scalar, no axis is marked in the output expression. The following shorthand notation is supported: * When no brackets are found, brackets are placed implicitly around all axes that do not appear in the output: .. code:: einx.sum("a b c -> a c", x) # Expands to: "a [b] c -> a c" * When no output is given, it is determined implicitly by removing marked subexpressions from the input: .. code:: einx.sum("a [b] c", x) # Expands to: "a [b] c -> a c" :func:`einx.reduce` also allows custom reduction operations that accept the ``axis`` argument similar to `np.sum `_: .. code:: def custom_mean(x, axis): return np.sum(x, axis=axis) / x.shape[axis] einx.reduce("a [b] c", x, op=custom_mean) :func:`einx.reduce` fully supports expression rearranging: >>> x = np.ones((16, 8)) >>> einx.prod("a (b [c]) -> b a", x, c=2).shape (4, 16) Element-by-element ------------------ einx provides a family of elementary operations that apply element-by-element operations to tensors. For example: .. code:: einx.add("a b, b -> a b", x, y) # same as x + y[np.newaxis, :] einx.multiply("a, a b -> a b", x, y) # same as x[:, np.newaxis] * y einx.subtract("a, (a b) -> b a", x, y) # requires reshape and transpose in index-based notation The elementary operations accept and return scalars and no axes are marked with ``[]``-brackets. Internally, the inputs are rearranged such that the operation can be applied using `Numpy broadcasting rules `_. These functions are specializations of :func:`einx.elementwise` and use backend operations like `np.add `_, `np.logical_and `_ and `np.where `_ as the ``op`` argument: .. code:: einx.elementwise("a b, b -> a b", x, y, op=np.add) # same as einx.add("a b, b -> a b", x, y) Generally, the operation string of :func:`einx.elementwise` represents all input and output expressions explicitly: >>> x = np.ones((16, 8)) >>> y = np.ones((16,)) >>> einx.add("a b, a -> a b", x, y).shape (16, 8) The following shorthand notation is supported: * The output is determined implicitly if one of the input expressions contains the named axes of all other inputs and if this choice is unique: .. code:: einx.add("a b, a", x, y) # Expands to: "a b, a -> a b" einx.where("b a, b, a", x, y, z) # Expands to "b a, b, a -> b a" einx.subtract("a b, b a", x, y) # Raises an exception einx.add("a b, a b", x, y) # Expands to: "a b, a b -> a b" * Bracket notation can be used to indicate that the second input is a subexpression of the first: .. code:: einx.add("a [b]", x, y) # Expands to: "a b, b" .. note:: Conceptually, a different elementary operation is used in this case which is applied to tensors of equal shape rather than just scalars. This variant might be removed in future versions. :func:`einx.elementwise` fully supports expression rearranging: >>> x = np.ones((16, 16, 32)) >>> bias = np.ones((4,)) >>> einx.add("b... (g [c])", x, bias).shape (16, 16, 32) Indexing -------- einx provides a family of elementary operations that perform multi-dimensional indexing and update/retrieve values from tensors at specific coordinates: ..
code:: image = np.ones((256, 256, 3)) coordinates = np.ones((100, 2), dtype=np.int32) updates = np.ones((100, 3)) # Retrieve values at specific locations in an image y = einx.get_at("[h w] c, i [2] -> i c", image, coordinates) # same as y = image[coordinates[:, 0], coordinates[:, 1]] # Update values at specific locations in an image y = einx.set_at("[h w] c, i [2], i c -> [h w] c", image, coordinates, updates) # same as image[coordinates[:, 0], coordinates[:, 1]] = updates y = image Brackets in the first input indicate axes that are indexed, and a single bracket in the second input indicates the coordinate axis. The length of the coordinate axis should equal the number of indexed axes in the first input. Coordinates can also be passed in separate tensors: .. code:: coordinates_x = np.ones((100,), dtype=np.int32) coordinates_y = np.ones((100,), dtype=np.int32) y = einx.get_at("[h w] c, i, i -> i c", image, coordinates_x, coordinates_y) Indexing functions are specializations of :func:`einx.index` and fully support expression rearranging: .. code:: einx.add_at("b ([h w]) c, ([2] b) i, c i -> c [h w] b", image, coordinates, updates) Dot-product ----------- The function :func:`einx.dot` computes a dot-product along the marked axes: >>> # Matrix multiplication between x and y >>> x = np.ones((4, 16)) >>> y = np.ones((16, 8)) >>> einx.dot("a [b], [b] c -> a c", x, y).shape (4, 8) While operations such as matrix multiplications are represented conceptually as a vectorized dot-products in einx, they are still implemented using efficient matmul calls in the respective backend rather than a vectorized evaluation of the dot-product. The interface of :func:`einx.dot` closely resembles the existing `np.einsum `_ which also uses Einstein-inspired notation to express matrix multiplications. In fact, :func:`einx.dot` internally forwards computation to the ``einsum`` implementation of the respective backend, but additionally supports rearranging of expressions: >>> # Simple grouped linear layer >>> x = np.ones((20, 16)) >>> w = np.ones((8, 4)) >>> print(einx.dot("b (g c1), c1 c2 -> b (g c2)", x, w, g=2, graph=True)) import numpy as np def op0(i0, i1): x0 = np.reshape(i0, (20, 2, 8)) x1 = np.einsum("abc,cd->abd", x0, i1) x2 = np.reshape(x1, (20, 8)) return x2 The following shorthand notation is supported: * When no brackets are found, brackets are placed implicitly around all axes that do not appear in the output: .. code:: einx.dot("a b, b c -> a c", x, y) # Expands to: "a [b], [b] c -> a c" This allows using einsum-like notation with :func:`einx.dot`. * When given two input tensors, the expression of the second input is determined implicitly by marking its components in the input and output expression: .. code:: einx.dot("a [b] -> a [c]", x, y) # Expands to: "a b, b c -> a c" .. note:: Conceptually, the elementary operation in this case is not a simple dot-product, but rather a linear map from ``b`` to ``c`` channels, which motivates the usage of bracket notation in this manner. Axes marked multiple times appear only once in the implicit second input expression: .. code:: einx.dot("[a b] -> [a c]", x, y) # Expands to: "a b, a b c -> a c" Other operations: ``vmap`` -------------------------- If an operation is not provided as a separate einx API, it can still be applied in einx using :func:`einx.vmap` or :func:`einx.vmap_with_axis`. Both functions apply the same vectorization rules as other einx functions, but accept an ``op`` argument that specifies the elementary operation to apply. 
In :func:`einx.vmap`, the input and output tensors of ``op`` match the marked axes in the input and output expressions: .. code:: # A custom operation: def op(x): # Input: x has shape "b c" x = np.sum(x, axis=1) x = np.flip(x, axis=0) # Output: x has shape "b" return x einx.vmap("a [b c] -> a [b]", x, op=op) :func:`einx.vmap` is implemented using efficient automatic vectorization in the respective backend (e.g. `jax.vmap `_, `torch.vmap `_). einx also implements a simple ``vmap`` function for the Numpy backend for testing/ debugging purposes using a Python loop. In :func:`einx.vmap_with_axis`, ``op`` is instead given an ``axis`` argument and must follow `Numpy broadcasting rules `_: .. code:: # A custom operation: def op(x, axis): # Input: x has shape "a b c", axis is (1, 2) x = np.sum(x, axis=axis[1]) x = np.flip(x, axis=axis[0]) # Output: x has shape "b" return x einx.vmap_with_axis("(a [b c]) -> (a [b])", x, op=op, a=2, b=3, c=4) Both :func:`einx.reduce` and :func:`einx.elementwise` are adaptations of :func:`einx.vmap_with_axis`. Since most backend operations that accept an ``axis`` argument operate on the entire input tensor when ``axis`` is not given, :func:`einx.vmap_with_axis` can often analogously be expressed using :func:`einx.vmap`: >>> x = np.ones((4, 16)) >>> einx.vmap_with_axis("a [b] -> a", x, op=np.sum).shape (4,) >>> einx.vmap ("a [b] -> a", x, op=np.sum).shape (4,) >>> x = np.ones((4, 16)) >>> y = np.ones((4,)) >>> einx.vmap_with_axis("a b, a -> a b", x, y, op=np.add).shape (4, 16) >>> einx.vmap ("a b, a -> a b", x, y, op=np.add).shape (4, 16) :func:`einx.vmap` provides more general vectorization capabilities than :func:`einx.vmap_with_axis`, but might in some cases be slower if the latter relies on a specialized implementation. .. _lazytensorconstruction: Misc: Tensor factories ---------------------------- All einx operations also accept tensor factories instead of tensors as arguments. A tensor factory is a function that accepts a ``shape`` argument and returns a tensor with that shape. This allows deferring the construction of a tensor to the point inside an einx operation where its shape has been resolved, and avoids having to manually determine the shape in advance: .. code:: einx.dot("b... c1, c1 c2 -> b... c2", x, lambda shape: np.random.uniform(shape), c2=32) In this example, the shape of ``x`` is used by the expression solver to determine the values of ``b...`` and ``c1``. Since the tensor factory provides no shape constraints to the solver, the remaining axis values have to be specified explicitly, i.e. ``c2=32``. Tensor factories are particularly useful in the context of deep learning modules: The shapes of a layer's weights are typically chosen to align with the shapes of the layer input and outputs (e.g. the number of input channels in a linear layer must match the corresponding axis in the layer's weight matrix). This can be achieved implicitly by constructing layer weights using tensor factories. The following tutorial describes in more detail how this is used in einx to implement deep learning models.python-einx-0.3.0/docs/source/gettingstarted/tutorial_overview.rst000066400000000000000000000030121505216034200255710ustar00rootroot00000000000000Tutorial: Overview ################## einx provides a universal interface to formulate tensor operations as concise expressions in frameworks such as Numpy, PyTorch, Tensorflow and Jax. 
This tutorial will introduce the main concepts of Einstein-inspired notation (or *einx notation*) and how it is used as a universal language for expressing tensor operations. An einx expression is a string that represents the axis names of a tensor. For example, given the tensor >>> import numpy as np >>> x = np.ones((2, 3, 4)) we can name its dimensions ``a``, ``b`` and ``c``: >>> import einx >>> einx.matches("a b c", x) # Check whether expression matches the tensor's shape True >>> einx.matches("a b", x) False The purpose of einx expressions is to specify how tensor operations will be applied to the input tensors: >>> np.sum(x, axis=1) >>> # same as >>> einx.sum("a [b] c", x) Here, ``einx.sum`` represents the elementary *sum-reduction* operation that is computed. The expression ``a [b] c`` specifies that it is applied to sub-tensors spanning the ``b`` axis, and vectorized over axes ``a`` and ``c``. This is an example of the general paradigm for formulating complex tensor operations with einx: 1. Provide a set of elementary tensor operations such as ``einx.{sum|max|where|add|dot|flip|get_at|...}``. 2. Use einx notation as a universal language to express vectorization of the elementary ops. The following tutorials will give a deeper dive into einx expressions and how they are used to express a large variety of tensor operations.
@€ÀVâ­VÞ¸  PˆÀsžóœêýïÿ Ñ<ÿùϯNž<9èÜRNªwùÝï~÷ á<ûÙÏ®Þ÷¾÷ :×I @ 8‡*è ¸ð «;î¸#úü=èAU½;\?JýèG?ºzÈCÝÆšOøÖ·¾UýÓ?ýÓÑ£Ñõ.ñ¿ýÛ¿Eç)OyJuûí·GŸç ‹€@œK%ôƒ üèþhõ¥/}iйû'=ô¡­~ðð(·ýÏîßš?ß4ú‰¨.©ºÿøÿxôÿ¶ýOýoßüæ7G_ùG~äGª¿û»¿ÝŽ @€ÀRñRò®K€IÎ>ûìêÛßþv’¶Byàx:0ïBò+^ñŠê OxBÈéÉŽùüç?_½å-o9#ø~ç;ßIv¾†¾ÿû¿¿ºï¾ûúóï @ [8ÛÒèô |ãߨþð‡÷6ٿ׿güÚ×¾v²öC~õ«_]½îu¯ 9t’cþùŸÿ¹zØÃ6IÛ%@€S ÄS kŸ&ø›¿ù›êÜsϬý®†ŸùÌgáŸüÉŸœýÚmüË¿ü˪ÆúЇfïÏÝwß]ýøÿøì×uA B@ N¡¨ XDàÏþìϪŸÿùŸŸíÚyÌcŽ‚ðK^ò’Ù®s¡w¾óGÁø«_ýjÌi£ŽýØÇ>VýÜÏýܨ6œL€–ˆ—’w]-P¿%ú…/|áèvB¨¿'\‡áÜßF]¿=ºÅõ÷‹çøï]ïz׿e<Çu]ƒ¤ˆS(jƒxÓ›ÞT½êU¯šôÚO{ÚÓŽ¾£ûÔ§>uÒë¤nüSŸúTUÇùŸøDꦵwÝu×U¿þë¿>é54N€¦ˆ§’Õ.L.ð›¿ù›Õµ×^;ÉuêŸaªw„¯¸âŠIÚŸ«Ñ믿þhÇ8ÅÏ,µõùÊ+¯¬Þð†7Ì5×!@€I⤜#@€9^üâW7ß|sòK^vÙeGa8÷ßxýÛÄu(¾é¦›BO >îE/zQõGôGÁÇ;ä$ çT }!@€(‹/¾¸ºí¶Û¢Î9tð“Ÿüä£ \·[â§N: ÆŸýìg“ .ªêvýG€Ö( ¯±júL€Gçw^õ¹Ï}n´ÆøÀ£ï ×`oá¿×¿þõGÁø;ßùÎèá>ñ‰O¬îºë®Ñíh€,! /¡îš D ~¤ùž{îÕÖ ^ð‚£]áÇ?þñ£ÚYÛÉ_üâBñÉ“'Guý‘|dU?’í? °FxUÓg¨þõ_ÿµ:묳KœsÎ9GAø¹Ï}îà6J8ñ½ï}ïQ0þ¾0x8÷ßõà?xðùN$@€K ÄKÉ».Œøò—¿\=îqÔÆUW]U]}õÕƒÎ-õ¤'NT×\sÍ áýÃ?üCõØÇ>vйN"@€K ÄKê»6 øÌg>SýÔOýTÔùÏzÖ³Žv…ëï½úïLú»Àõnñ?øÁ(žOúÓÕ\uŽƒ  @€@qUЈøÀ>Pýâ/þbÐy?ôC?t„ëŸiò_¿@ý3Ju0þÊW¾Òð¿ñ§ú§Uýaƒÿ @€ÀÚâµUL  @àHàÆo¬^þò—÷j¼ò•¯< ÃgŸ}vï±øžÀ}÷ÝwŠßüæ7÷²üÁüAõ²—½¬÷8 @€ÜâÜ*¢? $PßµþÞk×?û³?{„/¼ð öÔ.pûí·ãüãDõ÷±ëïeû¬M@ ^[Åô—Ž®¸âŠêmo{Û{ØÃŽ‚ð¯üʯJ(P[ס÷ßøÆ­þê¯þjuýõ×'¼š¦ @€À<ñ<ήB€‰êŸKzßûÞw¬Õú±Ý: ׿ë¿ôõo>×»ÅõãêûÿÕµxÏ{Þ“þ‚Z$@€ Äkž¦xêSŸZÕóÖÿÕo8®ƒðE]4ÍÅ´zLà¶Ûn; Æõ›¾ëÿêÇÒ?õ©OQ"@€«ˆWW2Þ‚ÀÝ~Á†iŒF \üÒV_½÷¾êu¯{}uå•WŽjËÉî½öÚê·ûUyÄÙÕ©wü×a8‹À†νää†Fk¨Ö! ¯£Nz¹1xc7ÜAÿóÚÿS½âEO¨žþß>0è|'¥øÈ>«zËÍŸ¯þ÷•ÿ%MƒZ!P°€@\pq mµñjK§ã% Ä%WרRüßoÝ_ý§‡œuÔ”f Ñámìæ«ýš oÍ™Ê0_•]_£[§€@¼Îºéuáqá6¼¤˜I9£3_E“9aÃæ« ßгˆ³-ŽmY ¹À<÷§}?rË÷ƒ±¸û“×û_X`.{‡˜¯–õwõ¼ÌWy×GïÔ±û€@†˜E—²°À̦G1_åU½ÉKÀ|•W=ô†@›€@ì¾ ¡€f†EÑ¥l,0³)…@œW)ô&CóU†EÑ% Ø-A C8âèR6˜Ù”B Ϋz“¡€ù*âèØ=@ 8ÿéár˜ËÙ·]Ù|•W=ô&/óU^õÐmvˆÝ2°À̰(º”€f6¥°CœW)ô&CóU†EÑ%vˆÝòˆó¯‘.'`¹œ½â¼ìõ&óUþ5ÒCvˆÝ2ˆ3,Š.e#`™M)ìçU ½ÉPÀ|•aQt‰€b÷üâük¤‡Ë X`.go‡8/{½É_À|•ô€b÷ â ‹¢KÙX`fS ;Äy•Bo20_eX]"`‡Ø=@ 8ÿéár˜ËÙÛ!ÎË^oò0_å_#=$`‡Ø=@ C8âèR6˜Ù”Âq^¥Ð› ÌWE—Ø!¹_: IDATvÈ_@ οFz¸œ€ærövˆó²×›üÌWù×H Ø!vÈP@ ΰ(º”€f6¥°CœW)ô&CóU†EÑ%vˆÝòˆó¯‘.'`¹œ½â¼ìõ&óUþ5ÒCvˆÝ2ˆ3,Š.e#`™M)ìçU ½ÉPÀ|•aQt‰€b÷üâük¤‡Ë X`.go‡8/{½É_À|•ô€b÷ â ‹¢KÙX`fS ;Äy•Bo20_eX]"`‡Ø=@ 8ÿéár˜ËÙÛ!ÎË^oò0_å_#=$`‡Ø=@ C5â<ô‚3$ïüó?®žtÞ9‹ 7ûõ®w¼¶ºôy/Ú''`9Î/õÙKÎW×\{SuâšZ‡ôÈGü@õµ/Þ6z¸·¼çTõ—¾ºµË^ôKÕן}¥0?N/o¾šÞØŒˆÇ :ŸÀK.0‡ §kQšÃbÑ‚oHEó>Ç3¯ú,9_ ĵRŠå^vÅÕÕM7ÿ‰@œ×m·šÞ˜¯VS*ݰ€@¼áâz¾K.0‡¨œwá¥ÕçþêoÏ85ÕÍ>íΈÇèåy®f^uYr¾ê Ä)>”{Ôã/ªî¹÷ëq^·Ýjzc¾ZM©ttÃñ†‹oèù ,¹ÀŒUù‹»¾Pÿ3¿ÜyÚÕW]^]uåe±Í&;^ NF™MC˜Ù”â¨#KÎW}ø‰?ñcÕ]·ß2¬o~K¸w.Á‰æÇˆ=M˜¯¦7vcⱂÎ'0À’ ÌØá4'¬ û»Åc¤±ýq|ù˜yÕxÉùªˆë§Rš»¹c›nÎoÍöâ¼îÅ{c¾Ê±*úD฀@ìŽ ¡À’ ÌXŽæã„õK«š/ ³ íã˰ÀÌ«ÆKÎWmøIO<§:õÑ;N#yJeÿë õ‡{õûø ÄyÝ‹9öÆ|•cUô‰€@ì ½À’ ÌœæÛWwßn~§xí‹ÆÇN/`9½qÌ–œ¯Úñ¯ý{óôЧTšK×óاïük8ææple¾rÈ_Àqþ5Òà ,¹ÀŒá~ƳíØNÌÅ¿ð”êÖ÷¿µj[¤¦øù“˜¾9¶\ ̼j»ä|Õ5×4¿ûÝo~&­Ùvý¤ËK.@-¹íÌWÛ®¿Ñ¯C@ ^GôrcK.0c¨½¥ùoc[ŒéSêcû^Ú³¶ß5.a<˜©ïòqí-9_uâæS*CþN÷ÛûôË¡ŸnÚééã¡Ê5¿Î²ûÀrÿœ%_ªuèíÝS|Í&ÄcÜ_BûÙæ«)TµI ­€@œÖSk’,¹À @[°Úß…i.Hû[lû馘EQÛ‚³á'®¹áØBÍÝï>—¾ñÅ,\§ø¹ª9ÇÓg5öß-0Ç ¦=Éùª+7烶0ا°w_ûˆù:Hߪ»®Ó×fê9¸ùU–æuö?œˆ»Ú™[ûúÖìkH›ûçŒõè»'†ü»ùjˆšsÌ+ Ïëíj‚–\`uðßj.<š‹¸¶À|(à¶-CƒaÛ"kP‡,øí\ôùÄ„ø][Íë…Ž»¯/]퇞W7d<1í9ÖsˆÚtç,9_uâæ|û7ÕlwÌBqìPÍê„ö·-6ç¼C!3v~ì ù!Áu¨ÉØb<¦ø+1_M¡ªMiâ´žZ#D`ÉfÈÚEm ¢æB¤ïåZm!ºïœº¿mv·[»àkÛ©1ÙºÝosÊ@¼Äxb¼†k9Dmºs–œ¯½¯ í ø—>ïâ ˆýð¶ÿ7ˆCéDHl{gÿ-Ø}A;f~\" ïúbQ;Ö#¤.±Ç˜¯bÅO`~x~sW$Ð+°ä³·sÿ~@Ûos¶½4«¹#cîzTúª+/;JÌ‚/öQÀ®ØïKOˆ—OÈ=4æ Ì1zéÏ]r¾:ˆ›sCȇk;ýyc?ŒõâCw½ØëÐ÷úûžÐ8ô×þ<´ ³MƒÐù±+ ‡Ìu]tý߃®y5¤~c=ÒÿuTÞ2=ª6 $ˆƒjŽ@ %˜!ýo¸®…JÛâ°oÕ¶ðêú~n[ûÍ„Ð_=î!þÐãÚ!–SSÚxvVñ”wM|ÛKÎW‡q×ÏÂõ°yÞþÓ/}¸-…¹¶ù§þßõÍ—]°ëæz~}Òyçœ&Û;îë[}‘® ÝôësÛžüé;o¬Gß½1äßÍWCÔœC`^x^oW#$°ä³¯ƒmðÐ"¥¹¨ yùTè£Ó‡•Þ#tÁ×¶ Yðu-d‡üÌKŸ}ì¿7}Ö>8ö˜çø%竾Ÿxkþ ôª¶¿çC/ Ü»1æuU&ök&m0ôñâº!óãÐ0Ü57†Ô`çûfè±SüÅÄS¨j“@Z8­§Ö$XrÙ7€æ®c_Àm{\.dAÔ÷ètÛcum/dÁ×µø 
ûä„K@Ü÷׹̿/9_õâæ<²[»šáòÐqÛy!/›Ú¯Z×o»wU¶mžŒ¹fßüØö¡cè¼Ø¸cέÏoûöÐ;ÖcŠ¿ x UmH+ §õÔ$K.0ûûûÂm»&!‹Ò¶ÅÔ.|‡<*½Gß‚o¼S}—·Ïtª/m<ñTwʸv—œ¯úqs®èû¯y|3Àõ=2]Kîúò΄¦|ìÏEµÀ˜åÍcÃpßOó…Þu1¿)=Ö#´O1Ç Ä1ZŽ%°Œ€@¼Œ»«8(°äóPdž.p†.RÚ®W/Pßúû'«{îýú±®v-cq×Ë\bv\rºµK@œÓÝõ½¾,9_õâ¶Ö=¡Òü›iÎ+!xL•Æâ¾Àßì[×üØ6gÇîîÆŽ¥Ë-æåhmo™¾ëö[Æ”dô¹ñhB ˜\@ žœØÄ ,¹À<ÔÛ¾ßî:·+ØîÞ}èš!¿]yh¡ˆ½!v×ÇØg|õÓQÚxât÷FÊ––œ¯Bqs94_ìý­Oˆ»þVû¾t‡Ä†ÿ±cïê‹@<Íß±V lY@ Þrõ=[%˜](1»¡°1 ¬¶@²C=$×ýyܸmœ} ØP›ÔÇ•48õÝ1®½%ç«Ð@Ü ²mM÷=.ÝöaY×–* öÍ'cwDcçõÐ1Û>„w—}ïl8•¤vØ Äî ,¹Àlãhûp*¶7¢Î½CÜ[ìkß"6•ÝÐvÖ>xhå§9oÉù*47kþ6߆ßtCvˆcCf]•]È‹}ÕTx7þ!¿#zùÐ &¹áâIX5J`Óñ¦Ëoð¹ ,¹Àl3‰ P1®!o.ó;Ä}}oû]å¶sBv¯û®5Ç¿¯q<ñwFø5–œ¯BqÛÏ¿íò¸t-ÒˆCæÊ˜·\÷}¸6E Þÿ0 íÃÈ9»¶j†éÐóÂï¼3ë1æÚ]皯¦PÕ&´qZO­H"°ä³9€1¿#ÒVß˵B¾»œê-ÓCŠ×õhd߸†\kŽsÖ0 Ì9î„ðk,9_…â¶0»ÿ¡Õ~ŠÙÜ]OÒ„<³ÓnÎwsâ¶ëµÍÁ!cêûð ü ?R ·r$ßˆÝ 2XrÙähÛAŒùY³½¶”®öÚv'êè—¿rOuê£wk:Åï¹ÚÞÖúµ/Þ6¦ÉEÏÍy<ñ¢·Æ_r¾Š Ä]#‡>.ݪ÷qsnòfæØ@{|³x¡ïXh×Ôëë4ƒôŽõ˜â/Ë|5…ª6 ¤ˆÓzj@%˜ÍŒùYFÛ.J×÷õ…²ÐZè‚o××]ÿb³mã ÙEIrÃh¤´ñÔCµÀœú®‰kÉù*&7?`Ûý7Û8ôu‡C»žÍ¹fÈ#±?O76†Îmó[ß×BR͉»`¨ÇzÄÝùaG›¯ÂœE`IxI}×&Ð!°äs¿Kc¾?v¨¸mß n†Ç¾·J·-¶Ú c|1;ámVKâ6Ÿ5gÿ~²ÀÌkê\r¾Š ĵZó¾úoâ%—¿¦úÜ_ýíjßa1¸/06«Øö7ÛÇÀÐù±îkÛÓ=‡æ¸T_· ýð³­!;ÙSÿ5™¯¦Ö>ññxC-H.°äs0m¡4&TuÁ´µ»f»•¾ñúÇš ùn[肯mñ³˜j>ŽÙ·M~Ó4,m<ñÔwÌðö—œ¯bqsΨŸN¹éæ?9=ø¾¿ù˜@³CÜö÷Zwªo™3™SÚBṯçõþWÐ}¦@<…ª6 ¤ˆÓzj@%˜ûˆ}|/tðmwáóýÕ¾>†âºïm‹¯ßJ ð¡>©Ž+m<; ÌTwHšv–œ¯bqßïq÷…µC8vu§ègír ÄuÛÞ)qȬ˻Ϲ¾VÛž±;ø}p¤ù 8ÜŠùje× 0N@ ççl“,¹À<´H‹Ùñèƒi{›q½ûü[¯¹þŒfZ<õ=:ˆ-L»vÆ»~feÉÇ¥CÚk@Ü÷WµÌ¿/9_ÅâZèÐϸõýÝ Ä]?aÖõ¡ZÈï»OcæÇÝÝÕœ»ûúxè—Úæ¡CZôi;ÄËÌ®J`íñÚ+¨ÿE ,¹ÀÜÝí-Hèï߆ìÐzsuì‚/äwDûÆûÁAì³ïúûÿ¾Äxbú7äX;.CÔ¦;gÉùjH î h!»‰}?%t(l­À¡>6ÆÎõb^ŒxèÿžÄz ù¿!5íGìñæ«X1ǘ_@ žßÜ ô ,¹À¬;7ä»b½ƒjÐõ¹ýÃúvvǶµµ{ÔpÈ‚¯ë·xCƲhk¶3e ®¯5÷xBœÆc9F/ý¹KÎWCq×ÎlÈY}¸ï‘ì>ýzô…/}õ±Ã½·a‰@\w.æçóR„âÐyu¬G_}†ü»ùjˆšsÌ+ Ïëíj‚–\`ÖŒýžXРZêÛ½ì{äM÷mJ}¸>'äþfÛûóR³o‡ÂàØ8äî1ö}ß¹>oÈ1óÐX´ÿ¿5óÕªÚ$V@ Në©5I–\`Ö˜# Õ×9ôºÐ}ð¾€]ˆwíö-àB}7Å\Þ! ÒãéïØ·À+˜öü%ç«¡¸9W„Þ÷!x§ÛŒ»æ¸˜7Ö €Cq=ƶ CvÙw>‡ž\ ­GóNë‘ö/C žÂS›¦ˆ§PÕ&‘K.0GvÝé&ˆ''Žº€ù*ŠËÁ0_m¬à†»Jx•eÓéÒ,0K¯°ñ°À£—þ\óUzS-–#`¾*§–FR®€@\nmlŘ+.ž®O.`99qÔÌWQ\Þ˜€ùjc7ÜU Ä«,›N—.`Yz…oŒ€æ½ô皯қj±óU9µ4’rârkkd+°À\qñt}r Ìɉ£.`¾ŠârðÆÌW+¸á®R@ ^eÙtºt ÌÒ+l|c,0Çè¥?×|•ÞT‹å˜¯Ê©¥‘”+ —[[#[±€抋§ë“ X`NNuóU—ƒ7&`¾ÚXÁ w•ñ*˦ӥ X`–^aã#`9F/ý¹æ«ô¦Z,GÀ|UN-¤\¸ÜÚÙŠ,0W\<]Ÿ\Àsr⨠˜¯¢¸¼1óÕÆ n¸«ˆWY6.]À³ô ß Ì1zéÏ5_¥7Õb9æ«rji$å ÄåÖÖÈV,`¹ââéú䘓G]À|Ååà ˜¯6VpÃ]¥€@¼Ê²ét阥WØøÆX`ŽÑK®ù*½©Ë0_•SK#)W@ .·¶F¶b ÌO×'°Àœœ8êæ«(.oLÀ|µ±‚î*âU–M§K°À,½ÂÆ7FÀsŒ^úsÍWéMµXŽ€ùªœZI¹q¹µ5² X`®¸xº>¹€æäÄQ0_Eq9xcæ«ÜpW) ¯²l:]º€fé6¾1˜côÒŸk¾JoªÅrÌWåÔÒHʈ˭­‘­XÀsÅÅÓõÉ,0''Žº€ù*ŠËÁ0_m¬à†»Jx•eÓéÒ,0K¯°ñ°À£—þ\óUzS-–#`¾*§–FR®€@\nmlŘ+.ž®O.`99qÔÌWQ\Þ˜€ùjc7ÜU Ä«,›N—.`Yz…oŒ€æ½ô皯қj±óU9µ4’rârkkd+°À\qñt}r Ìɉ£.`¾ŠârðÆÌW+¸á®R@ ^eÙtºt ÌÒ+l|c,0Çè¥?×|•ÞT‹å˜¯Ê©¥‘”+ —[[#[±@s¹â¡è:Éνäää×pn󕻃@¸€ù*ÜÊ‘æˆç’v˜Xݼ€æ²·€ùjYW_—€ùj]õÒÛmÄÛ¨³Q®LÀseÓÝE,0å¯ÌWËú7¯þŸŸùîcÿ«¿þÐóóêàÆ{c¾Úø `øY ÄY–E§¶.`¹õ;Àøc,0c´Òk¾Jo:¦ExŒÞô皯¦7v±q¬˜ã  @€™ <à8Ö³ï~÷»™öT· ‡€@œGô‚ 0Z@ M¨6& o¬à†K€”+ —[[##@`xW­ @€fˆg'wAV. ¯¼€ºO€Ø ÄîÄ Äq^Ž&@€d+ g[#@ S8ÓÂè @ V@ Žs<[ˆ·~? PŒ€@\L) „™â™ ]† 0µ€@<µ°ö (M@ .­¢ÆC€lV@ Þlé œñ@8§ @€rˆs«ˆþ »€@œ{…ô ( B9Œÿ! » @€…Ä…Ò0˜M@ žÚ… @€Ó ÄÓújòâòjjD @€ÀFâÞ° , ¦s" @ /8¯zè ù Äù×H  @€Aq“ƒ pZ@ v3 @€ ˆ )¤a 0›€@<µ  @€¦ˆ§õÕ:å ÄåÕÔˆ @€ Ä-¼a 0X@ LçD @€@^q^õÐòˆó¯‘ @€‚â & @à´€@ìf @€" RHà @`6x6j"@€L+ Oë«uʈ˫© @€ˆ7ZxÃ&@`°€@<˜Î‰ @€¼â¼ê¡7ä/ ç_#=$@€ ÄAL"@€ÀiØÍ@€(D@ .¤†A€ÀlñlÔ.D€˜V@ žÖWë”' —WS#"@€6* o´ð†M€À`x0  @€y ÄyÕCoÈ_@ οFzH€ˆƒ˜D€Ó±› Pˆ€@\H! 
ƒÙâÙ¨]ˆ 0­€@<­¯Ö (O@ .¯¦FD€lT@ Þhá ›Áñ`:' @€òˆóª‡Þ ¿€@œô $ 19ˆ§b7 @ ¸B³ ijQ» @`ZxZ_­ Pž€@\^Mˆب€@¼ÑÂ6ƒâÁtN$@€¤¸ÿþû«³Î:kTƒ©qŠ>“  0±€@<1°æ  @€!·Ýv[uçwV¿ñ¿18§ Äu~ãßXþùÕE]Ò}Ç @`•ñ*˦Ó @€@‰ÏxÆ3ŽBñ«^õªAÁxl ®ƒðu×]w: ßzë­%2N Än @€@&õ.ñÅ_|Ô›G<âÑÁxh ÞÂ÷Þ{ïÑõO:ew8“ûB7˜N@ žÎVË @€hK.¹¤Úß™­ƒqýu½kÜ÷ãØ@Ü„ë×;Õþð‡£ûî¬M@ ^[Åô—(Zà#ùHëÎlH0 Ä]Ax[ïT?ýéO/ÚÙà P Äî @€@fÍ]âýî Æ}¸/××±;œÙÍ ;L* OÊ«q @€@¼@×.q_0î Ä!Ax×¶Ýáøz9ƒõ Äë­ž @€ Ú%î Æß÷}ßwLä_þå_N¿5z÷²¬Cdv‡ ¾¡ VØA€ÈP d—¸Œ›¡·~¼:$ïÚ±;œá KL* OÊ«q @€ÀpÐ]âáWøÞ™v‡S(jƒµ Äk«˜þ @€›ˆÝ%cwxŒžs X«€@¼ÖÊé7 ° 9v‰íoâV2HZb· @ c9v‰íg|è“ Ä“òjœ 0^`Ê]b»Ããë£Ö+ ¯·vzN€lD`Ê]b»Ã¹‰ “VØA€XÀ»Äv‡WPx]$@`RxR^ @€ÒL±Klw8Mm´B€ÀzâõÖNÏ  @€ ¤Ü%¶;¼±›Çp hˆÝ @€•¤Ü%¶;¼’¢ë&“ Ä“òjœ V Å.±Ýá´5Ñëˆ×[;='@€6(b—ØîðoC&@ U@ vc @€V&0f—ØîðÊŠ­»L* OÊ«q @€@z1»Äv‡Ó×C‹¬W@ ^oíôœذÀ]b»Ã¾a VØA€X¡À]b»Ã+,´. 0©€@<)¯Æ  @€Ó ÄìÛž®Z&@`½ñzk§ç @€ÀÆbv‰íoüf1|Zb7 @`Å!»Äv‡W\`]'@`RxR^ @€¦Ù%¶;ý]b»Ãó¹»ëˆ×[;='@€‹ Üýá,Þ8.ðßO|âèñö«Ÿ†&3s/9™Yt‡Ø=@€ ˆÓMvâíñµ£¶/|Ò£&»††‡ ÄÃÜœE`JxJ]m @€Ââ lxI⤜#D@ N¨ °Mx›u7êañ07g˜R@ žRWÛ @ pf >÷§¯,|ĆG \àîO^{ì`8ÜΑæˆç’v P €@\`Q )™€@œŒRC&ˆ'£Õ0(_@ .¿ÆF8\@ nçLs ÄsI»(P@ .°¨†”L@ NF©!“ Ä“Ñj˜”/ —_c#. ·s&¹⹤]‡( XTCJ& '£ÔÉâÉh5L€ÊˆË¯±ˆ‡Û9“À\ñ\Ò®C€ ˆ ,ª!%ˆ“QjˆÀdñd´&@€å Äåר‡ ÄÃíœI`.x.i×!@€ ÄÕ’ ÄÉ(5D`2x2Z  @€òâòkl„ÃâávÎêúf` IDAT$0—€@<—´ë @€â‹jHÉâd”"0™€@<­†  @€@ùqù56Âáñp;g˜K@ žKÚu @€@qE5¤dq2J ˜L@ žŒVà @ |¸üápx¸3 Ì% Ï%í: @ @¸À¢R28¥†L& OF«a P¾€@\~p¸€@<ÜΙæˆç’v P €@\`Q )™€@œŒRC&ˆ'£Õ0(_@ .¿ÆF8\@ nçLs ÄsI»(P ¤@ü€‡^pF…îüó?®žtÞ9‹V®Ù¯w½ãµÕ¥Ï»xÑ>¹x˜€@æä(K ÄKê»6X¹@)øškoªN\sÃÕ¸ìE¿TÝxý‰E«$/Ê?êâñ(>'˜E@ ž…ÙE @€@™¥âó.¼´úÜ_ýíEzä#~ úÚo[´xñ¢ü£..âs2YâY˜]„”)PB þ‹»¾Pÿ3¿ÜY «¯º¼ºêÊË+ @¼ýè Ä£ 5@`rxrb @€å ”ˆ_vÅÕÕM7ÿÉé"=ñ'~ìØnqýÿ¿ëö[Ê-¢‘M& OF«aÉâd”"@€Û(!?êñU÷ÜûõÓÅ«_Zõ—¾úX1sx¹Ööî®õX ^  |¸ü!˜L`íø–÷œ:~wßn~§8‡—kMVD O& OF«aÉâd”"@€ÛX{ ~Ƴ­:õÑ;Nîâ_xJuëûßZ5ß:Ã˵¶ww­Äñúkhå Äåר  @€Àdkć^XÕü·¥_®5´ˆ]?)µkoÍ¿kÜ|Ü}÷ÆP«Ôç Ä©EµG ½€@œÞT‹ @`3kÄmAñ»ßüÌéÚ5›î{¹VÛO7Å|÷¸ùr¯º#uoþ>rh€mî~÷Ý”}ã;t~3˜¦ÜQoºÖ5j>êÞì[.^Ä}w'°¼€@¼| ô€¬V`͸´š»‹mùPÀmûù¦Ð`ØðvuÈÏ.5jÌ âwíΈ›&Íñ…~hã2äXxˆšsÌ+ Ïëíj @ (µâ¶ðÚ¢šÁ«ïåZm!ºïœú†h °»ÝêØ@ܶSsÓ…†øý6ç ÄÍŸÅjŽmHÿc|bŽˆc´K`xwW%@€E¬57Oî QÍÇŽCÂVì£Ó]J_uåeG÷HL îz”¸kÇ´ë±êØGŽç Äû8ûýÜ}ÈòÄ\|ñ\Ò®C`¸€@<ÜΙ @`ók ÄÍ×¢Úf_XlÛ}îú~n[ûÍG·cñèqínð®ï®_û?é¼srèz%gQ pP@ vƒ @€ƒÖˆÛà¡ïÍ6ÃsÈ˧B>ô¨ô®(1¸Ù^_xß]£m§xÿcƒo'¶âÜÞ&Ý5L8Á    Äkž”,°Æ@Ü }·í‘æOõ=:ÝBÛìxÿC‚\‚ðîï¦Í1——fõým Ä}BþÀòñò5Ð °Z5âØßn{:ô{ªÍkíÂwÈ£Ò)vˆC¾óœûÍ׈s ívˆs¿‹ô@·€@ìî @€ ¬-÷ýöpÄÐPÖv½zø­¿²ºçÞ¯»\WÈ‹Ù!îzIÖZvTÛüczðÍ<Á‰vˆ'@Õ$ÄqbPÍ @€- ¬-÷ýöpWíº‚íîMЇjÞR÷Ï9ô]ߘ@Üõ–éýkõ="žÛý;´f9ŒC Ρ ú@à°€@ì!@€ ¬)·…ŘӮǟCðšçîŸÓ÷‚¨˜@\·û;Ä!߇ãTÇÄSÉj—Z@ v @€ƒÖˆCvjc!BÃä¡Û¾ïÃÆâ!¡¸>'×ï ıw¥ã ˆˆc´K€XS >´K;´¬¡/תÛo ä!;ÔCq}½Ç§ÛÆÝ·c=ÔjèyñP9ç " ‡(9†hXK nûpª’öíðö…Ó¾ó‡âýñÅ>FS(ˆSÝ©Ú!@ M@ v_ @€ƒÖˆca È¡bíÚ™ó;Ä}}oû]å¶sBv¯û®•âßâŠÚ @ K@ vo @€ƒÖˆÇüŽp¦­­¾·6‡|w9Õ[¦‡òQ¿èŒŸ€ªÛé×k 9G ¢æBâP)Ç @€g¬!·íˆ†¾ «­äm»Í]íµ}·þÞñ—¿rOuê£wk>Åï¹E›»Ø¹¼dK SUç Ð' ÷ ùw @ S` ¸¹:vç³íûÈ]/×:2›ÿÖõ½ÝØïïúhÛÆÕ÷ýæ9þ4â9”]ƒÀvâíÖÞÈ  @€ÀhÜqÛmÈw~û`Ú¾Ü }o•n  m} ÄmíÅì„·Y Ä}wÂá¿û“×;àÜKNŽkÐÙ$ˆ““jlG ÷@ÜJcBbW%ÛÚݳ]Jßxý‰cM¶µÓ ¡¡¸íûÍ1o‹n>Z>v'=Õ_âT’Ú!@ M@ v_ @€ƒrÄÍ0™*äµÞý¶c¾Û×ÇÐ@\±íûÍ!¿•àß(#NˆGà9•^¸—È @€@—@Î8ô‘ä¡Õm{;s½ûü[¯¹þŒfú £¾~ÆâC¿·Üµ3Þõ“T9<.Ýòcv½‡Ö6Õy™N%©Ó ÄÓÙj™/s n z)C^èïù†ìÐzsuL n CnÂØïY7?ˆ}¡×¡>Ú!RAç * ‡J9Ž8C ×@<öû´!¥n»Fó¼Ð`xè÷cq݇®ßWH€o¶#·ËÚ!¹ãC`YxYW'@€«È5·íÞzlyhº7ÞµsÍ®G§O\sñºƒ½ßøÐŽ Äñп!çXZ@ ^º®O€V,k ž2 í—ëÐwv‡ì´öìúÚ¡x×϶fí!ŋƦôöÈôŠ']'°xEÒE «@®8W/ýÚ–€G¦·Uo£]§€@¼Îºé5ÈB@ ΢ :‘©€@œiat‹Àž€@ìv @€ ă霸xE6ÄÕ Ä«/¡ @€åâåì]98ÿé!Ø=@€ ˆÓ9qñŠlˆ«ˆW_B @€Ë ÄËÙ»rþqþ5ÒC±{€, ¦sââ ÙW/ ¯¾„@€–ˆ—³wåüâük¤‡b÷ 0X@ LçÄ Ä(²!®^@ ^} €,' /gïÊù Äù×H Äî @`°€@<˜Î‰ˆ7PdC\½€@¼úXN@ ^ÎÞ•óˆó¯‘ˆÝ @€À`x07 o È†¸zxõ%4 °œ€@¼œ½+ç/ ç_#=$ » @€Áñ`:'n@@ Þ@‘ qõñêKh @`9x9{WÎ_@ οFzH@ v @€ƒâÁtNÜ€€@¼"âêâÕ—Ð @€Àrñrö®œ¿€@œô€@ì @€ ă霸xE6ÄÕ Ä«/¡ @€åâåì]98ÿé!Ø=@€ ˆÓ9qñŠlˆ«ˆW_B @€Ë ÄËÙ»rþqþ5ÒC±{€,Рăr" œ{ÉÉ 
ŒÒ ¬K@ ^W½ô–d% gUÉ\@ μ@º·Ix“e7h F@ N㨕mÄÛ¨³Q®K@ ^W½ô–d% gUÉ\@ μ@º·Ix“e7h @€ˆÝ @€ °Ix“e7h @€ˆÝ @€ °Iÿ=–Á]0ùl_IEND®B`‚python-einx-0.3.0/docs/source/images/stage2-tree.png000077500000000000000000001630541505216034200223330ustar00rootroot00000000000000‰PNG  IHDRôØ:‰sØ IDATx^ìÝ Ô4U} îúŒ nlÊ"þõ áQ„aEMFQåDÉA .˜×£‚2*‹"WÐDÜ@ã 1A˜Q#²(:2n,Ÿâ‚€ü¹ýÑo¿ÕÝukë[UOŸóôýêÞº÷ùÝ·êÖ¯oß^sÛí¯Ì‹ @€@"W|ö©‰´D3¤/°Ó¾g§ßH-$@€¨M`„~m–*"@€¨A@B¿DU F@B0¡ÖQ @€ÀH@Bß@ @€HJ@B?©phLâú‰Hó @€5 Hè× ª: @€júÕü”–€„þ°â­· @€ }c€ @ )é„þN:2©öi e \qÁWœ^B™Ñpn @€@ûúí›;# @€À }ÃÀl }£ƒ 0l ýaÇ_ï  @€É Hè' JH@B?¡`h  @` úK@wJ @€ÙúF9¶Ü1< @€ƒÐtøuž ž€„~z1Ñ¢t¬ÐO'ZB€X†€„þ2Ô“ @`¦€„¾ÁA`¶€„¾ÑA€¶€„þ°ã¯÷ @€ä$ô“ ‰%$ ¡ŸP04… ° ý% ;% @€Àl }£ƒÀœ¿{è @€A Hè:ü:O€HO@B?½˜hQ:Vè§ -!@€,C@BêÎI€ 0S@Bßà 0[@Bßè @€ [@BØñ×{ @€@rúÉ…DƒÐO(šB€X‚€„þÐ’ @`¶€„¾ÑA`Î߇=ô  @€À $ô~'@€¤' ¡Ÿ^L´(+ôÓ‰…– @€–! ¡¿ uç$@€˜) ¡op˜- ¡ot @€†- ¡?ìøë= @ 9 ýäB¢A Hè' M!@€,A@B èNI€ 0[@Bßè 0çïÃú† @`Ðúƒ¿Î @€ÒÐO/&Z”Ž€úéÄBK @€ËÐ_†ºs @€ÌÐ78ÌÐ7: @€ÃÐvüõž œ€„~r!Ñ „$ô †¦ @€– ¡¿t§$@€˜-0ä„þs;>;ýýç¬ÀùÆùgf»î²c#CfÍÆ»¯¨÷gœ˜tÀÞœ«ŽJÏúð¹I·¯Ž>.ªCB‘'@€ô[@B¿ßñÕ; @€@ç$ô%ô§íÅ—|7ÛíÑO϶Übóìú«>ß¹1]gƒ%ôëÔT @ {úÝ‹™ @€z- ¡/¡?9ÀwÙã ìÒ˯ýJB?Ë$ô{}ùÓ9 @€ÀB ý…D @€hS@B_B<ÞNxãéÙ1'œ¶~øIèKè·y-r. @€@Šú)FE› @€Ðo/¡Ÿú0“Ð_!+ôSµÚG€hV@B¿Y_µ @€D HèK臌„¾„~äåÃá @€Þ Hè÷>Ä:H€è–€„¾„¾„þì¿Y+ô»u=ÓZ @€@Ýúu‹ª @ ’€„¾„¾„¾„~¥‹ˆÂ @€ Hè÷8¸ºF€袀„~wúÓ[âL·œqbvÐ{—†¶ÜYMg…~éᤠ @ ú½£N @€ú# ¡ß^BÍÆ»¯8Eðûìÿ¢ìÜ/^TxÐ=tç²K.<«ÐñÏ9ìøìô÷¯4˜Wð矙íºËŽ…êîÃAú}ˆ¢> @€Ê Hè—·S’ @  ý´ú[­Ý+[÷ãJE¾Hò]B>­„~©¡§ @ 7ú½ ¥Ž @€ú! ¡ŸnB—=Ê.½üÊÒmË-6Ï®¿êósËKèKè—`  @€ ¡?€ ë" @ Kúi&ôÏúð¹ÙÓž}Ôª¡4k›žYÛòô ³£Hè÷!Šú@€è‘€„~z ý0¼vÙã ìÒ˯\1Ò9øIÙ{ÞvÌÜÑ—·:¿H¹PiÙ•ý=úsXÕ ý>GWß @€‹$ô9‚ @ E ý4úyÛŬíwòÞeŠl·3+¡óÉ€‡mk§’ÐoÚ‰ @€I Hè'"@€ W@BeB¿Ž‘püÑ/ÌŽ>òUU­Ùx÷¿ûÀ'f°÷ÌSÎJÐÇ´qV[òêÈÛêgú¸EmŽi[Ž•ÐïB”´‘ М€„~s¶j&@€(! ¡ŸnB?„s«µ{eë~|C‰ÈfYÑ­v&+_ô&BÌ¥X! ýÄ¢9 @€–$ô[w: @€ùúi'ôCô¦¿ä¶È˜žµ-O‘²ÓŸ$˜,SæM‚"çLõ ýT#£] @€v$ôÛqv @€‚úé'ôÇ¡ÌûÒÚÉ0?tç²K.<«`äç¶Ïþ/ÊÎýâE«ªóµ4´áJ$ôV= @ q ýĤy @€¡ 9¡?´Xëo¼€„~¼™ @€> Hè÷)šúB€耄~‚¨ Hè7F«b @€@'$ô;&$@€ G@B8±ÖÓx ýx3% @€}ÐïS4õ… Ð ýQÐoŒVÅ @€NHèw"LI€Ž€„þpb­§ñúñfJ @€ú$ ¡ß§hê  @ ú=¢.4& ¡ß­Š  @€ÐïD˜4’ 0 ýáÄZOã$ôãÍ” @€ôI@B¿OÑÔ @€@$ô{D]hL@B¿1Z @€:! 
¡ß‰0i$ @`8úɵžÆ HèÇ›)A€è“€„~Ÿ¢©/ @€Hè÷ ˆºÐ˜€„~c´*&@€tB@B¿aÒH @€Àp$ô‡k=Ð7S‚ Ð' ý>ES_ @€=ÐïAu¡1 ýÆhUL€è„€„~'¤‘ @€áHè'Öz/ ¡o¦ @ Oú}Ц¾ @€z ¡ßƒ êBcúѪ˜ Ð  ýN„I#  @€ÃÐN¬õ4^@B?ÞL  @€@Ÿ$ôûM}!@€ô@@B¿AÔ…Æ$ô£U1 @ ú“F @€†# ¡?œXëi¼€„~¼™ @€> Hè÷)šúB€耄~‚¨ Hè7F«b @€@'$ô;&$@€ G@B8±ÖÓx ýx3% @€}ÐïS4õ… Ð ýQÐoŒVÅ @€NHèw"LI€Ž€„þpb­§ñúñfJ @€ú$ ¡ß§hê  @ ú=¢.4& ¡ß­Š  @€ÐïD˜4’ 0é„þpz¾üžþðº_f?¼î?²ï_ûËì×þÇèç›ßù÷ì¾›m”Ýëîd×ýäWÙCvÜ<{Àýïm¿Í½îøïí?÷¾çËoü@[°Ó¾g´çºM€¦€„þ0ã®× @€d$ô› ÍÍ·ün”¤ÿþ(YÿËÑCÿŽßý2»åöÏ{mp×»dwY³&»éæ[sÿ=$üCbÿ÷¿×í‰þðßè¿#áÊz5' ¡ßœ­š  @€) Hè§m"@€ X@B¿ZðoüåÍëW×ÿàö÷w&ðÿ#»fݯªU^¢ôÿ·õ=W­èÉÿmoÿ½Wu ýê†j @€tI@B¿KÑÒV @€À$ôù'?ýÍïWׯL؇÷7üì¦Å$pÄ]o_¹¿rëžßoásû ÿ-6ß(v£ ú݈“V @€êЯKR= @€jøÿïÿeW]uÕŠŸ«¯¾zôÿñ‹_Ôx¦ÅU=ùÉOÎB{¾ùÍo®:ø.w¹Kö»ßåoÕ³¸æü#6Ùd“l‡vÈþèþhÅÃï6ÝtÓ²Õ*G€ @ óú¡ @€tQà·¿ýíÌ„}HÚß|óÍtkûí·ÏÖ®];úùÕ¯~•þùÙ÷¿ÿýÜsí·ß~Ù«_ýêl÷ÝwýûG‘|òÉ«ŽÝ|óͳ¼àÙV[m•]yå•Ùÿý¿ÿwôßû·«½[o½õúDÿtÒà 7¬ý|*$@€ ’€„~JÑÐ @€^ üüç?_Ÿ´¯®¯ºÿÞ÷¾×H_7Ø`ƒõ ûqâ>ü÷AzÐè÷!éýõ¯=;î¸ã²O}êS¹mxÈC’{ì±Ù°êß?üágÿí¿ý·ìÇ?þñª;üðó“N:iýïoºé¦õÉýÉDHø_ýõµ÷?ô1$ù§ýá÷^ @€ú ¡ß‡(ê @€ÀÒ®»îºQÒ~:a~·nݺFÚµñƯOÚõãäýð€™ç Iø×¾öµÙÛÞö¶Ücî}ï{V䇕øó^?üáGIýO~ò“«Ûc=²·¿ýíÙ.»ì2·ŽŸýìgë“ýãýãÿÖ½¥Px#/Ñ~w¿ûݯ‘©” @€@úM¨ª“ @ WËØÏ~Ë-·Ì]i÷ak›Ø×›ßüæQ2ÿÆoÌ-zØa‡’ù÷½ï} Wý·û·ÙQGµêø°¯~Hê?ÿùÏ/\×ä×\sÍŠ­{Ɖþ°Ê¿î­ˆfí×öïÿæE€ @ % ý”¢¡- @€KHa?ûÉíqÂÿ«ðëx…-rÂö:—_~ynuÓûäÇžóK_úÒhµþw¾óUEŸõ¬gû÷¸Ç=b«y|øäÃôŠþ6öëAïx;ûõ×R @€DHèG`9” @ »©ìg?¹EN“Iá*ûäÇFù׿þõ(©ÿ¾÷½oUÑwÜq”ÔìcÔñ“ûõO®èoz¿þéD¿ýú£Âæ` @€H ýH0‡ @€¤+°Ìýì§÷²«ìçígß”b]ûä—iß;ßùÎQbÿw¿ûݪâ'žxböªW½ªLµ•Ëüô§?]±…Ïäô6µ_ÿt¢ß~ý•è @€Û$ô  @€N ôa?û¦À›Ø'?¶­—\rIv衇f_ùÊWV Ûû„ÕúÛn»mlµ?k¿þ°²ÿ–[n©õ¼aOþ¼D¿ýúkeV @ ×ú½¯Î @€º'Ðçý웊FÓûä—i÷Ë^ö²ì¤“NZU4|énHê?å)O)Sm«eòöë‰þð¦RݯûÝï~YXÅ?Þ£ò¿MnÍTw?ÔG€ Ь€„~³¾j'@€ÈÚ~öM ‚6÷É/Ó‡ðFCØ‚'l4ý:üðÃsþeÎÓv™¼ýúÇûö_ýõµ7'lç”—è·_íÔ*$@€$/ ¡Ÿ|ˆ4 ÐMeígŸ·—ý²ö³o*rËÜ'?¶O?üáG[ð|âŸXUt=öÈN=õÔìa{XlµÉ?½_ÿäô6½_ÿäv>aÅ¿ @€@ÿ$ôûS="@€´&`?û֨ן(…}òËôúu¯{]î—â®Y³f´Ï ^ð‚2ÕvªLد2Á?þßMï×?½oØËß‹ @ ›úÝŒ›V @€Z°Ÿ}+Ì…N’â>ù…>qЗ¾ô¥Ñ<ßùÎwV}Ö³ž5Jìßã÷ˆ­¶Çÿë¿þkvå•WŽ~&ýMîן÷½öëïÅpÒ  @ Çú=®® @€Š¤°Ÿýô69’ŠwF.õ}ò‹Œ±Éc~ýë_’úï{ßûVÝqÇGIýÇ>ö±±Õööøßüæ7¹‰þøoj¿þ¼D¿ýú{;ÄtŒ蘀„~Ǧ¹ @€2³ö³¿úê«I †6n¼ñÆÙö³/"eº´O~‘þLó®w½k”Ø¿õÖ[W?ᄲ£Ž:ªLµƒ*3k¿þ°ÂÿÆo¬Õ"¼É–—è¿Ûzë­k=—Ê @€f Hè @€žØÏ¾'¼½]Ý'?6—^zé(©ÿ•¯|eUÑýöÛo´ZÛm·­Öñ· Lî×?½oÿ-·ÜR«Ñ¦›nší°Ã£ŸqÒü_ûõ×J­2 @€@&¡o @€:"Ú~öaõ½d]½ƒ§ûä—yÙË^–tÒI«ŠÞ÷¾÷%õŸò”§”©V™ãýú§ýmì×?¹ÊßÖZ†( @ ^@B?ÞL  @€@cËÞÏ>o‹I·ÆÂ½¾â¾í“_Fì#ùÈhµþºuëVéK_š|òÉeªU&B`¼_ÿt¢¿ýú'ýöëšC  @€Á Hè.ä:L€,[ ìgö®¿êª«VüØÏ~Ù‘iÿü}ß'?VôG?úÑ(©ÿ‰O|bUÑG<â£Õú{ØÃb«u| a¿þ¼DÓûõOïÛo¿þ‚©  @ ÓúŸÆ @€© ØÏ>ÕȤӮ¡ì“_Füu¯{]öªW½jUÑ5kÖŒ’ú/xÁ ÊT«LCᘰŠ?üŒ“þãÿÚ¯¿!tÕ @€ƒÐlèuœ¨"0½ŸýôŠû›o¾¹Jõ3Ën¿ýöÙÚµkWüŒ·É±Ÿ}#äµW:Ô}òc!Ï;ï¼Ñjý+®¸bUу>x”Ø¿ç=ï[­ã[˜Þ¯œèos¿þ°Êƒ 6h¹çNG€hF@B¿Wµ @€=Xæ~öy{Ù‡D¾ýì»;°ì“»°§{Hê¿÷½ï]U8$iCRÏ=÷Œ¯X‰¥ Lï×?¹Ïõ×__{ûÂ5u¼}ÏäøÀÖ~. @€šÐoRWÝ @€@ò!q4½—}øÿö³O>ti }ò«‡ê]ïz×(±ë­·®ªì„NÈŽ:ê¨ê'QC2“ûõOïÛã7ÖÚÎð&i^¢‡vÈì×_+µÊ @€š$ôk‚T  ®€ýìÓMß[fŸüú"|饗f‡zhvÁ¬ªô‰O|âhµþvÛmWß Õ”¤Àx¿þ¼/èmj¿þé/æ É~[œ%9<4Š B@BaÖI ÐoTö³ŸÜ&G²§ßcnQïì“¿H¨ü¿¿üå/ÏÂ%Ó¯ûÜç>£¤þX¾r%;-öëÏKô7µ_^¢ß~ýBO€:! 
¡ß‰0i$ `?{c  öÉo'JùÈGF[ð¬[·nÕ _úÒ—f'Ÿ|r; q–N„ýúóýáwyc¨j§Â÷„UüáÇ~ýU5•'@€¦$ô  @ ûÙ' ‰°O~$X ‡‡­WÂ<çœsΪÚþð‡Vëïºë®5œI}¸á†²+¯¼rô3Núÿ[÷~ýw»ÛÝrýöëïóÓ7 P¿€„~ý¦j$@€æØÏÞð蛀}ò—Ñ׿þõÙ+_ùÊÜF„¤þ _øÂå6ÐÙ;+0½_ÿä*ÿ¦÷ëŸÜÎÇnBN€Ðo„U¥ @`¸)ìg?¹—}Øú@2d¸ã±ÉžÛ'¿IݸºÏ;ï¼Ñü.ül¸á†œW¥¦쓟æ˜û¤‡¤þ{ßûÞU IÐÔßsÏ=Ól¼VõF`r¿þé}û›Ü¯:ÑÿÀ>°7¦:B€¬Ð7" @€\ûÙV Ø'¿#âÝï~÷(±Ÿ·%ÊñÇŸ}ôÑÝèˆVöN`ûõO'úí×ß»a¥C 0@ ý]—  @€ÀX ¥ýìÃÊû­·ÞZp$)`Ÿü$Ã2³Q—]vÙ(©Á¬:æ‰O|âhµþvÛm×­Nim¯fí×Vùßzë­µö}ÓM7Íòýáwo¼q­çR P¿€„~ý¦j$@€ÉØÏ>™PhHGì“ßÑÀý¾Ù/ù˳ðfÌôë>÷¹Ï(©àv»ƒZ?¼ýúC¢¿‰-î¶Ùf›,¬â?ã¤ÿø¿öëÄpÓI耀„~‚¤‰ @`ž@ØÏ>o/û°·}û¡-ö³7&û.ðµ¯}-{ík_›}êSŸÊíêCòìØcÍ8à€¾St¾ýèGG«õÃ6bÓ¯—¼ä%Ù[Þò–Î÷Q†)·_ÿxßþ6öë'úí×?Ìñ§× °< ýåÙ;3(,`?ûÂT$PIÀ>ù•ø’-|Í5׌’úçœsΪ6>üá­Ößu×]“m¿†ˆï×?ýżW^yevã7ÆV7÷ø»Ýín¹+úí×_+³Ê @€Àz }ƒ$"Ê~öa/ûµk×ÚÏ>‘q¡í Ø'¿=ë-§÷ IDATeéõ¯}öÊW¾2÷ô!©ÿ¾pYMs^­ „ýúóýMï×?½o¿ýú[ ¹ @€@Ï$ô{PÝ!@€tìgŸnl´lØöÉVüÏ;ï¼ìÐC;ýío¯êø3ŸùÌÑjý{Ýë^ÃBÑ[¿+øÇ?ã¤Óûõç}A¯ýú I 0[@Bßè @€5 ØÏ¾FLUhXÀ>ù '\ýM7Ý4Ú‚çŒ3ÎXÕʰMHHê?îqK¸šF ]_ÿú×¹‰þüoj¿þ¼D¿ýúÛ»³ @€@šúiÆE« @ aûÙ'M#P@À>ùrÈ»ßýîQbÿ–[nYÕãã?>;úè£"¡›Ê Lï×?¹²ÿ?þã?ÊWœS2ìן—è¿Ûj«­j=—Ê @€@ªú©FF» @`©ö³_*¿“hLÀ>ùÑv¶âË.»l´Ïù矿ªOxÂF«õ·ß~ûÎöOà ,S`r¿þé}ûo½õÖZ›¶Ùf›¾œ7üŒ“þãÿÚ¯¿Vj• @€À’$ô—§'@€åØÏ~9îÎJ`YöÉ_–|wÎ{ä‘GfozÓ›V5xóÍ7%õŸúÔ§¶Ò™Ÿþô§Ù7Þ˜m·Ýv­œÏI,K`z¯þðÿÛÚ¯r•¿ýú—5œ—Ê Hè—•SŽ’øÅ/~‘]uÕU¹?ßûÞ÷ix(\»vmö =hôßéŸ 7ܰ‘óª”|ûä1ýèGG[ð„­Õ¦_/yÉK²·¼å-1Õ•:ö+_ùJöÿðÙ)§œRª¼Bº.0Þ¯zEûõO&úí×ßõ‘¤ý诀„~c«g„€ýìfì˜À5×\“m³Í6Kmµ}ò—Êß铇ñ’úçœsΪ~üéŸþéhµþn»íÖXþþÏ{Þó²ï~÷»£mC¼¸S ìן—è¿kr¿þé}ûí×oT @€À2$ô—©ïÜ PH …ýì'WÜo½õÖ…Úí C8è ƒ²³Î:kiÝ·OþÒè{uâ7¼á Ù+^ñŠÜ>zê©£¤¯Ã?|ôI€C9${Ï{ÞÓÄ)ÔI —?üá³ém|ÆÉÿ¦öëÏû‚^ûõ÷rxé’ÐO*C€a LîgõÕW¯Ú"çæ›on&|ÉáxKœé-r6Ùd“FΩR}xßûÞ—ý×ÿú_³<¹Ë]îÒjwí“ß*÷ NöÏÿüÏ£Äý·¿ýíUý}æ3Ÿ9Z­¯{Ý«V‹}öÙ';÷ÜsGu~ó›ßÌvÙe—ZëW! ÌJô7±cø„Z^¢?üî®w½ëùõ™jЯTu /`?{#ƒ@ÿ®½öÚlçwΖá‹=7ÝtÓV:mŸüV˜{’›nºi”Ô?ãŒ3VüáþavÚi§e{Üãjó _†ûƒü`TßÓžö´ìÌ3Ϭ­n °R`z¿þÉí|Ö­[W;W¸fì°Ã£ŸqÒ?ü÷x@íçR!ôW@B¿¿±Õ3´.`?ûÖÉ@R!ù8Þj'¬z ‰É&_öÉoRWÝÓaû›ØÏûÔØk_ûÚì˜cŽ©Œö³Ÿý,Ûl³ÍVÔsÑEeøÃ+×­â&÷ëŸÞ·¿îýú7Úh£ÜDHüÛ¯?.nŽ&@€À$ô‡e}$@€@ö³¯SUz$ðw÷wÙ_ÿõ_¯ïÑe—]6Z­ßÔË>ùMɪwžÀå—_>JêŸþù«{ž0Ú‚'lçVöuá…f|ä#Wßÿý³~ô£e«TŽÆûõç}A¯ýúW%¬Ð7  @`…Àx?û¼½ì¯ºêªÜ•‰uŽ÷³ŸÞË>ìqo?û:„ÕA 9ë®».{ðƒ<Újgüºà‚ V%&ëh}òëPTGU#<2{Ó›Þ´ªš°º>lÁóÔ§>µÔ)§žûÜç®*ûOÿôOÙcó˜Ru*D€@»a¿þ¼D¿ýúÛƒ³ @ Ïú}Ž®¾ @`†€ýì ê˜Üjg\ïg>ó™lß}÷­í4öɯRE5 |ìc­ÖohM¿^üâgo}ë[£ÏtÄGd'Ÿ|òªráo)üMy Ð]°_^¢?ü.l!W÷Ë~ýu‹ªéHè§ -!@€@­)ígVÝ?ð¬µ*#@ é­vÆ­ú‡ø‡Ò«”'{’Çw\vê©§ævøÞ÷¾wöêW¿: ‰P/m „/‚Iýüã«Ný§ú§£-xvÛm·ÂÍ ‰ûÏ}îs¹Çúӟζ>^ôOàßÿýß³°²ruÿ8ùo¿þþÅ[ PU@B¿ª òX¢Àô~ö“Ûä„UøM¼¶ÜrË,lƒ3þ™Ü"gë­·nâ”ê$@ Q°29ì“Ó¯w¾óÙóž÷¼J-·O~%>…[xÃÞ½â¯È=cx3*$ý‹¼Âösßÿþ÷sý/ÿå¿d_úÒ—ŠTãz$0½_ÿä*ÿ¦÷ëÿ£?ú£õ_Ö»ñÆ÷HUW Ðm ýnÇOë è¹@ØÏ~Ö^öö³ïyðu@žþô§gøÀr[öÙË^VªöÉ/ŦВþùŸÿ9;ôÐC³o}ë[«ZòŒgò‘ì/ÿò/—ÜS§'@ ¼ýáwMï×?™èÿû®w½k*$ÚA€AHè"Ì:Ù°úñW¿úU¶í¶Ûö¡;ú0!`?{Ã. 
¼ÿýïÏžõ¬gÍlú1Ç“½öµ¯êš}ò£¸œ @x#>¬Æ?ýôÓWµ.ìg’úüãs[~ÑEe{ì±ÇÜ^…m|þ÷ÿþß ö\“HI`r¿þé}û›Ü¯:Ñÿ€< %mY ¾ÃåðÃçD€@$ô;$M$ð¡}(;í´Ó²ú§‚ÑQTö³oc?ûŽ$Í&ˆ@¸¦…­v~ò“ŸÌlÑßüÍßdÿãüB-¶O~!&uHà=ïyÏ(±óÍ7¯jux£+¼á5ý:ãŒ3²C9da/ÿþïÿ> ŸŽñ"@€@ñ~ýy_ÐÛÔ~ýÓ‰þvØ!Ûj«­Ê4_™Âöq^xaö?ÿçÿÌÂp^¤+ ¡Ÿnl´Œ@Vy½øÅ/ÎÞñŽwŒnª/zÑ‹¨$,0¹Ÿýô69ö³O8pšF€@´À¼­vÆ•ýõ_ÿuöÞ÷¾waÝöÉ_H䀎 \~ùå£-x¾üå/¯êAøòÛ°ZrõjآꤓNZØÛ‡>ô¡Ù%—\²ð8 @ V ìן—è¿ûÝï~[ÝÜã7Ûl³,/Ñ~7o{²Z¡²ßùÎw²vÚ)Ûd“M²SN9%{æ3ŸIˆD$ô ŒføÜç>7Jæ‡ÉSx…ÉÕýï0K«ì¾õ³~òVáÕÑܰ:"|íä—ÏŽ¿6L¶¼ ЦÀ¢­vÆm û|‡ý¾g½ì“ßfÔœk™ÿý¿ÿ÷ìo|ãª&„dVHêÿÕ_ýÕèßžð„'dŸýìg 5õÝï~wöœç<§Ð±"@€@³ý³¾È»Ê9ÃsoXÅ~ÆIÿñí×_EvqÙ?ù“?Ɉá>5Þã÷X\д* ¡ß*·“(&pä‘GfáËǯÇ=îqÙ¾ð…b…UIÀ~ö•ø&@ çE¶Úì¹çžÙ?þã?®±O~ωîå |ìcmÁsÝu×­ú÷°€ã­o}k¶Ã Ÿö+ò ‰­ï~÷»Eu ßó¾ˆ7ï zÛØ¯œè·_=a>ñij£>z}eáM•ÔßgŸ}ê9Z¨E@B¿F•¨Gà+_ùÊhUþ¿üË¿¬¨ðÔSO=zÕ#`?ûzÕB€ÀðžñŒgdgžyf¡ŽÿçÿüŸ³¯~õ«ëµO~!6õXàÚk¯Íç>þñ¯êåúOÿiÕüo…í ùw–-0½_ÿä*ÿ¦÷ëŸÜÎÇ~ýÅG·¾õ­Ñ÷$M¿^ñŠWd¯{ÝëŠWäHÐo”WåŠ üñٱǛ[àšk®Éîw¿û¯Ì‘£nakœé½ìÃïìgo€ @ ^àý¯ÿ•|ðÁ… î¸ãŽYØ‹5¼Â>ùÇw\6ëáý°ÃË^ýêWg÷½ï} ×ï@]Ûï„mxª¾¶ÝvÛ¬‰­.ª¶KyXÖ~ýÓûöÛ¯u´vÛm·ìâ‹/^õ{ì±Çhµ~xÚ‹å Hè/×ßÙ Œ¾Ôì%/yIvÞyçåjìµ×^Ù¹çžKjJÀ~ö†ÚX·nÝhµVÌGç·ÞzëѪ…D~ørм×~ûí7Jäï¾ûîíuÆ™$ ¾(7¬Ö+!«¼Â›/ùË«T¡,’XÆ~ýy_Ð;ÔýúÃÜí5¯yÍÌq¾ÀýðÃOnÜh! Hè)ÚúšœÀ[Þò–…7Âw¼ãÙóŸÿüäÚÞFƒRÛÏ>|)íÝîv·6ºîHJà™Ï|fö÷ÿ÷Qmúƒ?øƒìÖ[oÍ-ó‡ó™Ï”îVøTË~ðƒl£6*]‡‚èŠÀ2öëÏKô÷}¿þ°èða{ØÜañ¤'=i´Z»í¶ëÊðÑN½ÐïU8u¦+a˜°*ÿ“ŸüäÂ&‡ýÞ·ÜrË…Çuõ€¼ýìÇÛä„kâ>V¹víÚ?!Y~¾Î‹îˆÝjgž]¸þ†ùGqbƒø?ÿçÿdozÓ›²|ä#•ÂJÊYÛ6V®\èˆ@Ûûõç%úÃïúòìþЇ>4»ì²ËæFÓM7}3|Ç’í Hè·ëíl²÷¼ç=£dþ/ùË…ûî»o¥U[ OÐÒa?û¼½ìígßRœ†%Êlµ3ëTöÉ/Åz%’#a›œØO¼ÌC¸ç=ï9Z¥¿Ùf›õÊJg P—Àä~ýÓÛùüîw¿«ë4£z6ß|ól‡výŒ“þãÿvi¿þðFqøž¿"¯ç<ç9£Õúw¿ûÝ‹îjЯQŠ„}‡_üâgguV‘ÃGǼë]ïÊžûÜç>~YÚÏ~YòÎK€fÊlµ3Ý¢°%HxÈ;è ƒšm¬Ú $,1„D~˜Û5ñzÅ+^‘½îu¯k¢ju @ ×ãÿt¢¿‰/¿ÿýèÉÿÔöëÿ—ù—¨ï8 oZ„ùÞÞ{ïÝëñ¢sRÐO%ÚÑk³Ï>{”ÌÝBæ'?ùIvŸûÜ' ûÙ' @€@kaqHè×õòåuIª§K×^{í(‘ÿÖ·¾µÑf‡ï¬ɧm¶Ù¦Ñó¨œCïן÷½a±^ݯ?üÃ?\µ¢?$ú—¹_ÿÿñgW\qETW_ùÊWfû·UÆÁÄ HèÇ›)A °À¯ýëÑö:eVcí·ß~…öØ/ܘNïg?¹MNì›N7:Ä~öE¥G€öƒêÎ;-wê|í¹çžÙÉ'Ÿœ…}Y½ôYàg?ûÙhüÌ¿å–[ZéjX<Òô­tÄI ¸@د?/Ñ~Wdkݘî…/=ŸÞºg¼OÓûõ¿êU¯*õé¯G>ò‘£Õú»í¶[LWK€@„€„~–C Ä|úÓŸ%óÿõ_ÿ5¦ØúcO?ýôìÙÏ~v©²ó ÙϾvR @ w|p¾ ·©—ÕúMɪwÙ¿ýíoGIüÌŸnlûæk×®mû´ÎG€¿ßirå•WŽ~¦·óij¿þ¼/è­c¿þðîøÃKÇ6,âxéK_Zº¼‚ÌÐ7:4 ð²—½,;餓*Õ|à 7”úr3ûÙWbW˜ƒ8óÌ3³g<ã;<îqÝ+­ÖoœÚ Z‰‹ÌoêSEºñüç??{Ç;ÞQäPÇ @€@‹·ÝvÛÌDSûõç%úÃïÂ6mE_;î¸ã色¯'?ùÉ£ÕúÛn»mÙ*”#@ G@Bß° P£À—¿üåѪü‹/¾¸R­ñ‘sÎ93ë°Ÿ}%^…  @`†@Øjç!yH« ɰ’9¼îE «§vÚhEþ¿ýÛ¿%Ñ…Ë/¿<{ðƒœD[4‚ Lï×?¹Oûõ‡-{Æ?“Ûùl¿ýö«{ä‘GŽîqU^›m¶YvÊ)§dOúÓ«T£,ú†š^óš×dÇw\-µ½ï}ïËöÙgŸìª«®ýLîeþS+¿ìg_KøTB€Î 4½ÕÎ,˜°Z?¬no&xè’À¾ð…ìÛßþöè'|q`øoØ[y™¯ðwüw÷wËl‚s @€@M“ûõOïÛ_÷~ýw¿ûÝW%úoºé¦ì…/|a-½yîsŸ;Z­¾À‹júÕü”&0Z¾„ìüóϯM#$Öo¼ñÆÚꛬ(|qNØ[uüó =hýÿÞzë­9§J  @ }¶¶Ú™'ñæ7¿9;âˆ#ÒÇÒBs~ô£­OîO&úò“Ÿ´æöµ¯}-Û}÷Ý[;Ÿ @€@ûãýúó¾ ·îýúëê]ØÂ'$õ÷Úk¯ºªTA Hè2ì:]—@Øû7ÅmÂGåBÂ~2Y?Nào²É&uu_= ÐhÜyçûX ÓãÿøÑÞúVëǨ9¶ ×^{íªÕü!áßÄv OyÊS²~ðƒ]`ÑF¨Y`¼_^¢¿‰ýúË4ÿU¯zUvâ‰'–)ª · HèJ„o¬{åæ3Ÿ)Qºz‘ 6Ø 7a?Nàßínw«~5 @€À`žõ¬geïÿû“ê¯ÕúI…Cc¸îºëVlÙ3^Õ_u‹ÅðÝN~ô£l¹ª  @ ka¿þ¼Dø]›Ÿ$ nzÔ£F«õwÝu×®1j/¥ Hè/=Ð5w½ë]£-v~ó›ß4ÚtûÙ7Ê«rø½À>ðd¾¤,ìݺñÆgáÓdá¿a•þá‡>úô€¡ „„þx_þÉ­{ÂE^ûí·_öÉO~²È¡Ž!@€£„~X¼~ÆIÿðæpÑûNY·¼å-£“^XóíÏx[ñÃI`¸?ùéo²S?q÷ìì³Ïn!| üå—_žm³Í6Ÿkˆ'¸â³Ob·õ™«¶ü“·’åUÔÂ'ÇÆIøÉ„|Þï&ÿ}úo¸á†¢D€ÀuëÖ­OôO&üÖ>Ó¯wŸðgÙ#wóIÃØißæŸáû(|ÞyçeyÌcZéÚþûïŸýÍþ·d[Ý÷î­œÏIt]@B¿ëÔþV>uÞ÷³×¿óâ솟ßÔÊùÂIì}Úµ„~s¶j&@ [o8ûîÙ9眓›ŒŸ—xŸNÔ‡•õ^,W ¬¬ +ùÇ«ù¿vþ ¬ÓOüóå6ÌÙ  °d ýø„}øwÙe—ì²Ë.‹/\²Ä&÷Þ0;ê»fû=fû’5(F`8úɵžV¸âªŸf_¸ðGÙ—/ÉFIm½N;í´ì/xA[§Ìy$ôj%@`€\C„@ÆóŸýâ·Ù¦ûôK#­g,0ßY$´úß;ì°ìÔSO/X²ÄÃö°ì‘¾9{ì÷ÉvZ»YÉZ#0 ýáÄZOk¯~õ«ÙÇ>ö±ì£ýèh_¹&_m´Qv饗f;ì°C“§\Ýúƒ ¹ 0CÀ®¡A ¿æ;ý­ž '`¾çõ¡}(;ðÀã •8z÷ÝwÏžüä'gOzÒ“F[@ºo•@Td°«ú;=êÈÁbè8i+.xãŠ_MO.¼ðÂõÉý«¯¾ºÀ'<á Ù§?ýéFêj¥Ó×½¡Žý&0ì4ûz⟸~%þ[l[|Åñî[•øî¹€„~Ϭ{õ 49øä'?9Jî‡}÷þóŸnø\=ò‘,|¼W ˜( U ÉûÚPMõ›@ªæ;©FF»hZÀ|'N8,|Ík^S¨Ðš5kÖ¯Â{âo¶Ùf…Ê9È}«ˆ’c†* ¡?ÔÈëw)¶&ã/Ó ÿýå/9·­»ï¾{öµ¯}­TºCÀDÁH @`¨mÝ׆ê«ßR0ßI)ÚB€@›æ;ŵö¾yÌcæØ`ƒ Ö¯ÂIü{Ýë^ÅOq¤ûV–C' 
¡?¸ëp¶'a¿ºñªýðß›nº)·ùGydö†7¼¡J×]ÖDaÐá×yƒhû¾6hl'°dó%Àé Xš€ùN1úÛn»m´oþe—]¶ªÀ=îq+ñ7Úh£b•V8Ê}«ž¢½Ðï}ˆu°NeNB2œÜÿð‡?œ…›íäëÜsÏÍöÚk¯:»;˜ºLj%@`J`™÷5Á @ ]óv½tÌwŠÅâ°ÃËN=õÔõo¼ñÆ+VâÿÁüA±Šj:Ê}«&HÕôR@B¿—aÕ©¦R™„mxÆÉý°-Oxí´ÓNÙ¥—^šÝõ®wmªû½­×D¡·¡Õ1¤r_(š0ßiÞØHSÀ|gq\>ô¡ex`vŸûÜgýJü¿ø‹¿X\°Á#Ü·ÄUuç$ô;BhS ʼn@øÝÜ?Ûo¿}ö¶·½­M’^œËD¡aÔ J¤x_+Ñ E( `¾SÉ!ôRÀ|g~X×­[—{ì±£Dþ¾ûî›ÌpßJ&’ €„~‚AѤtRŸÜpà YØw‹-¶H1Á–™($M"@ Ôïk­ 8 ˜ï $кI€À*ón ÷­nÆM«ÛÐoÇÙYz"`"Г@NuÃD¡ŸqÕ+ ¸¯-6r¾˜ïô%’úA€@¬€ùN¬XÇ»o¥­HS@B?͸hU¢&‰¦b³L**N€@gÜ×:: '-`¾M¦=0ßéf Ý·º7­nG@B¿gg鉀‰@O9Õ …~ÆU¯X,ྶØÈú"`¾Ó—Hê±æ;±biユF´"M ý4ã¢U‰ ˜$˜ŠÍ2Q¨¨8p_ëlè4œ@´€ùN4™ôDÀ|§›tßêfÜ´º ývœ¥'&= äT7LúW½"@`±€ûÚb#Gè‹€ùN_"©Ä ˜ïÄŠ¥q¼ûVqЊ4$ôÓŒ‹V%*`"h`*6ËD¡" âtVÀ}­³¡ÓpÑæ;Ñd  ÐónÒ}«›qÓêv$ôÛqv–ž˜ô$SÝHe¢ðœÃŽÏNÿ93‘ºóÙ%žÕÏ èKp_[ »“XŠ€ùÎRØ”ÌwB‰&¤rß*ÑtE4.Ðé„þšw_ôóÏÌvÝeÇÆáæ`º]8ãÄì ö^j›Ú<ùtÿ?ú…ÙÑGÒf;—‰@c´K­x™…ÞxzvÌ §E÷¿OWÑW 3Uï‡g}øÜ¹÷Ϫõw²Á†º¯5ˆ«j‰ ˜ï$Íé@ÕùˆùNóCÁ|§yã&ΰÌûVÙþÈS–•S.V ³ ýYI°C~Röž·ëPëñUoèµ6f •Iè/Ý)+ ,c¢&îO{öQ•Ú ßö‹¯W®Cš({?¼ø’ïf»=úéÙ–[lž]Õçg6¯lýMõ·‹õzÀíbÔ´™@9órnJX$Pv>b¾³H¶¾7ߩϲ͚–qߪÒ?yÊ*zÊÆ t6¡¿Ëe—^~åªþ.zø*s|Ùz™s¥XFB?ŨhÓ<¶' ûìÿ¢ìÜ/^T[PRødRmQQ¯ÊÜ'ïï‹îéeêïp ñ€[¢*tDÀ|§#ÒÌÎ ”™˜ïšp:Õ IDAT´fóv½ë:[Û÷­ªí–§¬*¨|Œ@'úãw²gutÙ[Q”¹¡Ç-õc%ôSöM ´9Q˜u“·iÞŠû¼ïËIê×) ÄÞ§WµHè7U¸Í;TÌwR‰„vôMÀ|'ýˆšï¤£¼¶yߪ*$OYUPùXN&ô§¿82|Qääj}_; ê=^B¿^Oµ5/ÐÖDaÖÊüØkV^bQâ³yEg P] 6¡_ýŒjð€k Ž€ùÎpb­§i ˜ï´óöÍë8c[÷­:Ú*OY‡¢:b:™Ðßjí^ٺ߰¾ŸáKg§÷¢¶Z5fÔ{¬„~½žjk^ ‰Â¬ýôÊ~ivÞJÿ¾C¤ùh9CŸ<à¶]¸í›;#e ˜ï,KÞy ¬0ßiD˜ï´o^ÇÛ¸oÕÑÎP‡"ûÜÇNÉ$bCßÜñúÍÙª¹¦' Ó¿ ½¨ãMǼUÿuÔÛŒ²Z ,p/_lT÷pëUtÌwÒ– KÀ|§ýx›ï´o^Ç›¾oÕÑÆP‡š^NýMNª¾qؤHÑ„âtbnÒy¸³V¹ÏêëäCuìµ2öú8mnܱ7û²×¯6â‘7QŠcU=SJè·á÷€¼Ÿöì£bسªoÍ;™ûZT(L Óæ;qá3ßYìU&ÁÑÆý×|çÎØµám¾³øoÅåš¼o•kÑêRò”ù’±Ïy×í¢ùÙØX–½.6½Ð*¶Ièç%ßóÓY´ BÙÕ³yI¥ñ„&6¡›Üšr™AÞfB?L†/½üÊØ±9:¾Ì$±Ô‰ ’ø(ձÚœ(L_Êü½ÖÍ™w==GчÜé„þ®Ý1úͽY_~^¤Í±ÞyoHNêªîó¶¶ï7Eî‡m¯ÐoÓ{ú7ïS|EÆS8&vµhÑzÝ׊J9Ž@÷Ìwâch¾3ß,fžÕæý×|'ËÚô6߉¿¶(QL ÉûV±Ì?Jžr¶OÌýa\K“yÊñ9Ê&óÇå›z&+3;“П~àŸ58¦o&EQì ÚY[í}ä!£I`Œƒ5k¿¦YƒdÖJÑØwŠšüCYô’·ÒpV¿š\•XÇÌNûž]¦ehj¢wƒOaLÏúÕ¶Øã'ûèz0ý¦Ý¬kâôƒyس®#17ÝEíÍ{“xÖCcÑXÇúÆ¿ÌûM‘ûaÛ ýX¿Øã'Çÿ¢Owä‘YÛÓM*Å^^%ôcÅO »æ;wÄÎ|gõóêô¨6ßɲ˜çkóÍ]°c¾ÓÝûƲ[ÞÔ}«®~ÉSÞ!Ù…L™7 æm÷³(àmüû¼„ؼ÷³VÄ$ãšîŸÄGÓÂË©¿©‰BÞM`Ñõ¨iY7ÛEŸ†‰}óqÜY׃yŸžZô…]ó>¾WöSW‹Úþ½‰ëW›ñXÆý&æ~Œc¿7¶þ6½çMn‹LóþvÊ|tuÑ5Å}m‘'ÐóÅŸþ5ß1ßÿÅǼ‘;1ß¹óºj¾ÓŸ{L=iê¾UW[å)ï”L=O9+A¿(’·;˲s:cõN$ôóƼ‡Úið"7ã¢I y[íÌJÍKHO×Wt`ä%% ĺ.Z‹ê™•À+Ò¾˜7Wµ£‰—øhBuùu65QÈ[y¼Ì7¨ª|oHˆR™›tÞõ ÈÊõ*«íËÜæ%ô‹$_CùØOd´eÜoRzÀmÛ{^B¿Èý°­7Ý×–Òm ˜ï|¾µùÎ|&óÕ>æ;ù+ôÍw ]r4G ©ûVèò”«SÎSÉåæ‹”s”HèOŠE ú¼$Z‘Um‹¶ÞÉœy ø˜zÙËäÅ£È²Ž VÑ:òxEߨç(¿¢í«rœÄG½tË65QHm,×ñClŸÊ®ºÉKf.ºöGXÑ>æÈ¼öƼ “ç3ëÝv<–q¿‰¹†x4¹b­mïY ý˜ûá´ß¢ï*s•u_+£¦ n ˜ïì]8pæ;ó©ÌwVú˜ï¬Nè›ï¾Ü8°£ }yÊÕK5OYf'—ÉÞ¥úFE'úÓ7ÈE7‡¼wPŠ>OŸkœ@*²ÕÎ8à17ô&÷²_ÖaºÿEW·N¶76æmõUâ£-évÏÓÔnÞ…¿È›‹Mõ~úMË¢ òÉöÄ^_g]Sõ1ïš[ô:›žwí‰5Êó™uÏj;˸ßÄÜ›Nè·í=+¡s ˜ns‘O·,úÛšþw÷µX1Ç讀ùNñØ™ïÌ·2ßI7¡o¾“?vÍwŠ_ÿR:²©ûV}ŒÍYÅÞWŠ<£ÊS‹d•á ©¾Q‘|B?o•f‘éy«í‹”›õ÷SÞyv¶îÇ7¬-³ê‹I`TÙV¢ØÐmÿ¨:V¶‘Ä(#c"PF-ý2MMRKèÇN:fE.æï³ìõ oÂSt¥| ýEo çõi;˸ßÄÜ›Nè·í—Ð}ƒ{:fúéßO´@Êæ;qÑ)z?µšïÿ²á2s«`\4æ;»¯èM{›ïÄ]W'ÐÔ}+®«–§¬*Ønù¢÷v[UýlÉ'ôËÂWÙ{vÖ$`’{Þ1&±èËÃ9cWˆVÕjˆéÿ¬3M¿ƒ–Š„~µ±‘jé¦& ±×nÒ'/A³Rx²m1ïp—M¢Vio ý2FE®_Uú7=NŠÆc÷›ØûAlìŠÖ¿ ï¼ÜØ{™„~“WEuž€ùN\Ì‹Þ_óúE“¨UîO±÷ÌÉÞOß?ÍwâÆÆôÑEç#ãr±±+Z•ñTv~i¾Smì(=_ ©ûVUwyÊnå)Ën=[uœ4]>é„~^ò¡è ͼ‰ŨtÞ>Êã`,Z!Wô†;®/ïÓó_fÂÕô@ª{‚;Éi«úmI·{ž¦& uìÙ]—DÞõ´È§–òγ"!öz8>_•’*×éö–1*rþeÅ£íûMlü‹ØÍ»ßÌš#,Ë»jB¾jù"×÷µ"JŽ!Ðó¸8šïÌ÷*rÏ^Öý×|çÎØ•™Ë†Ò1ã¿ê|¥jù"Ùæ;E”Ò;¦©ûV•žÊSæë¥œ§Œ}&­2>Ú,›tB¿ÈJùX¬¢ƒlÞJÆE7Å2ƒ%vÒúûÑýX«²Ç×±â£È±lûª”3¨¢—nÙ¦& yá˜7%ë«ó*¦®2×ÃÐïúe¯±E®_1†‹ÆAl]mÞobã_ÄnÙ ý直UË/;áßÝ׊(9†@?Ìwââs½½ß[b¾S<&1ñµšïÜa»(w1+1ÞUç+UËEæ;E”Ò;¦©ûV•žÊSÎ×+û ]%&‹Ê–½G/ªwÙÿžtBÞ*ù²pE¿T1ÔŸ÷‡Z$Wv°Ù!¯ß‹>1PÖªl9 ý²rÊ-K ©‰BÕoS¯Ó#6Q:ïÜ1ü²×þ?à.+㸶u¿‰¬KÑúcë­küW}@­Z¾È5Än%Ç臀ùN\Íwæ{¹·9¦hTbâa¾s‡@•ÄZŒwÕùJÕòEÆùN¥ôŽiê¾U¥§ò”ÅôRÊS}f,Ö³tŽJ6¡Ÿ·²µ.¶"ïR·½Bºo±+ 
Rþc)ú©ˆIƒ:'u›P‰@šéÔÕÔD!/)½¬¿Õ˜Iù¢ÈÄÔUöæÙ÷„~ŒañÈ««ÉûMlüc¯ýEë_–wÕÔªå÷µ"BŽ!Ðó¸XÆÜ;ŠÞ¦[`¾S<&1ñ0ß¹S Hî#Ï+Æ»ê|¥jù"£Ès|¥ôŽiê¾U¶§ò”e—^~ea¾eå>¦Xö]¸£K:0Ù„~l‚!Ưȗµ¹‡þ¢¶çíÃW¦È§«Ž·B¿Eu´)ÐäDaú Xª¬”™e2¾^Ϋ;fR¾È>&éZöæéwQîü÷˜x,ªµîûMlücûR´þeÿª¨UË/Šwøw¸E”C æ;qqŒ¹'½M·À|§xLbⱨVóEB«÷П÷œQu¾RµüâÞ˜ï1Jñ˜&ï[eú+Oy§ZÝ×Ñ2ñ(Z¦ì=ºhýË:.É„~ÞÄ&f«œI̼º}9n‘=±æ½)Ðô`™NŽû»¨_m ²:ú?}qH¥om¢vÏÓäD!ïzRæS+óD¦ÿæò&ÜU§Ïó÷YözP¥½Uø¦Û[fUÓtÌó®_UúW%±YUï7±ñ]Ñú—å]õµjù"ñv_+¢äý0߉‹£ùÎ|/ó;}ŠÎGÆ%ÌwVŽ-ó¸kÓŽnò¾ë(O9_¬êsclšP]~MNf]SÞó¶cjéxÞÇþf}´múo³ìlúïsÞGéÊ^«$`c’&Q¶½“u½~-#e]ÙûM¬glìbê_†wÕÔªå‹ÄÛ}­ˆ’côCÀ|'.Žæ;ó½ÌwÒK臙ïä[ó¸ë_*G7yßŠí£æ;/Z±ø16áe¾SüÚ4´#›¾oÅxÊSÓJ1O9ýfLlî5ô|:þ±×¹bzqG%—ÐÏ[ÍZvé$ÅôM6üÛtB-okŒÉÄXÞÀÌk[Ñz^}1ŸD¨sOà¸a3ÿè<ë˜~åÅ¡®ägÕ~J|TL³|Ó…¼wócÅyry×y7§¼vľY»2¡èõpº©$ôóîóFñôõ+•x,ë~ÿ&úËÿUP«–/rÅu_+¢äý0ßÙ»p ÍwæS™ï¤›Ð7ßÉ»æ;…/IØô}«hgå)ÏÌvÝeÇB\)æ)óÚ“i*þ…@ç”\B¿©ý¦óêLÆÝ£H²¹h#/aó.O̾ŽUJLù¼„~ÑwÀòâcÓÎ2Çš”QK¿L…¼¿‹˜›Hžb^óÞÍ»æýÛ çÏûû\´Ê¿èõ0å„~ÑkPÑûȸ¯mÆcY÷›Øø7™ÐoÓ{㪠ùªå‹\}Ý׊(9†@?Ìw>_(æ;ó™ÌwVû˜ïX¡_èââ h6î[E%Oùˆìs;¥U–jž²Ê® y»"¤°è8¹„~ÕíZf°E³˜}žµ1憞70ЬÜHú˫頼$c¨zQâ/¯O¡\Ìêþšº0³‰¦…—S…¼Ò¡·e“úyg‹þÆÂùf}é÷¢Ò¬¿ÏEí¹NF?¥ú¡]‹’ú³|¹¶eÜob㻢#¶þ6½óþÞ£é+ „þrî ÎJ ¯æ;«?¡=kóùIóü«Cì|Ä|g¥£ùN_ï:ÕûÕÆ}«H+å‹Ô‘wŒy¬RѹA•ñ_õµjù"WO÷µ"JŽ!Ðó,kêþZö~d¾sz6½nü×Våþk¾“ÍjjüW¯T-_ä m¾SD)½c–yß MæÍŠ>‹ÈS~¾Ñ9ïy¸ŽÜk“Of…~“T7ºLê’L«žeO‚F‘-@R¿¡¤Õr­™~ ¯û“r­RŠ@sîkÍÙª™@jæ;©Edyí1ßYž½3/GÀ|g9îUÏšÂ}«j”'Д€„~S²êí¥€‰@/Ú™(ô3®ezå·Œš2]p_ërô´@œ€ùNœWŸ6ßéstõ-OÀ|§›ãÂ}«›qÓêv$ôÛqv–ž˜ô$SÝ0Qèg\ËôÊn5eº,à¾Öåèi;8ó8¯>m¾Óçèꛄ~Æ€ûVb©'õ Hè×oªÆ H|ô3¸& ýŒk™^yÀ-£¦L—Ü׺=m''`¾çÕç£Íwú]}“ÐïÏpßêO,õ¤~ ýúMÕØc‰~×D¡Ÿq-Ó+¸eԔ鲀ûZ—£§íâÌwâ¼ú|´ùNŸ£«oúýî[ý‰¥žÔ/ ¡_¿©{, ñÑÏàš(ô3®ezå·Œš2]p_ërô´@œ€ùNœWŸ6ßéstõMB¿?cÀ}«?±Ô“ú$ôë7Uc$>ú\…~ƵL¯<à–QS¦Ëîk]Žž¶ˆ0߉óêóÑæ;}Ž®¾Iè÷g ¸oõ'–zR¿€„~ý¦jì±€ÄG?ƒk¢Ðϸ–é•Ü2jÊtYÀ}­ËÑÓvqæ;q^}>Ú|§ÏÑÕ7 ýþŒ÷­þÄROêЯßT=øègpMú×2½ò€[FM™. ¸¯u9zÚN NÀ|'ΫÏG›ïô9ºú&¡ßŸ1à¾ÕŸXêIýúõ›ª±Çý ®‰B?ãZ¦Wp˨)Óe÷µ.GOÛ Ä ˜ïÄyõùhó>GWß$ôû3Ü·úK=©_@B¿~S5öX@⣟Á5Qèg\õŠÅîk‹A /æ;}‰¤~ +`¾+–Æñî[iÄA+ÒÐO3.Z•¨€‰@¢©Ø,…ЀРÐY÷µÎ†Nà D ˜ïD“)@€@OÌwºH÷­nÆM«ÛÐoÇÙYz"`"Г@NuÃD¡ŸqÕ+ ¸¯-6r¾˜ïô%’úA€@¬€ùN¬XÇ»o¥­HS@B?͸hU¢&‰¦b³L**N€@gÜ×:: '-`¾M¦=0ßéf Ý·º7­nG@B¿gg鉀‰@O9Õ …~ÆU¯X,ྶØÈú"`¾Ó—Hê±æ;±biユF´"M ý4ã¢U‰ ˜$˜ŠÍ2Q¨¨8p_ëlè4œ@´€ùN4™ôDÀ|§›tßêfÜ´º ývœ¥'&= äT7LúW½"@`±€ûÚb#Gè‹€ùN_"©Ä ˜ïÄŠ¥q¼ûVqЊ4$ôÓŒ‹V%*`"h`*6ËD¡" âtVÀ}­³¡ÓpÑæ;Ñd  ÐónÒ}«›qÓêv$ôÛqv–ž˜ô$SÝ0Qèg\õŠÅîk‹A /æ;}‰¤~ +`¾+–Æñî[iÄA+ÒÐO3.Z•¨€‰@¢©Ø,…ЀРÐY÷µÎ†Nà D ˜ïD“)@€@OÌwºH÷­nÆM«ÛÐoÇÙYz"`"Г@NuÃD¡ŸqÕ+ ¸¯-6r¾˜ïô%’úA€@¬€ùN¬XÇ»o¥­HS@B?͸hU¢&‰¦b³L**N€@gÜ×:: '-`¾M¦=0ßéf Ý·º7­nG`aB¿f8 n ì´ïÙÝl¸V¯˜ž(à!@€ÀPÜ׆yý‚€ù΢¬0ß)¢´ücÜ·–-èŽÀšoæÀÛºÓ\-%°\åú×uv…º$ÕC€@×Ü׺Aí'0[À|Çè @€Àæ;Ý î[݈“V¦! ¡ŸF´¢#& Ô‚fš(ô#Ž]íÅ?áƒ+š~ûë]íŠv÷@À}­AÔ3Ìw e ˜ï,Sß¹§Ìwº1&Ü·º'­LC@B?8hEGL:( ý~ª§½ð€ÛÓÀv´[îk œf( 1RÉ! ˜ï4F«âæ;%ЖPÄ}k èNÙY5·Ýþêlë5œtL`Íš5+Zì6ܱj. 
°PÀ|g!‘ @€@i ýÒt  @€x¸ñfJ @€Ý0ßéV¼´–º% ¡ß­xi-t\ÀnǨù @€ÀBó…D @€¥$ôKÓ)H€â<àÆ›)A€tKÀ|§[ñÒZè–€„~·â¥µ Ðq¸ æ @€ Ìw9€”Ð/M§ ˆð€o¦ Ð-ónÅKk  @ [úÝŠ—Ö @€@Ç<àv<€šO€,0ßYHä PZ@B¿4‚ @ ^Àn¼™ @€@·Ìwº/­%@€n Hèw+^ZK€ð€Ûñj> °PÀ|g!‘ @€@i ýÒt  @€x¸ñfJ @€Ý0ßéV¼´–º% ¡ß­xi-t\ÀnǨù @€ÀBó…D @€¥$ôKÓ)H€â<àÆ›)A€t«NJ1 IDATKÀ|§[ñÒZè–€„~·â¥µ Ðq¸ æ @€ Ìw9€”Ð/M§ ˆð€o¦ Ð-ónÅKk  @ [úÝŠ—Ö @€@Ç<àv<€šO€,0ßYHä PZ@B¿4‚ @ ^Àn¼™ @€@·Ìwº/­%@€n Hèw+^ZK€ð€Ûñj> °PÀ|g!‘ @€@i ýÒt  @€x¸ñfJ @€Ý0ßéV¼´–º% ¡ß­xi-t\ÀnǨù @€ÀBó…D @€¥$ôKÓ)H€â<àÆ›)A€tKÀ|§[ñÒZè–€„~·â¥µ Ðq¸ æ @€ Ìw9€”Ð/M§ ˆð€o¦ Ð-ónÅKk  @ [úÝŠ—Ö @€@Ç<àv<€šO€,0ßYHä PZ@B¿4‚ @ ^Àn¼™ @€@·Ìwº/­%@€n Hèw+^ZK€ð€Ûñj> °PÀ|g!‘ @€@i ýÒt  @€x¸ñfJ @€Ý0ßéV¼´–º% ¡ß­xi-t\ÀnǨù @€ÀBó…D @€¥$ôKÓ)H€â<àÆ›)A€tKÀ|§[ñÒZè–€„~·â¥µ Ðq¸ æ @€ Ìw9€”Ð/M§ ˆð€o¦ Ð-ónÅKk  @ [úÝŠ—Ö @€@Ç<àv<€šO€,0ßYHä PZ@B¿4‚ @ ^Àn¼™ @€@·Ìwº/­%@€n Hèw+^ZK€ð€Ûñj> °PÀ|g!‘ @€@i ýÒt  @€x¸ñfJ @€Ý0ßéV¼´–º% ¡ß­xi-t\ÀnǨù @€ÀBó…D @€¥$ôKÓ)H€â<àÆ›)A€tKÀ|§[ñÒZè–€„~·â¥µ Ðq¸ æ @€ Ìw9€”Ð/M§ ˆð€o¦ Ð-ónÅKk  @ [úÝŠ—Ö @€@Ç<àv<€šO€,0ßYHä PZ@B¿4‚ @ ^Àn¼™ @€@·Ìwº/­%@€n Hèw+^ZK€K¸ù曳 6Ø R ê~À­£M•:¤0 Ð+:ææ;½:C€‰ Hè'Í!@€t>ÿùÏgßøÆ7²#Ž8¢tb¿®Üð°}ÒI'e»í¶[¶×^{¥‹¦e @€@§Ìw:.%@€ Hè0èºL€åöÙgŸQRÿðÃ/•دšÐ‰ü“O>y}2ÿsŸû\ùÎ(I€È0ß1, @€@ºúéÆFË @ A°jmï½÷µl‹-¶ˆNì—MèO&òüãÎî¹çZŸàÑ$ Ðuó®GPû  @ Ïú}Ž®¾ @€@#ûî»o6¹2>$öÃ6GWß @ qØUkUdu~=e  @€²æ;eå”#@€õ Hè×oªF˜@«ô­VØ Ò] ˜@«ôÍw ºæ @€@’úI†E£ @ Km¬Z³:¿K#B[  @€@ÿÌwúS="@€n Hèw3nZM€‰ 4¹JßjµÄ‚­9 @` M®Ò7ßè ÒmˆÐ&S€¬hrÕšÕùF ‚€ùN QꈄþÐG€þ @€@mM¬Ò·Z­¶ð¨ˆ¨A ‰Uúæ;5F 0 ýÁ„ZG  @ i&V­YßtÔÔO€Ä˜ïÄh9–Ô/ ¡_¿©  @`Àu®Ò·ZmÀI×  @€@Âu®Ò7ßI8КF€I Hè'"@€® Ô¹jÍêü®Ží&@€ý0ßéw|õŽÒÐO;>ZG€¨c•¾Õj ¼& @€ Ô±Jß|g@FW  @ 6 ýÚ(UD€î¨cÕšÕùF ²€ùNÊÑÑ6è³€„~Ÿ£«o °4*«ô­V[ZØœ˜ˆ¨²Jß|'Ú¡ @`B@Bßp @€ TYµfu~Q% P»€ùNí¤*$@€ $ô9€”(³JßjµrÖJ @€Ë(³Jß|g9±rV臀„~?⨠ @™UkVç'HM"@€f ˜ï @ ] ýv½&³Jßjµ Ý%@€=ˆY¥o¾Ó“ ë,M@BiôNL€CˆYµfuþF„> @€þ ˜ïô/¦zD€é Hè§-#@€žY¥oµZO‚­ @` EVé›ï tpè6Ô* ¡_+§Ê @€Àj"«Ö¬Î7r @€. ˜ït9zÚN€]ÐïR´´•:+0o•¾Õj «† @€óVé›ï* @  ýzÕB€æ Ì[µfu¾ÁC€ôAÀ|§QÔH]@B?õiôF o•¾Õj½ ¯Ž @€· ä­Ò7ß14 @€@}úõYª‰ÌÈ[µfu¾AC€ôIÀ|§OÑÔHQ@B?ŨhôV`r•¾Õj½ ³Ž @€A L®Ò7ßôPÐyh@@B¿TU @€Y“«Ö¬Î7N @€> ˜ïô1ªúD€©Hè§ í @€@CW|ö© Õ¬Ú²Ï;æË£¢ï:þÏÊV¡\C;í{vC5«–š0ßiR·\Ýæ;åÜÚ(e¾Ó†²s @ 9 ýælÕL€$<à&†¸ðâëGÿ]·J¯qo‘ÜÝ'@ ³æ;é…Î|'½˜Œ[d¾“nl´ŒE$ô‹(9†ð€Ûáàizëp['wBÔ"`¾S £J"`¾3@ë&½ÐïmhuŒwxÀ5ð€[ÜÊ‘HIÀ|'¥hhKêæ;©GHû 0_@Bß!@€@ϦpwzÔ‘=ï±î(.pÅo\q°ÜâvŽ$@€@Jæ;)EC[R0ßI-"ÚC€júÕü”&@€@òp“‘.QÀîñš5 ˜ïÔˆ©ªÞ ˜ïô.¤:D€ÀÀ$ô>tŸþ xÀíŒõ°¼€ÜòvJ @ %󔢡-© ˜ï¤í!@€@5 ýj~J @ y¸É‡H—(àw‰øNM€ÌwjÄTUïÌwzR"@`àúºO€@ÿ<àö?ÆzX^Àny;%  ’€ùNJÑЖÔÌwR‹ˆö @ š€„~5?¥  ¼€ÜäC¤Kð€»D|§&@€@æ;5bªªwæ;½ © 0p ýÝ'@ ÿpûc=,/à·¼’HIÀ|'¥hhKjæ;©ED{ PM@B¿šŸÒH^Ànò!ÒÀ% xÀ]"¾S @ Fó1UÕ;óÞ…T‡¸€„þÀ€î иý±–ð€[ÞNI¤$`¾“R4´%5óÔ"¢=¨& ¡_ÍOi$/à7ùià<à.ß©  P£€ùN˜ªê€ùNïBªC \@Bà@÷ è¿€ÜþÇXË xÀ-o§$R0ßI)Ú’š€ùNjÑTЯæ§4’ð€›|ˆ4p‰p—ˆïÔ¨QÀ|§FLUõNÀ|§w!Õ!. ¡?ð ûô_Ànÿc¬‡å<à–·S’) ˜ï¤ mIMÀ|'µˆhª HèWóSšÉ xÀM>D¸D¸KÄwjÔ(`¾S#¦ªz'`¾Ó»êÐøÐ}ú/à·ÿ1ÖÃòpËÛ)I€”ÌwRІ¶¤&`¾“ZD´‡Õ$ô«ù)M€ä<à&" \¢€Ü%â;5j0ß©SU½0ßé]Huˆ Hè|è>ýð€Ûÿëay¸åí”$@€@Jæ;)EC[R0ßI-"ÚC€júÕü”&@€@òp“‘.QÀîñš5 ˜ïÔˆ©ªÞ ˜ïô.¤:D€ÀÀ$ô>tŸþ xÀíŒõ°¼€ÜòvJ @ %󔢡-© ˜ï¤í!@€@5 ýj~J @ y¸YöœÃŽÏNÿ9+bõóÏÌvÝeÇFâ·fãÝWÔû3NÌ:`ïFÎUG¥g}øÜ¤ÛWGgÕá·I]u @ =óóE£Í|çN¡ö={—'@€„$ôަ @ ¸pg£‹/ùn¶Û£Ÿžm¹ÅæÙõW}¾ŽáÖ¹:$ô;2 &@€@®€ùŽùŽùÎ싃ùŽ 'ú% ¡ß¯xê V xÀõ€›÷g±Ëe—^~åèŸ$ôï²bÍE”Ý0ß1ß1ß‘ÐïæÕK«  / ¡o¦:%à×îô€=á§gÇœpÚú_KèKèwꢦ±È0ß1ß1ß‘Ðwq$@€ÀP$ô‡iý$@`°pÛÀM}°Ièß!AO}´jŠ ˜ï˜ïHèKè»Z8ŠÝÐï~ õ€s<àzÀõ€ë×e’}0ß1ß1ß1ßéûuNÿ 0Ð7 Ðs¸p=àzÀíùeN÷ ™ï˜ï˜ï˜ï¸ @`(úC‰´~ 0X¸p=àzÀìPÇ Œ€ùŽùŽùŽùÎ`.x:J€Àà$ô? Ðw¸Ý{ÀÞã~zŒ~àŒ³ƒØ»ôе‡þtöÐ/=Œ$@€@Ræ;æ;úúI]”4† Hè7ˆ«j¤ à·ýÜ5ï¾"ôEðûìÿ¢ìÜ/^TxØèÙ+Óã°CöË>tÙ¹AS¦øe¹Þ¡Ķr†~}3g @ EõŽzGC_C?ŵILèB@C¿ U×$@€@B6¸énpói²÷þ;²?ËÊŒ9æè_ËÞvÞ)gQÙÓù!çåmúdBÓ:Z(úÑ(]ˆƒ ¨wÔ;Å ¨Þ¹GD½3èòäæˆ. 
¡Ô  –€ nÚܲ×î,fк×ï”}Ÿòºu ý:Ö o n;?g @ õŽz'¤¡¯Þ¹[É+SY¹ÄA€fúÍÜœE€ÑØà–opc$ðÔ“_˜|Ò1;]j—ûí³ò¿]ôG¯Ëv<ã°µ·\× ¯ãºXÊ®QöªŸâqU1׉-åc5ôSÎŽØ . ÞQïg‹zçõNøZâHŒA@C Y#ZØà¦¿ÁÍÓ»û‡f·ßñF™}ÕÎòÅ«>D¨óA£ 9É7‘Dƒ-Ô;ê²)¤Þ¹[E½Órq:ÐÐO,!Â!@€@lÜqlpó¼¿ä6d.¬{-Oȹſ$X>§É‡!÷LíÜÔ2"4Pï4«#B´cýEââ^êõ¸Ç¨wâzº†ÐÐ:îO€ŽlpdzÁ]L…²/q[ž&ü¹‡gŸ»þâ(3çɇ—]~å ;]+æ=¢ÚÑElp;‚uYô, ÞQïlšrê3Wx¼C¿çÊí Y@C?2¨Ë @ 5ÜÔ2"ž”4ôSʆX Ð\@½ÓÜΙÓPïL?ÇFH€À¼4ôç•o£%@`†6¸3Lº! ØàS9I ¨w’NàPï œ·'@€@d ýÈ .G€ÔlpSˈxR°ÁM)b!@€@sõNs;gN_@½3ý!óÐПW¾– ØàÎ0é†,`ƒLå@$- ÞI:=‚X@½3pÜž‘4ô#ƒºR°ÁM-#âIIÀ7¥lˆ…ÍÔ;Ííœ9}õÎôsl„ÌK@C^ù6Zf(`ƒ;är°€ n0•  ´€z'éôn`õÎÀ p{DÐÐ êrHMÀ7µŒˆ'%Ü”²!4Pï4·sæôÔ;Óϱ 0/ ýyåÛh ˜¡€ î “nÈÁ6¸ÁT$@€@Òê¤Ó#¸Ô;'Àí  Y@C?2¨Ë @ 5ÜÔ2"ž”lpSʆX Ð\@½ÓÜΙÓPïL?ÇFH€À¼4ôç•o£%@`†6¸3Lº! ØàS9I ¨w’NàPï œ·'@€@d ýÈ .G€ÔlpSˈxR°ÁM)b!@€@sõNs;gN_@½3ý!óÐПW¾– ØàÎ0é†,`ƒLå@$- ÞI:=‚X@½3pÜž‘4ô#ƒºR°ÁM-#âIIÀ7¥lˆ…ÍÔ;Ííœ9}õÎôsl„ÌK@C^ù6Zf(`ƒ;är°€ n0•  ´€z'éôn`õÎÀ p{DÐÐ êrHMÀ7µŒˆ'%Ü”²!4Pï4·sæôÔ;Óϱ 0/ ýyåÛh ˜¡€ î “nÈÁ6¸ÁT$@€@Òê¤Ó#¸Ô;'Àí  Y@C?2¨Ë @ 5ÜÔ2"ž”lpSʆX Ð\@½ÓÜΙÓPïL?ÇFH€À¼4ôç•o£%@`†6¸3Lº! ØàS9I ¨w’NàPï œ·'@€@d ýÈ .G€ÔlpSˈxR°ÁM)b!@€@sõNs;gN_@½3ý!óÐПW¾– ØàÎ0é†,`ƒLå@$- ÞI:=‚X@½3pÜž‘4ô#ƒºR°ÁM-#âIIÀ7¥lˆ…ÍÔ;Ííœ9}õÎôsl„ÌK@C^ù6Zf(`ƒ;är°€ n0•  ´€z'éôn`õÎÀ p{DÐÐ êrHM ¸ÁM-¾¹Åóï~/»÷½vÙöòÿ{n©Žw¯§\’jhâ"@€ ê´¦‡z'­|£Qï¤Ñ @ J@C¿JÈÏ  0rÜtø÷wÜ™óêeÇ=÷ç¶‚:÷ŸÏÎ?íñÙOÜÿ¾é9óHlpg> ŸÑ ¨wÒIz'\¬‹D½“~ŽDH€Múæ&.`ƒ›N‚O:óãÙŸ~ôË+ýÊ’yâãÒ ræ‘ØàÎ|>£P邏:õN:¹ÐÐO?"$@€@ ý&jÎ!@€ÀˆlpÓHÖ×~5;þ´ëKƒ9ç5ûg‡ð 4yàÑ’= IDATú3Ÿ†O€ÀhÔ;i¤N½“Fª¢PïT ù9ÒÐÐO;?¢#@€ |ç;ßÉ~æg~&ûïÿý¿—ŽæáxvÓM7e÷¾÷½'0ZC @€æ( Þ™cÖ™†ÐÐBÝ=  @`V'tRvÖYgms~Ìgœ1+ƒ%@€¦# Þ™N.„ÒÐÐO;?¢#@€‘ |üãÏöÛo¿•Qœxâ‰[ÿÿb“ÿ†nÈöÝwß‘Xø @€ÀÜÔ;s˸ñ @€ÀúCê»7L^àñ³}ìcÛãü·ÿößf_øÂ¶þÿÅ×ðäÇ~ä#™¼‰ @€ÓPïL+ŸFC€i hè§Ñ @€ÀˆÞò–·d/yÉKVFðîw¿;{Æ3ž±õ¿½ç=ïÉžùÌg®üüÍo~söâ¿xÄ£: 0'õΜ²m¬ ‚€†~ YLNà+_ùÊÖøwÞyçöØŽ:ê¨ì /\볟ýì좋.Úþßî{ßûn=Áÿà?xr&D€LK@½3­| ŒC@Cy%ŒLà9ÏyÎJóþ‡~臶õ?õS?µ2’/ùË[ÿú§Úþßó&ÿ;ßùΑX¸ @€ÀÜÔ;s˸ñ @€@ ú)dA  0)÷¾÷½Û¯ÕY ìœsÎÙéõ;‹Ÿå¯Ù9þøãW ò×ñ<ýéOŸ”‹Á @€ÓPïL'—FB€ãÐÐW¾DK€‰ |÷»ßÝzâþ–[nÙŽôñ|öÑ~tcäÅ/“{Ä#‘ÝtÓMÙ½îu¯ÄG,< @`nê¹eÜx  @ % ý”²!½À+^ñŠìÌ3Ï\Çõ×_Ÿí·ß~Çvà 7dûï¿ÿÊ1ùµN?ýôÑ› 0-õδòi4 0. 
ýqåK´ °ÀÇ?þñ÷'žxâN þuC8餓²³Î:kåÇy£ß}÷MxÔB#@€æ$ Þ™S¶•RÐÐO1+b"@€Q <á OXyµÎ{ì‘Ý|óÍÙ½ï}ï ñ|ç;ßÉöÜsÏìÖ[oÝ>>¿æUW]t¾ƒ @€] ¨wºv} °Y@Cß !@€Þò–·ìô¥·—^ziöÌg>³ÖÕßýîwgGqÄÊ9ùµ;î¸Z×q0 @ ¶€z'¶¨ë @€úúõÍœA€V¾ò•¯l}îwÞ¹ý¿ïر#»è¢‹IuÔQÙÅ_¼}în»í–}á _Èô 5ºž“ @€mÔ;mO€âhèÇqt˜±Àsžóœì /ܸÏ}î³Õ€ÈCÒHåïþîï¶> øö·¿½}~~w¼ã®ç$ @€@[õN[Aç @€8úq]…f*ðÞ÷¾7{Æ3ž±2ú7½éMÙñÇßJäœsÎÉ^úÒ—®\#¿×Óžö´V×u2 @ ®€z§®˜ã  @€@wúÝÙº2L\à{ßûÞÖ—ØÞrË-Û#=øàƒ³}ìcQFþøÇ?>»úê«·¯õˆGñ‰Odûî»ïÊh^þò—gguVÔžxâ‰ÙÞð†•k~üãÏ÷¸ÇE½‹ @€Šês‚¤' ¡Ÿ^NDD€#x⟘}ä#ÙŽt=öØzÎüÀDþ_þå_¶¾ ÷Ö[oݾîžð„쪫®Šz#@€Ô;æHO@C?½œˆˆ8÷Üs³¿øÅ+Q^zé¥Ù3ŸùÌN"÷»ßqÄ+×~Ë[Þ’wÜqÜÏE  @€ês€¤) ¡Ÿf^DE€‰ |õ«_Ýzbþ[ßúÖv„;vìÈ.ºè¢N#>ꨣ²‹/¾xû»í¶ÛÖ_<èAêô¾.N€ÌO@½3¿œ1ŒG@C<¹)$ ðÜç>7{ç;ß¹É}îsŸ­ÆúCòN£û»¿û»­¾ýíooß'å‚ .èô¾.N€ÌO@½3¿œ1ŒG@C<¹) ,ð¾÷½/{úÓŸ¾Å›Þô¦ìøãï%²sÎ9'{éK_ºr¯<¦Ã?¼—û»  0}õÎôsl„ 0n ýqçOô ГÀ÷¾÷½­'äÿæoþfûŽtPvõÕW÷ÁÝ·9øàƒ³k®¹fûž{î¹gvÓM7e»ì²K¯q¸ 0=õÎôrjD 0= ýéåÔˆ @ W¾ò•Ùgœ±råk¯½6;à€:¸ÛúK^wÝuÙ¸r@Ûë_ÿú^ãp3 @`zêéåÔˆ @`zúÓË© @€@dO|âÙ¾ûî»rÕ—½ìeÙÞð†Èw »ÜË_þòìo|ãÊÁyŒ}ìcÃ.à( @€@A@½cJ @€qhè#O¢$@€žøÄ'fùÈG¶#xØÃ¶õE¸»îºë QÝu×][¯ÿù⿸}ÿ<Æ+¯¼rxÜ”¿€zgü94˜‡€†þ<òl” ÐPàÜsÏÍ^ü⯜}É%—dGqDÃ+Æ9íÒK/ÍŽ<òÈ•‹å±{ì±qnà* @€ÀlÔ;³Iµ @€À4ô'DC @€n¾öµ¯eù—Î~ë[ßÚ¾Á³žõ¬ìâ‹/îæ†5¯ºcÇŽì]ïz×öY?üÃ?¼õ—?ù“?YóJ'@€æ* Þ™kæ›Æ* ¡?Ö̉›:8ú裳w¼ãÛ÷ùWÿê_m5ÌúЇv~ï|éK_ÚzõÎÿù?ÿgûðç>÷¹Ù\rºc @€™zÇ$ @€ãÐÐW¾DK€= \vÙeÙÓžö´•»}öÙÙK_úÒž"»Í›Þô¦ì„NX9ø}ï{_vøá‡‡]ÀQ @€ÀlÔ;³M½ @€Àˆ4ôGœ<¡ @€@wù“ï7ß|óö :è ìꫯîî†-®|ðÁg×\sÍöò×åIà @`“€zÇü @€ãÐÐ_ÎDL€ ¼êU¯ÊN?ýô•»\{íµÙÐñ›]þºë®Ë<ðÀ•“_ùÊWf¯ýë›]ÐY @€ÀäÔ;“O± @€ÀD4ô'šXÃ"@€fŸüä'³Ç=îq+'¿ìe/ËÞð†74»`Og½üå/ÏÞøÆ7®ÜíŸøDöØÇ>¶§܆‹€zg,™'ØY@C߬ @€K‡rHvÕUWmÿ/{ØÃ¶^_³ë®»&ít×]wm}Aî¿øÅí8ŸøÄ'fW^yeÒq Žè_@½Ó¿¹; @€Xú±$]‡zøÆ7¾‘ýøÿx´ûžwÞyÙqÇ·r½K.¹$;âˆ#¢Ý£Ë ]zé¥Ù‘G¹r‹|L/zÑ‹¢Ý6¶y´À\ˆLT ö{Õ;Õ%¶yõA€Â4ôíI€‰ üõ_ÿuvë­·fO}êS[Göµ¯}më ÷ÿý¿ÿ÷öµòæø»Þõ®Ö×îóÏzÖ³²üCˆÅ¿þáÞúrß>ð­ÃøÀ>í±ÇÙÏþì϶¾–  @€aêÔ;asÇQ 0M ýiæÕ¨ 0 üÕ8Ç|vã7¶ïÑG½ãïØ¾Îþàn5ÂúЇ¶¾vŸøÒ—¾”í¹çžÙ?ÿó?oß6ÛÛßþöÖa<ò‘ÌÎ9çœ,• @€@?êÕ;ýÌ=w!@€44ôÓÌ‹¨ @ @ࢋ.Êžýìggÿñ?þÇìw~çwÎ(?ä²Ë.Ëžö´§­ü0ÿ‚ÙN8¡ñ5‡<ñì³ÏÎò/ò]þ—ñ×ýׇõ{¿÷{Ùøÿ!»ð ³£Ž:ªñuœH€ÔPï”{©wêÍ#G @€Àt4ô§“K#!@€Àì–7rÿí¿ý·Æ¯‚É_µ“?¿øwàf×\sͨ=:è ìÚk¯ÝC>Æ›nº©Ñ˜ò?õÿwÿîßm;æ: ÞI @``õÎú¨wžœnO€ƒhèÂî¦ CओNÊÎ:묭KýÚ¯ýZö'ò'µ/ûªW½*;ýôÓWÎË›ùySÌÿòf~¾É]þ—õ´ÓN«=¬üÉþ÷¿ÿý[çxâ‰Ù™gžYûN @€š ¨wÖ»©wšÍ)g @€À¸4ôÇ?Ñ @`ÖÅ÷ÞçïÀÎsžlò©O}*{ìc»r|þšü)ô)üË_»“?Õ·üüdö˜Ç<&xxï|ç;³ç>÷¹ÛÇçÿï .¸ ø| @€íÔ;›ýÔ;íæ—³  @`|úãË™ˆ  @àû‡zhöáxÛãþ÷¿öå/9»Ï}îdôK¿ôKÙ•W^¹}ìOÿôOg_øÂ²ü q§ð/ÿbÜüU;û·»=œC9$û‹¿ø‹ á}ûÛßÎ~ê§~*»ãŽ;¶Ò“ž”]qÅAç;ˆh/ ÞÙl¨Þi?Ç\Æ% ¡?®|‰––ñˆGd·ÜrËŠIèö¿ÿû¿Ÿ{ì±+ç¾ë]ïÊŽ<òÈI_rÉ%Ù³žõ¬•1wÞyÙ‹^ô¢Êq–=ñöð‡?<û›¿ù›Ês@€ÄPïT;ªwªA€ÓÐПN.„³xÀ°òôøàúë¯ÏöÛo¿µ_ÿú×·ž\ÿÇüÇícòF~ÞП⿼¡Ÿotÿ~äG~dë/øÀ®î 7Üí¿ÿþ;ý<ÿ+ˆÛo¿}ŠLÆD€’Pï„¥E½æä(¿€†þøsh˜¥À]wݵöÕ8Ox²«®ºj­Ëóž÷¼•÷Àç¯ØÉÜù+w¦ø/åNþFþ'é‹ùûxßþö·¯nnøÑ~´ôçùuvÝu×)RHJ@½žõN¸•#  @`ÜúãΟè  0[¯|å+[ïw_÷/¥Îoÿöoïôã?ù“?É?üð•ÿ=ÿÜüU=Sþ—9nþ å—]vYöë¿þë; »ìuDËåßSðà?xÊ\ÆF€’PïÔKƒz§ž—£  @`œúãÌ›¨  0{O}êSÙcûصÿú_ÿëìæ›oÎvß}÷•cöÚk¯­§ñÿ<ðÀìšk®™…çA”]{íµÛc͟ڿ馛VÆ~Ûm·e{î¹gö¿þ×ÿZkòÉO~2{Ìc3 3ƒ$@€C ¨wêë«wê›9ƒÆ% ¡?®|‰–¾/ð| ûÕ_ýÕÇsLö¶·½mû˜W¿úÕÙë_ÿú•sòf~Þԟÿ¼™Ÿor—ÿ½êU¯ÊN;í´íÿé/xAvþùçoäø/ÿå¿dO}êSç@fŒ @`PõN}~õN}3g @€À¸4ôÇ•/Ñ @€À÷þðÿ0û­ßú­J~ðƒÙ“Ÿüä¬ì ·ü5;ùëvæô/íNþçèËÿr›}öÙ'ûЇ>”=å)O©äøƒ?øƒì7ó7+s ÐN@½ÓÌO½ÓÌÍY 0 ýqäI” P8õÔS³ßùß©tyô£ýå_þeöK¿ôKÙ•W^¹}üCúЭWòä_ˆ;§ùÚæ¯ÔùÒ—¾´=ìC9$û‹¿ø‹ì~á²Ï|æ3•¿÷{¿—rÊ)•Ç9€h' Þiæ§Þiææ,‡€†þ8ò$J(ä_xûÖ·¾5È%5Oþš˜å_|qö¬g=+èü©ô®w½+Û±cÇʰʌÖû…/|aöŸþÓš‹ñ @€äÔ;ÍS¢ÞinçLH[@C?íüˆŽÖ<íiOË.»ì²F>GqDvÉ%—4:w*'yä‘Ù¥—^Úh8‡~xö¾÷½¯Ñ¹N"@€ÂÔ;áVeGªwÚù9›ÒÐÐO3/¢"@€ ý÷ß?»á†j;ÝûÞ÷Îò§óóWñü›óo²ÝvÛ­ö5Æ|·¾õ­ìüÿ±õjü)ýï|ç;µ‡³ß~ûe×_}íóœ@€ÔPïÔóZ­Þiææ,‡€†þ8ò$J(<ìaËþöoÿ¶µËýîw¿ì'~â'¶šûeÿgñ³Ýwß½õ½º¼Àm·Ý¶Õ¨ÿû¿ÿû­ÿ»ìÿä?ûæ7¿Ù:ŒŸþéŸÎ¾øÅ/¶¾Ž  @€›Ô;«>ê¿1 @ Ë4ôÍ¥À}ï{ßìŸþéŸz‹ý^÷º×vÃÑäÉK^’ýüÏÿ|o1ä7ú«¿ú«ìÍo~óNûï~÷»½ÅñC?ôCÙwÞÙÛý܈ÌU@½³ú ‚zg®¿ ÆM€ËúæŒNàþá²û±,îW¿úÕÙë^÷ºÁîŸßø5¯yMvÚi§ ÃÿüŸÿ3ûÑýÑÁîïÆ @`êêõÎÔç¸ñ 
@€@3 ýfnÎ"@€¾ð…/d{íµWïüò/ÿòV#ÿßÿûßû½Ënø_ÿëÝjìÿùŸÿyïñÜtÓMÙÏüÌÏô~_7$@€sPïÜiõÎ\f¼q @€@¨€†~¨”ã @ |ä#ÙŸøÄÞâyà¸ÕÈþóŸßÛ=ëÜèÿø·û_ÿú×ëœÖêØ«®º*{žÐêN&@€Ö ¨wVmÔ;~[ @€ÀÝúfŒNàâ‹/ÎŽ:ê¨^âÎß“Ÿ7ówÛm·^î×ô&ßúÖ·¶šúùûõûøwÑEe;vìèãVîA€f) ÞÙ9íêYþ*44ôM À›Þô¦ì„Nè4îƒ>xëõp@§÷‰}ñë®».ËßñõÕWǾôÊõÎ>ûìì¥/}i§÷pq 0gõÎúì«wæü›aì  ¡o @€Àè^ñŠWdgžyf'qßï~÷Ûz"ÿØcíäú}]ô¼óÎÛzbÿ›ßüf'·<餓²3Î8£“k»( eêêY Þ©6rLO@Cz95"L^àyÏ{^vÁDç1dzÕÌß}÷Ý£_{ˆ ÞvÛm[MýóÏ??úí>úèìío{ôëº  p·€z'l&¨wœE€ÓÐПN.„³8ì°Ã²+®¸"Úxá~a«‘Ÿ_wŠÿ.¿üò­Æþ_þå_FÞ¡‡šå×õèF@½SÏU½SÏËÑ 0^ ýñæN䘭ÀÞ{ïÝxã­Ç¯{Ýkë=ùùŸ´Ïáßé§Ÿ¾ÕØÿîw¿Ûz¸|ä#³Ï}îs­¯ã @€@¹€z§ÙÌPï4ssŒG@C<¹)|_ %Îí·ßÞÊãÈ#Üz*=öhu±|ë­·n5õ/¹ä’V¡?àÈò?q÷èF@½ÓÜU½ÓÜΙ ¾€†~ú9!, üË¿üK¶ë®»66ÙsÏ=·ùOúÓ_c '¾÷½ïÝjìß|ó͇s×]we?ð?Ðø|' @€åê83C½ÇÑU @ - ý´ò!¨øêW¿š=øÁnätòÉ'g§žzj£s§zÒ)§œ’½öµ¯m4¼¯|å+Ùƒô Fç:‰X/ Þ‰;;Ô;q=]†ÐÐÖßÝ  @ ¦À§?ýéì1yL­³~åW~eë©üü½ïþí,¿ ?ZÿÏþìÏjñ|êSŸÊöÙgŸZç8˜¨PïTÕ=B½SWÌñ ª€†~ª™” üéŸþiöÔ§>5Hç'ò'·ùÏ{Þó‚ŽŸûAoûÛ·û_ûÚׂ(>ðdù‡%þ @€qÔ;q=—¯¦ÞéÎÖ•  @  ý~œÝ…" ¼ímoË~ó7³òjÇüV3ÿ¾÷½o屸GàÎ;ïÜjêŸsÎ9•,ø‡˜½à/¨<Î @€@=õN=¯ºG«wêŠ9žRÐÐO)b!@€Jü}ïù{P×ýûÅ_üÅ­Fþþûï_y-¬¸þúë·ûýèG×”Aþ½þ @€qÔ;q=×]M½Ó³» @€@\ ý¸ž®F€ {ì±Ùïÿþïït—ýÑÝjäÿöoÿvÇÌëò¹uÞ´ÿ‡ø‡þ¢½(;ï¼óæb´ @ õNÈK·Pïôëín ÐN@C¿Ÿ³  @ g§?ýéÙûÞ÷¾•»æ¯}É›ùxÀzŽf·»ýöÛ·žÖÏÿüù_ž‹÷¼ç=ó@0J У€z§GìïßJ½Ó¿¹; @€@3 ýfnÎ"@€8à€,ÿóèüß>ûì³ÕÈ?ôÐCŠf^·½âŠ+¶ûŸþô§·ž¿Öèºë®›‚Ñ @€Ô;= ¯¹…zg8{w&@€0 ý0'GèEà¦ÙË}Ü„À˜û?˾~ÇÙi§žtÒIcÊhc?óÌ3³W¿ú•Ùïßìò?ú¿F;èK`¯§\Ò×­FqõÎ(Ò$ÈÔ;'àÿ¿½zgøˆ`\êqåK´ãÐÐwþD?1܉%Ôp:8ñÌg/9úç³'ýßÚÉõ]4LàÃÿï¯do¾à¯²³NÚ7ìG˜±€ îjòÕ;3þe0ô`õN0U§ªw:åuñ‰ ¨w&–PÃIZ@C?éônn6¸s˸ñÖøÇoÝ•ýÈn»n¦`¬«÷øÅzµœ“¸wp5Ó°^ièOg6Iê>”Ãî¡Þ srû3s€@¿úýz»ú&p ²p«.Ž´^u¡êšS°^ièOunW÷Öî7ÝA½3¬¿»KÀz5®|‰vÜúãΟè'&P,÷:ÐûÁ'–bÃi!pÓµg®œ­`láTëUD—˜¬€õjsj­“úAÀú1â%¬W1]jrÖ«É¥Ô€F$ ¡?¢d uú ÆéçØ› (›Ûuq¦õª U×œŠ€õJC*sÙ8ú°~ôo¾éŽê´ò!š´¬WiåC4óÐПW¾6qcâ Þ  ÆAùwº¹õ*­|ˆ&-땆~Z3R4c°~¤•-õNZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ Ø·¢d IDAToPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸€‚1ñ oPã üúiñ‹&q땆~âSTx X?ÒJŽýYZùMZÖ«´ò!šy hèÏ+ßF›¸@—ã.÷Ûg§Ñæš ³Gí½ç *Ÿ.ú£×e;žqØ 1¹y˜@ÙœÊÏ|Àý<»íÖ+Â.Rã(c ¬ír½ZþkÏû¹›³Gôì€#ï>$æ&WÁÌÞË]­W›‚ßÔÐÏÏ‹ñ!å Ž=5;ÿ‚÷—†‘šÙ&¹ÖÛ6zõε^ ÓÐWïÔ›§Žn&ðäÃË.¿ò†í“cÖ:ùE­ÍòÒÕYê®d»»®z§;Ûâ•­WýY»¢€†¾9A !® ƽ÷ß‘Ýøù[vi¬'ZÛ*¸Úèõ{nU3u]4§žüÂì䓎i¬‚±5aÔ tµ^m ²jÆh¸ï¾Ç¡Ùíw|£4Œך„š³ÞÖkq¸õj3^Wë‡z§Å¤uj@Ù‡4ôƒèF{PWë•z§»)¡ÞéÎVC¿?[w"P% ¡_%äçzè¢`¬z¢:V³µ)“‚«©\¿çmz IH$1^¥¤A"Ýß1]¬WUÑW5ôùsÏ>wýÅU—YûóªõRC¿1íìN´^õßЯúýUïÌî×0ú€×ÕBúÑ©“º z'©tcÄå õNF!ÐH@C¿›“t#ÐEÁX|}DÞðZ~Z¿m¬ WMM`Ýûò‹›ØM •¶^cZ³¢‹õªj„ņ~þWFŧéÛ¼v§¸^¯¯¡_•!?_X¯úoè«wüþu)°éÁ†¶õM1nëG—™¬mõN}³¡ÏÐÐï/Ö«þ¬Ý‰@Q@Cßœ @cñõù“ÒÅ/|lÓKˆO( ߛߦª±Ù䜪ðŒUBýþ¼‹õªje ýG=rÏ•w·y wùuù‡ù¿å@«æ}UüCÿÜ·¿ X¯úoè«wú›ßs»Ó¦ïVÉ-4ô§=#Ô;ã˯z§¿œ©wú³v'úæ„bŒÅ§‰ïÌ/¾cvìMª„S:‰ÐŠEña‡ì—}è²s+ÇVÖÔo³éU0V’÷z@ìõ*$ø²†þqÿÏ‘Ù)¯}ëöéMÿê¨ø×%ùºø©Ïüµ†~Hb³“€õªß†¾zÇ/aW›¾WeqÏ6µMYÜÖ®²ÙìºêfnCž¥¡ßŸ¾õª?kw" ¡oHX vÁXl¨.±eM±Ûn½"a¡ %ÐöËߊu›§§ŒCÍ‚òûÆ^¯BF·ní*γ&Í•âµó¿\zþ WC?$1ŽÑЯ9b¯êš px¥@Õw¶,_ És6 Þ©LO¯Ä^¯B‚Wï„(­?FC¿_³­Wu´K ®€WîÄõt5­bŒ›Š™˜ÖVƒnyrÕ†+Æ—±¶ ±Öé©§ø×¡Oç/]l²4}z:¿ž‚±ÖTêüàØëUHÀë6¸ÅyÚä÷~ùmÿš©êõ ùX›Ä¸É¨øiÙïêÜMO¹vñÚ·9×ôëÕf¹Øë‡z'þšÒt—Z½³ˆ{Ó»òóÿ6œsÆËvzu¥†~hÖÇy\ìõ*DA½³^)ä¿ïê꿤™‡!ǨwB”C  
ýn\]•@#˜cÕ“ÕÅXU£µx|>À:M˜²Wþ´öò«2ê4¹Ê^ç² ½j|ue‹f_£$¯9©ÏñÔ‰»mA\¶1nºñU0ÖÉ\÷ÇÆ\¯B£]·Á-®/u?xÊï¿<ׯ!«óz²M_½i|ub-Æ“ÿ.mj>å÷]þ«˜&¿Ïë®òDUlE—k.ŸÓÖ#tÞ59ÎzµY-æú¡ÞYµVï4ù½çœ²z7ÿéb­ŽY׬‹ÔúÑ.‡±Ï޹^…ƦÞÙ±ò’êv¡ó®ÉqÖ«&jÎ!G@C?Ž£«ˆ"³`¬z²ºl¼©A_Ö¬ ml—m~Î& ¦÷™®KH!×(Þ/tÜ¡“¢ïñ„ÆU–·&~Mr\£‚14sýs½ xÝ·¸>Õý-^wÑXmè×ý@®8ÞÐxËØÅ߯ⵗ›äu×}HÒxojÒöŽ:¡ó®ÉqÖ«þúêrë&ÿ½žk½³,XÖÐ_~AC¿ÉŠ8îsÔ;÷ä¯éÛWPïÜc©Þ÷º z)hè§1ø¾@¬‚±¬ SÖ€)6>ª¾·ìC€ªsò¡•5¬›£º ¦uON…N¢ÐBrùz]np‡O¨U¬ïZ(ú5}¾Yhæú9.ÖzU'ÚMs²8ÏBšÎ‹{/oP—׈†~ÈëuBƲ±+û˪?ËÚË×»:ëíÍüÅ@B,òcÛz„ä¥é1Ö«~úêõÎêf¿½ËëJÙG4ô›¹Žù,õÎÝÙSïì<‹Û~À¡ÞóÊ và hèŸØˆU0 ®u›ºb²ù«ûêu¯Ú9ù¤c¶Æ]§ÁT÷Õ늬º å®úC'ôW®˜»¦ÆÒ ‰Iƒ,D©¿cb­Wu"ÞÔÐ/Î×÷^^‡–7WUswÓ+eÖ½ZjÓ{£«ž¨Ýôàòº¶hr B×ÛuÍüµs݆?ô¿C‹œ„䯭G¹W÷XëU? }õÎÝÎ꺿¡ëÏוWžðülÇ3+=HC?žõX®¤ÞÉ6¾ÞO½³:“Õ;—ŒåW[œF/ ¡?úÀ”bŒÅôºÆHÙ¦¤ªaSÖèY×è-»~ñI„ÐSÙ†5äˆM¯ûzî4ù@¥Ïñã }Фèë:dCÏØÕûÇZ¯êŒjSC¿ø»²>ä÷.ž·üDfUC¿¬¡Òˆ^×€«Z×5°×ý5B¾^?jï=·‰C×Û²×ÖTÅ–ßdÝUTäç–ý%WÕym=ê̽ºÇZ¯6‹ÅZ?Ô;÷8÷YÔý}H½Þ©3 ý:ZÓ86ÖzUGC½³úÎø…zgu…zÔ™{uUïÔs,ÿ.mjè×ùpsÝ8ë¾ö¬¬]烶õ¶i3ÝZ’ƒ…O1UckëQwþÕ9ÞzµY+Æú¡ÞÙÙ¸¯ú ÎïÂÔê7 ýºÙÿñ1Ö«º êúU5Á²±z§îŒk~¼z§¹3 ´ÐÐo+è|bŒÅÍ\Uƒ¾ìõ! ˜ªWï”m*Ëð!׺fOhCyóÕEã»éhÚÐïkÐ,_l@‡ü,b }ÅÏò˜‹v5iè×ùrÓzÛ¶™_öGØ.EóMMƒ² n“{Ö‡!ÇkÈußÐWï„ÌÄ4ŽI½Þ©£¤¡_GkǪwîΣzgÕayv7©=Ô;ÓXŒ‚ÀúCê»7‚@Û‚±iC¥iS¤ì~yCìÜÿ|IvûßXݺB§NCÝ—¾Õ}J$•‰—úxBš™!–žÐQß1m׫&#®jèç×,®)›þâ¨ø;X\§bý¬kÛ†~ÕÅû®[oËþP÷éúºc 5Ùôs1îºMæ`è9ú›¥Ú®êЙ˜Æq©×;u”4ôëhMãØ¶ëUõÎê+wêþ÷]½ÓdÖ5;G½ÓÌÍYbhèÇPt ‘ÚŒÅæFÕÓž‹°×5æO>é˜Ê‘­Û¤-Ÿ¸©1T§¡_¶‰*X·à«`‡¤>žXÍL ý'Ñ€—n»^5 =dƒ[\“6­?ËO–­±~Šc]÷»_µf7]ã÷/[oO?û³?ËJˆ¡¥µ|R,«bŽ7­ém=šÌÁÐslp»mè7ͽz'tÇ=.õz§Îh5ôëhMãXõNó<ªwÖÛ©wšÏ+g p·€†¾™@ !6cYÁTçÉõb£§Nc¼ìK¬U ª: ýüšeO’nJaÈ÷ 9RO¬†þ3¬»{·Y¯šFÒÐÝ U½n§l½iÒèÞ´>ªÖ˦MÌÅ}Š±ä¯ *þ5U~l“ו½²§iž—ÏÛK[ñ­»††þfÝ6ë‡z§ÜV½Óåoô=×ÖÐïÇ9¥»´Y¯šŽC½³ú„~U}TtVï4yõÏSïÔ7sXú±$]‡@6cÈ“òuC Ýnzòªê‚uúeM¶q5iP…\7Æ1u›úMnuc-ΩºÅôâ~±®£`¬›Áno³^5,dƒ›_»¸®”­CÅšÊÖ»ºjµmjWý޵m`×ùp¡î‡mǾnNhè7ýmIû¼6ë‡zgsnÕ;ÝÎ} ýn}S¼z›õªéxÔ;qú›ò Þi:Kï>Ïþ¬Ÿ³ ´ÐÐo£ç\‘ÚŒu5¡a×)pÊ6Ø!!Ф¡ŸÇòçÛeã¬j˜…ÚÄ>.ÅñÄjÄÇºŽ‚1ö¬kw½6ëUÓ;‡np‹ï²×îT½n'1´¡«™]µ>õÙÐÏÇú¡n~lÿ ʯ«¡ßô·%íóÚ¬]Ì5õNó%Åz§Îè5ôëhMãØ6ëUSõN }õNÓYz÷yögíüœM €†~=çˆ,д`,{'l¬Ðªž°ÏïÓ÷úűÕ}½ªiË®éuRO¬Wå„6E«¼ŒUBýþ¼ézÕ&ÊÐ nñ¸âïüg?wsö胞½ʺf^ÈÜmÒ\\4©ë~‘lW ýÅø‹LÄzõZ›œkè·ÑK÷ܦë‡zgµÉU•aõN•PýŸkè×7ûM׫6ãVïtÓÐWï´™•åçÚŸÅ7uE¡ú¡RŽ#Ѓ@Ó‚±n¸ÎP6}¡äâ:}¾C¿*öbƒlÝñ!=Pu¯>~>äxB7UÅ&aÈœ*»¦‚±JºßŸ7]¯ÚD:'‹ ûüžËN†¼n'?§ª¡²önzʽî_¯tÑÐ_þ0£¬Qúûë÷¼ÎühëQç^uµ^mkº~„üÎÕÍÕâø¹®Þiª[}ÞõNut÷¡¡_GkÇ6]¯ÚŒ^½¿¡¯Þi3#ן«ÞéÆÕU „hè‡(9†@OM ƲÆU?_ZÙµªžÐ y—í¦MrÓWdÝ«0ªÆzý¾ës<±6­±r¬`ì{¶uÓk3ŠÐ nY3~ùC¼å¦`'À—×ÖuO ‡üUÓÂ`è†~ÙÓ»ekzȘª>üh“÷uçjèw¡ÚÏ5Õ;YûÁ‚>ëƒ>fIªã‰Um2Tïô1ÃÂïÑd½ ¿zù‘ê¸ }õNÛ©¡ß +h. ¡ßÜΙ¢ 4)ËžhªóÞãâ Êž~[w½²MMÞðúê×nÏ.¿ò†•K¯kÅjöV%£xŸ”¿4®j,ùÏûOÛüÄÜøÚà†ÌŒþŽi²^µ®ÎwÝëlB_·Sö¡ÀrC¿¸V6YSê6¤ë_ôý}.òÚŽâ}|hÚÖ£í|Ôk.ØdýPï„y÷U„EÓþ¨ÔƳ®Y§£Þi?ob^¡ÉzÕöþê¸ ýu ªwÚÎTïÐo/è š hè7·s&èM Æ6ï<.@ÙS§ëžøß´É -BL‹XñÕmž•+ä©ÓèI.\0õñ´m˜ÅlòÙàv=ë]¿ÉzUï;]gƒ[lº,ÖŒâ56=%»é©óâÚòºŽª{U¼íïcèz[¶^V=Mk]¬Ue¸„|ðÐv†žo½Ú,ÕdýPï„;X¿‹aw ?*õz't$ú¡RÓ9®ÉzÕvôê~úê¶3UC¿½ +h. ¡ßÜΙ¢ Ô-Û¼ïxSðeïˆ-6¿Ë^˰Üð)+Ê^mLuþ¡ X Qæ“âxÚ6b4=®dufX÷ÇÖ]¯bDTgƒ›ß¯ØÌÇžÿÂßÍnüü-[áT}0X§¡_Õð.Ž¿ìw«ª‰ÝWC¿¬Yžÿo›>õú·ÐƒËbÔÐñ[ÖÏ5ê®ê ³Gí½gPrÔ;ALêÃW½Ó8=œXw½Š„z§Ÿ†¾z§ýlµ^µ7tM4ô›Ê9@u Ʋ¦z¦ðº!”]w¹¿îU;o;K†¼‹9´¡_Ö,ªÓ¼)þ©~U㬃ô®\rLã©Ó`[dÙë›ÚüU„‚±ëYYïúu׫zW/?ºî·¸åmtþïß¾xÕR§¡_ç ý²ßÿ<¨ªu©Ï†~“5ªìw¾ÎuÏoëcN®»†õj³nÝõC½³_ö¡ËÎ š²ê ¦Æiè7¦í‰u׫Uïô×ÐWï´›±êv~Î&ÐF@C¿žs D¨[0­UÍ ÐpË6+Ë×®ó>ÓªCúyìeÍž/ý"Ô'ÖqcOY#¥ªZvNH®6Ù*cͼ8ש»^ŸkÝ nÙïþrUÍæM ý¦X­û2ÝÔúyêºfUñü§®Ëb]4vê6˜Ö5èCÇ•WçÕM ¼:±Œe¦M4ó¦Û6°ë®·ùÊqUë~Œu­êylm=šæ(ä<ëÕf¥:ë‡z'dÆ•£Þ94»ýŽolãT} "­¡¢4­cê¬W±F®Þé¿¡¯Þi6{Õ;ÍÜœE †€†~ E× I ´`lò®¿º!nzÒzq­ÐQÙµ¯ðiÒ`*{š.t|!M¢âµê>±Ë⸱Œ'ä ã²±Çhæç×U0ÖYݺ^ÅŒ¢Éwݼ itU5ôc|ÐuÔo¼f…hÓ÷ ´m`7Yoóà6}(».¿mšú¡ët[˜s³x-ëÕfÝÐõC½³Ú®3gC–¯©Þ©ÖЯ6šÚ¡ëUÌq«wúoè«wšÍ`õN37gˆ! 
¡CÑ5D-ë¾×¸ixU ™:Úu2Êkߺ^è5Cÿ‚`ùâM¿0¸ë nãXÆòAϲyÌW)›þ&ws^èzóîM6¸ùýËž¤™›U ýüÚu'òs–×¹bl›špmØMú›>”Ý”ß&xÔY§ÛzÄœ›ÅkY¯6놮êñÔ¡¿Oc©wÖGC?4ÓÓ9.t½Š9bõÎ0 }õNýY¬Þ©oæ ±4ôcIº¡c æ|8›žÈnòäWÕÅFWiUÃ(Æ÷åwLãiónòÜj5QêïœÐõ*fDM7¸Åµ't]iè/ÆWÕØ_·f›[›bkÛÀnÚÐÏÇXÖ„ ù+‡…Ϧ¿D ÍGq.µõˆ97­Wõ4C×¾þû«Þ¹;}y­ÞYžÝúõ~×§ptèzs¬êaúêú³XC¿¾™3ÄÐÐ%é:" Q0FÛ%ô" `ì…9ø&Ö«`*ÎPÀzµ9éÖþRr°€õ#˜ª—­W½0»ÉH¬W#Mœ°'! ¡?‰4ÄTŒSɤqt! `ìBµù5­WÍíœ9}땆þôg¹v%`ýèJ¶ÙuÕ;ÍÜœ5ëÕ<òl”i hè§™QÍT@Á8ÓÄv€‚1ˆ©·ƒ¬W½Q»Ñ¬Wú#œ¶BNDÀú‘H"¾†z'­|ˆ&-ëUZùͼ4ôç•o£M\@Á˜x‚„7¨€‚qPþnn½J+¢IKÀz¥¡ŸÖŒ͘¬ieK½“V>D“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&. `LD“–€õ*­|ˆf^úóÊ·Ñ&.P,WxØë)— zÿ¹ßÜz5÷`üu¬W«ZÖ:³Ç±s°~ ;¬WÃú»û¸¬WãÊ—hÇ- ¡?îü‰~b Ɖ%Ôp:P0vÊ[yqëU%‘l X¯4ôý:h*`ýh*ç<õNGW™‡€õjy6Ê44ôÓȃ(l (Má Æp«.Ž´^u¡Úüš?ûË—®œü×~Dó‹93º€õJC?ú¤rÁÙX?†MµzgXÿâÝÕ;iå£õ*íüˆnZúÓʧь\@Á8ò ¿Wc¯Ü;ÝÌz5¬¿ nZþUÑX¯4ô«æˆŸX'`ývn¨w†õWï¤å_õªJÈÏ ÄÐÐgéJ @€AvÙe—•û~ï{ß$7%@€t% ÞéJÖu ›€†`ëü IDATþØ2&^ @€@AÀ×” @€¦. Þ™z†P ýP)Ç @€°ÁM41Â"@€¢ ¨w¢Qº#ÐÐy…O€°Á5 @€© ¨w¦žaã#@ T@C?TÊq @€DlpMŒ° @€hêh”.D€ÀÈ4ôGž@á @€lpÍ @`êê©gØøÐЕr @ QÜD#, @ š€z'¥  0r ý‘'Pø @€\s€˜º€zgê6>B4ôC¥G€HTÀ7ÑÄ‹ˆ& Þ‰FéBŒ\@Cä > @À× @€¦. Þ™z†P ýP)Ç @€°ÁM41Â"@€¢ ¨w¢Qº#ÐÐy…O€°Á5 @€© ¨w¦žaã#@ T@C?TÊq @€DlpMŒ° @€hêh”.D€ÀÈ4ôGž@á @€lpÍ @`êê©gØøÐЕr @ QÜD#, @ š€z'¥  0r ý‘'Pø @€\s€˜º€zgê6>B4ôC¥G€HTÀ7ÑÄ‹ˆ& Þ‰FéBŒ\@Cä > @À× @€¦. Þ™z†P ýP)Ç @€°ÁM41Â"@€¢ ¨w¢Qº#ÐÐy…O€°Á5 @€© ¨w¦žaã#@ T@C?TÊq @€DlpMŒ° @€hêh”.D€ÀÈ4ôGž@á @€lpÍ @`êê©gØøÐЕr @ QÜD#, @ š€z'¥  0r ý‘'Pø @€\s€˜º€zgê6>B4ôC¥G€HTÀ7ÑÄ‹ˆ& Þ‰FéBŒ\@Cä > @À× @€¦. Þ™z†P ýP)Ç @€°ÁM41Â"@€¢ ¨w¢Qº#ÐÐy…O€°Á5 @€© ¨w¦žaã#@ T@C?TÊq @€DlpMŒ° @€hêh”.D€ÀÈ4ôGž@á @€lpÍ @`êê©gØøÐЕr @ QÜD#, @ š€z'¥  0r ý‘'Pø @€\s€˜º€zgê6>B4ôC¥G€HTÀ7ÑÄ‹ˆ& Þ‰FéBŒ\@Cä > @À× @€¦. Þ™z†P ýP)Ç @€°ÁM41Â"@€¢ ¨w¢Qº#ÐÐy…O€°Á5 @€© ¨w¦žaã#@ T@C?TÊq @€DlpMŒ° @€hêh”.D€ÀÈ4ôGž@á @€lpÍ @`êê©gØøÐЕr @ QÜD#, @ š€z'¥  0r ý‘'Pø @€\s€˜º€zgê6>B4ôC¥G€è@à®»îÊvÝu×VW޽ÁS«9™˜”@ŒÚB½3©)a0´ÐÐoçT @€@[+®¸"ûÌg>“½ìe/kÜØµÁÍ7Ûo|ã³G?úÑÙ¡‡ÚvhÎ'@€l ¨wLÄÐÐgéJ @€FO~ò“·šú'œpB£Æ~Û†~ÞÈ?ûì³·›ùúЇÃI @€uêsƒq4ôã8º  @ ±@þÔÚa‡¶uþýïÿÚý¦ ýåFþwܱuÿË/¿ÜÓù3éD @`€zÇÜ @€@ ý8Ž®B€h%ð”§<%[~2>oìç¯áÉŸÚ¯zÇ~݆~Y#?>rîƒü`«q8™ °N ¯5ò‡ÿÔ;æ ê hè×7s @ ºÀ‡?üáÒ'ãC6º¡ ýuüÅ`ò'çžô¤'E›  @€rõŽy@€öúí ] E ø”þòE75ö«úUüü>žÎ’B!@€*ŠOé«wLÔÐЯçåh @€@gëžZ«Úè®kè‡4ò×öt~giua @`I@½c: @ €†~;?g @€¢ lzJ]cÿðWbøçþçìì³ÏÎÞøÆ7f‹/»Ý¤§ó£¦ÐÅ @€ MOé«wLlÐÐ7C @€ „<µVÜè›öùëyBù‹ëx:?¡  0õÎ ’lˆt& ¡ß­  @€š „>¥ßìê«gy:?†¢k @€uBŸÒ¯{ݲãÕ;1]ƒT4ôSÉ„8 @€ß¨ûÔZ8Oç·Ñs. ÐT@½ÓTÎyÌ]@Cî3Àø  @€$úxJßÓjI¦^P @`6}<¥¯Þ™Ít2P³ÐПMª ”“@O­y:L3B¬ @`zêéåÔˆè^@C¿{cw @€4èò)}O«5J‰“ @€È]>¥¯Þ‰œ,—#@  ý$Ò  @€ÀÎ]>µæé|3ŽHA@½“BÄ@€À˜4ôÇ”-± @€³èâ)}O«Ín0HZ ‹§ôÕ;I§\p´ÐÐoçT @€@×]<µæéü®³æú @€@õN-Ç 0w ý¹Ïã'@€’ˆù”¾§Õ’O·  @€À,b>¥¯Þ™å2h³ÐПMª ”«@̧Öh"`ƒÛDÍ9s°ÁK¦“© ¨w¦žaãk# Þi£ç\í4ôÛù9›ÌRÀw–i7è@Ü@(‡ @ qõNâ Þ êAùÝ|æú3Ÿ†O€šØà6QsÎ\lpç’iã$@`êê©gØøÚ¨wÚè9—@; ýv~Î&@€³°ÁeÚ :PÀ7ÊaH\@½“x‚„7¨€zgP~7Ÿ¹€†þÌ'€á @€&6¸MÔœ3ܹdÚ8 ˜º€zgê6¾6ê6zÎ%ÐN@C¿Ÿ³  @€À,lpg™vƒ°Á „rPï$ž á * Þ”ßÍg. ¡?ó `ø @ ‰€ n5çÌEÀw.™6N¦. 
Þ™z†¯€z§žs ´ÐÐoççl 0KÜY¦Ý lp¡F€ÄÔ;‰'Hxƒ ¨wåwó™ hèÏ|>h"`ƒÛDÍ9s°ÁK¦“© ¨w¦žaãk# Þi£ç\í4ôÛù9›ÌRÀw–i7è@Ü@(‡ @ qõNâ Þ êAùÝ|æú3Ÿ†O€šØà6QsÎ\lpç’iã$@`êê©gØøÚ¨wÚè9—@; ýv~Î&@€³°ÁeÚ :PÀ7ÊaH\@½“x‚„7¨€zgP~7Ÿ¹€†þÌ'€á @€&6¸MÔœ3ܹdÚ8 ˜º€zgê6¾6ê6zÎ%ÐN@C¿Ÿ³  @€À,†Üàîr¿}v2ÿÌ5fÚ{ÏAsQŒë¢?z]¶ã‡ Ó\n^6'ò±?àþ?žÝvë½3ØàöNî†èD@½³3«z§“©Öø¢ËùøÞ7?Ýø:MNTï4Qs8úq]…ÌJ`¨ îkÏØ©ŠàÔ“_˜|Ò1U‡µþ¹ nkB @€@ê ý$&bIeµ†~ªÙøúñM]‘L^`ˆ nÕÙ}5k×%WC¿Ÿiñ{.ÏŽú×4¾Y¯BÒÐoœ' @ )õކ~RòûÁ¬«…4ôSÌ–˜t# ¡ß«« @€I ±Á}Á±§fç_ðþm×GþÜÃWžÖÏÿÿŸ»þâI»\–­{_~q»é ®7¼úf*¦! Þ™F§4ŠM6t]ßÕ;SšYÆ26 ý±eL¼ @ !6¸»ïqhvûߨ}þ¤uñIí¾7ôL6„â»bóV}B“sÚÚà¶t>ÒP螺QÜ-P|¸¥è¢¡o¦˜€†þ|rm¤ @ š@ßÜâÓH‹wæß©_ÕÜàBƒŸÎ?ìý²]vne,eMý.7½ú•)qF! ÞEšfdñÁ–²AwYÛ”ÝO½3‹©g‰ hè'ša @€”úÞಋFnñ ÁRørÜ”ó6æØÚ~ù[ñÀ.¿sÁwÌ3Mì¸G@½c6 -PVÿ¬‹ICèl¹?þ4ôû³v' 0¾7¸›¾p¶ÏFm— ¬Ú°õñe®]¯øTYè“õËñÿ£î5Š uù ú]Í$×%@€@¿êøÞê0ÓMïÊÏ`9猗íôêI ý0[G˜‚€†þ²h  @ g>7¸UOf½UÚâñ9]wï—½¿4Úû”×¾u% ¡ ø²×ÁlJgÕø6[l¬Çü‹†¢k¾©Ü´Íã¬ó”ü¦uB¦Y,]m|5ôC2â¤/ ÞyÿJ’Ô;YÖu½³/«WóŸ-hè³®Y÷›ªÞI át4ô§›[##@€ ô¹Á­z2»¬á¿©AÿÙÏÝœ=ú g¯Ø„6¶Ë6O‹{“†sÈûP×%±Î‡‹kôÝÐ/šÇú¡G™{“ñ7ÉQ“_"Ü&jÎ!@€@zê{r¢Þ¹Û¢¬¡«ÞYþ (kè/?ˆ ¡ŸÞz!"} hè÷©í^ @`"}mpËšïeMàâFªêËqË>¨:'O]Y~±¹ªÛ,^÷äUè ýbùz}6ôóÿŸ¿eípêÄ뻊ã¯ó¡yÉÓЯ£åX¤+ Þ¹'7êò†~Ìzg]C¿¬öÕÐOwÝ>4ôûPv 01¾6¸Å×Û¬k_[Ò,®ûêu¯Ú9ù¤c¶²[§¡¿îU4ëžX_÷Zžº é>úËS~9ÎŇ4! ,®Q´oúÚ¡bÎëÄPçWXC¿Ž–c  ®€zçîÜ,ÿw\½³cí mëbCÿ•'”݃LL ¯ n±½®[¶©©jv—=ý¿®Q\výâ—²ÖÙà6ùbÓë~R˜^ëþâ`݇¹ÿ£öÞ3(ô¢WÝ/Ä]Ü$Öuª‚ÖЯòsŒC@½sÏ;ÛSï”7ôcÔ;u~+4ôëh9–Àô4ô§—S#"@€ ô±Á­ûÞôbó?ä)îÐWïlzÕN“ nÓW¿”=©ßÕ»ÖDe ý¦÷â½c5âc]§ÊFC¿JÈÏ  0õN–ëŒ: }õNwó\C¿;[W&0 ý1dIŒ @ 1>6¸ÅækUƒ¾ì•8!_œZõê²&zÙÓÿ}lp—7o©4òS³Ì1ôKo«¦w¬WåÄzuOU¼úUB~N€q¨w^˜-^-Øç s­wêüVhè×Ñr,é hèO/§FD€:ècƒ[l7y…Nè;Ò‹÷Z|xòªÜwþwžÔ–7(kèÇúÐAC¿erœN€Ô;çîäÖæõN£iXz’†~\Y7?b]§jþièW ù9Æ! ÞißÐWït3×5ô»quUcÐÐK¦ÄI€èrƒ»é)ù¦¡_Ž›_¿ì…¿hÒÐÏïòçèeãnÚÔnjXuž†þ=BúU³ÅÏ  0õNœ†¾z'þ|×ÐoêŠÆ$ ¡?¦l‰•$"ÐÕ·ìø±†\õ„}Õf³êü¦ ýåñÕ} OJMý.ú/8öÔìü Þ¿MÕôûb½‹¿jNjèW ù9Æ! Þ‰×ÐWïÄóúq=]ÀØ4ôÇ–1ñ @€ºÚàÖmhסØô…¶‹ëôùýªØ‹MìuLJüõ@Õ½bü¼Ë†~ñƒž¦¯Ú}C³ÛïøÆöpCæD ý&jÎ!@€@zênúËWUï4›÷úÍÜœE`*úSɤq @€ºØà~ös7g>èÙ+£¨óªœåË®UõTwÈ»û75€c<¡¿)…ÅfôâØªqõ5-ºlèÇÚ´v£…µ†~_³Î} Э€z§û†~ñs®wêÌæXµQ{Uï´Ñs.vúíüœM€f)ÐÅ·ì ­Ð/³-KBÙÓþë®W¶)Ê?Løê×nÏ.¿ò†•˯{õN_Íââ}š>­{âvÙÐÏcmëÛçÆ×7öìr= # Þé¿¡¿¸ã\ëЙÞg]³.&õNh¶G ¾€†~|SW$@€“èbƒ[|"«í“çeïã_÷Äÿ¦McñgëÞ[_·á¼ˆ¯nC¾l\Uï÷ïcBvÝÐo{ýâ_`´_›Lmpû˜qîA€îÔ;çÞÙ‘Ýøù[¶]†ú># ýîÿÝ@Êú)gGl @ QØܲMIŒ÷›—½¿Øü.{ÕÎò{éËèe±…npË®Wç/RØÀ•M˶ ÷ª©ÞöƒŒb~b̯u1kèWeÓÏ  0õÎ ³“O:f%Yê ýÅ„PïŒcå44ô§™W£"@€ ÄÞà–5Õë4¹× ¶ìºËÜu¯ÚyÛy§¬\²ì:ÅB7¸eï÷¯ótWñÕD]>i^guÝÐÏc ýk‰bÜe¯_êò¯lpëÌÇ @ ]õN–©wVçgõNÈoD x¨wB2åÝhèwãêª @`Ò±7¸ÅFm¬&uÙfgùÚuÞÏZchC?Ÿe æ/ýbˆÉ×Ƿ샕ªCÊÎ ±nchƒÛFϹHG@½“eÅšL½ã ýÅo¨z'µJ$óÐП_Θ´ˆ¹Á }¥MÓ ‹ïæÏ¯“?ýÿªß=o§/¼]~ÕNñ~UqÖÙà–]kq¿u™Pö!@~N—Oš×1ŸÇSö¥ENµ÷žÛ!—ý%Äâ‡]›ÙàÖ™9Ž%@€@ºê»s³ü×ê ýÅo¬z'ݵKdÓÐП~ŽDˆ¹Áíúu(ÅWÔ¬Ãyj»,ÖE¾Î7a]ƒ¾N²ê¾¾øáFÝ/äÝ[“†~“xÊþJ¡ŽÙ¦mê\gÓ±6¸±$]‡à ¨wîñWïÜmÑW½S5ó½r§JÈÏ L[@CÚù5: Љ@¬ nÛ÷ɇ nÓ“Ú‹óCÛe×Zü)z݆~~ﲿS~LÈÅk5i ‡ÆÓçwÓ_8lŠ·f~~ ýÐYã8¤- Þ¹'?ê»-ú¬w6ývhè§½vˆŽ@×ú] »>˜ @¬ nÙÓó]4]«ž†¯sÏu¯Þ9åµo]Étè5Cÿ‚`ùâM¿0x* ýÜ"䃚e³®_³³|/ ý .z†D€À,Ô;«µMþ—êú¯Üé¢þÒПå’dж4ôM @ ¶@¬ nœ²Álz¢»É“îUä1„6ôñV½J&Æwé=äkëÞ«ŸÛöÙÈ_äRC¿ö’â$) ÞYm^—%ôù)ŸIDATI½³_ö¡ËÎÝ8»¨¿4ô“\2E 7 ýި݈LG Öw:"FBà }³ÓPïL#FÑ€z§WW%" ¡¢ä @`EÀׄ °^À×ì @€À4Ô;ÓÈ£Qt# ÞéÆÕU „hè‡(9†ÐÐ7 ØàB9Œ‰ hè'ž á * Þ”ßÍg. ¡?ó `ø @ ‰€ n5çÌEÀw.™6N¦. Þ™z†¯€z§žs ´ÐÐoççl 0KÜY¦Ý lp¡F€ÄÔ;‰'Hxƒ ¨wåwó™ hèÏ|>h"`ƒÛDÍ9s°ÁK¦“© ¨w¦žaãk# Þi£ç\í4ôÛù9›ÌRÀw–i7è@Ü@(‡ @ qõNâ Þ êAùÝ|æú3Ÿ†O€šØà6QsÎ\lpç’iã$@`êê©gØøÚ¨wÚè9—@; ýv~Î&@€³°ÁeÚ :PÀ7ÊaH\@½“x‚„7¨€zgP~7Ÿ¹€†þÌ'€á @€&6¸MÔœ3ܹdÚ8 ˜º€zgê6¾6ê6zÎ%ÐN@C¿Ÿ³  @€À,lpg™vƒ°Á „rPï$ž á * Þ”ßÍg. ¡?ó `ø @ ‰€ n5çÌEÀw.™6N¦. 
Þ™z†¯€z§žs ´ÐÐoççl 0KÜY¦Ý lp¡F€ÄÔ;‰'Hxƒ ¨wåwó™ hèÏ|>h"`ƒÛDÍ9s°ÁK¦“© ¨w¦žaãk# Þi£ç\í4ôÛù9›ÌRÀw–i7è@Ü@(‡ @ qõNâ Þ êAùÝ|æú3Ÿ†O€šØà6QsÎ\lpç’iã$@`êê©gØøÚ¨wÚè9—@; ýv~Î&@€³°ÁeÚ :PÀ7ÊaH\@½“x‚„7¨€zgP~7Ÿ¹€†þÌ'€á @€&6¸MÔœ3ܹdÚ8 ˜º€zgê6¾6ê6zÎ%ÐN@C¿Ÿ³  @€À,lpg™vƒ°Á „rPï$ž á * Þ”ßÍg. ¡?ó `ø @ ‰€ n5çÌEÀw.™6N¦. Þ™z†¯€z§žs ´ÐÐoççl 0KÜY¦Ý lp¡F€ÄÔ;‰'Hxƒ ¨wåwó™ hèÏ|>h"`ƒÛDÍ9s°ÁK¦“© ¨w¦žaãk# Þi£ç\í4ôÛù9›ÌR ¸Á%‚AØë)—é0HI@½“R6Ä’º€z'õ ‰oJúSʦ± @€žlp{‚v›IØàN"A€À Ô;3Lº!7Pï4¦s"ÚúµÉœ@€ØàšÂlpíI€”Ô;)eC,© ¨wRÏø¦$ ¡?¥l èIÀ·'h·™„€ î$ÒhÌP@½3ärcõNc:'¨- ¡_›Ì  @€ @€ @  ýþÍÝ‘ @€ @€ÔÐЯMæ @€ @€ ~ÿæîH€ @€ @€j ükƒEI>aiÐIEND®B`‚python-einx-0.3.0/docs/source/images/stage3-tree.png000077500000000000000000002174531505216034200223370ustar00rootroot00000000000000‰PNG  IHDR(¾\ïè IDATx^ì üVE½ÿs_@1DEäº àÅÌRq¿Ü’^b.]ÍÊÔJô’»©‘-—{](·k^×4Ñ,sÉPQ(µ«â‚P¢! ’†æŠþ™ÇÎç9ÏœçÌœs朙9ïçõú½Ðßo–_sf¾g>ÏÌtûhÅGð @€€#žùíXG,Á ¸O`ë}nvßH,„ @€@nô @€ — P¸ä lq…ëÂ>@€ N(è€ @N@ pÊã8 Ç„y€ @ PÐA @€œ"€@á”;0ÆqŽ;ó @€(è€ @þˆ [n‚?Æc),xæáóZj@ ° œâ!@€¬`…U¼@€ `J”éëD¢NÞ¦­€ @ |áû˜B€ ¯ Pxå.Œ-™EÉÀ©€ @À* «x)€ @À”…)1Ò׉E¼M[!@€@ø(Â÷1-„ @^@ ðÊ][2Š’S @€€UVñR8 @€€) Sb¤¯Š:y›¶B€ ð P„ïcZ@€¼"€@á•»0¶d%§:@€ «(¬â¥p@€ S¦ÄH_'uò6m… @á@ ßÇ´€ xEÂ+walÉ(JNu€ @V PXÅKဠ@¦(L‰‘¾N(êämÚ @€Â'€@¾i! @ðŠ…WîÂØ’ P” œê @€¬@ °Š—Â!@€L P˜#} PÔÉÛ´€ „O"|ÓB@€ à ¯Ü…±%@ (8ÕA€ X%€@a/…C€ ˜@ 0%Fú:@ ¨“·i+ @ŸEø>¦…€ @À+^¹ cK&€@Q2pªƒ @°JÂ*^ ‡ @0%€@aJŒôu"€@Q'oÓV@€ >Šð}L !@€€W(¼rÆ–L¢dàT@€ `•…U¼@€ `J”éëD¢NÞ¦­€ @ |áû˜B€ ¯ Pxå.Œ-™EÉÀ©€ @À* «x)€ @À”@Нs¶¸êÚ;ZýßÔëÅð¡ƒM1j¥ïÖ}DKº®ž(Æí¿—VÞ*Ýxë=NÛWŠ2(S @€@Y(Ê"M=€ @Z((â剧ž;ìüUÑ{£ Ä¢¹÷jõ£P!P„êYÚ@€êI¢ž~§Õ€ @ÀYÍsèÈqbƬ9_!P@áìÐ…a€ @ Pd€F@€ {((¢ÞõÃó®§ýpRWgC @ °7òP2 @€@(ª N€ @‰(Ê(\ï†íb…ë½û @€L P˜Ð"- @€€uQ'C @ °>àP @€@¥(*ÅOå€ @qÉã;(3!@€B"€@’7i  @€ECM€ @Ð €@¡‰$€ @å@ ðG ˆÁï%7\=QŒÛ¯Ì‡#žÚѱƒ"sw"# @€€ƒ(t &A€ :@ (O èÖ}DKWÓös¬¸çþéÚÝtû퉧¦Ý¨•þëÇœ-®º¶•A§Œÿ7õz1|è`­²CH„@‚i @€@D‚¾@€ à ·Š>÷‹_]š©Ïèˆ Ñ"Pdêzd‚ @p”…£ŽÁ,@€ PWî CGŽ3fÍÉÜ5{o´X4÷ÞŽù((2w02B€ xGÂ;—a0 @›…›Å·Þ#:ü”¶Î—t,TÒ1PgŸz”8u‰"ìŽÖA€ 4@  ?@€ 8EÂM".8èì†P‰&÷QpIvû£ÉON W@€ “EN€d‡ @(–…›EüQ¯Pí¤øèïku  ­ŽB"@€ o Pxë: ‡ @a@ K hÞE¡+LD="ÌQŽVA€ D(è € @N@ ðC Ð9â)oÇB @ ÈÛ‡È@€ à6 ·ýƒu€ @ v(Ü(’.½Nº$»ˆŽ‹@@QD?¢ @€ w P¸ë,ƒ @µ$€@á¦@¡ºð:ÞAM.ÀÖéÜ:ý„4€ @þ@ ð×wX@€‚$€@á¦@!;ÛБãÄŒYs´ûÝÿM½^ :X;}+îž|±vßC @ Ðî,$„ @ð’…—nÃh@€ . ÷ŠæÞgz쓉H@@îHGË @€$ ú @€€S(ü(š;Ï×9[\um«ýªÎuÃÕŸý÷Jíw©„€ @^@ ðÚ}@€Â#€@á¯@ï}î)¿º´­“n¿Ý ñÔ´S;/Ej'! @€€×(¼vÆC€ ð P„#PD½³[÷-U÷Òl ŠðF8Z@€ ÐL‚þ@€ à wŠH0ТŽäï?úûã©ý"µ“€ @Àk^»ã!@€@x(Ü(TÃÿM½^ :X«Þxë=â ÃOiI‹@¡…®-Ñ3Ÿ×ò»­÷¹9[Aä‚ @€€(p&@€ ¬$€@áž@ñÄSωvþjK7ÝkÔgÅÝ“/Öêºñ˳¹ƒB ›2Evvä„ @p…{>Á"@€ Pkî ²C9N̘5§¥oqè—Ä•—œÖ±¿ªvOèä“…fÝyò„@²wi @¨ŠúùœC€ § P¸)P¨ŽyŠ:RÒqO*QCæÑ9Þ)I 0Ù¹átGÏhEFpdƒ @p’…“nÁ(@€ P_­E=áìS§N8¢­¨nÝG´ü'Šqûï•Xe’à`bc’-ª2TGKÅÓ¥Ùlb›i(|ð6B€ è@ Ð%E:@€ R P¸+PÈÐgàžbñ«K3õÝ£š OELLF;– Â1‡` @€@.¹ð‘€ @ hn ÒßñK¯uú@Ò1P:yã;=šód=têt5 …«žÁ.@€ ,(²P# @€€5î ‘óU—X7wŒí·$žšvc!}eï1ÇŠ{îŸÞVV‘ub¨åB(,¦x@€ R P”Š›Ê @€ÒÔY HcÃß!€@A€ @‰EHÞ¤-€ @ 8‘&X#€@a -C€ T@¢èT @€ L‚ÞÏÇÃçµüqë}n @€¼%€@á­ë0€ „I"L¿Òªb°ƒ¢Ž”@€ à 7ü€€ @ÿ$€@AW€@2 z @€@H(Bò&m @@ À‰4Á kh)€ @ @§J@€ dôó-/¿ö¡˜ÿêrñÒkËÅ‚¥êd#M ~½Öýû¬'úo´nãß-úõ€ @€¬@ °†–‚!@€²@ ÈB­>yÞ[.ÄmÓß<û~}]aK?·}?1æ Åê«~¢B+¨€ @ T¡z–vA€ O Pxê¸ÌžñâbòÞK–±[¢Ü]Uôê±VC¤Ø~`¯2«¥.@€ @ ¨“i" @ð‰…OÞ*ÏV)N\qßÛåUHMmŽüâvˆô @€ B PŠ“Â @€ò@ ÈK0¼üòX§sn}«mçÄ'û¬/o¶‘è³Áz¢WÏuÂkx-Z´ôM±dÅÏs/¾*^^ôz‹r'ÅI‡î¸â¸§U*°Œ*!@€B$€@¢Wi @ð˜…Çγdúͼ#~¦õΉ‘CˆÛnj©FŠ•Ÿý’˜öä_Z`ì4dcqàîƒ@€ B P‚‘B @€Š"€@QÉ0Ê™·h¹¸ðδ4fä°ÍĈmú‡Ñ@Ç[ñøÓóÅ´§^l±ò¸±ÃÅýz8n9æA€ ø@Â/a# @¨Š9[£©>ýž¸uú»])å±N_5D#'IŠ"pÛý3[Ž{:`×AâóÃ6)ªxÊ @¨1Š;Ÿ¦C€  P¸è•êlºî¡wÄŸ_y¼Ó¨O ÛnÙ·:ƒjXóÓ/,÷?:§«åŸÞ¦¯8x¯©! 
Numpy does not support automatic vectorization (``vmap``). einx implements a custom ``vmap`` for Numpy instead using a Python for-loop for testing and debugging purposes. This affects ``einx.vmap`` and ``einx.{index|get_at|set_at|...}``.

----

**Torch**: `https://pytorch.org/ `_

einx disables ``torch.compile`` (using ``torch.compiler.disable``) when JIT-compiling a call into a Python function and reenables it when executing the function.

----

**Jax**: `https://jax.readthedocs.io/ `_

----

**Tensorflow**: `https://www.tensorflow.org/ `_

einx does not support tensors with dynamic shapes (i.e. ``None`` in the shape).

----

**MLX**: `https://ml-explore.github.io/mlx `_

``einx.vmap`` and ``einx.{index|get_at|set_at|...}`` are currently not supported (``mx.vmap`` does not support all required primitives yet).

----

**Tinygrad**: `https://tinygrad.org/ `_

``einx.vmap`` and ``einx.{index|get_at|set_at|...}`` are currently not supported.

----

**Dask**: `https://docs.dask.org/en/stable/array.html `_

``einx.vmap`` and ``einx.{index|get_at|set_at|...}`` are currently not supported.

python-einx-0.3.0/docs/source/more/gotchas.rst000066400000000000000000000055061505216034200213540ustar00rootroot00000000000000
Gotchas
#######

1. **Unnamed axes are always unique** and cannot refer to the same axis in different expressions. E.g. ``3 -> 3`` refers to two different axes, both with length 3. This can lead to unexpected behavior in some cases: ``einx.sum("3 -> 3", x)`` will reduce the first ``3`` axis and insert a new axis broadcasted with length 3.

2. **Spaces in expressions are important.** E.g. in ``(a b)...`` the ellipsis repeats ``(a b)``, while in ``(a b) ...`` the ellipsis repeats a new axis that is inserted in front of it.

3. **einx.dot is not called einx.einsum** despite providing einsum-like functionality. This follows the general paradigm of naming functions after the elementary operation that is computed, and avoids confusion with ``einx.sum``.

4. **einx does not support dynamic shapes** that can occur for example when tracing some types of functions (e.g. `tf.unique `_) in Tensorflow using ``tf.function``. As a workaround, the shape can be specified statically, e.g. using `tf.ensure_shape `_. In Keras, when constructing a model using the `functional API `_, the batch size argument is dynamic by default and should be specified with some dummy value, e.g. ``keras.Input(shape=(...), batch_size=1)``.

5. **einx retraces functions when called with different input shapes.** This can lead to an unexpected slowdown when calling an einx function many times with different input shapes. The problem typically does not arise in frameworks like Jax `where the usage of dynamic shapes is limited `_.

6. **einx implements a custom vmap for Numpy using Python loops**.
This is slower than ``vmap`` in other backends, but is included for debugging and testing purposes.

7. **In einx.nn layers, weights are created on the first forward pass** (see :doc:`Tutorial: Neural networks `). This is common practice in jax-based frameworks like Flax and Haiku where the model is initialized using a forward pass on a dummy batch. In other frameworks, an initial forward pass should be added before using the model. (In some circumstances the first actual training batch might be sufficient, but it is safer to always include the initial forward pass.) In PyTorch, `torch.compile `_ should only be applied after the initial forward pass. See the `training scripts `_ for examples on how to include the dummy forward pass.

8. **einx.nn.equinox does not support stateful layers** since Equinox requires the shape of states to be known in the layer's ``__init__`` method.

python-einx-0.3.0/docs/source/more/jit.rst000066400000000000000000000065531505216034200205110ustar00rootroot00000000000000
Just-in-time compilation
########################

When an einx function is invoked, the required backend operations are determined from the given einx expressions and traced into graph representation. The graph is then just-in-time compiled into a regular Python function using Python's `exec() `_.

As a simple example, consider the following einx call:

>>> x = np.zeros((10, 10))
>>> einx.sum("a [b]", x).shape
(10,)

We can inspect the compiled function by passing ``graph=True``:

>>> graph = einx.sum("a [b]", x, graph=True)
>>> print(graph)
import numpy as np
def op0(i0):
    x0 = np.sum(i0, axis=1)
    return x0

einx passes this string to `exec() `_ to just-in-time compile the function. It then invokes the function using the required arguments. The traced function is cached, such that subsequent calls with the same signature of inputs can reuse it and incur no overhead other than for cache lookup.

The function signature includes the types of the input arguments as well as their shape. einx therefore retraces a function every time it is called with different input shapes. The environment variable ``EINX_WARN_ON_RETRACE`` can be used to print a warning when excessive retracing takes place. For example, ``EINX_WARN_ON_RETRACE=10`` will issue a warning when a function is retraced 10 times from the same call site.

When using just-in-time compilation like `jax.jit `_, einx incurs zero overhead (other than during initialization).

Inspecting operations
---------------------

In addition to reducing the overhead, the just-in-time compiled function also allows verifying that the correct backend calls are made. For example:

A sum-reduction that requires a reshape operation:

>>> x = np.zeros((10, 10))
>>> print(einx.sum("b... (g [c])", x, g=2, graph=True))
import numpy as np
def op0(i0):
    x0 = np.reshape(i0, (10, 2, 5))
    x1 = np.sum(x0, axis=2)
    return x1

A call to ``einx.dot`` that forwards computation to ``np.einsum``:

>>> x = np.zeros((10, 10))
>>> print(einx.dot("b... 
(g [c1->c2])", x, np.ones, g=2, c2=8, graph=True)) import numpy as np def op0(i0, i1): x0 = np.reshape(i0, (10, 2, 5)) x1 = np.einsum("abc,cd->abd", x0, i1((5, 8))) x2 = np.reshape(x1, (10, 16)) return x2 A call to ``einx.get_at`` that applies ``jax.vmap`` to handle batch axes: >>> x = jnp.zeros((4, 128, 128, 3)) >>> y = jnp.zeros((4, 1024, 2), "int32") >>> print(einx.get_at("b [h w] c, b p [2] -> b p c", x, y, graph=True)) import jax def op1(i0, i1): x0 = i1[:, 0] x1 = i1[:, 1] x2 = i0[x0, x1] return (x2,) x3 = jax.vmap(op1, in_axes=(0, 0), out_axes=(0,)) x4 = jax.vmap(x3, in_axes=(3, None), out_axes=(2,)) def op0(i0, i1): x0, = x4(i0, i1) return x0 An operation that requires concatenation of tensors: >>> x = np.zeros((10, 10, 3)) >>> y = np.ones((10, 10)) >>> print(einx.rearrange("h w c, h w -> h w (c + 1)", x, y, graph=True)) import numpy as np def op0(i0, i1): x0 = np.reshape(i1, (10, 10, 1)) x1 = np.concatenate([i0, x0], axis=2) return x1 The just-in-time compiled function can also be called directly with the correct arguments to avoid a cache lookup: >>> graph = einx.rearrange("h w c, h w -> h w (c + 1)", x, y, graph=True) >>> z = graph(x, y) >>> z.shape (10, 10, 4)python-einx-0.3.0/docs/source/more/related.rst000066400000000000000000000021701505216034200213360ustar00rootroot00000000000000Related projects ################ * `einops `_ * `einsum `_ * `eindex `_ * `torchdim `_ * `einindex `_ * `einshape `_ * `einop `_ * `eingather `_ * `einshard `_ * `shardops `_ * `eins `_ * `Named axes in PyTorch `_ * `Named axes in Jax `_ * `Named axes in Penzai `_ * `Dex `_ * `Named Tensor Notation `_ * `Tensor Considered Harmful `_python-einx-0.3.0/einx/000077500000000000000000000000001505216034200147355ustar00rootroot00000000000000python-einx-0.3.0/einx/__init__.py000066400000000000000000000003601505216034200170450ustar00rootroot00000000000000from . import tracer from .tracer import jit, lru_cache, trace from . import traceback_util from . import tree_util from . import backend from .types import * from . import expr from .op import * from . import nn from . import experimental python-einx-0.3.0/einx/backend/000077500000000000000000000000001505216034200163245ustar00rootroot00000000000000python-einx-0.3.0/einx/backend/__init__.py000066400000000000000000000005121505216034200204330ustar00rootroot00000000000000from .register import register_for_module, register, get, backends, numpy from .base import Backend, get_default from . import _numpy as numpy from . import _torch as torch from . import _tensorflow as tensorflow from . import _jax as jax from . import _dask as dask from . import _mlx as mlx from . 
import _tinygrad as tinygrad python-einx-0.3.0/einx/backend/_dask.py000066400000000000000000000076131505216034200177660ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial def create(): import dask.array as da tda = tracer.import_("dask.array", "da") class dask(Backend): name = "dask" tensor_types = [da.Array] @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( tda.asarray, args=[tensor], output=einx.tracer.Tensor(shape), ) @staticmethod @einx.trace def reshape(tensor, shape): if einx.tracer.is_scalar(tensor): tensor = tda.asarray(tensor) return op.reshape(tda.reshape)(tensor, shape) transpose = op.transpose(tda.transpose) broadcast_to = op.broadcast_to(tda.broadcast_to) einsum = op.einsum(tda.einsum) arange = op.arange(tda.arange) @staticmethod @einx.trace def stack(tensors, axis=0): tensors = [tda.asarray(t) if einx.tracer.is_scalar(t) else t for t in tensors] return op.stack(tda.stack)(tensors, axis=axis) @staticmethod @einx.trace def concatenate(tensors, axis=0): tensors = [tda.asarray(t) if einx.tracer.is_scalar(t) else t for t in tensors] return op.concatenate(tda.concatenate)(tensors, axis=axis) add = associative_binary_to_nary(op.elementwise(tda.add)) subtract = op.elementwise(tda.subtract) multiply = associative_binary_to_nary(op.elementwise(tda.multiply)) true_divide = op.elementwise(tda.true_divide) floor_divide = op.elementwise(tda.floor_divide) divide = op.elementwise(tda.divide) logical_and = associative_binary_to_nary(op.elementwise(tda.logical_and)) logical_or = associative_binary_to_nary(op.elementwise(tda.logical_or)) where = op.elementwise(tda.where) less = op.elementwise(tda.less) less_equal = op.elementwise(tda.less_equal) greater = op.elementwise(tda.greater) greater_equal = op.elementwise(tda.greater_equal) equal = op.elementwise(tda.equal) not_equal = op.elementwise(tda.not_equal) maximum = associative_binary_to_nary(op.elementwise(tda.maximum)) minimum = associative_binary_to_nary(op.elementwise(tda.minimum)) sum = op.reduce(tda.sum) mean = op.reduce(tda.mean) var = op.reduce(tda.var) std = op.reduce(tda.std) prod = op.reduce(tda.prod) count_nonzero = op.reduce(tda.count_nonzero) any = op.reduce(tda.any) all = op.reduce(tda.all) min = op.reduce(tda.min) max = op.reduce(tda.max) log = op.elementwise(tda.log) exp = op.elementwise(tda.exp) sqrt = op.elementwise(tda.sqrt) square = op.elementwise(tda.square) @staticmethod @einx.trace def get_at(tensor, coordinates): return tensor[coordinates] @staticmethod @einx.trace def set_at(tensor, coordinates, updates): return tensor.__setitem__(coordinates, updates) @staticmethod @einx.trace def add_at(tensor, coordinates, updates): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) flip = op.keep_shape(tda.flip) roll = op.keep_shape(tda.roll) @staticmethod @einx.trace def vmap(*args, **kwargs): raise NotImplementedError( "Functions relying on vmap are not supported for the dask backend" ) return dask() python-einx-0.3.0/einx/backend/_jax.py000066400000000000000000000076671505216034200176370ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial def create(): tjax = tracer.import_("jax") 
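    # The two `tracer.import_` calls above and below do not import the framework here;
    # they return tracer handles whose attribute accesses are recorded, so that the
    # Python source emitted by einx's just-in-time compiler contains the import itself
    # (the compiled graphs shown in the jit docs above start with e.g. `import numpy as np`).
    # A rough sketch of the idea, not the actual einx.tracer implementation; the name
    # `TracedModule` and its fields are hypothetical:
    #
    #   def import_(module, alias=None):
    #       stmt = f"import {module}" + (f" as {alias}" if alias else "")
    #       # attribute access on the returned handle (e.g. `tjnp.reshape`) is later
    #       # rendered as `jnp.reshape(...)` in the generated source
    #       return TracedModule(import_statement=stmt, name=alias or module)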
tjnp = tracer.import_("jax.numpy", "jnp") import jax.numpy as jnp import jax as jax_ class jax(Backend): name = "jax" tensor_types = [jnp.ndarray] @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( tjnp.asarray, args=[tensor], output=einx.tracer.Tensor(shape), ) reshape = op.reshape(tjnp.reshape) transpose = op.transpose(tjnp.transpose) broadcast_to = op.broadcast_to(tjnp.broadcast_to) einsum = op.einsum(tjnp.einsum) arange = op.arange(tjnp.arange) stack = op.stack(tjnp.stack) concatenate = op.concatenate(tjnp.concatenate) add = associative_binary_to_nary(op.elementwise(tjnp.add)) subtract = op.elementwise(tjnp.subtract) multiply = associative_binary_to_nary(op.elementwise(tjnp.multiply)) true_divide = op.elementwise(tjnp.true_divide) floor_divide = op.elementwise(tjnp.floor_divide) divide = op.elementwise(tjnp.divide) logical_and = associative_binary_to_nary(op.elementwise(tjnp.logical_and)) logical_or = associative_binary_to_nary(op.elementwise(tjnp.logical_or)) where = op.elementwise(tjnp.where) less = op.elementwise(tjnp.less) less_equal = op.elementwise(tjnp.less_equal) greater = op.elementwise(tjnp.greater) greater_equal = op.elementwise(tjnp.greater_equal) equal = op.elementwise(tjnp.equal) not_equal = op.elementwise(tjnp.not_equal) maximum = associative_binary_to_nary(op.elementwise(tjnp.maximum)) minimum = associative_binary_to_nary(op.elementwise(tjnp.minimum)) sum = op.reduce(tjnp.sum) mean = op.reduce(tjnp.mean) var = op.reduce(tjnp.var) std = op.reduce(tjnp.std) prod = op.reduce(tjnp.prod) count_nonzero = op.reduce(tjnp.count_nonzero) any = op.reduce(tjnp.any) all = op.reduce(tjnp.all) min = op.reduce(tjnp.min) max = op.reduce(tjnp.max) logsumexp = op.reduce(tjax.scipy.special.logsumexp) log = op.elementwise(tjnp.log) exp = op.elementwise(tjnp.exp) sqrt = op.elementwise(tjnp.sqrt) rsqrt = op.elementwise(tjax.lax.rsqrt) square = op.elementwise(tjnp.square) @staticmethod @einx.trace def get_at(tensor, coordinates): return tensor[coordinates] @staticmethod @einx.trace def set_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].set, args=[updates], output=einx.tracer.Tensor(tensor.shape) ) @staticmethod @einx.trace def add_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].add, args=[updates], output=einx.tracer.Tensor(tensor.shape) ) @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].add, args=[-updates], output=einx.tracer.Tensor(tensor.shape) ) flip = op.keep_shape(tjnp.flip) roll = op.keep_shape(tjnp.roll) softmax = op.keep_shape(tjax.nn.softmax) log_softmax = op.keep_shape(tjax.nn.log_softmax) stop_gradient = op.keep_shape(tjax.lax.stop_gradient) vmap = op.vmap(tjax.vmap) class random: @einx.trace def bernoulli(rng, p, shape): return einx.tracer.apply( tjax.random.bernoulli, args=[rng, p, shape], output=einx.tracer.Tensor(shape), ) return jax() python-einx-0.3.0/einx/backend/_mlx.py000066400000000000000000000122301505216034200176330ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial def create(): import mlx.core as mx tmx = tracer.import_("mlx.core", "mx") def to_tuple(x): if isinstance(x, tuple): return x elif isinstance(x, list): return tuple(x) elif isinstance(x, np.ndarray): return tuple(x.tolist()) else: raise ValueError(f"Cannot convert {type(x)} to tuple") def to_dtype(x): if isinstance(x, str): if x 
== "bool": return mx.bool_ else: return vars(mx)[x] else: return x to_dtype2 = to_dtype class mlx(Backend): name = "mlx" tensor_types = [mx.array] to_dtype = staticmethod(to_dtype2) @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( tmx.array, args=[tensor], output=einx.tracer.Tensor(shape), ) @staticmethod @einx.trace def reshape(tensor, shape): if einx.tracer.is_scalar(tensor): tensor = tmx.array(tensor) return einx.tracer.apply( tmx.reshape, args=[tensor, list(to_tuple(shape))], output=einx.tracer.Tensor(shape) ) transpose = op.transpose(tmx.transpose) broadcast_to = op.broadcast_to(tmx.broadcast_to) @staticmethod @einx.trace def einsum(equation, *tensors): raise NotImplementedError("mlx does not support einsum yet") @staticmethod @einx.trace def arange(start, stop=None, step=None, dtype="int32"): args = [start] if stop is not None: args.append(stop) if step is not None: args.append(step) return op.arange(tmx.arange)(*args, dtype=to_dtype(dtype)) stack = op.stack(tmx.stack) concatenate = op.concatenate(tmx.concatenate) add = associative_binary_to_nary(op.elementwise(tmx.add)) subtract = op.elementwise(tmx.subtract) multiply = associative_binary_to_nary(op.elementwise(tmx.multiply)) true_divide = op.elementwise(tmx.divide) floor_divide = op.elementwise(tmx.floor_divide) divide = op.elementwise(tmx.divide) mod = op.elementwise(tmx.remainder) logical_and = associative_binary_to_nary(op.elementwise(tmx.logical_and)) logical_or = associative_binary_to_nary(op.elementwise(tmx.logical_or)) where = op.elementwise(tmx.where) less = op.elementwise(tmx.less) less_equal = op.elementwise(tmx.less_equal) greater = op.elementwise(tmx.greater) greater_equal = op.elementwise(tmx.greater_equal) equal = op.elementwise(tmx.equal) not_equal = op.elementwise(tmx.not_equal) maximum = associative_binary_to_nary(op.elementwise(tmx.maximum)) minimum = associative_binary_to_nary(op.elementwise(tmx.minimum)) sum = op.reduce(tmx.sum) mean = op.reduce(tmx.mean) var = op.reduce(tmx.var) prod = op.reduce(tmx.prod) count_nonzero = op.reduce(tmx.count_nonzero) any = op.reduce(tmx.any) all = op.reduce(tmx.all) min = op.reduce(tmx.min) max = op.reduce(tmx.max) logsumexp = op.reduce(tmx.logsumexp) log = op.elementwise(tmx.log) exp = op.elementwise(tmx.exp) sqrt = op.elementwise(tmx.sqrt) rsqrt = op.elementwise(tmx.rsqrt) square = op.elementwise(tmx.square) @staticmethod @einx.trace def get_at(tensor, coordinates): return tensor[coordinates] @staticmethod @einx.trace def set_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].set, args=[updates], output=einx.tracer.Tensor(tensor.shape) ) @staticmethod @einx.trace def add_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].add, args=[updates], output=einx.tracer.Tensor(tensor.shape) ) @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): return einx.tracer.apply( tensor.at[coordinates].add, args=[-updates], output=einx.tracer.Tensor(tensor.shape) ) softmax = op.keep_shape(tmx.softmax) stop_gradient = op.keep_shape(tmx.stop_gradient) # vmap = op.vmap(tmx.vmap) @staticmethod def vmap(op, in_axes, out_axes, input_shapes=None, output_shapes=None): raise NotImplementedError("mlx does not fully support vmap yet") sqrt = tmx.sqrt rsqrt = tmx.rsqrt square = tmx.square class random: @einx.trace def bernoulli(rng, p, shape): einx.tracer.apply( tmx.random.bernoulli, args=[p, shape, rng], output=einx.tracer.Tensor(shape), ) return mlx() 
python-einx-0.3.0/einx/backend/_numpy.py000066400000000000000000000067721505216034200202210ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import numpy as np import einx, types from functools import partial def create(): tnp = tracer.import_("numpy", "np") class numpy(Backend): name = "numpy" tensor_types = [np.ndarray, np.generic, list, tuple, int, float, bool] _get_tests = staticmethod(_get_tests) @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( tnp.asarray, args=[tensor], output=einx.tracer.Tensor(shape), ) reshape = op.reshape(tnp.reshape) transpose = op.transpose(tnp.transpose) broadcast_to = op.broadcast_to(tnp.broadcast_to) einsum = op.einsum(tnp.einsum) arange = op.arange(tnp.arange) stack = op.stack(tnp.stack) concatenate = op.concatenate(tnp.concatenate) add = associative_binary_to_nary(op.elementwise(tnp.add)) subtract = op.elementwise(tnp.subtract) multiply = associative_binary_to_nary(op.elementwise(tnp.multiply)) true_divide = op.elementwise(tnp.true_divide) floor_divide = op.elementwise(tnp.floor_divide) divide = op.elementwise(tnp.divide) logical_and = associative_binary_to_nary(op.elementwise(tnp.logical_and)) logical_or = associative_binary_to_nary(op.elementwise(tnp.logical_or)) where = op.elementwise(tnp.where) less = op.elementwise(tnp.less) less_equal = op.elementwise(tnp.less_equal) greater = op.elementwise(tnp.greater) greater_equal = op.elementwise(tnp.greater_equal) equal = op.elementwise(tnp.equal) not_equal = op.elementwise(tnp.not_equal) maximum = associative_binary_to_nary(op.elementwise(tnp.maximum)) minimum = associative_binary_to_nary(op.elementwise(tnp.minimum)) sum = op.reduce(tnp.sum) mean = op.reduce(tnp.mean) var = op.reduce(tnp.var) std = op.reduce(tnp.std) prod = op.reduce(tnp.prod) count_nonzero = op.reduce(tnp.count_nonzero) any = op.reduce(tnp.any) all = op.reduce(tnp.all) min = op.reduce(tnp.min) max = op.reduce(tnp.max) log = op.elementwise(tnp.log) exp = op.elementwise(tnp.exp) sqrt = op.elementwise(tnp.sqrt) square = op.elementwise(tnp.square) @staticmethod @einx.trace def get_at(tensor, coordinates): return tensor[coordinates] @staticmethod @einx.trace def set_at(tensor, coordinates, updates): return tensor.__setitem__(coordinates, updates) @staticmethod @einx.trace def add_at(tensor, coordinates, updates): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) flip = op.keep_shape(tnp.flip) roll = op.keep_shape(tnp.roll) numpy.vmap = op.vmap(partial(vmap_forloop, backend=numpy)) return numpy() def _get_tests(): test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": np.full(shape, value, dtype=dtype), to_tensor=np.asarray, to_numpy=lambda x: x, ) return [(create(), test)] python-einx-0.3.0/einx/backend/_tensorflow.py000066400000000000000000000247221505216034200212460ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial def create(): import tensorflow as tf import tensorflow.experimental.numpy as tnp ttf = tracer.import_("tensorflow", "tf") ttnp = tracer.import_("tensorflow.experimental.numpy", "tnp") def _broadcast_static_shape(shape1, shape2): assert len(shape1) == len(shape2) and all( s1 == s2 or s1 == 1 or s2 == 1 for 
s1, s2 in zip(shape1, shape2) ) return tuple(max(s1, s2) for s1, s2 in zip(shape1, shape2)) class tensorflow(Backend): name = "tensorflow" tensor_types = [tf.Tensor] @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( ttf.convert_to_tensor, args=[tensor], output=einx.tracer.Tensor(shape), ) reshape = op.reshape(ttnp.reshape) transpose = op.transpose(ttnp.transpose) broadcast_to = op.broadcast_to(ttnp.broadcast_to) @staticmethod @einx.trace def einsum(equation, *tensors): return op.einsum(ttnp.einsum)(equation, *tensors, optimize="optimal") arange = op.arange(ttnp.arange) stack = op.stack(ttnp.stack) concatenate = op.concatenate(ttnp.concatenate) add = associative_binary_to_nary(op.elementwise(ttnp.add)) subtract = op.elementwise(ttnp.subtract) multiply = associative_binary_to_nary(op.elementwise(ttnp.multiply)) true_divide = op.elementwise(ttnp.true_divide) floor_divide = op.elementwise(ttnp.floor_divide) divide = op.elementwise(ttnp.divide) logical_and = associative_binary_to_nary(op.elementwise(ttnp.logical_and)) logical_or = associative_binary_to_nary(op.elementwise(ttnp.logical_or)) where = op.elementwise(ttnp.where) less = op.elementwise(ttnp.less) less_equal = op.elementwise(ttnp.less_equal) greater = op.elementwise(ttnp.greater) greater_equal = op.elementwise(ttnp.greater_equal) equal = op.elementwise(ttnp.equal) not_equal = op.elementwise(ttnp.not_equal) maximum = associative_binary_to_nary(op.elementwise(ttnp.maximum)) minimum = associative_binary_to_nary(op.elementwise(ttnp.minimum)) sum = op.reduce(ttnp.sum) mean = op.reduce(ttnp.mean) var = op.reduce(ttnp.var) std = op.reduce(ttnp.std) prod = op.reduce(ttnp.prod) count_nonzero = op.reduce(ttnp.count_nonzero) any = op.reduce(ttnp.any) all = op.reduce(ttnp.all) min = op.reduce(ttnp.min) max = op.reduce(ttnp.max) logsumexp = op.reduce(ttf.math.reduce_logsumexp) log = op.elementwise(ttnp.log) exp = op.elementwise(ttnp.exp) sqrt = op.elementwise(ttnp.sqrt) rsqrt = op.elementwise(ttf.math.rsqrt) square = op.elementwise(ttnp.square) @classmethod @einx.trace def get_at(backend, tensor, coordinates): coordinates, _ = backend._prepare_coordinates_and_update(coordinates, None) if isinstance(coordinates, tuple): out_shape = coordinates[0].shape coordinates = ttf.stack(coordinates, axis=-1) else: out_shape = coordinates.shape[:-1] return einx.tracer.apply( ttf.gather_nd, args=[tensor, coordinates], output=einx.tracer.Tensor(out_shape), ) @classmethod @einx.trace def _prepare_coordinates_and_update(backend, coordinates, updates): assert updates is None or isinstance(updates, einx.tracer.Tensor) if isinstance(coordinates, tuple): assert all(isinstance(c, einx.tracer.Tensor) for c in coordinates) shape = coordinates[0].shape for c in coordinates[1:]: shape = _broadcast_static_shape(shape, c.shape) coordinates = [backend.broadcast_to(c, shape) for c in coordinates] coordinates = backend.stack(coordinates, axis=-1) else: assert isinstance(coordinates, einx.tracer.Tensor) coordinates = coordinates[(slice(None),) * (coordinates.ndim - 1) + (None,)] coordinates = coordinates[..., None] assert updates is None or updates.ndim + 1 == coordinates.ndim # Broadcast to common shape if updates is None: shape = coordinates.shape[:-1] else: shape = _broadcast_static_shape(updates.shape, coordinates.shape[:-1]) coordinates = backend.broadcast_to(coordinates, shape + coordinates.shape[-1:]) if updates is not None: updates = backend.broadcast_to(updates, shape) return coordinates, updates @classmethod @einx.trace def 
set_at(backend, tensor, coordinates, updates): coordinates, updates = backend._prepare_coordinates_and_update(coordinates, updates) return einx.tracer.apply( ttf.tensor_scatter_nd_update, args=[tensor, coordinates, updates], output=einx.tracer.Tensor(tensor.shape), ) @classmethod @einx.trace def add_at(backend, tensor, coordinates, updates): coordinates, updates = backend._prepare_coordinates_and_update(coordinates, updates) return einx.tracer.apply( ttf.tensor_scatter_nd_add, args=[tensor, coordinates, updates], output=einx.tracer.Tensor(tensor.shape), ) @classmethod @einx.trace def subtract_at(backend, tensor, coordinates, updates): coordinates, updates = backend._prepare_coordinates_and_update(coordinates, updates) return einx.tracer.apply( ttf.tensor_scatter_nd_sub, args=[tensor, coordinates, updates], output=einx.tracer.Tensor(tensor.shape), ) @staticmethod @einx.trace def flip(x, axis): if isinstance(axis, int): axis = [axis] return op.keep_shape(ttf.reverse)(x, axis) @staticmethod @einx.trace def roll(x, axis, shift): if isinstance(axis, int): axis = [axis] if isinstance(shift, int): shift = [shift] return op.keep_shape(ttf.roll)(x, tuple(shift), axis=tuple(axis)) @staticmethod @einx.trace def softmax(x, axis): if isinstance(axis, (list, tuple)): if len(axis) != 1: raise ValueError( "Tensorflow only supports softmax along a single axis, " f"got {len(axis)} axes" ) axis = axis[0] return op.keep_shape(ttf.nn.softmax)(x, axis=axis) @staticmethod @einx.trace def log_softmax(x, axis): if isinstance(axis, (list, tuple)): if len(axis) != 1: raise ValueError( "Tensorflow only supports log_softmax along a single axis, " f"got {len(axis)} axes" ) axis = axis[0] return op.keep_shape(ttf.nn.log_softmax)(x, axis=axis) sqrt = op.keep_shape(ttf.math.sqrt) rsqrt = op.keep_shape(ttf.math.rsqrt) square = op.keep_shape(ttnp.square) stop_gradient = op.keep_shape(ttf.stop_gradient) @staticmethod def vmap(op, in_axes, out_axes, input_shapes, output_shapes): @einx.trace def inner(*args): # TODO: suboptimal (?) implementation of vmap in tensorflow that transposes the # vmapped axis to the front and calls tf.vectorized_map. Possible optimization: # Transpose only once for multiple vmaps? 
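# Illustrative shape walk-through (hypothetical example values): with
# in_axes=(1, None) and arguments of shapes (3, 8, 5) and (5, 7), the code below
# permutes the first argument to (8, 3, 5) (mapped axis moved to the front), adds
# a leading tf.newaxis to the second, applies tf.vectorized_map over the leading
# axis, and finally transposes each result so the mapped axis lands at its out_axis.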
if len(args) != len(in_axes): raise ValueError(f"Expected {len(in_axes)} arguments, got {len(args)}") value = {arg.shape[axis] for arg, axis in zip(args, in_axes) if axis is not None} if len(value) != 1: raise ValueError( f"Expected all arguments to have same size along vmap axis, got {value}" ) value = value.pop() # Move vmapped axes to front xs = [] for arg, axis in zip(args, in_axes): if axis is not None: if axis != 0: perm = [axis] + [a for a in range(len(arg.shape)) if a != axis] arg = einx.tracer.op.transpose(ttnp.transpose)(arg, perm) else: arg = arg[tf.newaxis] xs.append(arg) op2 = einx.trace( lambda xs: op(*xs), args=[[einx.tracer.Tensor(x.shape[1:]) for x in xs]] ) xs = einx.tracer.apply( ttf.vectorized_map, args=[op2, xs], output=[einx.tracer.Tensor(shape) for shape in output_shapes], ) if len(xs) != len(out_axes): raise ValueError( f"Expected {len(out_axes)} arguments from vmapped function, got {len(xs)}" ) # Move vmapped axis to out_axis xs = [ einx.tracer.op.transpose(ttnp.transpose)( x, [ (a + 1 if a < out_axis else (0 if a == out_axis else a)) for a in range(len(x.shape)) ], ) for x, out_axis in zip(xs, out_axes) ] return tuple(xs) return inner class random: @einx.trace def bernoulli(rng, p, shape): return ( einx.tracer.apply( ttf.random.uniform, args=[shape], kwargs={"minval": 0.0, "maxval": 1.0, "dtype": "float32", "seed": rng}, output=einx.tracer.Tensor(shape), ) <= p ) return tensorflow() python-einx-0.3.0/einx/backend/_tinygrad.py000066400000000000000000000164601505216034200206650ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial import functools def create(): tTensor = tracer.import_("Tensor", from_="tinygrad") tdtypes = tracer.import_("dtypes", from_="tinygrad") from tinygrad import Tensor, dtypes def scalar_to_tensor(x): if isinstance(x, (einx.tracer.Scalar, float, int)): return einx.tracer.apply( tTensor, args=[x], output=einx.tracer.Tensor([]), ) else: return x def elementwise(func, convert_all_to_tensor=False): @einx.trace @functools.wraps(func) def outer(*args): if convert_all_to_tensor: args = [scalar_to_tensor(a) for a in args] else: args = [a for a in args] args[0] = scalar_to_tensor(args[0]) return op.elementwise(func)(*args) return outer def reduce(func): @einx.trace @functools.wraps(func) def reduce(tensor, axis=None, **kwargs): keepdims = kwargs.get("keepdims", False) if axis is None: shape = () else: axes = [axis] if isinstance(axis, int) else axis shape = list(tensor.shape) if keepdims: for a in axes: shape[a] = 1 else: for a in sorted(axes, reverse=True): del shape[a] kwargs = {**kwargs, **{"axis": axis}} if "keepdims" in kwargs: kwargs["keepdim"] = kwargs.pop("keepdims") return tracer.apply(func, args=[tensor], kwargs=kwargs, output=tracer.Tensor(shape)) return reduce def to_dtype(x): if isinstance(x, str): return getattr(dtypes, x) else: return x to_dtype2 = to_dtype class tinygrad(Backend): name = "tinygrad" tensor_types = [Tensor] to_dtype = staticmethod(to_dtype2) @staticmethod @einx.trace def to_tensor(tensor, shape): return einx.tracer.apply( tTensor, args=[tensor], output=einx.tracer.Tensor(shape), ) reshape = op.reshape(tTensor.reshape) transpose = op.transpose(tTensor.permute) broadcast_to = op.broadcast_to(tTensor.expand) @classmethod @einx.trace def einsum(backend, equation, *tensors): x = equation.split("->") if len(x) != 2: raise ValueError("Invalid equation") inputs, output = x inputs = inputs.split(",") if len(inputs) != 
len(tensors): raise ValueError("Invalid equation") inputs = [x.strip().replace(" ", "") for x in inputs] tensors = [t for t in tensors] scalars = [] for i in list(range(len(inputs)))[::-1]: if (len(inputs[i]) > 0) != (len(tensors[i].shape) > 0): raise ValueError("Invalid equation") if len(inputs[i]) == 0: scalars.append(tensors[i]) inputs.pop(i) tensors.pop(i) if len(tensors) > 1: equation = ",".join(inputs) + "->" + output x = op.einsum(tTensor.einsum)(equation, *tensors) elif len(tensors) == 1: x = tensors[0] else: x = scalars[0] scalars = scalars[1:] for scalar in scalars: x = backend.multiply(x, scalar) return x @staticmethod @einx.trace def arange(n, dtype="int32"): if isinstance(dtype, str): dtype = getattr(tdtypes, dtype) return op.arange(tTensor.arange)(n, dtype=dtype) @staticmethod @einx.trace def concatenate(tensors, axis=0): shape = list(tensors[0].shape) shape[axis] = sum(tensor.shape[axis] for tensor in tensors) return tracer.apply( tTensor.cat, args=[*tensors], kwargs={"dim": axis}, output=tracer.Tensor(shape) ) add = associative_binary_to_nary(elementwise(tTensor.add)) subtract = elementwise(tTensor.sub) multiply = associative_binary_to_nary(elementwise(tTensor.mul)) true_divide = elementwise(tTensor.div) floor_divide = elementwise(partial(tTensor.div, upcast=False)) divide = elementwise(tTensor.div) logical_and = associative_binary_to_nary(elementwise(tTensor.mul)) logical_or = associative_binary_to_nary(elementwise(tTensor.add)) where = elementwise(tTensor.where) less = elementwise(tracer.Operator("<")) less_equal = elementwise(tracer.Operator("<=")) greater = elementwise(tracer.Operator(">")) greater_equal = elementwise(tracer.Operator(">=")) equal = elementwise(tracer.Operator("==")) not_equal = elementwise(tracer.Operator("!=")) maximum = associative_binary_to_nary(elementwise(tTensor.maximum)) minimum = associative_binary_to_nary(elementwise(tTensor.minimum)) sum = reduce(tTensor.sum) mean = reduce(tTensor.mean) var = reduce(tTensor.var) std = reduce(tTensor.std) count_nonzero = reduce(tTensor.sum) min = reduce(tTensor.min) max = reduce(tTensor.max) # tinygrad's logsumexp currently does not support multiple axes, so # we use our custom implementation instead: # logsumexp = reduce(tTensor.logsumexp) log = op.elementwise(tTensor.log) exp = op.elementwise(tTensor.exp) sqrt = op.elementwise(tTensor.sqrt) rsqrt = op.elementwise(tTensor.rsqrt) square = op.elementwise(tTensor.square) @staticmethod @einx.trace def get_at(tensor, coordinates): raise NotImplementedError() @staticmethod @einx.trace def set_at(tensor, coordinates, updates): raise NotImplementedError() @staticmethod @einx.trace def add_at(tensor, coordinates, updates): raise NotImplementedError() @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): raise NotImplementedError() flip = op.keep_shape(tTensor.flip) softmax = op.keep_shape(tTensor.softmax) log_softmax = op.keep_shape(tTensor.log_softmax) @staticmethod @einx.trace def stop_gradient(tensor): return tensor # TODO: set requires_grad to False? 
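# A possible follow-up to the TODO above (untested assumption, not verified against
# tinygrad): returning `tensor.detach()` would block gradients in the same spirit as
# the jax/tensorflow backends, since tinygrad's Tensor exposes a PyTorch-style
# detach() method.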
@staticmethod @einx.trace def vmap(op, in_axes, out_axes, input_shapes, output_shapes): raise NotImplementedError( "Functions relying on vmap are not supported for the tinygrad backend" ) class random: @einx.trace def bernoulli(rng, p, shape): return ( einx.tracer.apply( tTensor.rand, args=[*shape], output=einx.tracer.Tensor(shape), ) <= p ) return tinygrad() python-einx-0.3.0/einx/backend/_torch.py000066400000000000000000000357631505216034200201720ustar00rootroot00000000000000from .base import * import einx.tracer as tracer from einx.tracer.tensor import op import einx, types from functools import partial def create(): import torch as torch_ version = tuple(int(i) for i in torch_.__version__.split(".")[:2]) if version < (2, 0): message = "einx with PyTorch requires PyTorch version >= 2, but found " f"{torch_.__version__}. einx functions are disabled for PyTorch." print(f"WARNING: {message}") return ErrorBackend(message) @einx.trace def move_scalars_to_device(args, scalar_indices=None): device = None for arg in args: if isinstance(arg, einx.tracer.Tensor) and not isinstance(arg, einx.tracer.Scalar): device = arg.device break if device is None: raise ValueError("Failed to determine the PyTorch device placement of parameters") def to_tensor(i, x): if einx.tracer.is_scalar(x) and (scalar_indices is None or i in scalar_indices): return einx.tracer.apply( ttorch.asarray, args=[x], kwargs={"device": device}, output=einx.tracer.Tensor(()), ) else: return x return [to_tensor(i, arg) for i, arg in enumerate(args)] def move_scalars_to_device_in_elementwise(op, scalar_indices=None): @einx.trace @functools.wraps(op) def wrapper(*args, **kwargs): args = move_scalars_to_device(args, scalar_indices) return op(*args, **kwargs) return wrapper MARKER_DECORATED_CONSTRUCT_GRAPH = "__einx_decorated_construct_graph" ttorch = tracer.import_("torch") import torch as torch_ def to_tuple(x): if isinstance(x, tuple): return x elif isinstance(x, list): return tuple(x) elif isinstance(x, np.ndarray): return tuple(x.tolist()) else: raise ValueError(f"Cannot convert {type(x)} to tuple") to_tuple2 = to_tuple def to_dtype(x): if isinstance(x, str): return vars(torch_)[x] else: return x to_dtype2 = to_dtype if "compiler" in dir(torch_): tcompiler = ttorch.compiler compiler = torch_.compiler else: tcompiler = tracer.import_("torch._dynamo", "_dynamo") import torch._dynamo as compiler import torch._dynamo as _dynamo if "capture_func_transforms" in vars(_dynamo.config): # Allow torch.vmap to be used inside torch.compile _dynamo.config.capture_func_transforms = True class torch(Backend): name = "torch" tensor_types = [torch_.Tensor] function_name = ( "_call_impl" # Workaround for: https://github.com/pytorch/pytorch/issues/124269 ) to_tuple = staticmethod(to_tuple2) to_dtype = staticmethod(to_dtype2) @staticmethod @einx.trace def to_tensor(arg, shape): assert False @staticmethod @einx.trace def all_to_tensor(tensors, convert_scalars=False): device = None for tensor in tensors: if type(tensor) == einx.tracer.Tensor: device = tensor.device break if device is None: device = ttorch.device("cpu") def to_tensor(tensor): if isinstance(tensor, einx.tracer.TensorRequiringConversion) or ( convert_scalars and einx.tracer.is_scalar(tensor) ): return einx.tracer.apply( ttorch.asarray, args=[tensor], kwargs={"device": device}, output=einx.tracer.Tensor(tensor.shape), ) else: return tensor tensors = [to_tensor(tensor) for tensor in tensors] return tensors @staticmethod @einx.trace def reshape(tensor, shape): if einx.tracer.get_shape(tensor) 
== shape: return tensor else: return op.reshape(ttorch.reshape)(tensor, to_tuple(shape)) @staticmethod @einx.trace def transpose(tensor, perm): return op.transpose(ttorch.permute)(tensor, to_tuple(perm)) @staticmethod @einx.trace def broadcast_to(tensor, shape): return op.broadcast_to(ttorch.broadcast_to)(tensor, to_tuple(shape)) @staticmethod @einx.trace def einsum(equation, *tensors): tensors = move_scalars_to_device(tensors) return op.einsum(ttorch.einsum)(equation, *tensors) @staticmethod @einx.trace def arange(n, dtype="int32"): return op.arange(ttorch.arange)(n, dtype=to_dtype(dtype)) stack = op.stack(ttorch.stack) concatenate = op.concatenate(ttorch.concatenate) add = associative_binary_to_nary(op.elementwise(ttorch.add)) subtract = op.elementwise(ttorch.subtract) multiply = associative_binary_to_nary(op.elementwise(ttorch.multiply)) true_divide = op.elementwise(ttorch.true_divide) floor_divide = op.elementwise(ttorch.floor_divide) divide = op.elementwise(ttorch.divide) logical_and = move_scalars_to_device_in_elementwise( associative_binary_to_nary(op.elementwise(ttorch.logical_and)) ) logical_or = move_scalars_to_device_in_elementwise( associative_binary_to_nary(op.elementwise(ttorch.logical_or)) ) where = move_scalars_to_device_in_elementwise( op.elementwise(ttorch.where), scalar_indices=[0] ) less = op.elementwise(einx.tracer.Operator("<")) less_equal = op.elementwise(einx.tracer.Operator("<=")) greater = op.elementwise(einx.tracer.Operator(">")) greater_equal = op.elementwise(einx.tracer.Operator(">=")) equal = op.elementwise(einx.tracer.Operator("==")) not_equal = op.elementwise(einx.tracer.Operator("!=")) maximum = move_scalars_to_device_in_elementwise( associative_binary_to_nary(op.elementwise(ttorch.maximum)) ) minimum = move_scalars_to_device_in_elementwise( associative_binary_to_nary(op.elementwise(ttorch.minimum)) ) sum = op.reduce(ttorch.sum) mean = op.reduce(ttorch.mean) var = op.reduce(ttorch.var) std = op.reduce(ttorch.std) prod = op.reduce(ttorch.prod) count_nonzero = op.reduce(ttorch.count_nonzero) any = op.reduce(ttorch.any) all = op.reduce(ttorch.all) min = op.reduce(ttorch.min) max = op.reduce(ttorch.max) logsumexp = op.reduce(ttorch.logsumexp) log = op.elementwise(ttorch.log) exp = op.elementwise(ttorch.exp) sqrt = op.elementwise(ttorch.sqrt) rsqrt = op.elementwise(ttorch.rsqrt) square = op.elementwise(ttorch.square) @staticmethod @einx.trace def get_at(tensor, coordinates): if isinstance(coordinates, tuple): if ( any(isinstance(c, (slice, int, einx.tracer.Scalar)) for c in coordinates) or coordinates[0].ndim > 0 ): return tensor[coordinates] else: # Fix for https://github.com/pytorch/functorch/issues/747 # Scalar coordinates cause problems with torch.vmap and throw an error: # "RuntimeError: vmap: It looks like you're calling .item() on a Tensor. # We don't support vmap over calling .item() on a Tensor ..." # As a workaround, we add a dummy dimension and remove it after the indexing # operation. 
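# Illustrative example (hypothetical shapes): for a 0-dim coordinate tensor c,
# `tensor[c]` would require c.item() under torch.vmap and trigger the error quoted
# above; indexing as `tensor[c[None]][0]` keeps c as a (batched) tensor throughout.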
return tensor[tuple(c[None] for c in coordinates)][0] else: if ( isinstance(coordinates, (slice, int, einx.tracer.Scalar)) or coordinates.ndim > 0 ): return tensor[coordinates] else: # See above return tensor[coordinates[None]][0] @staticmethod @einx.trace def set_at(tensor, coordinates, updates): if isinstance(coordinates, tuple): if ( any(isinstance(c, (slice, int, einx.tracer.Scalar)) for c in coordinates) or coordinates[0].ndim > 0 ): return tensor.__setitem__(coordinates, updates) else: # See above coordinates = tuple(c[None] for c in coordinates) updates = updates[None] return tensor.__setitem__(coordinates, updates) else: if ( isinstance(coordinates, (slice, int, einx.tracer.Scalar)) or coordinates.ndim > 0 ): return tensor.__setitem__(coordinates, updates) else: # See above coordinates = coordinates[None] updates = updates[None] return tensor.__setitem__(coordinates, updates) @staticmethod @einx.trace def add_at(tensor, coordinates, updates): if isinstance(coordinates, tuple): if ( any(isinstance(c, (slice, int, einx.tracer.Scalar)) for c in coordinates) or coordinates[0].ndim > 0 ): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) else: # See above coordinates = tuple(c[None] for c in coordinates) updates = updates[None] return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) else: if ( isinstance(coordinates, (slice, int, einx.tracer.Scalar)) or coordinates.ndim > 0 ): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) else: # See above coordinates = coordinates[None] updates = updates[None] return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__iadd__(updates) ) return tensor @staticmethod @einx.trace def subtract_at(tensor, coordinates, updates): if isinstance(coordinates, tuple): if ( any(isinstance(c, (slice, int, einx.tracer.Scalar)) for c in coordinates) or coordinates[0].ndim > 0 ): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) else: # See above coordinates = tuple(c[None] for c in coordinates) updates = updates[None] return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) else: if ( isinstance(coordinates, (slice, int, einx.tracer.Scalar)) or coordinates.ndim > 0 ): return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) else: # See above coordinates = coordinates[None] updates = updates[None] return tensor.__setitem__( coordinates, tensor.__getitem__(coordinates).__isub__(updates) ) return tensor @staticmethod @einx.trace def flip(tensor, axis): if isinstance(axis, int): axis = [axis] return op.keep_shape(ttorch.flip)(tensor, axis) @staticmethod @einx.trace def roll(tensor, shift, axis): if isinstance(axis, int): axis = [axis] return op.keep_shape(ttorch.roll)(tensor, shift, axis) @staticmethod @einx.trace def softmax(tensor, axis): if isinstance(axis, (list, tuple)): if len(axis) != 1: raise ValueError( "PyTorch only supports softmax along a single axis, " f"got {len(axis)} axes" ) axis = axis[0] return op.keep_shape(ttorch.softmax)(tensor, axis) @staticmethod @einx.trace def log_softmax(tensor, axis): if isinstance(axis, (list, tuple)): if len(axis) != 1: raise ValueError( "PyTorch only supports log_softmax along a single axis, " f"got {len(axis)} axes" ) axis = axis[0] return op.keep_shape(ttorch.nn.functional.log_softmax)(tensor, axis) @staticmethod @einx.trace def stop_gradient(x): raise 
NotImplementedError("stop_gradient is currently not implemented for PyTorch") @staticmethod @einx.trace def vmap(op, in_axes, out_axes, input_shapes, output_shapes): op = einx.tracer.apply( ttorch.vmap, args=[op], kwargs={ "in_dims": tuple(in_axes) if isinstance(in_axes, list) else in_axes, "out_dims": tuple(out_axes) if isinstance(out_axes, list) else out_axes, }, signature="vmap", output=einx.tracer.Function( output=[einx.tracer.Tensor(shape) for shape in output_shapes] ), ) op = einx.tracer.apply( tcompiler.allow_in_graph, args=[op], comment="Workaround for https://github.com/pytorch/pytorch/issues/94674", ) return op class random: @einx.trace def bernoulli(rng, p, shape): return ( einx.tracer.apply( ttorch.bernoulli, args=[ttorch.full(to_tuple(shape), p)], kwargs={"generator": rng}, output=einx.tracer.Tensor(shape), ) > 0.5 ) einx.jit.decorate_traced_functions(compiler.allow_in_graph) return torch() python-einx-0.3.0/einx/backend/base.py000066400000000000000000000151641505216034200176170ustar00rootroot00000000000000import einx import functools import threading import numpy as np from einx.tracer.tensor import op def associative_binary_to_nary(binary_op): @einx.trace def nary_op(*args): x = args[0] for y in args[1:]: x = binary_op(x, y) return x return nary_op def vmap_forloop(op, in_axes, out_axes, backend): if not isinstance(in_axes, (tuple, list)) or not isinstance(out_axes, (tuple, list)): raise ValueError("in_axes and out_axes must be tuples or lists of integers") def stack(xs, axis): return einx.jit(lambda *xs, backend: backend.stack(xs, axis=axis))(*xs) def inner(*args): if len(args) != len(in_axes): raise ValueError(f"Expected {len(in_axes)} arguments, got {len(args)}") value = {arg.shape[axis] for arg, axis in zip(args, in_axes) if axis is not None} if len(value) != 1: raise ValueError( f"Expected all arguments to have same size along vmap axis, got {value}" ) value = value.pop() xs_stacks = [[]] * len(out_axes) for i in range(value): xs = op(*[ arg[(slice(None),) * axis + (i,)] if axis is not None else arg for arg, axis in zip(args, in_axes) ]) if len(xs) != len(out_axes): raise ValueError( f"Expected {len(out_axes)} arguments from vmapped function, got {len(xs)}" ) for xs_stack, x in zip(xs_stacks, xs): xs_stack.append(x) xs = tuple( stack(xs_stack, axis=out_axis) for out_axis, xs_stack in zip(out_axes, xs_stacks) ) return xs inner.__name__ = f"vmap({op.__name__ if '__name__' in dir(op) else str(op)}, " f"in_axes={in_axes}, out_axes={out_axes})" return inner _thread_local = threading.local() def _get_backend_stack(): if not hasattr(_thread_local, "backend_stack"): _thread_local.backend_stack = [] return _thread_local.backend_stack def get_default(): if len(_get_backend_stack()) > 0: return _get_backend_stack()[-1] else: return None class Backend: function_name = None decorators = [] def __enter__(backend): _get_backend_stack().append(backend) return backend def __exit__(backend, *args): assert _get_backend_stack()[-1] is backend _get_backend_stack().pop() @staticmethod def _decorate_construct_graph(f): return f @classmethod @einx.trace def all_to_tensor(backend, tensors, convert_scalars=False): def to_tensor(tensor): if isinstance(tensor, einx.tracer.TensorRequiringConversion) or ( convert_scalars and einx.tracer.is_scalar(tensor) ): tensor = backend.to_tensor(tensor, tensor.shape) return tensor return [to_tensor(tensor) for tensor in tensors] @classmethod @einx.trace def stack(backend, tensors, axis=0): s = (slice(None),) * axis + (None,) return 
backend.concatenate([tensor[s] for tensor in tensors], axis=axis) @classmethod @einx.trace def mod(backend, x, y): return backend.subtract(x, backend.multiply(backend.floor_divide(x, y), y)) @classmethod @einx.trace def logsumexp(backend, x, axis=None): if isinstance(axis, int): axis = (axis,) x_max_keepdims = backend.max(x, axis=axis, keepdims=True) x_max_keepdims = backend.stop_gradient(x_max_keepdims) x_max_dropdims = backend.reshape( x_max_keepdims, tuple(s for i, s in enumerate(x_max_keepdims.shape) if i not in axis), ) return ( backend.log(backend.sum(backend.exp(x - x_max_keepdims), axis=axis, keepdims=False)) + x_max_dropdims ) @classmethod @einx.trace def std(backend, x, axis=None, keepdims=False): return backend.sqrt(backend.var(x, axis=axis, keepdims=keepdims)) @classmethod @einx.trace def prod(backend, tensor, axis=None): tensor = backend.log(tensor) tensor = backend.sum(tensor, axis=axis) tensor = backend.exp(tensor) return tensor @classmethod @einx.trace def any(backend, tensor, axis=None): return backend.count_nonzero(tensor, axis=axis) > 0 @classmethod @einx.trace def all(backend, tensor, axis=None): if axis is None: total_num = np.prod(tensor.shape) elif isinstance(axis, int): total_num = tensor.shape[axis] else: total_num = np.prod([tensor.shape[i] for i in axis]) return backend.count_nonzero(tensor, axis=axis) == total_num @classmethod @einx.trace def softmax(backend, x, axis=None): x_max = backend.max(x, axis=axis, keepdims=True) x_max = backend.stop_gradient(x_max) x = x - x_max return backend.exp(x) / backend.sum(backend.exp(x), axis=axis, keepdims=True) @classmethod @einx.trace def log_softmax(backend, x, axis=None): x_max = backend.max(x, axis=axis, keepdims=True) x_max = backend.stop_gradient(x_max) x = x - x_max return x - backend.log(backend.sum(backend.exp(x), axis=axis, keepdims=True)) @classmethod @einx.trace def flip(backend, tensor, axis): if isinstance(axis, int): axis = (axis,) for axis in axis: c = (slice(None),) * axis + (slice(None, None, -1),) tensor = tensor[c] return tensor @classmethod @einx.trace def roll(backend, tensor, shift, axis): if isinstance(axis, int): axis = (axis,) if isinstance(shift, int): shift = (shift,) if len(axis) != len(shift): raise ValueError(f"Got {len(shift)} shifts, expected {len(axis)}") for shift, axis in zip(shift, axis): indices = backend.arange(tensor.shape[axis]) indices = backend.mod(indices - shift, tensor.shape[axis]) c = (slice(None),) * axis + (indices,) tensor = tensor[c] return tensor @classmethod @einx.trace def rsqrt(backend, x): return 1.0 / backend.sqrt(x) @classmethod @einx.trace def vmap(backend, op, in_axes, out_axes): return einx.tracer.import_("einx").backend.vmap_forloop( backend, op, in_axes=in_axes, out_axes=out_axes ) stop_gradient = op.keep_shape(einx.trace(lambda x: x)) class ErrorBackend: def __init__(self, message): self.message = message def __getattr__(self, name): raise RuntimeError(self.message) python-einx-0.3.0/einx/backend/register.py000066400000000000000000000074341505216034200205320ustar00rootroot00000000000000import sys import einx import threading import importlib import numpy as np from .base import Backend backends = [] backend_factories = {} # module-name: [backend-factory] tensortype_to_backend = {} name_to_backend = {} lock = threading.RLock() def register_for_module(module_name, backend_factory): with lock: if module_name in sys.modules: # Module is already imported -> create backend now register(backend_factory()) else: # Module is not yet imported -> register factory if not 
module_name in backend_factories: backend_factories[module_name] = [] backend_factories[module_name].append(backend_factory) def register(backend): with lock: if not isinstance(backend, Backend): raise ValueError("Backend must be an instance of einx.backend.Backend") backends.append(backend) for type in backend.tensor_types: tensortype_to_backend[type] = backend name_to_backend[backend.name] = backend return backend from . import _numpy from . import _torch from . import _tensorflow from . import _jax from . import _dask from . import _mlx from . import _tinygrad # Create numpy backend now numpy = register(_numpy.create()) # Register other backends to be created after the corresponding modules are imported register_for_module("torch", _torch.create) register_for_module("tensorflow", _tensorflow.create) register_for_module("jax", _jax.create) register_for_module("dask.array", _dask.create) register_for_module("mlx", _mlx.create) register_for_module("tinygrad", _tinygrad.create) # Check if any new modules have been imported and construct backends that have been # registered for them def _update(): for module_name in list(backend_factories.keys()): if module_name in sys.modules: for factory in list(backend_factories[module_name]): register(factory()) del backend_factories[module_name] def _get1(tensor): backend = tensortype_to_backend.get(type(tensor), None) if not backend is None: return backend _update() for backend in backends: if any(isinstance(tensor, type) for type in backend.tensor_types) and not isinstance( tensor, np.ndarray ): # Found matching backend break else: return None tensortype_to_backend[type(tensor)] = backend return backend def get(arg): with lock: if isinstance(arg, str): if arg in name_to_backend: return name_to_backend[arg] _update() if arg in name_to_backend: return name_to_backend[arg] raise ValueError(f"Backend {arg} not found") else: tensors = arg if len(tensors) == 1: return _get1(tensors[0]) backend = None for tensor in tensors: if tensor is not None: backend2 = _get1(tensor) if not backend2 is None: if ( backend is not None and backend != backend2 and backend != numpy and backend2 != numpy ): raise ValueError( "Got tensors with conflicting backends: " f"{backend.__name__} and {backend2.__name__}" ) if backend is None or backend2 != numpy: backend = backend2 if backend is None: raise ValueError(f"Could not determine the backend to use in this operation") else: return backend python-einx-0.3.0/einx/experimental/000077500000000000000000000000001505216034200174325ustar00rootroot00000000000000python-einx-0.3.0/einx/experimental/__init__.py000066400000000000000000000000221505216034200215350ustar00rootroot00000000000000from .op import * python-einx-0.3.0/einx/experimental/op/000077500000000000000000000000001505216034200200505ustar00rootroot00000000000000python-einx-0.3.0/einx/experimental/op/__init__.py000066400000000000000000000000251505216034200221560ustar00rootroot00000000000000from .shard import * python-einx-0.3.0/einx/experimental/op/shard.py000066400000000000000000000217611505216034200215320ustar00rootroot00000000000000import einx import einx.op.util as util import numpy as np from functools import partial from typing import Callable, Union, Any import numpy.typing as npt tP = einx.tracer.import_("PartitionSpec", "P", from_="jax.sharding") tNamedSharding = einx.tracer.import_("NamedSharding", from_="jax.sharding") tMesh = einx.tracer.import_("Mesh", from_="jax.sharding") tjax = einx.tracer.import_("jax") tnp = einx.tracer.import_("numpy", as_="np") def 
_is_composed(expr): node = expr while node is not None: if isinstance(node, einx.expr.stage3.Composition): return True node = node.parent return False @einx.jit( trace=lambda t, c: lambda expr_in, tensor_in, expr_out, backend=None: c( expr_in, t(tensor_in), expr_out, ) ) def shard_stage3(expr_in, tensor_in, expr_out, mesh=None, backend=None): import jax for root in [expr_in, expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") if isinstance(expr, einx.expr.stage3.Marker): child = expr while child.parent is not None: if ( isinstance(child.parent, einx.expr.stage3.List) and _is_composed(child.parent) and child is not child.parent.children[0] ): raise ValueError( "If device axes are used within a composition they " "must appear as the left-most member of the composition" ) child = child.parent # Call tensor factories tensor_in = einx.tracer.call_factory(tensor_in, expr_in.shape, backend=backend) (tensor_in,) = backend.all_to_tensor([tensor_in]) # Flatten expressions (expr_in,), (tensor_in,) = util.flatten([expr_in], [tensor_in], backend=backend) marked_axes = tuple( axis for axis in expr_in if isinstance(axis, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(axis) ) if mesh is None: # Construct new mesh devices = tnp.array(tjax.devices()).reshape(tuple(a.value for a in marked_axes)) mesh = tMesh(devices, axis_names=tuple(a.name for a in marked_axes)) elif isinstance(mesh, jax.sharding.Mesh): # Got mesh -> check that marked axes match mesh marked_names = set(a.name for a in marked_axes) mesh_names = set(str(a) for a in mesh.axis_names) if not marked_names.issubset(mesh_names): raise ValueError( f"Marked axes must be subset of mesh axes. Got marked axes {marked_names} and mesh axes {mesh_names}" ) else: # Got list of devices -> construct new mesh devices = tnp.array(mesh).reshape(tuple(a.value for a in marked_axes)) mesh = tMesh(devices, axis_names=tuple(a.name for a in marked_axes)) # Construct partition spec axes = tuple(axis for axis in expr_in if isinstance(axis, einx.expr.stage3.Axis)) partition_spec = [axis.name if einx.expr.stage3.is_marked(axis) else None for axis in axes] partition_spec = tP(*partition_spec) # Shard tensor sharding = tNamedSharding(mesh, partition_spec) tensor_in = tjax.device_put(tensor_in, sharding) # Unflatten output expressions (tensor_in,) = util.unflatten([expr_in], [tensor_in], [expr_out], backend=backend) return tensor_in, expr_in @einx.lru_cache def parse(description, tensor_shape, cse=True, mesh=None, jax_devices=None, **parameters): import jax description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) if len(op) != 1: raise ValueError(f"Expected exactly one expression, got {len(op)}") def solve(eqs): return einx.expr.solve( [einx.expr.Equation(op[0][0], tensor_shape)] + eqs + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, )[0] if mesh is None: # If no mesh is given, create new mesh of all devices try: expr_in = solve([]) except einx.expr.SolveException as e: # Try with additional constraint of total number of devices expr_mesh = einx.expr.stage1.Composition(einx.expr.stage1.get_marked(op[0][0])) mesh_eq = einx.expr.Equation(expr_mesh, [len(jax.devices())]) try: expr_in = solve([mesh_eq]) except einx.expr.SolveException: # If it still fails, reraise original exception raise e elif isinstance(mesh, 
jax.sharding.Mesh): # Add constraints for existing mesh axes expr_mesh = einx.expr.stage1.Marker( einx.expr.stage1.List.maybe([ einx.expr.stage1.NamedAxis(name) for name in mesh.axis_names ]) ) mesh_eq = einx.expr.Equation(expr_mesh, mesh.devices.shape) expr_in = solve([mesh_eq]) elif isinstance(mesh, (list, tuple)): # Add constraint for number of devices expr_mesh = einx.expr.stage1.Composition(einx.expr.stage1.get_marked(op[0][0])) mesh_eq = einx.expr.Equation(expr_mesh, [len(mesh)]) expr_in = solve([mesh_eq]) expr_out = expr_in.__deepcopy__() return expr_in, expr_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, tensor, mesh=None, backend=None, **kwargs: c( description, t(tensor), mesh=mesh, **kwargs ) ) def shard( description: str, tensor: einx.Tensor, mesh: Any = None, backend: Union[einx.Backend, str, None] = "jax", cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Shards a tensor over a mesh of devices. *This function is currently experimental and will likely change in future versions.* *This function is currently only supported for Jax: A sharding is created based on the given expression, and applied to the tensor using* ``jax.device_put``. The tensor is sharded across the marked axes in the input expression. The marked axes match the axis names and shape of the mesh: >>> x = jnp.ones((2, 4, 128)) >>> x = einx.experimental.shard("[d1 d2] c") >>> x.sharding NamedSharding(mesh=Mesh('d1': 2, 'd2': 4), spec=PartitionSpec('d1', 'd2', None)) Axis compositions can be used to apply the `sharding rules of Jax `_, where tensor axes are evenly divided by the number of shards: >>> x = jnp.ones((128, 640, 480, 3)) >>> x = einx.experimental.shard("([batch] _) ...", x) >>> x.sharding NamedSharding(mesh=Mesh('batch': 8), spec=PartitionSpec('batch',)) If possible, the sharding is created over all devices. ``_`` is a regular axis name, and its value is determined by :doc:`einx's expression solver `. Optionally, an existing mesh can be passed: >>> from jax.sharding import Mesh >>> devices = np.asarray(jax.devices()).reshape(4, 2) >>> mesh = Mesh(devices, axis_names=("d1", "d2")) >>> x = jnp.ones((4, 1024, 1024)) >>> x = einx.experimental.shard("a ([d2] b) ([d1] c)", x, mesh=mesh) >>> x.sharding NamedSharding(mesh=Mesh('d1': 4, 'd2': 2), spec=PartitionSpec(None, 'd2', 'd1')) The array is replicated over all mesh axes that are not part of the expression: >>> x = jnp.ones((1024, 1024)) >>> x = einx.experimental.shard("a ([d1] b)", x, mesh=mesh) >>> x.sharding NamedSharding(mesh=Mesh('d1': 4, 'd2': 2), spec=PartitionSpec(None, 'd1',)) Args: description: Description string in Einstein notation (see above). tensor: Input tensor or tensor factory matching the description string. mesh: Mesh or list of devices to shard the tensor over. If not given, a new mesh over all available devices will be created matching the axes in the given expression. Defaults to ``None``. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The sharded tensor if ``graph=False``, otherwise the graph representation of the operation. 
""" if backend.name != "jax": raise NotImplementedError("einx.experimental.shard is currently only supported for Jax") expr_in, expr_out = parse( description, einx.tracer.get_shape(tensor), mesh=mesh, cse=cse, **parameters ) tensor, expr_out = shard_stage3(expr_in, tensor, expr_out, mesh=mesh, backend=backend) return tensor shard.parse = parse python-einx-0.3.0/einx/expr/000077500000000000000000000000001505216034200157135ustar00rootroot00000000000000python-einx-0.3.0/einx/expr/__init__.py000066400000000000000000000001371505216034200200250ustar00rootroot00000000000000from . import stage1, stage2, stage3 from .util import * from .solver import SolveException python-einx-0.3.0/einx/expr/solver.py000066400000000000000000000216741505216034200176110ustar00rootroot00000000000000import sympy import math class Expression: def __init__(self): pass def __add__(self, other): return Sum([self, other]) def __radd__(self, other): return Sum([other, self]) def __mul__(self, other): return Product([self, other]) def __rmul__(self, other): return Product([other, self]) class Variable(Expression): def __init__(self, id, name, integer=True): Expression.__init__(self) self.id = id self.name = name self.integer = integer def __iter__(self): yield self def __eq__(self, other): return isinstance(other, Variable) and self.id == other.id def __hash__(self): return hash(self.id) def __str__(self): return f"{self.name}" def sympy(self): return sympy.Symbol(self.id, integer=self.integer) class Constant(Expression): def __init__(self, value): Expression.__init__(self) self.value = value def __iter__(self): yield self def __eq__(self, other): return isinstance(other, Constant) and self.value == other.value def __hash__(self): return hash(self.value) def __str__(self): return str(self.value) def sympy(self): return self.value class Sum(Expression): @staticmethod def maybe(children): if len(children) == 0: return Constant(0) elif len(children) == 1: return children[0] elif all(isinstance(c, Constant) for c in children): return Constant(sum(c.value for c in children)) else: return Sum(children) def __init__(self, children): Expression.__init__(self) self.children = [to_term(c) for c in children] def __iter__(self): yield self for child in self.children: yield from child def __eq__(self, other): return isinstance(other, Sum) and self.children == other.children def __hash__(self): return hash(tuple(self.children)) def __str__(self): return " + ".join(str(c) for c in self.children) def sympy(self): return sum([c.sympy() for c in self.children]) class Product(Expression): @staticmethod def maybe(children): if len(children) == 0: return Constant(1) elif len(children) == 1: return children[0] elif all(isinstance(c, Constant) for c in children): return Constant(math.prod(c.value for c in children)) else: return Product(children) def __init__(self, children): Expression.__init__(self) self.children = [to_term(c) for c in children] def __iter__(self): yield self for child in self.children: yield from child def __eq__(self, other): return isinstance(other, Product) and self.children == other.children def __hash__(self): return hash(tuple(self.children)) def __str__(self): return " * ".join(str(c) for c in self.children) def sympy(self): return math.prod([c.sympy() for c in self.children]) def to_term(x): if isinstance(x, int): return Constant(x) else: if not isinstance(x, Expression): raise TypeError(f"Expected Expression, got {type(x)}") return x class SolveException(Exception): def __init__(self, message): 
super().__init__(message) def solve(equations): equations = [(to_term(t1), to_term(t2)) for t1, t2 in equations] equations = [(t1, t2) for t1, t2 in equations if t1 != t2] equations = list(set(equations)) variables = { v.id: v for terms in equations for term in terms for v in term if isinstance(v, Variable) } # Find equivalence classes of variables to speed up sympy solver ##### # Find constant definitions constants = {} # id: constant value for t1, t2 in equations: if isinstance(t1, Variable) and isinstance(t2, Constant): if constants.get(t1.id, t2.value) != t2.value: raise SolveException( f"Found contradictory values { {constants[t1.id], t2.value} } for " f"expression '{t1.name}'" ) constants[t1.id] = t2.value elif isinstance(t1, Constant) and isinstance(t2, Variable): if constants.get(t2.id, t1.value) != t1.value: raise SolveException( f"Found contradictory values { {constants[t2.id], t1.value} } for " f"expression '{t2.name}'" ) constants[t2.id] = t1.value elif isinstance(t1, Constant) and isinstance(t2, Constant): if t1.value != t2.value: raise SolveException( f"Found contradictory values {t1.value} != {t2.value} in input equation" ) # Find equivalence classes of variables classes = {v: {v} for v in variables} # id: set of equivalent ids for t1, t2 in equations: if isinstance(t1, Variable) and isinstance(t2, Variable): assert t1.id in classes and t2.id in classes set1 = classes[t1.id] set2 = classes[t2.id] for t_id in set2: classes[t_id] = set1 set1.add(t_id) # For every class: Use constant if it exists, or create single class variable origvar_to_solvevar = {} # id: Variable or Constant for eclass in {id(s): s for s in classes.values()}.values(): if any(n in constants for n in eclass): # Use constant class_constants = {constants[n] for n in eclass if n in constants} if len(class_constants) != 1: names = {variables[a].name for a in eclass} if len(names) == 1: raise SolveException( f"Found contradictory values {class_constants} for expression " f"'{next(iter(names))}'" ) else: raise SolveException( f"Found contradictory values {class_constants} for equivalent " f"expressions {names}" ) v = Constant(next(iter(class_constants))) else: # Create new variable for class v = Variable( f"Class-{id(eclass)}", f"Equivalent expressions { {variables[a].name for a in eclass} }", ) for n in eclass: assert n not in origvar_to_solvevar origvar_to_solvevar[n] = v # Apply to equations def replace(t): if isinstance(t, Variable) and t.id in origvar_to_solvevar: return origvar_to_solvevar[t.id] elif isinstance(t, Constant): return t elif isinstance(t, Sum): return Sum.maybe([replace(c) for c in t.children]) elif isinstance(t, Product): return Product.maybe([replace(c) for c in t.children]) else: raise AssertionError() equations2 = [] for t1o, t2o in equations: t1 = replace(t1o) t2 = replace(t2o) if isinstance(t1, Constant) and isinstance(t2, Constant): if t1.value != t2.value: raise SolveException( f"Found contradictory values {t1.value} != {t2.value} " "for same equivalence class" ) elif t1 != t2: equations2.append((t1, t2)) equations = equations2 # Solve remaining equations using sympy ##### solutions = {} if len(equations) > 0: sympy_equations = [sympy.Eq(t1.sympy(), t2.sympy()) for t1, t2 in equations] if all(eq.is_Boolean and bool(eq) for eq in sympy_equations): solutions = {} else: solutions = sympy.solve(sympy_equations, set=True, manual=True) if solutions == []: solutions = {} elif isinstance(solutions, tuple) and len(solutions) == 2: variables, solutions = solutions if len(solutions) == 0: raise 
SolveException("Sympy returned no solutions") elif len(solutions) > 1: raise SolveException("Sympy returned multiple possible solutions") else: solutions = next(iter(solutions)) solutions = { str(k): int(v) for k, v in zip(variables, solutions) if v.is_number } else: raise SolveException("Sympy returned unexpected result") # Determine values for original variables in equivalence classes orig_solutions = {} for k, v in origvar_to_solvevar.items(): if isinstance(v, Constant): orig_solutions[k] = v.value elif isinstance(v, Variable): if v.id in solutions: orig_solutions[k] = solutions[v.id] else: raise AssertionError() return orig_solutions python-einx-0.3.0/einx/expr/stage1.py000066400000000000000000000664651505216034200174720ustar00rootroot00000000000000from collections import defaultdict import re import uuid class Expression: def __init__(self, begin_pos, end_pos): self.begin_pos = begin_pos self.end_pos = end_pos self.parent = None @property def depth(self): if self.parent is None: return 0 elif isinstance(self.parent, Ellipsis): return 1 + self.parent.depth else: return self.parent.depth class Composition(Expression): def __init__(self, inner, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.inner = inner self.inner.parent = self def all(self): yield self yield from self.inner.all() def __str__(self): return "(" + str(self.inner) + ")" def __deepcopy__(self): return Composition(self.inner.__deepcopy__(), self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, Composition) and self.inner == other.inner def __hash__(self): return 87123 + hash(self.inner) def __len__(self): return len(self.inner) def __getitem__(self, i): return self.inner[i] def expansion(self): return 1 @property def direct_children(self): yield self.inner class Marker(Expression): @staticmethod def maybe(inner, *args, **kwargs): if isinstance(inner, List) and len(inner) == 0: return inner else: return Marker(inner, *args, **kwargs) def __init__(self, inner, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.inner = inner self.inner.parent = self assert not (isinstance(inner, List) and len(inner) == 0) def all(self): yield self yield from self.inner.all() def __str__(self): return "[" + str(self.inner) + "]" def __deepcopy__(self): return Marker(self.inner.__deepcopy__(), self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, Marker) and self.inner == other.inner def __hash__(self): return 91236 + hash(self.inner) def expansion(self): return self.inner.expansion() @property def direct_children(self): yield self.inner class NamedAxis(Expression): def __init__(self, name, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.name = name def all(self): yield self def __str__(self): return self.name def __deepcopy__(self): return NamedAxis(self.name, self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, NamedAxis) and self.name == other.name def __hash__(self): return 12345 + hash(self.name) def expansion(self): return 1 @property def direct_children(self): yield from () class UnnamedAxis(Expression): def __init__(self, value, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.value = value def all(self): yield self def __str__(self): return str(self.value) def __deepcopy__(self): return UnnamedAxis(self.value, self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, UnnamedAxis) and self.value == other.value def 
__hash__(self): return 67890 + hash(self.value) def expansion(self): return 1 @property def direct_children(self): yield from () class Ellipsis(Expression): anonymous_variable_name = "_anonymous_ellipsis_axis" def maybe(inner, *args, **kwargs): if isinstance(inner, List) and len(inner) == 0: return inner else: return Ellipsis(inner, *args, **kwargs) def __init__(self, inner, begin_pos=-1, end_pos=-1, ellipsis_id=None): Expression.__init__(self, begin_pos, end_pos) self.inner = inner self.inner.parent = self self.ellipsis_id = uuid.uuid4().int if ellipsis_id is None else ellipsis_id assert not (isinstance(inner, List) and len(inner) == 0) def all(self): yield self yield from self.inner.all() def __str__(self): n = str(self.inner) if isinstance(self.inner, List) and len(self.inner.children) > 1: n = "{" + n + "}" return n + _ellipsis def __deepcopy__(self): return Ellipsis(self.inner.__deepcopy__(), self.begin_pos, self.end_pos, self.ellipsis_id) def __eq__(self, other): return isinstance(other, Ellipsis) and self.inner == other.inner def __hash__(self): return 34567 + hash(self.inner) def expansion(self): if self.inner.expansion() == 0: return 0 else: return None @property def direct_children(self): yield self.inner class Concatenation(Expression): def maybe(l, *args, **kwargs): if len(l) == 1: return l[0] else: return Concatenation(l, *args, **kwargs) def __init__(self, children, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.children = children for child in self.children: child.parent = self def all(self): yield self for child in self.children: yield from child.all() def __str__(self): return " + ".join([str(c) for c in self.children]) def __deepcopy__(self): return Concatenation( [c.__deepcopy__() for c in self.children], self.begin_pos, self.end_pos ) def __eq__(self, other): return isinstance(other, Concatenation) and self.children == other.children def __hash__(self): return 234 + hash(tuple(self.children)) def __len__(self): return len(self.children) def __getitem__(self, i): return self.children[i] def expansion(self): return 1 @property def direct_children(self): yield from self.children class List(Expression): @staticmethod def maybe(l, *args, **kwargs): if len(l) == 1: return l[0] else: return List(l, *args, **kwargs) def __init__(self, children, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.children = children for child in self.children: child.parent = self def all(self): yield self for child in self.children: yield from child.all() def __str__(self): return " ".join([str(c) for c in self.children]) def __deepcopy__(self): return List([c.__deepcopy__() for c in self.children], self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, List) and self.children == other.children def __hash__(self): return 2333 + hash(tuple(self.children)) def __len__(self): return len(self.children) def __getitem__(self, i): return self.children[i] def expansion(self): child_expansions = [c.expansion() for c in self.children] if any(e is None for e in child_expansions): return None else: return sum(child_expansions) @property def direct_children(self): yield from self.children class Args(Expression): @staticmethod def maybe(*args, **kwargs): return Args(*args, **kwargs) def __init__(self, children, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) self.children = children for child in self.children: assert not isinstance(child, Args) child.parent = self def all(self): yield self for child in 
self.children: yield from child.all() def __str__(self): return ", ".join([str(c) for c in self.children]) def __deepcopy__(self): return Args([c.__deepcopy__() for c in self.children], self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, Args) and self.children == other.children def __hash__(self): return 233314 + hash(tuple(self.children)) def __getitem__(self, i): return self.children[i] def __len__(self): return len(self.children) def __iter__(self): return iter(self.children) class Op(Expression): def __init__(self, children, begin_pos=-1, end_pos=-1): Expression.__init__(self, begin_pos, end_pos) assert len(children) >= 1 self.children = children for child in self.children: child.parent = self def all(self): yield self for child in self.children: yield from child.all() def __str__(self): return " -> ".join([str(c) for c in self.children]) def __deepcopy__(self): return Op([c.__deepcopy__() for c in self.children], self.begin_pos, self.end_pos) def __eq__(self, other): return isinstance(other, Op) and self.children == other.children def __hash__(self): return 961121 + hash(tuple(self.children)) def __getitem__(self, i): return self.children[i] def __len__(self): return len(self.children) def __iter__(self): return iter(self.children) class Token: def __init__(self, pos, text): self.begin_pos = pos self.end_pos = pos + len(text) self.text = text def __str__(self): return self.text def __repr__(self): return f'Token("{self.text}")' class ParseError(Exception): def __init__(self, expression, pos, message): self.expression = expression self.pos = pos self.message = message assert self.pos >= 0 and self.pos < len(self.expression) def __str__(self): return self.message + "\nHere: " + self.expression + "\n" + " " * (self.pos + 6) + "^" _parentheses = { "(": ")", "[": "]", } _parentheses_front = set(_parentheses.keys()) _parentheses_back = set(_parentheses.values()) _disallowed_literals = ["\t", "\n", "\r"] _nary_ops = ["->", "|", ",", " ", "+"] _ellipsis = "..." 
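# Illustrative note (added, not part of the original source): parse_op below tokenizes an
# einx expression string on the literals defined here and builds the stage1 expression tree.
# For example, parse_op("a b -> b a") is expected to yield roughly
#   Op([Args([List([NamedAxis("a"), NamedAxis("b")])]),
#       Args([List([NamedAxis("b"), NamedAxis("a")])])])
# i.e. one Args child per side of "->", each wrapping a List of axis nodes.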
_axis_name = r"[a-zA-Z_][a-zA-Z0-9_]*" _literals = _nary_ops + list(_parentheses_front) + list(_parentheses_back) + [_ellipsis] def parse_op(text): text = text.strip() for x in _nary_ops: while f" {x}" in text: text = text.replace(f" {x}", x) while f"{x} " in text: text = text.replace(f"{x} ", x) # Lexer tokens = [] start_pos = 0 def next_token(end_pos): nonlocal start_pos if start_pos != end_pos: tokens.append(Token(start_pos, text[start_pos:end_pos])) start_pos = end_pos pos = 0 while pos < len(text): for d in _disallowed_literals: if text[pos:].startswith(d): raise ParseError(text, pos, f"Found disallowed literal '{d}'") for l in _literals: if text[pos:].startswith(l): next_token(pos) next_token(pos + len(l)) pos += len(l) break else: pos += 1 next_token(pos) # Parser def parse(in_tokens, begin_pos): assert isinstance(in_tokens, list) if len(in_tokens) == 0: return List([], begin_pos, begin_pos) # Parentheses if isinstance(in_tokens[0], Token) and in_tokens[0].text in _parentheses_front: assert ( len(in_tokens) >= 2 and isinstance(in_tokens[-1], Token) and in_tokens[-1].text in _parentheses_back ) if in_tokens[0].text == "(": op = Composition elif in_tokens[0].text == "[": op = Marker.maybe else: raise AssertionError() return op( parse(in_tokens[1:-1], in_tokens[1].begin_pos), in_tokens[0].begin_pos, in_tokens[-1].end_pos, ) # N-ary operators for nary_op in _nary_ops: if any(isinstance(t, Token) and t.text == nary_op for t in in_tokens): out_tokens = [] current_tokens = [] allow_empty_operands = nary_op != " " for token in in_tokens: if isinstance(token, Token) and token.text == nary_op: if allow_empty_operands or len(current_tokens) > 0: out_tokens.append( parse( current_tokens, current_tokens[0].begin_pos if len(current_tokens) > 0 else token.begin_pos, ) ) current_tokens = [] else: current_tokens.append(token) if allow_empty_operands or len(current_tokens) > 0: out_tokens.append( parse( current_tokens, current_tokens[0].begin_pos if len(current_tokens) > 0 else token.begin_pos, ) ) if nary_op == " ": op = List elif nary_op in {"->", "|"}: op = Op elif nary_op == ",": op = Args elif nary_op == "+": op = Concatenation else: raise AssertionError() return op(out_tokens, in_tokens[0].begin_pos, in_tokens[-1].end_pos) # Ellipsis if isinstance(in_tokens[-1], Token) and in_tokens[-1].text == _ellipsis: if len(in_tokens) == 1: return Ellipsis( NamedAxis( Ellipsis.anonymous_variable_name, in_tokens[0].begin_pos, in_tokens[0].begin_pos, ), in_tokens[0].begin_pos, in_tokens[0].end_pos, ) else: assert len(in_tokens) == 2 return Ellipsis( parse(in_tokens[:-1], in_tokens[0].begin_pos), in_tokens[0].begin_pos, in_tokens[1].end_pos, ) # Axis if len(in_tokens) == 1 and isinstance(in_tokens[0], Token): value = in_tokens[0].text.strip() if value.isdigit(): return UnnamedAxis(int(value), in_tokens[0].begin_pos, in_tokens[0].end_pos) else: if not re.fullmatch(_axis_name, in_tokens[0].text): raise ParseError( text, in_tokens[0].begin_pos, f"Invalid axis name '{in_tokens[0].text}'" ) return NamedAxis(value, in_tokens[0].begin_pos, in_tokens[0].end_pos) if len(in_tokens) == 1: return in_tokens[0] raise AssertionError() stack = [[]] for token in tokens: if token.text in _parentheses_front: stack.append([]) stack[-1].append(token) elif token.text in _parentheses_back: if len(stack) == 1 or _parentheses[stack[-1][0].text] != token.text: raise ParseError( text, token.begin_pos, f"Unexpected closing parenthesis '{token.text}'" ) stack[-1].append(token) group = stack.pop() stack[-1].append(parse(group, 
group[0].begin_pos)) else: stack[-1].append(token) if len(stack) > 1: raise ParseError( text, stack[-1][0].begin_pos, f"Unclosed parenthesis '{stack[-1][0].text}'" ) expression = parse(stack[0], 0) # Move up and merge Op def move_up(expr): if isinstance(expr, (NamedAxis, UnnamedAxis)): return Op([expr.__deepcopy__()]) elif isinstance(expr, Composition): op = move_up(expr.inner) return Op( [Composition(arglist, expr.begin_pos, expr.end_pos) for arglist in op.children], op.begin_pos, op.end_pos, ) elif isinstance(expr, Marker): op = move_up(expr.inner) return Op( [Marker.maybe(arglist, expr.begin_pos, expr.end_pos) for arglist in op.children], op.begin_pos, op.end_pos, ) elif isinstance(expr, Ellipsis): op = move_up(expr.inner) return Op( [ Ellipsis.maybe(arglist, expr.begin_pos, expr.end_pos, expr.ellipsis_id) for arglist in op.children ], op.begin_pos, op.end_pos, ) elif isinstance(expr, (List, Concatenation, Args)): _class = type(expr) children = [move_up(c) for c in expr.children] new_children = [] nums = {len(c) for c in children if len(c) != 1} if len(nums) > 1: raise ParseError( text, expr.begin_pos, "Inconsistent usage of '->' operator", ) num = nums.pop() if len(nums) > 0 else 1 new_arglists = [] for idx in range(num): new_children = [] for op in children: if len(op) == 1: new_children.append(op[0]) else: new_children.append(op[idx]) new_arglists.append(_class.maybe(new_children, expr.begin_pos, expr.end_pos)) return Op(new_arglists, expr.begin_pos, expr.end_pos) elif isinstance(expr, Op): return Op( [arglist for child in expr.children for arglist in move_up(child).children], expr.begin_pos, expr.end_pos, ) else: raise AssertionError() expression = move_up(expression) # Move up and merge Args def move_up(expr): if isinstance(expr, (NamedAxis, UnnamedAxis)): return Args([expr.__deepcopy__()]) elif isinstance(expr, Composition): args = move_up(expr.inner) return Args( [Composition(arg, expr.begin_pos, expr.end_pos) for arg in args.children], args.begin_pos, args.end_pos, ) elif isinstance(expr, Marker): args = move_up(expr.inner) return Args( [Marker.maybe(arg, expr.begin_pos, expr.end_pos) for arg in args.children], args.begin_pos, args.end_pos, ) elif isinstance(expr, Ellipsis): args = move_up(expr.inner) return Args( [ Ellipsis.maybe(arg, expr.begin_pos, expr.end_pos, expr.ellipsis_id) for arg in args.children ], args.begin_pos, args.end_pos, ) elif isinstance(expr, (List, Concatenation)): _class = type(expr) children = [move_up(c) for c in expr.children] new_children = [] nums = {len(c) for c in children if len(c) != 1} if len(nums) > 1: raise ParseError( text, expr.begin_pos, "Inconsistent usage of ',' operator", ) num = nums.pop() if len(nums) > 0 else 1 new_args = [] for idx in range(num): new_children = [] for args in children: if len(args) == 1: new_children.append(args[0]) else: new_children.append(args[idx]) new_args.append(_class.maybe(new_children, expr.begin_pos, expr.end_pos)) return Args(new_args, expr.begin_pos, expr.end_pos) elif isinstance(expr, Args): return Args( [arg for child in expr.children for arg in move_up(child).children], expr.begin_pos, expr.end_pos, ) else: raise AssertionError() assert isinstance(expression, Op) expression = Op( [move_up(c) for c in expression.children], expression.begin_pos, expression.end_pos ) # Semantic check: Op cannot have more than two children # TODO: if len(expression.children) > 2: raise ParseError(text, expression.begin_pos, "Cannot have more than one '->' operator") # Semantic check: Axis names can only be used once per 
expression def traverse(expr, key, axes_by_key): if isinstance(expr, list): for expr in expr: traverse(expr, key, axes_by_key) elif isinstance(expr, NamedAxis): axes_by_key[(key + (expr.name,))].append(expr) elif isinstance(expr, UnnamedAxis): pass elif isinstance(expr, Composition): traverse(expr.inner, key, axes_by_key) elif isinstance(expr, List): traverse(expr.children, key, axes_by_key) elif isinstance(expr, Concatenation): for i, c in enumerate(expr.children): traverse(c, key + ((id(expr), i),), axes_by_key) elif isinstance(expr, Marker): traverse(expr.inner, key, axes_by_key) elif isinstance(expr, Ellipsis): traverse(expr.inner, key, axes_by_key) else: raise TypeError(f"Invalid expression type {type(expr)}") def check(root): axes_by_key = defaultdict(list) traverse(root, (), axes_by_key) for key in list(axes_by_key.keys()): exprs = [] for i in range(len(key) + 1): exprs.extend(axes_by_key[key[:i]]) if len(exprs) > 1: raise ParseError( text, exprs[1].begin_pos, f"Axis name '{exprs[0].name}' is used more than once in expression '{root}'", ) for arglist in expression.children: for arg in arglist.children: check(arg) return expression def parse_args(text): op = parse_op(text) if len(op.children) != 1: raise ParseError(text, op.begin_pos, "Expression cannot contain '->'") assert isinstance(op.children[0], Args) return op.children[0] def parse_arg(text): if isinstance(text, Expression): return text args = parse_args(text) if len(args.children) != 1: raise ParseError(text, args.begin_pos, "Expression cannot contain ','") return args.children[0] def expr_map(f): def outer(expr, *args, **kwargs): # Wrap the user function to return a list of expressions def f2(expr): t = f(expr, *args, **kwargs) if t is None: return None, expr_map.CONTINUE expr, signal = t if isinstance(expr, list) or expr is None: return expr, signal if isinstance(expr, List): return expr.children, signal elif isinstance(expr, Expression): return [expr], signal else: raise TypeError(f"Invalid return type {type(expr)}") return List.maybe(_expr_map(expr, f2)) return outer expr_map.CONTINUE = 1 expr_map.COPY_AND_STOP = 2 expr_map.REPLACE_AND_STOP = 3 expr_map.REPLACE_AND_CONTINUE = 4 def _expr_map(expr, f): exprs, signal = f(expr) if signal == expr_map.REPLACE_AND_STOP: assert isinstance(exprs, list) return exprs elif signal == expr_map.COPY_AND_STOP: return [expr.__deepcopy__()] elif signal == expr_map.REPLACE_AND_CONTINUE: return [c for expr in exprs for c in _expr_map(expr, f)] if isinstance(expr, NamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, UnnamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, Composition): return [Composition(List.maybe(_expr_map(expr.inner, f)))] elif isinstance(expr, List): return [c2 for c1 in expr.children for c2 in _expr_map(c1, f)] elif isinstance(expr, Concatenation): return [Concatenation([List.maybe(_expr_map(c, f)) for c in expr.children])] elif isinstance(expr, Marker): x = _expr_map(expr.inner, f) if len(x) == 0: # Drop empty marker return [] else: return [Marker(List.maybe(x))] elif isinstance(expr, Ellipsis): return [Ellipsis(List.maybe(_expr_map(expr.inner, f)), ellipsis_id=expr.ellipsis_id)] else: raise TypeError(f"Invalid expression type {type(expr)}") @expr_map def demark(expr): if isinstance(expr, Marker): return expr.inner, expr_map.REPLACE_AND_CONTINUE def any_parent_is(expr, pred, include_self=True): if not include_self: if expr.parent is None: return False expr = expr.parent while expr is not None: if pred(expr): return True expr = expr.parent return 
False def is_marked(expr): return any_parent_is(expr, lambda expr: isinstance(expr, Marker)) def _get_marked(expr): if isinstance(expr, NamedAxis): return [] elif isinstance(expr, UnnamedAxis): return [] elif isinstance(expr, Ellipsis): inner = _get_marked(expr.inner) if len(inner) > 0: return [Ellipsis(List.maybe(inner), ellipsis_id=expr.ellipsis_id)] else: return [] elif isinstance(expr, Marker): return [expr.inner.__deepcopy__()] elif isinstance(expr, Concatenation): return [Concatenation.maybe([x for c in expr.children for x in _get_marked(c)])] elif isinstance(expr, Composition): return [Composition(List.maybe(_get_marked(expr.inner)))] elif isinstance(expr, List): return [List.maybe([x for c in expr.children for x in _get_marked(c)])] else: raise TypeError(f"Invalid expression type {type(expr)}") def get_marked(expr): return List.maybe(_get_marked(expr)) def get_unmarked(expr): return remove(expr, lambda expr: is_marked(expr)) @expr_map def replace(expr, f): expr = f(expr) if expr is not None: return expr, expr_map.REPLACE_AND_STOP @expr_map def remove(expr, pred): if pred(expr): return [], expr_map.REPLACE_AND_STOP python-einx-0.3.0/einx/expr/stage2.py000066400000000000000000001165671505216034200174720ustar00rootroot00000000000000from . import stage1, solver import re import einx import numpy as np from collections import defaultdict class Expression: def __init__(self, ellipsis_indices): self.ellipsis_indices = ellipsis_indices self.parent = None @property def depth(self): return len(self.ellipsis_indices) @property def shape(self): return tuple(i[1] for i in self.ellipsis_indices) + (len(self),) class Composition(Expression): def __init__(self, inner, ellipsis_indices): Expression.__init__(self, ellipsis_indices) self.inner = inner inner.parent = self def __str__(self): return f"({self.inner})" def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return Composition(self.inner.__deepcopy__(), ellipsis_indices=self.ellipsis_indices) def all(self): yield self yield from self.inner.all() class List(Expression): @staticmethod def maybe(l, *args, **kwargs): if len(l) == 1: return l[0] else: return List(l, *args, **kwargs) def __init__(self, children, ellipsis_indices): Expression.__init__(self, ellipsis_indices) self.children = children for c in children: c.parent = self def __str__(self): return " ".join([str(c) for c in self.children]) def __len__(self): return sum(len(c) for c in self.children) def __iter__(self): for c in self.children: yield from c def __deepcopy__(self): return List( [c.__deepcopy__() for c in self.children], ellipsis_indices=self.ellipsis_indices ) def all(self): yield self for c in self.children: yield from c.all() class NamedAxis(Expression): def __init__(self, name, ellipsis_indices): Expression.__init__(self, ellipsis_indices) self.name = name postfix = "" for idx, _num in self.ellipsis_indices: postfix = postfix + "." 
+ str(idx) if not self.name.endswith(postfix): self.name = self.name + postfix def __str__(self): return self.name def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return NamedAxis(self.name, ellipsis_indices=self.ellipsis_indices) def all(self): yield self class UnnamedAxis(Expression): def __init__(self, value, ellipsis_indices): Expression.__init__(self, ellipsis_indices) self.value = value def __str__(self): return str(self.value) def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return UnnamedAxis(self.value, ellipsis_indices=self.ellipsis_indices) def all(self): yield self class Concatenation(Expression): def __init__(self, children, ellipsis_indices): Expression.__init__(self, ellipsis_indices) for c in children: if len(c) != 1: raise ValueError( "Concatenation can only be used on expressions of length 1, " f"but got expression '{c}'" ) self.children = children for c in children: c.parent = self def __str__(self): return "+".join([str(c) for c in self.children]) def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return Concatenation( [c.__deepcopy__() for c in self.children], ellipsis_indices=self.ellipsis_indices ) def all(self): yield self for c in self.children: yield from c.all() class Marker(Expression): @staticmethod def maybe(inner, *args, **kwargs): if len(inner) == 0: return inner else: return Marker(inner, *args, **kwargs) def __init__(self, inner, ellipsis_indices): Expression.__init__(self, ellipsis_indices) self.inner = inner inner.parent = self assert len(inner) > 0 def __str__(self): return f"[{self.inner}]" def __len__(self): return len(self.inner) def __iter__(self): yield from self.inner def __deepcopy__(self): return Marker(self.inner.__deepcopy__(), ellipsis_indices=self.ellipsis_indices) def all(self): yield self yield from self.inner.all() class SolveDepthException(solver.SolveException): def __init__(self, exprs1, exprs2, expansions1, expansions2, depths1, depths2, message): assert ( len({ len(exprs1), len(exprs2), len(expansions1), len(expansions2), len(depths1), len(depths2), }) == 1 ) self.exprs1 = exprs1 self.exprs2 = exprs2 self.expansions1 = expansions1 self.expansions2 = expansions2 self.depths1 = depths1 self.depths2 = depths2 message_in = message message = ( "Failed to solve for the depth of axes, i.e. 
the number of outer ellipses.\n" "Equations:\n" ) for expr1, expr2 in zip(exprs1, exprs2): if expr1 is not None and expr2 is not None: message += " " message += f"{einx.expr.util._to_str(expr1)}" message += " = " message += f"{einx.expr.util._to_str(expr2)}" message += "\n" message += f"Reason: {message_in}" super().__init__(message) class SolveExpansionException(solver.SolveException): def __init__(self, exprs1, exprs2, expansions1, expansions2, depths1, depths2, message): assert ( len({ len(exprs1), len(exprs2), len(expansions1), len(expansions2), len(depths1), len(depths2), }) == 1 ) self.exprs1 = exprs1 self.exprs2 = exprs2 self.expansions1 = expansions1 self.expansions2 = expansions2 self.depths1 = depths1 self.depths2 = depths2 message_in = message message = "Failed to solve for the number of axes in the expressions.\nEquations:\n" for expr1, expr2 in zip(exprs1, exprs2): if expr1 is not None and expr2 is not None: message += " " message += f"{einx.expr.util._to_str(expr1)}" message += " = " message += f"{einx.expr.util._to_str(expr2)}" message += "\n" message += f"Reason: {message_in}" super().__init__(message) def solve(exprs1, exprs2, expansions1, expansions2, depths1, depths2): exprs1 = list(exprs1) exprs2 = list(exprs2) expansions1 = list(expansions1) expansions2 = list(expansions2) depths1 = list(depths1) depths2 = list(depths2) if any( expr is not None and not isinstance(expr, stage1.Expression) for expr in exprs1 + exprs2 ): raise ValueError("Can only expand stage1.Expression") if ( len({ len(exprs1), len(exprs2), len(expansions1), len(expansions2), len(depths1), len(depths2), }) != 1 ): raise ValueError("Number of expressions, expansions and depths must be equal") # ##### 1. Find expression depths ##### equations = [] symbolic_expr_depths = {} for root in exprs1 + exprs2: if root is not None: for expr in root.all(): symbolic_expr_depths[id(expr)] = solver.Variable( f"symbolic_expr_depths[{id(expr)}]", str(expr) ) # Add equations: Depth relations between subexpressions for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if isinstance(expr, stage1.Ellipsis): # Ellipsis increases depth by one equations.append(( symbolic_expr_depths[id(expr)] + 1, symbolic_expr_depths[id(expr.inner)], )) else: # All other expressions have the same depth as their children for child in expr.direct_children: equations.append(( symbolic_expr_depths[id(expr)], symbolic_expr_depths[id(child)], )) # Add equations: Depth arguments for root, depth in zip(exprs1 + exprs2, depths1 + depths2): if root is not None and depth is not None: equations.append((symbolic_expr_depths[id(root)], depth)) # Add equations: Root depths for root1, root2, expansion1, expansion2 in zip(exprs1, exprs2, expansions1, expansions2): if ( root1 is not None and root2 is not None and expansion1 is not None and expansion2 is not None ): equations.append(( symbolic_expr_depths[id(root1)] + len(expansion1), symbolic_expr_depths[id(root2)] + len(expansion2), )) # Add equations: Multiple occurrences of the same named axis must have the same depth symbolic_axis_depths = {} for root in exprs1 + exprs2: if root is not None: for axis in root.all(): if isinstance(axis, stage1.NamedAxis): if axis.name not in symbolic_axis_depths: symbolic_axis_depths[axis.name] = solver.Variable( f"symbolic_axis_depths[{axis.name}]", axis.name ) equations.append(( symbolic_expr_depths[id(axis)], symbolic_axis_depths[axis.name], )) # Add equations: Ellipses with the same id must have the same depth symbolic_ellipsis_depths = {} for root in 
exprs1 + exprs2: if root is not None: for ellipsis in root.all(): if isinstance(ellipsis, stage1.Ellipsis): if ellipsis.ellipsis_id not in symbolic_ellipsis_depths: symbolic_ellipsis_depths[ellipsis.ellipsis_id] = solver.Variable( f"symbolic_ellipsis_depths[{ellipsis.ellipsis_id}]", str(ellipsis) ) equations.append(( symbolic_expr_depths[id(ellipsis)], symbolic_ellipsis_depths[ellipsis.ellipsis_id], )) # Solve try: solutions = solver.solve(equations) except solver.SolveException as e: raise SolveDepthException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, str(e) ) from e expr_depths = {} for k, v in solutions.items(): if k.startswith("symbolic_expr_depths["): expr_depths[int(k[len("symbolic_expr_depths[") : -1])] = int(v) # Raise exception on missing depths failed_exprs = set() for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if id(expr) not in expr_depths: failed_exprs.add(str(expr)) if len(failed_exprs) > 0: raise SolveDepthException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Found no unique solutions for {failed_exprs}", ) # Raise exception on negative depths failed_exprs = set() for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if expr_depths[id(expr)] < 0: failed_exprs.add(str(expr)) if len(failed_exprs) > 0: raise SolveDepthException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Got negative depths for {failed_exprs}", ) for exprs, expansions, _depths in zip( [exprs1, exprs2], [expansions1, expansions2], [depths1, depths2] ): for i in range(len(exprs)): if exprs[i] is not None: missing_depth = expr_depths[id(exprs[i])] assert missing_depth >= 0 # Add missing dimensions to expansions if expansions[i] is not None: assert len(expansions[i]) >= 1 if missing_depth > 0: expansions[i] = [None] * missing_depth + list(expansions[i]) # Add missing ellipses around root expressions if missing_depth > 0: for _ in range(missing_depth): exprs[i] = stage1.Ellipsis(exprs[i], exprs[i].begin_pos, exprs[i].end_pos) expr_depths[id(exprs[i])] = expr_depths[id(exprs[i].inner)] - 1 # ##### 2. 
Find ellipsis expansions ##### equations = [] symbolic_expr_expansions = {} for root in exprs1 + exprs2: if root is not None: for expr in root.all(): for depth in range(expr_depths[id(expr)] + 1): key = (id(expr), depth) symbolic_expr_expansions[key] = solver.Variable( f"symbolic_expr_expansions[{id(expr)},{depth}]", f"{expr} at depth {depth}" ) # Add equations: Expansion of an expression at depth d (less than own depth) # is equal to the expansion of each child at depth d for root in exprs1 + exprs2: if root is not None: for expr in root.all(): for depth in range(expr_depths[id(expr)]): for child in expr.direct_children: equations.append(( symbolic_expr_expansions[(id(expr), depth)], symbolic_expr_expansions[(id(child), depth)], )) # Add equations: Relations between expressions and their children for root in exprs1 + exprs2: if root is not None: for expr in root.all(): depth = expr_depths[id(expr)] if isinstance(expr, stage1.List): v = sum(symbolic_expr_expansions[(id(child), depth)] for child in expr.children) elif isinstance(expr, stage1.Concatenation): v = 1 elif isinstance(expr, stage1.NamedAxis): v = 1 elif isinstance(expr, stage1.UnnamedAxis): v = 1 elif isinstance(expr, stage1.Composition): v = 1 elif isinstance(expr, stage1.Marker): v = symbolic_expr_expansions[(id(expr.inner), depth)] elif isinstance(expr, stage1.Ellipsis): v = symbolic_expr_expansions[(id(expr.inner), depth)] else: raise AssertionError(f"{expr}") equations.append((symbolic_expr_expansions[(id(expr), depth)], v)) # Add equations: Expansions stored in "expansions" for expansion1, expansion2, expr1, expr2 in zip(expansions1, expansions2, exprs1, exprs2): if expansion1 is not None and expansion2 is not None: if len(expansion1) != len(expansion2) or any( e1 is not None and e2 is not None and e1 != e2 for e1, e2 in zip(expansion1, expansion2) ): raise SolveExpansionException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Expansion '{expansion1}' of expression '{expr1}' does not match expansion " f"'{expansion2}' of expression '{expr2}'", ) if expansion1 is not None and expansion2 is not None: expansion = [e1 if e1 is not None else e2 for e1, e2 in zip(expansion1, expansion2)] elif expansion1 is not None: expansion = expansion1 elif expansion2 is not None: expansion = expansion2 else: expansion = None if expansion is not None: for depth, e in enumerate(expansion): if e is not None: if expr1 is not None and depth <= expr_depths[id(expr1)]: equations.append((symbolic_expr_expansions[(id(expr1), depth)], int(e))) if expr2 is not None and depth <= expr_depths[id(expr2)]: equations.append((symbolic_expr_expansions[(id(expr2), depth)], int(e))) # Add equations: Multiple occurrences of the same named axis must have the same expansions symbolic_axis_expansions = {} for root in exprs1 + exprs2: if root is not None: for axis in root.all(): if isinstance(axis, stage1.NamedAxis): for depth in range(expr_depths[id(axis)] + 1): if axis.name not in symbolic_axis_expansions: symbolic_axis_expansions[(axis.name, depth)] = solver.Variable( f"symbolic_axis_expansions[{axis.name},{depth}]", f"{axis.name} at depth {depth}", ) equations.append(( symbolic_expr_expansions[(id(axis), depth)], symbolic_axis_expansions[(axis.name, depth)], )) # Add equations: Ellipses with the same id must have the same expansions symbolic_ellipsis_expansions = {} for root in exprs1 + exprs2: if root is not None: for ellipsis in root.all(): if isinstance(ellipsis, stage1.Ellipsis): for depth in range(expr_depths[id(ellipsis)] + 1): if 
ellipsis.ellipsis_id not in symbolic_ellipsis_expansions: symbolic_ellipsis_expansions[(ellipsis.ellipsis_id, depth)] = ( solver.Variable( f"symbolic_ellipsis_expansions[{ellipsis.ellipsis_id},{depth}]", f"{ellipsis} at depth {depth}", ) ) equations.append(( symbolic_expr_expansions[(id(ellipsis), depth)], symbolic_ellipsis_expansions[(ellipsis.ellipsis_id, depth)], )) # Add equations: Same root expansions for root1, root2 in zip(exprs1, exprs2): if root1 is not None and root2 is not None: assert expr_depths[id(root1)] == expr_depths[id(root2)] for depth in range(expr_depths[id(root1)] + 1): equations.append(( symbolic_expr_expansions[(id(root1), depth)], symbolic_expr_expansions[(id(root2), depth)], )) # Solve try: solutions = solver.solve(equations) except solver.SolveException as e: raise SolveExpansionException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, str(e) ) from e def to_key(k): return int(id_expr), int(depth) expansion_values = {} for k, v in solutions.items(): if k.startswith("symbolic_expr_expansions["): k = k[len("symbolic_expr_expansions[") : -1] id_expr, depth = str(k).split(",") try: id_expr = int(id_expr) except ValueError: continue depth = int(depth) expansion_values[(id_expr, depth)] = int(v) failed_exprs = set() for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if (id(root), expr_depths[id(root)]) not in expansion_values: failed_exprs.add(str(expr)) if len(failed_exprs) == 1: raise SolveExpansionException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Found no unique solution for '{failed_exprs.pop()}'", ) elif len(failed_exprs) > 1: raise SolveExpansionException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Found no unique solutions for {failed_exprs}", ) def is_unnamed(expr): for expr in expr.all(): if isinstance(expr, stage1.NamedAxis): return False return True def get_unnamed_value(expr): if isinstance(expr, stage1.List): return np.prod([get_unnamed_value(child) for child in expr.children]).astype("int") elif isinstance(expr, stage1.Concatenation): return np.sum([get_unnamed_value(child) for child in expr.children]) elif isinstance(expr, stage1.NamedAxis): raise AssertionError() elif isinstance(expr, stage1.UnnamedAxis): return expr.value elif isinstance(expr, stage1.Composition): return get_unnamed_value(expr.inner) elif isinstance(expr, stage1.Marker): return get_unnamed_value(expr.inner) elif isinstance(expr, stage1.Ellipsis): value = get_unnamed_value(expr.inner) if value != 1: # TODO: implement this raise NotImplementedError( f"Found unnamed and unexpanded ellipsis '{expr}'. We currently disallow this " "case, since it could can take on multiple values ('2...' could have values " "2, 4, ...) that should be resolved in the solver and then checked to be " "consistent with these constraints." 
) return 1 else: raise AssertionError(f"{expr}") # Expand ellipses and map stage1 expressions to stage2 expressions def map(expr, ellipsis_indices): if isinstance(expr, list): return [c for expr in expr for c in map(expr, ellipsis_indices=ellipsis_indices)] elif isinstance(expr, stage1.NamedAxis): return [NamedAxis(expr.name, ellipsis_indices=ellipsis_indices)] elif isinstance(expr, stage1.UnnamedAxis): return [UnnamedAxis(expr.value, ellipsis_indices=ellipsis_indices)] elif isinstance(expr, stage1.List): return map(expr.children, ellipsis_indices=ellipsis_indices) elif isinstance(expr, stage1.Concatenation): return [ Concatenation( [ List.maybe( map(c, ellipsis_indices=ellipsis_indices), ellipsis_indices=ellipsis_indices, ) for c in expr.children ], ellipsis_indices=ellipsis_indices, ) ] elif isinstance(expr, stage1.Composition): return [ Composition( List.maybe( map(expr.inner, ellipsis_indices=ellipsis_indices), ellipsis_indices=ellipsis_indices, ), ellipsis_indices=ellipsis_indices, ) ] elif isinstance(expr, stage1.Marker): return [ Marker.maybe( List.maybe( map(expr.inner, ellipsis_indices=ellipsis_indices), ellipsis_indices=ellipsis_indices, ), ellipsis_indices=ellipsis_indices, ) ] elif isinstance(expr, stage1.Ellipsis): key = (id(expr), expr_depths[id(expr)]) if key in expansion_values: # Ellipsis is expanded expansion = expansion_values[key] if expansion < 0: raise SolveExpansionException( exprs1, exprs2, expansions1, expansions2, depths1, depths2, f"Ellipsis '{expr}' has negative expansion {expansion}", ) return [ c for i in range(expansion) for c in map(expr.inner, ellipsis_indices=ellipsis_indices + [(i, expansion)]) ] else: # Ellipsis is not expanded if is_unnamed(expr): # Contains no named axes -> convert to unnamed axis return [UnnamedAxis(get_unnamed_value(expr), ellipsis_indices=ellipsis_indices)] else: # Contains named axes -> convert to named axis return [NamedAxis(str(expr), ellipsis_indices=ellipsis_indices)] else: raise AssertionError(f"{expr}") exprs1 = [ List.maybe(map(root, ellipsis_indices=[]), ellipsis_indices=[]) if root is not None else None for root in exprs1 ] exprs2 = [ List.maybe(map(root, ellipsis_indices=[]), ellipsis_indices=[]) if root is not None else None for root in exprs2 ] return exprs1, exprs2 def cse(expressions, cse_concat=True, cse_in_markers=False, verbose=False): expressions = list(expressions) if any(expr is not None and not isinstance(expr, Expression) for expr in expressions): raise TypeError("Expected expressions to be of type Expression") # Find possible expressions, identified by their string representation str_to_common_expr = defaultdict(list) for root in expressions: if root is not None: for expr in root.all(): if expr.parent is not None: str_expr = str(expr) str_to_common_expr[str_expr].append([expr]) if isinstance(expr, List): for start_index in range(len(expr.children)): for end_index in range(start_index, len(expr.children)): children = expr.children[start_index : end_index + 1] str_expr = " ".join([str(c) for c in children]) str_to_common_expr[str_expr].append(children) if verbose: print("CSE: All subexpressions") for k in str_to_common_expr.keys(): print(f" {k}") # Keep only expressions # 1. with at least one named axis # 2. 
where named axes are not also used outside the expression common_exprs = set() for str_expr in str_to_common_expr.keys(): used_axis_ids = set() used_axis_names = set() for exprlist in str_to_common_expr[str_expr]: for expr in exprlist: for v in expr.all(): if isinstance(v, NamedAxis): used_axis_ids.add(id(v)) used_axis_names.add(v.name) if len(used_axis_ids) == 0: continue axes_used_only_in_this_subexpression = True for root in expressions: if root is not None: for global_axis in root.all(): if isinstance(global_axis, NamedAxis) and global_axis.name in used_axis_names: axes_used_only_in_this_subexpression = ( axes_used_only_in_this_subexpression and id(global_axis) in used_axis_ids ) if axes_used_only_in_this_subexpression: common_exprs.add(str_expr) common_exprs = [ str_to_common_expr[k] for k in common_exprs ] # list of common_expr(=list of exprlist) if verbose: print("CSE: Removed expressions with axes that are also used outside the expression") for v in common_exprs: print(f" {[' '.join([str(y) for y in x]) for x in v]}") def remove_duplicates(common_expr): new_common_expr = [] for exprlist1 in common_expr: is_duplicate = False for exprlist2 in new_common_expr: is_duplicate = is_duplicate or ( len(exprlist1) == len(exprlist2) and all(id(expr1) == id(expr2) for expr1, expr2 in zip(exprlist1, exprlist2)) ) if not is_duplicate: new_common_expr.append(exprlist1) return new_common_expr common_exprs = [remove_duplicates(exprlists) for exprlists in common_exprs] if verbose: print("CSE: Removed duplicates") for v in common_exprs: print( f" {[' '.join([str(y) for y in x]) for x in v]} " f"{[[id(y) for y in x] for x in v]}" ) # Remove singletons def is_singleton(expr): if isinstance(expr, list): return len(expr) == 1 and is_singleton(expr[0]) elif isinstance(expr, List): return is_singleton(expr.children) elif isinstance(expr, NamedAxis): return True elif isinstance(expr, UnnamedAxis): return True elif isinstance(expr, Marker): return is_singleton(expr.inner) else: return False common_exprs = [common_expr for common_expr in common_exprs if not is_singleton(common_expr[0])] if verbose: print("CSE: Removed singletons") for v in common_exprs: print(f" {[' '.join([str(y) for y in x]) for x in v]}") # Remove expressions with/ in markers if cse_in_markers: common_exprs = [ common_expr for common_expr in common_exprs if not any( isinstance(expr, Marker) for exprlist in common_expr for expr in exprlist for expr in expr.all() ) ] else: common_exprs = [ common_expr for common_expr in common_exprs if not any( einx.expr.stage2.is_marked(expr) for exprlist in common_expr for expr in exprlist for expr in expr.all() ) ] # Remove expressions that contain concatenations if not cse_concat: common_exprs = [ common_expr for common_expr in common_exprs if not any( isinstance(expr, Concatenation) for exprlist in common_expr for expr in exprlist for expr in expr.all() ) ] if verbose: print("CSE: Removed expressions with markers") for v in common_exprs: print(f" {[' '.join([str(y) for y in x]) for x in v]}") # Remove expressions at root level with len > 1 common_exprs = [ common_expr for common_expr in common_exprs if not ( is_at_root(common_expr[0][0]) and (len(common_expr[0]) > 1 or len(common_expr[0][0]) > 1) ) ] if verbose: print("CSE: Removed subexpressions of root with len > 1") for v in common_exprs: print(f" {[' '.join([str(y) for y in x]) for x in v]}") # Remove subexpressions of subexpressions def any_is_parent_of(parent, child): if isinstance(parent, list): return any(any_is_parent_of(p, child) for p in 
parent) elif isinstance(child, list): return any(any_is_parent_of(parent, c) for c in child) else: return child.parent is not None and ( id(child.parent) == id(parent) or any_is_parent_of(parent, child.parent) ) common_exprs = [ common_expr for common_expr in common_exprs if not any( id(common_expr) != id(common_expr2) and any_is_parent_of(common_expr2, common_expr) for common_expr2 in common_exprs ) ] if verbose: print("CSE: Removed subexpressions of subexpressions") for v in common_exprs: print(f" {[' '.join([str(y) for y in x]) for x in v]}") # All subexpressions have been found. Now replace them with new Axis objects. def replace(expr): if isinstance(expr, list) and len(expr) == 1: return replace(expr[0]) if not isinstance(expr, list): for idx, common_expr in enumerate(common_exprs): for exprlist in common_expr: if len(exprlist) == 1 and id(expr) == id(exprlist[0]): return [NamedAxis(f"cse.{idx}", expr.ellipsis_indices)] if isinstance(expr, list): result = [] i = 0 while i < len(expr): # Check if a subexpression starts at position i exprlist_found = None for idx, common_expr in enumerate(common_exprs): for exprlist in common_expr: for j in range(len(exprlist)): if i + j >= len(expr) or id(exprlist[j]) != id(expr[i + j]): break else: exprlist_found = exprlist if exprlist_found is not None: break exprlist = exprlist_found if exprlist is not None: assert len(exprlist) > 0 result.append(NamedAxis(f"cse.{idx}", exprlist[0].ellipsis_indices)) i += len(exprlist) else: result.extend(replace(expr[i])) i += 1 return result elif isinstance(expr, NamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, UnnamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, List): return replace(expr.children) elif isinstance(expr, Concatenation): return [ Concatenation( [c2 for c1 in expr.children for c2 in replace(c1)], expr.ellipsis_indices ) ] elif isinstance(expr, Marker): return [ Marker.maybe( List.maybe(replace(expr.inner), expr.ellipsis_indices), expr.ellipsis_indices ) ] elif isinstance(expr, Composition): return [ Composition( List.maybe(replace(expr.inner), expr.ellipsis_indices), expr.ellipsis_indices ) ] else: raise AssertionError() return [ List.maybe(replace(root), ellipsis_indices=[]) if root is not None else None for root in expressions ] def expr_map(f): def outer(expr, *args, **kwargs): # Wrap the user function to return a list of expressions def f2(expr): t = f(expr, *args, **kwargs) if t is None: return None, expr_map.CONTINUE expr, signal = t if isinstance(expr, list) or expr is None: return expr, signal if isinstance(expr, List): return expr.children, signal elif isinstance(expr, Expression): return [expr], signal else: raise TypeError(f"Invalid return type {type(expr)}") return List.maybe(_expr_map(expr, f2)) return outer expr_map.CONTINUE = 1 expr_map.COPY_AND_STOP = 2 expr_map.REPLACE_AND_STOP = 3 expr_map.REPLACE_AND_CONTINUE = 4 def _expr_map(expr, f): exprs, signal = f(expr) if signal == expr_map.REPLACE_AND_STOP: assert isinstance(exprs, list) return exprs elif signal == expr_map.COPY_AND_STOP: return [expr.__deepcopy__()] elif signal == expr_map.REPLACE_AND_CONTINUE: return [c for expr in exprs for c in _expr_map(expr, f)] if isinstance(expr, NamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, UnnamedAxis): return [expr.__deepcopy__()] elif isinstance(expr, Composition): return [Composition(List.maybe(_expr_map(expr.inner, f)))] elif isinstance(expr, List): return [c2 for c1 in expr.children for c2 in _expr_map(c1, f)] elif isinstance(expr, Concatenation): 
return [Concatenation([List.maybe(_expr_map(c, f)) for c in expr.children])] elif isinstance(expr, Marker): x = _expr_map(expr.inner, f) if len(x) == 0: # Drop empty marker return [] else: return [Marker.maybe(List.maybe(x))] else: raise TypeError(f"Invalid expression type {type(expr)}") @expr_map def demark(expr): if isinstance(expr, Marker): return expr.inner, expr_map.REPLACE_AND_CONTINUE def any_parent_is(expr, pred, include_self=True): if not include_self: if expr.parent is None: return False expr = expr.parent while expr is not None: if pred(expr): return True expr = expr.parent return False def is_at_root(expr): return not any_parent_is(expr, lambda expr: isinstance(expr, Composition), include_self=False) def is_marked(expr): return any_parent_is(expr, lambda expr: isinstance(expr, Marker)) def _get_marked(expr): if isinstance(expr, NamedAxis): return [] elif isinstance(expr, UnnamedAxis): return [] elif isinstance(expr, Marker): return [expr.inner.__deepcopy__()] elif isinstance(expr, Concatenation): return [Concatenation.maybe([x for c in expr.children for x in _get_marked(c)])] elif isinstance(expr, Composition): return [Composition(List.maybe(_get_marked(expr.inner)))] elif isinstance(expr, List): return [List.maybe([x for c in expr.children for x in _get_marked(c)])] else: raise TypeError(f"Invalid expression type {type(expr)}") def get_marked(expr): return List.maybe(_get_marked(expr)) def get_unmarked(expr): return remove(expr, lambda expr: not is_marked(expr)) @expr_map def replace(expr, f): expr = f(expr) if expr is not None: return expr, expr_map.REPLACE_AND_STOP @expr_map def remove(expr, pred): if pred(expr): return [], expr_map.REPLACE_AND_STOP python-einx-0.3.0/einx/expr/stage3.py000066400000000000000000000410211505216034200174510ustar00rootroot00000000000000from . 
import stage2, solver import numpy as np from functools import partial import einx class Expression: def __init__(self, value): if not isinstance(value, (int, np.integer)): raise TypeError(f"Expected int, got {type(value)}") self.value = int(value) self.parent = None @property def shape(self): return tuple(x.value for x in self) class Composition(Expression): @staticmethod def maybe(inner): if len(inner) == 0: return Axis(None, 1) elif isinstance(inner, list): if len(inner) == 1: return inner[0] else: return Composition(List.maybe(inner)) elif isinstance(inner, List) and len(inner) == 1: return inner.children[0] else: return Composition(inner) def __init__(self, inner): Expression.__init__(self, inner.value) self.inner = inner inner.parent = self assert len(inner) > 0 def __str__(self): return f"({self.inner})" def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return Composition(self.inner.__deepcopy__()) def __eq__(self, other): return isinstance(other, Composition) and self.inner == other.inner def __hash__(self): return 8716123 + hash(self.inner) def all(self): yield self yield from self.inner.all() class List(Expression): def maybe(l, *args, **kwargs): if not isinstance(l, list): raise TypeError(f"Expected list, got {type(l)}") if len(l) == 1: return l[0] else: return List(l, *args, **kwargs) def __init__(self, children): Expression.__init__(self, np.prod([c.value for c in children]).astype(int)) self.children = children for c in children: if isinstance(c, List): raise ValueError("List cannot have another List as direct child") c.parent = self def __str__(self): return " ".join([str(c) for c in self.children]) def __getitem__(self, i): return self.children[i] def __len__(self): return sum(len(c) for c in self.children) def __iter__(self): for c in self.children: yield from c def __deepcopy__(self): return List([c.__deepcopy__() for c in self.children]) def __eq__(self, other): return isinstance(other, List) and self.children == other.children def __hash__(self): return 6563 + hash(tuple(self.children)) def all(self): yield self for c in self.children: yield from c.all() class Axis(Expression): def __init__(self, name, value): Expression.__init__(self, value) self.name = name if name is not None else f"unnamed.{id(self)}" def __str__(self): return self.name if not self.is_unnamed else str(self.value) def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return Axis(self.name, self.value) def __eq__(self, other): if not isinstance(other, Axis): return False if self.is_unnamed != other.is_unnamed: return False if self.value != other.value: return False if self.is_unnamed: return True else: return self.name == other.name def __hash__(self): return 9817234 + (hash(self.name) if not self.is_unnamed else 0) + hash(self.value) def all(self): yield self @property def is_unnamed(self): return self.name.startswith("unnamed.") class Concatenation(Expression): @staticmethod def maybe(l, *args, **kwargs): if not isinstance(l, list): raise TypeError(f"Expected list, got {type(l)}") if len(l) == 1: return l[0] else: return Concatenation(l, *args, **kwargs) def __init__(self, children): if len(children) == 0: raise ValueError("Concatenation must have at least one child") Expression.__init__(self, np.sum([c.value for c in children]).astype("int32")) self.children = children for c in children: if len(c) != 1: raise ValueError( "Concatenation can only be used on expressions of length 1, but" f"got expression '{c}'" ) c.parent = self def 
__str__(self): return "+".join([str(c) for c in self.children]) def __len__(self): return 1 def __iter__(self): yield self def __deepcopy__(self): return Concatenation([c.__deepcopy__() for c in self.children]) def __eq__(self, other): return isinstance(other, Concatenation) and self.children == other.children def __hash__(self): return 123 + hash(tuple(self.children)) def all(self): yield self for c in self.children: yield from c.all() class Marker(Expression): def __init__(self, inner): if len(inner) == 0: raise ValueError("Marker cannot have empty list as child") Expression.__init__(self, inner.value) self.inner = inner inner.parent = self def __str__(self): return f"[{self.inner}]" def __len__(self): return len(self.inner) def __iter__(self): yield from self.inner def __deepcopy__(self): return Marker(self.inner.__deepcopy__()) def __eq__(self, other): return isinstance(other, Marker) and self.inner == other.inner def __hash__(self): return 6433236 + hash(self.inner) def all(self): yield self yield from self.inner.all() class SolveValueException(solver.SolveException): def __init__(self, exprs1, exprs2, message): self.exprs1 = exprs1 self.exprs2 = exprs2 message = f"Failed to solve values of expressions. {message}\nInput:\n" for expr1, expr2 in zip(exprs1, exprs2): message += f" '{einx.expr.util._to_str(expr1)} = {einx.expr.util._to_str(expr2)}'\n" super().__init__(message) def solve(exprs1, exprs2): exprs1 = list(exprs1) exprs2 = list(exprs2) if any( expr is not None and not isinstance(expr, stage2.Expression) for expr in exprs1 + exprs2 ): raise ValueError("Can only expand stage2.Expression") if len(exprs1) != len(exprs2): raise ValueError("Number of expressions must be equal") equations = [] symbolic_expr_values = {} for root in exprs1 + exprs2: if root is not None: for expr in root.all(): symbolic_expr_values[id(expr)] = solver.Variable( f"symbolic_expr_values[{id(expr)}]", str(expr) ) # Add equations: Relations between expressions and their children for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if isinstance(expr, stage2.List): equations.append(( solver.Product([symbolic_expr_values[id(c)] for c in expr.children]), symbolic_expr_values[id(expr)], )) elif isinstance(expr, stage2.Concatenation): equations.append(( solver.Sum([symbolic_expr_values[id(c)] for c in expr.children]), symbolic_expr_values[id(expr)], )) elif isinstance(expr, stage2.Marker) or isinstance(expr, stage2.Composition): equations.append(( symbolic_expr_values[id(expr)], symbolic_expr_values[id(expr.inner)], )) # Add equations: Same root values for root1, root2 in zip(exprs1, exprs2): if root1 is not None and root2 is not None: assert len(root1) == len(root2) for expr1, expr2 in zip(root1, root2): equations.append(( symbolic_expr_values[id(expr1)], symbolic_expr_values[id(expr2)], )) # Add equations: Unnamed axes for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if isinstance(expr, stage2.UnnamedAxis): equations.append(( symbolic_expr_values[id(expr)], int(expr.value), )) # Add equations: Multiple occurrences of the same named axis must have the same value sympy_axis_values = {} for root in exprs1 + exprs2: if root is not None: for axis in root.all(): if isinstance(axis, stage2.NamedAxis): if axis.name not in sympy_axis_values: sympy_axis_values[axis.name] = solver.Variable( f"sympy_axis_values[{axis.name}]", axis.name ) equations.append(( symbolic_expr_values[id(axis)], sympy_axis_values[axis.name], )) # Solve try: solutions = solver.solve(equations) except 
solver.SolveException as e: raise SolveValueException(exprs1, exprs2, str(e)) from e axis_values = {} for k, v in solutions.items(): if k.startswith("symbolic_expr_values["): axis_values[int(k[len("symbolic_expr_values[") : -1])] = int(v) failed_axes = set() for root in exprs1 + exprs2: if root is not None: for expr in root.all(): if isinstance(expr, stage2.NamedAxis): if id(expr) not in axis_values: failed_axes.add(str(expr)) if len(failed_axes) > 0: raise SolveValueException(exprs1, exprs2, f"Found no unique solutions for {failed_axes}") # Map stage2 expressions to stage3 expressions def map(expr): if isinstance(expr, stage2.NamedAxis): assert id(expr) in axis_values if axis_values[id(expr)] <= 0: raise SolveValueException( exprs1, exprs2, f"Axis '{expr}' has value {axis_values[id(expr)]} <= 0" ) return Axis(expr.name, axis_values[id(expr)]) elif isinstance(expr, stage2.UnnamedAxis): assert id(expr) in axis_values if axis_values[id(expr)] <= 0: raise SolveValueException( exprs1, exprs2, f"Axis '{expr}' has value {axis_values[id(expr)]} <= 0" ) return Axis(None, axis_values[id(expr)]) elif isinstance(expr, stage2.List): return List([map(child) for child in expr.children]) elif isinstance(expr, stage2.Concatenation): return Concatenation([map(child) for child in expr.children]) elif isinstance(expr, stage2.Marker): return Marker(map(expr.inner)) elif isinstance(expr, stage2.Composition): return Composition.maybe(map(expr.inner)) else: raise AssertionError(type(expr)) exprs1 = [map(root) if root is not None else None for root in exprs1] exprs2 = [map(root) if root is not None else None for root in exprs2] return exprs1, exprs2 def expr_map(f): def outer(expr, *args, **kwargs): # Wrap the user function to return a list of expressions def f2(expr): t = f(expr, *args, **kwargs) if t is None: return None, expr_map.CONTINUE expr, signal = t if isinstance(expr, list) or expr is None: return expr, signal if isinstance(expr, List): return expr.children, signal elif isinstance(expr, Expression): return [expr], signal else: raise TypeError(f"Invalid return type {type(expr)}") return List.maybe(_expr_map(expr, f2)) return outer expr_map.CONTINUE = 1 expr_map.COPY_AND_STOP = 2 expr_map.REPLACE_AND_STOP = 3 expr_map.REPLACE_AND_CONTINUE = 4 def _expr_map(expr, f): exprs, signal = f(expr) if signal == expr_map.REPLACE_AND_STOP: assert isinstance(exprs, list) return exprs elif signal == expr_map.COPY_AND_STOP: return [expr.__deepcopy__()] elif signal == expr_map.REPLACE_AND_CONTINUE: return [c for expr in exprs for c in _expr_map(expr, f)] if isinstance(expr, Axis): return [expr.__deepcopy__()] elif isinstance(expr, Composition): return [Composition.maybe(List.maybe(_expr_map(expr.inner, f)))] elif isinstance(expr, List): return [c2 for c1 in expr.children for c2 in _expr_map(c1, f)] elif isinstance(expr, Concatenation): children = [List.maybe(_expr_map(c, f)) for c in expr.children] children = [c if len(c) > 0 else Axis(None, 1) for c in children] return [Concatenation(children)] elif isinstance(expr, Marker): x = _expr_map(expr.inner, f) if len(x) == 0: # Drop empty marker return [] else: return [Marker(List.maybe(x))] else: raise TypeError(f"Invalid expression type {type(expr)}") @expr_map def decompose(expr): if isinstance(expr, Composition): return expr.inner, expr_map.REPLACE_AND_CONTINUE elif isinstance(expr, Concatenation): return None, expr_map.COPY_AND_STOP @expr_map def demark(expr): if isinstance(expr, Marker): return expr.inner, expr_map.REPLACE_AND_CONTINUE @expr_map def replace(expr, f): 
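    # Note (added comment): the user callback f is applied to every node; when it returns a
    # replacement expression, that replacement is substituted and the subtree is not descended
    # further (REPLACE_AND_STOP). When it returns None, traversal continues into the node's
    # children via expr_map above.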
expr = f(expr) if expr is not None: return expr, expr_map.REPLACE_AND_STOP @expr_map def remove(expr, pred): if pred(expr): return [], expr_map.REPLACE_AND_STOP def remove_unnamed_trivial_axes(expr): def is_concat_child(expr): # Do not remove direct children of concatenations return expr.parent is not None and ( isinstance(expr.parent, Concatenation) or (isinstance(expr.parent, Marker) and is_concat_child(expr.parent)) ) return remove( expr, lambda expr: isinstance(expr, Axis) and expr.is_unnamed and expr.value == 1 and not is_concat_child(expr), ) @expr_map def mark(expr, pred): if ( not isinstance(expr, Marker) and (expr.parent is None or not isinstance(expr.parent, Marker)) and pred(expr) ): return Marker(expr.__deepcopy__()), expr_map.REPLACE_AND_CONTINUE def any_parent_is(expr, pred, include_self=True): if not include_self: if expr.parent is None: return False expr = expr.parent while expr is not None: if pred(expr): return True expr = expr.parent return False def is_marked(expr): return any_parent_is(expr, lambda expr: isinstance(expr, Marker)) def is_at_root(expr): return not any_parent_is(expr, lambda expr: isinstance(expr, Composition)) def is_flat(expr): return all( not isinstance(expr, Composition) and not isinstance(expr, Concatenation) for expr in expr.all() ) def get_axes(expr): return [expr for expr in expr.all() if isinstance(expr, Axis)] def get_named_axes(expr): return [expr for expr in expr.all() if isinstance(expr, Axis) and not expr.is_unnamed] def _get_marked(expr): if isinstance(expr, Axis): return [] elif isinstance(expr, Marker): return [expr.inner.__deepcopy__()] elif isinstance(expr, Concatenation): return [Concatenation.maybe([x for c in expr.children for x in _get_marked(c)])] elif isinstance(expr, Composition): return [Composition.maybe(List.maybe(_get_marked(expr.inner)))] elif isinstance(expr, List): return [List.maybe([x for c in expr.children for x in _get_marked(c)])] else: raise TypeError(f"Invalid expression type {type(expr)}") def get_marked(expr): return List.maybe(_get_marked(expr)) def get_unmarked(expr): return remove(expr, lambda expr: is_marked(expr)) python-einx-0.3.0/einx/expr/util.py000066400000000000000000000120401505216034200172370ustar00rootroot00000000000000from . 
import stage1, stage2, stage3 import numpy as np import einx def _get_expansion(expr): if isinstance(expr, stage1.Expression): return (expr.expansion(),) elif isinstance(expr, (stage2.Expression, stage3.Expression)): return (len(expr),) elif isinstance(expr, np.ndarray): return tuple(expr.shape) else: return None def _input_expr(expr): if expr is None or isinstance( expr, (str, stage1.Expression, stage2.Expression, stage3.Expression) ): return expr else: if isinstance(expr, np.ndarray): pass elif expr == [] or expr == (): expr = np.asarray(expr).astype("int32") else: try: expr = np.asarray(expr) except Exception as e: raise ValueError(f"Invalid expression '{expr}'") from e if not np.issubdtype(expr.dtype, np.integer): raise ValueError(f"Invalid expression '{expr}', must be integers") expr = " ".join([str(i) for i in expr.flatten()]) return expr class Equation: def __init__(self, expr1, expr2=None, depth1=0, depth2=0): self.expr1 = _input_expr(expr1) self.expr2 = _input_expr(expr2) self.expansion1 = _get_expansion(expr1) self.expansion2 = _get_expansion(expr2) self.depth1 = depth1 self.depth2 = None if expr2 is None else depth2 def __repr__(self): return f"{self.expr} = {self.value.tolist()} (expansion={self.expansion} at " f"depth={self.depth})" def _to_str(l): # Print numpy arrays in a single line rather than with line breaks if l is None: return "None" elif isinstance(l, np.ndarray): return str(tuple(l.tolist())) elif isinstance(l, list): return str(tuple(l)) else: return str(l) def solve( equations, cse=True, cse_concat=True, cse_in_markers=False, after_stage2=None, verbose=False ): if any(not isinstance(c, Equation) for c in equations): raise ValueError("All arguments must be of type Equation") exprs1 = [t.expr1 for t in equations] exprs2 = [t.expr2 for t in equations] expansions1 = [t.expansion1 for t in equations] expansions2 = [t.expansion2 for t in equations] depths1 = [t.depth1 for t in equations] depths2 = [t.depth2 for t in equations] if verbose: print("Stage0:") for expr1, expr2, expansion1, expansion2, depth1, depth2 in zip( exprs1, exprs2, expansions1, expansions2, depths1, depths2 ): print( f" {_to_str(expr1)} (expansion={_to_str(expansion1)} at depth={depth1}) = " f"{_to_str(expr2)} (expansion={_to_str(expansion2)} at depth={depth2})" ) exprs1 = [(stage1.parse_arg(expr) if isinstance(expr, str) else expr) for expr in exprs1] exprs2 = [(stage1.parse_arg(expr) if isinstance(expr, str) else expr) for expr in exprs2] expansions1 = [ expansion if expansion is not None else _get_expansion(expr) for expansion, expr in zip(expansions1, exprs1) ] expansions2 = [ expansion if expansion is not None else _get_expansion(expr) for expansion, expr in zip(expansions2, exprs2) ] if verbose: print("Stage1:") for expr1, expr2, expansion1, expansion2, depth1, depth2 in zip( exprs1, exprs2, expansions1, expansions2, depths1, depths2 ): print( f" {_to_str(expr1)} (expansion={_to_str(expansion1)} at depth={depth1}) = " f"{_to_str(expr2)} (expansion={_to_str(expansion2)} at depth={depth2})" ) exprs1, exprs2 = stage2.solve(exprs1, exprs2, expansions1, expansions2, depths1, depths2) if verbose: print("Stage2:") for expr1, expr2 in zip(exprs1, exprs2): print(f" {_to_str(expr1)} = {_to_str(expr2)}") if cse: exprs = stage2.cse(exprs1 + exprs2, cse_concat=cse_concat, cse_in_markers=cse_in_markers) exprs1, exprs2 = exprs[: len(exprs1)], exprs[len(exprs1) :] if verbose: print("Stage2.CSE:") for expr1, expr2 in zip(exprs1, exprs2): print(f" {_to_str(expr1)} = {_to_str(expr2)}") if after_stage2 is not None: 
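        # Note (added comment): re-enter the solver once, appending the extra equations produced
        # by the after_stage2 callback from the stage2 expressions (after CSE, if enabled); the
        # recursive call passes after_stage2=None so this re-entry happens only one time.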
return solve( equations + after_stage2(exprs1, exprs2), cse=cse, cse_concat=cse_concat, cse_in_markers=cse_in_markers, after_stage2=None, verbose=verbose, ) exprs1, exprs2 = stage3.solve(exprs1, exprs2) if verbose: print("Stage3:") for expr1, expr2 in zip(exprs1, exprs2): assert expr1 is None or expr2 is None or expr1.shape == expr2.shape shape = expr1.shape if expr1 is not None else expr2.shape shape = " ".join(str(i) for i in shape) print(f" {_to_str(expr1)} = {_to_str(expr2)} = {shape}") return exprs1 python-einx-0.3.0/einx/nn/000077500000000000000000000000001505216034200153505ustar00rootroot00000000000000python-einx-0.3.0/einx/nn/__init__.py000066400000000000000000000000221505216034200174530ustar00rootroot00000000000000from .nn import * python-einx-0.3.0/einx/nn/equinox.py000066400000000000000000000221371505216034200174170ustar00rootroot00000000000000import einx import jax import equinox as eqx from functools import partial import jax.numpy as jnp from typing import Optional, Callable, Any # TODO: type annotations tjax = einx.tracer.import_("jax") def create_or_retrieve(concrete, name, shape, dtype, init): if name in vars(concrete.module) and vars(concrete.module)[name] is not None: tensor = vars(concrete.module)[name] else: tensor = vars(concrete.module)[name] = init(concrete.rng, shape, dtype) return tensor class ParamFactory: class Concrete(einx.tracer.input.Input): def __init__(self, module, name, init, dtype, rng): self.module = module self.name = name self.init = init if dtype is None: if hasattr(module, "dtype"): dtype = module.dtype else: dtype = "float32" self.dtype = dtype self.rng = rng def to_value_and_key(self): return self, ParamFactory.CacheKey(self.name, self.init, self.dtype) class CacheKey(einx.tracer.input.CacheKey): def __init__(self, name, init, dtype): self.name = name self.init = init self.dtype = dtype def __hash__(self): return hash((self.name, self.init, self.dtype)) def __eq__(self, other): return ( isinstance(other, ParamFactory.CacheKey) and self.name == other.name and self.init == other.init and self.dtype == other.dtype ) def to_tracer(self, backend, virtual_arg): x = ParamFactory.Tracer(self.name, self.init, self.dtype) return x, x class Tracer(einx.tracer.TensorFactory): def __init__(self, name, init, dtype): self.name = name self.init = init self.dtype = dtype def __call__(self, shape, kwargs): name = self.name if not self.name is None else kwargs.get("name", None) init = self.init if not self.init is None else kwargs.get("init", None) dtype = self.dtype if not self.dtype is None else kwargs.get("dtype", None) if name is None: raise ValueError("Must specify name for tensor factory eqx.Module") if init is None: raise ValueError("Must specify init for tensor factory eqx.Module") elif isinstance(init, str): if init == "get_at" or init == "rearrange": init = tjax.nn.initializers.normal(stddev=0.02) elif init == "add": init = tjax.nn.initializers.constant(0.0, dtype=dtype) elif init == "multiply": init = tjax.nn.initializers.constant(1.0, dtype=dtype) elif init == "dot": init = tjax.nn.initializers.lecun_normal( kwargs["in_axis"], kwargs["out_axis"], kwargs["batch_axis"] ) else: raise ValueError(f"Don't know which initializer to use for operation '{init}'") elif isinstance(init, (int, float)): init = tjax.nn.initializers.constant(init, dtype=dtype) return einx.tracer.apply( create_or_retrieve, # TODO: make tracable args=[self, name, shape, dtype, init], output=einx.tracer.Tensor(shape), ) def param(module, name=None, init=None, dtype=None, rng=None): 
"""Create a tensor factory for Equinox parameters. Args: module: The module to create the parameter in. Must be an instance of ``eqx.Module``. name: Name of the parameter. If ``None``, uses a default name determined from the calling operation. Defaults to ``None``. init: Initializer for the parameter. If ``None``, uses a default init method determined from the calling operation. Defaults to ``None``. dtype: Data type of the parameter. If ``None``, uses the ``dtype`` member of the calling module or ``float32`` if it does not exist. Defaults to ``None``. Returns: A tensor factory with the given default parameters. """ return ParamFactory.Concrete(module, name, init, dtype, rng) class Norm(eqx.Module): """Normalization layer. Args: stats: Einstein string determining the axes along which mean and variance are computed. Will be passed to ``einx.reduce``. params: Einstein string determining the axes along which learnable parameters are applied. Will be passed to ``einx.elementwise``. Defaults to ``"b... [c]"``. mean: Whether to apply mean normalization. Defaults to ``True``. var: Whether to apply variance normalization. Defaults to ``True``. scale: Whether to apply a learnable scale according to ``params``. Defaults to ``True``. bias: Whether to apply a learnable bias according to ``params``. Defaults to ``True``. epsilon: A small float added to the variance to avoid division by zero. Defaults to ``1e-5``. fastvar: Whether to use a fast variance computation. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. decay_rate: Decay rate for exponential moving average of mean and variance. If ``None``, no moving average is applied. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ stats: str params: str mean: bool var: bool use_scale: bool use_bias: bool scale: Optional[jax.Array] bias: Optional[jax.Array] decay_rate: Optional[float] epsilon: float fastvar: bool dtype: str kwargs: dict def __init__( self, stats: str, params: str = "b... [c]", mean: bool = True, var: bool = True, scale: bool = True, bias: bool = True, decay_rate: Optional[float] = None, epsilon: float = 1e-5, fastvar: bool = True, dtype: Any = "float32", **kwargs: Any, ): if decay_rate is not None: raise ValueError("Stateful layers are currently not supported in Equinox") self.stats = stats self.params = params self.mean = mean self.var = var self.use_scale = scale self.use_bias = bias self.scale = None self.bias = None self.decay_rate = decay_rate self.epsilon = epsilon self.fastvar = fastvar self.dtype = dtype self.kwargs = kwargs def __call__(self, x, rng=None): x, _mean, _var = einx.nn.norm( x, self.stats, self.params, mean=self.mean, var=self.var, scale=param(self, name="scale", rng=rng) if self.use_scale else None, bias=param(self, name="bias", rng=rng) if self.use_bias else None, epsilon=self.epsilon, fastvar=self.fastvar, **self.kwargs, ) return x class Linear(eqx.Module): """Linear layer. Args: expr: Einstein string determining the axes along which the weight matrix is multiplied. Will be passed to ``einx.dot``. bias: Whether to apply a learnable bias. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. 
""" expr: str weight: jax.Array bias: Optional[jax.Array] use_bias: bool kwargs: dict def __init__(self, expr: str, bias: bool = True, dtype: Any = "float32", **kwargs: Any): self.expr = expr self.use_bias = bias self.weight = None self.bias = None self.kwargs = kwargs def __call__(self, x, rng=None): return einx.nn.linear( x, self.expr, bias=param(self, name="bias", rng=rng) if self.use_bias is not None else None, weight=param(self, name="weight", rng=rng), **self.kwargs, ) class Dropout(eqx.Module): """Dropout layer. Args: expr: Einstein string determining the axes along which dropout is applied. Will be passed to ``einx.elementwise``. drop_rate: Drop rate. inference: Whether the layer is used in inference mode (i.e. not apply dropout). Defaults to ``False``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ expr: str drop_rate: float kwargs: dict inference: bool def __init__(self, expr: str, drop_rate: float, inference: bool = False, **kwargs: Any): self.expr = expr self.drop_rate = drop_rate self.kwargs = kwargs self.inference = inference def __call__(self, x, rng): if not self.inference: return einx.nn.dropout( x, self.expr, drop_rate=self.drop_rate, rng=rng, **self.kwargs, ) else: return x python-einx-0.3.0/einx/nn/flax.py000066400000000000000000000311221505216034200166530ustar00rootroot00000000000000import flax.linen as nn import einx import flax from functools import partial import jax.numpy as jnp from typing import Callable, Union, Optional, Any tnn = einx.tracer.import_("flax.linen", "nn") class ParamFactory: class Concrete(einx.tracer.input.Input): def __init__(self, module, name, init, dtype, col, param_type): self.module = module self.name = name self.init = init if dtype is None: if hasattr(module, "dtype"): dtype = module.dtype else: dtype = "float32" self.dtype = dtype self.col = col if param_type == "param": if col is not None: raise ValueError("col is not accepted for flax.linen.Module.param") elif param_type == "variable": if col is None: raise ValueError("col must be specified for flax.linen.Module.variable") else: raise ValueError(f"Unknown tensor factory flax.linen.Module.{param_type}") self.param_type = param_type def to_value_and_key(self): return self.module, ParamFactory.CacheKey( self.name, self.init, self.dtype, self.col, self.param_type ) class CacheKey(einx.tracer.input.CacheKey): def __init__(self, name, init, dtype, col, param_type): self.name = name self.init = init self.dtype = dtype self.col = col self.param_type = param_type def __hash__(self): return hash((self.name, self.init, self.dtype, self.col, self.param_type)) def __eq__(self, other): return ( isinstance(other, ParamFactory.CacheKey) and self.name == other.name and self.init == other.init and self.dtype == other.dtype and self.col == other.col and self.param_type == other.param_type ) def to_tracer(self, backend, virtual_arg): x = ParamFactory.Tracer(self.name, self.init, self.dtype, self.col, self.param_type) return x, x class Tracer(einx.tracer.TensorFactory): def __init__(self, name, init, dtype, col, param_type): self.name = name self.init = init self.dtype = dtype self.col = col self.param_type = param_type def __call__(self, shape, kwargs): name = self.name if not self.name is None else kwargs.get("name", None) init = self.init if not self.init is None else kwargs.get("init", None) dtype = self.dtype if not self.dtype is None else kwargs.get("dtype", None) col = self.col if name is None: raise ValueError( "Must specify name for tensor factory 
flax.linen.Module.{param|variable}" ) if init is None: raise ValueError( "Must specify init for tensor factory flax.linen.Module.{param|variable}" ) elif isinstance(init, str): if init == "get_at" or init == "rearrange": init = tnn.initializers.normal(stddev=0.02) elif init == "add": init = tnn.initializers.zeros_init() elif init == "multiply": init = tnn.initializers.ones_init() elif init == "dot": init = tnn.initializers.lecun_normal( kwargs["in_axis"], kwargs["out_axis"], kwargs["batch_axis"] ) else: raise ValueError(f"Don't know which initializer to use for operation '{init}'") elif isinstance(init, (int, float)): init = tnn.initializers.constant(init, dtype=dtype) if self.param_type == "param": x = einx.tracer.apply( self.param, args=[name, init, shape, dtype], output=einx.tracer.Tensor(shape) ) else: assert self.param_type == "variable" # Assume that variable initialization does not need an rng key by passing None x = einx.tracer.apply( self.variable, args=[col, name, init, None, shape, dtype], ) x = einx.tracer.apply( einx.tracer.MemberAccess(), args=[x, "value"], output=einx.tracer.Tensor(shape) ) return x def param( x: Union[Callable, nn.Module], name: Optional[str] = None, init: Optional[Any] = None, dtype: Optional[nn.dtypes.Dtype] = None, col: Optional[str] = None, ): """Create a tensor factory for Flax parameters. Args: x: The bound method of a Flax module, i.e. ``nn.Module.param`` or ``nn.Module.variable``, or a module instance in which case its ``param`` method is used. name: Name of the parameter. If ``None``, uses a default name determined from the calling operation. Defaults to ``None``. init: Initializer for the parameter. If ``None``, uses a default init method determined from the calling operation. Defaults to ``None``. dtype: Data type of the parameter. If ``None``, uses the ``dtype`` member of the calling module or ``float32`` if it does not exist. Defaults to ``None``. col: The collection name to use when ``bound_method`` is ``nn.Module.variable``. Returns: A tensor factory with the given default parameters. """ if hasattr(x, "__func__") and x.__func__ == nn.Module.param: module = x.__self__ param_type = "param" elif hasattr(x, "__func__") and x.__func__ == nn.Module.variable: module = x.__self__ param_type = "variable" elif isinstance(x, nn.Module): module = x param_type = "param" else: raise ValueError("x must be a bound method of a Flax module or a Flax module instance") return ParamFactory.Concrete(module, name, init, dtype, col, param_type) # Allow passing nn.Module, nn.Module.param, nn.Module.variable as tensor factory: @einx.tracer.input.register_tensor_factory def tensor_factory(x): if isinstance(x, nn.Module) or ( hasattr(x, "__func__") and (x.__func__ == nn.Module.param or x.__func__ == nn.Module.variable) ): return param(x).to_value_and_key() else: return None # Using _ prefix on classes and a separater constructor, since dataclass/nn.Module does # not support **kwargs parameter. class _Norm(nn.Module): stats: str params: str = "b... 
[c]" mean: bool = True var: bool = True scale: bool = True bias: bool = True decay_rate: float = None epsilon: float = 1e-5 fastvar: bool = True dtype: nn.dtypes.Dtype = "float32" kwargs: dict = None @nn.compact def __call__(self, x, training=None): if self.decay_rate is not None and training is None: raise ValueError("training must be specified when decay_rate is used") use_ema = self.decay_rate is not None and (not training or self.is_initializing()) x, mean, var = einx.nn.norm( x, self.stats, self.params, mean=param(self.variable, col="stats", name="mean", dtype=self.dtype) if use_ema and self.mean else self.mean, var=param(self.variable, col="stats", name="var", dtype=self.dtype) if use_ema and self.var else self.var, scale=param(self.param, name="scale", dtype=self.dtype) if self.scale else None, bias=param(self.param, name="bias", dtype=self.dtype) if self.bias else None, epsilon=self.epsilon, fastvar=self.fastvar, **(self.kwargs if self.kwargs is not None else {}), ) update_ema = self.decay_rate is not None and training and not self.is_initializing() if update_ema: if self.mean: mean_ema = self.variable("stats", "mean", None) mean_ema.value = self.decay_rate * mean_ema.value + (1 - self.decay_rate) * mean if self.var: var_ema = self.variable("stats", "var", None) var_ema.value = self.decay_rate * var_ema.value + (1 - self.decay_rate) * var return x def Norm( stats: str, params: str = "b... [c]", mean: bool = True, var: bool = True, scale: bool = True, bias: bool = True, decay_rate: Optional[float] = None, epsilon: float = 1e-5, fastvar: bool = True, dtype: nn.dtypes.Dtype = "float32", name: Optional[str] = None, **kwargs: Any, ): """Normalization layer. Args: stats: Einstein string determining the axes along which mean and variance are computed. Will be passed to ``einx.reduce``. params: Einstein string determining the axes along which learnable parameters are applied. Will be passed to ``einx.elementwise``. Defaults to ``"b... [c]"``. mean: Whether to apply mean normalization. Defaults to ``True``. var: Whether to apply variance normalization. Defaults to ``True``. scale: Whether to apply a learnable scale according to ``params``. Defaults to ``True``. bias: Whether to apply a learnable bias according to ``params``. Defaults to ``True``. epsilon: A small float added to the variance to avoid division by zero. Defaults to ``1e-5``. fastvar: Whether to use a fast variance computation. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. decay_rate: Decay rate for exponential moving average of mean and variance. If ``None``, no moving average is applied. Defaults to ``None``. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ return _Norm( stats, params=params, mean=mean, var=var, scale=scale, bias=bias, decay_rate=decay_rate, epsilon=epsilon, fastvar=fastvar, dtype=dtype, name=name, kwargs=kwargs, ) class _Linear(nn.Module): expr: str bias: bool = True dtype: nn.dtypes.Dtype = "float32" kwargs: dict = None @nn.compact def __call__(self, x): return einx.nn.linear( x, self.expr, bias=param(self.param, name="bias", dtype=self.dtype) if self.bias else None, weight=param(self.param, name="weight", dtype=self.dtype), **(self.kwargs if self.kwargs is not None else {}), ) def Linear( expr: str, bias: bool = True, dtype: nn.dtypes.Dtype = "float32", name: Optional[str] = None, **kwargs: Any, ): """Linear layer. 
Args: expr: Einstein string determining the axes along which the weight matrix is multiplied. Will be passed to ``einx.dot``. bias: Whether to apply a learnable bias. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ return _Linear(expr, bias=bias, dtype=dtype, name=name, kwargs=kwargs) class _Dropout(nn.Module): expr: str drop_rate: float rng_collection: str = "dropout" kwargs: dict = None @nn.compact def __call__(self, x, training): if training: return einx.nn.dropout( x, self.expr, drop_rate=self.drop_rate, rng=self.make_rng(self.rng_collection), **(self.kwargs if self.kwargs is not None else {}), ) else: return x def Dropout( expr: str, drop_rate: float, rng_collection: str = "dropout", name: Optional[str] = None, **kwargs: Any, ): """Dropout layer. Args: expr: Einstein string determining the axes along which dropout is applied. Will be passed to ``einx.elementwise``. drop_rate: Drop rate. rng_collection: the rng collection name to use when requesting an rng key. Defaults to ``"dropout"``. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ return _Dropout(expr, drop_rate, rng_collection=rng_collection, name=name, kwargs=kwargs) python-einx-0.3.0/einx/nn/haiku.py000066400000000000000000000245011505216034200170250ustar00rootroot00000000000000import haiku as hk import einx from functools import partial from haiku._src.base import current_module from typing import Any, Callable, Literal, Optional thk = einx.tracer.import_("haiku", "hk") class ParamFactory: class Concrete(einx.tracer.input.Input): def __init__(self, name, init, dtype, param_type): self.name = name self.init = init if dtype is None: module = current_module() if hasattr(module, "dtype"): dtype = module.dtype else: dtype = "float32" self.dtype = dtype self.param_type = param_type def to_value_and_key(self): return None, ParamFactory.CacheKey(self.name, self.init, self.dtype, self.param_type) class CacheKey(einx.tracer.input.CacheKey): def __init__(self, name, init, dtype, param_type): self.name = name self.init = init self.dtype = dtype self.param_type = param_type def __hash__(self): return hash((self.name, self.init, self.dtype, self.param_type)) def __eq__(self, other): return ( isinstance(other, ParamFactory.CacheKey) and self.name == other.name and self.init == other.init and self.dtype == other.dtype and self.param_type == other.param_type ) def to_tracer(self, backend, virtual_arg): return ( None, ParamFactory.Tracer(self.name, self.init, self.dtype, self.param_type, virtual_arg), ) class Tracer(einx.tracer.TensorFactory): def __init__(self, name, init, dtype, param_type, depend_on): self.name = name self.init = init self.dtype = dtype self.param_type = param_type self.depend_on = depend_on def __call__(self, shape, kwargs): name = self.name if not self.name is None else kwargs.get("name", None) init = self.init if not self.init is None else kwargs.get("init", None) dtype = self.dtype if not self.dtype is None else kwargs.get("dtype", None) if name is None: raise ValueError("Must specify name for tensor factory hk.get_{parameter|state}") if init is None: raise ValueError("Must specify init for tensor factory hk.get_{parameter|state}") elif isinstance(init, str): if init in "get_at" or init == "rearrange": init = thk.initializers.RandomNormal(stddev=0.02) elif init == "add": 
init = thk.initializers.Constant(0.0) elif init == "multiply": init = thk.initializers.Constant(1.0) elif init == "dot": init = thk.initializers.VarianceScaling( 1.0, "fan_in", "truncated_normal", fan_in_axes=kwargs["in_axis"] ) else: raise ValueError(f"Don't know which initializer to use for operation '{init}'") elif isinstance(init, (int, float)): init = thk.initializers.Constant(init) if self.param_type == "parameter": func = thk.get_parameter elif self.param_type == "state": func = thk.get_state else: assert False return einx.tracer.apply( func, kwargs={"shape": shape, "name": name, "dtype": dtype, "init": init}, output=einx.tracer.Tensor(shape), depend_on=[self.depend_on], ) def param( func: Literal[hk.get_parameter, hk.get_state] = hk.get_parameter, name: Optional[str] = None, init: Optional[Any] = None, dtype: Optional[Any] = None, ): """Create a tensor factory for Haiku parameters. Args: func: Either ``hk.get_parameter`` or ``hk.get_state``. Defaults to ``hk.get_parameter``. name: Name of the parameter. If ``None``, uses a default name determined from the calling operation. Defaults to ``None``. init: Initializer for the parameter. If ``None``, uses a default init method determined from the calling operation. Defaults to ``None``. dtype: Data type of the parameter. If ``None``, uses the ``dtype`` member of the calling module or ``float32`` if it does not exist. Defaults to ``None``. Returns: A tensor factory with the given default parameters. """ if func == hk.get_parameter: param_type = "parameter" elif func == hk.get_state: param_type = "state" else: raise ValueError(f"Unknown parameter function '{func}'") return ParamFactory.Concrete(name, init, dtype, param_type) # Allow passing hk.get_parameter and hk.get_state as tensor factory: @einx.tracer.input.register_tensor_factory def tensor_factory(x): if id(x) == id(hk.get_parameter) or id(x) == id(hk.get_state): return param(x).to_value_and_key() else: return None class Norm(hk.Module): """Normalization layer. Args: stats: Einstein string determining the axes along which mean and variance are computed. Will be passed to ``einx.reduce``. params: Einstein string determining the axes along which learnable parameters are applied. Will be passed to ``einx.elementwise``. Defaults to ``"b... [c]"``. mean: Whether to apply mean normalization. Defaults to ``True``. var: Whether to apply variance normalization. Defaults to ``True``. scale: Whether to apply a learnable scale according to ``params``. Defaults to ``True``. bias: Whether to apply a learnable bias according to ``params``. Defaults to ``True``. epsilon: A small float added to the variance to avoid division by zero. Defaults to ``1e-5``. fastvar: Whether to use a fast variance computation. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. decay_rate: Decay rate for exponential moving average of mean and variance. If ``None``, no moving average is applied. Defaults to ``None``. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__( self, stats: str, params: str = "b... 
[c]", mean: bool = True, var: bool = True, scale: bool = True, bias: bool = True, epsilon: float = 1e-5, fastvar: bool = True, dtype: Any = "float32", decay_rate: Optional[float] = None, name: Optional[str] = None, **kwargs: Any, ): super().__init__(name=name) self.stats = stats self.params = params self.mean = mean self.var = var self.scale = scale self.bias = bias self.epsilon = epsilon self.fastvar = fastvar self.dtype = dtype self.decay_rate = decay_rate self.kwargs = kwargs def __call__(self, x, training=None): if self.decay_rate is not None and training is None: raise ValueError("training must be specified when decay_rate is used") use_ema = self.decay_rate is not None and (not training or hk.running_init()) x, mean, var = einx.nn.norm( x, self.stats, self.params, mean=param(hk.get_state, name="mean") if use_ema and self.mean else self.mean, var=param(hk.get_state, name="var") if use_ema and self.var else self.var, scale=param(hk.get_parameter, name="scale") if self.scale else None, bias=param(hk.get_parameter, name="bias") if self.bias else None, epsilon=self.epsilon, fastvar=self.fastvar, **self.kwargs, ) update_ema = self.decay_rate is not None and training and not hk.running_init() if update_ema: if self.mean: hk.set_state( "mean", hk.get_state("mean") * self.decay_rate + mean * (1 - self.decay_rate) ) if self.var: hk.set_state( "var", hk.get_state("var") * self.decay_rate + var * (1 - self.decay_rate) ) return x class Linear(hk.Module): """Linear layer. Args: expr: Einstein string determining the axes along which the weight matrix is multiplied. Will be passed to ``einx.dot``. bias: Whether to apply a learnable bias. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__( self, expr: str, bias: bool = True, dtype: Any = "float32", name: Optional[str] = None, **kwargs: Any, ): super().__init__(name=name) self.expr = expr self.bias = bias self.dtype = dtype self.kwargs = kwargs def __call__(self, x): return einx.nn.linear( x, self.expr, bias=param(hk.get_parameter, name="bias") if self.bias else None, weight=param(hk.get_parameter, name="weight"), **self.kwargs, ) class Dropout(hk.Module): """Dropout layer. Args: expr: Einstein string determining the axes along which dropout is applied. Will be passed to ``einx.elementwise``. drop_rate: Drop rate. name: Name of the module. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. 
""" def __init__(self, expr: str, drop_rate: float, name: Optional[str] = None, **kwargs: Any): super().__init__(name=name) self.expr = expr self.drop_rate = drop_rate self.kwargs = kwargs def __call__(self, x, training): if training: return einx.nn.dropout( x, self.expr, drop_rate=self.drop_rate, rng=hk.next_rng_key(), **self.kwargs, ) else: return x python-einx-0.3.0/einx/nn/keras.py000066400000000000000000000247601505216034200170400ustar00rootroot00000000000000import keras import einx import inspect import numpy as np from typing import Any, Callable, Optional _version = tuple(int(i) for i in keras.__version__.split(".")[:2]) if _version < (3, 0): raise ImportError(f"einx.nn.keras requires Keras version >= 3, but found {keras.__version__}") tkeras = einx.tracer.import_("keras") def create_or_retrieve(layer, name, shape, dtype, init, trainable): if name in vars(layer): tensor = vars(layer)[name] else: tensor = vars(layer)[name] = layer.add_weight( shape=shape, dtype=dtype, initializer=init, name=name, trainable=trainable, ) return tensor class ParamFactory: class Concrete(einx.tracer.input.Input): def __init__(self, layer, name, init, dtype, trainable): self.layer = layer self.name = name self.init = init if dtype is None: if hasattr(layer, "dtype"): dtype = layer.dtype else: dtype = "float32" self.dtype = dtype self.trainable = trainable def to_value_and_key(self): return self.layer, ParamFactory.CacheKey( self.name, self.init, self.dtype, self.trainable ) class CacheKey(einx.tracer.input.CacheKey): def __init__(self, name, init, dtype, trainable): self.name = name self.init = init self.dtype = dtype self.trainable = trainable def __hash__(self): return hash((self.name, self.init, self.dtype, self.trainable)) def __eq__(self, other): return ( isinstance(other, ParamFactory.CacheKey) and self.name == other.name and self.init == other.init and self.dtype == other.dtype and self.trainable == other.trainable ) def to_tracer(self, backend, virtual_arg): x = ParamFactory.Tracer(self.name, self.init, self.dtype, self.trainable) return x, x class Tracer(einx.tracer.TensorFactory): def __init__(self, name, init, dtype, trainable): self.name = name self.init = init self.dtype = dtype self.trainable = trainable def __call__(self, shape, kwargs): name = self.name if not self.name is None else kwargs.get("name", None) init = self.init if not self.init is None else kwargs.get("init", None) dtype = self.dtype if not self.dtype is None else kwargs.get("dtype", None) if name is None: raise ValueError("Must specify name for tensor factory keras.layers.Layer") if init is None: raise ValueError("Must specify init for tensor factory keras.layers.Layer") elif isinstance(init, str): if init == "get_at" or init == "rearrange": init = tkeras.initializers.TruncatedNormal(stddev=0.02) elif init == "add": init = tkeras.initializers.Constant(0.0) elif init == "multiply": init = tkeras.initializers.Constant(1.0) elif init == "dot": fan_in = np.prod([shape[i] for i in kwargs["in_axis"]]) std = np.sqrt(1.0 / fan_in) / 0.87962566103423978 init = tkeras.initializers.TruncatedNormal(mean=0.0, stddev=std) else: raise ValueError(f"Don't know which initializer to use for operation '{init}'") elif isinstance(init, (int, float)): init = tkeras.initializers.Constant(init) return einx.tracer.apply( create_or_retrieve, # TODO: make tracable args=[self, name, shape, dtype, init, self.trainable], output=einx.tracer.Tensor(shape), ) def param( layer: keras.layers.Layer, name: Optional[str] = None, init: Optional[Any] = None, dtype: 
Optional[Any] = None, trainable: bool = True, ): """Create a tensor factory for Keras parameters. Args: layer: The layer to create the parameter in. Must be an instance of ``keras.layers.Layer``. name: Name of the parameter. If ``None``, uses a default name determined from the calling operation. Defaults to ``None``. init: Initializer for the parameter. If ``None``, uses a default init method determined from the calling operation. Defaults to ``None``. dtype: Data type of the parameter. If ``None``, uses the ``dtype`` member of the calling module or ``float32`` if it does not exist. Defaults to ``None``. trainable: Whether the parameter is trainable. Defaults to ``True``. Returns: A tensor factory with the given default parameters. """ return ParamFactory.Concrete(layer, name, init, dtype, trainable) def is_leaf(x): return isinstance(x, tuple) and all(isinstance(y, int) for y in x) class Layer(keras.layers.Layer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def build(self, inputs_shape): tracers = einx.tree_util.tree_map( lambda shape: keras.ops.zeros(shape, dtype="float32"), inputs_shape, is_leaf=is_leaf ) if "is_initializing" in inspect.signature(self.call).parameters: self.call(tracers, is_initializing=True) else: self.call(tracers) class Norm(Layer): """Normalization layer. Args: stats: Einstein string determining the axes along which mean and variance are computed. Will be passed to ``einx.reduce``. params: Einstein string determining the axes along which learnable parameters are applied. Will be passed to ``einx.elementwise``. Defaults to ``"b... [c]"``. mean: Whether to apply mean normalization. Defaults to ``True``. var: Whether to apply variance normalization. Defaults to ``True``. scale: Whether to apply a learnable scale according to ``params``. Defaults to ``True``. bias: Whether to apply a learnable bias according to ``params``. Defaults to ``True``. epsilon: A small float added to the variance to avoid division by zero. Defaults to ``1e-5``. fastvar: Whether to use a fast variance computation. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. decay_rate: Decay rate for exponential moving average of mean and variance. If ``None``, no moving average is applied. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__( self, stats: str, params: str = "b... 
[c]", mean: bool = True, var: bool = True, scale: bool = True, bias: bool = True, epsilon: float = 1e-5, fastvar: bool = True, dtype: Any = "float32", decay_rate: Optional[float] = None, **kwargs: Any, ): super().__init__(dtype=dtype) self.stats = stats self.params = params self.use_mean = mean self.use_var = var self.use_scale = scale self.use_bias = bias self.epsilon = epsilon self.fastvar = fastvar self.decay_rate = decay_rate self.kwargs = kwargs def call(self, x, training=None, is_initializing=False): use_ema = self.decay_rate is not None and (not training or is_initializing) x, mean, var = einx.nn.norm( x, self.stats, self.params, mean=param(self, name="mean", trainable=False) if use_ema and self.use_mean else self.use_mean, var=param(self, name="var", trainable=False) if use_ema and self.use_var else self.use_var, scale=param(self, name="scale", trainable=True) if self.use_scale else None, bias=param(self, name="bias", trainable=True) if self.use_bias else None, epsilon=self.epsilon, fastvar=self.fastvar, **(self.kwargs if self.kwargs is not None else {}), ) update_ema = self.decay_rate is not None and training and not is_initializing if update_ema: if self.use_mean: self.mean.assign( keras.ops.cast( self.decay_rate * self.mean.value + (1 - self.decay_rate) * mean, self.mean.dtype, ) ) if self.use_var: self.var.assign( keras.ops.cast( self.decay_rate * self.var.value + (1 - self.decay_rate) * var, self.var.dtype, ) ) return x class Linear(Layer): """Linear layer. Args: expr: Einstein string determining the axes along which the weight matrix is multiplied. Will be passed to ``einx.dot``. bias: Whether to apply a learnable bias. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__(self, expr: str, bias: bool = True, dtype: Any = "float32", **kwargs: Any): super().__init__(dtype=dtype) self.expr = expr self.use_bias = bias self.kwargs = kwargs def call(self, x): return einx.nn.linear( x, self.expr, bias=param(self, name="bias") if self.use_bias else None, weight=param(self, name="weight"), **self.kwargs, ) class Dropout(Layer): """Dropout layer. Args: expr: Einstein string determining the axes along which dropout is applied. Will be passed to ``einx.elementwise``. drop_rate: Drop rate. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__(self, expr: str, drop_rate: float, **kwargs: Any): super().__init__() self.expr = expr self.drop_rate = drop_rate self.kwargs = kwargs def call(self, x, training=None): if training: return einx.nn.dropout( x, self.expr, drop_rate=self.drop_rate, **self.kwargs, ) else: return x python-einx-0.3.0/einx/nn/nn.py000066400000000000000000000115721505216034200163430ustar00rootroot00000000000000import einx from typing import Union, Optional, Any @einx.jit( trace=lambda t, c: lambda x, stats, params="b... [c]", mean=True, var=True, scale=None, bias=None, epsilon=0, fastvar=True, backend=None, **kwargs: c( t(x), stats, params, t(mean) if not isinstance(mean, bool) and mean is not None else mean, t(var) if not isinstance(var, bool) and var is not None else var, t(scale) if scale is not None else scale, t(bias) if bias is not None else bias, epsilon, fastvar, **kwargs, ) ) def norm( x: einx.Tensor, stats: str, params: str = "b... 
[c]", mean: Union[einx.Tensor, bool] = True, var: Union[einx.Tensor, bool] = True, scale: Optional[einx.Tensor] = None, bias: Optional[einx.Tensor] = None, epsilon: float = 0, fastvar: bool = True, backend: Union[einx.Backend, str, None] = None, **kwargs: Any, ): if mean is None or var is None: raise ValueError("mean and var cannot be None") expr_in, expr_stats = einx.reduce.parse(stats, einx.tracer.get_shape(x), **kwargs) expr_in = einx.expr.stage3.demark(expr_in) expr_stats = einx.expr.stage3.demark(expr_stats) # Instantiate moving averages if not isinstance(mean, bool) and mean is not None: mean = einx.tracer.call_factory(mean, shape=expr_stats.shape, backend=backend, init="add") if not isinstance(var, bool) and var is not None: var = einx.tracer.call_factory( var, shape=expr_stats.shape, backend=backend, init="multiply" ) # Compute mean and variance if isinstance(mean, bool): if mean: mean = einx.mean(stats, x, backend=backend, **kwargs) else: mean = None if isinstance(var, bool): if var: if mean is None: # RMS norm var = einx.mean(stats, backend.square(x), backend=backend, **kwargs) else: if fastvar: mean_of_squares = einx.mean(stats, backend.square(x), backend=backend, **kwargs) var = mean_of_squares - backend.square(mean) var = backend.maximum(var, 0) else: var = einx.var(stats, x, backend=backend, **kwargs) else: var = None # Normalize mean and variance if mean is not None: x, _ = einx.subtract_stage3([expr_in, expr_stats], [x, mean], expr_in, backend=backend) if var is not None: inv_std = backend.rsqrt(var + epsilon) x, _ = einx.multiply_stage3([expr_in, expr_stats], [x, inv_std], expr_in, backend=backend) # Apply scale and bias if scale is not None: x = einx.multiply(params, x, scale, backend=backend, **kwargs) if bias is not None: x = einx.add(params, x, bias, backend=backend, **kwargs) return x, mean, var @einx.jit( trace=lambda t, c: lambda x, expr, weight, bias=None, **kwargs: c( t(x), expr, t(weight), t(bias) if bias is not None else None, **kwargs ) ) def linear( x: einx.Tensor, expr: str, weight: einx.Tensor, bias: Optional[einx.Tensor], backend: Union[einx.Backend, str, None] = None, **kwargs: Any, ): (_expr_in1, expr_in2), expr_afterdot = einx.dot.parse( expr, einx.tracer.get_shape(x), einx.tracer.get_shape(weight), **kwargs ) # Weight matrix multiplication x = einx.dot(expr, x, weight, backend=backend, **kwargs) if bias is not None: # Bias expression includes all axes in output that are also in weight matrix weight_axes_names = {a.name for a in expr_in2.all() if isinstance(a, einx.expr.stage3.Axis)} expr_bias = [] for a in expr_afterdot.all(): if isinstance(a, einx.expr.stage3.Axis) and a.name in weight_axes_names: expr_bias.append(a.__deepcopy__()) expr_bias = einx.expr.stage3.List(expr_bias) x, _ = einx.add_stage3( [expr_afterdot, expr_bias], [x, bias], expr_afterdot, backend=backend ) return x @einx.jit( trace=lambda t, c: lambda x, expr, drop_rate, rng=None, **kwargs: c( t(x), expr, drop_rate, t(rng) if rng is not None else None, **kwargs ) ) def dropout( x: einx.Tensor, expr: str, drop_rate: float, rng: Any = None, backend: Union[einx.Backend, str, None] = None, **kwargs: Any, ): keep_rate = 1 - drop_rate (expr_in, expr_mask), expr_out = einx.elementwise.parse( expr, einx.tracer.get_shape(x), None, **kwargs ) with einx.tracer.depend_on(x): drop_mask = backend.random.bernoulli(rng=rng, p=keep_rate, shape=expr_mask.shape) x, _ = einx.where_stage3( [expr_mask, expr_in, einx.expr.stage3.List([])], [drop_mask, x, 0], expr_out, backend=backend, ) return x * (1 / 
keep_rate) python-einx-0.3.0/einx/nn/torch.py000066400000000000000000000255301505216034200170460ustar00rootroot00000000000000import torch import einx import math from functools import partial import numpy as np from typing import Callable, Union, Optional, Any _version = tuple(int(i) for i in torch.__version__.split(".")[:2]) if _version < (2, 0): raise ImportError(f"einx.nn.torch requires PyTorch version >= 2, but found {torch.__version__}") def _allow_in_graph(func): if "compiler" in dir(torch): return torch.compiler.allow_in_graph(func) else: import torch._dynamo as _dynamo return _dynamo.allow_in_graph(func) ttorch = einx.tracer.import_("torch") class ParamFactory: class Concrete(einx.tracer.input.Input): def __init__(self, param, init): self.param = param self.init = init def to_value_and_key(self): return self.param, ParamFactory.CacheKey(self.init) class CacheKey(einx.tracer.input.CacheKey): def __init__(self, init): self.init = init def __hash__(self): return hash(self.init) def __eq__(self, other): return isinstance(other, ParamFactory.CacheKey) and self.init == other.init def to_tracer(self, backend, virtual_arg): x = ParamFactory.Tracer(self.init) return x, x class Tracer(einx.tracer.TensorFactory): def __init__(self, init): self.init = init def __call__(self, shape, kwargs): init = self.init if not self.init is None else kwargs.get("init", None) x = self output = einx.tracer.Tensor(shape) x = einx.tracer.apply( x.materialize, args=[shape], output=output, inplace_updates=[(x, output)], ) if init is None: raise ValueError( "Must specify init for tensor factory torch.nn.parameter.Uninitialized*" ) elif isinstance(init, str): if init == "get_at" or init == "rearrange": init = partial(ttorch.nn.init.normal_, std=0.02) elif init == "add": init = ttorch.nn.init.zeros_ elif init == "multiply": init = ttorch.nn.init.ones_ elif init == "dot": fan_in = np.prod([shape[i] for i in kwargs["in_axis"]]) std = np.sqrt(1.0 / fan_in) / 0.87962566103423978 init = partial(ttorch.nn.init.trunc_normal_, mean=0.0, std=std, a=-2.0, b=2.0) else: raise ValueError(f"Don't know which initializer to use for operation '{init}'") elif isinstance(init, (int, float)): init = partial(ttorch.nn.init.constant_, val=init) output = einx.tracer.Tensor(shape) x = einx.tracer.apply( init, args=[x], output=output, inplace_updates=[(x, output)], ) return x def param( x: Union[ torch.nn.parameter.UninitializedParameter, torch.nn.parameter.UninitializedBuffer, torch.nn.parameter.Parameter, ], init: Optional[Any] = None, ): """Create a tensor factory for an uninitialized PyTorch parameter or buffer. If the given parameter is not initialized, this returns a tensor factory that calls the ``materialize`` method of ``uninitialized`` with the given shape and returns ``uninitialized``. Otherwise, the parameter is returned as is. Args: x: An instance of ``torch.nn.parameter.UninitializedParameter``, ``torch.nn.parameter.UninitializedBuffer`` or ``torch.nn.parameter.Parameter``. init: Initializer for the parameter. If ``None``, uses a default init method determined from the calling operation. Defaults to ``None``. Returns: A tensor factory with the given default parameters, or the parameter itself if it is already materialized. 
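    Example:
        An illustrative sketch (the expression and ``c2=32`` are assumptions for
        demonstration); ``einx.dot`` materializes the uninitialized parameter with the
        required shape on its first call:

        >>> class MyLinear(torch.nn.Module):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.weight = torch.nn.parameter.UninitializedParameter()
        ...     def forward(self, x):
        ...         return einx.dot("b... [c1->c2]", x, einx.nn.torch.param(self.weight), c2=32)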
""" if isinstance( x, (torch.nn.parameter.UninitializedParameter, torch.nn.parameter.UninitializedBuffer) ) and not isinstance(x, torch._subclasses.FakeTensor): # Return return ParamFactory.Concrete(x, init) else: # If parameter is already materialized, return it as is return x # Allow passing UninitializedParameter and UninitializedBuffer as tensor factory: @einx.tracer.input.register_tensor_factory def tensor_factory(x): if isinstance( x, (torch.nn.parameter.UninitializedParameter, torch.nn.parameter.UninitializedBuffer) ) and not isinstance(x, torch._subclasses.FakeTensor): return param(x).to_value_and_key() else: return None class Norm(torch.nn.Module): """Normalization layer. Args: stats: Einstein string determining the axes along which mean and variance are computed. Will be passed to ``einx.reduce``. params: Einstein string determining the axes along which learnable parameters are applied. Will be passed to ``einx.elementwise``. Defaults to ``"b... [c]"``. mean: Whether to apply mean normalization. Defaults to ``True``. var: Whether to apply variance normalization. Defaults to ``True``. scale: Whether to apply a learnable scale according to ``params``. Defaults to ``True``. bias: Whether to apply a learnable bias according to ``params``. Defaults to ``True``. epsilon: A small float added to the variance to avoid division by zero. Defaults to ``1e-5``. fastvar: Whether to use a fast variance computation. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. decay_rate: Decay rate for exponential moving average of mean and variance. If ``None``, no moving average is applied. Defaults to ``None``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__( self, stats: str, params: str = "b... 
[c]", mean: bool = True, var: bool = True, scale: bool = True, bias: bool = True, epsilon: float = 1e-5, fastvar: bool = True, dtype: Union[torch.dtype, str] = "float32", decay_rate: Optional[float] = None, **kwargs: Any, ): super().__init__() self.stats = stats self.params = params self.use_mean = mean self.use_var = var self.epsilon = epsilon self.fastvar = fastvar self.decay_rate = decay_rate self.kwargs = kwargs if mean and decay_rate is not None: self.register_buffer( "mean", torch.nn.parameter.UninitializedBuffer(dtype=vars(torch)[dtype]) ) else: self.mean = None if var and decay_rate is not None: self.register_buffer( "var", torch.nn.parameter.UninitializedBuffer(dtype=vars(torch)[dtype]) ) else: self.var = None self.scale = ( torch.nn.parameter.UninitializedParameter(dtype=vars(torch)[dtype]) if scale else None ) self.bias = ( torch.nn.parameter.UninitializedParameter(dtype=vars(torch)[dtype]) if bias else None ) self.initialized = False def forward(self, x): use_ema = self.decay_rate is not None and (not self.training or not self.initialized) x, mean, var = _norm( x, self.stats, self.params, mean=self.mean if use_ema and self.use_mean else self.use_mean, var=self.var if use_ema and self.use_var else self.use_var, scale=self.scale if self.scale is not None else None, bias=self.bias if self.bias is not None else None, epsilon=self.epsilon, fastvar=self.fastvar, kwargs=self.kwargs, ) update_ema = self.decay_rate is not None and self.training if update_ema: with torch.no_grad(): if mean is not None: self.mean.copy_(self.decay_rate * self.mean + (1 - self.decay_rate) * mean) if var is not None: self.var.copy_(self.decay_rate * self.var + (1 - self.decay_rate) * var) if not self.initialized: self.initialized = True return x @_allow_in_graph def _norm(x, stats, params, mean, var, scale, bias, epsilon, fastvar, kwargs): with x.device: return einx.nn.norm( x, stats, params, mean=mean, var=var, scale=scale, bias=bias, epsilon=epsilon, fastvar=fastvar, backend="torch", **kwargs, ) class Linear(torch.nn.Module): """Linear layer. Args: expr: Einstein string determining the axes along which the weight matrix is multiplied. Will be passed to ``einx.dot``. bias: Whether to apply a learnable bias. Defaults to ``True``. dtype: Data type of the weights. Defaults to ``"float32"``. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. """ def __init__( self, expr: str, bias: bool = True, dtype: Union[torch.dtype, str] = "float32", **kwargs: Any, ): super().__init__() self.weight = torch.nn.parameter.UninitializedParameter(dtype=vars(torch)[dtype]) if bias: self.bias = torch.nn.parameter.UninitializedParameter(dtype=vars(torch)[dtype]) else: self.bias = None self.expr = expr self.kwargs = kwargs def forward(self, x): return _linear(x, self.expr, self.weight, self.bias, self.kwargs) @_allow_in_graph def _linear(x, expr, weight, bias, kwargs): with x.device: return einx.nn.linear( x, expr, weight, bias, backend="torch", **kwargs, ) class Dropout(torch.nn.Module): """Dropout layer. Args: expr: Einstein string determining the axes along which dropout is applied. Will be passed to ``einx.elementwise``. drop_rate: Drop rate. **kwargs: Additional parameters that specify values for single axes, e.g. ``a=4``. 
""" def __init__(self, expr: str, drop_rate: float, **kwargs: Any): super().__init__() self.expr = expr self.drop_rate = drop_rate self.kwargs = kwargs def forward(self, x): if self.training: return _dropout(x, self.expr, self.drop_rate, self.kwargs) else: return x @_allow_in_graph def _dropout(x, expr, drop_rate, kwargs): with x.device: return einx.nn.dropout( x, expr, drop_rate=drop_rate, backend="torch", **kwargs, ) python-einx-0.3.0/einx/op/000077500000000000000000000000001505216034200153535ustar00rootroot00000000000000python-einx-0.3.0/einx/op/__init__.py000066400000000000000000000003171505216034200174650ustar00rootroot00000000000000from .rearrange import * from .dot import * from .vmap_with_axis import * from .vmap import * from .reduce import * from .elementwise import * from .index import * from .solve import * from .arange import * python-einx-0.3.0/einx/op/arange.py000066400000000000000000000146761505216034200172000ustar00rootroot00000000000000import einx from functools import partial from . import util import numpy as np from typing import Union import numpy.typing as npt @einx.jit( trace=lambda t, c: lambda exprs_in, expr_out, backend=None, dtype="int32": c( exprs_in, expr_out, dtype=dtype ) ) def arange_stage3(expr_in, expr_out, backend, dtype="int32"): if isinstance(backend, str): backend = einx.backend.get(backend) for expr in expr_in.all(): if isinstance(expr, einx.expr.stage3.Marker): raise ValueError("Marker in input expression not allowed") for root in [expr_in, expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") marked_axes = [ expr for expr in expr_out.all() if isinstance(expr, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(expr) ] if len(marked_axes) > 1: raise ValueError(f"Expected at most one marked axis, got {len(marked_axes)}") ndim = marked_axes[0].value if len(marked_axes) == 1 else 1 expr_in = util.flatten([expr_in])[0] expr_out_flat = util.flatten([expr_out])[0] def replace(expr): if isinstance(expr, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(expr): expr = einx.expr.stage3.Concatenation([ einx.expr.stage3.Axis(None, 1) for _ in range(ndim) ]) expr = einx.expr.stage3.Composition(expr) return expr expr_out_flat_withconcat = einx.expr.stage3.replace(expr_out_flat, replace) expr_out_flat_withconcat = einx.expr.stage3.demark(expr_out_flat_withconcat) (tensor,), _ = einx.rearrange_stage3( [axis.__deepcopy__() for axis in expr_in], [backend.arange(axis.value, dtype=dtype) for axis in expr_in], [expr_out_flat_withconcat], backend=backend, ) # Unflatten output expressions (tensor,) = util.unflatten( [expr_out_flat], [ tensor, ], [expr_out], backend=backend, ) return tensor, einx.expr.stage3.demark(expr_out) @einx.lru_cache def parse(description, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Implicitly determine input expression if len(op) == 1: op = einx.expr.stage1.Op( [ einx.expr.stage1.Args([einx.expr.stage1.get_unmarked(op[0][0])]), op[0], ], ) if len(op[0]) != 1: raise ValueError(f"Expected 1 input expression, but got {len(op[0])}") if len(op[1]) != 1: raise ValueError(f"Expected 1 output expression, but got {len(op[1])}") marked_expr_out = einx.expr.stage1.Composition(einx.expr.stage1.get_marked(op[1][0])) def after_stage2(exprs1, exprs2): expr_out = exprs1[1] out_axes = [ expr for expr in expr_out.all() if isinstance(expr, 
(einx.expr.stage2.NamedAxis, einx.expr.stage2.UnnamedAxis)) ] marked_out_axes = [expr for expr in out_axes if einx.expr.stage2.is_marked(expr)] if len(marked_out_axes) > 1: raise ValueError(f"Expected at most one marked axis, got {len(marked_out_axes)}") ndim = len(out_axes) - len(marked_out_axes) return [einx.expr.Equation(marked_expr_out, np.asarray([ndim]))] expr_in, expr_out = einx.expr.solve( [einx.expr.Equation(op[0][0])] + [einx.expr.Equation(op[1][0])] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, after_stage2=after_stage2, )[:2] return expr_in, expr_out @einx.traceback_util.filter @einx.jit(trace=lambda t, c: lambda description, backend=None, **kwargs: c(description, **kwargs)) def arange( description: str, *, backend: Union[einx.Backend, str], dtype: str = "int32", cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """n-dimensional ``arange`` operation. *This function might be removed in a future version.* Runs ``arange`` for every axis in ``input``, and stacks the results along the single marked axis in ``output``. Always uses ``start=0`` and ``step=1``. The `description` argument must meet one of the following formats: 1. ``input -> output`` Runs ``backend.arange`` for every axis in ``input``, and stacks the results along the marked axis in ``output``. The values are stacked in the order that the axes appear in ``input``. 2. ``output`` Implicitly determines the input expression by removing the marked axis from ``output``. Example: ``a b [2]`` resolves to ``a b -> a b [2]`` Args: description: Description string in Einstein notation (see above). backend: Backend to use for all operations. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the n-dimensional arange operation if `graph=False`, otherwise the graph representation of the operation. Examples: Arange two-dimensional coordinates: >>> tensor = einx.arange("a b [2]", a=5, b=6, backend="numpy") >>> tensor.shape (5, 6, 2) >>> tensor[2, 3] array([2, 3], dtype=int32) Arange two-dimensional coordinates with inverted coordinates (`Cartesian ordering `_: First axis of tensor corresponds to second coordinate along stacked axis and vice versa.): >>> tensor = einx.arange("a b -> b a [2]", a=5, b=6, backend="numpy") >>> tensor.shape (6, 5, 2) >>> tensor[2, 3] array([3, 2], dtype=int32) Arange one-dimensional coordinates: >>> einx.arange("a", a=5, backend="numpy").shape (5,) """ expr_in, expr_out = parse(description, cse=cse, **parameters) tensor, expr_out = arange_stage3(expr_in, expr_out, backend=backend, dtype=dtype) return tensor arange.parse = parse python-einx-0.3.0/einx/op/dot.py000066400000000000000000000263501505216034200165210ustar00rootroot00000000000000import einx from . 
import util import numpy as np from typing import Union import numpy.typing as npt @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, expr_out, backend=None: c( exprs_in, [t(x) for x in tensors_in], expr_out ) ) def dot_stage3(exprs_in, tensors_in, expr_out, backend=None): for root in list(exprs_in) + [expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") for expr in expr_out.all(): if isinstance(expr, einx.expr.stage3.Marker): raise ValueError("Brackets in the output expression not allowed") out_axis_names = {a.name for a in expr_out.all() if isinstance(a, einx.expr.stage3.Axis)} for expr_in in exprs_in: for axis in expr_in.all(): if isinstance(axis, einx.expr.stage3.Axis): is_reduced_axis = axis.name not in out_axis_names is_marked = einx.expr.stage3.is_marked(axis) if is_reduced_axis and not is_marked: raise ValueError(f"Reduced axis {axis} must be marked") elif not is_reduced_axis and is_marked: raise ValueError(f"Marked axis {axis} cannot appear in output expression") # Call tensor factories output_axis_names = {a.name for a in expr_out.all() if isinstance(a, einx.expr.stage3.Axis)} def get_fans(idx): other_input_axis_names = { a.name for i, expr_in in enumerate(exprs_in) for a in expr_in.all() if i != idx and isinstance(a, einx.expr.stage3.Axis) } in_axis = [] out_axis = [] batch_axis = [] for i, child in enumerate(exprs_in[idx]): any_in_other_input = any( isinstance(a, einx.expr.stage3.Axis) and a.name in other_input_axis_names for a in child.all() ) any_in_output = any( isinstance(a, einx.expr.stage3.Axis) and a.name in output_axis_names for a in child.all() ) if any_in_other_input and not any_in_output: in_axis.append(i) elif any_in_output and not any_in_other_input: out_axis.append(i) else: batch_axis.append(i) return { "in_axis": tuple(in_axis), "out_axis": tuple(out_axis), "batch_axis": tuple(batch_axis), } tensors_in = [ einx.tracer.call_factory( tensor, expr.shape, backend, **get_fans(i), name="weight", init="dot" ) for i, (tensor, expr) in enumerate(zip(tensors_in, exprs_in)) ] tensors_in = backend.all_to_tensor(tensors_in) # Flatten expressions exprs_in, tensors_in = util.flatten(exprs_in, tensors_in, backend=backend) expr_out_flat = util.flatten([expr_out])[0] assert all(einx.expr.stage3.is_flat(expr) for expr in exprs_in) assert einx.expr.stage3.is_flat(expr_out_flat) # Apply einsum einsum_variables = {} def get_einsum_variable(key): if key in einsum_variables: return einsum_variables[key] else: v = chr(ord("a") + len(einsum_variables)) if ord(v) > ord("z"): raise ValueError(f"Only supports up to {ord('z') - ord('a') + 1} unique input axes") einsum_variables[key] = v return v def to_einsum(axes): return "".join(get_einsum_variable(a.name) for a in axes) input_axis_names = {a.name for expr in exprs_in for a in einx.expr.stage3.get_axes(expr)} einsum_str = ( ",".join(to_einsum(einx.expr.stage3.get_axes(expr)) for expr in exprs_in) + "->" + to_einsum([ a for a in einx.expr.stage3.get_axes(expr_out_flat) if a.name in input_axis_names ]) ) tensor = backend.einsum(einsum_str, *tensors_in) expr = einx.expr.stage3.List([ a.__deepcopy__() for a in einx.expr.stage3.get_axes(expr_out_flat) if a.name in input_axis_names ]) # Transpose and broadcast missing output dimensions tensor = util.transpose_broadcast(expr, tensor, expr_out_flat, backend=backend)[0] # Unflatten output expression tensor = backend.reshape(tensor, expr_out.shape) return tensor, expr_out @einx.lru_cache def parse(description, 
*tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Implicitly determine second input expression if len(op[0]) == 1 and len(tensor_shapes) == 2: for root in [op[0][0], op[1][0]]: for expr in root.all(): if ( isinstance(expr, einx.expr.stage1.UnnamedAxis) and expr.value != 1 and einx.expr.stage1.is_marked(expr) ): raise ValueError(f"Cannot mark unnamed non-trivial axes, but found {expr}") # Create second input expression from ordered list of marked axes names = set() expr_in2 = [] for root in [op[0][0], op[1][0]]: for expr in root.all(): if ( isinstance(expr, einx.expr.stage1.NamedAxis) and einx.expr.stage1.is_marked(expr) and expr.name not in names ): names.add(expr.name) # Copy axis expr2 = expr.__deepcopy__() # Apply the same ellipses parent = expr while parent.parent is not None: if isinstance(parent, einx.expr.stage1.Ellipsis): expr2 = einx.expr.stage1.Ellipsis(expr2, ellipsis_id=parent.ellipsis_id) parent = parent.parent # Append to second output expression expr_in2.append(expr2) expr_in2 = einx.expr.stage1.List(expr_in2) op = einx.expr.stage1.Op([ einx.expr.stage1.Args([ einx.expr.stage1.demark(op[0][0]), expr_in2, ]), einx.expr.stage1.Args([ einx.expr.stage1.demark(op[1][0]), ]), ]) if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensor(s), got {len(tensor_shapes)}") if len(op[1]) != 1: raise ValueError(f"Expected 1 output expression, but got {len(op[1])}") exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [einx.expr.Equation(op[1][0])] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, cse_concat=False, )[: len(op[0]) + 1] exprs_in, expr_out = exprs[:-1], exprs[-1] # If no axes are marked, mark all axes that are not in the output if not any(einx.expr.stage3.is_marked(expr) for expr_in in exprs_in for expr in expr_in.all()): axes_names_out = { axis.name for axis in expr_out.all() if isinstance(axis, einx.expr.stage3.Axis) } exprs_in = [ einx.expr.stage3.mark( expr_in, lambda expr: isinstance(expr, einx.expr.stage3.Axis) and expr.name not in axes_names_out, ) for expr_in in exprs_in ] return exprs_in, expr_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, backend=None, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def dot( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Computes a general dot-product of the input tensors. The following shorthand notation is supported: * When no brackets are found, brackets are placed implicitly around all axes that do not appear in the output. Example: ``a b, b c -> a c`` expands to ``a [b], [b] c -> a c`` * When given two input tensors, the expression of the second input is determined implicitly from the marked axes in the input and output expression. Example: ``a [b] -> a [c]`` expands to ``a b, b c -> a c`` Axes marked multiple times appear only once in the implicit second input expression. 
Example: ``[a b] -> [a c]`` expands to ``a b, a b c -> a c`` The function additionally passes the ``in_axes``, ``out_axes`` and ``batch_axes`` arguments to tensor factories that can be used to determine the fan-in and fan-out of a neural network layer and initialize weights accordingly (see e.g. `jax.nn.initializers.lecun_normal `_) Args: description: Description string for the operation in einx notation. tensors: Input tensors or tensor factories matching the description string. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the dot-product operation if ``graph=False``, otherwise the graph representation of the operation. Examples: Compute an inner product between two vectors: >>> a, b = np.random.uniform(size=(10,)), np.random.uniform(size=(10,)) >>> einx.dot("a, a ->", a, b).shape () Compute a matrix-vector product: >>> a, b = np.random.uniform(size=(10, 10)), np.random.uniform(size=(10,)) >>> einx.dot("a b, b -> a", a, b).shape (10,) >>> einx.dot("a [b] -> a", a, b).shape (10,) >>> einx.dot("a [b->]", a, b).shape (10,) Compute a vector-matrix product: >>> a, b = np.random.uniform(size=(10,)), np.random.uniform(size=(10, 10)) >>> einx.dot("a, a b -> b", a, b).shape (10,) >>> einx.dot("[a] -> [b]", a, b).shape (10,) >>> einx.dot("[a->b]", a, b).shape (10,) Multiply a tensor with a weight matrix: >>> x, w = ( ... np.random.uniform(size=(4, 16, 16, 64)), ... np.random.uniform( ... size=( ... 64, ... 32, ... ) ... ), ... ) >>> einx.dot("b... [c1->c2]", x, w).shape (4, 16, 16, 32) """ exprs_in, expr_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) tensor, _expr = dot_stage3(exprs_in, tensors, expr_out, backend=backend) return tensor dot.parse = parse python-einx-0.3.0/einx/op/elementwise.py000066400000000000000000000373341505216034200202600ustar00rootroot00000000000000import einx from . 
import util from functools import partial import numpy as np from typing import Callable, Union import numpy.typing as npt @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, expr_out, op, backend=None: c( exprs_in, [t(x) for x in tensors_in], expr_out, op ) ) def elementwise_stage3(exprs_in, tensors_in, expr_out, op, backend=None): for root in list(exprs_in) + [expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") assert not any(einx.expr.stage3.is_marked(expr) for root in exprs_in for expr in root.all()) assert not any(einx.expr.stage3.is_marked(expr) for expr in expr_out.all()) # Call tensor factories def get_name(s): if s == "add": return "bias" elif s == "multiply": return "scale" else: return s tensors_in = [ einx.tracer.call_factory( tensor, expr.shape, backend, name=get_name(util._op_to_str(op)), init=util._op_to_str(op), ) for tensor, expr in zip(tensors_in, exprs_in) ] tensors_in = backend.all_to_tensor(tensors_in) tensors_out, exprs_out = einx.vmap_with_axis_stage3( exprs_in, tensors_in, [expr_out], op, backend=backend ) assert len(tensors_out) == 1 and len(exprs_out) == 1 return tensors_out[0], exprs_out[0] @einx.lru_cache def parse(description, *tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Add second input expression from marked subexpressions if len(op[0]) == 1 and len(tensor_shapes) == 2: op = einx.expr.stage1.Op( [ einx.expr.stage1.Args([ einx.expr.stage1.demark(op[0][0]), einx.expr.stage1.get_marked(op[0][0]), ]), ] + list(op[1:]) ) # Implicitly determine output expression if len(op) == 1: # Use one of the input expression if contains the axis names of # all others and if this choice is unique input_args = op[0] in_axis_names = [ {expr.name for expr in root.all() if isinstance(expr, einx.expr.stage1.NamedAxis)} for root in input_args ] valid_parents = set() for i, parent in enumerate(in_axis_names): for j, child in enumerate(in_axis_names): if i != j and not child.issubset(parent): break else: # Found valid parent valid_parents.add(input_args[i]) if len(valid_parents) != 1: raise ValueError(f"Could not implicitly determine output expression for op '{op}'") expr_out = next(iter(valid_parents)).__deepcopy__() op = einx.expr.stage1.Op([op[0], einx.expr.stage1.Args([expr_out])]) if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensors, but got {len(tensor_shapes)}") if len(op[1]) != 1: raise ValueError(f"Expected 1 output expression, but got {len(op[1])}") exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [ einx.expr.Equation( op[1][0], ) ] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, cse_concat=False, )[: len(op[0]) + 1] exprs_in, expr_out = exprs[:-1], exprs[-1] return exprs_in, expr_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, backend=None, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def elementwise( description: str, *tensors: einx.Tensor, op: Callable, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Applies an element-by-element operation over the given tensors. 
It supports the following shorthand notation: * The output is determined implicitly if one of the input expressions contains the named axes of all other inputs and if this choice is unique. | Example: ``a b, a`` expands to ``a b, a -> a b``. | Example: ``b a, b, a`` expands to ``b a, b, a -> b a``. | Example: ``a b, b a`` raises an exception. | Example: ``a b, a b`` expands to ``a b, a b -> a b``. * Bracket notation can be used when passing two input tensors to indicate that the second input is a subexpression of the first. Example: ``a [b]`` expands to ``a b, b``. Args: description: Description string for the operation in einx notation. tensors: Input tensors or tensor factories matching the description string. op: Backend element-by-element operation. Must accept the same number of tensors as specified in the description string and comply with numpy broadcasting rules. If ``op`` is a string, retrieves the attribute of ``backend`` with the same name. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the elementwise operation if ``graph=False``, otherwise the graph representation of the operation. Examples: Compute a sum of two vectors: >>> a, b = np.random.uniform(size=(10,)), np.random.uniform(size=(10,)) >>> einx.elementwise("a, a -> a", a, b, op=np.add).shape (10,) Add a vector on all columns of a matrix: >>> a, b = np.random.uniform(size=(10, 10)), np.random.uniform(size=(10,)) >>> einx.add("a b, a -> a b", a, b).shape (10, 10,) Subtract a vector from all rows of a matrix: >>> a, b = np.random.uniform(size=(10, 10)), np.random.uniform(size=(10,)) >>> einx.subtract("a b, b -> a b", a, b).shape (10, 10,) Select from one of two choices according to a boolean mask: >>> x, mask = ( ... np.random.uniform(size=(10, 10)), ... np.random.uniform(size=(10,)), ... ) >>> einx.where("a, a b, -> a b", mask, x, 0).shape (10, 10,) Add a bias onto all channels of a tensor: >>> x, w = ( ... np.random.uniform(size=(4, 16, 16, 64)), ... np.random.uniform(size=(64,)), ... ) >>> einx.add("b... 
[c]", x, w).shape (4, 16, 16, 64) """ exprs_in, expr_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) tensor, expr_out = elementwise_stage3(exprs_in, tensors, expr_out, op=op, backend=backend) return tensor elementwise.parse = parse @einx.traceback_util.filter def add( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="add"``""" return elementwise(description, *tensors, op="add", backend=backend, cse=cse, **parameters) def add_stage3(*args, **kwargs): return elementwise_stage3(*args, op="add", **kwargs) @einx.traceback_util.filter def subtract( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="subtract"``""" return elementwise(description, *tensors, op="subtract", backend=backend, cse=cse, **parameters) def subtract_stage3(*args, **kwargs): return elementwise_stage3(*args, op="subtract", **kwargs) @einx.traceback_util.filter def multiply( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="multiply"``""" return elementwise(description, *tensors, op="multiply", backend=backend, cse=cse, **parameters) def multiply_stage3(*args, **kwargs): return elementwise_stage3(*args, op="multiply", **kwargs) @einx.traceback_util.filter def true_divide( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="true_divide"``""" return elementwise( description, *tensors, op="true_divide", backend=backend, cse=cse, **parameters ) def true_divide_stage3(*args, **kwargs): return elementwise_stage3(*args, op="true_divide", **kwargs) @einx.traceback_util.filter def floor_divide( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="floor_divide"``""" return elementwise( description, *tensors, op="floor_divide", backend=backend, cse=cse, **parameters ) def floor_divide_stage3(*args, **kwargs): return elementwise_stage3(*args, op="floor_divide", **kwargs) @einx.traceback_util.filter def divide( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="divide"``""" return elementwise(description, *tensors, op="divide", backend=backend, cse=cse, **parameters) def divide_stage3(*args, **kwargs): return elementwise_stage3(*args, op="divide", **kwargs) @einx.traceback_util.filter def logical_and( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="logical_and"``""" return elementwise( description, *tensors, op="logical_and", backend=backend, cse=cse, **parameters ) def logical_and_stage3(*args, **kwargs): return elementwise_stage3(*args, op="logical_and", **kwargs) 
@einx.traceback_util.filter def logical_or( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="logical_or"``""" return elementwise( description, *tensors, op="logical_or", backend=backend, cse=cse, **parameters ) def logical_or_stage3(*args, **kwargs): return elementwise_stage3(*args, op="logical_or", **kwargs) @einx.traceback_util.filter def where( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="where"``""" return elementwise(description, *tensors, op="where", backend=backend, cse=cse, **parameters) def where_stage3(*args, **kwargs): return elementwise_stage3(*args, op="where", **kwargs) @einx.traceback_util.filter def less( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="less"``""" return elementwise(description, *tensors, op="less", backend=backend, cse=cse, **parameters) def less_stage3(*args, **kwargs): return elementwise_stage3(*args, op="less", **kwargs) @einx.traceback_util.filter def less_equal( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="less_equal"``""" return elementwise( description, *tensors, op="less_equal", backend=backend, cse=cse, **parameters ) def less_equal_stage3(*args, **kwargs): return elementwise_stage3(*args, op="less_equal", **kwargs) @einx.traceback_util.filter def greater( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="greater"``""" return elementwise(description, *tensors, op="greater", backend=backend, cse=cse, **parameters) def greater_stage3(*args, **kwargs): return elementwise_stage3(*args, op="greater", **kwargs) @einx.traceback_util.filter def greater_equal( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="greater_equal"``""" return elementwise( description, *tensors, op="greater_equal", backend=backend, cse=cse, **parameters ) def greater_equal_stage3(*args, **kwargs): return elementwise_stage3(*args, op="greater_equal", **kwargs) @einx.traceback_util.filter def equal( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="equal"``""" return elementwise(description, *tensors, op="equal", backend=backend, cse=cse, **parameters) def equal_stage3(*args, **kwargs): return elementwise_stage3(*args, op="equal", **kwargs) @einx.traceback_util.filter def not_equal( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="not_equal"``""" return elementwise( description, *tensors, op="not_equal", 
backend=backend, cse=cse, **parameters ) def not_equal_stage3(*args, **kwargs): return elementwise_stage3(*args, op="not_equal", **kwargs) @einx.traceback_util.filter def maximum( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="maximum"``""" return elementwise(description, *tensors, op="maximum", backend=backend, cse=cse, **parameters) def maximum_stage3(*args, **kwargs): return elementwise_stage3(*args, op="maximum", **kwargs) @einx.traceback_util.filter def minimum( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.elementwise` with ``op="minimum"``""" return elementwise(description, *tensors, op="minimum", backend=backend, cse=cse, **parameters) def minimum_stage3(*args, **kwargs): return elementwise_stage3(*args, op="minimum", **kwargs) python-einx-0.3.0/einx/op/index.py000066400000000000000000000462721505216034200170470ustar00rootroot00000000000000import einx from functools import partial from . import util import numpy as np from typing import Callable, Union import numpy.typing as npt def _index(*tensors, update, layout, expr_update_inner, expr_common, op=None, backend=None): assert backend is not None if update: tensor_in = tensors[0] tensor_coordinates = tensors[1:-1] tensor_update = tensors[-1] else: tensor_in = tensors[0] tensor_coordinates = tensors[1:] # Split multi-dim coordinate tensors into single-dim coordinate tensors layout = [ (tensor, coordinate_axis_name, expr_coord, ndim) for tensor, (coordinate_axis_name, expr_coord, ndim) in zip(tensor_coordinates, layout) ] layout2 = [] for tensor_coord, coordinate_axis_name, expr_coord, ndim in layout: axis_names = [ axis.name for axis in expr_coord.all() if isinstance(axis, einx.expr.stage3.Axis) ] axis = ( axis_names.index(coordinate_axis_name) if coordinate_axis_name in axis_names else None ) if axis is None: assert ndim == 1 layout2.append((tensor_coord, coordinate_axis_name, expr_coord)) else: axes = [axis for axis in expr_coord.all() if isinstance(axis, einx.expr.stage3.Axis)] del axes[axis] expr_coord = einx.expr.stage3.List.maybe(axes) for i in range(ndim): layout2.append(( tensor_coord[(slice(None),) * axis + (i,)], coordinate_axis_name, expr_coord, )) assert len(layout2) == tensor_in.ndim layout = layout2 # Transpose coordinate and update tensors to match common coordinate expression def transpose(tensor, expr): return util.transpose_broadcast( expr, tensor, expr_common, broadcast=False, backend=backend )[0] tensor_coordinates = tuple( transpose(tensor, expr) for tensor, coordinate_axis_name, expr in layout ) if update: tensor_update = transpose(tensor_update, expr_update_inner) return ( op(tensor_in, tensor_coordinates) if not update else op(tensor_in, tensor_coordinates, tensor_update) ) @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, expr_out, **kwargs: c( exprs_in, [t(x) for x in tensors_in], expr_out, **kwargs ) ) def index_stage3(exprs_in, tensors_in, expr_out, *, update, op=None, backend=None): if len(exprs_in) != len(tensors_in): raise ValueError(f"Expected {len(exprs_in)} input tensors, got {len(tensors_in)}") for expr in exprs_in[0]: if isinstance(expr, einx.expr.stage3.Axis) and expr.is_unnamed and expr.value == 1: raise ValueError("First expression cannot contain unnamed axes with value 1") for root in 
list(exprs_in) + [expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") if not update: # Ensure that no brackets exist in output expression for expr in expr_out.all(): if einx.expr.stage3.is_marked(expr): raise ValueError("Brackets in the output expression are not allowed") if update: axis_names = { axis.name for root in exprs_in[:-1] for axis in root.all() if isinstance(axis, einx.expr.stage3.Axis) } for axis in exprs_in[-1].all(): if isinstance(axis, einx.expr.stage3.Axis) and axis.name not in axis_names: raise ValueError( f"Update expression cannot contain axes that are not in the " f"coordinate or tensor expressions: {axis.name}" ) # Call tensor factories def get_name(s): if s == "get_at": return "embedding" else: return s tensors_in = [ einx.tracer.call_factory( tensor, expr.shape, backend, name=get_name(util._op_to_str(op)), init=util._op_to_str(op), ) for tensor, expr in zip(tensors_in, exprs_in) ] tensors_in = backend.all_to_tensor(tensors_in) expr_tensor = exprs_in[0] exprs_coordinates = exprs_in[1:-1] if update else exprs_in[1:] if update: expr_update = exprs_in[-1] layout = [] total_ndim = 0 for expr_coord in exprs_coordinates: marked_coordinate_axes = [ expr for expr in expr_coord.all() if isinstance(expr, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(expr) ] if len(marked_coordinate_axes) > 1: raise ValueError( f"Expected at most one coordinate axis in coordinate expression, " f"got {len(marked_coordinate_axes)} in '{expr_coord}'" ) ndim = marked_coordinate_axes[0].value if len(marked_coordinate_axes) == 1 else 1 coordinate_axis_name = ( marked_coordinate_axes[0].name if len(marked_coordinate_axes) == 1 and (not marked_coordinate_axes[0].is_unnamed or marked_coordinate_axes[0].value != 1) else None ) layout.append((coordinate_axis_name, ndim)) total_ndim += ndim marked_tensor_axis_names = { expr.name for expr in expr_tensor.all() if isinstance(expr, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(expr) } if len(marked_tensor_axis_names) != total_ndim: raise ValueError( f"Expected {total_ndim} marked axes in tensor, got {len(marked_tensor_axis_names)}" ) if update: marked_update_axis_names = { expr.name for expr in expr_update.all() if isinstance(expr, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(expr) } if len(marked_update_axis_names) != 0: raise ValueError("Update expression cannot contain marked axes") # Add markers around axes in coordinates and update that are not in tensor tensor_axis_names = { expr.name for expr in expr_tensor.all() if isinstance(expr, einx.expr.stage3.Axis) } new_marked_axis_names = set() def replace(expr): if ( isinstance(expr, einx.expr.stage3.Axis) and expr.name not in tensor_axis_names and not einx.expr.stage3.is_marked(expr) ): new_marked_axis_names.add(expr.name) return einx.expr.stage3.Marker(expr.__deepcopy__()) exprs_coordinates = [einx.expr.stage3.replace(expr, replace) for expr in exprs_coordinates] expr_update = einx.expr.stage3.replace(expr_update, replace) if update else None # Add markers around those same axes in output and update def replace(expr): if ( isinstance(expr, einx.expr.stage3.Axis) and expr.name in new_marked_axis_names and not einx.expr.stage3.is_marked(expr) ): return einx.expr.stage3.Marker(expr.__deepcopy__()) expr_out = einx.expr.stage3.replace(expr_out, replace) if update: expr_update = einx.expr.stage3.replace(expr_update, replace) # If updating: Add markers around axes in output that are also marked # in tensor (and 
are not broadcasted axes) if update: def replace(expr): if ( isinstance(expr, einx.expr.stage3.Axis) and expr.name in marked_tensor_axis_names and not einx.expr.stage3.is_marked(expr) ): return einx.expr.stage3.Marker(expr.__deepcopy__()) expr_out = einx.expr.stage3.replace(expr_out, replace) def to_inner(expr): expr = einx.expr.stage3.get_marked(expr) return util.flatten([expr])[0] exprs_coordinates_inner = [to_inner(expr) for expr in exprs_coordinates] expr_update_inner = to_inner(expr_update) if update else None # Find common expression for coordinates and update in vmapped function layout = [ (coordinate_axis_name, expr_coord, ndim) for expr_coord, (coordinate_axis_name, ndim) in zip(exprs_coordinates_inner, layout) ] if update: layout2 = layout + [(None, expr_update_inner, None)] longest = sorted(layout2, key=lambda x: len(x[1].shape))[-1] all_axes = [ axis for axis in longest[1].all() if isinstance(axis, einx.expr.stage3.Axis) and not axis.name == longest[0] ] axes_names = {axis.name for axis in all_axes} for coordinate_axis_name, expr_coord, _ in layout2: for axis in expr_coord.all(): if ( isinstance(axis, einx.expr.stage3.Axis) and axis.name != coordinate_axis_name and axis.name not in axes_names ): axes_names.add(axis.name) all_axes.append(axis) expr_common = einx.expr.stage3.List.maybe(all_axes) else: expr_common = einx.expr.stage3.get_marked(util.flatten([expr_out])[0]) # Construct vmapped indexing function if isinstance(op, str): op = getattr(backend, op) op = partial( _index, op=op, update=update, layout=layout, expr_common=expr_common, expr_update_inner=expr_update_inner, backend=backend, ) op = einx.trace(op) exprs_in = [expr_tensor] + exprs_coordinates + ([expr_update] if update else []) tensors_out, exprs_out = einx.vmap_stage3( exprs_in, tensors_in, [expr_out], op=op, flat=True, backend=backend ) assert len(tensors_out) == 1 and len(exprs_out) == 1 return tensors_out[0], exprs_out[0] @einx.lru_cache def parse(description, *tensor_shapes, update, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Implicitiy determine output expression if len(op) == 1: if update: op = einx.expr.stage1.Op([ op[0], einx.expr.stage1.Args([op[0][0]]), ]) else: raise ValueError("Operation string must contain an output expression") if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensors, but got {len(tensor_shapes)}") if len(op[1]) != 1: raise ValueError(f"Expected 1 output expression, but got {len(op[1])}") def after_stage2(exprs1, exprs2): for expr in exprs1[0].all(): if ( isinstance(expr, einx.expr.stage2.UnnamedAxis) and expr.value == 1 and einx.expr.stage2.is_marked(expr) ): raise ValueError("First expression cannot contain unnamed axes with value 1") tensor_marked_axes = [ expr for expr in exprs1[0].all() if isinstance(expr, (einx.expr.stage2.NamedAxis, einx.expr.stage2.UnnamedAxis)) and einx.expr.stage2.is_marked(expr) ] ndim = len(tensor_marked_axes) concat_this = [] coord_exprs = exprs1[1 : len(tensor_shapes)] if update: coord_exprs = coord_exprs[:-1] for expr in coord_exprs: marked_coordinate_axes = [ expr for expr in exprs1[1].all() if isinstance(expr, (einx.expr.stage2.NamedAxis, einx.expr.stage2.UnnamedAxis)) and einx.expr.stage2.is_marked(expr) ] if len(marked_coordinate_axes) > 1: raise ValueError( f"Expected at most one marked axis per coordinate tensor" f", got {len(marked_coordinate_axes)}" ) elif len(marked_coordinate_axes) 
== 1: if isinstance(marked_coordinate_axes[0], einx.expr.stage2.NamedAxis): concat_this.append(einx.expr.stage1.NamedAxis(marked_coordinate_axes[0].name)) else: concat_this.append( einx.expr.stage1.UnnamedAxis(marked_coordinate_axes[0].value) ) else: concat_this.append(einx.expr.stage1.UnnamedAxis(1)) return [ einx.expr.Equation( einx.expr.stage1.Concatenation.maybe(concat_this), np.asarray([ndim]) ) ] exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [einx.expr.Equation(op[1][0])] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, after_stage2=after_stage2, )[: len(op[0]) + 1] exprs_in, expr_out = exprs[: len(op[0])], exprs[len(op[0])] if update: # Check that all axes in first input expression also appear in output expression axes_in = { axis.name for axis in exprs_in[0].all() if isinstance(axis, einx.expr.stage3.Axis) } axes_out = {axis.name for axis in expr_out.all() if isinstance(axis, einx.expr.stage3.Axis)} if not axes_in.issubset(axes_out): raise ValueError( f"Output expression does not contain all axes from first input expression: " f"{axes_in - axes_out}" ) return exprs_in, expr_out def _has_zero_shape(tensor): shape = einx.tracer.get_shape(tensor) return shape is not None and any(s == 0 for s in shape) @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, backend=None, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def index( description: str, *tensors: einx.Tensor, op: Callable, update: bool, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Updates and/ or returns values from a tensor at the given coordinates. * If ``update`` is True: The first tensor receives updates, the last tensor contains the updates, and all other tensors represent the coordinates. If the output expression is not given, it is assumed to be equal to the first input expression. * If ``update`` is False, values are retrieved from the first tensor and the remaining tensors contain the coordinates. Using multiple coordinate expressions will yield the same output as concatenating the coordinate expressions along the coordinate axis first. Args: description: Description string for the operation in einx notation. *tensors: Tensors that the operation will be applied to. op: The update/gather function. If ``op`` is a string, retrieves the attribute of ``backend`` with the same name. update: Whether to update the tensor or return values from the tensor. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the update/ gather operation if ``graph=False``, otherwise the graph representation of the operation. 
Examples: Get values from a batch of images (different indices per image): >>> tensor = np.random.uniform(size=(4, 128, 128, 3)) >>> coordinates = np.ones((4, 100, 2)) >>> einx.get_at("b [h w] c, b p [2] -> b p c", tensor, coordinates).shape (4, 100, 3) >>> tensor = np.random.uniform(size=(4, 128, 128, 3)) >>> coordinates_x = np.ones((4, 100), "int32") >>> coordinates_y = np.ones((4, 100), "int32") >>> einx.get_at( ... "b [h w] c, b p, b p -> b p c", ... tensor, ... coordinates_x, ... coordinates_y, ... ).shape (4, 100, 3) Set values in a batch of images (same indices per image): >>> tensor = np.random.uniform(size=(4, 128, 128, 3)) >>> coordinates = np.ones((100, 2), "int32") >>> updates = np.random.uniform(size=(100, 3)) >>> einx.set_at( ... "b [h w] c, p [2], p c -> b [h w] c", tensor, coordinates, updates ... ).shape (4, 128, 128, 3) >>> tensor = np.random.uniform(size=(4, 128, 128, 3)) >>> coordinates_x = np.ones((100,), "int32") >>> coordinates_y = np.ones((100,), "int32") >>> updates = np.random.uniform(size=(100, 3)) >>> einx.set_at( ... "b [h w] c, p, p, p c -> b [h w] c", ... tensor, ... coordinates_x, ... coordinates_y, ... updates, ... ).shape (4, 128, 128, 3) """ if update and any(_has_zero_shape(tensor) for tensor in tensors[1:]): # Skip update if no coordinates are given return tensors[0] exprs_in, expr_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], update=update, cse=cse, **parameters, ) tensor, expr_out = index_stage3( exprs_in, tensors, expr_out, op=op, update=update, backend=backend ) return tensor index.parse = parse @einx.traceback_util.filter def get_at( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.index` with ``op="get_at"`` and ``update=False``""" return index( description, *tensors, op="get_at", update=False, backend=backend, cse=cse, **parameters ) @einx.traceback_util.filter def set_at( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.index` with ``op="set_at"`` and ``update=True``""" return index( description, *tensors, op="set_at", update=True, backend=backend, cse=cse, **parameters ) @einx.traceback_util.filter def add_at( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.index` with ``op="add_at"`` and ``update=True``""" return index( description, *tensors, op="add_at", update=True, backend=backend, cse=cse, **parameters ) @einx.traceback_util.filter def subtract_at( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.index` with ``op="subtract_at"`` and ``update=True``""" return index( description, *tensors, op="subtract_at", update=True, backend=backend, cse=cse, **parameters ) python-einx-0.3.0/einx/op/rearrange.py000066400000000000000000000131231505216034200176730ustar00rootroot00000000000000import einx from . 
import util import numpy as np from typing import Union, Tuple import numpy.typing as npt @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, exprs_out, backend=None: c( exprs_in, [t(x) for x in tensors_in], exprs_out ) ) def rearrange_stage3(exprs_in, tensors_in, exprs_out, backend=None): if len(exprs_in) != len(tensors_in): raise ValueError(f"Expected {len(exprs_in)} input tensor(s), got {len(tensors_in)}") if any( isinstance(expr, einx.expr.stage3.Marker) for root in list(exprs_in) + list(exprs_out) for expr in root.all() ): raise ValueError(f"Marker '{expr}' is not allowed") # Call tensor factories tensors_in = [ einx.tracer.call_factory(tensor, expr.shape, backend, name="embedding", init="rearrange") for tensor, expr in zip(tensors_in, exprs_in) ] tensors_in = backend.all_to_tensor(tensors_in, convert_scalars=True) # Flatten expressions exprs_in, tensors_in = util.flatten(exprs_in, tensors_in, backend=backend) exprs_out_flat = util.flatten(exprs_out) assert all(einx.expr.stage3.is_flat(expr) for expr in exprs_in) assert all(einx.expr.stage3.is_flat(expr) for expr in exprs_out_flat) if len(exprs_in) != len(exprs_out_flat): raise ValueError( f"Got different number of input ({len(exprs_in)}) and output expressions " f"({len(exprs_out_flat)}) (after flattening)" ) # TODO: # Order inputs to align with output expressions indices = util.assignment(exprs_in, exprs_out_flat) exprs_in = [exprs_in[i] for i in indices] tensors_in = [tensors_in[i] for i in indices] # Transpose and broadcast missing output dimensions tensors = [ util.transpose_broadcast(expr_in, tensor, expr_out, backend=backend)[0] for expr_in, tensor, expr_out in zip(exprs_in, tensors_in, exprs_out_flat) ] # Unflatten output expressions tensors = util.unflatten(exprs_out_flat, tensors, exprs_out, backend=backend) return tensors, exprs_out @einx.lru_cache def parse(description, *tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensors, but got {len(tensor_shapes)}") exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [einx.expr.Equation(expr_out) for expr_out in op[1]] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, )[: len(op[0]) + len(op[1])] exprs_in, exprs_out = exprs[: len(op[0])], exprs[len(op[0]) :] return exprs_in, exprs_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, backend=None, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def rearrange( description: str, *tensors: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> Union[einx.Tensor, Tuple[einx.Tensor, ...]]: """Rearranges the input tensors to match the output expressions. Args: description: Description string for the operation in einx notation. Must not contain brackets. tensors: Input tensors or tensor factories matching the description string. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. 
**parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the rearrange operation if ``graph=False``, otherwise the graph representation of the operation. Examples: Transpose the row and column axes of a batch of images: >>> x = np.random.uniform(size=(4, 64, 48, 3)) >>> einx.rearrange("b h w c -> b w h c", x).shape (4, 48, 64, 3,) Insert new axis (repeats elements along the new axis): >>> x = np.random.uniform(size=(10, 10)) >>> einx.rearrange("a b -> a c b", x, c=100).shape (10, 100, 10,) Concatenate two tensors along the first axis: >>> a, b = ( ... np.random.uniform(size=(10, 10)), ... np.random.uniform(size=(20, 10)), ... ) >>> einx.rearrange("a b, c b -> (a + c) b", a, b).shape (30, 10,) Split a tensor: >>> x = np.random.uniform(size=(10, 2)) >>> a, b = einx.rearrange("a (1 + 1) -> a, a", x) >>> a.shape, b.shape ((10,), (10,)) Swap the first and last third of a tensor along a given axis: >>> x = np.arange(6) >>> einx.rearrange("(b + c + d) -> (d + c + b)", x, b=2, c=2) array([4, 5, 2, 3, 0, 1]) """ exprs_in, exprs_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) tensors, exprs_out = rearrange_stage3(exprs_in, tensors, exprs_out, backend=backend) return tensors[0] if len(exprs_out) == 1 else tensors rearrange.parse = parse python-einx-0.3.0/einx/op/reduce.py000066400000000000000000000276671505216034200172160ustar00rootroot00000000000000import einx from . import util import numpy as np from functools import partial from typing import Callable, Union import numpy.typing as npt _any = any # Is overwritten below @einx.jit( trace=lambda t, c: lambda expr_in, tensor_in, expr_out, op, backend=None: c( expr_in, t(tensor_in), expr_out, op=op ) ) def reduce_stage3(expr_in, tensor_in, expr_out, op, backend=None): for root in [expr_in, expr_out]: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") tensors_out, exprs_out = einx.vmap_with_axis_stage3( [expr_in], [tensor_in], [expr_out], op, backend=backend ) return tensors_out[0], exprs_out[0] @einx.lru_cache def parse(description, tensor_shape, keepdims=None, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) if len(op) == 1: expr_in = einx.expr.solve( [einx.expr.Equation(op[0][0], tensor_shape)] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, cse_in_markers=True, )[0] if not _any(isinstance(expr, einx.expr.stage3.Marker) for expr in expr_in.all()): raise ValueError("No axes are marked for reduction") # Determine output expressions by removing markers from input expressions def replace(expr): if isinstance(expr, einx.expr.stage3.Marker): if keepdims: return [einx.expr.stage3.Axis(None, 1)] else: return [] expr_out = einx.expr.stage3.replace(expr_in, replace) else: if keepdims is not None: raise ValueError("keepdims cannot be given when using '->'") if len(op[0]) != 1: raise ValueError(f"Expected 1 input expression, but got {len(op[0])}") if len(op[1]) != 1: raise ValueError(f"Expected 1 output expression, but got {len(op[1])}") expr_in, expr_out = einx.expr.solve( [einx.expr.Equation(op[0][0], tensor_shape)] + [einx.expr.Equation(op[1][0])] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, 
cse_in_markers=True, )[:2] # If no axes are marked for reduction in expr_in, mark all axes that # don't appear in expr_out if not _any(einx.expr.stage3.is_marked(expr) for expr in expr_in.all()): axes_names_out = { axis.name for axis in expr_out.all() if isinstance(axis, einx.expr.stage3.Axis) } expr_in = einx.expr.stage3.mark( expr_in, lambda expr: isinstance(expr, einx.expr.stage3.Axis) and expr.name not in axes_names_out, ) return expr_in, expr_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, tensor, backend=None, **kwargs: c( description, t(tensor), **kwargs ) ) def reduce( description: str, tensor: einx.Tensor, op: Union[Callable, str], keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Applies a reduction operation on the given tensors. The operation reduces all marked axes in the input to a single scalar. It supports the following shorthand notation: * When no brackets are found, brackets are placed implicitly around all axes that do not appear in the output. Example: ``a b c -> a c`` resolves to ``a [b] c -> a c``. * When no output is given, it is determined implicitly by removing marked subexpressions from the input. Example: ``a [b] c`` resolves to ``a [b] c -> a c``. Args: description: Description string for the operation in einx notation. tensor: Input tensor or tensor factory matching the description string. op: Backend reduction operation. Is called with ``op(tensor, axis=...)``. If ``op`` is a string, retrieves the attribute of ``backend`` with the same name. keepdims: Whether to replace marked expressions with 1s instead of dropping them. Must be None when ``description`` already contains an output expression. Defaults to None. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the reduction operation if ``graph=False``, otherwise the graph representation of the operation. Examples: Compute mean along rows of a matrix: >>> x = np.random.uniform(size=(16, 20)) >>> einx.mean("a b -> b", x).shape (20,) >>> einx.mean("[a] b -> b", x).shape (20,) >>> einx.mean("[a] b", x).shape (20,) Compute sum along rows of a matrix and broadcast to the original shape: >>> x = np.random.uniform(size=(16, 20)) >>> einx.sum("[a] b -> a b", x).shape (16, 20,) Sum pooling with kernel size 2: >>> x = np.random.uniform(size=(4, 16, 16, 3)) >>> einx.sum("b (s [s2])... c", x, s2=2).shape (4, 8, 8, 3) Compute variance per channel over an image: >>> x = np.random.uniform(size=(256, 256, 3)) >>> einx.var("[...] 
c", x).shape (3,) """ expr_in, expr_out = parse( description, einx.tracer.get_shape(tensor), keepdims=keepdims, cse=cse, **parameters ) tensor, expr_out = reduce_stage3(expr_in, tensor, expr_out, op=op, backend=backend) return tensor reduce.parse = parse @einx.traceback_util.filter def sum( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="sum"``""" return reduce( description, tensor, op="sum", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def sum_stage3(*args, **kwargs): return reduce_stage3(*args, op="sum", **kwargs) @einx.traceback_util.filter def mean( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="mean"``""" return reduce( description, tensor, op="mean", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def mean_stage3(*args, **kwargs): return reduce_stage3(*args, op="mean", **kwargs) @einx.traceback_util.filter def var( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="var"``""" return reduce( description, tensor, op="var", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def var_stage3(*args, **kwargs): return reduce_stage3(*args, op="var", **kwargs) @einx.traceback_util.filter def std( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="std"``""" return reduce( description, tensor, op="std", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def std_stage3(*args, **kwargs): return reduce_stage3(*args, op="std", **kwargs) @einx.traceback_util.filter def prod( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="prod"``""" return reduce( description, tensor, op="prod", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def prod_stage3(*args, **kwargs): return reduce_stage3(*args, op="prod", **kwargs) @einx.traceback_util.filter def count_nonzero( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="count_nonzero"``""" return reduce( description, tensor, op="count_nonzero", keepdims=keepdims, backend=backend, cse=cse, **parameters, ) def count_nonzero_stage3(*args, **kwargs): return reduce_stage3(*args, op="count_nonzero", **kwargs) @einx.traceback_util.filter def any( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="any"``""" return reduce( description, tensor, op="any", keepdims=keepdims, backend=backend, cse=cse, **parameters ) 
def any_stage3(*args, **kwargs): return reduce_stage3(*args, op="any", **kwargs) @einx.traceback_util.filter def all( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="all"``""" return reduce( description, tensor, op="all", keepdims=keepdims, backend=backend, cse=cse, **parameters ) def all_stage3(*args, **kwargs): return reduce_stage3(*args, op="all", **kwargs) @einx.traceback_util.filter def max( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="max"``""" return reduce(description, tensor, op="max", keepdims=keepdims, backend=backend, **parameters) def max_stage3(*args, **kwargs): return reduce_stage3(*args, op="max", **kwargs) @einx.traceback_util.filter def min( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="min"``""" return reduce(description, tensor, op="min", keepdims=keepdims, backend=backend, **parameters) def min_stage3(*args, **kwargs): return reduce_stage3(*args, op="min", **kwargs) @einx.traceback_util.filter def logsumexp( description: str, tensor: einx.Tensor, keepdims: Union[bool, None] = None, backend: Union[einx.Backend, str, None] = None, **parameters: npt.ArrayLike, ) -> einx.Tensor: """Specialization of :func:`einx.reduce` with ``op="logsumexp"``""" return reduce( description, tensor, op="logsumexp", keepdims=keepdims, backend=backend, **parameters ) def logsumexp_stage3(*args, **kwargs): return reduce_stage3(*args, op="logsumexp", **kwargs) python-einx-0.3.0/einx/op/solve.py000066400000000000000000000113651505216034200170630ustar00rootroot00000000000000import einx import numpy as np from collections import defaultdict from typing import Mapping, Optional import numpy.typing as npt @einx.lru_cache def _solve(description, *tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) exprs = einx.expr.stage1.parse_args(description) if len(exprs) != len(tensor_shapes): raise ValueError(f"Expected {len(exprs)} tensors, got {len(tensor_shapes)}") try: exprs = einx.expr.solve( [ einx.expr.Equation(expr, tensor_shape) for expr, tensor_shape in zip(exprs, tensor_shapes) ] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, ) except ( einx.expr.stage2.SolveDepthException, einx.expr.stage2.SolveExpansionException, einx.expr.stage3.SolveValueException, ): return None values = defaultdict(list) for root in exprs: for expr in root.all(): if isinstance(expr, einx.expr.stage3.Axis): tokens = expr.name.split(".") values[tokens[0]].append((tuple(int(t) for t in tokens[1:]), expr.value)) values2 = {} for name, xs in values.items(): shape = np.amax([coord for coord, value in xs], axis=0) + 1 value = np.zeros(shape, dtype="int32") for coord, v in xs: value[coord] = v if value.shape == (): value = int(value) values2[name] = value return values2 def solve( description: str, *tensors: einx.Tensor, cse: bool = False, **parameters: npt.ArrayLike ) -> Optional[Mapping[str, npt.ArrayLike]]: """Solve for the axis 
values of the given expressions and tensors. Args: description: Description string for the tensors in einx notation. tensors: Input tensors or tensor factories matching the description string. cse: Whether to apply common subexpression elimination to the expressions. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: A mapping from axis name to axis value, or ``None`` if no solution was found. Examples: >>> x = np.zeros((10, 5)) >>> einx.solve("a b", x) {'a': 10, 'b': 5} """ return _solve( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) def matches( description: str, *tensors: einx.Tensor, cse: bool = True, **parameters: npt.ArrayLike ) -> bool: """Check whether the given expressions and tensors match. Args: description: Description string for the tensors in einx notation. tensors: Input tensors or tensor factories matching the description string. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: True if the expressions and tensors match, False otherwise. Examples: >>> x = np.zeros((10, 5)) >>> einx.matches("a b", x) True >>> einx.matches("a b c", x) False """ return solve(description, *tensors, cse=cse, **parameters) is not None @einx.traceback_util.filter def check( description: str, *tensors: einx.Tensor, cse: bool = True, **parameters: npt.ArrayLike ) -> None: """Check whether the given expressions and tensors match and raise an exception if they don't. Args: description: Description string for the tensors in einx notation. tensors: Input tensors or tensor factories matching the description string. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. 
""" description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) exprs = einx.expr.stage1.parse_args(description) if len(exprs) != len(tensors): raise ValueError(f"Expected {len(exprs)} tensors, got {len(tensors)}") tensor_shapes = [einx.tracer.get_shape(tensor) for tensor in tensors] einx.expr.solve( [einx.expr.Equation(expr, tensor_shape) for expr, tensor_shape in zip(exprs, tensor_shapes)] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, ) # Raises an exception if no solution is found python-einx-0.3.0/einx/op/util.py000066400000000000000000000207141505216034200167060ustar00rootroot00000000000000import einx import sys import numpy as np def flatten(exprs, tensors=None, backend=None): if tensors is None: exprs_out = [] for expr in exprs: expr = einx.expr.stage3.decompose(expr) expr = einx.expr.stage3.remove_unnamed_trivial_axes(expr) if any(isinstance(e, einx.expr.stage3.Concatenation) for e in expr): concat_index, concat_expr = [ (i, e) for i, e in enumerate(expr) if isinstance(e, einx.expr.stage3.Concatenation) ][0] for i in range(len(concat_expr.children)): # Extract subexpression subexpr = einx.expr.stage3.replace( expr, lambda expr: expr.children[i].__deepcopy__() if id(expr) == id(concat_expr) else None, ) exprs_out.extend(flatten([subexpr])) else: exprs_out.append(expr) return exprs_out else: assert backend is not None if len(exprs) != len(tensors): raise ValueError("Got different number of expressions and tensors") exprs_out = [] tensors_out = [] for expr, tensor in zip(exprs, tensors): expr = einx.expr.stage3.decompose(expr) expr = einx.expr.stage3.remove_unnamed_trivial_axes(expr) tensor = backend.reshape(tensor, expr.shape) if any(isinstance(e, einx.expr.stage3.Concatenation) for e in expr): concat_index, concat_expr = [ (i, e) for i, e in enumerate(expr) if isinstance(e, einx.expr.stage3.Concatenation) ][0] splits = np.cumsum([0] + [c.shape[0] for c in concat_expr.children]) for i in range(len(concat_expr.children)): # Extract subtensor s = (slice(None),) * concat_index + (slice(splits[i], splits[i + 1]),) subtensor = tensor[s] # TODO: split using np.split? 
# Extract subexpression subexpr = einx.expr.stage3.replace( expr, lambda expr: expr.children[i].__deepcopy__() if id(expr) == id(concat_expr) else None, ) flattened_subexprs, flattened_subtensors = flatten( [subexpr], [subtensor], backend ) exprs_out.extend(flattened_subexprs) tensors_out.extend(flattened_subtensors) else: exprs_out.append(expr) tensors_out.append(tensor) return exprs_out, tensors_out def assignment(exprs_in, exprs_out): if len(exprs_in) != len(exprs_out): raise ValueError("Got different number of input and output expressions") axes_in = [{a.name for a in einx.expr.stage3.get_named_axes(expr_in)} for expr_in in exprs_in] axes_out = [ {a.name for a in einx.expr.stage3.get_named_axes(expr_out)} for expr_out in exprs_out ] cost_matrix = np.ones((len(exprs_out), len(exprs_in)), dtype=int) for i, a_out in enumerate(axes_out): for j, a_in in enumerate(axes_in): cost_matrix[i, j] = 0 if a_in.issubset(a_out) else 1 # Simple brute-force assignment problem solver def assignment_solver(cost_matrix, r=0): if r == cost_matrix.shape[0]: return [], [] # For an expr_out (r), find the first expr_in (c) that matches for c in range(cost_matrix.shape[1]): if cost_matrix[r, c] == 0: cost_matrix2 = cost_matrix.copy() cost_matrix2[r, :] = 1 cost_matrix2[:, c] = 1 rows, cols = assignment_solver(cost_matrix2, r + 1) if rows is not None: return [r] + rows, [c] + cols return None, None row_ind, col_ind = assignment_solver(cost_matrix) if row_ind is None: raise RuntimeError( "Failed to find assignment between input and output expressions" ) # TODO: assert np.all(row_ind == np.arange(len(exprs_out))) return col_ind def transpose_broadcast(expr_in, tensor, expr_out, *, backend, broadcast=True): assert einx.expr.stage3.is_flat(expr_in) and einx.expr.stage3.is_flat( expr_out ), f"'{expr_in}' and '{expr_out}' must be flat" # Transpose axes if necessary in_axes = [a.name for a in einx.expr.stage3.get_axes(expr_in)] out_axes = [a.name for a in einx.expr.stage3.get_axes(expr_out)] out_axes_intersect = [a for a in out_axes if a in in_axes] out_axes_broadcast = [a for a in out_axes if a not in in_axes] if set(out_axes_intersect) != set(in_axes): raise RuntimeError("Found input axes that are not in output expression") # TODO: perm = [in_axes.index(out_axis) for out_axis in out_axes_intersect] tensor = backend.transpose(tensor, tuple(perm)) # Expand and broadcast missing output dimensions if necessary if len(out_axes_broadcast) > 0: pre_broadcast_shape = tuple( 1 if a.name in out_axes_broadcast else a.value for a in einx.expr.stage3.get_axes(expr_out) ) tensor = backend.reshape(tensor, pre_broadcast_shape) if broadcast: tensor = backend.broadcast_to(tensor, expr_out.shape) if not broadcast: expr_out = einx.expr.stage3.List([ (axis if axis.name in in_axes else einx.expr.stage3.Axis(None, 1)) for axis in expr_out ]) return tensor, expr_out def _unflatten(exprs_in, tensors_in, expr_out, backend): expr_out_flat = einx.expr.stage3.decompose(expr_out) expr_out_flat = einx.expr.stage3.remove_unnamed_trivial_axes(expr_out_flat) if any(isinstance(e, einx.expr.stage3.Concatenation) for e in expr_out_flat): concat_index, concat_expr = [ (i, e) for i, e in enumerate(expr_out_flat) if isinstance(e, einx.expr.stage3.Concatenation) ][0] tensors_out = [] for i in range(len(concat_expr.children)): # Extract subexpression of i-th child in concatenation subexpr = einx.expr.stage3.replace( expr_out_flat, lambda expr: expr.children[i].__deepcopy__() if id(expr) == id(concat_expr) else None, ) # Get subtensor subtensor = 
_unflatten(exprs_in, tensors_in, subexpr, backend) tensors_out.append(subtensor) tensor_out = backend.concatenate(tensors_out, axis=concat_index) else: next_expr_in = next(exprs_in) assert einx.expr.stage3.remove_unnamed_trivial_axes( einx.expr.stage3.decompose(expr_out) ) == einx.expr.stage3.remove_unnamed_trivial_axes(einx.expr.stage3.decompose(next_expr_in)) tensor_out = next(tensors_in) tensor_out = backend.reshape(tensor_out, expr_out.shape) return tensor_out def unflatten(exprs_in, tensors_in, exprs_out, *, backend): if len(exprs_in) != len(tensors_in): raise ValueError("Got different number of input expressions and tensors") assert not backend is None iter_exprs_in = iter(exprs_in) iter_tensors_in = iter(tensors_in) tensors_out = [] for expr_out in exprs_out: t = _unflatten(iter_exprs_in, iter_tensors_in, expr_out, backend) assert einx.tracer.get_shape(t) == expr_out.shape tensors_out.append(t) return tensors_out def _clean_parameter(k, v): if v == () or v == []: return np.asarray(v, dtype=np.int64) try: v = np.asarray(v) except Exception as e: raise ValueError(f"Got invalid parameter {k}={v}") from e if not np.issubdtype(v.dtype, np.integer): raise ValueError(f"Got invalid parameter {k}={v}") return v def _clean_description_and_parameters(description, parameters): # Remove parameters that are not used in the description axis_names = { axis.name for axis in einx.expr.stage1.parse_op(description).all() if isinstance(axis, einx.expr.stage1.NamedAxis) } parameters = {k: _clean_parameter(k, v) for k, v in parameters.items() if k in axis_names} return description, parameters def _op_to_str(op): if "__name__" in dir(op): return op.__name__ else: return str(op) python-einx-0.3.0/einx/op/vmap.py000066400000000000000000000360051505216034200166740ustar00rootroot00000000000000import einx import functools from . 
import util import numpy as np from typing import Callable, Union, Mapping import numpy.typing as npt @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, exprs_out, **kwargs: c( exprs_in, [t(x) for x in tensors_in], exprs_out, **kwargs ) ) def vmap_stage3( exprs_in, tensors_in, exprs_out, *, flat=False, backend=None, op=None, kwargs=None, verbose=False, ): if kwargs is None: kwargs = {} if len(exprs_in) != len(tensors_in): raise ValueError(f"Expected {len(exprs_in)} input tensor(s), got {len(tensors_in)}") for root in list(exprs_in) + list(exprs_out): for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") # Call tensor factories tensors_in = [ einx.tracer.call_factory(tensor, expr.shape, backend=backend) for tensor, expr in zip(tensors_in, exprs_in) ] tensors_in = backend.all_to_tensor(tensors_in) if verbose: print("Expressions:") print(" IN:", [str(e) for e in exprs_in]) print(" OUT:", [str(e) for e in exprs_out]) # Flatten expressions exprs_in_flat, tensors_in = util.flatten(exprs_in, tensors_in, backend=backend) exprs_out_flat = util.flatten(exprs_out) assert all(einx.expr.stage3.is_flat(expr) for expr in exprs_in_flat) assert all(einx.expr.stage3.is_flat(expr) for expr in exprs_out_flat) if verbose: print("Flat expressions:") print(" IN:", [str(e) for e in exprs_in_flat]) print(" OUT:", [str(e) for e in exprs_out_flat]) # In op: Unflatten input arguments, flatten output arguments exprs_in_funcargs = [einx.expr.stage3.get_marked(expr) for expr in exprs_in] exprs_out_funcargs = [einx.expr.stage3.get_marked(expr) for expr in exprs_out] exprs_in_funcargs_flat = [einx.expr.stage3.get_marked(expr) for expr in exprs_in_flat] exprs_out_funcargs_flat = [einx.expr.stage3.get_marked(expr) for expr in exprs_out_flat] if verbose: print("Expressions used in op:") if not flat: print(" IN:", [str(e) for e in exprs_in_funcargs]) print(" OUT:", [str(e) for e in exprs_out_funcargs]) print(" IN_FLAT:", [str(e) for e in exprs_in_funcargs_flat]) print(" OUT_FLAT:", [str(e) for e in exprs_out_funcargs_flat]) @einx.trace(args=[einx.tracer.Tensor(expr.shape) for expr in exprs_in_funcargs_flat]) def op(*tensors_in_flat, op=op): if verbose: print( "Flat input tensors that arrived in op:", [str(einx.tracer.get_shape(a)) for a in tensors_in_flat], ) print("Input types to vmapped function:", [type(t) for t in tensors_in_flat]) assert len(tensors_in_flat) == len(exprs_in_funcargs_flat) if not flat: tensors_in = util.unflatten( exprs_in_funcargs_flat, tensors_in_flat, exprs_in_funcargs, backend=backend ) if verbose: print( "Unflattened input tensors in op:", [str(einx.tracer.get_shape(a)) for a in tensors_in], ) assert len(tensors_in) == len(exprs_in) else: tensors_in = tensors_in_flat exprs_out_expected = exprs_out_funcargs if not flat else exprs_out_funcargs_flat if isinstance(op, str): op = getattr(backend, op) elif not isinstance(op, einx.tracer.Tracer): concrete_op = op op = lambda *args, **kwargs: einx.tracer.apply( concrete_op, args=args, kwargs=kwargs, output=[einx.tracer.Tensor(expr.shape) for expr in exprs_out_expected] if len(exprs_out_expected) > 1 else einx.tracer.Tensor(exprs_out_expected[0].shape), ) tensors_out = op(*tensors_in, **kwargs) if not isinstance(tensors_out, (tuple, list)): tensors_out = (tensors_out,) if len(tensors_out) != len(exprs_out_expected): raise ValueError( f"Expected {len(exprs_out_expected)} output tensor(s) from vmapped " f"function, but got {len(tensors_out)}" ) if any(not isinstance(t, 
einx.tracer.Tensor) for t in tensors_out): # TODO: might also be int, float, etc? raise ValueError( f"Expected tensors from vmapped function, but got {[type(t) for t in tensors_out]}" ) if verbose: print("Unflattened output tensors in op:") for expr_out, tensor_out in zip(exprs_out_expected, tensors_out): print(" ", expr_out, tensor_out.shape) for i, (expr_out, tensor_out) in enumerate(zip(exprs_out_expected, tensors_out)): if einx.tracer.get_shape(tensor_out) != expr_out.shape: raise ValueError( f"Expected output shape {expr_out.shape} from {i}-th (zero-based) " f"output of vmapped function, but got {einx.tracer.get_shape(tensor_out)}" ) if not flat: exprs_out_funcargs_flat2, tensors_out = util.flatten( exprs_out_funcargs, tensors_out, backend=backend ) if verbose: print( "Flattened output tensors in op:", [str(einx.tracer.get_shape(a)) for a in tensors_out], ) assert ( exprs_out_funcargs_flat2 == exprs_out_funcargs_flat ), f"{[str(s) for s in exprs_out_funcargs_flat2]} != " f"{[str(s) for s in exprs_out_funcargs_flat]}" if verbose: print("Returning types from vmapped function:", [type(t) for t in tensors_out]) return tuple(tensors_out) axes_names_in = [[a.name for a in root] for root in exprs_in_flat] axes_names_in_set = {a.name for root in exprs_in_flat for a in root} def is_broadcast_axis(expr): return ( isinstance(expr, einx.expr.stage3.Axis) and expr.name not in axes_names_in_set and not einx.expr.stage3.is_marked(expr) ) # Get ordered list of vmapped axes def is_vmapped(expr): return not einx.expr.stage3.is_marked(expr) vmapped_axes = [] for root in list(exprs_in_flat): for v in root: if is_vmapped(v) and v.name not in vmapped_axes: vmapped_axes.append(v.name) if verbose: print(f"Vmapping the following axes: {vmapped_axes}") for root in list(exprs_in_flat) + list(exprs_out_flat): for v in root: if (v.name in vmapped_axes or is_broadcast_axis(v)) != is_vmapped(v): raise ValueError(f"Axis {v.name} appears both as vmapped and non-vmapped") # Apply vmap to op exprs_out_flat_without_broadcast = [ einx.expr.stage3.remove(expr, is_broadcast_axis) for expr in exprs_out_flat ] axes_names_out_without_broadcast = [ [a.name for a in root] for root in exprs_out_flat_without_broadcast ] if verbose: print( "Flat output expressions without broadcast:", [str(e) for e in exprs_out_flat_without_broadcast], ) print("Got input axis names:", axes_names_in) print( "Got output axis names (excluding broadcasted output axes):", axes_names_out_without_broadcast, ) vmaps = [] input_shapes = tuple(expr.shape for expr in exprs_in_flat) output_shapes = tuple(expr.shape for expr in exprs_out_flat_without_broadcast) for v in reversed(vmapped_axes): in_axes = tuple( axes_names.index(v) if v in axes_names else None for axes_names in axes_names_in ) out_axes = tuple( axes_names.index(v) if v in axes_names else None for axes_names in axes_names_out_without_broadcast ) if verbose: print( f"Applying backend.vmap to axis {v}, with input axis indices " f"{in_axes} and output axis indices {out_axes}" ) for out_axis, expr_out in zip(out_axes, exprs_out_flat): if out_axis is None: raise ValueError( f"All vmapped axes must appear in the output expression, " f"but '{v}' does not appear in '{expr_out}'" ) vmaps.append((in_axes, out_axes, input_shapes, output_shapes)) def drop_axis(shape, axis): if axis is None: return shape else: return shape[:axis] + shape[axis + 1 :] input_shapes = tuple(drop_axis(shape, axis) for shape, axis in zip(input_shapes, in_axes)) output_shapes = tuple( drop_axis(shape, axis) for shape, axis in 
zip(output_shapes, out_axes) ) for axes_names in axes_names_in + axes_names_out_without_broadcast: if v in axes_names: axes_names.remove(v) if v in axes_names_out_without_broadcast: axes_names_out_without_broadcast.remove(v) if verbose: print( f"Now has remaining input axes {axes_names_in} and " f"output axes {axes_names_out_without_broadcast}" ) for in_axes, out_axes, input_shapes, output_shapes in reversed(vmaps): op = backend.vmap( op, in_axes=in_axes, out_axes=out_axes, input_shapes=input_shapes, output_shapes=output_shapes, ) # Apply op to tensors if verbose: print("\nSending shapes to backend.vmap:", [str(a.shape) for a in tensors_in]) tensors = einx.tracer.apply( # TODO: replace with tensors = op(*tensors_in) op, args=tensors_in, output=tuple(einx.tracer.Tensor(expr.shape) for expr in exprs_out_flat_without_broadcast), ) if verbose: for tensor, expr in zip(tensors, exprs_out_flat_without_broadcast): print("Got overall flat tensor_out:", tensor.shape, expr) # Transpose and broadcast missing output dimensions tensors = [ util.transpose_broadcast(expr_out_wb, tensor, expr_out, backend=backend)[0] for expr_out_wb, tensor, expr_out in zip( exprs_out_flat_without_broadcast, tensors, exprs_out_flat ) ] if verbose: print("Got overall transposed+broadcasted tensors_out:") for tensor, expr in zip(tensors, exprs_out_flat): print(" ", einx.tracer.get_shape(tensor), expr) # Unflatten output expressions tensors = util.unflatten(exprs_out_flat, tensors, exprs_out, backend=backend) if verbose: print( "Got overall unflattened tensors_out:", [str(einx.tracer.get_shape(a)) for a in tensors] ) return tensors, exprs_out @einx.lru_cache def parse(description, *tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Implicitly determine output expression if len(op) == 1: op = einx.expr.stage1.Op([ op[0], op[0].__deepcopy__(), ]) if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensors, but got {len(tensor_shapes)}") exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [einx.expr.Equation(expr_out) for expr_out in op[1]] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, cse_concat=False, )[: len(op[0]) + len(op[1])] exprs_in, exprs_out = exprs[: len(op[0])], exprs[len(op[0]) :] return exprs_in, exprs_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def vmap( description: str, *tensors: einx.Tensor, op: Callable, flat: bool = False, backend: Union[einx.Backend, str, None] = None, cse: bool = True, kwargs: Mapping = {}, **parameters: npt.ArrayLike, ): """Vectorizes and applies a function to the input tensors using automatic vectorization. The function ``op`` must accept input tensors and yield output tensors as specified in ``description`` with shapes matching the subexpressions that are marked with ``[]``-brackets. Args: description: Description string for the operation in einx notation. tensors: Input tensors or tensor factories matching the description string. op: Function that will be vectorized. If ``op`` is a string, retrieves the attribute of ``backend`` with the same name. 
flat: Whether to pass the tensors to ``op`` in flattened form or matching the nested layout in the input expressions. Defaults to False. kwargs: Additional keyword arguments that are passed to ``op``. Defaults to ``{}``. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the vectorized operation if `graph=False`, otherwise the graph representation of the operation. Examples: Compute the mean along rows of a matrix: >>> x = np.random.uniform(size=(10, 8)) >>> einx.vmap("a [b] -> a", x, op=np.mean) (10,) Vectorize a custom function: >>> x, y = ( ... np.random.uniform(size=(10, 13, 4)), ... np.random.uniform( ... size=( ... 4, ... 9, ... ) ... ), ... ) >>> def op(x, y): # c, d -> 2 >>> return np.stack([np.mean(x), np.max(y)]) >>> einx.vmap("b1 [c] b2, b2 [d] -> b2 [2] b1", x, y, op=op).shape (4, 2, 10) Compute a matrix-matrix multiplication >>> x, y = ( ... np.random.uniform(size=(5, 10)), ... np.random.uniform(size=(10, 3)), ... ) >>> einx.vmap("a [b], [b] c -> a c", x, y, op=np.dot).shape (5, 3) """ exprs_in, exprs_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) tensors, exprs_out = vmap_stage3( exprs_in, tensors, exprs_out, flat=flat, backend=backend, op=op, kwargs=kwargs ) return tensors[0] if len(exprs_out) == 1 else tensors python-einx-0.3.0/einx/op/vmap_with_axis.py000066400000000000000000000305561505216034200207600ustar00rootroot00000000000000import einx from . 
import util import numpy as np from functools import partial from typing import Callable, Mapping, Union, Tuple import numpy.typing as npt _op_names = ["roll", "flip"] @einx.jit( trace=lambda t, c: lambda exprs_in, tensors_in, exprs_out, op, kwargs={}, backend=None: c( exprs_in, [t(x) for x in tensors_in], exprs_out, op, kwargs ) ) def vmap_with_axis_stage3(exprs_in, tensors_in, exprs_out, op, kwargs=None, backend=None): if kwargs is None: kwargs = {} if len(exprs_in) != len(tensors_in): raise ValueError(f"Expected {len(exprs_in)} input tensor(s), got {len(tensors_in)}") if len(set(exprs_out)) != 1: raise ValueError("All output expressions must be the same") for root in list(exprs_in) + list(exprs_out): for expr in root.all(): if isinstance(expr, einx.expr.stage3.Concatenation): raise ValueError("Concatenation not allowed") if len(exprs_out) > 1: raise ValueError("Only one output tensor allowed") if all(einx.tracer.is_scalar(tensor) for tensor in tensors_in): raise ValueError("At least one input tensor must be a non-scalar") # TODO: support this kwargs = {**kwargs} # Call tensor factories tensors_in = [ einx.tracer.call_factory(tensor, expr.shape, backend=backend) for tensor, expr in zip(tensors_in, exprs_in) ] tensors_in = backend.all_to_tensor(tensors_in) # Flatten expressions exprs_in, tensors_in = util.flatten(exprs_in, tensors_in, backend=backend) in_axis_names = {axis.name for expr in exprs_in for axis in expr} def is_broadcast_axis(expr): return isinstance(expr, einx.expr.stage3.Axis) and expr.name not in in_axis_names exprs_out_flat = util.flatten(exprs_out) exprs_out_flat_without_broadcast = [ einx.expr.stage3.remove(expr, is_broadcast_axis) for expr in exprs_out_flat ] transpose_first = len(exprs_in) > 1 # Ensure that axis markings are consistent def is_vmapped(expr): return not einx.expr.stage3.is_marked(expr) vmapped_axis_names = { v.name for root in list(exprs_in) + list(exprs_out_flat_without_broadcast) for v in root if is_vmapped(v) } for root in list(exprs_in) + list(exprs_out_flat_without_broadcast): for v in root: if (v.name in vmapped_axis_names) != is_vmapped(v): raise ValueError(f"Axis {v.name} appears both as vmapped and non-vmapped") marked_input_axes = { axis.name for expr_in in exprs_in for axis in expr_in.all() if isinstance(axis, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(axis) } marked_output_axes = { axis.name for expr_out in exprs_out_flat_without_broadcast for axis in expr_out.all() if isinstance(axis, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(axis) } if marked_output_axes.difference(marked_input_axes): raise ValueError("Marked output axes must be a subset of marked input axes") if transpose_first: # Transpose and insert trivial axes if marked_input_axes != marked_output_axes: raise ValueError( "When using multiple input tensors the same axes must be marked in all tensors" ) x = [ (tensor_in, expr_in) if einx.tracer.is_scalar(tensor_in) else util.transpose_broadcast( expr_in, tensor_in, exprs_out_flat_without_broadcast[0], broadcast=False, backend=backend, ) for expr_in, tensor_in in zip(exprs_in, tensors_in) ] tensors_in = [x[0] for x in x] exprs_in = [x[1] for x in x] assert len({len(expr) for expr in exprs_in if len(expr) > 0}) == 1 marked_input_axes = { axis.name for expr_in in exprs_in for axis in expr_in.all() if isinstance(axis, einx.expr.stage3.Axis) and einx.expr.stage3.is_marked(axis) } exprs_op_output = exprs_out_flat_without_broadcast else: assert len(exprs_in) == 1 # TODO: see above expr_in = exprs_in[0] def 
to_op_output(expr_out_flat_wb): axis_names = { axis.name for axis in expr_out_flat_wb.all() if isinstance(axis, einx.expr.stage3.Axis) } new_axes = [] for axis in expr_in.all(): if isinstance(axis, einx.expr.stage3.Axis) and axis.name in axis_names: if isinstance(axis.parent, einx.expr.stage3.Marker): axis = axis.parent new_axes.append(axis) return einx.expr.stage3.List.maybe(new_axes) exprs_op_output = [ to_op_output(expr_out_flat_wb) for expr_out_flat_wb in exprs_out_flat_without_broadcast ] # Add axis argument if transpose_first: axis_indices = tuple( i for i, axis in enumerate(exprs_out_flat_without_broadcast[0]) if axis.name in marked_input_axes ) else: axes_in = [list(expr) for expr in exprs_in] axis_indices = tuple( i for i in range(len(axes_in[0])) if any(axes_in[i].name in marked_input_axes for axes_in in axes_in) ) if len(axis_indices) > 0: kwargs["axis"] = axis_indices if len(axis_indices) > 1 else axis_indices[0] # Apply operation if isinstance(op, str): op = getattr(backend, op) elif not isinstance(op, einx.tracer.Tracer): concrete_op = op op = lambda *args, **kwargs: einx.tracer.apply( concrete_op, args=args, kwargs=kwargs, output=[einx.tracer.Tensor(expr.shape) for expr in exprs_op_output] if len(exprs_op_output) > 1 else einx.tracer.Tensor(exprs_op_output[0].shape), ) tensors_out = op(*tensors_in, **kwargs) if not isinstance(tensors_out, (tuple, list)): tensors_out = (tensors_out,) if len(tensors_out) != len(exprs_out_flat_without_broadcast): raise ValueError( f"Expected {len(exprs_out_flat_without_broadcast)} output tensor(s), " f"got {len(tensors_out)}" ) # Transpose and broadcast missing output dimensions tensors_out = [ util.transpose_broadcast(expr_in, tensor_out, expr_out, backend=backend)[0] for expr_in, tensor_out, expr_out in zip(exprs_op_output, tensors_out, exprs_out_flat) ] # Unflatten output expressions tensors_out = util.unflatten(exprs_out_flat, tensors_out, exprs_out, backend=backend) return tensors_out, exprs_out @einx.lru_cache def parse(description, *tensor_shapes, cse=True, **parameters): description, parameters = einx.op.util._clean_description_and_parameters( description, parameters ) op = einx.expr.stage1.parse_op(description) # Implicitly determine output expression if len(op) == 1: op = einx.expr.stage1.Op([ op[0], op[0].__deepcopy__(), ]) if len(op[0]) != len(tensor_shapes): raise ValueError(f"Expected {len(op[0])} input tensors, but got {len(tensor_shapes)}") exprs = einx.expr.solve( [ einx.expr.Equation(expr_in, tensor_shape) for expr_in, tensor_shape in zip(op[0], tensor_shapes) ] + [einx.expr.Equation(expr_out) for expr_out in op[1]] + [ einx.expr.Equation(k, np.asarray(v)[..., np.newaxis], depth1=None, depth2=None) for k, v in parameters.items() ], cse=cse, cse_concat=False, )[: len(op[0]) + len(op[1])] exprs_in, exprs_out = exprs[: len(op[0])], exprs[len(op[0]) :] return exprs_in, exprs_out @einx.traceback_util.filter @einx.jit( trace=lambda t, c: lambda description, *tensors, backend=None, **kwargs: c( description, *[t(x) for x in tensors], **kwargs ) ) def vmap_with_axis( description: str, *tensors: einx.Tensor, op: Callable, backend: Union[einx.Backend, str, None] = None, cse: bool = True, kwargs: Mapping = {}, **parameters: npt.ArrayLike, ): """Applies a function to the marked axes of the input tensors by passing the ``axis`` argument and relying on implicit broadcasting rules. The function ``op`` must accept input tensors and an ``axis`` argument specifying the indices of the axes along which the operation is applied. 
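For example (illustrative): for a matrix ``x``, ``einx.vmap_with_axis("a [b] -> a", x, op=np.sum)`` resolves to the single call ``np.sum(x, axis=1)``, since ``b`` is the only marked axis.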
When the function is applied on scalars, the ``axis`` argument is not passed. For multiple input tensors, the function must follow `Numpy broadcasting rules `_. Args: description: Description string for the operation in einx notation. tensors: Input tensors or tensor factories matching the description string. op: Backend operation. Is called with ``op(tensor, axis=...)``. If ``op`` is a string, retrieves the attribute of ``backend`` with the same name. kwargs: Additional keyword arguments that are passed to ``op``. backend: Backend to use for all operations. If None, determines the backend from the input tensors. Defaults to None. cse: Whether to apply common subexpression elimination to the expressions. Defaults to True. graph: Whether to return the graph representation of the operation instead of computing the result. Defaults to False. **parameters: Additional parameters that specify values for single axes, e.g. ``a=4``. Returns: The result of the operation if ``graph=False``, otherwise the graph representation of the operation. Examples: Reverse order of elements along an axis: >>> x = np.random.uniform(size=(16, 20)) >>> einx.vmap_with_axis("a [b] -> a [b]", x, op=np.flip).shape (16, 20) Roll elements along two axes: >>> x = np.random.uniform(size=(16, 20)) >>> einx.vmap_with_axis( ... "a ([b c]) -> a ([b c])", ... x, ... op=partial(np.roll, shift=(2, 2)), ... b=2, ... ).shape (16, 20) Compute sum along axis: >>> x = np.random.uniform(size=(16, 20)) >>> einx.vmap_with_axis("a ([b] c) -> c a", x, op=np.sum, b=2).shape (16, 20) """ exprs_in, exprs_out = parse( description, *[einx.tracer.get_shape(tensor) for tensor in tensors], cse=cse, **parameters ) tensors, exprs_out = vmap_with_axis_stage3( exprs_in, tensors, exprs_out, op=op, kwargs=kwargs, backend=backend ) return tensors[0] if len(exprs_out) == 1 else tensors vmap_with_axis.parse = parse @einx.traceback_util.filter def flip( description: str, tensor: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ): """Specialization of :func:`einx.vmap_with_axis` with ``op="flip"``.""" return vmap_with_axis(description, tensor, op="flip", backend=backend, cse=cse, **parameters) @einx.traceback_util.filter def roll( description: str, tensor: einx.Tensor, shift: Union[int, Tuple[int]], backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ): """Specialization of :func:`einx.vmap_with_axis` with ``op="roll"`` and ``kwargs={"shift": shift}``. 
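Examples: (Illustrative sketch; relies on the implicit-output shorthand in which ``"a [b]"`` is expanded to ``"a [b] -> a [b]"``.) >>> x = np.random.uniform(size=(16, 20)) >>> einx.roll("a [b]", x, shift=2).shape (16, 20)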
""" return vmap_with_axis( description, tensor, op="roll", backend=backend, kwargs={"shift": shift}, cse=cse, **parameters, ) @einx.traceback_util.filter def softmax( description: str, tensor: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ): """Specialization of :func:`einx.vmap_with_axis` with ``op="softmax"``""" return vmap_with_axis(description, tensor, op="softmax", backend=backend, cse=cse, **parameters) @einx.traceback_util.filter def log_softmax( description: str, tensor: einx.Tensor, backend: Union[einx.Backend, str, None] = None, cse: bool = True, **parameters: npt.ArrayLike, ): """Specialization of :func:`einx.vmap_with_axis` with ``op="log_softmax"``""" return vmap_with_axis( description, tensor, op="log_softmax", backend=backend, cse=cse, **parameters ) python-einx-0.3.0/einx/traceback_util.py000066400000000000000000000040561505216034200202700ustar00rootroot00000000000000import threading import os import traceback import types import functools path = os.path.abspath(os.path.join(__file__, "..")) def include_frame(fname): return not fname.startswith(path) thread_local = threading.local() def _set_in_reraise(): if not hasattr(thread_local, "in_reraise"): thread_local.in_reraise = False assert not thread_local.in_reraise thread_local.in_reraise = True def _unset_in_reraise(): assert thread_local.in_reraise thread_local.in_reraise = False def _is_in_reraise(): return getattr(thread_local, "in_reraise", False) def _filter_tb(tb): tb_list = list(traceback.walk_tb(tb)) first_excluded_idx = 0 while first_excluded_idx < len(tb_list) and include_frame( tb_list[first_excluded_idx][0].f_code.co_filename ): first_excluded_idx += 1 last_excluded_idx = len(tb_list) - 1 while last_excluded_idx >= 0 and include_frame( tb_list[last_excluded_idx][0].f_code.co_filename ): last_excluded_idx -= 1 if first_excluded_idx <= last_excluded_idx: tb_list1 = tb_list[:first_excluded_idx] tb_list2 = tb_list[last_excluded_idx + 1 :] tb_list = tb_list1 + tb_list2 tb = None for f, line_no in tb_list: tb = types.TracebackType(tb, f, f.f_lasti, line_no) return tb def filter(func): filter = os.environ.get("EINX_FILTER_TRACEBACK", "true").lower() in ("true", "yes", "1") if filter: @functools.wraps(func) def func_with_reraise(*args, **kwargs): if not _is_in_reraise(): _set_in_reraise() tb = None try: return func(*args, **kwargs) except Exception as e: tb = _filter_tb(e.__traceback__) raise e.with_traceback(tb) from None finally: del tb _unset_in_reraise() else: return func(*args, **kwargs) return func_with_reraise else: return func python-einx-0.3.0/einx/tracer/000077500000000000000000000000001505216034200162155ustar00rootroot00000000000000python-einx-0.3.0/einx/tracer/__init__.py000066400000000000000000000001461505216034200203270ustar00rootroot00000000000000from .tracer import * from .tensor import * from . 
import input from .decorator import jit, lru_cache python-einx-0.3.0/einx/tracer/compile.py000066400000000000000000000453361505216034200202320ustar00rootroot00000000000000import einx from .tracer import * from .tensor import * from .optimize import optimize from functools import partial class Variables: def __init__(self, parent=None): self.variables = {} self.parent = parent self.children = [] if not self.parent is None: self.parent.children.append(self) def fork(self): return Variables(parent=self) def __contains__(self, name): return name in self.variables or (not self.parent is None and name in self.parent) def _is_free(self, name): if name in self.variables: return False if not self.parent is None and name in self.parent: return False for child in self.children: if name in child: return False return True def add(self, value, prefix=None, name=None): if prefix is None: assert name is not None if name in self: raise ValueError(f"Variable name '{name}' already exists") self.variables[name] = value return name else: assert name is None i = 0 while not self._is_free(name := f"{prefix}{i}"): i += 1 self.variables[name] = value return name def __getitem__(self, name): if name in self.variables: return self.variables[name] if not self.parent is None: return self.parent[name] raise ValueError(f"Variable '{name}' is not set") class Block: def __init__(self, variables, parent): self.variables = variables self.parent = parent self.code = [] # lines and blocks def is_parent_of(self, other): return other.parent is self or ( not other.parent is None and self.is_parent_of(other.parent) ) @property def root_block(self): block = self while block.parent is not None: block = block.parent return block def is_root(self): return self.parent is None def get_lines_of_code(self): lines = [] for child in self.code: if isinstance(child, str): lines.append(child) else: assert isinstance(child, Block) lines.extend( f" {line}" for line in child.get_lines_of_code() ) # TODO: param for indentation return lines def __str__(self): return "\n".join(self.get_lines_of_code()) def _remove_parentheses(s): assert isinstance(s, str) level = 0 can_remove_parentheses = s[0] == "(" and s[-1] == ")" if can_remove_parentheses: for i, c in enumerate(s): if c == "(": level += 1 elif c == ")": level -= 1 if level == 0: can_remove_parentheses = i == len(s) - 1 break if can_remove_parentheses: s = s[1:-1] return s class Definition: def __init__(self, value, block, code): self._value = value self._block = block self._code = code self.overwritten = False @property def value(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") return self._value @property def block(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") return self._block @property def code(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") return self._code @property def name(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") if not self.is_variable(): raise ValueError("Trying to access name of non-variable definition") return self._code def is_variable(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") return not self._code is None and self._code.isidentifier() def is_pytree(self): if self.overwritten: raise ValueError("Trying to access overwritten definition") return isinstance(self.value, (tuple, list, dict)) def is_overwritten(self): return self.overwritten def overwrite(self, new_value): if 
self.overwritten: raise ValueError("Trying to overwrite definition twice") if not self.is_variable(): raise ValueError("Trying to overwrite non-variable definition") self.overwritten = True return Definition(new_value, self._block, code=self._code) class CodeObject: def __init__(self, objects): self.root_block = Block(variables=Variables(), parent=None) self.definitions = {} # obj-id: Definition self.constants = [] self.usages = Usages(objects) self.names = einx.tree_util.tree_map(lambda x: self.get_definition_of(x).name, objects) self.code = str(self.root_block) for definition in self.constants: line = f"# {definition.name}: {str(type(definition.value))}" value_str = str(definition.value) if not "\n" in value_str: line += f" = {value_str}" self.code = line + "\n" + self.code locals_globals = {definition.name: definition.value for definition in self.constants} exec(self.code, locals_globals, locals_globals) self.output = einx.tree_util.tree_map(lambda name: locals_globals[name], self.names) def __str__(self): return self.code def join_blocks(self, blocks): blocks = list(blocks) if len(blocks) == 0: return self.root_block block = blocks[0] for block2 in blocks[1:]: if id(block) == id(block2): pass elif block.is_parent_of(block2): block = block2 elif block2.is_parent_of(block): pass else: raise ValueError("Cannot join blocks") return block def execute_application(self, application): assert isinstance(application, Application) comment = f" # {application.comment}" if not application.comment is None else "" # Find block at which to execute the application (i.e. where all dependencies are defined) in_defs = [self.get_definition_of(x) for x in application.dependencies] block = self.join_blocks([d.block for d in in_defs]) use_dynamic_output_check = False if isinstance(application.op, Import): import_str = f"import {application.op.import_}" name = application.op.import_ if not application.op.as_ is None: import_str = f"{import_str} as {application.op.as_}" name = application.op.as_ if not application.op.from_ is None: import_str = f"from {application.op.from_} {import_str}" # Import only once if not any( isinstance(line, str) and (line == import_str or line.startswith(import_str + " #")) for line in block.code ): # First import block.code.insert(0, import_str + comment) self.new_value_definition(application.output, block, name) else: # Subsequent import: Reuse existing definition self.definitions[id(application.output)] = self.definitions[ id(block.variables[name]) ] return inline = None if isinstance(application.op, MemberAccess): inline = True # Always inline obj = self.get_definition_of(application.args[0]).code member = application.args[1] right_str = f"{obj}.{member}" elif isinstance(application.op, Operator): if len(application.args) == 1: op = application.op.op arg = self.get_definition_of(application.args[0]).code right_str = f"({op}{arg})" elif len(application.args) == 2: op = application.op.op arg0 = self.get_definition_of(application.args[0]).code arg1 = self.get_definition_of(application.args[1]).code right_str = f"({arg0} {op} {arg1})" else: raise ValueError(f"Invalid number of arguments for operator '{application.op.op}'") elif isinstance(application.op, AssignAt): obj = self.get_definition_of(application.args[0]).code key = self.get_definition_of(application.args[1]).code op = application.op.op update = self.get_definition_of(application.args[2]).code right_str = f"({obj}[{_remove_parentheses(key)}] {op} {_remove_parentheses(update)})" elif isinstance(application.op, GetAt): obj = 
self.get_definition_of(application.args[0]).code slices = application.args[1] if not isinstance(slices, tuple): slices = (slices,) assert isinstance(slices, tuple) assert len(slices) > 0 def slice_to_str(s): if isinstance(s, slice): x = "" if s.start is not None: x += str(s.start) x += ":" if s.stop is not None: x += str(s.stop) if s.step is not None: x += ":" + str(s.step) return x else: return _remove_parentheses(self.get_definition_of(s).code) slices = ", ".join(slice_to_str(s) for s in slices) right_str = f"{obj}[{slices}]" else: op = self.get_definition_of(application.op).code args = [self.get_definition_of(arg).code for arg in application.args] + [ f"{k}={self.get_definition_of(v).code}" for k, v in application.kwargs.items() ] args = f"{', '.join(args)}" right_str = f"{op}({args})" use_dynamic_output_check = not isinstance(application.op, Tracer) inplace = len(application.inplace_updates) > 0 if inline is None: # Otherwise: inline if the application string is short and output is used only once inline = ( not use_dynamic_output_check and not inplace and len(right_str) < 20 # TODO: add parameter and len(self.usages.get(application.output)) == 1 ) if inline: assert not use_dynamic_output_check self.new_value_definition(application.output, block, right_str) else: if isinstance(application.output, (tuple, list)) and all( isinstance(x, Tracer) for x in application.output ): # Output: Unwrap list or tuple of tracers assert not inplace output_defs = [ self.new_variable_definition(x, block, prefix="x") for x in application.output ] left_str = " ".join([d.name + "," for d in output_defs]) block.code.append(f"{left_str} = {_remove_parentheses(right_str)}" + comment) elif inplace: # Output: Same existing variable for tensor_in, tensor_out in application.inplace_updates: in_definition = self.get_definition_of(tensor_in) usages = self.usages.get(tensor_in) assert ( in_definition.is_variable() # Must be a variable and in_definition.block is block # Must be in the same block and len(usages) == 1 # Must be used exactly once ) self.overwrite_variable_definition(in_definition, tensor_out) block.code.append(_remove_parentheses(right_str) + comment) else: # Output: Single new variable for pytree of tracers left_str = self.new_variable_definition(application.output, block, prefix="x").name block.code.append(f"{left_str} = {_remove_parentheses(right_str)}" + comment) if use_dynamic_output_check: def check(output): definition = self.get_definition_of(output) if isinstance(definition.value, Tensor): line = f"assert {definition.code}.shape == {self.get_definition_of(output.shape).code}" block.code.append(line) einx.tree_util.tree_map(check, application.output) def _add_definition(self, definition): if id(definition.value) in self.definitions: raise ValueError(f"Trying to add definition for existing value") self.definitions[id(definition.value)] = definition # If value is a pytree, add definition for all leaves def store(x, key): if len(key) > 0: code = definition.code for k in key: if isinstance(k, int): code += f"[{k}]" elif isinstance(k, str): code += f'["{k}"]' else: assert False self.new_value_definition(x, definition.block, code) einx.tree_util.tree_map_with_key(store, definition.value) def new_variable_definition(self, value, block, *args, **kwargs): name = block.variables.add(value, *args, **kwargs) definition = Definition(value, block, name) self._add_definition(definition) return definition def new_value_definition(self, value, block, code): definition = Definition(value, block, code) 
self._add_definition(definition) if definition.is_variable(): definition.block.variables.add(value, name=definition.name) return definition def new_empty_definition(self, value, block): definition = Definition(value, block, "!!!") # This should never appear in the final code self._add_definition(definition) return definition def overwrite_variable_definition(self, old_definition, new_value): assert old_definition.is_variable() and not old_definition.is_pytree() self.definitions[id(new_value)] = old_definition.overwrite(new_value) def get_definition_of(self, x): if id(x) in self.definitions: definition = self.definitions[id(x)] if definition.is_overwritten(): raise ValueError(f"Trying to access overwritten variable") return definition if isinstance(x, TracableFunction): if x.args is None: raise ValueError("Cannot define a function without args and/or kwargs") # TODO: assert that function has no sideeffects block = self.root_block if x.name is not None: definition = self.new_variable_definition(x, block, name=x.name) else: definition = self.new_variable_definition(x, block, prefix="op") function_block = Block(variables=block.variables.fork(), parent=block) # Define parameters arg_defs = [ self.new_variable_definition(arg, function_block, prefix="i") for arg in x.args ] # TODO: not using kwargs virtual_arg_defs = [ self.new_empty_definition(virtual_arg, function_block) for virtual_arg in x.virtual_args ] argnames = [d.name for d in arg_defs] # Define function body output_def = self.get_definition_of(x.output) block.code.append(f"def {definition.name}({', '.join(argnames)}):") block.code.append(function_block) block.code.append(f" return {output_def.code}") return definition elif isinstance(x, Tracer): if x.origin == "constant": return Definition(x, self.root_block, None) elif x.origin is None: raise ValueError( f"Got a tracer without an origin and a concrete value with type {type(x)}" ) elif isinstance(x.origin, Application): self.execute_application(x.origin) assert id(x) in self.definitions return self.definitions[id(x)] else: assert False, f"{type(x.origin)}" elif isinstance(x, str): return Definition(x, self.root_block, f'"{x}"') elif isinstance(x, tuple): x_defs = [self.get_definition_of(a) for a in x] code = "(" + ", ".join([d.code for d in x_defs]) + ("," if len(x) == 1 else "") + ")" return Definition(x, self.join_blocks([d.block for d in x_defs]), code) elif isinstance(x, list): x_defs = [self.get_definition_of(a) for a in x] code = "[" + ", ".join([d.code for d in x_defs]) + "]" return Definition(x, self.join_blocks([d.block for d in x_defs]), code) elif isinstance(x, dict): x_defs = {k: self.get_definition_of(v) for k, v in x.items()} code = "{" + ", ".join(f'"{k}": {v.code}' for k, v in x_defs.items()) + "}" return Definition(x, self.join_blocks([v.block for v in x_defs.values()]), code) elif isinstance(x, (int, float, np.integer, np.floating)): return Definition(x, self.root_block, str(x)) elif isinstance(x, slice): if x.step is not None: code = f"slice({self.get_definition_of(x.start).code}, {self.get_definition_of(x.stop).code}, {self.get_definition_of(x.step).code})" elif x.stop is not None: code = f"slice({self.get_definition_of(x.start).code}, {self.get_definition_of(x.stop).code})" else: code = f"slice({self.get_definition_of(x.start).code})" return Definition(x, self.root_block, code) elif x is None: return Definition(x, self.root_block, "None") else: # Constant definition = self.new_variable_definition(x, self.root_block, prefix="const") self.constants.append(definition) 
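# Constant objects (anything that is neither a tracer nor a plain Python literal/container) are bound to a generated variable name here; CodeObject passes them to exec() via the globals dict and lists them in the comment header prepended to the generated code.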
return definition class CompiledFunction: def __init__(self, function): function = optimize(function) code_object = CodeObject(function) self.code = str(code_object) self.op = code_object.output def __call__(self, *input_concrete): # TODO: assert that input_concrete are compatible with function.input? return self.op(*input_concrete) def __str__(self): return self.code python-einx-0.3.0/einx/tracer/decorator.py000066400000000000000000000172131505216034200205550ustar00rootroot00000000000000import functools import os import einx import threading import frozendict import inspect import sys from functools import partial import numpy as np from collections import defaultdict from .compile import CompiledFunction from .tracer import * def _freeze(x): if isinstance(x, np.ndarray): return tuple(x.tolist()) elif isinstance(x, (list, tuple)): return tuple(_freeze(x) for x in x) elif isinstance(x, dict): return frozendict.frozendict({k: _freeze(v) for k, v in x.items()}) else: return x def freeze(func): @functools.wraps(func) def func_frozen(*args, **kwargs): args = [_freeze(a) for a in args] kwargs = {k: _freeze(v) for k, v in kwargs.items()} return func(*args, **kwargs) return func_frozen traced_functions_decorators = [] traced_functions = [] traced_functions_lock = threading.Lock() thread_local = threading.local() thread_local.warn = True def _with_retrace_warning(func): warn_on_retrace_num = int(os.environ.get("EINX_WARN_ON_RETRACE", 0)) if warn_on_retrace_num > 0: cache_failures = defaultdict(lambda: 0) @functools.wraps(func) def func_with_warn(*args, **kwargs): has_warned = False if warn_on_retrace_num > 0: stack = inspect.stack() # Exclude frames called from this file last_index = 0 for i, frame in enumerate(stack): if frame.filename == __file__: last_index = i stack = stack[last_index + 1 :] if len(stack) > 0: # Generate string description of call stack trace = "" for frame in reversed(stack): trace += ( f'File "{frame.filename}", line {frame.lineno}, in {frame.function}\n' ) if frame.code_context is not None: trace += f" {frame.code_context[0].strip()}\n" cache_failures[trace] += 1 if thread_local.warn and cache_failures[trace] == warn_on_retrace_num: # Print warning has_warned = True print( f"WARNING (einx): The following call stack has resulted in " f"{warn_on_retrace_num} retraces of an einx function.\n" f"A retrace happens when the function is called with " "different signatures of input arguments.\n" f"Call stack (most recent call last):\n" f"{trace}" ) # Don't warn in inner functions that also use lru_cache if has_warned: thread_local.warn = False result = func(*args, **kwargs) thread_local.warn = True else: result = func(*args, **kwargs) return result return func_with_warn else: return func def lru_cache(func): func = _with_retrace_warning(func) max_cache_size = int(os.environ.get("EINX_CACHE_SIZE", -1)) if max_cache_size > 0: func = functools.lru_cache(maxsize=max_cache_size if max_cache_size > 0 else None)(func) elif max_cache_size < 0: if "cache" in vars(functools): func = functools.cache(func) else: func = functools.lru_cache(maxsize=None)(func) func = freeze(func) return func _thread_local = threading.local() def _get_trace_stack(): if not hasattr(_thread_local, "stack"): _thread_local.stack = [] return _thread_local.stack class _trace_context: def __init__(self, backend): self.backend = backend def __enter__(self): _get_trace_stack().append(self) def __exit__(self, *args): assert id(_get_trace_stack()[-1]) == id(self) _get_trace_stack().pop() def _is_tracing(): return 
len(_get_trace_stack()) > 0 trace_all = lambda t, c: lambda *args, **kwargs: c( *[t(arg) for arg in args], **{k: t(v) for k, v in kwargs.items()} ) trace_none = lambda t, c: lambda *args, **kwargs: c(args, kwargs) def jit(func=None, trace=trace_all): if func is None: return partial(jit, trace=trace) @lru_cache def construct_graph(args, kwargs, backend): with _trace_context(backend): # Replace input keys with tracers and retrieve list of traced arguments virtual_arg = einx.tracer.Tracer() input_tracers, (args, kwargs) = einx.tracer.input.key_to_tracer( (args, kwargs), backend, virtual_arg ) # Trace function output_tracer = func(*args, backend=backend, **kwargs) # Create function that takes only traced arguments as input function = TracableFunction( args=input_tracers, output=output_tracer, name=backend.function_name, virtual_args=[virtual_arg], ) for decorator in backend.decorators: function = decorator(function) # Convert to graph return CompiledFunction(function) def find_backend_and_construct_graph(args, kwargs, traced_input_values, backend): # Determine backend if backend is None: backend = einx.backend.get_default() if backend is None: backend = einx.backend.get(traced_input_values) elif isinstance(backend, str): backend = einx.backend.get(backend) # Construct graph/ retrieve from cache graph = construct_graph(args=args, kwargs=kwargs, backend=backend) return graph has_decorated = False @functools.wraps(func) def func_jit(*args, backend=None, graph=False, **kwargs): if _is_tracing(): assert not graph if backend is None: backend = _get_trace_stack()[-1].backend elif backend != _get_trace_stack()[-1].backend: raise ValueError("Cannot change backend during tracing") return func(*args, backend=backend, **kwargs) return_graph = graph # Replace concrete values with tracers traced_input_values = [] def new_input(x): value, key = einx.tracer.input.concrete_to_value_and_key(x) if not value is None: traced_input_values.append(value) return key args, kwargs = trace(new_input, lambda *args, **kwargs: (args, kwargs))(*args, **kwargs) # Disable torch.compile for graph construction (if torch is imported) nonlocal has_decorated, find_backend_and_construct_graph if not has_decorated and "torch" in sys.modules: import torch._dynamo as _dynamo find_backend_and_construct_graph = _dynamo.disable(find_backend_and_construct_graph) has_decorated = True graph = find_backend_and_construct_graph(args, kwargs, traced_input_values, backend) # Execute/ return graph if return_graph: return graph else: return graph(*traced_input_values) with traced_functions_lock: traced_functions.append(func_jit) for decorator in traced_functions_decorators: decorator(func_jit) return func_jit def decorate_traced_functions(decorator): with traced_functions_lock: for func in traced_functions: decorator(func) traced_functions_decorators.append(decorator) jit.decorate_traced_functions = decorate_traced_functions python-einx-0.3.0/einx/tracer/input.py000066400000000000000000000064131505216034200177320ustar00rootroot00000000000000import numpy as np from . 
import tracer, tensor import einx import inspect # Define classes for different types of inputs that act as cache keys and will # be converted into the corresponding tracer objects when a graph is constructed class CacheKey: pass class Scalar(CacheKey): def __eq__(self, other): return isinstance(other, Scalar) def __hash__(self): return 1 def to_tracer(self, backend, virtual_arg): x = tensor.Scalar() return x, x class Tensor(CacheKey): def __init__(self, shape, type): self.shape = shape self.type = type def __eq__(self, other): return isinstance(other, Tensor) and other.shape == self.shape and other.type == self.type def __hash__(self): return 2 + hash(self.shape) + hash(self.type) def to_tracer(self, backend, virtual_arg): if any(issubclass(self.type, type) for type in backend.tensor_types): x = tensor.Tensor(self.shape) else: x = tensor.TensorRequiringConversion(self.shape) return x, x class TensorFactory(CacheKey): def __init__(self, params): self.params = tuple(params) def __eq__(self, other): return isinstance(other, TensorFactory) and other.params == self.params def __hash__(self): return 3 + hash(self.params) def to_tracer(self, backend, virtual_arg): x = tensor.TensorFactory(self.params) return x, x class Input: pass tensor_factories = [] def register_tensor_factory(factory): tensor_factories.append(factory) return factory def apply_registered_tensor_factory(x): for factory in tensor_factories: x2 = factory(x) if x2 is not None: return x2 return None def concrete_to_value_and_key(x): if isinstance(x, (float, int, np.floating, np.integer, bool, np.bool_)): # Scalar return x, Scalar() elif isinstance(x, (tuple, list)): # Nested list/ tuple of scalars shape = einx.tracer.get_shape(x) if shape is None: raise ValueError("Failed to determine shape of input tensor") return x, Tensor(shape, type(x)) elif isinstance(x, Input): # Custom input return x.to_value_and_key() elif not (x2 := apply_registered_tensor_factory(x)) is None: # Registered tensor factory return x2 elif callable(x): # Simple callable tensor factory params = [] try: for name, param in inspect.signature(x).parameters.items(): if param.kind == inspect.Parameter.VAR_KEYWORD: name = f"**{name}" elif param.kind == inspect.Parameter.VAR_POSITIONAL: name = f"*{name}" params.append(name) except: pass return x, TensorFactory(params) else: # Tensor return x, Tensor(tuple(int(i) for i in x.shape), type(x)) def key_to_tracer(x, backend, virtual_arg): args = [] def map(x): if isinstance(x, CacheKey): arg, x = x.to_tracer(backend, virtual_arg) if not arg is None: args.append(arg) return x else: return x x = einx.tree_util.tree_map(map, x) return args, x python-einx-0.3.0/einx/tracer/optimize.py000066400000000000000000000105001505216034200204230ustar00rootroot00000000000000from .tracer import * from .tensor import * def get_signature(node): if not node.origin is None: return node.origin.signature else: return None class Optimizer: def __init__(self): self.optimized_nodes = {} self.changed = False def __call__(self, node): if id(node) in self.optimized_nodes: return self.optimized_nodes[id(node)] if isinstance(node, TracableFunction): if node.output is None: raise ValueError("Function output is None") new_node = TracableFunction( func=self(node.func), args=node.args, kwargs=node.kwargs, virtual_args=self(node.virtual_args), output=self(node.output), ) elif isinstance(node, Tracer): if isinstance(node.origin, Application): if ( get_signature(node) == "reshape" and get_signature(node.origin.tensor) == "reshape" ): # Merge consecutive reshape 
ops shape = node.origin.shape new_node = apply( self(node.origin.op), [self(node.origin.tensor.origin.tensor), shape], output=Tensor(shape), signature="reshape", ) self.changed = True elif ( get_signature(node) == "reshape" and get_shape(node.origin.tensor) == node.origin.shape ): # Skip reshape op if tensor already has right shape new_node = self(node.origin.tensor) self.changed = True elif ( get_signature(node) == "broadcast_to" and get_shape(node.origin.tensor) == node.origin.shape ): # Skip broadcast_to op if tensor already has right shape new_node = self(node.origin.tensor) self.changed = True elif get_signature(node) == "transpose" and list(node.origin.permutation) == list( range(len(node.shape)) ): # Skip transpose op if permutation is identity new_node = self(node.origin.tensor) self.changed = True else: # Optimize only arguments new_output_nodes = einx.tree_util.tree_map( lambda node: node.__copy__(), node.origin.output ) def store(new_node, node): assert not id(node) in self.optimized_nodes self.optimized_nodes[id(node)] = new_node einx.tree_util.tree_map(store, new_output_nodes, node.origin.output) new_node = self.optimized_nodes[id(node)] apply( self(node.origin.op), self(node.origin.args), self(node.origin.kwargs), output=new_output_nodes, signature=node.origin.signature, inplace_updates=[ ( self.optimized_nodes[id(tensor_in)], self.optimized_nodes[id(tensor_out)], ) for tensor_in, tensor_out in node.origin.inplace_updates ], comment=node.origin.comment, depend_on=self(node.origin.depend_on), ) else: new_node = node elif isinstance(node, list): new_node = [self(x) for x in node] elif isinstance(node, tuple): new_node = tuple(self(x) for x in node) elif isinstance(node, dict): new_node = {k: self(v) for k, v in node.items()} else: new_node = node self.optimized_nodes[id(node)] = new_node return new_node def optimize(node): while True: optimizer = Optimizer() node = optimizer(node) if not optimizer.changed: break return node python-einx-0.3.0/einx/tracer/tensor.py000066400000000000000000000373101505216034200201050ustar00rootroot00000000000000import numpy as np from .tracer import * from functools import partial class op: def reshape(op: Tracer): @trace def reshape(tensor, shape): if shape == get_shape(tensor): return tensor else: return apply(op, args=[tensor, shape], output=Tensor(shape), signature="reshape") return reshape def transpose(op: Tracer): @trace def transpose(tensor, perm): if list(perm) == list(range(tensor.ndim)): return tensor else: shape = tuple(tensor.shape[i] for i in perm) return apply(op, args=[tensor, perm], output=Tensor(shape), signature="transpose") return transpose def broadcast_to(op: Tracer): @trace def broadcast_to(tensor, shape): if get_shape(tensor) == shape: return tensor else: return apply( op, args=[tensor, shape], output=Tensor(shape), signature="broadcast_to" ) return broadcast_to def einsum(op: Tracer): @trace def einsum(eq, *tensors, **kwargs): exprs = eq.split("->")[0].split(",") if len(exprs) != len(tensors): raise ValueError(f"Expected {len(exprs)} tensors, got {len(tensors)}") values = {} for i, (expr, tensor) in enumerate(zip(exprs, tensors)): expr = expr.strip().replace(" ", "") if len(expr) != len(tensor.shape): raise ValueError( f"Expected {len(expr)} axes, got {len(tensor.shape)} for {i}-th " "(zero-based) input tensor" ) for axis, value in zip(expr, tensor.shape): if axis in values: if values[axis] != value: raise ValueError( f"Got conflicting values for axis {axis}: {values[axis]} and {value}" ) else: values[axis] = value expr_out 
= eq.split("->")[-1].strip().replace(" ", "") shape_out = tuple(values[axis] for axis in expr_out) return apply(op, args=[eq, *tensors], kwargs=kwargs, output=Tensor(shape_out)) return einsum def arange(op: Tracer): @trace def arange(n, dtype="int32"): return apply(op, args=[n], kwargs={"dtype": dtype}, output=Tensor((n,))) return arange def stack(op: Tracer): @trace def stack(tensors, axis=0): if axis < 0: axis = len(tensors[0].shape) + axis + 1 shape = list(tensors[0].shape) shape.insert(axis, len(tensors)) return apply(op, args=[tensors], kwargs={"axis": axis}, output=Tensor(shape)) return stack def concatenate(op: Tracer): @trace def concatenate(tensors, axis=0): shape = list(tensors[0].shape) shape[axis] = sum(tensor.shape[axis] for tensor in tensors) return apply(op, args=[tensors], kwargs={"axis": axis}, output=Tensor(shape)) return concatenate def fill_constant(op: Tracer, value): @trace def fill_constant(shape, dtype="float32"): return apply(op, args=[shape], kwargs={"dtype": dtype}, output=Tensor(shape)) return fill_constant def elementwise(op: Tracer): @trace def elementwise(*args, **kwargs): shape = None for a in args: if "shape" in dir(a): if shape is None: shape = a.shape else: shape2 = a.shape while len(shape) < len(shape2): shape = (1,) + shape while len(shape2) < len(shape): shape2 = (1,) + shape2 shape = np.maximum(shape, shape2) assert not shape is None # TODO: can this happen? return apply(op, args=args, kwargs=kwargs, output=Tensor(shape)) return elementwise def keep_shape(op: Tracer): @trace def keep_shape(*args, **kwargs): return apply(op, args=args, kwargs=kwargs, output=Tensor(args[0].shape)) return keep_shape def reduce(op: Tracer): @trace def reduce(tensor, axis=None, **kwargs): keepdims = kwargs.get("keepdims", False) if axis is None: shape = () else: axes = [axis] if isinstance(axis, int) else axis shape = list(tensor.shape) if keepdims: for a in axes: shape[a] = 1 else: for a in sorted(axes, reverse=True): del shape[a] kwargs = {**kwargs, **{"axis": axis}} return apply(op, args=[tensor], kwargs=kwargs, output=Tensor(shape)) return reduce def get_at(op: Tracer): @trace def get_at(tensor, coordinates): coordinates2 = (coordinates,) if not isinstance(coordinates, tuple) else coordinates if len([c for c in coordinates2 if c is not None]) > len(tensor.shape): raise ValueError(f"Too many indices for tensor of dimension {len(tensor.shape)}") def is_multidim(c): if c is None or isinstance(c, (slice, int, np.integer)): return False elif isinstance(c, list): return True else: return c.ndim > 0 if any(is_multidim(c) for c in coordinates2): # Got multi-dimensional indices while len(coordinates2) < len(tensor.shape): coordinates2 = coordinates2 + (slice(None),) # Find front and back slices front_slices = [] back_slices = [] i = 0 is_front = True for i in range(tensor.ndim): if is_front: if isinstance(coordinates2[i], slice): front_slices.append(i) else: is_front = False else: if isinstance(coordinates2[i], slice): back_slices.append(i) # Broadcast coordinates expressions def broadcast(dims): dims = np.asarray(list({int(i) for i in dims})) assert np.all(dims > 0) if len(dims) > 2 or len(dims) == 2 and np.amin(dims) > 1: raise ValueError("Cannot broadcast coordinates") return np.amax(dims) shapes = [c.shape for c in coordinates2 if not isinstance(c, slice)] if len({len(s) for s in shapes}) != 1: raise ValueError("Expected all coordinates to have same number of dimensions") shapes = np.asarray(shapes) shape = [broadcast(shapes[:, i]) for i in range(shapes.shape[1])] # Prepend 
and append slices shape = tuple( [tensor.shape[i] for i in front_slices] + shape + [tensor.shape[i] for i in back_slices] ) else: output_shape = [] input_shape = tensor.shape for s in coordinates2: if isinstance(s, (int, np.integer)): input_shape = input_shape[1:] elif isinstance(s, slice): start, stop, step = s.indices(input_shape[0]) output_shape.append((stop - start) // step) input_shape = input_shape[1:] elif s is None: output_shape.append(1) elif isinstance(s, Tensor) and s.ndim == 0: input_shape = input_shape[1:] else: raise TypeError(f"Invalid coordinate type: {type(s)}") shape = tuple(output_shape) + tuple(input_shape) return apply(op, args=[tensor, coordinates], output=Tensor(shape)) return get_at def update_at(op: Tracer = None, inplace=False): if op is None: return partial(einx.tracer.tensor.op.update_at, inplace=inplace) @trace def update_at(tensor, coordinates, update): output = Tensor(tensor.shape) return apply( op, args=[tensor, coordinates, update], output=output, inplace_updates=[(tensor, output)] if inplace else [], ) return update_at def vmap(vmap): @trace def vmap_with_output_types(op, in_axes, out_axes, input_shapes, output_shapes): return apply( vmap, args=[op], kwargs={"in_axes": in_axes, "out_axes": out_axes}, signature="vmap", output=Function(output=[Tensor(shape) for shape in output_shapes]), ) return vmap_with_output_types class Tensor(Tracer): def __init__(self, shape): Tracer.__init__(self) if isinstance(shape, np.ndarray): if shape.ndim != 1: raise ValueError(f"Invalid shape: {shape}") self.shape = tuple(int(i) for i in shape) else: try: self.shape = tuple(int(i) for i in shape) except: raise ValueError(f"Invalid shape: {shape}") @property def ndim(self): return len(self.shape) def __copy__(self): assert type(self) == Tensor return Tensor(self.shape) def __getitem__(self, key): return op.get_at(GetAt())(self, key) def __setitem__(self, key, value): if ( not value.origin is None and isinstance(value.origin.op, AssignAt) and value.origin.op != "=" and value.origin.args[0] is self and value.origin.args[1] is key ): # Python reformulates operations like 'tensor[key] += update' as follows: # 1. x1 = __getitem__(tensor, key) # 2. x2 = __iadd__(x1, update) # 3. x3 = __setitem__(tensor, key, x2) # The output of the second line already returns the results of the AssignAt (see below), so # we can skip the third line. 
return value return op.update_at(AssignAt("="), inplace=True)(self, key, value) def __iadd__(self, value): if not isinstance(self.origin.op, GetAt): raise ValueError("Inplace operator only supported for get_at outputs") return op.update_at(AssignAt("+="), inplace=True)( self.origin.args[0], self.origin.args[1], value ) def __isub__(self, value): if not isinstance(self.origin.op, GetAt): raise ValueError("Inplace operator only supported for get_at outputs") return op.update_at(AssignAt("-="), inplace=True)( self.origin.args[0], self.origin.args[1], value ) def __imul__(self, value): if not isinstance(self.origin.op, GetAt): raise ValueError("Inplace operator only supported for get_at outputs") return op.update_at(AssignAt("*="), inplace=True)( self.origin.args[0], self.origin.args[1], value ) def __itruediv__(self, value): if not isinstance(self.origin.op, GetAt): raise ValueError("Inplace operator only supported for get_at outputs") return op.update_at(AssignAt("/="), inplace=True)( self.origin.args[0], self.origin.args[1], value ) def __ifloordiv__(self, value): if not isinstance(self.origin.op, GetAt): raise ValueError("Inplace operator only supported for get_at outputs") return op.update_at(AssignAt("//="), inplace=True)( self.origin.args[0], self.origin.args[1], value ) def __add__(self, other): return op.elementwise(Operator("+"))(self, other) def __radd__(self, other): return op.elementwise(Operator("+"))(other, self) def __neg__(self): return op.elementwise(Operator("-"))(self) def __sub__(self, other): return op.elementwise(Operator("-"))(self, other) def __rsub__(self, other): return op.elementwise(Operator("-"))(other, self) def __mul__(self, other): return op.elementwise(Operator("*"))(self, other) def __rmul__(self, other): return op.elementwise(Operator("*"))(other, self) def __truediv__(self, other): return op.elementwise(Operator("/"))(self, other) def __rtruediv__(self, other): return op.elementwise(Operator("/"))(other, self) def __floordiv__(self, other): return op.elementwise(Operator("//"))(self, other) def __rfloordiv__(self, other): return op.elementwise(Operator("//"))(other, self) def __div__(self, other): return op.elementwise(Operator("/"))(self, other) def __rdiv__(self, other): return op.elementwise(Operator("/"))(other, self) def __mod__(self, other): return op.elementwise(Operator("%"))(self, other) def __rmod__(self, other): return op.elementwise(Operator("%"))(other, self) def __lt__(self, other): return op.elementwise(Operator("<"))(self, other) def __le__(self, other): return op.elementwise(Operator("<="))(self, other) def __gt__(self, other): return op.elementwise(Operator(">"))(self, other) def __ge__(self, other): return op.elementwise(Operator(">="))(self, other) def __eq__(self, other): return op.elementwise(Operator("=="))(self, other) def __ne__(self, other): return op.elementwise(Operator("!="))(self, other) class Scalar(Tensor): def __init__(self): Tensor.__init__(self, ()) class TensorRequiringConversion(Tensor): def __init__(self, shape): Tensor.__init__(self, shape) class TensorFactory(Tracer): def __init__(self, params): self.params = params def __call__(self, shape, kwargs): # Filter kwargs if any(param.startswith("**") for param in self.params): pass else: kwargs = {k: v for k, v in kwargs.items() if k in self.params} return apply(self, args=[shape], kwargs=kwargs, output=Tensor(shape)) def is_scalar(x): return isinstance(x, (int, float, bool, np.integer, np.floating, np.bool_, Scalar)) def is_tensor(x): return isinstance(x, (int, float, 
bool, np.integer, np.floating, np.bool_, Tensor)) def _get_list_shape(x): if isinstance(x, (tuple, list)): subshapes = {_get_list_shape(y) for y in x} if len(subshapes) != 1: raise ValueError("Failed to determine shape of input tensor") subshape = subshapes.pop() return (len(x),) + subshape elif is_scalar(x): return () else: raise ValueError("Failed to determine shape of input tensor") def get_shape(x): if isinstance(x, (tuple, list)): return _get_list_shape(x) elif is_scalar(x): return () try: # Concrete tensor return tuple(int(i) for i in x.shape) except: # Cannot determine shape (e.g. tensor factory) return None @trace def call_factory(x, shape, backend, **kwargs): if is_tensor(x): return x elif isinstance(x, TensorFactory): return x(shape, kwargs=kwargs) else: assert False, f"{type(x)}" python-einx-0.3.0/einx/tracer/tracer.py000066400000000000000000000205721505216034200200550ustar00rootroot00000000000000import einx import inspect from functools import partial import threading class Application: def __init__(self, op, args, kwargs, output, signature, inplace_updates, comment, depend_on): self.op = op self.args = args self.kwargs = kwargs self.output = Tracer() if output is None else output self.signature = signature self.inplace_updates = inplace_updates self.comment = comment self.depend_on = depend_on def update_origin(tracer, key): tracer.origin = self einx.tree_util.tree_map_with_key(update_origin, self.output) # TODO: move this somewhere else? if signature == "reshape": params = inspect.getcallargs(lambda tensor, shape: None, *args, **kwargs) self.shape = params["shape"] self.tensor = params["tensor"] elif signature == "broadcast_to": params = inspect.getcallargs(lambda tensor, shape: None, *args, **kwargs) self.shape = params["shape"] self.tensor = params["tensor"] elif signature == "transpose": params = inspect.getcallargs(lambda tensor, permutation: None, *args, **kwargs) self.permutation = params["permutation"] self.tensor = params["tensor"] @property def dependencies(self): return [self.op] + list(self.args) + list(self.kwargs.values()) + self.depend_on def apply( op, args=[], kwargs={}, output=None, signature=None, inplace_updates=[], comment=None, depend_on=[], ): if isinstance(op, partial): return apply( op.func, args=list(op.args) + list(args), kwargs={**op.keywords, **kwargs}, output=output, signature=signature, inplace_updates=inplace_updates, comment=comment, depend_on=depend_on, ) elif isinstance(op, TracableFunction): assert len(inplace_updates) == 0 got_output = op(*args, **kwargs) if not output is None: def check(got_output, expected_output): if type(got_output) != type(expected_output): # TODO: also compare shape etc raise ValueError( f"Expected output type {type(expected_output)} when tracing " f"TracableFunction, got {type(got_output)}" ) einx.tree_util.tree_map(check, got_output, output) return got_output else: return Application( op, args=args, kwargs=kwargs, output=output, signature=signature, inplace_updates=inplace_updates, comment=comment, depend_on=depend_on + _get_depend_on_stack(), ).output _thread_local = threading.local() def _get_depend_on_stack(): if not hasattr(_thread_local, "depend_on"): _thread_local.depend_on = [] return _thread_local.depend_on class depend_on: def __init__(self, tracers): self.tracer = list(einx.tree_util.tree_flatten(tracers)) def __enter__(self): _get_depend_on_stack().append(self.tracer) def __exit__(self, *args): assert _get_depend_on_stack()[-1] is self.tracer _get_depend_on_stack().pop() class Tracer: def __init__(self, 
origin=None): self.origin = origin def __getattr__(self, key): if key.startswith("__") and key.endswith("__"): return object.__getattribute__(self, key) else: return MemberAccess()(self, key) def __getitem__(self, key): return GetAt()(self, key) def __call__(self, *args, **kwargs): return apply(self, args=args, kwargs=kwargs) def __copy__(self): assert type(self) == Tracer return Tracer() class Import(Tracer): def __init__(self, import_, as_, from_): Tracer.__init__(self, origin="constant") self.import_ = import_ self.as_ = as_ self.from_ = from_ def __call__(self): # Overwrite allowed arguments return apply(self) def import_(import_, as_=None, from_=None): return Import(import_, as_, from_)() class MemberAccess(Tracer): def __init__(self): Tracer.__init__(self, origin="constant") def __call__(self, obj, key): # Overwrite allowed arguments assert isinstance(key, str) return apply(self, args=[obj, key]) class Operator(Tracer): def __init__(self, op: str): Tracer.__init__(self, origin="constant") self.op = op def __call__(self, *args): # Overwrite allowed arguments return apply(self, args=args) class AssignAt(Tracer): def __init__(self, op: str): Tracer.__init__(self, origin="constant") self.op = op def __call__(self, obj, key, update): # Overwrite allowed arguments return apply(self, args=[obj, key, update]) class GetAt(Tracer): def __init__(self): Tracer.__init__(self, origin="constant") def __call__(self, obj, key): # Overwrite allowed arguments return apply(self, args=[obj, key]) class Function(Tracer): def __init__(self, output): self.output = output def __copy__(self): return Function(self.output) def __call__(self, *args, **kwargs): return apply( self, args=args, kwargs=kwargs, output=einx.tree_util.tree_map(lambda x: x.__copy__(), self.output), ) class TracableFunction(Tracer): def __init__(self, func=None, args=None, kwargs=None, virtual_args=[], output=None, name=None): Tracer.__init__(self) if isinstance(func, Tracer): raise ValueError(f"func cannot be a tracer object") if not output is None and args is None and kwargs is None: raise ValueError(f"Cannot create a TracableFunction with an output but no input") if args is None and not kwargs is None: args = [] if not args is None and kwargs is None: kwargs = {} if not func is None and output is None and (not args is None or not kwargs is None): output = func(*args, **kwargs) self.func = func self.args = args self.kwargs = kwargs self.virtual_args = virtual_args self.output = output self.name = name def __call__(self, *args, **kwargs): if self.func is None: raise NotImplementedError( f"Cannot call a TracableFunction that was created without a callable function" ) return self.func(*args, **kwargs) class Usages: def __init__(self, tracers): self.usages = {} # tracer-id: [using-applications] def _capture_usages(x): if not id(x) in self.usages: self.usages[id(x)] = [] if isinstance(x, (list, tuple)): for y in x: _capture_usages(y) elif isinstance(x, dict): for y in x.values(): _capture_usages(y) elif isinstance(x, Tracer) and isinstance(x.origin, Application): for y in x.origin.dependencies: if isinstance(y, Tracer): # Add x.origin to y's usages if not id(y) in self.usages: self.usages[id(y)] = [] for usage in self.usages[id(y)]: if id(usage) == id(x.origin): break else: self.usages[id(y)].append(x.origin) # Continue capturing usages with y _capture_usages(y) elif isinstance(x, TracableFunction): _capture_usages(x.func) _capture_usages(x.args) _capture_usages(x.kwargs) _capture_usages(x.output) _capture_usages(tracers) def get(self, 
tracers): usages = [] def retrieve_usages(tracer): if id(tracer) in self.usages: usages.extend(self.usages[id(tracer)]) einx.tree_util.tree_map(retrieve_usages, tracers) return usages def trace(func=None, args=None, kwargs=None): if func is None: return partial(trace, args=args, kwargs=kwargs) else: return TracableFunction(func=func, args=args, kwargs=kwargs) python-einx-0.3.0/einx/tree_util.py000066400000000000000000000045201505216034200173040ustar00rootroot00000000000000# Avoid a hard jax dependency def tree_map_with_key(func, *trees, key=(), is_leaf=None): if is_leaf is not None and is_leaf(key, *trees): return func(*trees, key=key) elif all(isinstance(tree, list) for tree in trees) and all( len(trees[0]) == len(tree) for tree in trees[1:] ): return [ tree_map_with_key(func, *elements, key=key + (i,), is_leaf=is_leaf) for i, elements in enumerate(zip(*trees)) ] elif all(isinstance(tree, tuple) for tree in trees) and all( len(trees[0]) == len(tree) for tree in trees[1:] ): return tuple( tree_map_with_key(func, *elements, key=key + (i,), is_leaf=is_leaf) for i, elements in enumerate(zip(*trees)) ) elif all(isinstance(tree, dict) for tree in trees) and all( trees[0].keys() == tree.keys() for tree in trees[1:] ): return { k: tree_map_with_key( func, *[tree[k] for tree in trees], key=key + (k,), is_leaf=is_leaf ) for k in trees[0] } else: return func(*trees, key=key) def tree_map(func, *trees, is_leaf=None): if is_leaf is not None and is_leaf(*trees): return func(*trees) elif all(isinstance(tree, list) for tree in trees) and all( len(trees[0]) == len(tree) for tree in trees[1:] ): return [ tree_map(func, *elements, is_leaf=is_leaf) for i, elements in enumerate(zip(*trees)) ] elif all(isinstance(tree, tuple) for tree in trees) and all( len(trees[0]) == len(tree) for tree in trees[1:] ): return tuple( tree_map(func, *elements, is_leaf=is_leaf) for i, elements in enumerate(zip(*trees)) ) elif all(isinstance(tree, dict) for tree in trees) and all( trees[0].keys() == tree.keys() for tree in trees[1:] ): return {k: tree_map(func, *[tree[k] for tree in trees], is_leaf=is_leaf) for k in trees[0]} else: return func(*trees) def tree_flatten(x, is_leaf=None): if is_leaf is not None and is_leaf(x): yield x elif isinstance(x, (list, tuple)): for x in x: yield from tree_flatten(x, is_leaf=is_leaf) elif isinstance(x, dict): for x in x.items(): yield from tree_flatten(x, is_leaf=is_leaf) else: yield x python-einx-0.3.0/einx/types.py000066400000000000000000000001421505216034200164500ustar00rootroot00000000000000from typing import TypeVar import einx Tensor = TypeVar("Tensor") Backend = einx.backend.Backend python-einx-0.3.0/examples/000077500000000000000000000000001505216034200156105ustar00rootroot00000000000000python-einx-0.3.0/examples/benchmark1.py000066400000000000000000000170321505216034200202000ustar00rootroot00000000000000import torch import jax import einx import timeit import types import einops import random import argparse import math import jax.numpy as jnp import numpy as np from functools import partial from collections import defaultdict parser = argparse.ArgumentParser() parser.add_argument("--n", type=int, default=1000) args = parser.parse_args() k = 1 n = args.n // k rows = [] envs = [ types.SimpleNamespace( name="numpy", backend=einx.backend.get("numpy"), jit=lambda x: x, block_until_ready=lambda x: x, to_numpy=np.asarray, ones=lambda shape, dtype: np.ones(shape, dtype=dtype), transpose=np.transpose, mean=np.mean, einsum=np.einsum, ), types.SimpleNamespace( name="torch-eager", 
backend=einx.backend.get("torch"), jit=lambda x: x, block_until_ready=lambda x: torch.cuda.synchronize(), to_numpy=lambda x: np.asarray(x.cpu()), ones=lambda shape, dtype: torch.ones(shape, dtype=vars(torch)[dtype]).cuda(), transpose=torch.permute, mean=torch.mean, einsum=torch.einsum, ), types.SimpleNamespace( name="torch-compile", backend=einx.backend.get("torch"), jit=torch.compile, block_until_ready=lambda x: torch.cuda.synchronize(), to_numpy=lambda x: np.asarray(x.cpu()), ones=lambda shape, dtype: torch.ones(shape, dtype=vars(torch)[dtype]).cuda(), transpose=torch.permute, mean=torch.mean, einsum=torch.einsum, ), types.SimpleNamespace( name="jax-jit", backend=einx.backend.get("jax"), jit=jax.jit, block_until_ready=lambda x: x.block_until_ready(), to_numpy=lambda x: np.asarray(x), ones=lambda shape, dtype: jnp.ones(shape, dtype=dtype), transpose=jnp.transpose, mean=jnp.mean, einsum=jnp.einsum, ), ] for env in envs: experiments = [] f = 4 if env.name == "numpy" else 1 x = env.ones((16 // f, 512 // f, 512 // f, 64 // f), "float32") x2 = env.ones((16 // f, 256 // f, 256 // f, 64 // f), "float32") y = env.ones((512 // f, 512 // f), "float32") z1 = env.ones((64 // f,), "float32") w = env.ones((64 // f, 128 // f), "float32") def benchmark_einx(x): return einx.rearrange("b h w c -> b c h w", x, backend=env.backend) def benchmark_einops(x): return einops.rearrange(x, "b h w c -> b c h w") def benchmark_idx(x): return env.transpose(x, (0, 3, 1, 2)) experiments.append(("rearrange", (benchmark_einx, benchmark_einops, benchmark_idx), (x,), 5.0)) def benchmark_einx(x): return einx.mean("b [s...] c", x) def benchmark_einops(x): return einops.reduce(x, "b h w c -> b c", reduction="mean") def benchmark_idx(x): return env.mean(x, axis=(1, 2)) experiments.append(( "spatial_mean", (benchmark_einx, benchmark_einops, benchmark_idx), (x,), 5.0, )) def benchmark_einx(x): return einx.mean("b s... [c]", x) def benchmark_einops(x): return einops.reduce(x, "b h w c -> b h w", reduction="mean") def benchmark_idx(x): return env.mean(x, axis=3) experiments.append(( "channel_mean", (benchmark_einx, benchmark_einops, benchmark_idx), (x,), 5.0, )) def benchmark_einx(x, y): return einx.add("b [s...] c", x, y) def benchmark_idx(x, y): return x + y[None, ..., None] experiments.append(("spatial_add", (benchmark_einx, None, benchmark_idx), (x, y), 5.0)) def benchmark_einx(x, y): return einx.add("b s... [c]", x, y) def benchmark_idx(x, y): return x + y experiments.append(("channel_add", (benchmark_einx, None, benchmark_idx), (x, z1), 5.0)) def benchmark_einx(x, w): return einx.dot("b... [c1->c2]", x, w) def benchmark_einops(x, w): return einops.einsum(x, w, "... c1, c1 c2 -> ... 
c2") def benchmark_idx(x, w): return env.einsum("b h w c, c d -> b h w d", x, w) experiments.append(("einsum", (benchmark_einx, benchmark_einops, benchmark_idx), (x2, w), 5.0)) for name, methods, inputs, mul in experiments: name = env.name + " " + name print(name) # Assert correctness results = [] for method in methods: if method is not None: results.append(method(*inputs)) results = [env.to_numpy(r) for r in results] for r2 in results[1:]: assert np.allclose(results[0], r2) # Initialization for _ in range(5): for method in methods: if method is not None: env.block_until_ready(method(*inputs)) methods = [env.jit(m) if m is not None else None for m in methods] for _ in range(5): for method in methods: if method is not None: env.block_until_ready(method(*inputs)) # Benchmark times = defaultdict(list) order = "random" if order == "random": methods2 = list(methods) for _ in range(max(1, int(n * mul))): random.shuffle(methods2) for method in methods2: if method is not None: times[method.__name__].append( timeit.repeat( lambda: env.block_until_ready(method(*inputs)), repeat=1, number=k )[0] / k ) elif order == "sequential": for method in methods: if method is not None: for _ in range(max(1, int(n * mul))): times[method.__name__].append( timeit.repeat( lambda: env.block_until_ready(method(*inputs)), repeat=1, number=k )[0] / k ) else: raise AssertionError() # Store and print results for key in list(times.keys()): p = int(len(times[key]) * 0.2) times[key] = sorted(times[key])[p:-p] for method in methods: if method is not None: print( f"{method.__name__:>25}: {1000.0 * np.mean(times[method.__name__]):0.6f} " f"+- {1000.0 * np.std(times[method.__name__]):0.6f}" ) rows.append((name, times)) print() # Print markup table import tabulate table = [] def tostr(times): if len(times) == 0 or times is None: return "" m = f"{np.mean(times):0.3f}" s = f"{np.std(times):0.3f}" return f"{m:>7} +- {s:>7}" for name, times in rows: times = {k: np.asarray(v) for k, v in times.items()} table.append([ name, 1000000.0 * (np.mean(times["benchmark_einx"]) - np.mean(times["benchmark_idx"])), 1000000.0 * (np.mean(times["benchmark_einops"]) - np.mean(times["benchmark_idx"])) if "benchmark_einops" in times else "", tostr(1000.0 * times["benchmark_einx"]), tostr(1000.0 * times["benchmark_einops"]) if "benchmark_einops" in times else "", tostr(1000.0 * times["benchmark_idx"]), ]) print( tabulate.tabulate( table, headers=[ "Method", "einx overhead (us)", "einops overhead (us)", "einx (ms)", "einops (ms)", "index-based (ms)", ], tablefmt="github", ) ) python-einx-0.3.0/examples/benchmark2.py000066400000000000000000000323751505216034200202100ustar00rootroot00000000000000import torch import jax import einx import timeit import einops import random import argparse import math import gc import types import jax.numpy as jnp import numpy as np from functools import partial from collections import defaultdict parser = argparse.ArgumentParser() parser.add_argument("--n", type=int, default=100) args = parser.parse_args() k = 1 n = args.n // k rows = [] envs = [ types.SimpleNamespace( name="torch-eager", backend=einx.backend.get("torch"), jit=lambda x: x, block_until_ready=lambda x: torch.cuda.synchronize(), to_numpy=lambda x: np.asarray(x.cpu()), ones=lambda shape, dtype="float32": torch.ones(shape, dtype=vars(torch)[dtype]).cuda(), transpose=torch.permute, mean=torch.mean, var=torch.var, square=torch.square, einsum=torch.einsum, swapaxes=torch.swapaxes, rsqrt=torch.rsqrt, where=torch.where, dot=torch.matmul, softmax=lambda x, axis: 
torch.nn.functional.softmax(x, axis), native_transposed=True, ), types.SimpleNamespace( name="torch-compile", backend=einx.backend.get("torch"), jit=torch.compile, block_until_ready=lambda x: torch.cuda.synchronize(), to_numpy=lambda x: np.asarray(x.cpu()), ones=lambda shape, dtype="float32": torch.ones(shape, dtype=vars(torch)[dtype]).cuda(), transpose=torch.permute, mean=torch.mean, var=torch.var, square=torch.square, einsum=torch.einsum, swapaxes=torch.swapaxes, rsqrt=torch.rsqrt, where=torch.where, dot=torch.matmul, softmax=lambda x, axis: torch.nn.functional.softmax(x, axis), native_transposed=True, ), types.SimpleNamespace( name="jax-jit", backend=einx.backend.get("jax"), jit=jax.jit, block_until_ready=lambda x: x.block_until_ready(), to_numpy=lambda x: np.asarray(x), ones=lambda shape, dtype="float32": jnp.ones(shape, dtype=dtype), transpose=jnp.transpose, mean=jnp.mean, var=jnp.var, square=jnp.square, einsum=jnp.einsum, swapaxes=jnp.swapaxes, rsqrt=jnp.sqrt, where=jnp.where, dot=jnp.dot, softmax=jax.nn.softmax, native_transposed=False, ), ] k = int(math.sqrt(args.n)) for env in envs: experiments = [] f = 1 x = env.ones((16 // f, 512 // f, 512 // f, 64 // f), "float32") if "torch" in env.name: x_transposed = env.ones((16 // f, 64 // f, 512 // f, 512 // f), "float32") x_transposed[:] = einx.rearrange("b s... c -> b c s...", x) y = env.ones((512 // f, 512 // f)) z1 = env.ones((64 // f,), "float32") z2 = env.ones((64 // f,), "float32") w = env.ones((64 // f, 128 // f), "float32") if "torch" in env.name: w_transposed = env.ones((128 // f, 64 // f), "float32") w_transposed[:] = w.T w1 = env.ones((512 // f, 512 // f, 128 // f)) w2 = env.ones((128 // f, 512 // f, 512 // f)) b128 = env.ones((128 // f,), "float32") epsilon = 1e-5 query = env.ones((16 // f, 512 // f, 512 // f), "float32") key = env.ones((16 // f, 512 // f, 512 // f), "float32") value = env.ones((16 // f, 512 // f, 512 // f), "float32") def benchmark_einx(x, bias, scale): return einx.nn.norm(x, "b... [c]", bias=bias, scale=scale, epsilon=epsilon, fastvar=False)[ 0 ] def benchmark_idx(x, bias, scale): mean = env.mean(x, axis=-1, keepdims=True) var = env.var(x, axis=-1, keepdims=True) inv = scale * env.rsqrt(var + epsilon) x = inv * (x - mean) + bias return x if "torch" in env.name: def benchmark_native(x, bias, scale): return torch.nn.functional.layer_norm( x, (x.shape[-1],), weight=scale, bias=bias, eps=epsilon ) else: benchmark_native = None experiments.append(( "layernorm", (benchmark_einx, benchmark_native, benchmark_idx), lambda m: (x, z1, z2), 3.0, )) def benchmark_einx(x, bias, scale): return einx.nn.norm(x, "b... [c]", bias=bias, scale=scale, epsilon=epsilon, fastvar=True)[0] def benchmark_idx(x, bias, scale): # https://github.com/deepmind/dm-haiku/blob/main/haiku/_src/layer_norm.py mean = env.mean(x, axis=-1, keepdims=True) mean_of_squares = env.mean(env.square(x), axis=-1, keepdims=True) var = mean_of_squares - env.square(mean) inv = scale * env.rsqrt(var + epsilon) x = inv * (x - mean) + bias return x if "torch" in env.name: def benchmark_native(x, bias, scale): return torch.nn.functional.layer_norm( x, (x.shape[-1],), weight=scale, bias=bias, eps=epsilon ) else: benchmark_native = None experiments.append(( "layernorm_fastvar", (benchmark_einx, benchmark_native, benchmark_idx), lambda m: (x, z1, z2), 3.0, )) def benchmark_einx(x, bias, scale): return einx.nn.norm(x, "[b...] 
c", bias=bias, scale=scale, epsilon=epsilon, fastvar=False)[ 0 ] def benchmark_idx(x, bias, scale): mean = env.mean(x, axis=(1, 2), keepdims=True) var = env.var(x, axis=(1, 2), keepdims=True) inv = scale * env.rsqrt(var + epsilon) x = inv * (x - mean) + bias return x if "torch" in env.name: def benchmark_native(x, bias, scale): return torch.nn.functional.batch_norm( x, None, None, weight=scale, bias=bias, eps=epsilon, training=True ) else: benchmark_native = None experiments.append(( "batchnorm", (benchmark_einx, benchmark_native, benchmark_idx), lambda m: (x_transposed if env.native_transposed and "native" in m.__name__ else x, z1, z2), 3.0, )) def benchmark_einx(x, bias, scale): return einx.nn.norm(x, "[b...] c", bias=bias, scale=scale, epsilon=epsilon, fastvar=True)[0] def benchmark_idx(x, bias, scale): # https://github.com/deepmind/dm-haiku/blob/main/haiku/_src/batch_norm.py mean = env.mean(x, axis=(1, 2), keepdims=True) mean_of_squares = env.mean(env.square(x), axis=(1, 2), keepdims=True) var = mean_of_squares - env.square(mean) inv = scale * env.rsqrt(var + epsilon) x = inv * (x - mean) + bias return x if "torch" in env.name: def benchmark_native(x, bias, scale): return torch.nn.functional.batch_norm( x, None, None, weight=scale, bias=bias, eps=epsilon, training=True ) else: benchmark_native = None experiments.append(( "batchnorm_fastvar", (benchmark_einx, benchmark_native, benchmark_idx), lambda m: (x_transposed if env.native_transposed and "native" in m.__name__ else x, z1, z2), 3.0, )) def benchmark_einx(x, bias, weight): return einx.nn.linear(x, "b... [c1->c2]", bias=bias, weight=weight) def benchmark_idx(x, bias, weight): # https://github.com/deepmind/dm-haiku/blob/main/haiku/_src/basic.py x = env.dot(x, weight) x = x + bias return x if "torch" in env.name: def benchmark_native(x, bias, weight): return torch.nn.functional.linear(x, weight=weight, bias=bias) else: benchmark_native = None experiments.append(( "channel_linear", (benchmark_einx, benchmark_native, benchmark_idx), lambda m: ( x, b128, w_transposed if env.native_transposed and "native" in m.__name__ else w, ), 1.0, )) def benchmark_einx(x, w1, b1, w2, b2): x0 = x x = einx.nn.linear(x, "b [s...->s2] c", weight=w1, bias=b1) x = env.where(x < 0, 0, x) x = einx.nn.linear(x, "b [s2->s...] 
c", weight=w2, bias=b2) x = x + x0 return x def benchmark_idx(x, w1, b1, w2, b2): # https://github.com/lucidrains/mlp-mixer-pytorch/blob/main/mlp_mixer_pytorch/mlp_mixer_pytorch.py # https://github.com/google-research/vision_transformer/blob/main/vit_jax/models_mixer.py x0 = x shape = x.shape x = x.reshape([x.shape[0], -1, x.shape[-1]]) x = env.swapaxes(x, 1, 2) x = env.dot(x, w1.reshape([-1, w1.shape[-1]])) x = x + b1 x = env.where(x < 0, 0, x) x = env.dot(x, w2.reshape([w2.shape[0], -1])) x = x + b2.reshape([-1]) x = env.swapaxes(x, 1, 2) x = x.reshape(shape) x = x + x0 return x experiments.append(( "spatial_mlp", (benchmark_einx, None, benchmark_idx), lambda m: (x, w1, b128, w2, y), 1.0, )) heads = 8 def benchmark_einx(q, k, v, heads=heads): attn = einx.dot("b q (h c), b k (h c) -> b q k h", q, k, h=heads) attn = einx.softmax("b q [k] h", attn) x = einx.dot("b q k h, b k (h c) -> b q (h c)", attn, v) return x def benchmark_idx(q, k, v, heads=heads): q = einops.rearrange(q, "b l (h k) -> b h l k", h=heads) k = einops.rearrange(k, "b t (h k) -> b h t k", h=heads) v = einops.rearrange(v, "b t (h v) -> b h t v", h=heads) attn = env.einsum("bhlk,bhtk->bhlt", q, k) attn = env.softmax(attn, axis=3) x = env.einsum("bhlt,bhtv->bhlv", attn, v) x = einops.rearrange(x, "b h l v -> b l (h v)") return x experiments.append(( "multihead-attention", (benchmark_einx, None, benchmark_idx), lambda m: (query, key, value), 1.0, )) for name, methods, inputs, mul in experiments: name = env.name + " " + name print(name) results = [] for method in methods: if method is not None: r = method(*inputs(method)) if "batchnorm" in name and "torch" in env.name and "native" in method.__name__: r = einx.rearrange("b c s... -> b s... c", r) results.append(r) results = [env.to_numpy(r) for r in results] for r2 in results[1:]: assert np.allclose(results[0], r2) for _ in range(5): for method in methods: if method is not None: env.block_until_ready(method(*inputs(method))) methods = [env.jit(m) if m is not None else None for m in methods] for _ in range(5): for method in methods: if method is not None: env.block_until_ready(method(*inputs(method))) times = defaultdict(list) order = "random" if order == "random": methods2 = list(methods) for _ in range(max(1, int(n * mul))): random.shuffle(methods2) for method in methods2: if method is not None: inputs2 = inputs(method) times[method.__name__].append( timeit.repeat( lambda: env.block_until_ready(method(*inputs2)), repeat=1, number=k )[0] / k ) elif order == "sequential": for method in methods: if method is not None: inputs2 = inputs(method) for _ in range(max(1, int(n * mul))): times[method.__name__].append( timeit.repeat( lambda: env.block_until_ready(method(*inputs2)), repeat=1, number=k )[0] / k ) else: raise AssertionError() for key2 in list(times.keys()): p = int(len(times[key2]) * 0.2) times[key2] = sorted(times[key2])[p:-p] # if "benchmark_native" not in times: # times["benchmark_native"] = times["benchmark_idx"] for method in methods: if method is not None: print( f"{method.__name__:>25}: {1000.0 * np.mean(times[method.__name__]):0.6f} " f"+- {1000.0 * np.std(times[method.__name__]):0.6f}" ) rows.append((name, times)) print() del x, y, z1, z2, w, w1, w2, b128, query, key, value gc.collect() import tabulate table = [] def tostr(times): if len(times) == 0 or times is None: return "" m = f"{np.mean(times):0.3f}" s = f"{np.std(times):0.3f}" return f"{m:>7} +- {s:>7}" for name, times in rows: times = {k: np.asarray(v) for k, v in times.items()} table.append([ name, 1000000.0 
* ( np.mean(times["benchmark_einx"]) - np.mean( times["benchmark_native"] if "benchmark_native" in times else times["benchmark_idx"] ) ), tostr(1000.0 * times["benchmark_einx"]), tostr(1000.0 * times["benchmark_native"]) if "benchmark_native" in times else "", tostr(1000.0 * times["benchmark_idx"]), ]) print( tabulate.tabulate( table, headers=["Method", "einx overhead (us)", "einx (ms)", "native (ms)", "index-based (ms)"], tablefmt="github", ) ) python-einx-0.3.0/examples/train_equinox.py000066400000000000000000000071221505216034200210510ustar00rootroot00000000000000import ssl ssl._create_default_https_context = ( ssl._create_unverified_context ) # Fixed problem with downloading CIFAR10 dataset import torch import einx import os import torchvision import time import jax import optax import torchvision.transforms as transforms import einx.nn.equinox as einn import equinox as eqx from functools import partial import jax.numpy as jnp from typing import List transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) batch_size = 256 rng = jax.random.PRNGKey(42) def next_rng(): global rng rng, x = jax.random.split(rng) return x cifar10_path = os.path.join(os.path.dirname(__file__), "cifar10") trainset = torchvision.datasets.CIFAR10( root=cifar10_path, train=True, download=True, transform=transform ) trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=2 ) testset = torchvision.datasets.CIFAR10( root=cifar10_path, train=False, download=True, transform=transform ) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=2 ) class Block(eqx.Module): linear: einn.Linear norm: einn.Norm dropout: einn.Dropout def __init__(self, c): self.linear = einn.Linear("b [...->c]", c=c) self.norm = einn.Norm("b [c]") self.dropout = einn.Dropout("[...]", drop_rate=0.2) def __call__(self, x, rng): x = self.linear(x, rng=rng) x = self.norm(x, rng=rng) x = jax.nn.gelu(x) x = self.dropout(x, rng=rng) return x class Net(eqx.Module): blocks: List[Block] classifier: einn.Linear def __init__(self): self.blocks = [Block(c) for c in [1024, 512, 256]] self.classifier = einn.Linear("b [...->c]", c=10) def __call__(self, x, rng): for block in self.blocks: x = block(x, rng=rng) return self.classifier(x, rng=rng) train_net = Net() inputs, _ = next(iter(trainloader)) train_net(jnp.asarray(inputs), rng=next_rng()) # Run on dummy batch optimizer = optax.adam(3e-4) opt_state = optimizer.init(eqx.filter(train_net, eqx.is_array)) @partial(eqx.filter_jit, donate="all") def update_step(opt_state, net, images, labels, rng): def loss_fn(net): logits = net(images, rng=rng) one_hot = jax.nn.one_hot(labels, 10) loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) return loss _loss, grads = eqx.filter_value_and_grad(loss_fn)(net) updates, new_opt_state = optimizer.update(grads, opt_state, net) new_net = eqx.apply_updates(net, updates) return new_opt_state, new_net @partial(eqx.filter_jit, donate="all") def test_step(net, images, labels): logits = net(images, rng=rng) accurate = jnp.argmax(logits, axis=1) == jnp.asarray(labels) return accurate print("Starting training") for epoch in range(100): t0 = time.time() # Train for _i, data in enumerate(trainloader): inputs, labels = data opt_state, train_net = update_step( opt_state, train_net, jnp.asarray(inputs), jnp.asarray(labels), next_rng() ) # Test correct = 0 total = 0 infer_net = eqx.nn.inference_mode(train_net) for data 
in testloader: images, labels = data accurate = test_step(infer_net, jnp.asarray(images), jnp.asarray(labels)) total += accurate.shape[0] correct += jnp.sum(accurate) print( f"Test accuracy after {epoch + 1:5d} epochs: {float(correct) / total} " f"({time.time() - t0:.2f}sec)" ) python-einx-0.3.0/examples/train_flax.py000066400000000000000000000070351505216034200203160ustar00rootroot00000000000000import ssl ssl._create_default_https_context = ( ssl._create_unverified_context ) # Fixed problem with downloading CIFAR10 dataset from flax import linen as nn import torch import einx import os import jax import optax import time import torchvision import torchvision.transforms as transforms import jax.numpy as jnp from flax.training import train_state import einx.nn.flax as einn from functools import partial transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) batch_size = 256 rng = jax.random.PRNGKey(42) def next_rng(): global rng rng, x = jax.random.split(rng) return x cifar10_path = os.path.join(os.path.dirname(__file__), "cifar10") trainset = torchvision.datasets.CIFAR10( root=cifar10_path, train=True, download=True, transform=transform ) trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=2 ) testset = torchvision.datasets.CIFAR10( root=cifar10_path, train=False, download=True, transform=transform ) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=2 ) class Net(nn.Module): @nn.compact def __call__(self, x, training): for c in [1024, 512, 256]: x = einn.Linear("b [...->c]", c=c)(x) x = einn.Norm("[b] c", decay_rate=0.99)(x, training=training) x = nn.gelu(x) x = einn.Dropout("[...]", drop_rate=0.2)(x, training=training) x = einn.Linear("b [...->c]", c=10)(x) return x net = Net() inputs, labels = next(iter(trainloader)) params = net.init( {"dropout": next_rng(), "params": next_rng()}, jnp.asarray(inputs), training=True ) # Run on dummy batch if "stats" not in params: params["stats"] = {} optimizer = optax.adam(3e-4) opt_state = optimizer.init(params["params"]) @partial(jax.jit, donate_argnums=(0, 1)) def update_step(params, opt_state, images, labels, rng): def loss_fn(params, stats): logits, new_stats = net.apply( {"params": params, "stats": stats}, images, training=True, rngs={"dropout": rng}, mutable=["stats"], ) one_hot = jax.nn.one_hot(labels, 10) loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) return loss, new_stats (_loss, new_stats), grads = jax.value_and_grad(loss_fn, has_aux=True)( params["params"], params["stats"] ) params["stats"] = new_stats["stats"] updates, new_opt_state = optimizer.update(grads, opt_state, params["params"]) params["params"] = optax.apply_updates(params["params"], updates) return params, new_opt_state @jax.jit def test_step(params, images, labels): logits = net.apply(params, images, training=False) accurate = jnp.argmax(logits, axis=1) == jnp.asarray(labels) return accurate print("Starting training") for epoch in range(100): t0 = time.time() # Train for data in trainloader: inputs, labels = data params, opt_state = update_step( params, opt_state, jnp.asarray(inputs), jnp.asarray(labels), next_rng() ) # Test correct = 0 total = 0 for data in testloader: images, labels = data accurate = test_step(params, jnp.asarray(images), jnp.asarray(labels)) total += accurate.shape[0] correct += jnp.sum(accurate) print( f"Test accuracy after {epoch + 1:5d} epochs: {float(correct) / total} " 
f"({time.time() - t0:.2f}sec)" ) python-einx-0.3.0/examples/train_haiku.py000066400000000000000000000064411505216034200204650ustar00rootroot00000000000000import ssl ssl._create_default_https_context = ( ssl._create_unverified_context ) # Fixed problem with downloading CIFAR10 dataset import haiku as hk import torch import einx import os import jax import optax import time import torchvision import torchvision.transforms as transforms import jax.numpy as jnp from functools import partial import einx.nn.haiku as einn transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) batch_size = 256 rng = jax.random.PRNGKey(42) def next_rng(): global rng rng, x = jax.random.split(rng) return x cifar10_path = os.path.join(os.path.dirname(__file__), "cifar10") trainset = torchvision.datasets.CIFAR10( root=cifar10_path, train=True, download=True, transform=transform ) trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=2 ) testset = torchvision.datasets.CIFAR10( root=cifar10_path, train=False, download=True, transform=transform ) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=2 ) class Net(hk.Module): def __call__(self, x, training): for c in [1024, 512, 256]: x = einn.Linear("b [...->c]", c=c)(x) x = einn.Norm("[b] c", decay_rate=0.99)(x, training=training) x = jax.nn.gelu(x) x = einn.Dropout("[...]", drop_rate=0.2)(x, training=training) x = einn.Linear("b [...->c]", c=10)(x) return x net = hk.transform_with_state(lambda x, training: Net()(x, training)) inputs, labels = next(iter(trainloader)) params, state = net.init(rng=next_rng(), x=jnp.asarray(inputs), training=True) # Run on dummy batch optimizer = optax.adam(3e-4) opt_state = optimizer.init(params) @partial(jax.jit, donate_argnums=(0, 1, 2)) def update_step(opt_state, params, state, images, labels, rng): def loss_fn(params, state): logits, new_state = net.apply(params, state, rng, images, training=True) one_hot = jax.nn.one_hot(labels, 10) loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) return loss, new_state (_loss, new_state), grads = jax.value_and_grad(loss_fn, has_aux=True)(params, state) updates, new_opt_state = optimizer.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) return new_opt_state, new_params, new_state @jax.jit def test_step(params, state, images, labels): logits, _ = net.apply(params, state, rng, images, training=False) accurate = jnp.argmax(logits, axis=1) == jnp.asarray(labels) return accurate print("Starting training") for epoch in range(100): t0 = time.time() # Train for data in trainloader: inputs, labels = data opt_state, params, state = update_step( opt_state, params, state, jnp.asarray(inputs), jnp.asarray(labels), next_rng() ) # Test correct = 0 total = 0 for data in testloader: images, labels = data accurate = test_step(params, state, jnp.asarray(images), jnp.asarray(labels)) total += accurate.shape[0] correct += jnp.sum(accurate) print( f"Test accuracy after {epoch + 1:5d} epochs: {float(correct) / total} " f"({time.time() - t0:.2f}sec)" ) python-einx-0.3.0/examples/train_keras.py000066400000000000000000000061031505216034200204640ustar00rootroot00000000000000import ssl ssl._create_default_https_context = ( ssl._create_unverified_context ) # Fixed problem with downloading CIFAR10 dataset import torch import keras import einx import os import torchvision import time import torchvision.transforms as 
transforms import einx.nn.keras as einn import numpy as np import tensorflow as tf transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) batch_size = 256 cifar10_path = os.path.join(os.path.dirname(__file__), "cifar10") trainset = torchvision.datasets.CIFAR10( root=cifar10_path, train=True, download=True, transform=transform ) trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=2 ) testset = torchvision.datasets.CIFAR10( root=cifar10_path, train=False, download=True, transform=transform ) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=2 ) # Option 1: Functional inputs = x = keras.Input( shape=(3, 32, 32), batch_size=1 ) # Requires specifying batch_size with some dummy value, since dynamic shapes are not allowed for c in [1024, 512, 256]: x = einn.Linear("b [...->c]", c=c)(x) x = einn.Norm("[b] c", decay_rate=0.99)(x) x = keras.layers.Activation(keras.activations.gelu)(x) x = einn.Dropout("[...]", drop_rate=0.2)(x) x = einn.Linear("b [...->c]", c=10)(x) model = keras.Model(inputs=inputs, outputs=x) # Option 2: Sequential # blocks = [] # for c in [1024, 512, 256]: # blocks.append(einn.Linear("b [...->c]", c=c)) # blocks.append(einn.Norm("[b] c", decay_rate=0.99)) # blocks.append(keras.layers.Activation(keras.activations.gelu)) # blocks.append(einn.Dropout("[...]", drop_rate=0.2)) # blocks.append(einn.Linear("b [...->c]", c=10)) # model = keras.Sequential(blocks) optimizer = keras.optimizers.Adam(learning_rate=1e-3) loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) @tf.function def train_step(inputs, labels): with tf.GradientTape() as tape: logits = model(inputs, training=True) loss_value = loss_fn(labels, logits) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply(grads, model.trainable_weights) @tf.function def test_step(inputs, labels): outputs = model(inputs, training=False) predicted = tf.math.argmax(outputs, axis=1) return predicted == labels print("Starting training") for epoch in range(100): t0 = time.time() # Train for data in trainloader: inputs, labels = data inputs = np.array(inputs) labels = np.array(labels) train_step(inputs, labels) # Test correct = 0 total = 0 for data in testloader: images, labels = data images = np.array(images) labels = np.array(labels) accurate = test_step(images, labels) total += accurate.shape[0] correct += tf.math.count_nonzero(accurate) print( f"Test accuracy after {epoch + 1:5d} epochs {float(correct) / total} " f"({time.time() - t0:.2f}sec)" ) python-einx-0.3.0/examples/train_torch.py000066400000000000000000000053131505216034200205000ustar00rootroot00000000000000import ssl ssl._create_default_https_context = ( ssl._create_unverified_context ) # Fixed problem with downloading CIFAR10 dataset import torch import einx import os import torchvision import time import torchvision.transforms as transforms import torch.nn as nn import torch.optim as optim import einx.nn.torch as einn transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) batch_size = 256 cifar10_path = os.path.join(os.path.dirname(__file__), "cifar10") trainset = torchvision.datasets.CIFAR10( root=cifar10_path, train=True, download=True, transform=transform ) trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=2 ) testset = torchvision.datasets.CIFAR10( root=cifar10_path, 
train=False, download=True, transform=transform ) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=2 ) class Net(nn.Module): def __init__(self): super().__init__() blocks = [] for c in [1024, 512, 256]: blocks.append(einn.Linear("b [...->c]", c=c)) blocks.append(einn.Norm("[b] c", decay_rate=0.99)) blocks.append(nn.GELU()) blocks.append(einn.Dropout("[...]", drop_rate=0.2)) blocks.append(einn.Linear("b [...->c]", c=10)) self.blocks = nn.Sequential(*blocks) def forward(self, x): return self.blocks(x) net = Net() # Call on dummy batch to initialize parameters (before torch.compile!) inputs, _ = next(iter(trainloader)) net(inputs) net = net.cuda() net = torch.compile(net) optimizer = optim.Adam(net.parameters(), lr=3e-4) criterion = nn.CrossEntropyLoss() @torch.compile def test_step(inputs, labels): outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) return predicted == labels print("Starting training") for epoch in range(100): t0 = time.time() # Train net.train() for data in trainloader: inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # Test net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: inputs, labels = data inputs, labels = inputs.cuda(), labels.cuda() accurate = test_step(inputs, labels) total += accurate.size(0) correct += int(torch.count_nonzero(accurate)) print( f"Test accuracy after {epoch + 1:5d} epochs {float(correct) / total} " f"({time.time() - t0:.2f}sec)" ) python-einx-0.3.0/ruff.toml000066400000000000000000000006161505216034200156340ustar00rootroot00000000000000preview = true line-length = 100 target-version = "py38" [lint] select = ["B", "C", "F", "W", "YTT", "ASYNC", "E", "UP"] ignore = ["F401", "F403", "E722", "F821", "E402", "E741", "C901", "B017", "B023", "B020"] [format] preview = true quote-style = "double" indent-style = "space" skip-magic-trailing-comma = false line-ending = "auto" docstring-code-format = true docstring-code-line-length = 70python-einx-0.3.0/setup.cfg000066400000000000000000000000471505216034200156140ustar00rootroot00000000000000[metadata] description-file = readme.mdpython-einx-0.3.0/setup.py000066400000000000000000000017541505216034200155130ustar00rootroot00000000000000#!/usr/bin/env python3 from setuptools import setup, find_packages with open("README.md") as f: long_description = f.read() setup( name="einx", version="0.3.0", python_requires=">=3.8", description="Universal Tensor Operations in Einstein-Inspired Notation for Python", long_description=long_description, long_description_content_type="text/markdown", author="Florian Fervers", author_email="florian.fervers@gmail.com", url="https://github.com/fferflo/einx", packages=find_packages(), license="MIT", include_package_data=True, install_requires=[ "numpy", "sympy", "frozendict", ], extras_require={ "torch": ["torch>=2"], "keras": ["keras>=3"], }, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", ], ) python-einx-0.3.0/test/000077500000000000000000000000001505216034200147515ustar00rootroot00000000000000python-einx-0.3.0/test/conftest.py000066400000000000000000000162771505216034200171650ustar00rootroot00000000000000import importlib import numpy as np import types import einx import threading import multiprocessing import os tests = [] class WrappedEinx: def 
__init__(self, wrap, name, inline_args): import einx self.einx = einx self.in_func = False self.wrap = wrap self.name = name self.inline_args = inline_args def __enter__(self): assert not self.in_func self.in_func = True def __exit__(self, *args): assert self.in_func self.in_func = False def __getattr__(self, attr): op = getattr(self.einx, attr) if self.in_func or attr in {"matches", "solve", "check", "trace"}: return op if self.inline_args: def op3(*args, **kwargs): with self: def op2(): return op(*args, **kwargs) op2 = self.wrap(op2) op2() return op2() return op3 else: def op3(*args, **kwargs): with self: op2 = self.wrap(op) op2(*args, **kwargs) return op2(*args, **kwargs) return op3 def in_new_thread(op): def inner(*args, **kwargs): result = [None, None] def run(result): try: result[0] = op(*args, **kwargs) except Exception as e: result[1] = e thread = threading.Thread(target=run, args=(result,)) thread.start() thread.join() if result[1] is not None: raise result[1] else: return result[0] return inner einx_multithread = WrappedEinx(in_new_thread, "multithreading", inline_args=True) def in_new_process(op): def inner(*args, **kwargs): result = multiprocessing.Queue() exception = multiprocessing.Queue() def run(result, exception): try: result.put(op(*args, **kwargs)) except Exception as e: exception.put(e) process = multiprocessing.Process(target=run, args=(result, exception)) process.start() process.join() if not exception.empty(): raise exception.get() else: return result.get() return inner einx_multiprocess = WrappedEinx(in_new_process, "multiprocessing", inline_args=True) # numpy is always available import numpy as np backend = einx.backend.numpy.create() test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": np.full(shape, value, dtype=dtype), to_tensor=np.asarray, to_numpy=np.asarray, ) tests.append((einx, backend, test)) tests.append((einx_multithread, backend, test)) # tests.append((einx_multiprocess, backend, test)) # too slow if importlib.util.find_spec("jax"): os.environ["XLA_FLAGS"] = ( os.environ.get("XLA_FLAGS", "") + " --xla_force_host_platform_device_count=8" ) import jax import jax.numpy as jnp einx_jit = WrappedEinx(jax.jit, "jax.jit", inline_args=True) backend = einx.backend.jax.create() test_cpu = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": jax.device_put( jnp.full(shape, value, dtype=dtype), device=jax.devices("cpu")[0] ), to_tensor=lambda x: jax.device_put(jnp.asarray(x), device=jax.devices("cpu")[0]), to_numpy=np.asarray, ) tests.append((einx, backend, test_cpu)) tests.append((einx_jit, backend, test_cpu)) try: jax.devices("gpu") has_gpu = True except: has_gpu = False if has_gpu: test_gpu = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": jax.device_put( jnp.full(shape, value, dtype=dtype), device=jax.devices("gpu")[0] ), to_tensor=lambda x: jax.device_put(jnp.asarray(x), device=jax.devices("gpu")[0]), to_numpy=np.asarray, ) tests.append((einx, backend, test_gpu)) tests.append((einx_jit, backend, test_gpu)) if importlib.util.find_spec("torch"): import torch version = tuple(int(i) for i in torch.__version__.split(".")[:2]) def wrap(op): torch.compiler.reset() return torch.compile(op) einx_torchcompile = WrappedEinx(wrap, "torch.compile", inline_args=False) backend = einx.backend.torch.create() dtypes = { "float32": torch.float32, "long": torch.long, "bool": torch.bool, } test_cpu = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32", backend=backend: torch.full( 
backend.to_tuple(shape), value, dtype=dtypes[dtype] ), to_tensor=lambda tensor: torch.asarray(tensor, device=torch.device("cpu")), to_numpy=lambda tensor: tensor.numpy(), ) tests.append((einx, backend, test_cpu)) if version >= (2, 1): tests.append((einx_torchcompile, backend, test_cpu)) if torch.cuda.is_available(): test_gpu = types.SimpleNamespace( full=lambda shape, value=1.0, dtype="float32", backend=backend: torch.full( backend.to_tuple(shape), value, dtype=dtypes[dtype], device=torch.device("cuda") ), to_tensor=lambda tensor: torch.asarray(tensor, device=torch.device("cuda")), to_numpy=lambda tensor: tensor.cpu().numpy(), ) tests.append((einx, backend, test_gpu)) if version >= (2, 1): tests.append((einx_torchcompile, backend, test_gpu)) if importlib.util.find_spec("tensorflow"): import os os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" import tensorflow as tf import tensorflow.experimental.numpy as tnp tnp.experimental_enable_numpy_behavior() backend = einx.backend.tensorflow.create() test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": tnp.full(shape, value, dtype=dtype), to_tensor=tf.convert_to_tensor, to_numpy=lambda x: x.numpy(), ) tests.append((einx, backend, test)) if importlib.util.find_spec("mlx"): import mlx.core as mx backend = einx.backend.mlx.create() test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32", backend=backend: mx.full( shape, value, dtype=backend.to_dtype(dtype) ), to_tensor=mx.array, to_numpy=np.asarray, ) tests.append((einx, backend, test)) if importlib.util.find_spec("dask"): import dask.array as da backend = einx.backend.dask.create() test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": da.full(shape, value, dtype=dtype), to_tensor=np.asarray, to_numpy=np.asarray, ) tests.append((einx, backend, test)) if importlib.util.find_spec("tinygrad"): import os os.environ["PYTHON"] = "1" from tinygrad import Tensor backend = einx.backend.tinygrad.create() test = types.SimpleNamespace( full=lambda shape, value=0.0, dtype="float32": Tensor.full( shape, value, dtype=backend.to_dtype(dtype) ), to_tensor=Tensor, to_numpy=lambda x: x.numpy(), ) tests.append((einx, backend, test)) python-einx-0.3.0/test/test_compare.py000066400000000000000000000325401505216034200200140ustar00rootroot00000000000000import importlib import einx if importlib.util.find_spec("einops"): import einops import numpy as np def assert_equal_numpy(a, b): assert a.shape == b.shape if a.dtype.kind in "f": assert np.allclose(a, b) else: assert np.all(a == b) def test_compare_einops(): x = np.random.uniform(size=(4, 128, 128, 3)) assert_equal_numpy( einx.mean("b [s...] c", x), einops.reduce(x, "b ... c -> b c", reduction="mean") ) assert_equal_numpy( einx.mean("b ... c -> b c", x), einops.reduce(x, "b ... c -> b c", reduction="mean") ) assert_equal_numpy( einx.mean("b [s...] c", x, keepdims=True), einops.reduce(x, "b h w c -> b 1 c", reduction="mean"), ) assert_equal_numpy( einx.mean("b [s]... c", x, keepdims=True), einops.reduce(x, "b h w c -> b 1 1 c", reduction="mean"), ) assert_equal_numpy( einx.mean("b h w c -> b 1 1 c", x), einops.reduce(x, "b h w c -> b 1 1 c", reduction="mean"), ) assert_equal_numpy( einx.sum("b (s [s2])... 
c", x, s2=2), einops.reduce(x, "b (h h2) (w w2) c -> b h w c", reduction="sum", h2=2, w2=2), ) assert_equal_numpy( einx.sum("b (h h2) (w w2) c -> b h w c", x, h2=2, w2=2), einops.reduce(x, "b (h h2) (w w2) c -> b h w c", reduction="sum", h2=2, w2=2), ) w = np.random.uniform(size=(3, 32)) assert_equal_numpy( einx.dot("b... [c1->c2]", x, w), einops.einsum(x, w, "... c1, c1 c2 -> ... c2") ) assert_equal_numpy( einx.dot("... c1, c1 c2 -> ... c2", x, w), einops.einsum(x, w, "... c1, c1 c2 -> ... c2"), ) w = np.random.uniform(size=(128, 128, 64)) assert_equal_numpy( einx.dot("b [s...->s2] c", x, w), einops.einsum(x, w, "b h w c, h w s2 -> b s2 c") ) assert_equal_numpy( einx.dot("b h w c, h w s2 -> b s2 c", x, w), einops.einsum(x, w, "b h w c, h w s2 -> b s2 c"), ) if importlib.util.find_spec("torch"): import torch def assert_equal_torch(a, b): assert a.shape == b.shape if "float" in str(a.dtype): assert torch.allclose(a, b) else: assert torch.all(a == b) def test_compare_torch(): # torch.gather torch.take_along_dim x = torch.rand(4, 128, 3) coords = torch.randint(4, (15, 128, 3)) assert_equal_torch( torch.gather(x, 0, coords), einx.get_at("[_] ..., i ... -> i ...", x, coords), ) assert_equal_torch( torch.take_along_dim(x, coords, dim=0), einx.get_at("[_] ..., i ... -> i ...", x, coords), ) x = torch.rand(4, 128, 3) coords = torch.randint(128, (4, 15, 3)) assert_equal_torch( torch.gather(x, 1, coords), einx.get_at("a [_] ..., a i ... -> a i ...", x, coords), ) assert_equal_torch( torch.take_along_dim(x, coords, dim=1), einx.get_at("a [_] ..., a i ... -> a i ...", x, coords), ) x = torch.rand(4, 128, 3) coords = torch.randint(3, (4, 128, 15)) assert_equal_torch( torch.gather(x, 2, coords), einx.get_at("a b [_] ..., a b i ... -> a b i ...", x, coords), ) assert_equal_torch( torch.take_along_dim(x, coords, dim=2), einx.get_at("a b [_] ..., a b i ... -> a b i ...", x, coords), ) # torch.index_select x = torch.rand(4, 128, 3) indices = torch.randint(4, (15,)) assert_equal_torch( torch.index_select(x, 0, indices), einx.get_at("[_] ..., i -> i ...", x, indices), ) x = torch.rand(4, 128, 3) indices = torch.randint(128, (15,)) assert_equal_torch( torch.index_select(x, 1, indices), einx.get_at("a [_] ..., i -> a i ...", x, indices), ) x = torch.rand(4, 128, 3) indices = torch.randint(3, (15,)) assert_equal_torch( torch.index_select(x, 2, indices), einx.get_at("a b [_] ..., i -> a b i ...", x, indices), ) # torch.take x = torch.rand(128) indices = torch.randint(128, (15, 16, 3)) assert_equal_torch( torch.take(x, indices), einx.get_at("[_], ... -> ...", x, indices), ) # x[y] x = torch.rand((4, 128, 3)) coords = ( torch.rand(( 15, 3, )) * torch.tensor(x.shape, dtype=torch.float32) ).to(torch.int32) assert_equal_torch( x[coords[..., 0], coords[..., 1], coords[..., 2]], einx.get_at("[a...], b... 
[3] -> b...", x, coords), ) if importlib.util.find_spec("tensorflow"): import os os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" import tensorflow as tf def assert_equal_tf(a, b): assert a.shape == b.shape if "float" in str(a.dtype): assert tf.reduce_all(tf.abs(a - b) < 1e-6) else: assert tf.reduce_all(a == b) def test_compare_tensorflow(): # tf.gather x = tf.random.uniform((4, 128, 3)) coords = tf.random.uniform((15,), maxval=4, dtype=tf.int32) assert_equal_tf( tf.gather(x, coords, axis=0), einx.get_at("[_] ..., i -> i ...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.random.uniform((15,), maxval=128, dtype=tf.int32) assert_equal_tf( tf.gather(x, coords, axis=1), einx.get_at("a [_] ..., i -> a i ...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.random.uniform((15,), maxval=3, dtype=tf.int32) assert_equal_tf( tf.gather(x, coords, axis=2), einx.get_at("a b [_] ..., i -> a b i ...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.random.uniform((4, 15), maxval=128, dtype=tf.int32) assert_equal_tf( tf.gather(x, coords, batch_dims=1, axis=1), einx.get_at("a [_] ..., a i -> a i ...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.random.uniform((4, 128, 15), maxval=3, dtype=tf.int32) assert_equal_tf( tf.gather(x, coords, batch_dims=2, axis=2), einx.get_at("a b [_] ..., a b i -> a b i ...", x, coords), ) # tf.gather_nd x = tf.random.uniform((4, 128, 3)) coords = tf.cast( tf.random.uniform((3,), dtype=tf.float32) * tf.cast(tf.shape(x), tf.float32), tf.int32 ) assert_equal_tf( tf.gather_nd(x, coords), einx.get_at("[a...], b... [3] -> b...", x, coords), ) assert_equal_tf( x[coords[..., 0], coords[..., 1], coords[..., 2]], einx.get_at("[a...], b... [3] -> b...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.cast( tf.random.uniform( ( 15, 3, ), dtype=tf.float32, ) * tf.cast(tf.shape(x), tf.float32), tf.int32, ) assert_equal_tf( tf.gather_nd(x, coords), einx.get_at("[a...], b... [3] -> b...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.cast( tf.random.uniform( ( 15, 15, 3, ), dtype=tf.float32, ) * tf.cast(tf.shape(x), tf.float32), tf.int32, ) assert_equal_tf( tf.gather_nd(x, coords), einx.get_at("[a...], b... [3] -> b...", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.cast( tf.random.uniform( ( 4, 2, ), dtype=tf.float32, ) * tf.cast(tf.shape(x)[1:], tf.float32), tf.int32, ) assert_equal_tf( tf.gather_nd(x, coords, batch_dims=1), einx.get_at("a [...], a [2] -> a", x, coords), ) x = tf.random.uniform((4, 128, 3)) coords = tf.cast( tf.random.uniform( ( 4, 15, 2, ), dtype=tf.float32, ) * tf.cast(tf.shape(x)[1:], tf.float32), tf.int32, ) assert_equal_tf( tf.gather_nd(x, coords, batch_dims=1), einx.get_at("a [...], a b [2] -> a b", x, coords), ) if importlib.util.find_spec("numpy"): import numpy as np def assert_equal_numpy(a, b): assert a.shape == b.shape if a.dtype.kind in "f": assert np.allclose(a, b) else: assert np.all(a == b) def test_compare_numpy(): # np.matmul x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(3, 4)) assert_equal_numpy( np.matmul(x, y), einx.dot("a [b], [b] c -> a c", x, y), ) x = np.random.uniform(size=(2,)) y = np.random.uniform(size=(2,)) assert_equal_numpy( np.matmul(x, y), einx.dot("[a], [a] ->", x, y), ) x = np.random.uniform(size=(16, 2, 3)) y = np.random.uniform(size=(16, 3, 4)) assert_equal_numpy( np.matmul(x, y), einx.dot("... a [b], ... [b] c -> ... 
a c", x, y), ) x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(3,)) assert_equal_numpy( np.matmul(x, y), einx.dot("... [b], [b] -> ...", x, y), ) # np.dot x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(3,)) assert_equal_numpy( np.dot(x, y), einx.dot("... [b], [b] -> ...", x, y), ) x = np.random.uniform(size=(2,)) y = np.random.uniform(size=(2,)) assert_equal_numpy( np.dot(x, y), einx.dot("[a], [a] ->", x, y), ) x = np.random.uniform(size=(5, 5, 2, 3)) y = np.random.uniform(size=(5, 5, 3, 4)) assert_equal_numpy( np.dot(x, y), einx.dot("x... [b], y... [b] c -> x... y... c", x, y), ) # np.tensordot x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(3, 4)) assert_equal_numpy( np.tensordot(x, y, axes=1), einx.dot("a [b], [b] c -> a c", x, y), ) x = np.random.uniform(size=(2, 3, 4)) y = np.random.uniform(size=(5, 4, 6)) assert_equal_numpy( np.tensordot(x, y, axes=([2], [1])), einx.dot("a b [c], d [c] e -> a b d e", x, y), ) # np.inner x = np.random.uniform(size=(2,)) y = np.random.uniform(size=(2,)) assert_equal_numpy( np.inner(x, y), einx.dot("x... [a], y... [a] -> x... y...", x, y), ) x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(4, 3)) assert_equal_numpy( np.inner(x, y), einx.dot("x... [a], y... [a] -> x... y...", x, y), ) # np.multiply x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(2, 3)) assert_equal_numpy( np.multiply(x, y), einx.multiply("a b, a b -> a b", x, y), ) # np.outer x = np.random.uniform(size=(2,)) y = np.random.uniform(size=(3,)) assert_equal_numpy( np.outer(x, y), einx.multiply("a, b -> a b", x, y), ) # np.kron x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(4, 5)) assert_equal_numpy( np.kron(x, y), einx.multiply("a..., b... -> (a b)...", x, y), ) # np.flip x = np.random.uniform(size=(2, 3)) assert_equal_numpy( np.flip(x, axis=0), einx.flip("[a] b", x), ) x = np.random.uniform(size=(2, 3)) assert_equal_numpy( np.flip(x, axis=1), einx.flip("a [b]", x), ) # np.fliplr x = np.random.uniform(size=(2, 3)) assert_equal_numpy( np.fliplr(x), einx.flip("a [b]", x), ) # np.flipud x = np.random.uniform(size=(2, 3)) assert_equal_numpy( np.flipud(x), einx.flip("[a] b", x), ) if importlib.util.find_spec("scipy"): import numpy as np import scipy.linalg def assert_equal_numpy(a, b): assert a.shape == b.shape if a.dtype.kind in "f": assert np.allclose(a, b) else: assert np.all(a == b) def test_compare_scipy(): # scipy.linalg.khatri_rao x = np.random.uniform(size=(2, 3)) y = np.random.uniform(size=(5, 3)) assert_equal_numpy( scipy.linalg.khatri_rao(x, y), einx.multiply("a c, b c -> (a b) c", x, y), ) python-einx-0.3.0/test/test_experimental.py000066400000000000000000000041241505216034200210600ustar00rootroot00000000000000import importlib import einx import numpy as np if importlib.util.find_spec("jax"): import jax import jax.numpy as jnp from jax.sharding import Mesh def assert_sharding(x, mesh=None, partition=None): assert {**x.sharding.mesh.shape} == mesh assert tuple(x.sharding.spec) == partition def test_sharding(): mesh24 = Mesh(np.asarray(jax.devices("cpu")).reshape(2, 4), axis_names=("d1", "d2")) mesh42 = Mesh(np.asarray(jax.devices("cpu")).reshape(4, 2), axis_names=("d1", "d2")) mesh4 = Mesh(np.asarray(jax.devices("cpu"))[:4], axis_names=("d1",)) # Pass mesh=jax.devices("cpu") instead of mesh=None since we cannot set # global device to cpu here x = jnp.ones((128, 64)) assert_sharding( einx.experimental.shard("([d1] a) b", x, mesh=jax.devices("cpu")), {"d1": 8}, ("d1",) ) assert_sharding( 
einx.experimental.shard("([d1] a) ([d2] b)", x, d2=2, mesh=jax.devices("cpu")), {"d1": 4, "d2": 2}, ("d1", "d2"), ) assert_sharding( einx.experimental.shard("([batch] _) ...", x, d2=2, mesh=jax.devices("cpu")), {"batch": 8}, ("batch",), ) assert_sharding( einx.experimental.shard("([d1] a) ([d2] b)", x, mesh=mesh24), {"d1": 2, "d2": 4}, ("d1", "d2"), ) assert_sharding(einx.experimental.shard("([d1] a) b", x, mesh=mesh4), {"d1": 4}, ("d1",)) assert_sharding( einx.experimental.shard("b ([d1] a)", x, mesh=mesh4), {"d1": 4}, ( None, "d1", ), ) assert_sharding( einx.experimental.shard("a ([d1] b)", x, mesh=mesh42), {"d1": 4, "d2": 2}, ( None, "d1", ), ) x = jnp.ones((4, 1024, 1024)) assert_sharding( einx.experimental.shard("a ([d2] b) ([d1] c)", x, mesh=mesh42), {"d1": 4, "d2": 2}, (None, "d2", "d1"), ) python-einx-0.3.0/test/test_nn.py000066400000000000000000000245061505216034200170040ustar00rootroot00000000000000import einx import importlib import pytest import numpy as np from functools import partial norms = [ ("[b...] c", {}), ("b [s...] (g [c])", {"g": 2}), ("b [s...] c", {}), ("b... [c]", {}), ("b [s...] ([g] c)", {"g": 2}), ] if importlib.util.find_spec("torch"): import torch import einx.nn.torch if "compiler" in dir(torch): compiler = torch.compiler else: import torch._dynamo as compiler def test_torch_linear(): compiler.reset() x = torch.zeros((4, 128, 128, 3)) layer = einx.nn.torch.Linear("b... [c1->c2]", c2=32) assert layer.forward(x).shape == (4, 128, 128, 32) layer = torch.compile(layer) assert layer.forward(x).shape == (4, 128, 128, 32) @pytest.mark.parametrize("expr_kwargs", norms) @pytest.mark.parametrize("mean", [True, False]) @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("decay_rate", [None, 0.9]) def test_torch_norm(expr_kwargs, mean, scale, decay_rate): compiler.reset() expr, kwargs = expr_kwargs x = torch.zeros((4, 128, 128, 32)) layer = einx.nn.torch.Norm(expr, mean=mean, scale=scale, decay_rate=decay_rate, **kwargs) layer.train() assert layer.forward(x).shape == (4, 128, 128, 32) layer.eval() assert layer.forward(x).shape == (4, 128, 128, 32) layer = torch.compile(layer, fullgraph=True) layer.train() assert layer.forward(x).shape == (4, 128, 128, 32) layer.eval() assert layer.forward(x).shape == (4, 128, 128, 32) def test_torch_dropout(): compiler.reset() x = torch.zeros((4, 128, 128, 3)) layer = einx.nn.torch.Dropout("[b] ... [c]", drop_rate=0.2) layer.train() assert layer.forward(x).shape == (4, 128, 128, 3) layer = torch.compile(layer) assert layer.forward(x).shape == (4, 128, 128, 3) layer = einx.nn.torch.Dropout("[b] ... [c]", drop_rate=0.2) layer.eval() assert layer.forward(x).shape == (4, 128, 128, 3) layer = torch.compile(layer) assert layer.forward(x).shape == (4, 128, 128, 3) if importlib.util.find_spec("haiku"): import haiku as hk import jax.numpy as jnp import jax import einx.nn.haiku def test_haiku_linear(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(42) def model(x): return einx.nn.haiku.Linear("b... 
[c1->c2]", c2=32)(x) model = hk.transform_with_state(model) params, state = model.init(rng=rng, x=x) y, state = jax.jit(model.apply)(params=params, state=state, x=x, rng=rng) assert y.shape == (4, 128, 128, 32) @pytest.mark.parametrize("expr_kwargs", norms) @pytest.mark.parametrize("mean", [True, False]) @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("decay_rate", [None, 0.9]) def test_haiku_norm(expr_kwargs, mean, scale, decay_rate): expr, kwargs = expr_kwargs x = jnp.zeros((4, 128, 128, 32)) rng = jax.random.PRNGKey(42) def model(x, training): return einx.nn.haiku.Norm( expr, mean=mean, scale=scale, decay_rate=decay_rate, **kwargs )(x, training) model = hk.transform_with_state(model) params, state = model.init(rng=rng, x=x, training=True) y, state = jax.jit(partial(model.apply, training=False))( params=params, state=state, x=x, rng=rng ) assert y.shape == (4, 128, 128, 32) y, state = jax.jit(partial(model.apply, training=True))( params=params, state=state, x=x, rng=rng ) assert y.shape == (4, 128, 128, 32) def test_haiku_dropout(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(42) def model(x, training): return einx.nn.haiku.Dropout("[b] ... [c]", drop_rate=0.2)(x, training=training) model = hk.transform_with_state(model) params, state = model.init(rng=rng, x=x, training=True) y, state = jax.jit(partial(model.apply, training=True))( params=params, state=state, x=x, rng=rng ) assert y.shape == (4, 128, 128, 3) y, state = jax.jit(partial(model.apply, training=False))( params=params, state=state, x=x, rng=rng ) assert y.shape == (4, 128, 128, 3) if importlib.util.find_spec("flax"): import flax.linen as nn import jax.numpy as jnp import jax import flax import einx.nn.flax def test_flax_linear(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(0) model = einx.nn.flax.Linear("b... [c1->c2]", c2=32) params = model.init(rng, x) y = jax.jit(model.apply)(params, x=x) assert y.shape == (4, 128, 128, 32) @pytest.mark.parametrize("expr_kwargs", norms) @pytest.mark.parametrize("mean", [True, False]) @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("decay_rate", [None, 0.9]) def test_flax_norm(expr_kwargs, mean, scale, decay_rate): expr, kwargs = expr_kwargs x = jnp.zeros((4, 128, 128, 32)) rng = jax.random.PRNGKey(42) model = einx.nn.flax.Norm(expr, mean=mean, scale=scale, decay_rate=decay_rate, **kwargs) params = model.init(rng, x, training=True) state, params = flax.core.pop(params, "params") y, state = jax.jit(partial(model.apply, training=False, mutable=list(state.keys())))( {"params": params, **state}, x=x ) assert y.shape == (4, 128, 128, 32) y, state = jax.jit(partial(model.apply, training=True, mutable=list(state.keys())))( {"params": params, **state}, x=x ) assert y.shape == (4, 128, 128, 32) def test_flax_dropout(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(0) model = einx.nn.flax.Dropout("[b] ... [c]", drop_rate=0.2) params = model.init({"params": rng, "dropout": rng}, x, training=True) y = jax.jit(partial(model.apply, training=True))(params, x=x, rngs={"dropout": rng}) assert y.shape == (4, 128, 128, 3) y = jax.jit(partial(model.apply, training=False))(params, x=x, rngs={"dropout": rng}) assert y.shape == (4, 128, 128, 3) if importlib.util.find_spec("equinox"): import equinox as eqx import jax.numpy as jnp import einx.nn.equinox import jax def test_equinox_linear(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(0) layer = einx.nn.equinox.Linear("b... 
[c1->c2]", c2=32) assert layer(x, rng=rng).shape == (4, 128, 128, 32) assert layer(x).shape == (4, 128, 128, 32) layer = eqx.nn.inference_mode(layer) assert layer(x).shape == (4, 128, 128, 32) assert layer(x).shape == (4, 128, 128, 32) @pytest.mark.parametrize("expr_kwargs", norms) @pytest.mark.parametrize("mean", [True, False]) @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("decay_rate", [None]) def test_equinox_norm(expr_kwargs, mean, scale, decay_rate): expr, kwargs = expr_kwargs x = jnp.zeros((4, 128, 128, 32)) for expr, kwargs in norms: for mean in [True, False]: for scale in [True, False]: for decay_rate in [ None ]: # Stateful layers are currently not supported for Equinox layer = einx.nn.equinox.Norm( expr, mean=mean, scale=scale, decay_rate=decay_rate, **kwargs ) assert layer(x).shape == (4, 128, 128, 32) assert layer(x).shape == (4, 128, 128, 32) layer = eqx.nn.inference_mode(layer) assert layer(x).shape == (4, 128, 128, 32) assert layer(x).shape == (4, 128, 128, 32) def test_equinox_dropout(): x = jnp.zeros((4, 128, 128, 3)) rng = jax.random.PRNGKey(0) layer = einx.nn.equinox.Dropout("[b] ... [c]", drop_rate=0.2) assert layer(x, rng=rng).shape == (4, 128, 128, 3) assert layer(x, rng=rng).shape == (4, 128, 128, 3) layer = eqx.nn.inference_mode(layer) assert layer(x, rng=rng).shape == (4, 128, 128, 3) assert layer(x, rng=rng).shape == (4, 128, 128, 3) if importlib.util.find_spec("keras"): import keras version = tuple(int(i) for i in keras.__version__.split(".")[:2]) if version >= (3, 0): import tensorflow as tf import einx.nn.keras def test_keras_linear(): x = tf.zeros((4, 128, 128, 3)) layer = einx.nn.keras.Linear("b... [c1->c2]", c2=32) model = keras.Sequential([layer]) assert model(x, training=True).shape == (4, 128, 128, 32) assert model(x, training=True).shape == (4, 128, 128, 32) assert model(x, training=False).shape == (4, 128, 128, 32) assert model(x, training=False).shape == (4, 128, 128, 32) @pytest.mark.parametrize("expr_kwargs", norms) @pytest.mark.parametrize("mean", [True, False]) @pytest.mark.parametrize("scale", [True, False]) @pytest.mark.parametrize("decay_rate", [None, 0.9]) def test_keras_norm(expr_kwargs, mean, scale, decay_rate): expr, kwargs = expr_kwargs x = tf.zeros((4, 128, 128, 32)) layer = einx.nn.keras.Norm( expr, mean=mean, scale=scale, decay_rate=decay_rate, **kwargs ) model = keras.Sequential([layer]) assert model(x, training=True).shape == (4, 128, 128, 32) assert model(x, training=True).shape == (4, 128, 128, 32) assert model(x, training=False).shape == (4, 128, 128, 32) assert model(x, training=False).shape == (4, 128, 128, 32) def test_keras_dropout(): x = tf.zeros((4, 128, 128, 3)) layer = einx.nn.keras.Dropout("[b] ... 
[c]", drop_rate=0.2) model = keras.Sequential([layer]) assert model(x, training=True).shape == (4, 128, 128, 3) assert model(x, training=True).shape == (4, 128, 128, 3) assert model(x, training=False).shape == (4, 128, 128, 3) assert model(x, training=False).shape == (4, 128, 128, 3) python-einx-0.3.0/test/test_shapes.py000066400000000000000000000755201505216034200176560ustar00rootroot00000000000000import einx import pytest import numpy as np import conftest @pytest.mark.parametrize("test", conftest.tests) def test_shape_rearrange(test): einx, backend, setup = test x = setup.full((10, 20, 1)) assert einx.rearrange("a b c -> (a b) c 1", x).shape == (200, 1, 1) assert einx.rearrange("a b c -> (a b) c 1", x).shape == (200, 1, 1) assert einx.rearrange("a b c -> (a b) c 1 1 1", x).shape == (200, 1, 1, 1, 1) with pytest.raises(Exception): einx.rearrange("a a b c -> (a b) c 1", x) einx.rearrange("a (a + b) c -> (a b) c 1", x) x = setup.full((10, 20, 20, 2)) assert einx.rearrange("b s... c -> b (s...) c", x).shape == (10, 400, 2) assert einx.rearrange("b ... c -> b (...) c", x).shape == (10, 400, 2) assert einx.rearrange("b (s...) (r...) c -> b (s...) r... c", x, r=(10, 2)).shape == ( 10, 20, 10, 2, 2, ) assert einx.rearrange("b s... c x... -> x... b (s...) c", x, x=()).shape == (10, 400, 2) if backend.name != "torch": assert einx.rearrange("1 -> (x)", [1], x=10, backend=backend).shape == (10,) assert einx.rearrange("1 -> (x y)", [1], x=10, y=20, backend=backend).shape == (200,) assert einx.rearrange("1 -> (x)", setup.to_tensor([1]), x=10).shape == (10,) assert einx.rearrange("1 -> (x y)", setup.to_tensor([1]), x=10, y=20).shape == (200,) x = setup.full((1,)) assert einx.rearrange("1 -> (x)", x, x=10).shape == (10,) assert einx.rearrange("1 -> (x y)", x, x=10, y=20).shape == (200,) x = setup.full((10, 20, 1)) assert einx.rearrange("a b c d... -> a b c (d...)", x).shape == (10, 20, 1, 1) x = setup.full((10, 20, 1, 2)) assert einx.rearrange("a (b...) c d -> a (b... c) d", x).shape == (10, 20, 2) x = setup.full((10, 20, 1, 2, 3)) assert einx.rearrange("a (b... c) d... e -> a (b...) (c d...) e", x, b=[2, 5]).shape == ( 10, 10, 4, 3, ) x = setup.full((10, 20, 6, 24)) assert einx.rearrange("a b (c...) (d...) -> a c... b d...", x, c=[2, 3], d=[4, 6]).shape == ( 10, 2, 3, 20, 4, 6, ) x = setup.full((10, 10)) assert einx.rearrange("a... -> 1 (a...)", x).shape == (1, 100) x = setup.full((10, 20, 5)) assert einx.rearrange("(s1...) (s2...) h -> 1 h (s1...) (s2...)", x).shape == (1, 5, 10, 20) x = setup.full((10, 20)) with pytest.raises(Exception): assert einx.rearrange("(s1...) (s2...) h -> 1 h (s1...) (s2...)", x).shape == (1, 5, 10, 20) x = setup.full((10, 20, 1)) with pytest.raises(Exception): einx.rearrange("a b c -> (a b) c d", x) x = setup.full((10, 20, 1)) with pytest.raises(Exception): einx.rearrange("a b... c... -> a (b...) c...", x) with pytest.raises(Exception): einx.rearrange("a b... -> a b", x) x = setup.full((1, 10, 20, 6)) assert einx.rearrange("a (b...) (e f...) (d c) -> a d (b...) (e f...) c", x, d=2).shape == ( 1, 2, 10, 20, 3, ) x = setup.full((1, 10, 20, 6, 7, 12)) assert einx.rearrange( "a b c d... (e f...) -> a b c d... ((e 2 2) f...)", x, f=[2, 2] ).shape == ( 1, 10, 20, 6, 7, 12 * 2 * 2, ) x = setup.full((10, 20, 3)) assert einx.rearrange("(s s2)... c -> s... s2... c", x, s2=(2, 2)).shape == (5, 10, 2, 2, 3) assert einx.rearrange("(s s2)... c -> s... s2... 
c", x, s2=2).shape == (5, 10, 2, 2, 3) x = setup.full((10, 10, 10)) assert einx.rearrange("(a b) (c d) (e f) -> a (b c d e) f", x, a=2, f=2).shape == (2, 250, 2) x = setup.full((10,)) y = setup.full((20,)) assert einx.rearrange("a, b -> a + b", x, y).shape == (30,) assert einx.rearrange("a, b -> b + a", x, y).shape == (30,) assert einx.rearrange("a, b -> a b (1 + 1)", x, y).shape == (10, 20, 2) assert [x.shape for x in einx.rearrange("(a + b) -> a, b 1", x, a=4)] == [(4,), (6, 1)] with pytest.raises(Exception): einx.rearrange("a, b -> a b (1 + 1)", x) einx.rearrange("(a + b) -> a b (1 + 1)", x) assert einx.rearrange("a, (b c) -> c (b + a)", x, y, c=2).shape == (2, 20) with pytest.raises(Exception): assert einx.rearrange("a, -> (a +)", x, 1).shape == (11,) x = setup.full((10, 10)) assert einx.rearrange("b c, 1 -> b (c + 1)", x, [42]).shape == (10, 11) assert einx.rearrange("b c, -> b (c + 1)", x, 42).shape == (10, 11) s = setup.full(()) assert einx.rearrange("b c, -> b (c + 1)", x, s).shape == (10, 11) assert einx.arange("c", c=2, backend=backend).shape == (2,) assert einx.arange("c... [2]", c=(4, 3), backend=backend).shape == (4, 3, 2) assert einx.arange("c... [l]", c=(4, 3), backend=backend).shape == (4, 3, 2) with pytest.raises(Exception): einx.arange("c... [3]", c=(4, 3), backend=backend) assert einx.arange("c1 c2 -> [l] c2 c1", c1=4, c2=3, backend=backend).shape == (2, 3, 4) assert einx.arange("(c...) [2]", c=(4, 3), backend=backend).shape == (4 * 3, 2) assert einx.arange("(c... [l])", c=(4, 3), backend=backend).shape == (4 * 3 * 2,) assert einx.arange("c1 c2 -> ([l] c2) c1", c1=4, c2=3, backend=backend).shape == (2 * 3, 4) x = setup.full((10, 20), dtype="bool") y = setup.full((4, 10, 20, 3)) x, y = einx.rearrange("h w, b h w c -> 1 h w 1, b h w c", x, y) x = np.zeros((5, 4)) x = einx.rearrange("(a + b + c) d -> b d, (a + c) d", x, a=1, b=2) assert x[0].shape == (2, 4) assert x[1].shape == (3, 4) x = np.zeros((5, 4)) x = einx.rearrange("(a + b + c) d -> (a + c) d, b d", x, a=1, b=2) assert x[0].shape == (3, 4) assert x[1].shape == (2, 4) @pytest.mark.parametrize("test", conftest.tests) def test_shape_dot(test): einx, backend, setup = test if backend.name == "mlx": pytest.xfail(reason="Backend does not support einsum") x = setup.full((10, 10)) assert einx.dot("a..., a... -> 1", x, x).shape == (1,) assert einx.dot("[a...], [a...] -> 1", x, x).shape == (1,) with pytest.raises(Exception): einx.dot("a..., [a]... 
-> 1", x, x) x = setup.full((10, 20, 1)) y = setup.full((10, 24)) assert einx.dot("a b c, a d -> 1 b c d", x, y).shape == (1, 20, 1, 24) assert einx.dot("[a] b c, [a] d -> 1 b c d", x, y).shape == (1, 20, 1, 24) assert einx.dot("a b c, a d -> 1 b c d", x, setup.full, d=24).shape == (1, 20, 1, 24) x = setup.full((10, 20, 1)) with pytest.raises(Exception): einx.dot("a b c -> a b c", x, x) with pytest.raises(Exception): einx.dot("a b c, a -> a b c", x) x = setup.full((10, 20)) y = setup.full((20, 30)) assert einx.dot("a [b] -> a [c]", x, y).shape == (10, 30) assert einx.dot("a b, b c -> a c", x, y).shape == (10, 30) assert einx.dot("a [b], [b] c -> a c", x, y).shape == (10, 30) assert einx.dot("a [b->c]", x, y).shape == (10, 30) assert einx.dot("a [b...->c]", x, y).shape == (10, 30) x = setup.full((10, 20)) y = setup.full((10, 20, 30)) assert einx.dot("a b, a b c -> a c", x, y).shape == (10, 30) assert einx.dot("[a b] -> [a c]", x, y).shape == (10, 30) assert einx.dot("[a b->a c]", x, y).shape == (10, 30) x = setup.full((10,)) y = setup.full((30,)) assert einx.dot("a, a ->", x, x).shape == () assert einx.dot("[a->]", x, x).shape == () assert einx.dot("a, c -> a c", x, y).shape == (10, 30) assert einx.dot("a [->c]", x, y).shape == (10, 30) assert einx.dot("a [b...->c]", x, y).shape == (10, 30) x = setup.full((4, 128, 128, 16)) assert einx.dot("b s... [c1->c2]", x, setup.full, c2=32).shape == (4, 128, 128, 32) assert einx.dot("b [s...->s2] c", x, setup.full, s2=32).shape == (4, 32, 16) w = setup.full((2, 2, 16, 32)) assert einx.dot("b (s [s2->])... [c1->c2]", x, w, s2=2, c2=32).shape == (4, 64, 64, 32) x = setup.full((4, 16, 16, 16)) def w(shape): return setup.full(shape) assert einx.dot("b [(s s2)->s]... [c1->c2]", x, w, s2=4, c2=4).shape == (4, 4, 4, 4) assert einx.dot("b (s [s2->])... [c1->c2]", x, w, s2=4, c2=4).shape == (4, 4, 4, 4) s = setup.full(()) x = setup.full((10, 10)) y = setup.full((10,)) assert einx.dot("[->]", s, s, backend=backend).shape == () assert einx.dot("a [->]", y, s).shape == (10,) if backend.name not in {"torch"}: assert einx.dot("[->]", 1, 1, backend=backend).shape == () assert einx.dot("a [->]", y, 1).shape == (10,) assert einx.dot("a [b->]", x, y).shape == (10,) assert einx.dot("a [->b]", y, y).shape == (10, 10) x = setup.full((11, 10)) y = setup.full((11,)) assert einx.dot("... b, ... -> b", x, y).shape == (10,) assert einx.dot("[...] b -> b", x, y).shape == (10,) x = setup.full((10,)) y = setup.full(()) assert einx.dot("... b, ... -> b", x, y).shape == (10,) assert einx.dot("[...] 
b -> b", x, y).shape == (10,) k = setup.full((2, 4, 100)) v = setup.full((2, 4, 100)) with pytest.raises(Exception): einx.dot("b t (h ck), b t (h cv) -> b h ck cv", k, v, h=32, graph=True) @pytest.mark.parametrize("test", conftest.tests) def test_shape_reduce(test): einx, backend, setup = test x = setup.full((10, 10)) assert einx.reduce("a b -> 1 a", x, op=backend.mean).shape == (1, 10) op = lambda tensor, axis: einx.jit(lambda tensor, backend: backend.mean(tensor, axis))(tensor) assert einx.reduce("a b -> 1 a", x, op=op).shape == ( 1, 10, ) assert einx.mean("a b -> 1 a", x).shape == (1, 10) assert einx.mean("[a] b", x).shape == (10,) assert einx.mean("[a] b -> 1 b", x).shape == (1, 10) x = setup.full((10, 10, 10)) with pytest.raises(Exception): einx.sum("a [b] c -> a b", x) x = setup.full((10, 3, 1)) assert einx.mean("(a [b]) c 1", x, b=2).shape == (5, 3, 1) assert einx.mean("([a b]) c 1", x).shape == (1, 3, 1) assert einx.mean("[(a b)] c 1", x).shape == (3, 1) assert einx.mean("[(a...)] c 1", x).shape == (3, 1) assert einx.mean("(b... [a...]) c 1", x, b=(1, 1)).shape == (1, 3, 1) x = setup.full((1, 10, 3, 2)) assert einx.mean("1 [a...] b", x).shape == (1, 2) assert einx.mean("1 [a]... b", x).shape == (1, 2) assert einx.mean("1 ([a])... b", x).shape == (1, 1, 1, 2) assert einx.mean("1 [a]... b", x, keepdims=True).shape == (1, 1, 1, 2) assert einx.mean("1 [a...] b", x, keepdims=True).shape == (1, 1, 2) x = setup.full((16, 1, 20, 30, 64)) assert einx.mean("(b rg) pv [s...] c", x).shape == (16, 1, 64) x = setup.full((16, 16, 32)) bias = setup.full((4,)) assert einx.add("b... (g [c])", x, bias).shape == (16, 16, 32) assert einx.logsumexp("a [...]", x).shape == (16,) if backend.name != "torch": assert einx.logsumexp("[a]", [0.0, 1.0], backend=backend).shape == () assert einx.logsumexp("[a] 1", [[0.0], [1.0]], backend=backend).shape == (1,) assert einx.logsumexp("[a]", [0.0] * 10, backend=backend).shape == () with pytest.raises(Exception): einx.logsumexp("a", [0.0, [1.0]], backend=backend) x = setup.full((16, 15)) assert einx.sum("[b] a []", x).shape == (15,) assert einx.sum("[b] a [...]", x).shape == (15,) assert einx.sum("b [p] -> b p2", x, p2=7).shape == (16, 7) @pytest.mark.parametrize("test", conftest.tests) def test_shape_elementwise(test): einx, backend, setup = test x = setup.full((10, 5, 1)) y = setup.full((13,)) assert einx.elementwise("a b 1, l -> b l a 1", x, y, op=backend.add).shape == (5, 13, 10, 1) assert einx.elementwise("a b 1, l -> b l a 1", x, y, op=lambda x, y: x + y).shape == ( 5, 13, 10, 1, ) assert einx.add("a b 1, l -> b l a 1", x, y).shape == (5, 13, 10, 1) assert einx.add("a b 1, l -> a b l", x, y).shape == (10, 5, 13) x = setup.full((10, 10)) y = setup.full((10,)) assert einx.add("a, a b", y, x).shape == (10, 10) assert einx.add("a b, a", x, y).shape == (10, 10) assert einx.add("a b, b", x, y).shape == (10, 10) assert einx.add("a [b]", x, y).shape == (10, 10) assert einx.add("a b, a b", x, x).shape == (10, 10) assert einx.add("a b, ", x, 1).shape == (10, 10) assert einx.add(", a b", 1, x).shape == (10, 10) assert einx.add("a b, 1", x, [1]).shape == (10, 10) assert einx.add("1, a b", [1], x).shape == (10, 10) with pytest.raises(Exception): einx.add("a a, a -> a a", x, y) assert einx.add("a b, a b", x, setup.full).shape == (10, 10) assert einx.add("a, a", y, y).shape == (10,) assert einx.add("[a]", y, y).shape == (10,) assert einx.add("b, -> b 3", y, 1).shape == (10, 3) x = setup.full((2, 3)) y = setup.full((10,)) with pytest.raises(Exception): einx.add("a b, c", x, y) 
x = setup.full((3, 128, 196, 64)) y = setup.full((3, 4, 16)) assert einx.add("b h w (g c), b (g) c -> b h w (g c)", x, y).shape == (3, 128, 196, 64) x = setup.full((10, 20)) y = setup.full((10, 20, 30)) assert einx.add("a b, a b c -> a b c", x, y).shape == (10, 20, 30) assert einx.add("(a [1])...", x, setup.full).shape == (10, 20) x = setup.full((10, 20)) y = setup.full((30, 20)) with pytest.raises(Exception): einx.subtract("ba c, i c -> i ba", x, y) ops = [ ("add", ("float32", "float32")), ("subtract", ("float32", "float32")), ("multiply", ("float32", "float32")), ("true_divide", ("float32", "float32")), ("floor_divide", ("float32", "float32")), ("divide", ("float32", "float32")), ("logical_and", ("bool", "bool")), ("logical_or", ("bool", "bool")), ("where", ("bool", "float32", "float32")), ("less", ("float32", "float32")), ("less_equal", ("float32", "float32")), ("greater", ("float32", "float32")), ("greater_equal", ("float32", "float32")), ("equal", ("float32", "float32")), ("not_equal", ("float32", "float32")), ("maximum", ("float32", "float32")), ("minimum", ("float32", "float32")), ] def create_scalar(dtype): if dtype == "float32": return 1.0 elif dtype == "bool": return True else: assert False for op, dtypes in ops: tensor_args = [setup.full((10,), dtype=dtype, value=1) for dtype in dtypes] scalar_args = [create_scalar(dtype) for dtype in dtypes] for scalar_index in range(len(dtypes)): args = [ scalar_args[scalar_index] if i == scalar_index else tensor_args[i] for i in range(len(dtypes)) ] expr = ( ", ".join(["" if i == scalar_index else "a" for i in range(len(dtypes))]) + " -> a" ) assert getattr(einx, op)(expr, *args).shape == (10,) @pytest.mark.parametrize("test", conftest.tests) def test_shape_vmap(test): einx, backend, setup = test if backend.name in {"mlx", "dask", "tinygrad"}: pytest.xfail(reason="Backend does not fully support vmap") x = setup.full((13,)) assert einx.vmap("b -> b [3]", x, op=lambda x: x + setup.full((3,))).shape == (13, 3) with pytest.raises(Exception): einx.vmap("b -> [b] 3", x, op=lambda x: x + setup.full((3,))) with pytest.raises(Exception): einx.vmap("b -> b 3", x, op=einx.trace(lambda x: x + setup.full((3,)))) with pytest.raises(Exception): einx.vmap("b -> b 3", x, op=lambda x: x + setup.full((3,))) x = setup.full((4, 13, 2)) y = setup.full((13, 4, 5, 5)) def f(x, y): assert x.shape == (4, 2) assert y.shape == (4, 5) x = x[:, 0] + y[:, 0] return einx.rearrange("a -> a 15", x) assert einx.vmap("[a] b [e], b [a] c [d] -> [a] b [g] c", x, y, op=f, g=15).shape == ( 4, 13, 15, 5, ) assert einx.vmap("[a] b [e], b [a] c [d] -> [a] b ([g] c)", x, y, op=f, g=15).shape == ( 4, 13, 15 * 5, ) with pytest.raises(Exception): einx.vmap("[a] b [e], b [a] c [d] -> [g] b [a] c", x, y, op=f, g=15) with pytest.raises(Exception): def f(x, y): assert x.shape == (4, 2) assert y.shape == (4, 5) x = x[:, 0] + y[:, 0] return einx.rearrange("a -> a 16", x) einx.vmap("[a] b [e], b [a] c [d] -> [a] b [g] c", x, y, op=f, g=15) x = setup.full((4, 16)) y = setup.full((16, 32)) op = lambda x, y: einx.jit(lambda x, y, backend: backend.sum(x * y))(x, y) assert einx.vmap("b [c1], [c1] c2 -> b c2", x, y, op=op).shape == ( 4, 32, ) x = setup.full((4,)) y = setup.full((16, 32)) assert einx.vmap("a, b c -> a b c", x, y, op=backend.add).shape == (4, 16, 32) func = lambda x: einx.jit(lambda x, backend: backend.stack([backend.mean(x), backend.max(x)]))( x ) # c -> 2 x = setup.full( ( 16, 64, 3, ), ) assert einx.vmap("b [c] a -> a b [2]", x, op=func).shape == (3, 16, 2) func = lambda x, y: 
einx.jit( lambda x, y, backend: backend.stack([backend.mean(x), backend.max(y)]) )(x, y) # c, d -> 2 x = setup.full((16, 64)) # b c y = setup.full((16, 72)) # b d assert einx.vmap("b [c], b [d] -> b [2]", x, y, op=func).shape == (16, 2) x = setup.full((16, 64, 3)) # b1 c b2 y = setup.full((3, 72)) # b2 d assert einx.vmap("b1 [c] b2, b2 [d] -> b2 [2] b1", x, y, op=func).shape == (3, 2, 16) @einx.trace def func(x): # (c d) -> 2 x = einx.vmap("([c] d) -> d", x, op=backend.mean, c=16) x = backend.max(x) return backend.stack([x, x]) x = setup.full((16, 64)) # b c assert einx.vmap("b ([c d]) -> b [2]", x, op=func, c=16).shape == (16, 2) assert einx.vmap("b ([c d]) -> b [2] 1", x, op=func, c=16).shape == (16, 2, 1) assert einx.vmap("b [(c d)->2]", x, op=func, c=16).shape == (16, 2) assert einx.vmap("b ([c d->2])", x, op=func, c=16).shape == (16, 2) with pytest.raises(Exception): einx.vmap("b ([c d]) -> [2]", x, op=func, c=16) @einx.trace def func(x): # c d -> 2 x = einx.vmap("[c] d -> d", x, op=backend.mean, c=16) x = backend.max(x) return backend.stack([x, x]) x = setup.full((16, 64)) # b c assert einx.vmap("b ([c d]) -> b [2]", x, op=func, c=16, flat=True).shape == (16, 2) assert einx.vmap("b ([c d]) -> b [2] 1", x, op=func, c=16, flat=True).shape == (16, 2, 1) assert einx.vmap("b [(c d)->2]", x, op=func, c=16, flat=True).shape == (16, 2) assert einx.vmap("b ([c d->2])", x, op=func, c=16, flat=True).shape == (16, 2) with pytest.raises(Exception): einx.vmap("b ([c d]) -> [2]", x, op=func, c=16, flat=True) op = lambda tensor, axis: einx.jit( lambda tensor, backend: backend.roll(tensor, axis=axis, shift=(2, 2)) )(tensor) with pytest.raises(Exception): einx.vmap_with_axis("a ([b c]) -> a ([b c])", x, op=op) assert einx.vmap_with_axis( "a ([b c]) -> a ([b c])", x, op=op, b=2, ).shape == ( 16, 64, ) @pytest.mark.parametrize("test", conftest.tests) def test_shape_index(test): einx, backend, setup = test if backend.name in {"mlx", "dask", "tinygrad"}: pytest.xfail(reason="Backend does not fully support vmap") coord_dtype = "int32" if backend.name != "torch" else "long" x = setup.full((4, 16, 16, 3)) y = setup.full((4, 128, 2), dtype=coord_dtype) y2 = setup.full((128, 4, 2), dtype=coord_dtype) z = setup.full((4, 128, 3)) assert einx.get_at("b [h w] c, b p [2] -> b p c", x, y).shape == (4, 128, 3) assert einx.get_at("b [h w] c, p b [2] -> b p c", x, y2).shape == (4, 128, 3) assert einx.get_at("b [h w] c, b p, b p -> b p c", x, y[..., 0], y[..., 1]).shape == (4, 128, 3) assert einx.get_at("b [h w] c, b (p [1]), b p -> b p c", x, y[..., 0], y[..., 1]).shape == ( 4, 128, 3, ) assert einx.get_at("b [h w] c, b p, p b -> b p c", x, y[..., 0], y2[..., 1]).shape == ( 4, 128, 3, ) assert einx.get_at("b [h w] c, p, p b -> b p c", x, y[0, ..., 0], y2[..., 1]).shape == ( 4, 128, 3, ) assert einx.get_at("b [h w] c, b (p [1]), p b -> b p c", x, y[..., 0], y2[..., 1]).shape == ( 4, 128, 3, ) assert einx.get_at( "b [h w] c, b p [2] -> b p c", x, lambda shape: setup.full(shape, dtype=coord_dtype, value=0), p=128, ).shape == (4, 128, 3) assert einx.get_at( "b [h w] c, b p [l] -> b p c", x, lambda shape: setup.full(shape, dtype=coord_dtype, value=0), p=128, ).shape == (4, 128, 3) assert einx.get_at("b [16 w] c, b p [2] -> b p c", x, y).shape == (4, 128, 3) assert einx.get_at("b [16 16] c, b p [2] -> b p c", x, y).shape == (4, 128, 3) assert einx.get_at("b [h w] c, p [2] -> b p c", x, y[0]).shape == (4, 128, 3) for op in [einx.set_at, einx.add_at, einx.subtract_at]: assert op("b [h w] c, b p [2], b p c -> b [h w] c", x, y, 
z).shape == (4, 16, 16, 3) assert op("b [h w] c, b p [2], b p c", x, y, z).shape == (4, 16, 16, 3) assert op("b [h w] c, b p, b p, b p c -> b [h w] c", x, y[..., 0], y[..., 1], z).shape == ( 4, 16, 16, 3, ) assert op("b [h w] c, b p, p b, b p c -> b [h w] c", x, y[..., 0], y2[..., 1], z).shape == ( 4, 16, 16, 3, ) assert op( "b [h w] c, b p, p b, p c -> b [h w] c", x, y[..., 0], y2[..., 1], z[0] ).shape == (4, 16, 16, 3) assert op( "b [h w] c, b p, p b, c -> b [h w] c", x, y[..., 0], y2[..., 1], z[0, 0] ).shape == (4, 16, 16, 3) assert op("b [h w] c, p [2], p c -> b [h w] c", x, y[0], z[0]).shape == (4, 16, 16, 3) assert op("b [h w] c, b p [2], b p c -> b h w c", x, y, z).shape == (4, 16, 16, 3) assert op("b [h w] c, b p [2], p c -> b h w c", x, y, z[0]).shape == (4, 16, 16, 3) assert op("b [h w] c, p [2], b p c -> b h w c", x, y[0], z).shape == (4, 16, 16, 3) assert op("b [h w] c, p [2], p c -> b h w c", x, y[0], z[0]).shape == (4, 16, 16, 3) x = setup.full((16, 4, 3, 16)) y = setup.full((2, 4, 128), dtype=coord_dtype, value=0) z = setup.full((3, 4, 128)) assert einx.get_at("[w] b c [h], [2] b p -> b p c", x, y).shape == (4, 128, 3) assert einx.get_at("[w] b c [h], [2] p -> b p c", x, y[:, 0]).shape == (4, 128, 3) for op in [einx.set_at, einx.add_at, einx.subtract_at]: assert op("[w] b c [h], [2] b p, c b p -> b [w h] c", x, y, z).shape == (4, 16, 16, 3) assert op("[w] b c [h], [2] p, c p -> b [w h] c", x, y[:, 0], z[:, 0]).shape == ( 4, 16, 16, 3, ) x = setup.full((16, 4, 3 * 16)) y = setup.full((2, 4, 128), dtype=coord_dtype) z = setup.full((3, 4, 128)) assert einx.get_at("[w] b (c [h]), [2] b p -> b p c", x, y, c=3).shape == (4, 128, 3) assert einx.get_at("[w] b (c [h]), [2] p -> b p c", x, y[:, 0], c=3).shape == (4, 128, 3) for op in [einx.set_at, einx.add_at, einx.subtract_at]: assert op("[w] b (c [h]), [2] b p, c b p -> b ([w h]) c", x, y, z).shape == (4, 256, 3) assert op("[w] b (c [h]), [2] p, c p -> b ([w h]) c", x, y[:, 0], z[:, 0]).shape == ( 4, 256, 3, ) x = setup.full((4, 16, 16, 3)) y = setup.full((4, 3, 4, 5, 2), dtype=coord_dtype) z = setup.full((4, 3, 4, 5, 3)) assert einx.get_at("b [h w] c, b p q r [2] -> b p q r c", x, y).shape == (4, 3, 4, 5, 3) assert einx.get_at("b [h w] c, p q r [2] -> b p q r c", x, y[0]).shape == (4, 3, 4, 5, 3) for op in [einx.set_at, einx.add_at, einx.subtract_at]: assert op("b [h w] c, b p q r [2], b p q r c -> b [h w] c", x, y, z).shape == (4, 16, 16, 3) assert op("b [h w] c, p q r [2], p q r c -> b [h w] c", x, y[0], z[0]).shape == ( 4, 16, 16, 3, ) x = setup.full((4, 1, 1, 3)) y = setup.full((4, 128, 2), dtype=coord_dtype) z = setup.full((4, 128, 3)) with pytest.raises(Exception): einx.get_at("b ([1 1]) c, b p [2] -> b p c", x, y) x = setup.full((4, 5, 6)) y = setup.full((4, 5), dtype=coord_dtype) assert einx.get_at("b t [d], b t -> b t", x, y).shape == (4, 5) assert einx.get_at("... [d], ... 
-> ...", x, y).shape == (4, 5) assert einx.get_at("b t [d], b (t [1]) -> b (t 1)", x, y).shape == (4, 5) with pytest.raises(Exception): einx.get_at("b t [d], b (t [1]) -> b (t [1])", x, y) x = setup.full((4, 128, 128, 3)) y = setup.full((4, 0, 2), dtype=coord_dtype) y2 = setup.full((4, 2), dtype=coord_dtype) z = setup.full((4, 0, 3)) z2 = setup.full((4, 3)) assert einx.set_at("b [h w] c, b p [2], b p c -> b [h w] c", x, y, z).shape == (4, 128, 128, 3) assert einx.set_at("b [h w] c, b p [2], b c -> b [h w] c", x, y, z2).shape == (4, 128, 128, 3) assert einx.set_at("b [h w] c, b [2], b p c -> b [h w] c", x, y, z2).shape == (4, 128, 128, 3) x = setup.full((4, 128, 16)) y = setup.full((4, 128), dtype=coord_dtype) z = setup.full((4, 128)) assert einx.get_at("b p [i,->]", x, y).shape == (4, 128) assert einx.set_at("b p [i,,->i]", x, y, z).shape == (4, 128, 16) consts = {"b": 4, "h": 16, "w": 16, "c": 3, "p": 128} def make_coords(shape): return setup.full(shape, dtype=coord_dtype) xs = ["([h] b) [w] c", "[h] c [w]", "[h w]"] ys = ["b (p [2])", "[2] p", "[2]"] ys2 = ["p b", "p", "[1]"] zs = ["b p c", "c (p b)"] for x in xs: for z in zs: if not (z == "c (p b)" and getattr(einx, "name", "") == "torch.compile"): shape = einx.add(f"{z}, ", setup.full, 0, **consts, backend=backend).shape for y in ys: assert ( einx.get_at( f"{x}, {y} -> {z}", setup.full, make_coords, **consts, backend=backend, ).shape == shape ) for y1 in ys2: for y2 in ys2: assert ( einx.get_at( f"{x}, {y1}, {y2} -> {z}", setup.full, make_coords, make_coords, **consts, backend=backend, ).shape == shape ) for x in xs: shape = einx.add( f"{x.replace('[', '').replace(']', '')}, ", setup.full, 0, **consts, backend=backend, ).shape for z in zs: z_axes = {a for a in z if a.isalpha()} for y in ys: if all(a in (x + y) for a in z_axes): assert ( einx.set_at( f"{x}, {y}, {z} -> {x}", setup.full, make_coords, setup.full, **consts, backend=backend, ).shape == shape ) for y1 in ys2: for y2 in ys2: if all(a in (x + y1 + y2) for a in z_axes): assert ( einx.set_at( f"{x}, {y1}, {y2}, {z} -> {x}", setup.full, make_coords, make_coords, setup.full, **consts, backend=backend, ).shape == shape ) assert ( einx.set_at( f"{x}, {y1}, {y2}, {z}", setup.full, make_coords, make_coords, setup.full, **consts, backend=backend, ).shape == shape ) @pytest.mark.parametrize("test", conftest.tests) def test_shape_vmap_with_axis(test): einx, backend, setup = test x = setup.full((10, 10)) assert einx.flip("a [b] -> a [b]", x).shape == (10, 10) assert einx.flip("a [b]", x).shape == (10, 10) assert einx.roll("a [b]", x, shift=5).shape == (10, 10) assert einx.roll("a [b]", x, shift=(5,)).shape == (10, 10) assert einx.softmax("a [b] -> a [b]", x).shape == (10, 10) assert einx.softmax("a [b]", x).shape == (10, 10) assert einx.softmax("a [b] -> (a [b]) c", x, c=3).shape == (100, 3) assert einx.softmax("a [b] -> a ([b] c)", x, c=3).shape == (10, 30) assert einx.log_softmax("(a [b]) c", x, b=2).shape == (10, 10) assert einx.flip("a ([b c])", x, b=2).shape == (10, 10) assert einx.roll( "a ([b c])", x, shift=( 5, 5, ), b=2, ).shape == (10, 10) @pytest.mark.parametrize("test", conftest.tests) def test_shape_solve(test): einx, backend, setup = test x = setup.full((2, 3, 4)) assert einx.matches("a b c", x) assert not einx.matches("a b", x) with pytest.raises(Exception): einx.check("a b c d", x) einx.check("a b c", x) x = setup.full((6, 4)) assert einx.matches("(a b) c", x) x = setup.full((2, 3, 4)) assert einx.matches("a b...", x) x = setup.full((5, 4)) assert einx.matches("(a + b) c", 
x) assert einx.matches("(a + b) c", x, a=2) assert not einx.matches("(a + b) c", x, a=10) params = einx.solve("a b, c b", x, x) assert params["a"] == 5 and params["b"] == 4 and params["c"] == 5 python-einx-0.3.0/test/test_values.py000066400000000000000000000134251505216034200176660ustar00rootroot00000000000000import conftest import einx import pytest import numpy as np def allclose(x, y, setup): if isinstance(x, (list, int, float, tuple, np.ndarray)): x = np.asarray(x) else: x = setup.to_numpy(x) if isinstance(y, (list, int, float, tuple, np.ndarray)): y = np.asarray(y) else: y = setup.to_numpy(y) return np.allclose(x, y) @pytest.mark.parametrize("test", conftest.tests) def test_values(test): einx, backend, setup = test rng = np.random.default_rng(42) if backend.name not in {"mlx", "dask", "tinygrad"}: x = setup.to_tensor(rng.uniform(size=(13,)).astype("float32")) assert allclose( einx.vmap("b -> b [3]", x, op=lambda x: x + setup.full((3,), value=1)), einx.add("b, -> b 3", x, 1), setup=setup, ) x = setup.to_tensor(rng.uniform(size=(10, 20, 3)).astype("float32")) y = setup.to_tensor(rng.uniform(size=(10, 24)).astype("float32")) if backend.name not in {"mlx", "dask", "tinygrad"}: assert allclose( einx.dot("a b c, a d -> a b c d", x, y), einx.vmap( "a [b c], a [d] -> a [b c d]", x, y, op=lambda x, y: einx.dot("b c, d -> b c d", x, y), ), setup=setup, ) assert allclose( einx.multiply("a b c, a b c, a b c", x, x, x), x * x * x, setup=setup, ) if backend.name not in {"mlx", "dask", "tinygrad"}: assert allclose( einx.mean("a b [c]", x), einx.vmap("a b [c] -> a b", x, op=backend.mean), setup=setup, ) assert einx.dot("[->]", 1, 1) == 1 x = setup.full((10, 10), dtype="float32", value=1) y = setup.full((10,), dtype="float32", value=1) if backend.name != "torch": assert allclose( einx.dot("a [->]", y, 1), y, setup=setup, ) assert allclose( einx.dot("a [b->]", x, y), y * 10, setup=setup, ) assert allclose( einx.dot("a [->b]", y, y), x, setup=setup, ) assert allclose( einx.dot("a [b->b]", x, y), einx.multiply("a b, b -> a b", x, y), setup=setup, ) x = setup.to_tensor(np.arange(6)[np.newaxis]) q, k, v = einx.rearrange("b (q+k+v) -> b q, b k, b v", x, q=2, k=2, v=2) assert allclose(q, [[0, 1]], setup=setup) assert allclose(k, [[2, 3]], setup=setup) assert allclose(v, [[4, 5]], setup=setup) x = setup.to_tensor(np.arange(4)[np.newaxis]) q, k = einx.rearrange("b (q+k) -> b q, b k", x, q=2) assert allclose(q, [[0, 1]], setup=setup) assert allclose(k, [[2, 3]], setup=setup) x = setup.to_tensor(np.arange(4).reshape((2, 2))) a, b, c, d = einx.rearrange( "(a + b) (c + d) -> (a c), (a d), (b c), (b d)", x, a=1, b=1, c=1, d=1 ) assert allclose(a, [0], setup=setup) assert allclose(b, [1], setup=setup) assert allclose(c, [2], setup=setup) assert allclose(d, [3], setup=setup) x = setup.to_tensor(np.arange(4)[np.newaxis]) assert allclose( einx.flip("a [b]", x), [[3, 2, 1, 0]], setup=setup, ) assert allclose( einx.roll("a [b]", x, shift=2), [[2, 3, 0, 1]], setup=setup, ) x = setup.to_tensor(np.arange(10)) y = setup.to_tensor(np.arange(10)[::-1].copy()) z = setup.to_tensor(np.arange(10)) if backend.name not in {"mlx", "dask", "tinygrad"}: assert allclose( einx.get_at("[h], h2 -> h2", x, y), y, setup=setup, ) assert allclose( einx.set_at("[h], h2, h2 -> [h]", x, y, z), y, setup=setup, ) assert allclose( einx.arange("a b [2]", a=5, b=6, backend=backend), np.stack(np.meshgrid(np.arange(5), np.arange(6), indexing="ij"), axis=-1).astype("int32"), setup=setup, ) assert allclose( einx.arange("b a -> a b [2]", a=5, b=6, 
backend=backend), np.stack(np.meshgrid(np.arange(6), np.arange(5), indexing="xy"), axis=-1).astype("int32"), setup=setup, ) if backend.name not in {"mlx", "dask", "tinygrad"}: coord_dtype = "int32" if backend.name != "torch" else "long" x = setup.to_tensor(rng.uniform(size=(4, 5, 6)).astype("float32")) y = setup.full((4, 5), value=3, dtype=coord_dtype) assert allclose( einx.get_at("... [d], ... -> ...", x, y), x[:, :, 3], setup=setup, ) @pytest.mark.parametrize("test", conftest.tests) def test_compare_backends(test): einx, backend, setup = test x = np.random.uniform(size=(10, 3, 10)).astype("float32") y = setup.to_tensor(x) assert allclose( einx.sum("a [b] c", x), einx.sum("a [b] c", y), setup=setup, ) assert allclose( einx.softmax("a [b] c", x), einx.softmax("a [b] c", y), setup=setup, ) assert allclose( einx.log_softmax("a [b] c", x), einx.log_softmax("a [b] c", y), setup=setup, ) assert allclose( einx.logsumexp("a [b] c", x), einx.logsumexp("a [b] c", y), setup=setup, ) assert allclose( einx.flip("a [b c]", x), einx.flip("a [b c]", y), setup=setup, ) assert allclose( einx.flip("a [b c]", x), einx.flip("a [b c]", y), setup=setup, ) assert allclose( einx.roll("a [b] c", x, shift=2), einx.roll("a [b] c", y, shift=2), setup=setup, ) assert allclose( einx.roll("a [b c]", x, shift=(-2, -3)), einx.roll("a [b c]", y, shift=(-2, -3)), setup=setup, )