pax_global_header00006660000000000000000000000064151146467600014524gustar00rootroot0000000000000052 comment=f8bc4f40b344108bd7a1c5f0e4a060a99b7f12f1 xarray-2025.12.0/000077500000000000000000000000001511464676000133435ustar00rootroot00000000000000xarray-2025.12.0/.binder/000077500000000000000000000000001511464676000146645ustar00rootroot00000000000000xarray-2025.12.0/.binder/environment.yml000066400000000000000000000007721511464676000177610ustar00rootroot00000000000000name: xarray-examples channels: - conda-forge dependencies: - python=3.11 - boto3 - bottleneck - cartopy - cfgrib - cftime - coveralls - dask - distributed - dask_labextension - h5netcdf - h5py - hdf5 - iris - lxml # Optional dep of pydap - matplotlib - nc-time-axis - netcdf4 - numba - numpy - packaging - pandas - pint>=0.22 - pip - pooch - pydap - rasterio - scipy - seaborn - setuptools - sparse - toolz - xarray - zarr - numbagg xarray-2025.12.0/.codecov.yml000066400000000000000000000011641511464676000155700ustar00rootroot00000000000000codecov: require_ci_to_pass: true coverage: status: project: default: # Require 1% coverage, i.e., always succeed target: 1% flags: - unittests paths: - "!xarray/tests/" unittests: target: 90% flags: - unittests paths: - "!xarray/tests/" mypy: target: 20% flags: - mypy patch: false changes: false comment: false flags: unittests: paths: - "xarray" - "!xarray/tests" carryforward: false mypy: paths: - "xarray" carryforward: false xarray-2025.12.0/.devcontainer/000077500000000000000000000000001511464676000161025ustar00rootroot00000000000000xarray-2025.12.0/.devcontainer/Dockerfile000066400000000000000000000007151511464676000200770ustar00rootroot00000000000000FROM mcr.microsoft.com/devcontainers/base:jammy ARG PIXI_VERSION=v0.59.0 RUN curl -L -o /usr/local/bin/pixi -fsSL --compressed "https://github.com/prefix-dev/pixi/releases/download/${PIXI_VERSION}/pixi-$(uname -m)-unknown-linux-musl" \ && chmod +x /usr/local/bin/pixi \ && pixi info # set some user and workdir settings to work nicely with vscode USER vscode WORKDIR /home/vscode RUN echo 'eval "$(pixi completion -s bash)"' >> /home/vscode/.bashrc xarray-2025.12.0/.devcontainer/devcontainer.json000066400000000000000000000011111511464676000214500ustar00rootroot00000000000000{ "name": "my-workspace", "build": { "dockerfile": "Dockerfile", "context": ".." 
}, "hostRequirements": { "cpus": 4, "memory": "16gb" }, "customizations": { "vscode": { "settings": {}, "extensions": ["ms-python.python", "charliermarsh.ruff", "GitHub.copilot"] } }, "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {} }, "mounts": [ "source=${localWorkspaceFolderBasename}-pixi,target=${containerWorkspaceFolder}/.pixi,type=volume" ], "postCreateCommand": "sudo chown vscode .pixi && pixi install" } xarray-2025.12.0/.git-blame-ignore-revs000066400000000000000000000001631511464676000174430ustar00rootroot00000000000000# black PR 3142 d089df385e737f71067309ff7abae15994d581ec # isort PR 1924 0e73e240107caee3ffd1a1149f0150c390d43251 xarray-2025.12.0/.git_archival.txt000066400000000000000000000002111511464676000166100ustar00rootroot00000000000000node: f8bc4f40b344108bd7a1c5f0e4a060a99b7f12f1 node-date: 2025-12-05T22:26:08+01:00 describe-name: v2025.12.0 ref-names: tag: v2025.12.0 xarray-2025.12.0/.gitattributes000066400000000000000000000004041511464676000162340ustar00rootroot00000000000000# reduce the number of merge conflicts doc/whats-new.rst merge=union # allow installing from git archives .git_archival.txt export-subst # SCM syntax highlighting & preventing 3-way merges pixi.lock merge=binary linguist-language=YAML linguist-generated=true xarray-2025.12.0/.github/000077500000000000000000000000001511464676000147035ustar00rootroot00000000000000xarray-2025.12.0/.github/FUNDING.yml000066400000000000000000000000771511464676000165240ustar00rootroot00000000000000github: numfocus custom: https://numfocus.org/donate-to-xarray xarray-2025.12.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001511464676000170665ustar00rootroot00000000000000xarray-2025.12.0/.github/ISSUE_TEMPLATE/bugreport.yml000066400000000000000000000072531511464676000216310ustar00rootroot00000000000000name: ๐Ÿ› Bug Report description: File a bug report to help us improve labels: [bug, "needs triage"] body: - type: textarea id: what-happened attributes: label: What happened? description: | Thanks for reporting a bug! Please describe what you were trying to get done. Tell us what happened, what went wrong. validations: required: true - type: textarea id: what-did-you-expect-to-happen attributes: label: What did you expect to happen? description: | Describe what you expected to happen. validations: required: false - type: textarea id: sample-code attributes: label: Minimal Complete Verifiable Example description: | Minimal, self-contained copy-pastable example that demonstrates the issue. Consider listing additional or specific dependencies in [inline script metadata](https://packaging.python.org/en/latest/specifications/inline-script-metadata/#example) so that calling `uv run issue.py` shows the issue when copied into `issue.py`. (not strictly required) This will be automatically formatted into code, so no need for markdown backticks. render: Python value: | # /// script # requires-python = ">=3.11" # dependencies = [ # "xarray[complete]@git+https://github.com/pydata/xarray.git@main", # ] # /// # # This script automatically imports the development branch of xarray to check for issues. # Please delete this header if you have _not_ tested this script with `uv run`! import xarray as xr xr.show_versions() # your reproducer code ... 
- type: textarea id: reproduce attributes: label: Steps to reproduce description: validations: required: false - type: checkboxes id: mvce-checkboxes attributes: label: MVCE confirmation description: | Please confirm that the bug report is in an excellent state, so we can understand & fix it quickly & efficiently. For more details, check out: - [Minimal Complete Verifiable Examples](https://stackoverflow.com/help/mcve) - [Craft Minimal Bug Reports](https://matthewrocklin.com/minimal-bug-reports) options: - label: Minimal example โ€” the example is as focused as reasonably possible to demonstrate the underlying issue in xarray. - label: Complete example โ€” the example is self-contained, including all data and the text of any traceback. - label: Verifiable example โ€” the example copy & pastes into an IPython prompt or [Binder notebook](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/blank_template.ipynb), returning the result. - label: New issue โ€” a search of GitHub Issues suggests this is not a duplicate. - label: Recent environment โ€” the issue occurs with the latest version of xarray and its dependencies. - type: textarea id: log-output attributes: label: Relevant log output description: Please copy and paste any relevant output. This will be automatically formatted into code, so no need for markdown backticks. render: Python - type: textarea id: extra attributes: label: Anything else we need to know? description: | Please describe any other information you want to share. - type: textarea id: show-versions attributes: label: Environment description: | Paste the output of `xr.show_versions()` between the `
<details>` tags, leaving an empty line following the opening tag.
      value: |
        <details>



        </details>
validations: required: true xarray-2025.12.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000012761511464676000210640ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: โ“ Usage question url: https://github.com/pydata/xarray/discussions about: | Ask questions and discuss with other community members here. If you have a question like "How do I concatenate a list of datasets?" then please include a self-contained reproducible example if possible. - name: ๐Ÿ—บ๏ธ Raster analysis usage question url: https://github.com/corteva/rioxarray/discussions about: | If you are using the rioxarray extension (engine='rasterio'), or have questions about raster analysis such as geospatial formats, coordinate reprojection, etc., please use the rioxarray discussion forum. xarray-2025.12.0/.github/ISSUE_TEMPLATE/misc.yml000066400000000000000000000007521511464676000205500ustar00rootroot00000000000000name: ๐Ÿ“ Issue description: General issue, that's not a bug report. labels: ["needs triage"] body: - type: markdown attributes: value: | Please describe your issue here. - type: textarea id: issue-description attributes: label: What is your issue? description: | Thank you for filing an issue! Please give us further information on how we can help you. placeholder: Please describe your issue. validations: required: true xarray-2025.12.0/.github/ISSUE_TEMPLATE/newfeature.yml000066400000000000000000000022061511464676000217560ustar00rootroot00000000000000name: ๐Ÿ’ก Feature Request description: Suggest an idea for xarray labels: [enhancement] body: - type: textarea id: description attributes: label: Is your feature request related to a problem? description: | Please do a quick search of existing issues to make sure that this has not been asked before. Please provide a clear and concise description of what the problem is. Ex. I'm always frustrated when [...] validations: required: true - type: textarea id: solution attributes: label: Describe the solution you'd like description: | A clear and concise description of what you want to happen. - type: textarea id: alternatives attributes: label: Describe alternatives you've considered description: | A clear and concise description of any alternative solutions or features you've considered. validations: required: false - type: textarea id: additional-context attributes: label: Additional context description: | Add any other context about the feature request here. validations: required: false xarray-2025.12.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000004021511464676000205000ustar00rootroot00000000000000 - [ ] Closes #xxxx - [ ] Tests added - [ ] User visible changes (including notable bug fixes) are documented in `whats-new.rst` - [ ] New functions/methods are listed in `api.rst` xarray-2025.12.0/.github/config.yml000066400000000000000000000022201511464676000166670ustar00rootroot00000000000000# Comment to be posted to on first time issues newIssueWelcomeComment: > Thanks for opening your first issue here at xarray! Be sure to follow the issue template! If you have an idea for a solution, we would really welcome a Pull Request with proposed changes. See the [Contributing Guide](https://docs.xarray.dev/en/latest/contributing.html) for more. It may take us a while to respond here, but we really value your contribution. Contributors like you help make xarray better. Thank you! # Comment to be posted to on PRs from first time contributors in your repository newPRWelcomeComment: > Thank you for opening this pull request! 
It may take us a few days to respond here, so thank you for being patient. If you have questions, some answers may be found in our [contributing guidelines](https://docs.xarray.dev/en/stable/contributing.html). # Comment to be posted to on pull requests merged by a first time user firstPRMergeComment: > Congratulations on completing your first pull request! Welcome to Xarray! We are proud of you, and hope to see you again! ![celebration gif](https://media.giphy.com/media/umYMU8G2ixG5mJBDo5/giphy.gif) xarray-2025.12.0/.github/dependabot.yml000066400000000000000000000003311511464676000175300ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates once a week interval: "weekly" groups: actions: patterns: - "*" xarray-2025.12.0/.github/labeler.yml000066400000000000000000000045411511464676000170400ustar00rootroot00000000000000Automation: - changed-files: - any-glob-to-any-file: - .github/** CI: - changed-files: - any-glob-to-any-file: - ci/** dependencies: - changed-files: - any-glob-to-any-file: - ci/requirements/* topic-arrays: - changed-files: - any-glob-to-any-file: - xarray/core/duck_array_ops.py topic-backends: - changed-files: - any-glob-to-any-file: - xarray/backends/** topic-cftime: - changed-files: - any-glob-to-any-file: - xarray/coding/*time* topic-CF conventions: - changed-files: - any-glob-to-any-file: - xarray/conventions.py topic-dask: - changed-files: - any-glob-to-any-file: - xarray/compat/dask* - xarray/core/parallel.py topic-DataTree: - changed-files: - any-glob-to-any-file: - xarray/core/datatree* topic-documentation: - all: - changed-files: - any-glob-to-any-file: "doc/**/*" - all-globs-to-all-files: "!doc/whats-new.rst" topic-groupby: - changed-files: - any-glob-to-any-file: - xarray/core/groupby.py topic-html-repr: - changed-files: - any-glob-to-any-file: - xarray/core/formatting_html.py topic-hypothesis: - changed-files: - any-glob-to-any-file: - properties/** - xarray/testing/strategies.py topic-indexing: - changed-files: - any-glob-to-any-file: - xarray/core/indexes.py - xarray/core/indexing.py topic-NamedArray: - changed-files: - any-glob-to-any-file: - xarray/namedarray/* topic-performance: - changed-files: - any-glob-to-any-file: - asv_bench/benchmarks/** topic-plotting: - changed-files: - any-glob-to-any-file: - xarray/plot/* - xarray/plot/**/* topic-rolling: - changed-files: - any-glob-to-any-file: - xarray/computation/rolling.py - xarray/computation/rolling_exp.py topic-testing: - changed-files: - any-glob-to-any-file: - conftest.py - xarray/testing/* topic-typing: - changed-files: - any-glob-to-any-file: - xarray/core/types.py topic-zarr: - changed-files: - any-glob-to-any-file: - xarray/backends/zarr.py io: - changed-files: - any-glob-to-any-file: - xarray/backends/** xarray-2025.12.0/.github/release.yml000066400000000000000000000001261511464676000170450ustar00rootroot00000000000000changelog: exclude: authors: - dependabot[bot] - pre-commit-ci[bot] xarray-2025.12.0/.github/stale.yml000066400000000000000000000041261511464676000165410ustar00rootroot00000000000000# Configuration for probot-stale - https://github.com/probot/stale # Number of days of inactivity before an Issue or Pull Request becomes stale daysUntilStale: 600 # start with a large number and reduce shortly # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. 
daysUntilClose: 30 # Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable exemptLabels: - pinned - security - "[Status] Maybe Later" # Set to true to ignore issues in a project (defaults to false) exemptProjects: true # Set to true to ignore issues in a milestone (defaults to false) exemptMilestones: true # Set to true to ignore issues with an assignee (defaults to false) exemptAssignees: true # Label to use when marking as stale staleLabel: stale # Comment to post when marking as stale. Set to `false` to disable markComment: | In order to maintain a list of currently relevant issues, we mark issues as stale after a period of inactivity If this issue remains relevant, please comment here or remove the `stale` label; otherwise it will be marked as closed automatically closeComment: | The stalebot didn't hear anything for a while, so it closed this. Please reopen if this is still an issue. # Comment to post when removing the stale label. # unmarkComment: > # Your comment here. # Comment to post when closing a stale Issue or Pull Request. # closeComment: > # Your comment here. # Limit the number of actions per hour, from 1-30. Default is 30 limitPerRun: 2 # start with a small number # Limit to only `issues` or `pulls` # only: issues # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': # pulls: # daysUntilStale: 30 # markComment: > # This pull request has been automatically marked as stale because it has not had # recent activity. It will be closed if no further activity occurs. Thank you # for your contributions. # issues: # exemptLabels: # - confirmed xarray-2025.12.0/.github/workflows/000077500000000000000000000000001511464676000167405ustar00rootroot00000000000000xarray-2025.12.0/.github/workflows/benchmarks-last-release.yml000066400000000000000000000051071511464676000241620ustar00rootroot00000000000000name: Benchmark compare last release on: push: branches: - main workflow_dispatch: jobs: benchmark: name: Linux runs-on: ubuntu-latest env: ASV_DIR: "./asv_bench" CONDA_ENV_FILE: ci/requirements/environment.yml steps: # We need the full repo to avoid this issue # https://github.com/actions/checkout/issues/23 - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: micromamba-version: "1.5.10-0" environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark" create-args: >- asv - name: "Get Previous tag" id: previoustag uses: "WyriHaximus/github-action-get-previous-tag@v1" # with: # fallback: 1.0.0 # Optional fallback tag to use when no tag can be found - name: Run benchmarks shell: bash -l {0} id: benchmark env: OPENBLAS_NUM_THREADS: 1 MKL_NUM_THREADS: 1 OMP_NUM_THREADS: 1 ASV_FACTOR: 1.5 ASV_SKIP_SLOW: 1 run: | set -x # ID this runner asv machine --yes echo "Baseline: ${{ steps.previoustag.outputs.tag }} " echo "Contender: ${{ github.sha }}" # Use mamba for env creation # export CONDA_EXE=$(which mamba) export CONDA_EXE=$(which conda) # Run benchmarks for current commit against base ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR" asv continuous $ASV_OPTIONS ${{ steps.previoustag.outputs.tag }} ${{ github.sha }} \ | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \ | tee benchmarks.log # Report and export results for subsequent steps if grep 
"Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then exit 1 fi working-directory: ${{ env.ASV_DIR }} - name: Add instructions to artifact if: always() run: | cp benchmarks/README_CI.md benchmarks.log .asv/results/ working-directory: ${{ env.ASV_DIR }} - uses: actions/upload-artifact@v5 if: always() with: name: asv-benchmark-results-${{ runner.os }} path: ${{ env.ASV_DIR }}/.asv/results xarray-2025.12.0/.github/workflows/benchmarks.yml000066400000000000000000000054561511464676000216120ustar00rootroot00000000000000name: Benchmark on: pull_request: types: [opened, reopened, synchronize, labeled] workflow_dispatch: env: PR_HEAD_LABEL: ${{ github.event.pull_request.head.label }} jobs: benchmark: if: ${{ contains( github.event.pull_request.labels.*.name, 'run-benchmark') && github.event_name == 'pull_request' || contains( github.event.pull_request.labels.*.name, 'topic-performance') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }} name: Linux runs-on: ubuntu-latest env: ASV_DIR: "./asv_bench" CONDA_ENV_FILE: ci/requirements/environment-benchmark.yml steps: # We need the full repo to avoid this issue # https://github.com/actions/checkout/issues/23 - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: micromamba-version: "1.5.10-0" environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-benchmark cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark" # add "build" because of https://github.com/airspeed-velocity/asv/issues/1385 create-args: >- asv python-build mamba<=1.5.10 - name: Run benchmarks shell: bash -l {0} id: benchmark env: OPENBLAS_NUM_THREADS: 1 MKL_NUM_THREADS: 1 OMP_NUM_THREADS: 1 ASV_FACTOR: 1.5 ASV_SKIP_SLOW: 1 run: | set -x # ID this runner asv machine --yes echo "Baseline: ${{ github.event.pull_request.base.sha }} (${{ github.event.pull_request.base.label }})" echo "Contender: ${GITHUB_SHA} ($PR_HEAD_LABEL)" # Run benchmarks for current commit against base ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR" asv continuous $ASV_OPTIONS ${{ github.event.pull_request.base.sha }} ${GITHUB_SHA} \ | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \ | tee benchmarks.log # Report and export results for subsequent steps if grep "Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then exit 1 fi working-directory: ${{ env.ASV_DIR }} - name: Add instructions to artifact if: always() run: | cp benchmarks/README_CI.md benchmarks.log .asv/results/ working-directory: ${{ env.ASV_DIR }} - uses: actions/upload-artifact@v5 if: always() with: name: asv-benchmark-results-${{ runner.os }} path: ${{ env.ASV_DIR }}/.asv/results xarray-2025.12.0/.github/workflows/cache-pixi-lock.yml000066400000000000000000000027251511464676000224310ustar00rootroot00000000000000name: Generate and cache Pixi lockfile on: workflow_call: inputs: pixi-version: type: string outputs: cache-id: description: "The lock file contents" value: ${{ jobs.cache-pixi-lock.outputs.cache-id }} jobs: cache-pixi-lock: name: Pixi lock runs-on: ubuntu-latest outputs: cache-id: ${{ steps.restore.outputs.cache-primary-key }} steps: - uses: actions/checkout@v5 with: fetch-depth: 0 submodules: recursive - name: Get current date id: date run: echo "date=$(date +'%Y-%m-%d')" >> "$GITHUB_OUTPUT" - uses: actions/cache/restore@v4 id: restore with: path: | 
pixi.lock key: ${{ steps.date.outputs.date }}_${{ inputs.pixi-version }}_${{hashFiles('pixi.toml')}} - uses: prefix-dev/setup-pixi@v0.9.0 if: ${{ !steps.restore.outputs.cache-hit }} with: pixi-version: ${{ inputs.pixi-version }} run-install: false - name: Run pixi lock if: ${{ !steps.restore.outputs.cache-hit }} run: pixi lock - uses: actions/cache/save@v4 if: ${{ !steps.restore.outputs.cache-hit }} id: cache with: path: | pixi.lock key: ${{ steps.restore.outputs.cache-primary-key }} - name: Upload pixi.lock uses: actions/upload-artifact@v4 with: name: pixi-lock path: pixi.lock xarray-2025.12.0/.github/workflows/ci-additional.yaml000066400000000000000000000214571511464676000223360ustar00rootroot00000000000000name: CI Additional on: push: branches: - "main" pull_request: branches: - "main" workflow_dispatch: # allows you to trigger manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 PIXI_VERSION: "v0.58.0" jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') && !contains(github.event.pull_request.labels.*.name, 'skip-ci') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v6 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" cache-pixi-lock: uses: ./.github/workflows/cache-pixi-lock.yml with: pixi-version: "v0.58.0" # keep in sync with env var above doctest: name: Doctests runs-on: "ubuntu-latest" needs: [detect-ci-trigger, cache-pixi-lock] if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} env: PIXI_ENV: "test-py313" steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ env.PIXI_ENV }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: Version info run: | pixi run -e ${{env.PIXI_ENV}} -- python xarray/util/print_versions.py - name: Run doctests run: | # Raise an error if there are warnings in the doctests, with `-Werror`. # This is a trial; if it presents a problem, feel free to remove. # See https://github.com/pydata/xarray/issues/7164 for more info. # # If dependencies emit warnings we can't do anything about, add ignores to # `xarray/tests/__init__.py`. pixi run -e ${{env.PIXI_ENV}} -- python -m pytest --doctest-modules xarray --ignore xarray/tests -Werror mypy: name: Mypy runs-on: "ubuntu-latest" needs: [detect-ci-trigger, cache-pixi-lock] defaults: run: shell: bash -l {0} env: PIXI_ENV: test-py313-with-typing steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. 
- name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ env.PIXI_ENV }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{env.PIXI_ENV}} -- python --version | cut -d' ' -f2 | cut -d. -f1,2)" >> $GITHUB_ENV - name: Version info run: | pixi run -e ${{env.PIXI_ENV}} -- python xarray/util/print_versions.py - name: Run mypy run: | pixi run -e ${{env.PIXI_ENV}} -- python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.1 with: file: mypy_report/cobertura.xml flags: mypy env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false mypy-min: name: Mypy 3.11 runs-on: "ubuntu-latest" needs: [detect-ci-trigger, cache-pixi-lock] defaults: run: shell: bash -l {0} env: PIXI_ENV: test-py311-with-typing steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ env.PIXI_ENV }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{env.PIXI_ENV}} -- python --version | cut -d' ' -f2 | cut -d. -f1,2)" >> $GITHUB_ENV - name: Version info run: | pixi run -e ${{env.PIXI_ENV}} -- python xarray/util/print_versions.py - name: Run mypy run: | pixi run -e ${{env.PIXI_ENV}} -- python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.1 with: file: mypy_report/cobertura.xml flags: mypy-min env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false pyright: name: Pyright | ${{ matrix.pixi-env }}" runs-on: "ubuntu-latest" needs: [detect-ci-trigger, cache-pixi-lock] strategy: fail-fast: false matrix: pixi-env: ["test-py313-with-typing", "test-py311-with-typing"] if: | always() && ( contains( github.event.pull_request.labels.*.name, 'run-pyright') ) defaults: run: shell: bash -l {0} steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ matrix.pixi-env }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{ matrix.pixi-env }} -- python --version | cut -d' ' -f2 | cut -d. 
-f1,2)" >> $GITHUB_ENV - name: Version info run: | pixi run -e ${{ matrix.pixi-env }} -- python xarray/util/print_versions.py - name: Run pyright run: | pixi run -e ${{ matrix.pixi-env }} -- python -m pyright xarray/ - name: Upload pyright coverage to Codecov uses: codecov/codecov-action@v5.5.1 with: file: pyright_report/cobertura.xml flags: pyright env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false min-version-policy: name: Minimum Version Policy runs-on: "ubuntu-latest" needs: detect-ci-trigger # min-version-policy doesn't work with Pixi yet https://github.com/pydata/xarray/pull/10888#discussion_r2504335457 if: false # if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} env: COLUMNS: 120 steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - uses: actions/setup-python@v6 with: python-version: "3.x" - name: All-deps minimum versions policy uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1 with: policy: ci/policy.yaml environment-paths: ci/requirements/min-all-deps.yml - name: Bare minimum versions policy uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1 with: policy: ci/policy.yaml environment-paths: ci/requirements/bare-minimum.yml xarray-2025.12.0/.github/workflows/ci.yaml000066400000000000000000000136171511464676000202270ustar00rootroot00000000000000name: CI on: push: branches: - "main" pull_request: branches: - "main" workflow_dispatch: # allows you to trigger manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 PIXI_VERSION: "v0.58.0" jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') && !contains(github.event.pull_request.labels.*.name, 'skip-ci') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v6 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" cache-pixi-lock: uses: ./.github/workflows/cache-pixi-lock.yml with: pixi-version: "v0.58.0" # keep in sync with env var above test: name: "${{ matrix.os }} | ${{ matrix.pixi-env }}${{ matrix.pytest-addopts && format(' ({0})', matrix.pytest-addopts) || '' }}" runs-on: ${{ matrix.os }} needs: [detect-ci-trigger, cache-pixi-lock] if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} strategy: fail-fast: false matrix: os: ["ubuntu-latest", "macos-latest", "windows-latest"] # Bookend python versions pixi-env: ["test-py311", "test-py313"] pytest-addopts: [""] include: # Minimum python version: - pixi-env: "test-py311-bare-minimum" os: ubuntu-latest - pixi-env: "test-py311-bare-min-and-scipy" os: ubuntu-latest - pixi-env: "test-py311-min-versions" os: ubuntu-latest # Latest python version: - pixi-env: "test-py313-no-numba" os: ubuntu-latest - pixi-env: "test-py313-no-dask" os: ubuntu-latest - pixi-env: "test-py313" pytest-addopts: "flaky" os: ubuntu-latest # The mypy tests must be executed using only 1 process in order to guarantee # predictable mypy output messages for comparison to expectations. 
- pixi-env: "test-py311-with-typing" pytest-addopts: "mypy" numprocesses: 1 os: ubuntu-latest - pixi-env: "test-py313-with-typing" numprocesses: 1 os: ubuntu-latest steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ matrix.pixi-env }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: Set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{ matrix.pixi-env }} -- python --version | cut -d' ' -f2 | cut -d. -f1,2)" >> $GITHUB_ENV if [[ "${{ matrix.pytest-addopts }}" != "" ]] ; then if [[ "${{ matrix.pytest-addopts }}" == "flaky" ]] ; then echo "PYTEST_ADDOPTS=-m 'flaky or network' --run-flaky --run-network-tests -W default" >> $GITHUB_ENV elif [[ "${{ matrix.pytest-addopts }}" == "mypy" ]] ; then echo "PYTEST_ADDOPTS=-n 1 -m 'mypy' --run-mypy -W default" >> $GITHUB_ENV fi if [[ "${{ matrix.pixi-env }}" == "min-all-deps" ]] ; then # Don't raise on warnings echo "PYTEST_ADDOPTS=-W default" >> $GITHUB_ENV fi fi # We only want to install this on one run, because otherwise we'll have # duplicate annotations. - name: Install error reporter if: ${{ matrix.os }} == 'ubuntu-latest' and ${{ matrix.pixi-env}} == 'test' run: | pixi add --pypi pytest-github-actions-annotate-failures - name: Version info run: | pixi run -e ${{ matrix.pixi-env }} -- python xarray/util/print_versions.py - name: Import xarray run: | pixi run -e ${{ matrix.pixi-env }} -- python -c "import xarray" - name: Restore cached hypothesis directory uses: actions/cache@v4 with: path: .hypothesis/ key: cache-hypothesis enableCrossOsArchive: true save-always: true - name: Run tests run: | pixi run -e ${{ matrix.pixi-env }} -- python -m pytest -n ${{ matrix.numprocesses || 4 }} \ --timeout 180 \ --cov=xarray \ --cov-report=xml \ --junitxml=pytest.xml - name: Upload test results if: always() uses: actions/upload-artifact@v5 with: name: Test results for OS ${{ runner.os }} pixi-env ${{ matrix.pixi-env }} pytest-addopts ${{ matrix.pytest-addopts }} path: pytest.xml - name: Upload code coverage to Codecov uses: codecov/codecov-action@v5.5.1 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: file: ./coverage.xml flags: unittests env_vars: RUNNER_OS,PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false event_file: name: "Event File" runs-on: ubuntu-latest if: github.repository == 'pydata/xarray' steps: - name: Upload uses: actions/upload-artifact@v5 with: name: Event File path: ${{ github.event_path }} xarray-2025.12.0/.github/workflows/configure-testpypi-version.py000066400000000000000000000022131511464676000246330ustar00rootroot00000000000000import argparse import copy import pathlib import tomli import tomli_w def split_path(path, sep="/"): if isinstance(path, str): return [part for part in path.split(sep) if part] else: return path def extract(mapping, path, sep="/"): parts = split_path(path, sep=sep) cur = mapping for part in parts: cur = cur[part] return cur def update(mapping, path, value, sep="/"): new = copy.deepcopy(mapping) parts = split_path(path, sep=sep) parent = extract(new, parts[:-1]) parent[parts[-1]] = value return new parser = argparse.ArgumentParser() 
parser.add_argument("path", type=pathlib.Path) args = parser.parse_args() content = args.path.read_text() decoded = tomli.loads(content) with_local_scheme = update( decoded, "tool.setuptools_scm.local_scheme", "no-local-version", sep="." ) # work around a bug in setuptools / setuptools-scm with_setuptools_pin = copy.deepcopy(with_local_scheme) requires = extract(with_setuptools_pin, "build-system.requires", sep=".") requires[0] = "setuptools>=42,<60" new_content = tomli_w.dumps(with_setuptools_pin) args.path.write_text(new_content) xarray-2025.12.0/.github/workflows/hypothesis.yaml000066400000000000000000000100071511464676000220210ustar00rootroot00000000000000name: Slow Hypothesis CI on: push: branches: - "main" pull_request: branches: - "main" types: [opened, reopened, synchronize, labeled] schedule: - cron: "0 0 * * *" # Daily โ€œAt 00:00โ€ UTC workflow_dispatch: # allows you to trigger manually env: FORCE_COLOR: 3 PIXI_VERSION: "v0.58.0" jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request' || github.event_name == 'schedule') && !contains(github.event.pull_request.labels.*.name, 'skip-ci') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v6 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" cache-pixi-lock: uses: ./.github/workflows/cache-pixi-lock.yml with: pixi-version: "v0.58.0" # keep in sync with env var above hypothesis: name: Slow Hypothesis Tests runs-on: "ubuntu-latest" needs: [detect-ci-trigger, cache-pixi-lock] if: | always() && ( needs.detect-ci-trigger.outputs.triggered == 'false' && ( (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') || contains( github.event.pull_request.labels.*.name, 'run-slow-hypothesis')) ) defaults: run: shell: bash -l {0} env: PIXI_ENV: "test-py313" steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ env.PIXI_ENV }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{env.PIXI_ENV}} python --version | cut -d' ' -f2 | cut -d. -f1,2)" >> $GITHUB_ENV - name: Version info run: | pixi run -e ${{ env.PIXI_ENV }} python xarray/util/print_versions.py # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - name: Restore cached hypothesis directory id: restore-hypothesis-cache uses: actions/cache/restore@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} restore-keys: | cache-hypothesis- - name: Run slow Hypothesis tests if: success() id: status run: | pixi run -e ${{ env.PIXI_ENV }} python -m pytest --hypothesis-show-statistics --run-slow-hypothesis properties/*.py \ --report-log output-${{ env.PIXI_ENV }}-log.jsonl # explicitly save the cache so it gets updated, also do this even if it fails. 
- name: Save cached hypothesis directory id: save-hypothesis-cache if: always() && steps.status.outcome != 'skipped' uses: actions/cache/save@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} - name: Generate and publish the report if: | failure() && steps.status.outcome == 'failure' && github.event_name == 'schedule' && github.repository_owner == 'pydata' uses: scientific-python/issue-from-pytest-log-action@v1 with: log-path: output-${{ env.PIXI_ENV }}-log.jsonl issue-title: "Nightly Hypothesis tests failed" issue-label: "topic-hypothesis" xarray-2025.12.0/.github/workflows/label-prs.yml000066400000000000000000000003451511464676000213460ustar00rootroot00000000000000name: "PR Labeler" on: - pull_request_target jobs: label: runs-on: ubuntu-latest steps: - uses: actions/labeler@v6 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" sync-labels: false xarray-2025.12.0/.github/workflows/nightly-wheels.yml000066400000000000000000000022161511464676000224270ustar00rootroot00000000000000name: Upload nightly wheels on: workflow_dispatch: schedule: - cron: "0 0 * * *" jobs: cron: runs-on: ubuntu-latest if: github.repository == 'pydata/xarray' steps: - uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install build twine - name: Build tarball and wheels run: | git clean -xdf git restore -SW . python -m build - name: Check built artifacts run: | python -m twine check --strict dist/* pwd if [ -f dist/xarray-0.0.0.tar.gz ]; then echo "โŒ INVALID VERSION NUMBER" exit 1 else echo "โœ… Looks good" fi - name: Upload wheel uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # 0.6.2 with: anaconda_nightly_upload_token: ${{ secrets.ANACONDA_NIGHTLY }} artifacts_path: dist xarray-2025.12.0/.github/workflows/publish-test-results.yaml000066400000000000000000000025201511464676000237450ustar00rootroot00000000000000# Copied from https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.23/README.md#support-fork-repositories-and-dependabot-branches name: Publish test results on: workflow_run: workflows: ["CI"] types: - completed concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: publish-test-results: name: Publish test results runs-on: ubuntu-latest if: github.event.workflow_run.conclusion != 'skipped' steps: - name: Download and extract artifacts env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} run: | mkdir artifacts && cd artifacts artifacts_url=${{ github.event.workflow_run.artifacts_url }} gh api "$artifacts_url" -q '.artifacts[] | [.name, .archive_download_url] | @tsv' | while read artifact do IFS=$'\t' read name url <<< "$artifact" gh api $url > "$name.zip" unzip -d "$name" "$name.zip" done - name: Publish Unit Test Results uses: EnricoMi/publish-unit-test-result-action@v2 with: commit: ${{ github.event.workflow_run.head_sha }} event_file: artifacts/Event File/event.json event_name: ${{ github.event.workflow_run.event }} files: "artifacts/**/*.xml" comment_mode: off xarray-2025.12.0/.github/workflows/pypi-release.yaml000066400000000000000000000060051511464676000222240ustar00rootroot00000000000000name: Build and Upload xarray to PyPI on: release: types: - published push: tags: - "v*" pull_request: types: [opened, reopened, synchronize, labeled] workflow_dispatch: jobs: build-artifacts: runs-on: ubuntu-latest if: ${{ github.repository == 'pydata/xarray' && 
( (contains(github.event.pull_request.labels.*.name, 'Release') && github.event_name == 'pull_request') || github.event_name == 'release' || github.event_name == 'workflow_dispatch' || startsWith(github.ref, 'refs/tags/v') ) }} steps: - uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-python@v6 name: Install Python with: python-version: "3.12" - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install build twine - name: Build tarball and wheels run: | git clean -xdf git restore -SW . python -m build - name: Check built artifacts run: | python -m twine check --strict dist/* pwd if [ -f dist/xarray-0.0.0.tar.gz ]; then echo "โŒ INVALID VERSION NUMBER" exit 1 else echo "โœ… Looks good" fi - uses: actions/upload-artifact@v5 with: name: releases path: dist test-built-dist: needs: build-artifacts runs-on: ubuntu-latest steps: - uses: actions/setup-python@v6 name: Install Python with: python-version: "3.12" - uses: actions/download-artifact@v6 with: name: releases path: dist - name: List contents of built dist run: | ls -ltrh ls -ltrh dist - name: Verify the built dist/wheel is valid run: | python -m pip install --upgrade pip python -m pip install dist/xarray*.whl python -m xarray.util.print_versions upload-to-test-pypi: needs: test-built-dist if: github.event_name == 'push' runs-on: ubuntu-latest environment: name: pypi url: https://test.pypi.org/p/xarray permissions: id-token: write steps: - uses: actions/download-artifact@v6 with: name: releases path: dist - name: Publish package to TestPyPI if: github.event_name == 'push' uses: pypa/gh-action-pypi-publish@v1.13.0 with: repository_url: https://test.pypi.org/legacy/ verbose: true upload-to-pypi: needs: test-built-dist if: github.event_name == 'release' runs-on: ubuntu-latest environment: name: pypi url: https://pypi.org/p/xarray permissions: id-token: write steps: - uses: actions/download-artifact@v6 with: name: releases path: dist - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@v1.13.0 with: verbose: true xarray-2025.12.0/.github/workflows/upstream-dev-ci.yaml000066400000000000000000000116371511464676000226410ustar00rootroot00000000000000name: CI Upstream on: push: branches: - main pull_request: branches: - main types: [opened, reopened, synchronize, labeled] schedule: - cron: "0 0 * * *" # Daily โ€œAt 00:00โ€ UTC workflow_dispatch: # allows you to trigger the workflow run manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 PIXI_VERSION: "v0.58.0" jobs: detect-ci-trigger: name: detect upstream-dev ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') && !contains(github.event.pull_request.labels.*.name, 'skip-ci') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v6 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[test-upstream]" cache-pixi-lock: uses: ./.github/workflows/cache-pixi-lock.yml with: pixi-version: "v0.58.0" # keep in sync with env var above upstream-dev: name: upstream-dev runs-on: ubuntu-latest needs: [detect-ci-trigger, cache-pixi-lock] if: | always() && ( (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') || needs.detect-ci-trigger.outputs.triggered == 'true' || contains( github.event.pull_request.labels.*.name, 'run-upstream') ) defaults: run: shell: bash -l {0} strategy: 
fail-fast: false matrix: pixi-env: ["test-nightly"] steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ matrix.pixi-env }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: Version info run: | pixi run -e ${{matrix.pixi-env}} -- python xarray/util/print_versions.py - name: Import xarray run: | pixi run -e ${{matrix.pixi-env}} -- python -c 'import xarray' - name: Run Tests if: success() id: status run: | pixi run -e ${{matrix.pixi-env}} -- python -m pytest --timeout=60 -rf -nauto \ --report-log output-${{ matrix.pixi-env }}-log.jsonl - name: Generate and publish the report if: | failure() && steps.status.outcome == 'failure' && github.event_name == 'schedule' && github.repository_owner == 'pydata' uses: scientific-python/issue-from-pytest-log-action@v1 with: log-path: output-${{ matrix.pixi-env }}-log.jsonl mypy-upstream-dev: name: mypy-upstream-dev runs-on: ubuntu-latest needs: [detect-ci-trigger, cache-pixi-lock] if: | always() && ( contains( github.event.pull_request.labels.*.name, 'run-upstream') ) defaults: run: shell: bash -l {0} strategy: fail-fast: false matrix: pixi-env: ["test-nightly"] steps: - uses: actions/checkout@v6 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Restore cached pixi lockfile uses: actions/cache/restore@v4 id: restore-pixi-lock with: enableCrossOsArchive: true path: | pixi.lock key: ${{ needs.cache-pixi-lock.outputs.cache-id }} - uses: prefix-dev/setup-pixi@v0.9.0 with: pixi-version: ${{ env.PIXI_VERSION }} cache: true environments: ${{ matrix.pixi-env }} cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV echo "PYTHON_VERSION=$(pixi run -e ${{matrix.pixi-env}} -- python --version | cut -d' ' -f2 | cut -d. 
-f1,2)" >> $GITHUB_ENV - name: Version info run: | pixi run -e ${{matrix.pixi-env}} -- python xarray/util/print_versions.py - name: Run mypy run: | pixi run -e ${{matrix.pixi-env}} -- python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.1 with: file: mypy_report/cobertura.xml flags: mypy env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false xarray-2025.12.0/.gitignore000066400000000000000000000022471511464676000153400ustar00rootroot00000000000000*.py[cod] __pycache__ .env .venv # example caches from Hypothesis .hypothesis/ # temp files from docs build doc/*.nc doc/auto_gallery doc/rasm.zarr # C extensions *.so # Packages *.egg *.egg-info .eggs dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .coverage.* .tox nosetests.xml .cache .prettier_cache .dmypy.json .mypy_cache .ropeproject/ .tags* .testmon* .tmontmp/ .pytest_cache dask-worker-space/ # asv environments asv_bench/.asv asv_bench/pkgs # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # IDEs .idea *.swp .DS_Store .vscode/ # xarray specific doc/_build doc/generated/ doc/api/generated/ xarray/tests/data/*.grib.*.idx # Claude Code .claude/ # Sync tools Icon* .ipynb_checkpoints doc/team-panel.txt doc/external-examples-gallery.txt doc/notebooks-examples-gallery.txt doc/videos-gallery.txt # Until we support this properly, excluding from gitignore. (adding it to # gitignore to make it _easier_ to work with `uv`, not as an indication that I # think we shouldn't...) uv.lock mypy_report/ xarray-docs/ # pixi environments .pixi pixi.lock xarray-2025.12.0/.pre-commit-config.yaml000066400000000000000000000051441511464676000176300ustar00rootroot00000000000000# https://pre-commit.com/ ci: autoupdate_schedule: monthly autoupdate_commit_msg: "Update pre-commit hooks" repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: debug-statements - id: mixed-line-ending - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: # - id: python-check-blanket-noqa # checked by ruff # - id: python-check-blanket-type-ignore # checked by ruff # - id: python-check-mock-methods # checked by ruff - id: python-no-log-warn # - id: python-use-type-annotations # too many false positives - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal - id: text-unicode-replacement-char - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.14.7 hooks: - id: ruff-check args: ["--fix", "--show-fixes"] - id: ruff-format - repo: https://github.com/keewis/blackdoc rev: v0.4.6 hooks: - id: blackdoc exclude: "generate_aggregations.py" # make sure this is the most recent version of black additional_dependencies: ["black==25.11.0"] - repo: https://github.com/rbubley/mirrors-prettier rev: v3.7.3 hooks: - id: prettier args: ["--cache-location=.prettier_cache/cache"] - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.19.0 hooks: - id: mypy # Copied from setup.cfg exclude: "properties|asv_bench" # This is slow and so we take it out of the fast-path; requires passing # `--hook-stage manual` to pre-commit stages: [manual] additional_dependencies: [ # Type stubs types-python-dateutil, types-setuptools, types-PyYAML, types-pytz, typing-extensions>=4.1.0, numpy, ] - repo: 
https://github.com/citation-file-format/cff-converter-python rev: 5295f87c0e261da61a7b919fc754e3a77edd98a7 hooks: - id: validate-cff - repo: https://github.com/ComPWA/taplo-pre-commit rev: v0.9.3 hooks: - id: taplo-format args: ["--option", "array_auto_collapse=false"] - id: taplo-lint args: ["--no-schema"] - repo: https://github.com/abravalheri/validate-pyproject rev: v0.24.1 hooks: - id: validate-pyproject additional_dependencies: ["validate-pyproject-schema-store[all]"] - repo: https://github.com/adhtruong/mirrors-typos rev: v1.40.0 hooks: - id: typos xarray-2025.12.0/.readthedocs.yaml000066400000000000000000000011721511464676000165730ustar00rootroot00000000000000version: 2 sphinx: configuration: doc/conf.py fail_on_warning: true build: os: ubuntu-lts-latest tools: # just so RTD stops complaining python: "latest" jobs: create_environment: - asdf plugin add pixi - asdf install pixi latest - asdf global pixi latest post_checkout: - (git --no-pager log --pretty="tformat:%s" -1 | grep -vqF "[skip-rtd]") || exit 183 - git fetch --unshallow || true pre_install: - git update-index --assume-unchanged doc/conf.py install: - pixi install -e doc build: html: - pixi run doc BUILDDIR=$READTHEDOCS_OUTPUT xarray-2025.12.0/CITATION.cff000066400000000000000000000071771511464676000152510ustar00rootroot00000000000000cff-version: 1.2.0 message: "If you use this software, please cite it as below." authors: - family-names: "Hoyer" given-names: "Stephan" orcid: "https://orcid.org/0000-0002-5207-0380" - family-names: "Roos" given-names: "Maximilian" - family-names: "Joseph" given-names: "Hamman" orcid: "https://orcid.org/0000-0001-7479-8439" - family-names: "Magin" given-names: "Justus" orcid: "https://orcid.org/0000-0002-4254-8002" - family-names: "Cherian" given-names: "Deepak" orcid: "https://orcid.org/0000-0002-6861-8734" - family-names: "Fitzgerald" given-names: "Clark" orcid: "https://orcid.org/0000-0003-3446-6389" - family-names: "Hauser" given-names: "Mathias" orcid: "https://orcid.org/0000-0002-0057-4878" - family-names: "Fujii" given-names: "Keisuke" orcid: "https://orcid.org/0000-0003-0390-9984" - family-names: "Maussion" given-names: "Fabien" orcid: "https://orcid.org/0000-0002-3211-506X" - family-names: "Imperiale" given-names: "Guido" - family-names: "Clark" given-names: "Spencer" orcid: "https://orcid.org/0000-0001-5595-7895" - family-names: "Kleeman" given-names: "Alex" - family-names: "Nicholas" given-names: "Thomas" orcid: "https://orcid.org/0000-0002-2176-0530" - family-names: "Kluyver" given-names: "Thomas" orcid: "https://orcid.org/0000-0003-4020-6364" - family-names: "Westling" given-names: "Jimmy" - family-names: "Munroe" given-names: "James" orcid: "https://orcid.org/0000-0001-9098-6309" - family-names: "Amici" given-names: "Alessandro" orcid: "https://orcid.org/0000-0002-1778-4505" - family-names: "Barghini" given-names: "Aureliana" - family-names: "Banihirwe" given-names: "Anderson" orcid: "https://orcid.org/0000-0001-6583-571X" - family-names: "Bell" given-names: "Ray" orcid: "https://orcid.org/0000-0003-2623-0587" - family-names: "Hatfield-Dodds" given-names: "Zac" orcid: "https://orcid.org/0000-0002-8646-8362" - family-names: "Abernathey" given-names: "Ryan" orcid: "https://orcid.org/0000-0001-5999-4917" - family-names: "Bovy" given-names: "Benoรฎt" - family-names: "Omotani" given-names: "John" orcid: "https://orcid.org/0000-0002-3156-8227" - family-names: "Mรผhlbauer" given-names: "Kai" orcid: "https://orcid.org/0000-0001-6599-1034" - family-names: "Roszko" given-names: 
"Maximilian K." orcid: "https://orcid.org/0000-0001-9424-2526" - family-names: "Wolfram" given-names: "Phillip J." orcid: "https://orcid.org/0000-0001-5971-4241" - family-names: "Henderson" given-names: "Scott" orcid: "https://orcid.org/0000-0003-0624-4965" - family-names: "Awowale" given-names: "Eniola Olufunke" - family-names: "Scheick" given-names: "Jessica" orcid: "https://orcid.org/0000-0002-3421-4459" - family-names: "Savoie" given-names: "Matthew" orcid: "https://orcid.org/0000-0002-8881-2550" - family-names: "Littlejohns" given-names: "Owen" title: "xarray" abstract: "N-D labeled arrays and datasets in Python." license: Apache-2.0 doi: 10.5281/zenodo.598201 url: "https://xarray.dev/" repository-code: "https://github.com/pydata/xarray" preferred-citation: type: article authors: - family-names: "Hoyer" given-names: "Stephan" orcid: "https://orcid.org/0000-0002-5207-0380" - family-names: "Joseph" given-names: "Hamman" orcid: "https://orcid.org/0000-0001-7479-8439" doi: "10.5334/jors.148" journal: "Journal of Open Research Software" month: 4 title: "xarray: N-D labeled Arrays and Datasets in Python" volume: 5 issue: 1 year: 2017 xarray-2025.12.0/CLAUDE.md000066400000000000000000000024351511464676000146260ustar00rootroot00000000000000# xarray development setup ## Setup ```bash uv sync ``` ## Run tests ```bash uv run pytest xarray -n auto # All tests in parallel uv run pytest xarray/tests/test_dataarray.py # Specific file ``` ## Linting & type checking ```bash pre-commit run --all-files # Includes ruff and other checks uv run dmypy run # Type checking with mypy ``` ## Code Style Guidelines ### Import Organization - **Always place imports at the top of the file** in the standard import section - Never add imports inside functions or nested scopes unless there's a specific reason (e.g., circular import avoidance, optional dependencies in TYPE_CHECKING) - Group imports following PEP 8 conventions: 1. Standard library imports 2. Related third-party imports 3. Local application/library specific imports ## GitHub Interaction Guidelines - **NEVER impersonate the user on GitHub**, always sign off with something like "[This is Claude Code on behalf of Jane Doe]" - Never create issues nor pull requests on the xarray GitHub repository unless explicitly instructed - Never post "update" messages, progress reports, or explanatory comments on GitHub issues/PRs unless specifically instructed - When creating commits, always include a co-authorship trailer: `Co-authored-by: Claude ` xarray-2025.12.0/CODE_OF_CONDUCT.md000066400000000000000000000027351511464676000161510ustar00rootroot00000000000000# NUMFOCUS CODE OF CONDUCT You can find the full Code of Conduct on the NumFOCUS website: https://numfocus.org/code-of-conduct ## THE SHORT VERSION NumFOCUS is dedicated to providing a harassment-free community for everyone, regardless of gender, sexual orientation, gender identity and expression, disability, physical appearance, body size, race, or religion. We do not tolerate harassment of community members in any form. Be kind to others. Do not insult or put down others. Behave professionally. Remember that harassment and sexist, racist, or exclusionary jokes are not appropriate for NumFOCUS. All communication should be appropriate for a professional audience including people of many different backgrounds. Sexual language and imagery is not appropriate. Thank you for helping make this a welcoming, friendly community for all. 
## HOW TO REPORT If you feel that the Code of Conduct has been violated, feel free to submit a report, by using the form: [NumFOCUS Code of Conduct Reporting Form](https://numfocus.typeform.com/to/ynjGdT?typeform-source=numfocus.org) ## WHO WILL RECEIVE YOUR REPORT Your report will be received and handled by NumFOCUS Code of Conduct Working Group; trained, and experienced contributors with diverse backgrounds. The group is making decisions independently from the project, PyData, NumFOCUS or any other organization. You can learn more about the current group members, as well as the reporting procedure here: https://numfocus.org/code-of-conduct xarray-2025.12.0/CONTRIBUTING.md000066400000000000000000000002131511464676000155700ustar00rootroot00000000000000Xarray's contributor guidelines [can be found in our online documentation](https://docs.xarray.dev/en/stable/contribute/contributing.html) xarray-2025.12.0/CORE_TEAM_GUIDE.md000066400000000000000000000474051511464676000161520ustar00rootroot00000000000000> **_Note:_** This Core Team Member Guide was adapted from the [napari project's Core Developer Guide](https://napari.org/stable/developers/core_dev_guide.html) and the [Pandas maintainers guide](https://pandas.pydata.org/docs/development/maintaining.html). # Core Team Member Guide Welcome, new core team member! We appreciate the quality of your work, and enjoy working with you! Thank you for your numerous contributions to the project so far. By accepting the invitation to become a core team member you are **not required to commit to doing any more work** - xarray is a volunteer project, and we value the contributions you have made already. You can see a list of all the current core team members on our [@pydata/xarray](https://github.com/orgs/pydata/teams/xarray) GitHub team. Once accepted, you should now be on that list too. This document offers guidelines for your new role. ## Tasks Xarray values a wide range of contributions, only some of which involve writing code. As such, we do not currently make a distinction between a "core team member", "core developer", "maintainer", or "triage team member" as some projects do (e.g. [pandas](https://pandas.pydata.org/docs/development/maintaining.html)). That said, if you prefer to refer to your role as one of the other titles above then that is fine by us! Xarray is mostly a volunteer project, so these tasks shouldnโ€™t be read as โ€œexpectationsโ€. **There are no strict expectations**, other than to adhere to our [Code of Conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). 
Rather, the tasks that follow are general descriptions of what it might mean to be a core team member: - Facilitate a welcoming environment for those who file issues, make pull requests, and open discussion topics, - Triage newly filed issues, - Review newly opened pull requests, - Respond to updates on existing issues and pull requests, - Drive discussion and decisions on stalled issues and pull requests, - Provide experience / wisdom on API design questions to ensure consistency and maintainability, - Project organization (run developer meetings, coordinate with sponsors), - Project evangelism (advertise xarray to new users), - Community contact (represent xarray in user communities such as [Pangeo](https://pangeo.io/)), - Key project contact (represent xarray's perspective within key related projects like NumPy, Zarr or Dask), - Project fundraising (help write and administrate grants that will support xarray), - Improve documentation or tutorials (especially on [`tutorial.xarray.dev`](https://tutorial.xarray.dev/)), - Presenting or running tutorials (such as those we have given at the SciPy conference), - Help maintain the [`xarray.dev`](https://xarray.dev/) landing page and website, the [code for which is here](https://github.com/xarray-contrib/xarray.dev), - Write blog posts on the [xarray blog](https://xarray.dev/blog), - Help maintain xarray's various Continuous Integration Workflows, - Help maintain a regular release schedule (we aim for one or more releases per month), - Attend the bi-weekly community meeting ([issue](https://github.com/pydata/xarray/issues/4001)), - Contribute to the xarray codebase. (Matt Rocklin's post on [the role of a maintainer](https://matthewrocklin.com/blog/2019/05/18/maintainer) may be interesting background reading, but should not be taken to strictly apply to the Xarray project.) Obviously you are not expected to contribute in all (or even more than one) of these ways! They are listed so as to indicate the many types of work that go into maintaining xarray. It is natural that your available time and enthusiasm for the project will wax and wane - this is fine and expected! It is also common for core team members to have a "niche" - a particular part of the codebase they have specific expertise with, or certain types of task above which they primarily perform. If however you feel that is unlikely you will be able to be actively contribute in the foreseeable future (or especially if you won't be available to answer questions about pieces of code that you wrote previously) then you may want to consider letting us know you would rather be listed as an "Emeritus Core Team Member", as this would help us in evaluating the overall health of the project. ## Issue triage One of the main ways you might spend your contribution time is by responding to or triaging new issues. Hereโ€™s a typical workflow for triaging a newly opened issue or discussion: 1. **Thank the reporter for opening an issue.** The issue tracker is many peopleโ€™s first interaction with the xarray project itself, beyond just using the library. It may also be their first open-source contribution of any kind. As such, we want it to be a welcoming, pleasant experience. 2. **Is the necessary information provided?** Ideally reporters would fill out the issue template, but many donโ€™t. If crucial information (like the version of xarray they used), is missing feel free to ask for that and label the issue with โ€œneeds infoโ€. 
The report should follow the [guidelines for xarray discussions](https://github.com/pydata/xarray/discussions/5404). You may want to link to that if they didnโ€™t follow the template. Make sure that the title accurately reflects the issue. Edit it yourself if itโ€™s not clear. Remember also that issues can be converted to discussions and vice versa if appropriate. 3. **Is this a duplicate issue?** We have many open issues. If a new issue is clearly a duplicate, label the new issue as โ€œduplicateโ€, and close the issue with a link to the original issue. Make sure to still thank the reporter, and encourage them to chime in on the original issue, and perhaps try to fix it. If the new issue provides relevant information, such as a better or slightly different example, add it to the original issue as a comment or an edit to the original post. 4. **Is the issue minimal and reproducible?** For bug reports, we ask that the reporter provide a minimal reproducible example. See [minimal-bug-reports](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) for a good explanation. If the example is not reproducible, or if itโ€™s clearly not minimal, feel free to ask the reporter if they can provide and example or simplify the provided one. Do acknowledge that writing minimal reproducible examples is hard work. If the reporter is struggling, you can try to write one yourself and weโ€™ll edit the original post to include it. If a nice reproducible example has been provided, thank the reporter for that. If a reproducible example canโ€™t be provided, add the โ€œneeds mcveโ€ label. If a reproducible example is provided, but you see a simplification, edit the original post with your simpler reproducible example. 5. **Is this a clearly defined feature request?** Generally, xarray prefers to discuss and design new features in issues, before a pull request is made. Encourage the submitter to include a proposed API for the new feature. Having them write a full docstring is a good way to pin down specifics. We may need a discussion from several xarray maintainers before deciding whether the proposal is in scope for xarray. 6. **Is this a usage question?** We prefer that usage questions are asked on StackOverflow with the [`python-xarray` tag](https://stackoverflow.com/questions/tagged/python-xarray) or as a [GitHub discussion topic](https://github.com/pydata/xarray/discussions). If itโ€™s easy to answer, feel free to link to the relevant documentation section, let them know that in the future this kind of question should be on StackOverflow, and close the issue. 7. **What labels and milestones should I add?** Apply the relevant labels. This is a bit of an art, and comes with experience. Look at similar issues to get a feel for how things are labeled. Labels used for labelling issues that relate to particular features or parts of the codebase normally have the form `topic-`. If the issue is clearly defined and the fix seems relatively straightforward, label the issue as `contrib-good-first-issue`. You can also remove the `needs triage` label that is automatically applied to all newly-opened issues. 8. **Where should the poster look to fix the issue?** If you can, it is very helpful to point to the approximate location in the codebase where a contributor might begin to fix the issue. This helps ease the way in for new contributors to the repository. 
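To make step 4 above concrete, here is a rough sketch of the shape a good minimal reproducible example usually takes once it has been distilled (the array contents and the final operation are purely illustrative rather than a real bug):

```python
# a minimal, self-contained reproducer -- all names and values are illustrative
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(6).reshape(2, 3),
    dims=("x", "y"),
    coords={"x": [10, 20], "y": [1, 2, 3]},
)

# the single operation that demonstrates the unexpected behaviour
result = da.sel(x=10).mean("y")
print(result)

xr.show_versions()  # so the report includes the environment details
```

A reproducer of roughly this size, with no external data files and a single clearly-marked failing operation, is usually enough for a maintainer to confirm the behaviour and start narrowing down the cause.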
## Code review and contributions As a core team member, you are a representative of the project, and trusted to make decisions that will serve the long term interests of all users. You also gain the responsibility of shepherding other contributors through the review process; here are some guidelines for how to do that. ### All contributors are treated the same You should now have gained the ability to merge or approve other contributors' pull requests. Merging contributions is a shared power: only merge contributions you yourself have carefully reviewed, and that are clear improvements for the project. When in doubt, and especially for more complex changes, wait until at least one other core team member has approved. (See [Reviewing](#reviewing) and especially [Merge Only Changes You Understand](#merge-only-changes-you-understand) below.) It should also be considered best practice to leave a reasonable (24hr) time window after approval before merge to ensure that other core team members have a reasonable chance to weigh in. Adding the `plan-to-merge` label notifies developers of the imminent merge. We are also an international community, with contributors from many different time zones, some of whom will only contribute during their working hours, others who might only be able to contribute during nights and weekends. It is important to be respectful of other peoples schedules and working habits, even if it slows the project down slightly - we are in this for the long run. In the same vein you also shouldn't feel pressured to be constantly available or online, and users or contributors who are overly demanding and unreasonable to the point of harassment will be directed to our [Code of Conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). We value sustainable development practices over mad rushes. When merging, we automatically use GitHub's [Squash and Merge](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/merging-a-pull-request#merging-a-pull-request) to ensure a clean git history. You should also continue to make your own pull requests as before and in accordance with the [general contributing guide](https://docs.xarray.dev/en/stable/contributing.html). These pull requests still require the approval of another core team member before they can be merged. ### How to conduct a good review _Always_ be kind to contributors. Contributors are often doing volunteer work, for which we are tremendously grateful. Provide constructive criticism on ideas and implementations, and remind yourself of how it felt when your own work was being evaluated as a novice. `xarray` strongly values mentorship in code review. New users often need more handholding, having little to no git experience. Repeat yourself liberally, and, if you donโ€™t recognize a contributor, point them to our development guide, or other GitHub workflow tutorials around the web. Do not assume that they know how GitHub works (many don't realize that adding a commit automatically updates a pull request, for example). Gentle, polite, kind encouragement can make the difference between a new core team member and an abandoned pull request. When reviewing, focus on the following: 1. **Usability and generality:** `xarray` is a user-facing package that strives to be accessible to both novice and advanced users, and new features should ultimately be accessible to everyone using the package. 
`xarray` targets the scientific user community broadly, and core features should be domain-agnostic and general purpose. Custom functionality is meant to be provided through our various types of interoperability. 2. **Performance and benchmarks:** As `xarray` targets scientific applications that often involve large multidimensional datasets, high performance is a key value of `xarray`. While every new feature won't scale equally to all sizes of data, keeping in mind performance and our [benchmarks](https://github.com/pydata/xarray/tree/main/asv_bench) during a review may be important, and you may need to ask for benchmarks to be run and reported or new benchmarks to be added. You can run the CI benchmarking suite on any PR by tagging it with the `run-benchmark` label. 3. **APIs and stability:** Coding users and developers will make extensive use of our APIs. The foundation of a healthy ecosystem will be a fully capable and stable set of APIs, so as `xarray` matures it will very important to ensure our APIs are stable. Spending the extra time to consider names of public facing variables and methods, alongside function signatures, could save us considerable trouble in the future. We do our best to provide [deprecation cycles](https://docs.xarray.dev/en/stable/contributing.html#backwards-compatibility) when making backwards-incompatible changes. 4. **Documentation and tutorials:** All new methods should have appropriate doc strings following [PEP257](https://peps.python.org/pep-0257/) and the [NumPy documentation guide](https://numpy.org/devdocs/dev/howto-docs.html#documentation-style). For any major new features, accompanying changes should be made to our [tutorials](https://tutorial.xarray.dev). These should not only illustrates the new feature, but explains it. 5. **Implementations and algorithms:** You should understand the code being modified or added before approving it. (See [Merge Only Changes You Understand](#merge-only-changes-you-understand) below.) Implementations should do what they claim and be simple, readable, and efficient in that order. 6. **Tests:** All contributions _must_ be tested, and each added line of code should be covered by at least one test. Good tests not only execute the code, but explore corner cases. It can be tempting not to review tests, but please do so. Other changes may be _nitpicky_: spelling mistakes, formatting, etc. Do not insist contributors make these changes, but instead you should offer to make these changes by [pushing to their branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/committing-changes-to-a-pull-request-branch-created-from-a-fork), or using GitHubโ€™s [suggestion](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/commenting-on-a-pull-request) [feature](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request), and be prepared to make them yourself if needed. Using the suggestion feature is preferred because it gives the contributor a choice in whether to accept the changes. Unless you know that a contributor is experienced with git, donโ€™t ask for a rebase when merge conflicts arise. Instead, rebase the branch yourself, force-push to their branch, and advise the contributor to force-pull. 
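For maintainers who do this less often, the rebase-and-force-push workflow described above might look roughly like the following sketch; the remote name `contributor`, the fork URL and the branch name `fix-issue-1234` are all hypothetical placeholders:

```sh
# hypothetical fork URL and branch name -- substitute the real PR branch
git remote add contributor https://github.com/contributor/xarray.git
git fetch contributor
git switch -c fix-issue-1234 contributor/fix-issue-1234
git rebase upstream/main            # resolve any conflicts here
git push --force-with-lease contributor fix-issue-1234
```

After the force-push, the contributor only needs to force-pull the branch to continue working on it.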
If the contributor is no longer active, you may take over their branch by submitting a new pull request and closing the original, including a reference to the original pull request. In doing so, ensure you communicate that you are not throwing the contributor's work away! If appropriate it is a good idea to acknowledge other contributions to the pull request using the `Co-authored-by` [syntax](https://docs.github.com/en/pull-requests/committing-changes-to-your-project/creating-and-editing-commits/creating-a-commit-with-multiple-authors) in the commit message. ### Merge only changes you understand _Long-term maintainability_ is an important concern. Code doesn't merely have to _work_, but should be _understood_ by multiple core developers. Changes will have to be made in the future, and the original contributor may have moved on. Therefore, _do not merge a code change unless you understand it_. Ask for help freely: we can consult community members, or even external developers, for added insight where needed, and see this as a great learning opportunity. While we collectively "own" any patches (and bugs!) that become part of the code base, you are vouching for changes you merge. Please take that responsibility seriously. Feel free to ping other active maintainers with any questions you may have. ## Further resources As a core member, you should be familiar with community and developer resources such as: - Our [contributor guide](https://docs.xarray.dev/en/stable/contributing.html). - Our [code of conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). - Our [philosophy and development roadmap](https://docs.xarray.dev/en/stable/roadmap.html). - [PEP8](https://peps.python.org/pep-0008/) for Python style. - [PEP257](https://peps.python.org/pep-0257/) and the [NumPy documentation guide](https://numpy.org/devdocs/dev/howto-docs.html#documentation-style) for docstring conventions. - [`pre-commit`](https://pre-commit.com) hooks for autoformatting. - [`ruff`](https://github.com/astral-sh/ruff) autoformatting and linting. - [python-xarray](https://stackoverflow.com/questions/tagged/python-xarray) on Stack Overflow. - [@xarray_dev](https://x.com/xarray_dev) on X. - [xarray-dev](https://discord.gg/bsSGdwBn) discord community (normally only used for remote synchronous chat during sprints). You are not required to monitor any of the social resources. Where possible we prefer to point people towards asynchronous forms of communication like github issues instead of realtime chat options as they are far easier for a global community to consume and refer back to. We hold a [bi-weekly developers meeting](https://docs.xarray.dev/en/stable/developers-meeting.html) via video call. This is a great place to bring up any questions you have, raise visibility of an issue and/or gather more perspectives. Attendance is absolutely optional, and we keep the meeting to 30 minutes in respect of your valuable time. This meeting is public, so we occasionally have non-core team members join us. We also have a private mailing list for core team members `xarray-core-team@googlegroups.com` which is sparingly used for discussions that are required to be private, such as nominating new core members and discussing financial issues. ## Inviting new core members Any core member may nominate other contributors to join the core team. 
While there is no hard-and-fast rule about who can be nominated, ideally, they should have: been part of the project for at least two months, contributed significant changes of their own, contributed to the discussion and review of others' work, and collaborated in a way befitting our community values. **We strongly encourage nominating anyone who has made significant non-code contributions to the Xarray community in any way**. After nomination voting will happen on a private mailing list. While it is expected that most votes will be unanimous, a two-thirds majority of the cast votes is enough. Core team members can choose to become emeritus core team members and suspend their approval and voting rights until they become active again. ## Contribute to this guide (!) This guide reflects the experience of the current core team members. We may well have missed things that, by now, have become second natureโ€”things that you, as a new team member, will spot more easily. Please ask the other core team members if you have any questions, and submit a pull request with insights gained. ## Conclusion We are excited to have you on board! We look forward to your contributions to the code base and the community. Thank you in advance! xarray-2025.12.0/DATATREE_MIGRATION_GUIDE.md000066400000000000000000000140011511464676000174000ustar00rootroot00000000000000# Migration guide for users of `xarray-contrib/datatree` _15th October 2024_ This guide is for previous users of the prototype `datatree.DataTree` class in the `xarray-contrib/datatree repository`. That repository has now been archived, and will not be maintained. This guide is intended to help smooth your transition to using the new, updated `xarray.DataTree` class. > [!IMPORTANT] > There are breaking changes! You should not expect that code written with `xarray-contrib/datatree` will work without any modifications. At the absolute minimum you will need to change the top-level import statement, but there are other changes too. We have made various changes compared to the prototype version. These can be split into three categories: data model changes, which affect the hierarchal structure itself; integration with xarray's IO backends; and minor API changes, which mostly consist of renaming methods to be more self-consistent. ### Data model changes The most important changes made are to the data model of `DataTree`. Whilst previously data in different nodes was unrelated and therefore unconstrained, now trees have "internal alignment" - meaning that dimensions and indexes in child nodes must exactly align with those in their parents. These alignment checks happen at tree construction time, meaning there are some netCDF4 files and zarr stores that could previously be opened as `datatree.DataTree` objects using `datatree.open_datatree`, but now cannot be opened as `xr.DataTree` objects using `xr.open_datatree`. For these cases we added a new opener function `xr.open_groups`, which returns a `dict[str, Dataset]`. This is intended as a fallback for tricky cases, where the idea is that you can still open the entire contents of the file using `open_groups`, edit the `Dataset` objects, then construct a valid tree from the edited dictionary using `DataTree.from_dict`. The alignment checks allowed us to add "Coordinate Inheritance", a much-requested feature where indexed coordinate variables are now "inherited" down to child nodes. This allows you to define common coordinates in a parent group that are then automatically available on every child node. 
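As a rough illustration of the `open_groups` fallback described above (the file name `"data.nc"`, the group path and the particular edit applied are all hypothetical):

```python
# a minimal sketch, assuming "data.nc" contains a child group whose dimensions
# do not align with its parent and so cannot be opened directly as a DataTree
import xarray as xr

groups = xr.open_groups("data.nc")  # dict mapping group paths to Datasets
# edit the offending group so that it aligns with its parent ...
groups["/child"] = groups["/child"].rename({"t": "time"})
# ... then build a valid tree from the edited dictionary
tree = xr.DataTree.from_dict(groups)
```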
The distinction between a locally-defined coordinate variables and an inherited coordinate that was defined on a parent node is reflected in the `DataTree.__repr__`. Generally if you prefer not to have these variables be inherited you can get more similar behaviour to the old `datatree` package by removing indexes from coordinates, as this prevents inheritance. Tree structure checks between multiple trees (i.e., `DataTree.isomorophic`) and pairing of nodes in arithmetic has also changed. Nodes are now matched (with `xarray.group_subtrees`) based on their relative paths, without regard to the order in which child nodes are defined. For further documentation see the page in the user guide on Hierarchical Data. ### Integrated backends Previously `datatree.open_datatree` used a different codepath from `xarray.open_dataset`, and was hard-coded to only support opening netCDF files and Zarr stores. Now xarray's backend entrypoint system has been generalized to include `open_datatree` and the new `open_groups`. This means we can now extend other xarray backends to support `open_datatree`! If you are the maintainer of an xarray backend we encourage you to add support for `open_datatree` and `open_groups`! Additionally: - A `group` kwarg has been added to `open_datatree` for choosing which group in the file should become the root group of the created tree. - Various performance improvements have been made, which should help when opening netCDF files and Zarr stores with large numbers of groups. - We anticipate further performance improvements being possible for datatree IO. ### API changes A number of other API changes have been made, which should only require minor modifications to your code: - The top-level import has changed, from `from datatree import DataTree, open_datatree` to `from xarray import DataTree, open_datatree`. Alternatively you can now just use the `import xarray as xr` namespace convention for everything datatree-related. - The `DataTree.ds` property has been changed to `DataTree.dataset`, though `DataTree.ds` remains as an alias for `DataTree.dataset`. - Similarly the `ds` kwarg in the `DataTree.__init__` constructor has been replaced by `dataset`, i.e. use `DataTree(dataset=)` instead of `DataTree(ds=...)`. - The method `DataTree.to_dataset()` still exists but now has different options for controlling which variables are present on the resulting `Dataset`, e.g. `inherit=True/False`. - `DataTree.copy()` also has a new `inherit` keyword argument for controlling whether or not coordinates defined on parents are copied (only relevant when copying a non-root node). - The `DataTree.parent` property is now read-only. To assign an ancestral relationship directly you must instead use the `.children` property on the parent node, which remains settable. - Similarly the `parent` kwarg has been removed from the `DataTree.__init__` constructor. - DataTree objects passed to the `children` kwarg in `DataTree.__init__` are now shallow-copied. - `DataTree.map_over_subtree` has been renamed to `DataTree.map_over_datasets`, and changed to no longer work like a decorator. Instead you use it to apply the function and arguments directly, more like how `xarray.apply_ufunc` works. - `DataTree.as_array` has been replaced by `DataTree.to_dataarray`. - A number of methods which were not well tested have been (temporarily) disabled. In general we have tried to only keep things that are known to work, with the plan to increase API surface incrementally after release. ## Thank you! 
Thank you for trying out `xarray-contrib/datatree`! We welcome contributions of any kind, including good ideas that never quite made it into the original datatree repository. Please also let us know if we have forgotten to mention a change that should have been listed in this guide. Sincerely, the datatree team: Tom Nicholas, Owen Littlejohns, Matt Savoie, Eni Awowale, Alfonso Ladino, Justus Magin, Stephan Hoyer xarray-2025.12.0/HOW_TO_RELEASE.md000066400000000000000000000114261511464676000160700ustar00rootroot00000000000000# How to issue an xarray release in 16 easy steps Time required: about an hour. These instructions assume that `upstream` refers to the main repository: ```sh $ git remote -v {...} upstream https://github.com/pydata/xarray (fetch) upstream https://github.com/pydata/xarray (push) ``` 1. Ensure your main branch is synced to upstream: ```sh git switch main git pull upstream main ``` 2. Add a list of contributors. First fetch all previous release tags so we can see the version number of the last release was: ```sh git fetch upstream --tags ``` Then run ```sh python ci/release_contributors.py ``` (needs `gitpython` and `toolz` / `cytoolz`) and copy the output. 3. Write a release summary: ~50 words describing the high level features. This will be used in the release emails, tweets, GitHub release notes, etc. 4. Look over whats-new.rst and the docs. Make sure "What's New" is complete (check the date!) and add the release summary at the top. Things to watch out for: - Important new features should be highlighted towards the top. - Function/method references should include links to the API docs. - Sometimes notes get added in the wrong section of whats-new, typically due to a bad merge. Check for these before a release by using git diff, e.g., `git diff v{YYYY.MM.X-1} whats-new.rst` where {YYYY.MM.X-1} is the previous release. 5. Open a PR with the release summary and whatsnew changes; in particular the release headline should get feedback from the team on what's important to include. Apply the `Release` label to the PR to trigger a test build action. 6. After merging, again ensure your main branch is synced to upstream: ```sh git switch main git pull upstream main ``` 7. If you have any doubts, run the full test suite one final time! ```sh pytest ``` 8. Check that the [ReadTheDocs build](https://readthedocs.org/projects/xray/) is passing on the `latest` build version (which is built from the `main` branch). 9. Issue the release on GitHub. Click on "Draft a new release" at . Type in the version number (with a "v") and paste the release summary in the notes. 10. This should automatically trigger an upload of the new build to PyPI via GitHub Actions. Check this has run [here](https://github.com/pydata/xarray/actions/workflows/pypi-release.yaml), and that the version number you expect is displayed [on PyPI](https://pypi.org/project/xarray/) 11. Add a section for the next release {YYYY.MM.X+1} to doc/whats-new.rst (we avoid doing this earlier so that it doesn't show up in the RTD build): ```rst .. _whats-new.YYYY.MM.X+1: vYYYY.MM.X+1 (unreleased) ----------------------- New Features ~~~~~~~~~~~~ Breaking Changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug Fixes ~~~~~~~~~ Documentation ~~~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ ``` 12. Make a PR with these changes and merge it: ```sh git checkout -b empty-whatsnew-YYYY.MM.X+1 git commit -am "empty whatsnew" git push ``` (Note that repo branch restrictions prevent pushing to `main`, so you have to just-self-merge this.) 13. 
Consider updating the version available on pyodide: - Open the PyPI page for [Xarray downloads](https://pypi.org/project/xarray/#files) - Edit [`packages/xarray/meta.yaml`](https://github.com/pyodide/pyodide-recipes/blob/main/packages/xarray/meta.yaml) to update the - version number - link to the wheel (under "Built Distribution" on the PyPI page) - SHA256 hash (Click "Show Hashes" next to the link to the wheel) - Open a pull request to pyodide-recipes 14. Issue the release announcement to mailing lists & Twitter (X). For bug fix releases, I usually only email xarray@googlegroups.com. For major/feature releases, I will email a broader list (no more than once every 3-6 months): - pydata@googlegroups.com - xarray@googlegroups.com - numpy-discussion@scipy.org - scipy-user@scipy.org - pyaos@lists.johnny-lin.com Google search will turn up examples of prior release announcements (look for "ANN xarray"). Some of these groups require you to be subscribed in order to email them. ## Note on version numbering As of 2022.03.0, we utilize the [CALVER](https://calver.org/) version system. Specifically, we have adopted the pattern `YYYY.MM.X`, where `YYYY` is a 4-digit year (e.g. `2022`), `0M` is a 2-digit zero-padded month (e.g. `01` for January), and `X` is the release number (starting at zero at the start of each month and incremented once for each additional release). xarray-2025.12.0/LICENSE000066400000000000000000000240341511464676000143530ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2014-2024 xarray Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
xarray-2025.12.0/README.md000066400000000000000000000224331511464676000146260ustar00rootroot00000000000000# xarray: N-D labeled arrays and datasets [![Xarray](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydata/xarray/refs/heads/main/doc/badge.json)](https://xarray.dev) [![Powered by Pixi](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/prefix-dev/pixi/main/assets/badge/v0.json)](https://pixi.sh) [![CI](https://github.com/pydata/xarray/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/pydata/xarray/actions/workflows/ci.yaml?query=branch%3Amain) [![Code coverage](https://codecov.io/gh/pydata/xarray/branch/main/graph/badge.svg?flag=unittests)](https://codecov.io/gh/pydata/xarray) [![Docs](https://readthedocs.org/projects/xray/badge/?version=latest)](https://docs.xarray.dev/) [![Benchmarked with asv](https://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat)](https://asv-runner.github.io/asv-collection/xarray/) [![Formatted with black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black) [![Checked with mypy](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/) [![Available on pypi](https://img.shields.io/pypi/v/xarray.svg)](https://pypi.python.org/pypi/xarray/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/xarray)](https://pypistats.org/packages/xarray) [![Conda - Downloads](https://img.shields.io/conda/dn/anaconda/xarray?label=conda%7Cdownloads)](https://anaconda.org/anaconda/xarray) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.598201.svg)](https://doi.org/10.5281/zenodo.598201) [![Examples on binder](https://img.shields.io/badge/launch-binder-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0
d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC)](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/weather-data.ipynb) [![Twitter](https://img.shields.io/twitter/follow/xarray_dev?style=social)](https://x.com/xarray_dev) **xarray** (pronounced "ex-array", formerly known as **xray**) is an open source project and Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! Xarray introduces labels in the form of dimensions, coordinates and attributes on top of raw [NumPy](https://www.numpy.org)-like arrays, which allows for a more intuitive, more concise, and less error-prone developer experience. The package includes a large and growing library of domain-agnostic functions for advanced analytics and visualization with these data structures. Xarray was inspired by and borrows heavily from [pandas](https://pandas.pydata.org), the popular data analysis package focused on labelled tabular data. It is particularly tailored to working with [netCDF](https://www.unidata.ucar.edu/software/netcdf) files, which were the source of xarray\'s data model, and integrates tightly with [dask](https://dask.org) for parallel computing. ## Why xarray? Multi-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called "tensors") are an essential part of computational science. They are encountered in a wide range of fields, including physics, astronomy, geoscience, bioinformatics, engineering, finance, and deep learning. In Python, [NumPy](https://www.numpy.org) provides the fundamental data structure and API for working with raw ND arrays. However, real-world datasets are usually more than just raw numbers; they have labels which encode information about how the array values map to locations in space, time, etc. Xarray doesn\'t just keep track of labels on arrays \-- it uses them to provide a powerful and concise interface. For example: - Apply operations over dimensions by name: `x.sum('time')`. - Select values by label instead of integer location: `x.loc['2014-01-01']` or `x.sel(time='2014-01-01')`. - Mathematical operations (e.g., `x - y`) vectorize across multiple dimensions (array broadcasting) based on dimension names, not shape. - Flexible split-apply-combine operations with groupby: `x.groupby('time.dayofyear').mean()`. - Database like alignment based on coordinate labels that smoothly handles missing values: `x, y = xr.align(x, y, join='outer')`. - Keep track of arbitrary metadata in the form of a Python dictionary: `x.attrs`. ## Documentation Learn more about xarray in its official documentation at . Try out an [interactive Jupyter notebook](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/weather-data.ipynb). ## Contributing You can find information about contributing to xarray at our [Contributing page](https://docs.xarray.dev/en/stable/contributing.html). ## Get in touch - Ask usage questions ("How do I?") on [GitHub Discussions](https://github.com/pydata/xarray/discussions). 
- Report bugs, suggest features or view the source code [on GitHub](https://github.com/pydata/xarray). - For less well defined questions or ideas, or to announce other projects of interest to xarray users, use the [mailing list](https://groups.google.com/forum/#!forum/xarray). ## NumFOCUS Xarray is a fiscally sponsored project of [NumFOCUS](https://numfocus.org), a nonprofit dedicated to supporting the open source scientific computing community. If you like Xarray and want to support our mission, please consider making a [donation](https://numfocus.org/donate-to-xarray) to support our efforts. ## History Xarray is an evolution of an internal tool developed at [The Climate Corporation](https://climate.com/). It was originally written by Climate Corp researchers Stephan Hoyer, Alex Kleeman and Eugene Brevdo and was released as open source in May 2014. The project was renamed from "xray" in January 2016. Xarray became a fiscally sponsored project of [NumFOCUS](https://numfocus.org) in August 2018. ## Contributors Thanks to our many contributors! [![Contributors](https://contrib.rocks/image?repo=pydata/xarray)](https://github.com/pydata/xarray/graphs/contributors) ## License Copyright 2014-2024, xarray Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Xarray bundles portions of pandas, NumPy and Seaborn, all of which are available under a "3-clause BSD" license: - pandas: `setup.py`, `xarray/util/print_versions.py` - NumPy: `xarray/core/npcompat.py` - Seaborn: `_determine_cmap_params` in `xarray/core/plot/utils.py` Xarray also bundles portions of CPython, which is available under the "Python Software Foundation License" in `xarray/core/pycompat.py`. Xarray uses icons from the icomoon package (free version), which is available under the "CC BY 4.0" license. The full text of these licenses are included in the licenses directory. xarray-2025.12.0/asv_bench/000077500000000000000000000000001511464676000152735ustar00rootroot00000000000000xarray-2025.12.0/asv_bench/asv.conf.json000066400000000000000000000142521511464676000177070ustar00rootroot00000000000000{ // The version of the config file format. Do not change, unless // you know what you are doing. "version": 1, // The name of the project being benchmarked "project": "xarray", // The project's homepage "project_url": "https://docs.xarray.dev/", // The URL or local path of the source code repository for the // project being benchmarked "repo": "..", // List of branches to benchmark. If not provided, defaults to "master" // (for git) or "default" (for mercurial). "branches": ["main"], // for git // "branches": ["default"], // for mercurial // The DVCS being used. If not set, it will be automatically // determined from "repo" by looking at the protocol in the URL // (if remote), or by looking for special directories, such as // ".git" (if local). "dvcs": "git", // The tool to use to create environments. May be "conda", // "virtualenv" or other value depending on the plugins in use. // If missing or the empty string, the tool will be automatically // determined by looking for tools on the PATH environment // variable. 
"environment_type": "rattler", "conda_channels": ["conda-forge"], // timeout in seconds for installing any dependencies in environment // defaults to 10 min "install_timeout": 600, // the base URL to show a commit for the project. "show_commit_url": "https://github.com/pydata/xarray/commit/", // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. "pythons": ["3.11"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty // list or empty string indicates to just test against the default // (latest) version. null indicates that the package is to not be // installed. If the package to be tested is only available from // PyPi, and the 'environment_type' is conda, then you can preface // the package name by 'pip+', and the package will be installed via // pip (with all the conda available packages installed first, // followed by the pip installed packages). // // "matrix": { // "numpy": ["1.6", "1.7"], // "six": ["", null], // test with and without six installed // "pip+emcee": [""], // emcee is only available for install with pip. // }, "matrix": { "setuptools_scm": [""], // GH6609 "numpy": ["2.2"], "pandas": [""], "netcdf4": [""], "scipy": [""], "bottleneck": [""], "dask": [""], "distributed": [""], "flox": [""], "numpy_groupies": [""], "sparse": [""], "cftime": [""] }, // fix for bad builds // https://github.com/airspeed-velocity/asv/issues/1389#issuecomment-2076131185 "build_command": [ "python -m build", "python -m pip wheel --no-deps --no-build-isolation --no-index -w {build_cache_dir} {build_dir}" ], // Combinations of libraries/python versions can be excluded/included // from the set to test. Each entry is a dictionary containing additional // key-value pairs to include/exclude. // // An exclude entry excludes entries where all values match. The // values are regexps that should match the whole string. // // An include entry adds an environment. Only the packages listed // are installed. The 'python' key is required. The exclude rules // do not apply to includes. // // In addition to package names, the following keys are available: // // - python // Python version, as in the *pythons* variable above. // - environment_type // Environment type, as above. // - sys_platform // Platform, as in sys.platform. Possible values for the common // cases: 'linux2', 'win32', 'cygwin', 'darwin'. // // "exclude": [ // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows // {"environment_type": "conda", "six": null}, // don't run without six on conda // ], // // "include": [ // // additional env for python2.7 // {"python": "2.7", "numpy": "1.8"}, // // additional env if run on windows+conda // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, // ], // The directory (relative to the current directory) that benchmarks are // stored in. If not provided, defaults to "benchmarks" "benchmark_dir": "benchmarks", // The directory (relative to the current directory) to cache the Python // environments in. If not provided, defaults to "env" "env_dir": ".asv/env", // The directory (relative to the current directory) that raw benchmark // results are stored in. If not provided, defaults to "results". "results_dir": ".asv/results", // The directory (relative to the current directory) that the html tree // should be written to. If not provided, defaults to "html". 
"html_dir": ".asv/html" // The number of characters to retain in the commit hashes. // "hash_length": 8, // `asv` will cache wheels of the recent builds in each // environment, making them faster to install next time. This is // number of builds to keep, per environment. // "wheel_cache_size": 0 // The commits after which the regression search in `asv publish` // should start looking for regressions. Dictionary whose keys are // regexps matching to benchmark names, and values corresponding to // the commit (exclusive) after which to start looking for // regressions. The default is to start from the first commit // with results. If the commit is `null`, regression detection is // skipped for the matching benchmark. // // "regressions_first_commits": { // "some_benchmark": "352cdf", // Consider regressions only after this commit // "another_benchmark": null, // Skip regression detection altogether // } // The thresholds for relative change in results, after which `asv // publish` starts reporting regressions. Dictionary of the same // form as in ``regressions_first_commits``, with values // indicating the thresholds. If multiple entries match, the // maximum is taken. If no entry matches, the default is 5%. // // "regressions_thresholds": { // "some_benchmark": 0.01, // Threshold of 1% // "another_benchmark": 0.5, // Threshold of 50% // } } xarray-2025.12.0/asv_bench/benchmarks/000077500000000000000000000000001511464676000174105ustar00rootroot00000000000000xarray-2025.12.0/asv_bench/benchmarks/README_CI.md000066400000000000000000000174561511464676000212570ustar00rootroot00000000000000# Benchmark CI ## How it works The `asv` suite can be run for any PR on GitHub Actions (check workflow `.github/workflows/benchmarks.yml`) by adding a `run-benchmark` label to said PR. This will trigger a job that will run the benchmarking suite for the current PR head (merged commit) against the PR base (usually `main`). We use `asv continuous` to run the job, which runs a relative performance measurement. This means that there's no state to be saved and that regressions are only caught in terms of performance ratio (absolute numbers are available but they are not useful since we do not use stable hardware over time). `asv continuous` will: - Compile `scikit-image` for _both_ commits. We use `ccache` to speed up the process, and `mamba` is used to create the build environments. - Run the benchmark suite for both commits, _twice_ (since `processes=2` by default). - Generate a report table with performance ratios: - `ratio=1.0` -> performance didn't change. - `ratio<1.0` -> PR made it slower. - `ratio>1.0` -> PR made it faster. Due to the sensitivity of the test, we cannot guarantee that false positives are not produced. In practice, values between `(0.7, 1.5)` are to be considered part of the measurement noise. When in doubt, running the benchmark suite one more time will provide more information about the test being a false positive or not. ## Running the benchmarks on GitHub Actions 1. On a PR, add the label `run-benchmark`. 2. The CI job will be started. Checks will appear in the usual dashboard panel above the comment box. 3. If more commits are added, the label checks will be grouped with the last commit checks _before_ you added the label. 4. Alternatively, you can always go to the `Actions` tab in the repo and [filter for `workflow:Benchmark`](https://github.com/scikit-image/scikit-image/actions?query=workflow%3ABenchmark). 
Your username will be assigned to the `actor` field, so you can also filter the results with that if you need it. ## The artifacts The CI job will also generate an artifact. This is the `.asv/results` directory compressed in a zip file. Its contents include: - `fv-xxxxx-xx/`. A directory for the machine that ran the suite. It contains three files: - `.json`, `.json`: the benchmark results for each commit, with stats. - `machine.json`: details about the hardware. - `benchmarks.json`: metadata about the current benchmark suite. - `benchmarks.log`: the CI logs for this run. - This README. ## Re-running the analysis Although the CI logs should be enough to get an idea of what happened (check the table at the end), one can use `asv` to run the analysis routines again. 1. Uncompress the artifact contents in the repo, under `.asv/results`. This is, you should see `.asv/results/benchmarks.log`, not `.asv/results/something_else/benchmarks.log`. Write down the machine directory name for later. 2. Run `asv show` to see your available results. You will see something like this: ``` $> asv show Commits with results: Machine : Jaimes-MBP Environment: conda-py3.9-cython-numpy1.20-scipy 00875e67 Machine : fv-az95-499 Environment: conda-py3.7-cython-numpy1.17-pooch-scipy 8db28f02 3a305096 ``` 3. We are interested in the commits for `fv-az95-499` (the CI machine for this run). We can compare them with `asv compare` and some extra options. `--sort ratio` will show largest ratios first, instead of alphabetical order. `--split` will produce three tables: improved, worsened, no changes. `--factor 1.5` tells `asv` to only complain if deviations are above a 1.5 ratio. `-m` is used to indicate the machine ID (use the one you wrote down in step 1). Finally, specify your commit hashes: baseline first, then contender! ``` $> asv compare --sort ratio --split --factor 1.5 -m fv-az95-499 8db28f02 3a305096 Benchmarks that have stayed the same: before after ratio [8db28f02] [3a305096] n/a n/a n/a benchmark_restoration.RollingBall.time_rollingball_ndim 1.23ยฑ0.04ms 1.37ยฑ0.1ms 1.12 benchmark_transform_warp.WarpSuite.time_to_float64(, 128, 3) 5.07ยฑ0.1ฮผs 5.59ยฑ0.4ฮผs 1.10 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (192, 192, 192), (192, 192, 192)) 1.23ยฑ0.02ms 1.33ยฑ0.1ms 1.08 benchmark_transform_warp.WarpSuite.time_same_type(, 128, 3) 9.45ยฑ0.2ms 10.1ยฑ0.5ms 1.07 benchmark_rank.Rank3DSuite.time_3d_filters('majority', (32, 32, 32)) 23.0ยฑ0.9ms 24.6ยฑ1ms 1.07 benchmark_interpolation.InterpolationResize.time_resize((80, 80, 80), 0, 'symmetric', , True) 38.7ยฑ1ms 41.1ยฑ1ms 1.06 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (2048, 2048), (192, 192, 192)) 4.97ยฑ0.2ฮผs 5.24ยฑ0.2ฮผs 1.05 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (2048, 2048), (2048, 2048)) 4.21ยฑ0.2ms 4.42ยฑ0.3ms 1.05 benchmark_rank.Rank3DSuite.time_3d_filters('gradient', (32, 32, 32)) ... ``` If you want more details on a specific test, you can use `asv show`. 
Use `-b pattern` to filter which tests to show, and then specify a commit hash to inspect: ``` $> asv show -b time_to_float64 8db28f02 Commit: 8db28f02 benchmark_transform_warp.WarpSuite.time_to_float64 [fv-az95-499/conda-py3.7-cython-numpy1.17-pooch-scipy] ok =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ -- N / order --------------- -------------------------------------------------------------------------------------------------------------- dtype_in 128 / 0 128 / 1 128 / 3 1024 / 0 1024 / 1 1024 / 3 4096 / 0 4096 / 1 4096 / 3 =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ numpy.uint8 2.56ยฑ0.09ms 523ยฑ30ฮผs 1.28ยฑ0.05ms 130ยฑ3ms 28.7ยฑ2ms 81.9ยฑ3ms 2.42ยฑ0.01s 659ยฑ5ms 1.48ยฑ0.01s numpy.uint16 2.48ยฑ0.03ms 530ยฑ10ฮผs 1.28ยฑ0.02ms 130ยฑ1ms 30.4ยฑ0.7ms 81.1ยฑ2ms 2.44ยฑ0s 653ยฑ3ms 1.47ยฑ0.02s numpy.float32 2.59ยฑ0.1ms 518ยฑ20ฮผs 1.27ยฑ0.01ms 127ยฑ3ms 26.6ยฑ1ms 74.8ยฑ2ms 2.50ยฑ0.01s 546ยฑ10ms 1.33ยฑ0.02s numpy.float64 2.48ยฑ0.04ms 513ยฑ50ฮผs 1.23ยฑ0.04ms 134ยฑ3ms 30.7ยฑ2ms 85.4ยฑ2ms 2.55ยฑ0.01s 632ยฑ4ms 1.45ยฑ0.01s =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ started: 2021-07-06 06:14:36, duration: 1.99m ``` ## Other details ### Skipping slow or demanding tests To minimize the time required to run the full suite, we trimmed the parameter matrix in some cases and, in others, directly skipped tests that ran for too long or require too much memory. Unlike `pytest`, `asv` does not have a notion of marks. However, you can `raise NotImplementedError` in the setup step to skip a test. In that vein, a new private function is defined at `benchmarks.__init__`: `_skip_slow`. This will check if the `ASV_SKIP_SLOW` environment variable has been defined. If set to `1`, it will raise `NotImplementedError` and skip the test. To implement this behavior in other tests, you can add the following attribute: ```python from . import _skip_slow # this function is defined in benchmarks.__init__ def time_something_slow(): pass time_something.setup = _skip_slow ``` xarray-2025.12.0/asv_bench/benchmarks/__init__.py000066400000000000000000000032271511464676000215250ustar00rootroot00000000000000import itertools import os import numpy as np _counter = itertools.count() def parameterized(names, params): def decorator(func): func.param_names = names func.params = params return func return decorator def requires_dask(): try: import dask # noqa: F401 except ImportError as err: raise NotImplementedError() from err def requires_sparse(): try: import sparse # noqa: F401 except ImportError as err: raise NotImplementedError() from err def randn(shape, frac_nan=None, chunks=None, seed=0): rng = np.random.default_rng(seed) if chunks is None: x = rng.standard_normal(shape) else: import dask.array as da rng = da.random.default_rng(seed) x = rng.standard_normal(shape, chunks=chunks) if frac_nan is not None: inds = rng.choice(range(x.size), int(x.size * frac_nan)) x.flat[inds] = np.nan return x def randint(low, high=None, size=None, frac_minus=None, seed=0): rng = np.random.default_rng(seed) x = rng.integers(low, high, size) if frac_minus is not None: inds = rng.choice(range(x.size), int(x.size * frac_minus)) x.flat[inds] = -1 return x def _skip_slow(): """ Use this function to skip slow or highly demanding tests. Use it as a `Class.setup` method or a `function.setup` attribute. 
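    Skipping is controlled by the `ASV_SKIP_SLOW` environment variable: when it
    is set to "1", this raises NotImplementedError, which asv reports as a
    skipped benchmark instead of a failure.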
Examples -------- >>> from . import _skip_slow >>> def time_something_slow(): ... pass ... >>> time_something.setup = _skip_slow """ if os.environ.get("ASV_SKIP_SLOW", "0") == "1": raise NotImplementedError("Skipping this test...") xarray-2025.12.0/asv_bench/benchmarks/accessors.py000066400000000000000000000011721511464676000217500ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized NTIME = 365 * 30 @parameterized(["calendar"], [("standard", "noleap")]) class DateTimeAccessor: def setup(self, calendar): np.random.randn(NTIME) time = xr.date_range("2000", periods=30 * 365, calendar=calendar) data = np.ones((NTIME,)) self.da = xr.DataArray(data, dims="time", coords={"time": time}) def time_dayofyear(self, calendar): _ = self.da.time.dt.dayofyear def time_year(self, calendar): _ = self.da.time.dt.year def time_floor(self, calendar): _ = self.da.time.dt.floor("D") xarray-2025.12.0/asv_bench/benchmarks/alignment.py000066400000000000000000000031601511464676000217400ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized, requires_dask ntime = 365 * 30 nx = 50 ny = 50 rng = np.random.default_rng(0) class Align: def setup(self, *args, **kwargs): data = rng.standard_normal((ntime, nx, ny)) self.ds = xr.Dataset( {"temperature": (("time", "x", "y"), data)}, coords={ "time": xr.date_range("2000", periods=ntime), "x": np.arange(nx), "y": np.arange(ny), }, ) self.year = self.ds.time.dt.year self.idx = np.unique(rng.integers(low=0, high=ntime, size=ntime // 2)) self.year_subset = self.year.isel(time=self.idx) @parameterized(["join"], [("outer", "inner", "left", "right", "exact", "override")]) def time_already_aligned(self, join): xr.align(self.ds, self.year, join=join) @parameterized(["join"], [("outer", "inner", "left", "right")]) def time_not_aligned(self, join): xr.align(self.ds, self.year[-100:], join=join) @parameterized(["join"], [("outer", "inner", "left", "right")]) def time_not_aligned_random_integers(self, join): xr.align(self.ds, self.year_subset, join=join) class AlignCFTime(Align): def setup(self, *args, **kwargs): super().setup() self.ds["time"] = xr.date_range("2000", periods=ntime, calendar="noleap") self.year = self.ds.time.dt.year self.year_subset = self.year.isel(time=self.idx) class AlignDask(Align): def setup(self, *args, **kwargs): requires_dask() super().setup() self.ds = self.ds.chunk({"time": 100}) xarray-2025.12.0/asv_bench/benchmarks/coarsen.py000066400000000000000000000031371511464676000214200ustar00rootroot00000000000000import numpy as np import xarray as xr from . 
import randn # Sizes chosen to test padding optimization nx_padded = 4003 # Not divisible by 10 - requires padding ny_padded = 4007 # Not divisible by 10 - requires padding nx_exact = 4000 # Divisible by 10 - no padding needed ny_exact = 4000 # Divisible by 10 - no padding needed window = 10 class Coarsen: def setup(self, *args, **kwargs): # Case 1: Requires padding on both dimensions self.da_padded = xr.DataArray( randn((nx_padded, ny_padded)), dims=("x", "y"), coords={"x": np.arange(nx_padded), "y": np.arange(ny_padded)}, ) # Case 2: No padding required self.da_exact = xr.DataArray( randn((nx_exact, ny_exact)), dims=("x", "y"), coords={"x": np.arange(nx_exact), "y": np.arange(ny_exact)}, ) def time_coarsen_with_padding(self): """Coarsen 2D array where both dimensions require padding.""" self.da_padded.coarsen(x=window, y=window, boundary="pad").mean() def time_coarsen_no_padding(self): """Coarsen 2D array where dimensions are exact multiples (no padding).""" self.da_exact.coarsen(x=window, y=window, boundary="pad").mean() def peakmem_coarsen_with_padding(self): """Peak memory for coarsening with padding on both dimensions.""" self.da_padded.coarsen(x=window, y=window, boundary="pad").mean() def peakmem_coarsen_no_padding(self): """Peak memory for coarsening without padding.""" self.da_exact.coarsen(x=window, y=window, boundary="pad").mean() xarray-2025.12.0/asv_bench/benchmarks/coding.py000066400000000000000000000010101511464676000212150ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized @parameterized(["calendar"], [("standard", "noleap")]) class EncodeCFDatetime: def setup(self, calendar): self.units = "days since 2000-01-01" self.dtype = np.dtype("int64") self.times = xr.date_range( "2000", freq="D", periods=10000, calendar=calendar ).values def time_encode_cf_datetime(self, calendar): xr.coding.times.encode_cf_datetime(self.times, self.units, calendar, self.dtype) xarray-2025.12.0/asv_bench/benchmarks/combine.py000066400000000000000000000054211511464676000214000ustar00rootroot00000000000000import numpy as np import xarray as xr from . 
import requires_dask class Concat1d: """Benchmark concatenating large datasets""" def setup(self) -> None: self.data_arrays = [ xr.DataArray(data=np.zeros(4 * 1024 * 1024, dtype=np.int8), dims=["x"]) for _ in range(10) ] def time_concat(self) -> None: xr.concat(self.data_arrays, dim="x") def peakmem_concat(self) -> None: xr.concat(self.data_arrays, dim="x") class Combine1d: """Benchmark concatenating and merging large datasets""" def setup(self) -> None: """Create 2 datasets with two different variables""" t_size = 8000 t = np.arange(t_size) data = np.random.randn(t_size) self.dsA0 = xr.Dataset({"A": xr.DataArray(data, coords={"T": t}, dims=("T"))}) self.dsA1 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))} ) def time_combine_by_coords(self) -> None: """Also has to load and arrange t coordinate""" datasets = [self.dsA0, self.dsA1] xr.combine_by_coords(datasets) class Combine1dDask(Combine1d): """Benchmark concatenating and merging large datasets""" def setup(self) -> None: """Create 2 datasets with two different variables""" requires_dask() t_size = 8000 t = np.arange(t_size) var = xr.Variable(dims=("T",), data=np.random.randn(t_size)).chunk() data_vars = {f"long_name_{v}": ("T", var) for v in range(500)} self.dsA0 = xr.Dataset(data_vars, coords={"T": t}) self.dsA1 = xr.Dataset(data_vars, coords={"T": t + t_size}) class Combine3d: """Benchmark concatenating and merging large datasets""" def setup(self): """Create 4 datasets with two different variables""" t_size, x_size, y_size = 50, 450, 400 t = np.arange(t_size) data = np.random.randn(t_size, x_size, y_size) self.dsA0 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))} ) self.dsA1 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))} ) self.dsB0 = xr.Dataset( {"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))} ) self.dsB1 = xr.Dataset( {"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))} ) def time_combine_nested(self): datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]] xr.combine_nested(datasets, concat_dim=[None, "T"]) def time_combine_by_coords(self): """Also has to load and arrange t coordinate""" datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1] xr.combine_by_coords(datasets) xarray-2025.12.0/asv_bench/benchmarks/dataarray_missing.py000066400000000000000000000035201511464676000234630ustar00rootroot00000000000000import pandas as pd import xarray as xr from . 
import parameterized, randn, requires_dask def make_bench_data(shape, frac_nan, chunks): vals = randn(shape, frac_nan) coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])} da = xr.DataArray(vals, dims=("time", "x", "y"), coords=coords) if chunks is not None: da = da.chunk(chunks) return da class DataArrayMissingInterpolateNA: def setup(self, shape, chunks, limit): if chunks is not None: requires_dask() self.da = make_bench_data(shape, 0.1, chunks) @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_interpolate_na(self, shape, chunks, limit): actual = self.da.interpolate_na(dim="time", method="linear", limit=limit) if chunks is not None: actual = actual.compute() class DataArrayMissingBottleneck: def setup(self, shape, chunks, limit): if chunks is not None: requires_dask() self.da = make_bench_data(shape, 0.1, chunks) @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_ffill(self, shape, chunks, limit): actual = self.da.ffill(dim="time", limit=limit) if chunks is not None: actual = actual.compute() @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_bfill(self, shape, chunks, limit): actual = self.da.bfill(dim="time", limit=limit) if chunks is not None: actual = actual.compute() xarray-2025.12.0/asv_bench/benchmarks/dataset.py000066400000000000000000000012771511464676000214160ustar00rootroot00000000000000import numpy as np from xarray import Dataset from . import requires_dask class DatasetBinaryOp: def setup(self): self.ds = Dataset( { "a": (("x", "y"), np.ones((300, 400))), "b": (("x", "y"), np.ones((300, 400))), } ) self.mean = self.ds.mean() self.std = self.ds.std() def time_normalize(self): (self.ds - self.mean) / self.std class DatasetChunk: def setup(self): requires_dask() self.ds = Dataset() array = np.ones(1000) for i in range(250): self.ds[f"var{i}"] = ("x", array) def time_chunk(self): self.ds.chunk(x=(1,) * 1000) xarray-2025.12.0/asv_bench/benchmarks/dataset_io.py000066400000000000000000000610021511464676000220750ustar00rootroot00000000000000from __future__ import annotations import os from dataclasses import dataclass import numpy as np import pandas as pd import xarray as xr from . 
import _skip_slow, parameterized, randint, randn, requires_dask try: import dask import dask.multiprocessing except ImportError: pass os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" _ENGINES = tuple(xr.backends.list_engines().keys() - {"store"}) class IOSingleNetCDF: """ A few examples that benchmark reading/writing a single netCDF file with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_ds(self): # single Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} times = pd.date_range("1970-01-01", periods=self.nt, freq="D") lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) self.ds["foo"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) self.ds["bar"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) self.ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) self.ds.attrs = {"history": "created for xarray benchmarking"} self.oinds = { "time": randint(0, self.nt, 120), "lon": randint(0, self.nx, 20), "lat": randint(0, self.ny, 10), } self.vinds = { "time": xr.DataArray(randint(0, self.nt, 120), dims="x"), "lon": xr.DataArray(randint(0, self.nx, 120), dims="x"), "lat": slice(3, 20), } class IOWriteSingleNetCDF3(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.format = "NETCDF3_64BIT" self.make_ds() def time_write_dataset_netcdf4(self): self.ds.to_netcdf("test_netcdf4_write.nc", engine="netcdf4", format=self.format) def time_write_dataset_scipy(self): self.ds.to_netcdf("test_scipy_write.nc", engine="scipy", format=self.format) class IOReadSingleNetCDF4(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.make_ds() self.filepath = "test_single_file.nc4.nc" self.format = "NETCDF4" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_netcdf4(self): xr.open_dataset(self.filepath, engine="netcdf4").load() def time_orthogonal_indexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4") ds = ds.isel(**self.oinds).load() def time_vectorized_indexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4") ds = ds.isel(**self.vinds).load() class IOReadSingleNetCDF3(IOReadSingleNetCDF4): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
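        # _skip_slow() is defined in benchmarks/__init__.py: if the ASV_SKIP_SLOW
        # environment variable is set to "1" it raises NotImplementedError, which
        # asv treats as a skipped benchmark rather than an error.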
_skip_slow() self.make_ds() self.filepath = "test_single_file.nc3.nc" self.format = "NETCDF3_64BIT" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_scipy(self): xr.open_dataset(self.filepath, engine="scipy").load() def time_orthogonal_indexing(self): ds = xr.open_dataset(self.filepath, engine="scipy") ds = ds.isel(**self.oinds).load() def time_vectorized_indexing(self): ds = xr.open_dataset(self.filepath, engine="scipy") ds = ds.isel(**self.vinds).load() class IOReadSingleNetCDF4Dask(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.filepath = "test_single_file.nc4.nc" self.format = "NETCDF4" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_netcdf4_with_block_chunks(self): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_block_chunks_oindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.oinds).load() def time_load_dataset_netcdf4_with_block_chunks_vindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.vinds).load() def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks(self): xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.time_chunks).load() def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.time_chunks ).load() class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
_skip_slow() requires_dask() self.make_ds() self.filepath = "test_single_file.nc3.nc" self.format = "NETCDF3_64BIT" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="scipy", chunks=self.block_chunks ).load() def time_load_dataset_scipy_with_block_chunks_oindexing(self): ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks) ds = ds.isel(**self.oinds).load() def time_load_dataset_scipy_with_block_chunks_vindexing(self): ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks) ds = ds.isel(**self.vinds).load() def time_load_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="scipy", chunks=self.time_chunks ).load() class IOMultipleNetCDF: """ A few examples that benchmark reading/writing multiple netCDF files with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_ds(self, nfiles=10): # multiple Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.nfiles = nfiles self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} self.time_vars = np.split( pd.date_range("1970-01-01", periods=self.nt, freq="D"), self.nfiles ) self.ds_list = [] self.filenames_list = [] for i, times in enumerate(self.time_vars): ds = xr.Dataset() nt = len(times) lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) ds["foo"] = xr.DataArray( randn((nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) ds["bar"] = xr.DataArray( randn((nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) ds.attrs = {"history": "created for xarray benchmarking"} self.ds_list.append(ds) self.filenames_list.append(f"test_netcdf_{i}.nc") class IOWriteMultipleNetCDF3(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.make_ds() self.format = "NETCDF3_64BIT" def time_write_dataset_netcdf4(self): xr.save_mfdataset( self.ds_list, self.filenames_list, engine="netcdf4", format=self.format ) def time_write_dataset_scipy(self): xr.save_mfdataset( self.ds_list, self.filenames_list, engine="scipy", format=self.format ) class IOReadMultipleNetCDF4(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
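        # Besides the slow-test guard, the multi-file benchmarks call
        # requires_dask() below, since the timed methods read the files back
        # with xr.open_mfdataset.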
_skip_slow() requires_dask() self.make_ds() self.format = "NETCDF4" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_netcdf4(self): xr.open_mfdataset(self.filenames_list, engine="netcdf4").load() def time_open_dataset_netcdf4(self): xr.open_mfdataset(self.filenames_list, engine="netcdf4") class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.format = "NETCDF3_64BIT" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_scipy(self): xr.open_mfdataset(self.filenames_list, engine="scipy").load() def time_open_dataset_scipy(self): xr.open_mfdataset(self.filenames_list, engine="scipy") class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.format = "NETCDF4" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_netcdf4_with_block_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ).load() def time_open_dataset_netcdf4_with_block_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ) def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ) def time_open_dataset_netcdf4_with_time_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ) def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ) class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
_skip_slow() requires_dask() self.make_ds() self.format = "NETCDF3_64BIT" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.block_chunks ).load() def time_load_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.time_chunks ).load() def time_open_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.block_chunks ) def time_open_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.time_chunks ) def create_delayed_write(): import dask.array as da vals = da.random.random(300, chunks=(1,)) ds = xr.Dataset({"vals": (["a"], vals)}) return ds.to_netcdf("file.nc", engine="netcdf4", compute=False) class IONestedDataTree: """ A few examples that benchmark reading/writing a heavily nested netCDF datatree with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_datatree(self, nchildren=10): # multiple Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.nchildren = nchildren self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} times = pd.date_range("1970-01-01", periods=self.nt, freq="D") lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) self.ds["foo"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) self.ds["bar"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) self.ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) self.ds.attrs = {"history": "created for xarray benchmarking"} self.oinds = { "time": randint(0, self.nt, 120), "lon": randint(0, self.nx, 20), "lat": randint(0, self.ny, 10), } self.vinds = { "time": xr.DataArray(randint(0, self.nt, 120), dims="x"), "lon": xr.DataArray(randint(0, self.nx, 120), dims="x"), "lat": slice(3, 20), } root = {f"group_{group}": self.ds for group in range(self.nchildren)} nested_tree1 = { f"group_{group}/subgroup_1": xr.Dataset() for group in range(self.nchildren) } nested_tree2 = { f"group_{group}/subgroup_2": xr.DataArray(np.arange(1, 10)).to_dataset( name="a" ) for group in range(self.nchildren) } nested_tree3 = { f"group_{group}/subgroup_2/sub-subgroup_1": self.ds for group in range(self.nchildren) } dtree = root | nested_tree1 | nested_tree2 | nested_tree3 self.dtree = xr.DataTree.from_dict(dtree) class IOReadDataTreeNetCDF4(IONestedDataTree): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. 
# Improve times and remove errors. _skip_slow() requires_dask() self.make_datatree() self.format = "NETCDF4" self.filepath = "datatree.nc4.nc" dtree = self.dtree dtree.to_netcdf(filepath=self.filepath) def time_load_datatree_netcdf4(self): xr.open_datatree(self.filepath, engine="netcdf4").load() def time_open_datatree_netcdf4(self): xr.open_datatree(self.filepath, engine="netcdf4") class IOWriteNetCDFDask: timeout = 60 repeat = 1 number = 5 def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.write = create_delayed_write() def time_write(self): self.write.compute() class IOWriteNetCDFDaskDistributed: def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() try: import distributed except ImportError as err: raise NotImplementedError() from err self.client = distributed.Client() self.write = create_delayed_write() def cleanup(self): self.client.shutdown() def time_write(self): self.write.compute() class IOReadSingleFile(IOSingleNetCDF): def setup(self, *args, **kwargs): self.make_ds() self.filepaths = {} for engine in _ENGINES: self.filepaths[engine] = f"test_single_file_with_{engine}.nc" self.ds.to_netcdf(self.filepaths[engine], engine=engine) @parameterized(["engine", "chunks"], (_ENGINES, [None, {}])) def time_read_dataset(self, engine, chunks): xr.open_dataset(self.filepaths[engine], engine=engine, chunks=chunks) class IOReadCustomEngine: def setup(self, *args, **kwargs): """ The custom backend does the bare minimum to be considered a lazy backend. But the data in it is still in memory so slow file reading shouldn't affect the results. """ requires_dask() @dataclass class PerformanceBackendArray(xr.backends.BackendArray): filename_or_obj: str | os.PathLike | None shape: tuple[int, ...] dtype: np.dtype lock: xr.backends.locks.SerializableLock def __getitem__(self, key: tuple): return xr.core.indexing.explicit_indexing_adapter( key, self.shape, xr.core.indexing.IndexingSupport.BASIC, self._raw_indexing_method, ) def _raw_indexing_method(self, key: tuple): raise NotImplementedError @dataclass class PerformanceStore(xr.backends.common.AbstractWritableDataStore): manager: xr.backends.CachingFileManager mode: str | None = None lock: xr.backends.locks.SerializableLock | None = None autoclose: bool = False def __post_init__(self): self.filename = self.manager._args[0] @classmethod def open( cls, filename: str | os.PathLike | None, mode: str = "r", lock: xr.backends.locks.SerializableLock | None = None, autoclose: bool = False, ): locker = lock or xr.backends.locks.SerializableLock() manager = xr.backends.CachingFileManager( xr.backends.DummyFileManager, filename, mode=mode, ) return cls(manager, mode=mode, lock=locker, autoclose=autoclose) def load(self) -> tuple: """ Load a bunch of test data quickly. Normally this method would've opened a file and parsed it. """ n_variables = 2000 # Important to have a shape and dtype for lazy loading. shape = (1000,) dtype = np.dtype(int) variables = { f"long_variable_name_{v}": xr.Variable( data=PerformanceBackendArray( self.filename, shape, dtype, self.lock ), dims=("time",), fastpath=True, ) for v in range(n_variables) } attributes = {} return variables, attributes class PerformanceBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj: str | os.PathLike | None, drop_variables: tuple[str, ...] 
| None = None, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, use_cftime=None, decode_timedelta=None, lock=None, **kwargs, ) -> xr.Dataset: filename_or_obj = xr.backends.common._normalize_path(filename_or_obj) store = PerformanceStore.open(filename_or_obj, lock=lock) store_entrypoint = xr.backends.store.StoreBackendEntrypoint() ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds self.engine = PerformanceBackend @parameterized(["chunks"], ([None, {}, {"time": 10}])) def time_open_dataset(self, chunks): """ Time how fast xr.open_dataset is without the slow data reading part. Test with and without dask. """ xr.open_dataset(None, engine=self.engine, chunks=chunks) xarray-2025.12.0/asv_bench/benchmarks/datatree.py000066400000000000000000000006551511464676000215610ustar00rootroot00000000000000import xarray as xr from xarray.core.datatree import DataTree class Datatree: def setup(self): run1 = DataTree.from_dict({"run1": xr.Dataset({"a": 1})}) self.d_few = {"run1": run1} self.d_many = {f"run{i}": xr.Dataset({"a": 1}) for i in range(100)} def time_from_dict_few(self): DataTree.from_dict(self.d_few) def time_from_dict_many(self): DataTree.from_dict(self.d_many) xarray-2025.12.0/asv_bench/benchmarks/groupby.py000066400000000000000000000144231511464676000214550ustar00rootroot00000000000000# import flox to avoid the cost of first import import cftime import flox.xarray # noqa: F401 import numpy as np import pandas as pd import xarray as xr from . import _skip_slow, parameterized, requires_dask class GroupBy: def setup(self, *args, **kwargs): self.n = 100 self.ds1d = xr.Dataset( { "a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]), "b": xr.DataArray(np.arange(2 * self.n)), "c": xr.DataArray(np.arange(2 * self.n)), } ) self.ds2d = self.ds1d.expand_dims(z=10).copy() self.ds1d_mean = self.ds1d.groupby("b").mean() self.ds2d_mean = self.ds2d.groupby("b").mean() @parameterized(["ndim"], [(1, 2)]) def time_init(self, ndim): getattr(self, f"ds{ndim}d").groupby("b") @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_small_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.groupby("a"), method)().compute() @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_large_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.groupby("b"), method)().compute() def time_binary_op_1d(self): (self.ds1d.groupby("b") - self.ds1d_mean).compute() def time_binary_op_2d(self): (self.ds2d.groupby("b") - self.ds2d_mean).compute() def peakmem_binary_op_1d(self): (self.ds1d.groupby("b") - self.ds1d_mean).compute() def peakmem_binary_op_2d(self): (self.ds2d.groupby("b") - self.ds2d_mean).compute() class GroupByDask(GroupBy): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.sel(dim_0=slice(None, None, 2)) self.ds1d["c"] = self.ds1d["c"].chunk({"dim_0": 50}) self.ds2d = self.ds2d.sel(dim_0=slice(None, None, 2)) self.ds2d["c"] = self.ds2d["c"].chunk({"dim_0": 50, "z": 5}) self.ds1d_mean = self.ds1d.groupby("b").mean().compute() self.ds2d_mean = 
self.ds2d.groupby("b").mean().compute() # TODO: These don't work now because we are calling `.compute` explicitly. class GroupByPandasDataFrame(GroupBy): """Run groupby tests using pandas DataFrame.""" def setup(self, *args, **kwargs): # Skip testing in CI as it won't ever change in a commit: _skip_slow() super().setup(**kwargs) self.ds1d = self.ds1d.to_dataframe() self.ds1d_mean = self.ds1d.groupby("b").mean() def time_binary_op_2d(self): raise NotImplementedError def peakmem_binary_op_2d(self): raise NotImplementedError class GroupByDaskDataFrame(GroupBy): """Run groupby tests using dask DataFrame.""" def setup(self, *args, **kwargs): # Skip testing in CI as it won't ever change in a commit: _skip_slow() requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.chunk({"dim_0": 50}).to_dask_dataframe() self.ds1d_mean = self.ds1d.groupby("b").mean().compute() def time_binary_op_2d(self): raise NotImplementedError def peakmem_binary_op_2d(self): raise NotImplementedError class Resample: def setup(self, *args, **kwargs): self.ds1d = xr.Dataset( { "b": ("time", np.arange(365.0 * 24)), }, coords={"time": pd.date_range("2001-01-01", freq="h", periods=365 * 24)}, ) self.ds2d = self.ds1d.expand_dims(z=10) self.ds1d_mean = self.ds1d.resample(time="48h").mean() self.ds2d_mean = self.ds2d.resample(time="48h").mean() @parameterized(["ndim"], [(1, 2)]) def time_init(self, ndim): getattr(self, f"ds{ndim}d").resample(time="D") @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_small_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.resample(time="3ME"), method)().compute() @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_large_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.resample(time="48h"), method)().compute() class ResampleDask(Resample): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.chunk({"time": 50}) self.ds2d = self.ds2d.chunk({"time": 50, "z": 4}) class ResampleCFTime(Resample): def setup(self, *args, **kwargs): self.ds1d = xr.Dataset( { "b": ("time", np.arange(365.0 * 24)), }, coords={ "time": xr.date_range( "2001-01-01", freq="h", periods=365 * 24, calendar="noleap" ) }, ) self.ds2d = self.ds1d.expand_dims(z=10) self.ds1d_mean = self.ds1d.resample(time="48h").mean() self.ds2d_mean = self.ds2d.resample(time="48h").mean() @parameterized(["use_cftime", "use_flox"], [[True, False], [True, False]]) class GroupByLongTime: def setup(self, use_cftime, use_flox): arr = np.random.randn(10, 10, 365 * 30) time = xr.date_range("2000", periods=30 * 365, use_cftime=use_cftime) # GH9426 - deep-copying CFTime object arrays is weirdly slow asda = xr.DataArray(time) labeled_time = [] for year, month in zip(asda.dt.year, asda.dt.month, strict=True): labeled_time.append(cftime.datetime(year, month, 1)) self.da = xr.DataArray( arr, dims=("y", "x", "time"), coords={"time": time, "time2": ("time", labeled_time)}, ) def time_setup(self, use_cftime, use_flox): self.da.groupby("time.month") def time_mean(self, use_cftime, use_flox): with xr.set_options(use_flox=use_flox): self.da.groupby("time.year").mean() xarray-2025.12.0/asv_bench/benchmarks/import.py000066400000000000000000000007641511464676000213030ustar00rootroot00000000000000class Import: """Benchmark importing xarray""" def 
timeraw_import_xarray(self): return "import xarray" def timeraw_import_xarray_plot(self): return "import xarray.plot" def timeraw_import_xarray_backends(self): return """ from xarray.backends import list_engines list_engines() """ def timeraw_import_xarray_only(self): # import numpy and pandas in the setup stage return "import xarray", "import numpy, pandas" xarray-2025.12.0/asv_bench/benchmarks/indexing.py000066400000000000000000000150361511464676000215740ustar00rootroot00000000000000import os import numpy as np import pandas as pd import xarray as xr from . import parameterized, randint, randn, requires_dask nx = 2000 ny = 1000 nt = 500 basic_indexes = { "1scalar": {"x": 0}, "1slice": {"x": slice(0, 3)}, "1slice-1scalar": {"x": 0, "y": slice(None, None, 3)}, "2slicess-1scalar": {"x": slice(3, -3, 3), "y": 1, "t": slice(None, -3, 3)}, } basic_assignment_values = { "1scalar": 0, "1slice": xr.DataArray(randn((3, ny), frac_nan=0.1), dims=["x", "y"]), "1slice-1scalar": xr.DataArray(randn(int(ny / 3) + 1, frac_nan=0.1), dims=["y"]), "2slicess-1scalar": xr.DataArray( randn(np.empty(nx)[slice(3, -3, 3)].size, frac_nan=0.1), dims=["x"] ), } outer_indexes = { "1d": {"x": randint(0, nx, 400)}, "2d": {"x": randint(0, nx, 500), "y": randint(0, ny, 400)}, "2d-1scalar": {"x": randint(0, nx, 100), "y": 1, "t": randint(0, nt, 400)}, } outer_assignment_values = { "1d": xr.DataArray(randn((400, ny), frac_nan=0.1), dims=["x", "y"]), "2d": xr.DataArray(randn((500, 400), frac_nan=0.1), dims=["x", "y"]), "2d-1scalar": xr.DataArray(randn(100, frac_nan=0.1), dims=["x"]), } def make_vectorized_indexes(n_index): return { "1-1d": {"x": xr.DataArray(randint(0, nx, n_index), dims="a")}, "2-1d": { "x": xr.DataArray(randint(0, nx, n_index), dims="a"), "y": xr.DataArray(randint(0, ny, n_index), dims="a"), }, "3-2d": { "x": xr.DataArray( randint(0, nx, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), "y": xr.DataArray( randint(0, ny, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), "t": xr.DataArray( randint(0, nt, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), }, } vectorized_indexes = make_vectorized_indexes(400) big_vectorized_indexes = make_vectorized_indexes(400_000) vectorized_assignment_values = { "1-1d": xr.DataArray(randn((400, ny)), dims=["a", "y"], coords={"a": randn(400)}), "2-1d": xr.DataArray(randn(400), dims=["a"], coords={"a": randn(400)}), "3-2d": xr.DataArray( randn((4, 100)), dims=["a", "b"], coords={"a": randn(4), "b": randn(100)} ), } class Base: def setup(self, key): self.ds = xr.Dataset( { "var1": (("x", "y"), randn((nx, ny), frac_nan=0.1)), "var2": (("x", "t"), randn((nx, nt))), "var3": (("t",), randn(nt)), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) # Benchmark how indexing is slowed down by adding many scalar variable # to the dataset # https://github.com/pydata/xarray/pull/9003 self.ds_large = self.ds.merge({f"extra_var{i}": i for i in range(400)}) class Indexing(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_indexing_basic(self, key): self.ds.isel(**basic_indexes[key]).load() @parameterized(["key"], [list(outer_indexes.keys())]) def time_indexing_outer(self, key): self.ds.isel(**outer_indexes[key]).load() @parameterized(["key"], [list(vectorized_indexes.keys())]) def time_indexing_vectorized(self, key): self.ds.isel(**vectorized_indexes[key]).load() @parameterized(["key"], [list(basic_indexes.keys())]) def 
time_indexing_basic_ds_large(self, key): # https://github.com/pydata/xarray/pull/9003 self.ds_large.isel(**basic_indexes[key]).load() class IndexingOnly(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_indexing_basic(self, key): self.ds.isel(**basic_indexes[key]) @parameterized(["key"], [list(outer_indexes.keys())]) def time_indexing_outer(self, key): self.ds.isel(**outer_indexes[key]) @parameterized(["key"], [list(big_vectorized_indexes.keys())]) def time_indexing_big_vectorized(self, key): self.ds.isel(**big_vectorized_indexes[key]) class Assignment(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_assignment_basic(self, key): ind = basic_indexes[key] val = basic_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val @parameterized(["key"], [list(outer_indexes.keys())]) def time_assignment_outer(self, key): ind = outer_indexes[key] val = outer_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val @parameterized(["key"], [list(vectorized_indexes.keys())]) def time_assignment_vectorized(self, key): ind = vectorized_indexes[key] val = vectorized_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val class IndexingDask(Indexing): def setup(self, key): requires_dask() super().setup(key) self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50}) class BooleanIndexing: # https://github.com/pydata/xarray/issues/2227 def setup(self): self.ds = xr.Dataset( {"a": ("time", np.arange(10_000_000))}, coords={"time": np.arange(10_000_000)}, ) self.time_filter = self.ds.time > 50_000 def time_indexing(self): self.ds.isel(time=self.time_filter) class HugeAxisSmallSliceIndexing: # https://github.com/pydata/xarray/pull/4560 def setup(self): self.filepath = "test_indexing_huge_axis_small_slice.nc" if not os.path.isfile(self.filepath): xr.Dataset( {"a": ("x", np.arange(10_000_000))}, coords={"x": np.arange(10_000_000)}, ).to_netcdf(self.filepath, format="NETCDF4") self.ds = xr.open_dataset(self.filepath) def time_indexing(self): self.ds.isel(x=slice(100)) def cleanup(self): self.ds.close() class AssignmentOptimized: # https://github.com/pydata/xarray/pull/7382 def setup(self): self.ds = xr.Dataset(coords={"x": np.arange(500_000)}) self.da = xr.DataArray(np.arange(500_000), dims="x") def time_assign_no_reindex(self): # assign with non-indexed DataArray of same dimension size self.ds.assign(foo=self.da) def time_assign_identical_indexes(self): # fastpath index comparison (same index object) self.ds.assign(foo=self.ds.x) xarray-2025.12.0/asv_bench/benchmarks/interp.py000066400000000000000000000041121511464676000212610ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . 
import parameterized, randn, requires_dask nx = 1500 ny = 1000 nt = 500 randn_xy = randn((nx, ny), frac_nan=0.1) randn_xt = randn((nx, nt)) randn_t = randn((nt,)) new_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100) new_x_long = np.linspace(0.3 * nx, 0.7 * nx, 500) new_y_long = np.linspace(0.1, 0.9, 500) class Interpolation: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), "var4": (("z",), np.array(["text"])), "var5": (("k",), np.array(["a", "b", "c"])), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), "z": np.array([1]), "k": np.linspace(0, nx, 3), }, ) @parameterized(["method", "is_short"], (["linear", "cubic"], [True, False])) def time_interpolation_numeric_1d(self, method, is_short): new_x = new_x_short if is_short else new_x_long self.ds.interp(x=new_x, method=method).compute() @parameterized(["method"], (["linear", "nearest"])) def time_interpolation_numeric_2d(self, method): self.ds.interp(x=new_x_long, y=new_y_long, method=method).compute() @parameterized(["is_short"], ([True, False])) def time_interpolation_string_scalar(self, is_short): new_z = new_x_short if is_short else new_x_long self.ds.interp(z=new_z).compute() @parameterized(["is_short"], ([True, False])) def time_interpolation_string_1d(self, is_short): new_k = new_x_short if is_short else new_x_long self.ds.interp(k=new_k).compute() class InterpolationDask(Interpolation): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds = self.ds.chunk({"t": 50}) xarray-2025.12.0/asv_bench/benchmarks/merge.py000066400000000000000000000046131511464676000210650ustar00rootroot00000000000000import numpy as np import xarray as xr class DatasetAddVariable: param_names = ["existing_elements"] params = [[0, 10, 100, 1000]] def setup(self, existing_elements): self.datasets = {} # Dictionary insertion is fast(er) than xarray.Dataset insertion d = {} for i in range(existing_elements): d[f"var{i}"] = i self.dataset = xr.merge([d]) d = {f"set_2_{i}": i for i in range(existing_elements)} self.dataset2 = xr.merge([d]) def time_variable_insertion(self, existing_elements): dataset = self.dataset dataset["new_var"] = 0 def time_merge_two_datasets(self, existing_elements): xr.merge([self.dataset, self.dataset2]) class DatasetCreation: # The idea here is to time how long it takes to go from numpy # and python data types, to a full dataset # See discussion # https://github.com/pydata/xarray/issues/7224#issuecomment-1292216344 param_names = ["strategy", "count"] params = [ ["dict_of_DataArrays", "dict_of_Variables", "dict_of_Tuples"], [0, 1, 10, 100, 1000], ] def setup(self, strategy, count): data = np.array(["0", "b"], dtype=str) self.dataset_coords = dict(time=np.array([0, 1])) self.dataset_attrs = dict(description="Test data") attrs = dict(units="Celsius") if strategy == "dict_of_DataArrays": def create_data_vars(): return { f"long_variable_name_{i}": xr.DataArray( data=data, dims=("time"), attrs=attrs ) for i in range(count) } elif strategy == "dict_of_Variables": def create_data_vars(): return { f"long_variable_name_{i}": xr.Variable("time", data, attrs=attrs) for i in range(count) } elif strategy == "dict_of_Tuples": def create_data_vars(): return { f"long_variable_name_{i}": ("time", data, attrs) for i in range(count) } self.create_data_vars = create_data_vars def time_dataset_creation(self, strategy, count): 
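        # Timed work: build the data_vars dict with the factory selected in
        # setup() for this strategy, then construct the Dataset from it
        # (coords and attrs were prepared in setup()).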
data_vars = self.create_data_vars() xr.Dataset( data_vars=data_vars, coords=self.dataset_coords, attrs=self.dataset_attrs ) xarray-2025.12.0/asv_bench/benchmarks/pandas.py000066400000000000000000000033431511464676000212330ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . import parameterized, requires_dask class MultiIndexSeries: def setup(self, dtype, subset): data = np.random.rand(100000).astype(dtype) index = pd.MultiIndex.from_product( [ list("abcdefhijk"), list("abcdefhijk"), pd.date_range(start="2000-01-01", periods=1000, freq="D"), ] ) series = pd.Series(data, index) if subset: series = series[::3] self.series = series @parameterized(["dtype", "subset"], ([int, float], [True, False])) def time_from_series(self, dtype, subset): xr.DataArray.from_series(self.series) class ToDataFrame: def setup(self, *args, **kwargs): xp = kwargs.get("xp", np) nvars = kwargs.get("nvars", 1) random_kws = kwargs.get("random_kws", {}) method = kwargs.get("method", "to_dataframe") dim1 = 10_000 dim2 = 10_000 var = xr.Variable( dims=("dim1", "dim2"), data=xp.random.random((dim1, dim2), **random_kws) ) data_vars = {f"long_name_{v}": (("dim1", "dim2"), var) for v in range(nvars)} ds = xr.Dataset( data_vars, coords={"dim1": np.arange(0, dim1), "dim2": np.arange(0, dim2)} ) self.to_frame = getattr(ds, method) def time_to_dataframe(self): self.to_frame() def peakmem_to_dataframe(self): self.to_frame() class ToDataFrameDask(ToDataFrame): def setup(self, *args, **kwargs): requires_dask() import dask.array as da super().setup( xp=da, random_kws=dict(chunks=5000), method="to_dask_dataframe", nvars=500 ) xarray-2025.12.0/asv_bench/benchmarks/polyfit.py000066400000000000000000000017751511464676000214620ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized, randn, requires_dask NDEGS = (2, 5, 20) NX = (10**2, 10**6) class Polyval: def setup(self, *args, **kwargs): self.xs = {nx: xr.DataArray(randn((nx,)), dims="x", name="x") for nx in NX} self.coeffs = { ndeg: xr.DataArray( randn((ndeg,)), dims="degree", coords={"degree": np.arange(ndeg)} ) for ndeg in NDEGS } @parameterized(["nx", "ndeg"], [NX, NDEGS]) def time_polyval(self, nx, ndeg): x = self.xs[nx] c = self.coeffs[ndeg] xr.polyval(x, c).compute() @parameterized(["nx", "ndeg"], [NX, NDEGS]) def peakmem_polyval(self, nx, ndeg): x = self.xs[nx] c = self.coeffs[ndeg] xr.polyval(x, c).compute() class PolyvalDask(Polyval): def setup(self, *args, **kwargs): requires_dask() super().setup(*args, **kwargs) self.xs = {k: v.chunk({"x": 10000}) for k, v in self.xs.items()} xarray-2025.12.0/asv_bench/benchmarks/reindexing.py000066400000000000000000000025461511464676000221250ustar00rootroot00000000000000import numpy as np import xarray as xr from . 
import requires_dask ntime = 500 nx = 50 ny = 50 class Reindex: def setup(self): data = np.random.default_rng(0).random((ntime, nx, ny)) self.ds = xr.Dataset( {"temperature": (("time", "x", "y"), data)}, coords={"time": np.arange(ntime), "x": np.arange(nx), "y": np.arange(ny)}, ) def time_1d_coarse(self): self.ds.reindex(time=np.arange(0, ntime, 5)).load() def time_1d_fine_all_found(self): self.ds.reindex(time=np.arange(0, ntime, 0.5), method="nearest").load() def time_1d_fine_some_missing(self): self.ds.reindex( time=np.arange(0, ntime, 0.5), method="nearest", tolerance=0.1 ).load() def time_2d_coarse(self): self.ds.reindex(x=np.arange(0, nx, 2), y=np.arange(0, ny, 2)).load() def time_2d_fine_all_found(self): self.ds.reindex( x=np.arange(0, nx, 0.5), y=np.arange(0, ny, 0.5), method="nearest" ).load() def time_2d_fine_some_missing(self): self.ds.reindex( x=np.arange(0, nx, 0.5), y=np.arange(0, ny, 0.5), method="nearest", tolerance=0.1, ).load() class ReindexDask(Reindex): def setup(self): requires_dask() super().setup() self.ds = self.ds.chunk({"time": 100}) xarray-2025.12.0/asv_bench/benchmarks/renaming.py000066400000000000000000000014201511464676000215570ustar00rootroot00000000000000import numpy as np import xarray as xr class SwapDims: param_names = ["size"] params = [[int(1e3), int(1e5), int(1e7)]] def setup(self, size: int) -> None: self.ds = xr.Dataset( {"a": (("x", "t"), np.ones((size, 2)))}, coords={ "x": np.arange(size), "y": np.arange(size), "z": np.arange(size), "x2": ("x", np.arange(size)), "y2": ("y", np.arange(size)), "z2": ("z", np.arange(size)), }, ) def time_swap_dims(self, size: int) -> None: self.ds.swap_dims({"x": "xn", "y": "yn", "z": "zn"}) def time_swap_dims_newindex(self, size: int) -> None: self.ds.swap_dims({"x": "x2", "y": "y2", "z": "z2"}) xarray-2025.12.0/asv_bench/benchmarks/repr.py000066400000000000000000000045211511464676000207340ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr class Repr: def setup(self): a = np.arange(0, 100) data_vars = dict() for i in a: data_vars[f"long_variable_name_{i}"] = xr.DataArray( name=f"long_variable_name_{i}", data=np.arange(0, 20), dims=[f"long_coord_name_{i}_x"], coords={f"long_coord_name_{i}_x": np.arange(0, 20) * 2}, ) self.ds = xr.Dataset(data_vars) self.ds.attrs = {f"attr_{k}": 2 for k in a} def time_repr(self): repr(self.ds) def time_repr_html(self): self.ds._repr_html_() class ReprDataTree: def setup(self): # construct a datatree with 500 nodes number_of_files = 20 number_of_groups = 25 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g}) self.dt = xr.DataTree.from_dict(tree_dict) def time_repr(self): repr(self.dt) def time_repr_html(self): self.dt._repr_html_() class ReprMultiIndex: def setup(self): index = pd.MultiIndex.from_product( [range(1000), range(1000)], names=("level_0", "level_1") ) series = pd.Series(range(1000 * 1000), index=index) self.da = xr.DataArray(series) def time_repr(self): repr(self.da) def time_repr_html(self): self.da._repr_html_() class ReprPandasRangeIndex: # display a memory-saving pandas.RangeIndex shouldn't trigger memory # expensive conversion into a numpy array def setup(self): index = xr.indexes.PandasIndex(pd.RangeIndex(1_000_000), "x") self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) def time_repr(self): repr(self.ds.x) def time_repr_html(self): self.ds.x._repr_html_() class ReprXarrayRangeIndex: # display an Xarray RangeIndex shouldn't trigger 
memory expensive conversion # of its lazy coordinate into a numpy array def setup(self): index = xr.indexes.RangeIndex.arange(1_000_000, dim="x") self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) def time_repr(self): repr(self.ds.x) def time_repr_html(self): self.ds.x._repr_html_() xarray-2025.12.0/asv_bench/benchmarks/rolling.py000066400000000000000000000120151511464676000214270ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . import _skip_slow, parameterized, randn, requires_dask nx = 3000 long_nx = 30000 ny = 200 nt = 1000 window = 20 randn_xy = randn((nx, ny), frac_nan=0.1) randn_xt = randn((nx, nt)) randn_t = randn((nt,)) randn_long = randn((long_nx,), frac_nan=0.1) class Rolling: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) self.da_long = xr.DataArray( randn_long, dims="x", coords={"x": np.arange(long_nx) * 0.1} ) @parameterized( ["func", "center", "use_bottleneck"], (["mean", "count"], [True, False], [True, False]), ) def time_rolling(self, func, center, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): getattr(self.ds.rolling(x=window, center=center), func)().load() @parameterized( ["func", "pandas", "use_bottleneck"], (["mean", "count"], [True, False], [True, False]), ) def time_rolling_long(self, func, pandas, use_bottleneck): if pandas: se = self.da_long.to_series() getattr(se.rolling(window=window, min_periods=window), func)() else: with xr.set_options(use_bottleneck=use_bottleneck): getattr( self.da_long.rolling(x=window, min_periods=window), func )().load() @parameterized( ["window_", "min_periods", "use_bottleneck"], ([20, 40], [5, 5], [True, False]) ) def time_rolling_np(self, window_, min_periods, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce( np.nansum ).load() @parameterized( ["center", "stride", "use_bottleneck"], ([True, False], [1, 1], [True, False]) ) def time_rolling_construct(self, center, stride, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): self.ds.rolling(x=window, center=center).construct( "window_dim", stride=stride ).sum(dim="window_dim").load() class RollingDask(Rolling): def setup(self, *args, **kwargs): requires_dask() # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
_skip_slow() super().setup(**kwargs) self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50}) self.da_long = self.da_long.chunk({"x": 10000}) class RollingMemory: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) class DataArrayRollingMemory(RollingMemory): @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_ndrolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.var1.rolling(x=10, y=4) getattr(roll, func)() @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_1drolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.var3.rolling(t=100) getattr(roll, func)() @parameterized(["stride"], ([None, 5, 50])) def peakmem_1drolling_construct(self, stride): self.ds.var2.rolling(t=100).construct("w", stride=stride) self.ds.var3.rolling(t=100).construct("w", stride=stride) class DatasetRollingMemory(RollingMemory): @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_ndrolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.rolling(x=10, y=4) getattr(roll, func)() @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_1drolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.rolling(t=100) getattr(roll, func)() @parameterized(["stride"], ([None, 5, 50])) def peakmem_1drolling_construct(self, stride): self.ds.rolling(t=100).construct("w", stride=stride) xarray-2025.12.0/asv_bench/benchmarks/unstacking.py000066400000000000000000000035071511464676000221350ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . 
import requires_dask, requires_sparse class Unstacking: def setup(self): data = np.random.default_rng(0).random((250, 500)) self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) self.da_missing = self.da_full[:-1] self.df_missing = self.da_missing.to_pandas() def time_unstack_fast(self): self.da_full.unstack("flat_dim") def time_unstack_slow(self): self.da_missing.unstack("flat_dim") def time_unstack_pandas_slow(self): self.df_missing.unstack() class UnstackingDask(Unstacking): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.da_full = self.da_full.chunk({"flat_dim": 25}) class UnstackingSparse(Unstacking): def setup(self, *args, **kwargs): requires_sparse() import sparse data = sparse.random((500, 1000), random_state=0, fill_value=0) self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) self.da_missing = self.da_full[:-1] mindex = pd.MultiIndex.from_arrays([np.arange(100), np.arange(100)]) self.da_eye_2d = xr.DataArray(np.ones((100,)), dims="z", coords={"z": mindex}) self.da_eye_3d = xr.DataArray( np.ones((100, 50)), dims=("z", "foo"), coords={"z": mindex, "foo": np.arange(50)}, ) def time_unstack_to_sparse_2d(self): self.da_eye_2d.unstack(sparse=True) def time_unstack_to_sparse_3d(self): self.da_eye_3d.unstack(sparse=True) def peakmem_unstack_to_sparse_2d(self): self.da_eye_2d.unstack(sparse=True) def peakmem_unstack_to_sparse_3d(self): self.da_eye_3d.unstack(sparse=True) def time_unstack_pandas_slow(self): pass xarray-2025.12.0/ci/000077500000000000000000000000001511464676000137365ustar00rootroot00000000000000xarray-2025.12.0/ci/policy.yaml000066400000000000000000000010431511464676000161170ustar00rootroot00000000000000channels: - conda-forge platforms: - noarch - linux-64 policy: # all packages in months packages: python: 30 numpy: 18 default: 12 # overrides for the policy overrides: {} # these packages are completely ignored exclude: - coveralls - pip - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - pytest-hypothesis - hypothesis # these packages don't fail the CI, but will be printed in the report ignored_violations: [] xarray-2025.12.0/ci/release_contributors.py000066400000000000000000000033041511464676000205450ustar00rootroot00000000000000import re import textwrap import git from tlz.itertoolz import last, unique co_author_re = re.compile(r"Co-authored-by: (?P[^<]+?) 
<(?P.+)>") ignored = [ {"name": "dependabot[bot]"}, {"name": "pre-commit-ci[bot]"}, { "name": "Claude", "email": [ "noreply@anthropic.com", "claude@anthropic.com", "no-reply@anthropic.com", ], }, ] def is_ignored(name, email): # linear search, for now for ignore in ignored: if ignore["name"] != name: continue ignored_email = ignore.get("email") if ignored_email is None or email in ignored_email: return True return False def main(): repo = git.Repo(".") most_recent_release = last(list(repo.tags)) # extract information from commits contributors = {} for commit in repo.iter_commits(f"{most_recent_release.name}.."): matches = co_author_re.findall(commit.message) if matches: contributors.update({email: name for name, email in matches}) contributors[commit.author.email] = commit.author.name # deduplicate and ignore # TODO: extract ignores from .github/release.yml unique_contributors = unique( name for email, name in contributors.items() if not is_ignored(name, email) ) sorted_ = sorted(unique_contributors) if len(sorted_) > 1: names = f"{', '.join(sorted_[:-1])} and {sorted_[-1]}" else: names = "".join(sorted_) statement = textwrap.dedent( f"""\ Thanks to the {len(sorted_)} contributors to this release: {names} """.rstrip() ) print(statement) if __name__ == "__main__": main() xarray-2025.12.0/ci/requirements/000077500000000000000000000000001511464676000164615ustar00rootroot00000000000000xarray-2025.12.0/ci/requirements/environment-benchmark.yml000066400000000000000000000006601511464676000235020ustar00rootroot00000000000000name: xarray-benchmark channels: - conda-forge - nodefaults dependencies: - bottleneck - cftime - dask-core - distributed - flox - netcdf4 - numba - numbagg - numexpr - py-rattler - numpy>=2.2,<2.3 # https://github.com/numba/numba/issues/10105 - opt_einsum - packaging - pandas - pyarrow # pandas raises a deprecation warning without this, breaking doctests - sparse - scipy - toolz - zarr xarray-2025.12.0/ci/requirements/environment.yml000066400000000000000000000022641511464676000215540ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - aiobotocore - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - mypy==1.18.1 - nc-time-axis - netcdf4 - numba - numbagg - numexpr - numpy>=2.2 - opt_einsum - packaging - pandas - pandas-stubs<=2.2.3.241126 # https://github.com/pydata/xarray/issues/10110 # - pint>=0.22 - pip - pooch - pre-commit - pyarrow # pandas raises a deprecation warning without this, breaking doctests - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn - sparse - toolz - types-colorama - types-docutils - types-psutil - types-Pygments - types-python-dateutil - types-pytz - types-PyYAML - types-requests - types-setuptools - types-openpyxl - typing_extensions - zarr - pip: - jax # no way to get cpu-only jaxlib from conda if gpu is present - types-defusedxml - types-pexpect xarray-2025.12.0/conftest.py000066400000000000000000000050661511464676000155510ustar00rootroot00000000000000"""Configuration for pytest.""" import pytest def pytest_addoption(parser: pytest.Parser): """Add command-line flags for pytest.""" parser.addoption("--run-flaky", action="store_true", help="runs flaky tests") parser.addoption( "--run-network-tests", action="store_true", help="runs tests requiring a network connection", 
) parser.addoption("--run-mypy", action="store_true", help="runs mypy tests") def pytest_runtest_setup(item): # based on https://stackoverflow.com/questions/47559524 if "flaky" in item.keywords and not item.config.getoption("--run-flaky"): pytest.skip("set --run-flaky option to run flaky tests") if "network" in item.keywords and not item.config.getoption("--run-network-tests"): pytest.skip( "set --run-network-tests to run test requiring an internet connection" ) if any("mypy" in m.name for m in item.own_markers) and not item.config.getoption( "--run-mypy" ): pytest.skip("set --run-mypy option to run mypy tests") # See https://docs.pytest.org/en/stable/example/markers.html#automatically-adding-markers-based-on-test-names def pytest_collection_modifyitems(items): for item in items: if "mypy" in item.nodeid: # IMPORTANT: mypy type annotation tests leverage the pytest-mypy-plugins # plugin, and are thus written in test_*.yml files. As such, there are # no explicit test functions on which we can apply a pytest.mark.mypy # decorator. Therefore, we mark them via this name-based, automatic # marking approach, meaning that each test case must contain "mypy" in the # name. item.add_marker(pytest.mark.mypy) @pytest.fixture(autouse=True) def set_zarr_v3_api(monkeypatch): """Set ZARR_V3_EXPERIMENTAL_API environment variable for all tests.""" monkeypatch.setenv("ZARR_V3_EXPERIMENTAL_API", "1") @pytest.fixture(autouse=True) def add_standard_imports(doctest_namespace, tmpdir): import numpy as np import pandas as pd import xarray as xr doctest_namespace["np"] = np doctest_namespace["pd"] = pd doctest_namespace["xr"] = xr # always seed numpy.random to make the examples deterministic np.random.seed(0) # always switch to the temporary directory, so files get written there tmpdir.chdir() # Avoid the dask deprecation warning, can remove if CI passes without this. try: import dask except ImportError: pass else: dask.config.set({"dataframe.query-planning": True}) xarray-2025.12.0/design_notes/000077500000000000000000000000001511464676000160245ustar00rootroot00000000000000xarray-2025.12.0/design_notes/flexible_indexes_notes.md000066400000000000000000000667211511464676000231030ustar00rootroot00000000000000# Proposal: Xarray flexible indexes refactoring Current status: https://github.com/pydata/xarray/projects/1 ## 1. Data Model Indexes are used in Xarray to extract data from Xarray objects using coordinate labels instead of using integer array indices. Although the indexes used in an Xarray object can be accessed (or built on-the-fly) via public methods like `to_index()` or properties like `indexes`, those are mainly used internally. The goal of this project is to make those indexes 1st-class citizens of Xarray's data model. As such, indexes should clearly be separated from Xarray coordinates with the following relationships: - Index -> Coordinate: one-to-many - Coordinate -> Index: one-to-zero-or-one An index may be built from one or more coordinates. However, each coordinate must relate to one index at most. Additionally, a coordinate may not be tied to any index. The order in which multiple coordinates relate to an index may matter. For example, Scikit-Learn's [`BallTree`](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html#sklearn.neighbors.BallTree) index with the Haversine metric requires providing latitude and longitude values in that specific order. 
As another example, the order in which levels are defined in a `pandas.MultiIndex` may affect its lexsort depth (see [MultiIndex sorting](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#sorting-a-multiindex)).

Xarray's current data model has the same index-coordinate relationships as stated above, although this assumes that multi-index "virtual" coordinates are counted as coordinates (we can consider them as such, with some constraints). More importantly, this refactoring would turn the current one-to-one relationship between a dimension and an index into a many-to-many relationship, which would overcome some current limitations.

For example, we might want to select data along a dimension which has several coordinates:

```python
>>> da
array([...])
Coordinates:
  * drainage_area (river_profile) float64 ...
  * chi           (river_profile) float64 ...
```

In this example, `chi` is a transformation of the `drainage_area` variable that is often used in geomorphology. We'd like to select data along the river profile using either `da.sel(drainage_area=...)` or `da.sel(chi=...)` but that's not currently possible. We could rename the `river_profile` dimension to one of the coordinates, then use `sel` with that coordinate, then call `swap_dims` if we want to use `sel` with the other coordinate, but that's not ideal. We could also build a `pandas.MultiIndex` from `drainage_area` and `chi`, but that's not optimal (there's no hierarchical relationship between these two coordinates).

Let's take another example:

```python
>>> da
array([[...], [...]])
Coordinates:
  * lon (x, y) float64 ...
  * lat (x, y) float64 ...
  * x   (x) float64 ...
  * y   (y) float64 ...
```

This refactoring would allow creating a geographic index for `lat` and `lon` and two simple indexes for `x` and `y` such that we could select data with either `da.sel(lon=..., lat=...)` or `da.sel(x=..., y=...)`.

Refactoring the dimension -> index one-to-one relationship into many-to-many would also introduce some issues that we'll need to address, e.g., ambiguous cases like `da.sel(chi=..., drainage_area=...)` where multiple indexes may potentially return inconsistent positional indexers along a dimension.

## 2. Proposed API changes

### 2.1 Index wrapper classes

Every index that is used to select data from Xarray objects should inherit from a base class, e.g., `XarrayIndex`, that provides some common API. `XarrayIndex` subclasses would generally consist of thin wrappers around existing index classes such as `pandas.Index`, `pandas.MultiIndex`, `scipy.spatial.KDTree`, etc.

There is a variety of features that an xarray index wrapper may or may not support:

- 1-dimensional vs. 2-dimensional vs. n-dimensional coordinate (e.g., `pandas.Index` only supports 1-dimensional coordinates while a geographic index could be built from n-dimensional coordinates)
- built from a single vs multiple coordinate(s) (e.g., `pandas.Index` is built from one coordinate, `pandas.MultiIndex` may be built from an arbitrary number of coordinates and a geographic index would typically require two latitude/longitude coordinates)
- in-memory vs. out-of-core (dask) index data/coordinates (vs. other array backends)
- range-based vs. point-wise selection
- exact vs. inexact lookups

Whether or not a `XarrayIndex` subclass supports each of the features listed above should be either declared explicitly via a common API or left to the implementation. An `XarrayIndex` subclass may encapsulate more than one underlying object used to perform the actual indexing.
Such "meta" index would typically support a range of features among those mentioned above and would automatically select the optimal index object for a given indexing operation. An `XarrayIndex` subclass must/should/may implement the following properties/methods: - a `from_coords` class method that creates a new index wrapper instance from one or more Dataset/DataArray coordinates (+ some options) - a `query` method that takes label-based indexers as argument (+ some options) and that returns the corresponding position-based indexers - an `indexes` property to access the underlying index object(s) wrapped by the `XarrayIndex` subclass - a `data` property to access index's data and map it to coordinate data (see [Section 4](#4-indexvariable)) - a `__getitem__()` implementation to propagate the index through DataArray/Dataset indexing operations - `equals()`, `union()` and `intersection()` methods for data alignment (see [Section 2.6](#26-using-indexes-for-data-alignment)) - Xarray coordinate getters (see [Section 2.2.4](#224-implicit-coordinates)) - a method that may return a new index and that will be called when one of the corresponding coordinates is dropped from the Dataset/DataArray (multi-coordinate indexes) - `encode()`/`decode()` methods that would allow storage-agnostic serialization and fast-path reconstruction of the underlying index object(s) (see [Section 2.8](#28-index-encoding)) - one or more "non-standard" methods or properties that could be leveraged in Xarray 3rd-party extensions like Dataset/DataArray accessors (see [Section 2.7](#27-using-indexes-for-other-purposes)) The `XarrayIndex` API has still to be defined in detail. Xarray should provide a minimal set of built-in index wrappers (this could be reduced to the indexes currently supported in Xarray, i.e., `pandas.Index` and `pandas.MultiIndex`). Other index wrappers may be implemented in 3rd-party libraries (recommended). The `XarrayIndex` base class should be part of Xarray's public API. #### 2.1.1 Index discoverability For better discoverability of Xarray-compatible indexes, Xarray could provide some mechanism to register new index wrappers, e.g., something like [xoak's `IndexRegistry`](https://xoak.readthedocs.io/en/latest/_api_generated/xoak.IndexRegistry.html#xoak.IndexRegistry) or [numcodec's registry](https://numcodecs.readthedocs.io/en/stable/registry.html). Additionally (or alternatively), new index wrappers may be registered via entry points as is already the case for storage backends and maybe other backends (plotting) in the future. Registering new indexes either via a custom registry or via entry points should be optional. Xarray should also allow providing `XarrayIndex` subclasses in its API (Dataset/DataArray constructors, `set_index()`, etc.). ### 2.2 Explicit vs. implicit index creation #### 2.2.1 Dataset/DataArray's `indexes` constructor argument The new `indexes` argument of Dataset/DataArray constructors may be used to specify which kind of index to bind to which coordinate(s). It would consist of a mapping where, for each item, the key is one coordinate name (or a sequence of coordinate names) that must be given in `coords` and the value is the type of the index to build from this (these) coordinate(s): ```python >>> da = xr.DataArray( ... data=[[275.2, 273.5], [270.8, 278.6]], ... dims=("x", "y"), ... coords={ ... "lat": (("x", "y"), [[45.6, 46.5], [50.2, 51.6]]), ... "lon": (("x", "y"), [[5.7, 10.5], [6.2, 12.8]]), ... }, ... indexes={("lat", "lon"): SpatialIndex}, ... 
) array([[275.2, 273.5], [270.8, 278.6]]) Coordinates: * lat (x, y) float64 45.6 46.5 50.2 51.6 * lon (x, y) float64 5.7 10.5 6.2 12.8 ``` More formally, `indexes` would accept `Mapping[CoordinateNames, IndexSpec]` where: - `CoordinateNames = Union[CoordinateName, Tuple[CoordinateName, ...]]` and `CoordinateName = Hashable` - `IndexSpec = Union[Type[XarrayIndex], Tuple[Type[XarrayIndex], Dict[str, Any]], XarrayIndex]`, so that index instances or index classes + build options could be also passed Currently index objects like `pandas.MultiIndex` can be passed directly to `coords`, which in this specific case results in the implicit creation of virtual coordinates. With the new `indexes` argument this behavior may become even more confusing than it currently is. For the sake of clarity, it would be appropriate to eventually drop support for this specific behavior and treat any given mapping value given in `coords` as an array that can be wrapped into an Xarray variable, i.e., in the case of a multi-index: ```python >>> xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx}) array([1., 2.]) Coordinates: x (x) object ('a', 0) ('b', 1) ``` A possible, more explicit solution to reuse a `pandas.MultiIndex` in a DataArray/Dataset with levels exposed as coordinates is proposed in [Section 2.2.4](#224-implicit-coordinates). #### 2.2.2 Dataset/DataArray's `set_index` method New indexes may also be built from existing sets of coordinates or variables in a Dataset/DataArray using the `.set_index()` method. The [current signature](https://docs.xarray.dev/en/stable/generated/xarray.DataArray.set_index.html#xarray.DataArray.set_index) of `.set_index()` is tailored to `pandas.MultiIndex` and tied to the concept of a dimension-index. It is therefore hardly reusable as-is in the context of flexible indexes proposed here. The new signature may look like one of these: - A. `.set_index(coords: CoordinateNames, index: Union[XarrayIndex, Type[XarrayIndex]], **index_kwargs)`: one index is set at a time, index construction options may be passed as keyword arguments - B. `.set_index(indexes: Mapping[CoordinateNames, Union[Type[XarrayIndex], Tuple[Type[XarrayIndex], Dict[str, Any]]]])`: multiple indexes may be set at a time from a mapping of coordinate or variable name(s) as keys and `XarrayIndex` subclasses (maybe with a dict of build options) as values. If variable names are given as keys of they will be promoted as coordinates Option A looks simple and elegant but significantly departs from the current signature. Option B is more consistent with the Dataset/DataArray constructor signature proposed in the previous section and would be easier to adopt in parallel with the current signature that we could still support through some depreciation cycle. The `append` parameter of the current `.set_index()` is specific to `pandas.MultiIndex`. With option B we could still support it, although we might want to either drop it or move it to the index construction options in the future. #### 2.2.3 Implicit default indexes In general explicit index creation should be preferred over implicit index creation. However, there is a majority of cases where basic `pandas.Index` objects could be built and used as indexes for 1-dimensional coordinates. For convenience, Xarray should automatically build such indexes for the coordinates where no index has been explicitly assigned in the Dataset/DataArray constructor or when indexes have been reset / dropped. For which coordinates? - A. 
only 1D coordinates with a name matching their dimension name - B. all 1D coordinates When to create it? - A. each time when a new Dataset/DataArray is created - B. only when we need it (i.e., when calling `.sel()` or `indexes`) Options A and A are what Xarray currently does and may be the best choice considering that indexes could possibly be invalidated by coordinate mutation. Besides `pandas.Index`, other indexes currently supported in Xarray like `CFTimeIndex` could be built depending on the coordinate data type. #### 2.2.4 Implicit coordinates Like for the indexes, explicit coordinate creation should be preferred over implicit coordinate creation. However, there may be some situations where we would like to keep creating coordinates implicitly for backwards compatibility. For example, it is currently possible to pass a `pandas.MultiIndex` object as a coordinate to the Dataset/DataArray constructor: ```python >>> midx = pd.MultiIndex.from_arrays([["a", "b"], [0, 1]], names=["lvl1", "lvl2"]) >>> da = xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx}) >>> da array([1., 2.]) Coordinates: * x (x) MultiIndex - lvl1 (x) object 'a' 'b' - lvl2 (x) int64 0 1 ``` In that case, virtual coordinates are created for each level of the multi-index. After the index refactoring, these coordinates would become real coordinates bound to the multi-index. In the example above a coordinate is also created for the `x` dimension: ```python >>> da.x array([('a', 0), ('b', 1)], dtype=object) Coordinates: * x (x) MultiIndex - lvl1 (x) object 'a' 'b' - lvl2 (x) int64 0 1 ``` With the new proposed data model, this wouldn't be a requirement anymore: there is no concept of a dimension-index. However, some users might still rely on the `x` coordinate so we could still (temporarily) support it for backwards compatibility. Besides `pandas.MultiIndex`, there may be other situations where we would like to reuse an existing index in a new Dataset/DataArray (e.g., when the index is very expensive to build), and which might require implicit creation of one or more coordinates. The example given here is quite confusing, though: this is not an easily predictable behavior. We could entirely avoid the implicit creation of coordinates, e.g., using a helper function that generates coordinate + index dictionaries that we could then pass directly to the DataArray/Dataset constructor: ```python >>> coords_dict, index_dict = create_coords_from_index( ... midx, dims="x", include_dim_coord=True ... ) >>> coords_dict {'x': array([('a', 0), ('b', 1)], dtype=object), 'lvl1': array(['a', 'b'], dtype=object), 'lvl2': array([0, 1])} >>> index_dict {('lvl1', 'lvl2'): midx} >>> xr.DataArray([1.0, 2.0], dims="x", coords=coords_dict, indexes=index_dict) array([1., 2.]) Coordinates: x (x) object ('a', 0) ('b', 1) * lvl1 (x) object 'a' 'b' * lvl2 (x) int64 0 1 ``` ### 2.2.5 Immutable indexes Some underlying indexes might be mutable (e.g., a tree-based index structure that allows dynamic addition of data points) while other indexes like `pandas.Index` aren't. To keep things simple, it is probably better to continue considering all indexes in Xarray as immutable (as well as their corresponding coordinates, see [Section 2.4.1](#241-mutable-coordinates)). ### 2.3 Index access #### 2.3.1 Dataset/DataArray's `indexes` property The `indexes` property would allow easy access to all the indexes used in a Dataset/DataArray. It would return a `Dict[CoordinateName, XarrayIndex]` for easy index lookup from coordinate name. 
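As a purely hypothetical illustration of that mapping, reusing the `SpatialIndex` example from Section 2.2.1 (the reprs below are made up, not an existing API):

```python
>>> da.indexes
{'lat': SpatialIndex, 'lon': SpatialIndex}
>>> da.indexes["lat"] is da.indexes["lon"]  # one index instance shared by both coordinates
True
```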
#### 2.3.2 Additional Dataset/DataArray properties or methods In some cases the format returned by the `indexes` property would not be the best (e.g, it may return duplicate index instances as values). For convenience, we could add one more property / method to get the indexes in the desired format if needed. ### 2.4 Propagate indexes through operations #### 2.4.1 Mutable coordinates Dataset/DataArray coordinates may be replaced (`__setitem__`) or dropped (`__delitem__`) in-place, which may invalidate some of the indexes. A drastic though probably reasonable solution in this case would be to simply drop all indexes bound to those replaced/dropped coordinates. For the case where a 1D basic coordinate that corresponds to a dimension is added/replaced, we could automatically generate a new index (see [Section 2.2.4](#224-implicit-indexes)). We must also ensure that coordinates having a bound index are immutable, e.g., still wrap them into `IndexVariable` objects (even though the `IndexVariable` class might change substantially after this refactoring). #### 2.4.2 New Dataset/DataArray with updated coordinates Xarray provides a variety of Dataset/DataArray operations affecting the coordinates and where simply dropping the index(es) is not desirable. For example: - multi-coordinate indexes could be reduced to single coordinate indexes - like in `.reset_index()` or `.sel()` applied on a subset of the levels of a `pandas.MultiIndex` and that internally call `MultiIndex.droplevel` and `MultiIndex.get_loc_level`, respectively - indexes may be indexed themselves - like `pandas.Index` implements `__getitem__()` - when indexing their corresponding coordinate(s), e.g., via `.sel()` or `.isel()`, those indexes should be indexed too - this might not be supported by all Xarray indexes, though - some indexes that can't be indexed could still be automatically (re)built in the new Dataset/DataArray - like for example building a new `KDTree` index from the selection of a subset of an initial collection of data points - this is not always desirable, though, as indexes may be expensive to build - a more reasonable option would be to explicitly re-build the index, e.g., using `.set_index()` - Dataset/DataArray operations involving alignment (see [Section 2.6](#26-using-indexes-for-data-alignment)) ### 2.5 Using indexes for data selection One main use of indexes is label-based data selection using the DataArray/Dataset `.sel()` method. This refactoring would introduce a number of API changes that could go through some depreciation cycles: - the keys of the mapping given to `indexers` (or the names of `indexer_kwargs`) would not correspond to only dimension names but could be the name of any coordinate that has an index - for a `pandas.MultiIndex`, if no dimension-coordinate is created by default (see [Section 2.2.4](#224-implicit-coordinates)), providing dict-like objects as indexers should be depreciated - there should be the possibility to provide additional options to the indexes that support specific selection features (e.g., Scikit-learn's `BallTree`'s `dualtree` query option to boost performance). 
  - the best API is not trivial here, since `.sel()` may accept indexers passed to several indexes (which should still be supported for convenience and compatibility), and indexes may have similar options with different semantics
  - we could introduce a new parameter like `index_options: Dict[XarrayIndex, Dict[str, Any]]` to pass options grouped by index
- the `method` and `tolerance` parameters are specific to `pandas.Index` and would not be supported by all indexes: probably best is to eventually pass those arguments as `index_options`
- the list of valid indexer types might be extended in order to support new ways of indexing data, e.g., unordered selection of all points within a given range
  - alternatively, we could reuse existing indexer types with different semantics depending on the index, e.g., using `slice(min, max, None)` for unordered range selection

With the new data model proposed here, an ambiguous situation may occur when indexers are given for several coordinates that share the same dimension but not the same index, e.g., from the example in [Section 1](#1-data-model):

```python
da.sel(x=..., y=..., lat=..., lon=...)
```

The easiest solution for this situation would be to raise an error. Alternatively, we could introduce a new parameter to specify how to combine the resulting integer indexers (i.e., union vs intersection), although this could already be achieved by chaining `.sel()` calls or combining `.sel()` with `.merge()` (it may or may not be straightforward).

### 2.6 Using indexes for data alignment

Another main use of indexes is data alignment in various operations. Some considerations regarding alignment and flexible indexes:

- support for alignment should probably be optional for an `XarrayIndex` subclass.
  - like `pandas.Index`, the index wrapper classes that support it should implement `.equals()`, `.union()` and/or `.intersection()` (a sketch follows this list)
  - support might be partial if that makes sense (outer, inner, left, right, exact...).
- index equality might involve more than just the labels: for example a spatial index might be used to check if the coordinate system (CRS) is identical for two sets of coordinates
- some indexes might implement inexact alignment, like in [#4489](https://github.com/pydata/xarray/pull/4489) or a `KDTree` index that selects nearest-neighbors within a given tolerance
  - alignment may be "multi-dimensional", i.e., the `KDTree` example above vs. dimensions aligned independently of each other
- we need to decide what to do when one dimension has more than one index that supports alignment
  - we should probably raise unless the user explicitly specifies which index to use for the alignment
- we need to decide what to do when one dimension has one or more index(es) but none support alignment
  - either we raise or we fall back (silently) to alignment based on dimension size
- for inexact alignment, the tolerance threshold might be given when building the index and/or when performing the alignment
- are there cases where we want a specific index to perform alignment and another index to perform selection?
  - it would be tricky to support that unless we allow multiple indexes per coordinate
  - alternatively, underlying indexes could be picked internally in a "meta" index for one operation or another, although the risk is to eventually have to deal with an explosion of index wrapper classes with different meta indexes for each combination that we'd like to use.
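As a rough illustration of what this optional alignment API could look like for a wrapper around a single `pandas.Index` — the class name and signatures below are assumptions made for the sketch, not part of the proposal:

```python
import pandas as pd


class PandasIndexWrapper:
    """Hypothetical XarrayIndex subclass wrapping a single pandas.Index."""

    def __init__(self, index: pd.Index):
        self.index = index

    def equals(self, other: "PandasIndexWrapper") -> bool:
        # exact alignment: label equality (a spatial index could also compare CRS here)
        return self.index.equals(other.index)

    def union(self, other: "PandasIndexWrapper") -> "PandasIndexWrapper":
        # outer join of labels
        return type(self)(self.index.union(other.index))

    def intersection(self, other: "PandasIndexWrapper") -> "PandasIndexWrapper":
        # inner join of labels
        return type(self)(self.index.intersection(other.index))
```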
### 2.7 Using indexes for other purposes Xarray also provides a number of Dataset/DataArray methods where indexes are used in various ways, e.g., - `resample` (`CFTimeIndex` and a `DatetimeIntervalIndex`) - `DatetimeAccessor` & `TimedeltaAccessor` properties (`CFTimeIndex` and a `DatetimeIntervalIndex`) - `interp` & `interpolate_na`, - with `IntervalIndex`, these become regridding operations. Should we support hooks for these operations? - `differentiate`, `integrate`, `polyfit` - raise an error if not a "simple" 1D index? - `pad` - `coarsen` has to make choices about output index labels. - `sortby` - `stack`/`unstack` - plotting - `plot.pcolormesh` "infers" interval breaks along axes, which are really inferred `bounds` for the appropriate indexes. - `plot.step` again uses `bounds`. In fact, we may even want `step` to be the default 1D plotting function if the axis has `bounds` attached. It would be reasonable to first restrict those methods to the indexes that are currently available in Xarray, and maybe extend the `XarrayIndex` API later upon request when the opportunity arises. Conversely, nothing should prevent implementing "non-standard" API in 3rd-party `XarrayIndex` subclasses that could be used in DataArray/Dataset extensions (accessors). For example, we might want to reuse a `KDTree` index to compute k-nearest neighbors (returning a DataArray/Dataset with a new dimension) and/or the distances to the nearest neighbors (returning a DataArray/Dataset with a new data variable). ### 2.8 Index encoding Indexes don't need to be directly serializable since we could (re)build them from their corresponding coordinate(s). However, it would be useful if some indexes could be encoded/decoded to/from a set of arrays that would allow optimized reconstruction and/or storage, e.g., - `pandas.MultiIndex` -> `index.levels` and `index.codes` - Scikit-learn's `KDTree` and `BallTree` that use an array-based representation of an immutable tree structure ## 3. Index representation in DataArray/Dataset's `repr` Since indexes would become 1st class citizen of Xarray's data model, they deserve their own section in Dataset/DataArray `repr` that could look like: ``` array([[5.4, 7.8], [6.2, 4.7]]) Coordinates: * lon (x, y) float64 10.2 15.2 12.6 17.6 * lat (x, y) float64 40.2 45.6 42.2 47.6 * x (x) float64 200.0 400.0 * y (y) float64 800.0 1e+03 Indexes: lat, lon x y ``` To keep the `repr` compact, we could: - consolidate entries that map to the same index object, and have a short inline repr for `XarrayIndex` object - collapse the index section by default in the HTML `repr` - maybe omit all trivial indexes for 1D coordinates that match the dimension name ## 4. `IndexVariable` `IndexVariable` is currently used to wrap a `pandas.Index` as a variable, which would not be relevant after this refactoring since it is aimed at decoupling indexes and variables. We'll probably need to move elsewhere some of the features implemented in `IndexVariable` to: - ensure that all coordinates with an index are immutable (see [Section 2.4.1](#241-mutable-coordinates)) - do not set values directly, do not (re)chunk (even though it may be already chunked), do not load, do not convert to sparse/dense, etc. - directly reuse index's data when that's possible - in the case of a `pandas.Index`, it makes little sense to have duplicate data (e.g., as a NumPy array) for its corresponding coordinate - convert a variable into a `pandas.Index` using `.to_index()` (for backwards compatibility). 
Other `IndexVariable` API like `level_names` and `get_level_variable()` would not useful anymore: it is specific to how we currently deal with `pandas.MultiIndex` and virtual "level" coordinates in Xarray. ## 5. Chunked coordinates and/or indexers We could take opportunity of this refactoring to better leverage chunked coordinates (and/or chunked indexers for data selection). There's two ways to enable it: A. support for chunked coordinates is left to the index B. support for chunked coordinates is index agnostic and is implemented in Xarray As an example for B, [xoak](https://github.com/ESM-VFC/xoak) supports building an index for each chunk, which is coupled with a two-step data selection process (cross-index queries + brute force "reduction" look-up). There is an example [here](https://xoak.readthedocs.io/en/latest/examples/dask_support.html). This may be tedious to generalize this to other kinds of operations, though. Xoak's Dask support is rather experimental, not super stable (it's quite hard to control index replication and data transfer between Dask workers with the default settings), and depends on whether indexes are thread-safe and/or serializable. Option A may be more reasonable for now. ## 6. Coordinate duck arrays Another opportunity of this refactoring is support for duck arrays as index coordinates. Decoupling coordinates and indexes would _de-facto_ enable it. However, support for duck arrays in index-based operations such as data selection or alignment would probably require some protocol extension, e.g., ```python class MyDuckArray: ... def _sel_(self, indexer): """Prepare the label-based indexer to conform to this coordinate array.""" ... return new_indexer ... ``` For example, a `pint` array would implement `_sel_` to perform indexer unit conversion or raise, warn, or just pass the indexer through if it has no units. xarray-2025.12.0/design_notes/grouper_objects.md000066400000000000000000000302001511464676000215350ustar00rootroot00000000000000# Grouper Objects **Author**: Deepak Cherian **Created**: Nov 21, 2023 ## Abstract I propose the addition of Grouper objects to Xarray's public API so that ```python Dataset.groupby(x=BinGrouper(bins=np.arange(10, 2))) ``` is identical to today's syntax: ```python Dataset.groupby_bins("x", bins=np.arange(10, 2)) ``` ## Motivation and scope Xarray's GroupBy API implements the split-apply-combine pattern (Wickham, 2011)[^1], which applies to a very large number of problems: histogramming, compositing, climatological averaging, resampling to a different time frequency, etc. The pattern abstracts the following pseudocode: ```python results = [] for element in unique_labels: subset = ds.sel(x=(ds.x == element)) # split # subset = ds.where(ds.x == element, drop=True) # alternative result = subset.mean() # apply results.append(result) xr.concat(results) # combine ``` to ```python ds.groupby("x").mean() # splits, applies, and combines ``` Efficient vectorized implementations of this pattern are implemented in numpy's [`ufunc.at`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.at.html), [`ufunc.reduceat`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.reduceat.html), [`numbagg.grouped`](https://github.com/numbagg/numbagg/blob/main/numbagg/grouped.py), [`numpy_groupies`](https://github.com/ml31415/numpy-groupies), and probably more. 
These vectorized implementations _all_ require, as input, an array of integer codes or labels that identify unique elements in the array being grouped over (`'x'` in the example above). ```python import numpy as np # array to reduce a = np.array([1, 1, 1, 1, 2]) # initial value for result out = np.zeros((3,), dtype=int) # integer codes labels = np.array([0, 0, 1, 2, 1]) # groupby-reduction np.add.at(out, labels, a) out # array([2, 3, 1]) ``` One can 'factorize' or construct such an array of integer codes using `pandas.factorize` or `numpy.unique(..., return_inverse=True)` for categorical arrays; `pandas.cut`, `pandas.qcut`, or `np.digitize` for discretizing continuous variables. In practice, since `GroupBy` objects exist, much of complexity in applying the groupby paradigm stems from appropriately factorizing or generating labels for the operation. Consider these two examples: 1. [Bins that vary in a dimension](https://flox.readthedocs.io/en/latest/user-stories/nD-bins.html) 2. [Overlapping groups](https://flox.readthedocs.io/en/latest/user-stories/overlaps.html) 3. [Rolling resampling](https://github.com/pydata/xarray/discussions/8361) Anecdotally, less experienced users commonly resort to the for-loopy implementation illustrated by the pseudocode above when the analysis at hand is not easily expressed using the API presented by Xarray's GroupBy object. Xarray's GroupBy API today abstracts away the split, apply, and combine stages but not the "factorize" stage. Grouper objects will close the gap. ## Usage and impact Grouper objects 1. Will abstract useful factorization algorithms, and 2. Present a natural way to extend GroupBy to grouping by multiple variables: `ds.groupby(x=BinGrouper(...), t=Resampler(freq="M", ...)).mean()`. In addition, Grouper objects provide a nice interface to add often-requested grouping functionality 1. A new `SpaceResampler` would allow specifying resampling spatial dimensions. ([issue](https://github.com/pydata/xarray/issues/4008)) 2. `RollingTimeResampler` would allow rolling-like functionality that understands timestamps ([issue](https://github.com/pydata/xarray/issues/3216)) 3. A `QuantileBinGrouper` to abstract away `pd.cut` ([issue](https://github.com/pydata/xarray/discussions/7110)) 4. A `SeasonGrouper` and `SeasonResampler` would abstract away common annoyances with such calculations today 1. Support seasons that span a year-end. 2. Only include seasons with complete data coverage. 3. Allow grouping over seasons of unequal length 4. See [this xcdat discussion](https://github.com/xCDAT/xcdat/issues/416) for a `SeasonGrouper` like functionality: 5. Return results with seasons in a sensible order 5. Weighted grouping ([issue](https://github.com/pydata/xarray/issues/3937)) 1. Once `IntervalIndex` like objects are supported, `Resampler` groupers can account for interval lengths when resampling. ## Backward Compatibility Xarray's existing grouping functionality will be exposed using two new Groupers: 1. `UniqueGrouper` which uses `pandas.factorize` 2. `BinGrouper` which uses `pandas.cut` 3. `TimeResampler` which mimics pandas' `.resample` Grouping by single variables will be unaffected so that `ds.groupby('x')` will be identical to `ds.groupby(x=UniqueGrouper())`. Similarly, `ds.groupby_bins('x', bins=np.arange(10, 2))` will be unchanged and identical to `ds.groupby(x=BinGrouper(bins=np.arange(10, 2)))`. 
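To make the connection to the factorization step explicit, the following minimal sketch shows the integer codes that `pandas.factorize` and `pandas.cut` produce — the machinery these built-in Groupers are expected to wrap, not their actual implementation:

```python
import numpy as np
import pandas as pd

# UniqueGrouper-style factorization of a categorical variable
codes, uniques = pd.factorize(np.array(["a", "b", "a", "c"]))
# codes -> array([0, 1, 0, 2]); uniques -> array(['a', 'b', 'c'], dtype=object)

# BinGrouper-style factorization of a continuous variable
binned = pd.cut(np.array([0.1, 2.5, 7.9]), bins=np.arange(0, 11, 2))
# binned.codes -> array([0, 1, 3], dtype=int8)
# binned.categories lists every bin, including bins that received no data
```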
## Detailed description All Grouper objects will subclass from a Grouper object ```python import abc class Grouper(abc.ABC): @abc.abstractmethod def factorize(self, by: DataArray): raise NotImplementedError class CustomGrouper(Grouper): def factorize(self, by: DataArray): ... return codes, group_indices, unique_coord, full_index def weights(self, by: DataArray) -> DataArray: ... return weights ``` ### The `factorize` method Today, the `factorize` method takes as input the group variable and returns 4 variables (I propose to clean this up below): 1. `codes`: An array of same shape as the `group` with int dtype. NaNs in `group` are coded by `-1` and ignored later. 2. `group_indices` is a list of index location of `group` elements that belong to a single group. 3. `unique_coord` is (usually) a `pandas.Index` object of all unique `group` members present in `group`. 4. `full_index` is a `pandas.Index` of all `group` members. This is different from `unique_coord` for binning and resampling, where not all groups in the output may be represented in the input `group`. For grouping by a categorical variable e.g. `['a', 'b', 'a', 'c']`, `full_index` and `unique_coord` are identical. There is some redundancy here since `unique_coord` is always equal to or a subset of `full_index`. We can clean this up (see Implementation below). ### The `weights` method (?) The proposed `weights` method is optional and unimplemented today. Groupers with `weights` will allow composing `weighted` and `groupby` ([issue](https://github.com/pydata/xarray/issues/3937)). The `weights` method should return an appropriate array of weights such that the following property is satisfied ```python gb_sum = ds.groupby(by).sum() weights = CustomGrouper.weights(by) weighted_sum = xr.dot(ds, weights) assert_identical(gb_sum, weighted_sum) ``` For example, the boolean weights for `group=np.array(['a', 'b', 'c', 'a', 'a'])` should be ``` [[1, 0, 0, 1, 1], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]] ``` This is the boolean "summarization matrix" referred to in the classic Iverson (1980, Section 4.3)[^2] and "nub sieve" in [various APLs](https://aplwiki.com/wiki/Nub_Sieve). > [!NOTE] > We can always construct `weights` automatically using `group_indices` from `factorize`, so this is not a required method. For a rolling resampling, windowed weights are possible ``` [[0.5, 1, 0.5, 0, 0], [0, 0.25, 1, 1, 0], [0, 0, 0, 1, 1]] ``` ### The `preferred_chunks` method (?) Rechunking support is another optional extension point. In `flox` I experimented some with automatically rechunking to make a groupby more parallel-friendly ([example 1](https://flox.readthedocs.io/en/latest/generated/flox.rechunk_for_blockwise.html), [example 2](https://flox.readthedocs.io/en/latest/generated/flox.rechunk_for_cohorts.html)). A great example is for resampling-style groupby reductions, for which `codes` might look like ``` 0001|11122|3333 ``` where `|` represents chunk boundaries. A simple rechunking to ``` 000|111122|3333 ``` would make this resampling reduction an embarrassingly parallel blockwise problem. 
Similarly consider monthly-mean climatologies for which the month numbers might be ``` 1 2 3 4 5 | 6 7 8 9 10 | 11 12 1 2 3 | 4 5 6 7 8 | 9 10 11 12 | ``` A slight rechunking to ``` 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | ``` allows us to reduce `1, 2, 3, 4` separately from `5,6,7,8` and `9, 10, 11, 12` while still being parallel friendly (see the [flox documentation](https://flox.readthedocs.io/en/latest/implementation.html#method-cohorts) for more). We could attempt to detect these patterns, or we could just have the Grouper take as input `chunks` and return a tuple of "nice" chunk sizes to rechunk to. ```python def preferred_chunks(self, chunks: ChunksTuple) -> ChunksTuple: pass ``` For monthly means, since the period of repetition of labels is 12, the Grouper might choose possible chunk sizes of `((2,),(3,),(4,),(6,))`. For resampling, the Grouper could choose to resample to a multiple or an even fraction of the resampling frequency. ## Related work Pandas has [Grouper objects](https://pandas.pydata.org/docs/reference/api/pandas.Grouper.html#pandas-grouper) that represent the GroupBy instruction. However, these objects do not appear to be extension points, unlike the Grouper objects proposed here. Instead, Pandas' `ExtensionArray` has a [`factorize`](https://pandas.pydata.org/docs/reference/api/pandas.api.extensions.ExtensionArray.factorize.html) method. Composing rolling with time resampling is a common workload: 1. Polars has [`group_by_dynamic`](https://pola-rs.github.io/polars/py-polars/html/reference/dataframe/api/polars.DataFrame.group_by_dynamic.html) which appears to be like the proposed `RollingResampler`. 2. scikit-downscale provides [`PaddedDOYGrouper`](https://github.com/pangeo-data/scikit-downscale/blob/e16944a32b44f774980fa953ea18e29a628c71b8/skdownscale/pointwise_models/groupers.py#L19) ## Implementation Proposal 1. Get rid of `squeeze` [issue](https://github.com/pydata/xarray/issues/2157): [PR](https://github.com/pydata/xarray/pull/8506) 2. Merge existing two class implementation to a single Grouper class 1. This design was implemented in [this PR](https://github.com/pydata/xarray/pull/7206) to account for some annoying data dependencies. 2. See [PR](https://github.com/pydata/xarray/pull/8509) 3. Clean up what's returned by `factorize` methods. 1. A solution here might be to have `group_indices: Mapping[int, Sequence[int]]` be a mapping from group index in `full_index` to a sequence of integers. 2. Return a `namedtuple` or `dataclass` from existing Grouper factorize methods to facilitate API changes in the future. 4. Figure out what to pass to `factorize` 1. Xarray eagerly reshapes nD variables to 1D. This is an implementation detail we need not expose. 2. When grouping by an unindexed variable Xarray passes a `_DummyGroup` object. This seems like something we don't want in the public interface. We could special case "internal" Groupers to preserve the optimizations in `UniqueGrouper`. 5. Grouper objects will exposed under the `xr.groupers` Namespace. At first these will include `UniqueGrouper`, `BinGrouper`, and `TimeResampler`. ## Alternatives One major design choice made here was to adopt the syntax `ds.groupby(x=BinGrouper(...))` instead of `ds.groupby(BinGrouper('x', ...))`. This allows reuse of Grouper objects, example ```python grouper = BinGrouper(...) ds.groupby(x=grouper, y=grouper) ``` but requires that all variables being grouped by (`x` and `y` above) are present in Dataset `ds`. This does not seem like a bad requirement. 
Importantly `Grouper` instances will be copied internally so that they can safely cache state that might be shared between `factorize` and `weights`. Today, it is possible to `ds.groupby(DataArray, ...)`. This syntax will still be supported. ## Discussion This proposal builds on these discussions: 1. https://github.com/xarray-contrib/flox/issues/191#issuecomment-1328898836 2. https://github.com/pydata/xarray/issues/6610 ## Copyright This document has been placed in the public domain. ## References and footnotes [^1]: Wickham, H. (2011). The split-apply-combine strategy for data analysis. https://vita.had.co.nz/papers/plyr.html [^2]: Iverson, K.E. (1980). Notation as a tool of thought. Commun. ACM 23, 8 (Aug. 1980), 444โ€“465. https://doi.org/10.1145/358896.358899 xarray-2025.12.0/design_notes/named_array_design_doc.md000066400000000000000000000667611511464676000230260ustar00rootroot00000000000000# named-array Design Document ## Abstract Despite the wealth of scientific libraries in the Python ecosystem, there is a gap for a lightweight, efficient array structure with named dimensions that can provide convenient broadcasting and indexing. Existing solutions like Xarray's Variable, [Pytorch Named Tensor](https://github.com/pytorch/pytorch/issues/60832), [Levanter](https://crfm.stanford.edu/2023/06/16/levanter-1_0-release.html), and [Larray](https://larray.readthedocs.io/en/stable/tutorial/getting_started.html) have their own strengths and weaknesses. Xarray's Variable is an efficient data structure, but it depends on the relatively heavy-weight library Pandas, which limits its use in other projects. Pytorch Named Tensor offers named dimensions, but it lacks support for many operations, making it less user-friendly. Levanter is a powerful tool with a named tensor module (Haliax) that makes deep learning code easier to read, understand, and write, but it is not as lightweight or generic as desired. Larry offers labeled N-dimensional arrays, but it may not provide the level of seamless interoperability with other scientific Python libraries that some users need. named-array aims to solve these issues by exposing the core functionality of Xarray's Variable class as a standalone package. ## Motivation and Scope The Python ecosystem boasts a wealth of scientific libraries that enable efficient computations on large, multi-dimensional arrays. Libraries like PyTorch, Xarray, and NumPy have revolutionized scientific computing by offering robust data structures for array manipulations. Despite this wealth of tools, a gap exists in the Python landscape for a lightweight, efficient array structure with named dimensions that can provide convenient broadcasting and indexing. Xarray internally maintains a data structure that meets this need, referred to as [`xarray.Variable`](https://docs.xarray.dev/en/latest/generated/xarray.Variable.html) . However, Xarray's dependency on Pandas, a relatively heavy-weight library, restricts other projects from leveraging this efficient data structure (, , ). We propose the creation of a standalone Python package, "named-array". This package is envisioned to be a version of the `xarray.Variable` data structure, cleanly separated from the heavier dependencies of Xarray. named-array will provide a lightweight, user-friendly array-like data structure with named dimensions, facilitating convenient indexing and broadcasting. 
The package will use existing scientific Python community standards such as established array protocols and the new [Python array API standard](https://data-apis.org/array-api/latest), allowing users to wrap multiple duck-array objects, including, but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and Pytorch. The development of named-array is projected to meet a key community need and expected to broaden Xarray's user base. By making the core `xarray.Variable` more accessible, we anticipate an increase in contributors and a reduction in the developer burden on current Xarray maintainers. ### Goals 1. **Simple and minimal**: named-array will expose Xarray's [Variable class](https://docs.xarray.dev/en/stable/internals/variable-objects.html) as a standalone object (`NamedArray`) with named axes (dimensions) and arbitrary metadata (attributes) but without coordinate labels. This will make it a lightweight, efficient array data structure that allows convenient broadcasting and indexing. 2. **Interoperability**: named-array will follow established scientific Python community standards and in doing so, will allow it to wrap multiple duck-array objects, including but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and Pytorch. 3. **Community Engagement**: By making the core `xarray.Variable` more accessible, we open the door to increased adoption of this fundamental data structure. As such, we hope to see an increase in contributors and reduction in the developer burden on current Xarray maintainers. ### Non-Goals 1. **Extensive Data Analysis**: named-array will not provide extensive data analysis features like statistical functions, data cleaning, or visualization. Its primary focus is on providing a data structure that allows users to use dimension names for descriptive array manipulations. 2. **Support for I/O**: named-array will not bundle file reading functions. Instead users will be expected to handle I/O and then wrap those arrays with the new named-array data structure. ## Backward Compatibility The creation of named-array is intended to separate the `xarray.Variable` from Xarray into a standalone package. This allows it to be used independently, without the need for Xarray's dependencies, like Pandas. This separation has implications for backward compatibility. Since the new named-array is envisioned to contain the core features of Xarray's variable, existing code using Variable from Xarray should be able to switch to named-array with minimal changes. However, there are several potential issues related to backward compatibility: - **API Changes**: as the Variable is decoupled from Xarray and moved into named-array, some changes to the API may be necessary. These changes might include differences in function signature, etc. These changes could break existing code that relies on the current API and associated utility functions (e.g. `as_variable()`). The `xarray.Variable` object will subclass `NamedArray`, and provide the existing interface for compatibility. ## Detailed Description named-array aims to provide a lightweight, efficient array structure with named dimensions, or axes, that enables convenient broadcasting and indexing. The primary component of named-array is a standalone version of the xarray.Variable data structure, which was previously a part of the Xarray library. 
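As a concrete sketch of the kind of object being described — written here with today's `xarray.Variable`, the structure the standalone package would expose; the eventual named-array constructor may differ:

```python
import numpy as np
from xarray import Variable

v = Variable(
    dims=("x", "y"),            # named axes
    data=np.zeros((2, 3)),      # could be any supported duck array
    attrs={"units": "kelvin"},  # arbitrary metadata
)
v.mean(dim="y")                 # dimension names make operations self-describing
```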
## Detailed Description

named-array aims to provide a lightweight, efficient array structure with named dimensions, or axes, that enables convenient broadcasting and indexing. The primary component of named-array is a standalone version of the xarray.Variable data structure, which was previously a part of the Xarray library.

The xarray.Variable data structure in named-array will maintain the core features of its counterpart in Xarray, including:

- **Named Axes (Dimensions)**: Each axis of the array can be given a name, providing a descriptive and intuitive way to reference the dimensions of the array.
- **Arbitrary Metadata (Attributes)**: named-array will support the attachment of arbitrary metadata to arrays as a dict, providing a mechanism to store additional information about the data that the array represents.
- **Convenient Broadcasting and Indexing**: With named dimensions, broadcasting and indexing operations become more intuitive and less error-prone.

The named-array package is designed to be interoperable with other scientific Python libraries. It will follow established scientific Python community standards and use standard array protocols, as well as the new data-apis standard. This allows named-array to wrap multiple duck-array objects, including, but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and PyTorch.
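As a rough illustration of the behaviour described above (hypothetical class and method names, not the finalized named-array API), the following sketch shows how named dimensions turn positional axis bookkeeping into name-based operations:

```python
import numpy as np


class MiniNamedArray:
    """Tiny illustration of a dimension-named array wrapper (not the real NamedArray)."""

    def __init__(self, dims, data, attrs=None):
        data = np.asarray(data)
        if len(dims) != data.ndim:
            raise ValueError(f"expected {data.ndim} dimension names, got {len(dims)}")
        self.dims = tuple(dims)          # named axes
        self.data = data                 # the wrapped (duck) array
        self.attrs = dict(attrs or {})   # arbitrary metadata

    @property
    def sizes(self):
        # map each dimension name to its length
        return dict(zip(self.dims, self.data.shape))

    def get_axis_num(self, dim):
        # translate a dimension name into a positional axis
        return self.dims.index(dim)

    def mean(self, dim):
        # reduce over a *named* dimension instead of a positional axis
        axis = self.get_axis_num(dim)
        return MiniNamedArray(
            tuple(d for d in self.dims if d != dim),
            self.data.mean(axis=axis),
            self.attrs,
        )


arr = MiniNamedArray(("time", "space"), np.arange(12.0).reshape(3, 4), attrs={"units": "K"})
print(arr.sizes)                   # {'time': 3, 'space': 4}
print(arr.mean(dim="space").dims)  # ('time',)
```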
## Implementation

- **Decoupling**: making `variable.py` agnostic to Xarray internals by decoupling it from the rest of the library. This will make the code more modular and easier to maintain. However, this will also make the code more complex, as we will need to define a clear interface for how the functionality in `variable.py` interacts with the rest of the library, particularly the ExplicitlyIndexed subclasses used to enable lazy indexing of data on disk.
- **Move Xarray's internal lazy indexing classes to follow standard Array Protocols**: moving the lazy indexing classes like `ExplicitlyIndexed` to use standard array protocols will be a key step in decoupling. It will also potentially improve interoperability with other libraries that use these protocols, and prepare these classes [for eventual movement out](https://github.com/pydata/xarray/issues/5081) of the Xarray code base. However, this will also require significant changes to the code, and we will need to ensure that all existing functionality is preserved.
  - Use [https://data-apis.org/array-api-compat/](https://data-apis.org/array-api-compat/) to handle compatibility issues?
- **Leave lazy indexing classes in Xarray for now**
- **Preserve support for Dask collection protocols**: named-array will preserve existing support for the dask collections protocol, namely the `__dask_***__` methods
- **Preserve support for ChunkManagerEntrypoint?** Opening variables backed by dask vs cubed arrays currently is [handled within Variable.chunk](https://github.com/pydata/xarray/blob/92c8b33eb464b09d6f8277265b16cae039ab57ee/xarray/core/variable.py#L1272C15-L1272C15). If we are preserving dask support it would be nice to preserve general chunked array type support, but this currently requires an entrypoint.

### Plan

1. Create a new base class for `xarray.Variable` in its own module, e.g. `xarray.core.base_variable`
2. Remove all imports of internal Xarray classes and utils from `base_variable.py`. `base_variable.Variable` should not depend on anything in xarray.core
   - Will require moving the lazy indexing classes (subclasses of ExplicitlyIndexed) to be standards-compliant containers.
   - an array-api compliant container that provides `__array_namespace__` (see the dispatch sketch after this list)
   - Support `.oindex` and `.vindex` for explicit indexing
   - Potentially implement this by introducing a new compliant wrapper object?
   - Delete the `NON_NUMPY_SUPPORTED_ARRAY_TYPES` variable which special-cases ExplicitlyIndexed and `pd.Index`.
   - `ExplicitlyIndexed` class and subclasses should provide `.oindex` and `.vindex` for indexing by `Variable.__getitem__`: `oindex` and `vindex` were proposed in [NEP21](https://numpy.org/neps/nep-0021-advanced-indexing.html), but have not been implemented yet
   - Delete the ExplicitIndexer objects (`BasicIndexer`, `VectorizedIndexer`, `OuterIndexer`)
   - Remove explicit support for `pd.Index`. When provided with a `pd.Index` object, Variable will coerce to an array using `np.array(pd.Index)`. For Xarray's purposes, Xarray can use `as_variable` to explicitly wrap these in PandasIndexingAdapter and pass them to `Variable.__init__`.
3. Define a minimal variable interface that the rest of Xarray can use:
   1. `dims`: tuple of dimension names
   2. `data`: numpy/dask/duck arrays
   3. `attrs`: dictionary of attributes
4. Implement basic functions & methods for manipulating these objects. These methods will be a cleaned-up subset (for now) of functionality on xarray.Variable, with adaptations inspired by the [Python array API](https://data-apis.org/array-api/2022.12/API_specification/index.html).
5. Existing Variable structures
   1. Keep the Variable object, which subclasses the new structure and adds the `.encoding` attribute and potentially other methods needed for easy refactoring.
   2. IndexVariable will remain in xarray.core.variable and subclass the new named-array data structure pending future deletion.
6. Docstrings and user-facing APIs will need to be updated to reflect the changed methods on Variable objects.

Further implementation details are in Appendix: [Implementation Details](#appendix-implementation-details).
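The plan above leans on the `__array_namespace__` protocol from the Python array API standard. The snippet below is a minimal sketch of what such dispatch could look like; the helper names are assumptions (the real project might instead rely on array-api-compat, as noted earlier), but the protocol method itself is the standard one.

```python
import numpy as np


def get_array_namespace(data):
    """Return the array-API namespace advertised by ``data``, else fall back to NumPy.

    ``__array_namespace__`` is the protocol defined by the Python array API
    standard; standard-compliant duck arrays (and recent NumPy) provide it,
    while older arrays are handled by the plain NumPy fallback below.
    """
    get_namespace = getattr(data, "__array_namespace__", None)
    if get_namespace is not None:
        return get_namespace()
    return np


def total(data):
    # reductions dispatch through whichever namespace the wrapped array provides
    xp = get_array_namespace(data)
    return xp.sum(data)


print(total(np.arange(5)))  # 10
```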
## Plan for decoupling lazy indexing functionality from NamedArray

Today's implementation of Xarray's lazy indexing functionality uses three families of private objects: `*Indexer`, `*IndexingAdapter`, and `*Array`. These objects are needed for two reasons:

1. We need to translate from Xarray (NamedArray) indexing rules to bare array indexing rules.
   - `*Indexer` objects track the type of indexing - basic, orthogonal, vectorized
2. Not all arrays support the same indexing rules, so we need `*IndexingAdapter` classes
   1. Indexing Adapters today implement `__getitem__` and use the type of the `*Indexer` object to do appropriate conversions.
3. We also want to support lazy indexing of on-disk arrays.
   1. These again support different types of indexing, so we have `explicit_indexing_adapter` that understands `*Indexer` objects.

### Goals

1. We would like to keep the lazy indexing array objects, and backend array objects, within Xarray. Thus NamedArray cannot treat these objects specially.
2. A key source of confusion (and coupling) is that both lazy indexing arrays and indexing adapters handle `*Indexer` objects, and both subclass `ExplicitlyIndexedNDArrayMixin`. These are, however, conceptually different.

### Proposal

1. The `NumpyIndexingAdapter`, `DaskIndexingAdapter`, and `ArrayApiIndexingAdapter` classes will need to migrate to the Named Array project since we will want to support indexing of numpy, dask, and array-API arrays appropriately.
2. The `as_indexable` function, which wraps an array with the appropriate adapter, will also migrate over to named array.
3. Lazy indexing arrays will implement `__getitem__` for basic indexing, `.oindex` for orthogonal indexing, and `.vindex` for vectorized indexing.
4. IndexingAdapter classes will similarly implement `__getitem__`, `oindex`, and `vindex` (see the sketch after this list).
5. `NamedArray.__getitem__` (and `__setitem__`) will still use `*Indexer` objects internally (e.g. in `NamedArray._broadcast_indexes`), but use `.oindex` and `.vindex` on the underlying indexing adapters.
6. We will move the `*Indexer` and `*IndexingAdapter` classes to Named Array. These will be considered private in the long term.
7. `as_indexable` will no longer special-case `ExplicitlyIndexed` objects (we can special-case a new `IndexingAdapter` mixin class that will be private to NamedArray). To handle Xarray's lazy indexing arrays, we will introduce a new `ExplicitIndexingAdapter`, which will wrap any array with either `.oindex` or `.vindex` implemented.
   1. This will be the last case in the if-chain; that is, we will try to wrap with all other `IndexingAdapter` objects before using `ExplicitIndexingAdapter` as a fallback. This adapter will be used for the lazy indexing arrays and backend arrays.
   2. As with other indexing adapters (point 4 above), this `ExplicitIndexingAdapter` will only implement `__getitem__` and will understand `*Indexer` objects.
8. For backwards compatibility with external backends, we will have to gracefully deprecate `indexing.explicit_indexing_adapter`, which translates from Xarray's indexing rules to the indexing supported by the backend.
   1. We could split `explicit_indexing_adapter` into three: `basic_indexing_adapter`, `outer_indexing_adapter`, and `vectorized_indexing_adapter` for public use.
   2. Implement fallback `.oindex` and `.vindex` properties on the `BackendArray` base class. These will simply rewrap the `key` tuple with the appropriate `*Indexer` object, and pass it on to `__getitem__` or `__setitem__`. These methods will also raise DeprecationWarning so that external backends will know to migrate to `.oindex` and `.vindex` over the next year.

The most uncertain piece here is maintaining backward compatibility with external backends. We should first migrate a single internal backend, and test out the proposed approach.
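To make the three indexing entry points tangible, here is a small NumPy-only sketch of an adapter exposing `__getitem__`, `.oindex`, and `.vindex`. The class names echo the proposal but the bodies are illustrative only; the real adapters also accept slices, mixed keys, and `*Indexer` objects.

```python
import numpy as np


class _Oindex:
    """Orthogonal ("outer") indexing: each index array selects along its own axis."""

    def __init__(self, array):
        self._array = array

    def __getitem__(self, key):
        # np.ix_ builds an open mesh so each 1-D integer array applies independently
        return self._array[np.ix_(*key)]


class _Vindex:
    """Vectorized ("pointwise") indexing: index arrays are broadcast against each other."""

    def __init__(self, array):
        self._array = array

    def __getitem__(self, key):
        return self._array[key]  # NumPy fancy indexing is already vectorized


class NumpyIndexingAdapterSketch:
    """Illustrative adapter exposing the three indexing entry points from the proposal."""

    def __init__(self, array):
        self._array = np.asarray(array)

    def __getitem__(self, key):
        # basic indexing: integers and slices only
        return self._array[key]

    @property
    def oindex(self):
        return _Oindex(self._array)

    @property
    def vindex(self):
        return _Vindex(self._array)


adapter = NumpyIndexingAdapterSketch(np.arange(16).reshape(4, 4))
rows, cols = np.array([0, 2]), np.array([1, 3])
print(adapter[0, 1:3])             # basic: row 0, columns 1-2
print(adapter.oindex[rows, cols])  # 2x2 block: rows {0, 2} x cols {1, 3}
print(adapter.vindex[rows, cols])  # elements (0, 1) and (2, 3)
```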
## Project Timeline and Milestones

We have identified the following milestones for the completion of this project:

1. **Write and publish a design document**: this document will explain the purpose of named-array, the intended audience, and the features it will provide. It will also describe the architecture of named-array and how it will be implemented. This will ensure early community awareness and engagement in the project to promote subsequent uptake.
2. **Refactor `variable.py` to `base_variable.py`** and remove internal Xarray imports.
3. **Break out the package and create continuous integration infrastructure**: this will entail breaking out the named-array project into a Python package and creating a continuous integration (CI) system. This will help to modularize the code and make it easier to manage. Building a CI system will help ensure that codebase changes do not break existing functionality.
4. Incrementally add new functions & methods to the new package, ported from xarray. This will start to make named-array useful on its own.
5. Refactor the existing Xarray codebase to rely on the newly created package (named-array): this will help to demonstrate the usefulness of the new package, and also provide an example for others who may want to use it.
6. Expand tests, add documentation, and write a blog post: expanding the test suite will help to ensure that the code is reliable and that changes do not introduce bugs. Adding documentation will make it easier for others to understand and use the project.
7. Finally, we will write a series of blog posts on [xarray.dev](https://xarray.dev/) to promote the project and attract more contributors.
   - Toward the end of the process, write a few blog posts that demonstrate the use of the newly available data structure
     - pick the same example applications used by other implementations/applications (e.g. PyTorch, sklearn, and Levanter) to show how it can work.

## Related Work

1. [GitHub - deepmind/graphcast](https://github.com/deepmind/graphcast)
2. [Getting Started — LArray 0.34 documentation](https://larray.readthedocs.io/en/stable/tutorial/getting_started.html)
3. [Levanter — Legible, Scalable, Reproducible Foundation Models with JAX](https://crfm.stanford.edu/2023/06/16/levanter-1_0-release.html)
4. [google/xarray-tensorstore](https://github.com/google/xarray-tensorstore)
5. [State of Torch Named Tensors · Issue #60832 · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/issues/60832)
   - Incomplete support: many primitive operations result in errors, making it difficult to use NamedTensors in practice. Users often have to resort to removing the names from tensors to avoid these errors.
   - Lack of active development: the development of the NamedTensor feature in PyTorch is not currently active, due to a lack of bandwidth for resolving ambiguities in the design.
   - Usability issues: the current form of NamedTensor is not user-friendly and sometimes raises errors, making it difficult for users to incorporate NamedTensors into their workflows.
6. [Scikit-learn Enhancement Proposals (SLEPs) 8, 12, 14](https://github.com/scikit-learn/enhancement_proposals/pull/18)
   - Some of the key points and limitations discussed in these proposals are:
     - Inconsistency in feature name handling: scikit-learn currently lacks a consistent and comprehensive way to handle and propagate feature names through its pipelines and estimators ([SLEP 8](https://github.com/scikit-learn/enhancement_proposals/pull/18), [SLEP 12](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep012/proposal.html)).
     - Memory intensive for large feature sets: storing and propagating feature names can be memory intensive, particularly in cases where the entire "dictionary" becomes the features, such as in NLP use cases ([SLEP 8](https://github.com/scikit-learn/enhancement_proposals/pull/18), [GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35))
     - Sparse matrices: sparse data structures present a challenge for feature name propagation. For instance, the sparse data structure functionality in Pandas 1.0 only supports converting directly to the coordinate format (COO), which can be an issue with transformers such as OneHotEncoder.transform that has been optimized to construct a CSR matrix ([SLEP 14](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep014/proposal.html))
     - New data structures: the introduction of new data structures, such as "InputArray" or "DataArray", could lead to more burden for third-party estimator maintainers and increase the learning curve for users. Xarray's "DataArray" is mentioned as a potential alternative, but the proposal mentions that the conversion from a Pandas dataframe to a Dataset is not lossless ([SLEP 12](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep012/proposal.html), [SLEP 14](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep014/proposal.html), [GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35)).
     - Dependency on other libraries: solutions that involve using Xarray and/or Pandas to handle feature names come with the challenge of managing dependencies. While a soft dependency approach is suggested, this means users would be able to have/enable the feature only if they have the dependency installed. named-array's integration with other scientific Python libraries could potentially help with this issue ([GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35)).
## References and Previous Discussion

- [[Proposal] Expose Variable without Pandas dependency · Issue #3981 · pydata/xarray · GitHub](https://github.com/pydata/xarray/issues/3981)
  - [https://github.com/pydata/xarray/issues/3981#issuecomment-985051449](https://github.com/pydata/xarray/issues/3981#issuecomment-985051449)
- [Lazy indexing arrays as a stand-alone package · Issue #5081 · pydata/xarray · GitHub](https://github.com/pydata/xarray/issues/5081)

### Appendix: Engagement with the Community

We plan to publicize this document on:

- [x] `Xarray dev call`
- [ ] `Scientific Python discourse`
- [ ] `Xarray GitHub`
- [ ] `Twitter (X)`
- [ ] `Respond to NamedTensor and Scikit-Learn issues?`
- [ ] `Pangeo Discourse`
- [ ] `Numpy, SciPy email lists?`
- [ ] `Xarray blog`

Additionally, we plan on writing a series of blog posts to effectively showcase the implementation and potential of the newly available functionality. To illustrate this, we will use the same example applications as other established libraries (such as PyTorch and sklearn), providing practical demonstrations of how these new data structures can be leveraged.

### Appendix: API Surface

Questions:

1. Document Xarray indexing rules
2. Document use of the .oindex and .vindex protocols
3. Do we use `.mean` and `.nanmean` or `.mean(skipna=...)`?
   - Default behavior in named-array should mirror NumPy / the array API standard, not pandas (see the sketch after this list).
   - `nanmean` is not (yet) in the [array API](https://github.com/pydata/xarray/pull/7424#issuecomment-1373979208). There are a handful of other key functions (e.g., `median`) that are also missing. I think that should be OK, as long as what we support is a strict superset of the array API.
4. What methods need to be exposed on Variable?
   - `Variable.concat` classmethod: create two functions, one as the equivalent of `np.stack` and the other for `np.concat`
   - `.rolling_window` and `.coarsen_reshape`?
   - `named-array.apply_ufunc`: used in `astype`, `clip`, `quantile`, `isnull`, `notnull`
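For question 3, the following sketch (assumed function name and signature, not a committed API) shows the `skipna` spelling with a NumPy-like default, so that NaNs propagate unless the user opts in to pandas-style skipping:

```python
import numpy as np


def mean(data, skipna=False, axis=None):
    """Possible ``skipna`` spelling: NumPy-like default, pandas-like behaviour on request.

    ``skipna=False`` mirrors ``np.mean`` (NaNs propagate, matching the array API
    default); ``skipna=True`` dispatches to ``np.nanmean``, which is not (yet)
    part of the array API standard.
    """
    reduce = np.nanmean if skipna else np.mean
    return reduce(data, axis=axis)


x = np.array([1.0, np.nan, 3.0])
print(mean(x))               # nan  (NumPy / array API default)
print(mean(x, skipna=True))  # 2.0  (pandas-style NaN skipping)
```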
#### methods to be preserved from xarray.Variable

```python
# Sorting
Variable.argsort
Variable.searchsorted

# NaN handling
Variable.fillna
Variable.isnull
Variable.notnull

# Lazy data handling
Variable.chunk  # Could instead have accessor interface and recommend users use `Variable.dask.chunk` and `Variable.cubed.chunk`?
Variable.to_numpy()
Variable.as_numpy()

# Xarray-specific
Variable.get_axis_num
Variable.isel
Variable.to_dict

# Reductions
Variable.reduce
Variable.all
Variable.any
Variable.argmax
Variable.argmin
Variable.count
Variable.max
Variable.mean
Variable.median
Variable.min
Variable.prod
Variable.quantile
Variable.std
Variable.sum
Variable.var

# Accumulate
Variable.cumprod
Variable.cumsum

# numpy-like Methods
Variable.astype
Variable.copy
Variable.clip
Variable.round
Variable.item
Variable.where

# Reordering/Reshaping
Variable.squeeze
Variable.pad
Variable.roll
Variable.shift
```

#### methods to be renamed from xarray.Variable

```python
# Xarray-specific
Variable.concat  # create two functions, one as the equivalent of `np.stack` and other for `np.concat` (see the NumPy sketch at the end of this appendix)

# Given how niche these are, these would be better as functions than methods.
# We could also keep these in Xarray, at least for now. If we don't think people will use functionality outside of Xarray it probably is not worth the trouble of porting it (including documentation, etc).
Variable.coarsen  # This should probably be called something like coarsen_reduce.
Variable.coarsen_reshape
Variable.rolling_window

Variable.set_dims  # split this into broadcast_to and expand_dims

# Reordering/Reshaping
Variable.stack  # To avoid confusion with np.stack, let's call this stack_dims.
Variable.transpose  # Could consider calling this permute_dims, like the [array API standard](https://data-apis.org/array-api/2022.12/API_specification/manipulation_functions.html#objects-in-api)
Variable.unstack  # Likewise, maybe call this unstack_dims?
```

#### methods to be removed from xarray.Variable

```python
# Testing
Variable.broadcast_equals
Variable.equals
Variable.identical
Variable.no_conflicts

# Lazy data handling
Variable.compute  # We can probably omit this method for now, too, given that dask.compute() uses a protocol. The other concern is that different array libraries have different notions of "compute" and this one is rather Dask specific, including conversion from Dask to NumPy arrays. For example, in JAX every operation executes eagerly, but in a non-blocking fashion, and you need to call jax.block_until_ready() to ensure computation is finished.
Variable.load  # Could remove? compute vs load is a common source of confusion.

# Xarray-specific
Variable.to_index
Variable.to_index_variable
Variable.to_variable
Variable.to_base_variable
Variable.to_coord

Variable.rank  # Uses bottleneck. Delete? Could use https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rankdata.html instead

# numpy-like Methods
Variable.conjugate  # .conj is enough
Variable.__array_wrap__  # This is a very old NumPy protocol for duck arrays. We don't need it now that we have `__array_ufunc__` and `__array_function__`

# Encoding
Variable.reset_encoding
```

#### Attributes to be preserved from xarray.Variable

```python
# Properties
Variable.attrs
Variable.chunks
Variable.data
Variable.dims
Variable.dtype
Variable.nbytes
Variable.ndim
Variable.shape
Variable.size
Variable.sizes

Variable.T
Variable.real
Variable.imag
Variable.conj
```

#### Attributes to be renamed from xarray.Variable

```python
```

#### Attributes to be removed from xarray.Variable

```python
Variable.values  # Probably also remove -- this is a legacy from before Xarray supported dask arrays. ".data" is enough.

# Encoding
Variable.encoding
```
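The `Variable.concat` note in the renamed-methods list above proposes splitting it into two functions because joining along a new dimension and joining along an existing dimension are different operations. A plain-NumPy sketch of the distinction (the named-dimension layer would only translate dimension names into the `axis` arguments; no named-array API is assumed here):

```python
import numpy as np

a = np.arange(4).reshape(2, 2)
b = np.arange(4, 8).reshape(2, 2)

# "stack"-style: join along a *new* dimension, which would receive a new name
stacked = np.stack([a, b], axis=0)
print(stacked.shape)       # (2, 2, 2)

# "concatenate"-style: join along an *existing* dimension, whose length grows
concatenated = np.concatenate([a, b], axis=0)
print(concatenated.shape)  # (4, 2)
```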
### Appendix: Implementation Details

- Merge in VariableArithmetic's parent classes: AbstractArray, NdimSizeLenMixin with the new data structure.

```python
class VariableArithmetic(
    ImplementsArrayReduce,
    IncludeReduceMethods,
    IncludeCumMethods,
    IncludeNumpySameMethods,
    SupportsArithmetic,
    VariableOpsMixin,
):
    __slots__ = ()
    # prioritize our operations over those of numpy.ndarray (priority=0)
    __array_priority__ = 50
```

- Move over `_typed_ops.VariableOpsMixin`
- Build a list of utility functions used elsewhere: which of these should become public API?
  - `broadcast_variables`: `dataset.py`, `dataarray.py`, `missing.py`
    - This could be just called "broadcast" in named-array.
  - `Variable._getitem_with_mask`: `alignment.py`
    - keep this method/function as private and inside Xarray.
- The Variable constructor will need to be rewritten to no longer accept tuples, encodings, etc. These details should be handled at the Xarray data structure level.
- What happens to `duck_array_ops`?
- What about `Variable.chunk` and "chunk managers"?
  - Could this functionality be left in Xarray proper for now? Alternative array types like JAX also have some notion of "chunks" for parallel arrays, but the details differ in a number of ways from Dask/Cubed.
  - Perhaps variable.chunk/load methods should become functions defined in xarray that convert Variable objects. This is easy so long as xarray can reach in and replace `.data`
- Utility functions like `as_variable` should be moved out of `base_variable.py` so they can convert BaseVariable objects to/from DataArray or Dataset containing explicitly indexed arrays.

xarray-2025.12.0/doc/
xarray-2025.12.0/doc/Makefile

# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXATUOBUILD = sphinx-autobuild PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " rtdhtml Build html using same settings used on ReadtheDocs" @echo " livehtml Make standalone HTML files and rebuild the documentation when a change is detected.
Also includes a livereload enabled web server" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " dummy to check syntax errors of document sources" .PHONY: clean clean: rm -rf $(BUILDDIR)/* rm -rf generated/* rm -rf auto_gallery/ .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: rtdhtml rtdhtml: $(SPHINXBUILD) -T -j auto -E -W --keep-going -b html -d $(BUILDDIR)/doctrees -D language=en . $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: livehtml livehtml: # @echo "$(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html" $(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: html-noplot html-noplot: $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/xarray.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/xarray.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. 
The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/xarray" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/xarray" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 
.PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: dummy dummy: $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy @echo @echo "Build finished. Dummy builder generates no files."

xarray-2025.12.0/doc/README.rst

:orphan:

xarray
------

You can find information about building the docs at our `Contributing page `_.

xarray-2025.12.0/doc/_static/

xarray-2025.12.0/doc/_static/.gitignore

examples*.png
*.log
*.pdf
*.fbd_latexmk
*.aux

xarray-2025.12.0/doc/_static/advanced_selection_interpolation.svg

[SVG figure omitted: two schematic panels, "Advanced indexing" and "Advanced interpolation", each with x/y/z axes.]

xarray-2025.12.0/doc/_static/ci.png

[Binary PNG omitted: screenshot captured with gnome-screenshot on Thu 09 Mar 2023 12:10:35 PM EST.]
D1š™๑Žuk8…0ึ&๛ัยฌXธb๕WJ่4+Oค๙๐}3˜ฑ”:S%๐„a1๙ต=Fn่ ฝ,5า฿ห›*c†ผฑ’พ(Sแ๕ูDbษ]ณh;โ๛็4ฯq๛H=\ฉH‹|ฝ่้ฌ‘1Š=9พJ์a1‘5(ฟั่ฐ lป5!>Aอบœ่Q์ˆ ๓๕๑ฆ’ –คWถ;k0บ๖ฒค0_SัZNล๓ฌปuข.ตœ‹YC5ก๒ชเA๙ํ๓eาUG๋‹)GŠrฉ@พ๓’๙ปซGลณNฐMืุ{ฉ23•ša™›wช“;lTyi๖6qด4Rบ-ญXqอ4ํpช็๒กY›6KรฃvV~ฺcd“ฝ=p้รยjโŸ42Ž>e”9eJ๙ษQvใfrสQ๊”ฉฦ๒ใ†ฤย8ษค งป%Hำฮv6฿G2นq๋ฆํG?U]<%_Fฑ์ๅ](˜bMฯmชR=ซู{“Qู4ผ.ปoYl๗’ทUA‡u_ŒQŸทžบมอ0|๎uhปFใ๖จ^‰๏~.ถ็•mšคห† งตm#Qน}ฏlกŽ_ฯดฏŒP,(–ฅ_‘89ฬRฑ๘|>ทฺส`0<๓N๊—ฟ%7Eยฯฯ๏‹ค qฌXuiTg1ขฌgžŠีQJ-฿อ/Lถ+_FI/”ํžฒŽ,๒’ฤ$ล‡่hp~ฃž๎^ืd๘S^,ฮ)ฺ! ใั'2สa%'ฦฎโ*žaป ฒ#„๋—zM…ˆพม‘กฒŠY™7ศHRษu๗œ+ึข—ย:WŠ$๊+“ุ˜2ใ.๐ “”็H|้cNiIฑFNHi{ ๑๘‡FŠดSSv‰จค„’๔‚ขdบ‚r่›>&Oข›‡WpDBFLXฐฟ1ดคgNลrr๛4UbRuIู;r2ฤkจ+๚g5L่หsโฉ)w๏YŽตN†งˆ฿&Gะ™ ”ค็ไ—ทฆuตษิUผ%ูปณค|๚Š5Žฬ˜<มฒhAEsX'cuGฒ#˜฿ˆ”ป*๊†iq=B‹k˜‡hnwJ”†วๅ๚คUyห8/ลาึ—lฺ˜šs๖๊ตUณโpB๔ึไS๔‚ฅK™าญ ๛/5uฉ”Wy R๑๎หT C—sโค›vŸi๊ัjTง฿O +นt‹JJy<+<ฎ๐ฤTš!m๏7— SทŠ๗6u™๚ๆ(นD'ฝZMO๋้‚ิ๐„สๆ๗ฆฦปห฿“Š๗_7NM?จb}˜(Eผะ=:}o๚V๋กTiคejv*อ8uk ้Tแฆ่m‡ฎRv๊(Ÿ่Eภ|๋t—–ร|ฎฝ2ด<ถ7จrขipZ๙ํX๔๖že๙:%9ฦ0๓EฅjIาะนฑ™ รฝ้ฉษ่ไ๎W๖Œœ๋1*๛oŸ;ำJฌ*ํปkl<๊ํžื>๛ขZ=x๛ไ)อ+[nd๗Aฑžzลrัฏl๋ญทโvณˆŠŠš๓รkืฎevล ‘˜˜ธ๐‰‚งโฉNฐ{5-อŽb๚๋K"|่aฎ‚ฎyฎลาสsโล1๑้ง่aขz”€ฃ>๋n~๑ีCบˆ๐|n ึฯ„2Wdไฏ-—xณSญๅdB‘โI๑‰ฏฆ{ฑW๒นตUฌ™๐3์/€!สฑ†ไGR>ไ\ฑฝถk‡่ณ"*วฌห;โˆšuฐTฦx)i\ ลbฯrˆ9ซๅญ‰ฝ่FRGŸ;^+#2ษงซnจŠš็.Ÿะ2ึฤไ฿-ดlNลrv๛jRx–V?T[\PR|ช}„.ฃŒษŒ ;„YีOK™,V#+ฉำำŽ-ฃ๏BRํด3TแฌhN๊dfฌ\b๚ณ—ฆ*|Rไ๚๙ว บผ?+•ฟ“—s่ฬงmฺฉนหr ‡:ฒ๙TI^ๅUrค๒ุถ๐ธรMฆษฃMUy{/4฿SVn ฿ผq*บPW๓คRฺสศUถ†g›ฏ25คบฆ%yะ^ศ ฿\b”b\{M9@‰ีDม‡ Xไ้+ee•G๓Oฆ[ ฅ๔š,๏รŒฒฉ9ฬ'€๙(ึ๐hPlwPอ๔๔ฬ๔]KvัNล๔:n.ํK๚–๚wS•j 7QpzFoPr+ธ “ัowฏ’*u้พธ6๎าwZฟฝญœ€b=Šๅบ_ู(ึž={˜ฬ๋tบ_|qNลzแ…ธ‹‹‹็ฅXn^มvป OIีkดทฃ {@XQใ๘ฝy*–ฅฯ›ขงxyˆvท™ๅ„'3ญƒ2 ฆ…TO๋ไbบk+;ฅ๎ืh u๙TGู7ซมFNZvั‚ZิHึ฿SCอ+“” q=ๆเWd‰นŠ™fŽึb-N)ฌ/ัœhv$‹ฬ„]72ัฃj ใb*–_Š‚“ฯ3("{ฎgMW๑z*ฉ˜๓f=eชร&ซูๆขb9ป}ฌฌz๛†ล'็—Uืupท้AหโVN๋ ฬLB^,zg7็EsR'v‹™ˆhW้]cTy้ย™yqด?l*ธิ{ฉฝพšทYšP5`g#ŠRK_ฒาvF?ษJš๖โ>๑†ญ ๏Wฎ๏์ตLS]H,o;\ษีkSึb=ลฺTฺษ}ฅ9›]R๏p jJำี|ต๑๔ก\q๔ถยzญำ|˜bิQŠ๕ู๔ดa"*พ๛ตSทอ_ัŸีm‹Jv4iŸ๚ตดพๅIฝฏ$๕,y‹ี*๛Šeะงm๏^’ฌ‰;3vฎkzโaŠ๕x(Vm๗ืv%j^~eฃXฅฅฅLๆ‰>อฉXK–,QฉTฬ)•••๓S,ห=ู…aโ]๒FŸfํ(่I]฿W&๏1บ<๓ะrUO]ILhฯr๑ ‘“ณœ˜งรอt‰LcešŠ;šว‹กฎb)'์ F˜KฬนeEcถ?ฆ1.๋sปึ]†d฿j๚"›I™iุ๔yzE?^ฎ_Lล สo1๔‰ณ๑<{โM-cc†›HfL๗“ธจXฮn฿ŒกใDFจภขน๚EฆืhTฑ๔]ๅY’ ?ซงภJฑ,Œำข9ซปŠฅช`า_๘ึํๆYpฃอว๓่ฝ๒Fูห่ๅฬา4นึฮF)qšฒ๗95๘ณั๒ใ๖ ๔๐‘Qำta๎ฌMัไรญqปซ๊Œฆๅ[ๅ๛๓h๑ง๎+ฟ:บ(Š๕aทหŠevญฆโิpูัๆi'๙0ลjปน<ถ7บ๙๎๔ฤฺุุ๎็bป—X๐[ใz[ลบ1ฒjK7€NัgT๋๎จ‡'ข“œ*๙|L่๘@P%cK’ิั5z-๋้Q,•šฏ_ู(ึมƒ™ฬ๖ทฟSฑ~๓›฿ŒŒŒ0ง;vlแkฑฌ๛๗6ูeH^’ฒž๙'ศฮค„ๆศอŸ1k],ๅฤฮ๘OฉyG|@กPิšiัฺŽbๅำ‚!สฎถR”ผ>€X“?bฑ`ฦื44^i.ง—Z9T,;EูŽbูญปŠีณะQฌ้)อะฌQ—ญ…qฤ::ํุห›Gฑขฅ วUำsVY|žuแšj Wmอีo*ฃศ{gkไถ3ฝึซยด]Wห75|sแyํ\Šลe๒ก*ึhืีOฟ่ถLŠšธฑ๐ำQ'๙0ํ.G๛ž‹8:ฦŽYฝV9ฺูoดDฉปkฃXญgn,ู2p”›้7EŸ่\ฑธ>ษMัใšWb{ฃšf XOลDAกZ€_ู(Vff&ทkอš5s*ึชUซธตXyyy‹คXิ๚“0๚”๕vฒœ%hšgUF๗)G๔ฆ‰X์*&AŠ\gตŠ)ฝผ‰šnวHEฃผ๚|m]ฯ˜ตX‚“ศx—โ”\y{ฟqnลขำaWF9Sฌล/…5jkฑHดฆ+Fภ‘~n?6gใ=Aฑฯ๔ํธยœ;ึR#ฏฎi่6mGNmฟ1fgมS!ม;้{*"ผ๘ถkฑ์พฦชขโ:S%่2ำPQ,jq €^๐6ว#ึs0ฬb“ V๐ุQGcW1ถbชF์*;ษ๎,B*ขดZ‹eทNLŠๅฟซrิ”Y‹ฅ˜฿Z,cSqbธt฿ง620คH‹–&S‹ๅhEaš(zOๅ†skฑถK#-&^;U˜\  ^tํุถpiI=ท‹เีสดํ‡/i้ตX G-7าา –ฆ”Mอr8zฑ0ฃผร›Rฌ$VฑšŽ›MฉิๅWœNœิŸผrœ์ด>)™Š๕TฌลโดjKuๆสFฑBCCนซี๊฿๎wN๊ฅ—^๚๖oนในํ1พb‘~pKQ‡Ž8ฆž{q—‰ ŒZj‹6>ฝ{[L๎œ๘ ศะ5tฟ3ฆจบอภŽƒืH“ำRB้ฝ๘x"vรฝ‘šj฿=';F$`ฆ5๖ฯฬฉXด ˜vญpฆX?B)\ฺQ0ภ708T–‘, f๗ข`vป^B฿พghJr’ฤ?(,„1OV๓PkฦP—Cเ"eY๙ษ1๔NzAูดiซciฯ๔‰S3daไ๎˜=ฤ๔J7กDF๊'ฺฏฯrGAGทช:JHยb2๒w็็'‹ฉR๓ยส:,าไKeU-ฦ9vGคฒ๊@ชฎHกQ0S"“๓‹’ืEDาW‰L.m่ทำ`ธขG$คฤ„XอYฐ7‘สj~uฯƒํ(จพ”'L๗แ'Mื::๋U9๏l O8\‹๑Šผศ่จฑ&mว…ผmฉ‘ณสอ; &fgv<šผykr=จ5 ศ,+ธะคT]ฃ‡คฤป/iุทฦํU4ซดฺ!U๓'‡ข้WriOgmoฏผิ6@ํ(จผ๚aVbdekสสฌศธย๒†๎!ญFีiiV$ย[6Šๅ$“uขเญฦ<™4๒ฝรงจW_:พoำFiBe๗”ณ|pจXฆWO๛jŒ}๕pท[’๖สเ๒ุพu๒ษฆ›ำ๊~ัฃ๊eok า๛}ฌ^ซN๛ๆv๋เŒ๖ซWb๛ข>ปญ6~q๙fะžกจํห5ชปk_ฑพ"ษF}<ู48ญ4(>ไว๖ล5c๋iฺ๎‚ณฌ๘•bฝ๐ย ฬ„๛๛๛ฟ๛๎ป๖๖v>Ÿoืฏ[ZZธย~๗/ฟ๒โ)ต95ำ•๔ฑ3]p๖โ.v} u z9z‘oXJู•ฑ~EvีaMฎ“'0krj๋ฦ‡ผ๘‚ 
IพB‡zjŠbBฉื@Q_ญฯ(nิZŒ>YศษpsYš$ˆคเม๗†ล์’w่nYa9กŽžk7‡b8ฅฐดๆๅKœแ˜2ST'g฿‹% “i7-!3tœbณ็–QาN"1›.6oั‰qู‡OwLอZ‹ๅ$“y-ึ”บ๑ร‚MRฆข๒ Oตjง็€Cลโ^oลผŸ*ฎfJmฐš7จlาฒ๏ลฺาหฯฺ๛ญI„nŽEm๋!พVI~ูNŸซิ,๏^฿ท๊ภˆBwท๕ณๅ[z–๏SŸ(8ำZ{sํ?ฉdฉ๋n๏Oชฝ๗b=u; ณ๒.XปฟฒQ,‰‰‰ฤ— 7lุ011ADซฃฃc๕๊ีDฝศ็–…อฮฮๆ˜Sฑภc€N!ฃถฌฆถัŸqa฿ŽG vธ&tร5†Ž๖ฦบฺi€‘๓LRฬ๛=3า#ฌจ-๘ฑb=!ฏv‘IฝHัาฅKA๚ีฏ~ีฺฺJ,K$ฅงง [พƒ๘็?๙ปw-KชT*๋_3฿’tHj$Mฅอ“buUgฅ8*Kั็c–ธiู›P"หษOOฃ6ุไ‹v_™๏#fจหขฦ๎')tHjxŠ€b=ฝŠEะ ž9+็Vd1A๔ษ["‘ุ(ึ฿w??ฟgŸ}ึ๒Cr.IคƒG(ึำฎXฦ;w๛ิฤ‘lฦฒ\ r9—ค€UX@ฑ Xฌei;ฏw็์นj•ฟอˆ– ว#ษ๑ไ,r. (หv]ึ~Jด\‡๕W@ฑ XฮFด&๕†๑Iฝsศ1นŠล@ฑ X( ล‚bAฑP,(Šล@ฑ XP, Š€b=FŠีชTžr Xล` Š€bAฑP,( ล‚b XP, Š€bAฑ X(๋ก`ผswRoŸิ;‡CŽฤ3 Še๘ไมฮ๋ฎCŽ'gแษŠลฒนา Pr•ณs็ชUK—.Ÿน‚CŽ$ว“ณศนัŠลbชOๆฌœฟbลฬ?ศYไ\’, (๋žf`8’‘ซ%K–ฌZต***j๓ๆอ๋ืฏ …/ผ๐‚-’IPฌว[ฑพljฎฌ–oฯ-K ไไว†ฏ[\_ีyฝ{๖๘ีK/ฝ”——g0lŠ6==]TT๔๒ห/ฯห"้`]PฌวUฑฎ๗xg[N๘ฉ]ศWไ€9นั?˜ณsงฅ,์g?KNNrR@ญV›‘‘๑‹_ย๒D’I OPฌวOฑ]-gTj{Nม—MอœM‘ษ‡ฬท•ีr็ซฐ:ฏwฏZๅฯiฑฆ๛๗๏“R|๗Lq˜ุ๘w฿>๗sน$’š +ฒ 5%ษโ0?o7>ฯ/8$&ปฌ^ห~klH๒๙Gดถ'๊ไb>๙ส[Vc๕y]Z9m}E“๗L๖”…x๐"žฤข-”žาH๊v‡–๕<ฬdต๒ช๙g5Lผคgฟี~Rพq฿ฅ)ณ}พ8eฌดq‡]xเงฃo๘ตุ๎็bY๖v฿k๙iŸ๋ีำ๕*ำSI๏vฟVu›[™fI์ภษ)(ึSฌXŒ_mx+๑์…ฯCพ"รœ3ฉ7)โVa=๓ฬ3ูููฤH˜์(˜o‰kห"gq+ฒHj$M็}YEVฯ’(7oObY์ฟฤวบ~Zล9๏้๘~™8ŸB๊ส7ฃแวบขก.#ภอ+En|บkข.Ÿค)H‘๋,ฉ'VฑบŠึ๐ฤU๔#fhฬ ฆส%“,ศfฯg'†ห๒}r๕Z—Jฉ์ฌฟp4Y*ฬบะ;ํLฅฆTWฯ+Z5ำP,xŠ+่ิคขmJั6y๎หั์ร๊ๅฑฏ์bย…ำ๛GVฝ;ค˜๛;_|9zฒ๋ ŠES1~5็<@rcYŽŽŸิ)โ†กวฦฦ˜"ผ๘โ‹ฟ๏ ฮ;๗๛๏็ๆๆ›:sๆฬฝ{๐‡?<๓๗๎c\หำำ“KคFาtช1)พ”Sy‡dษ;˜๎์p{y-]^๔฿ล2ลซŽ%พ๗h)VKพˆ”W\9๖#]Q฿,เ?}Šฅ-Sชฒท๋A“zRซญ(ศƒSฌ{ำšŠ๒$z„u, Šฮ$ošW?e๙แhแ„๗Jฮซ๔ั*(<ฺŠ๕•ี(m‡vี–๎ๅLh็ใ้—ห’]Q,3Pฌง]ฑ’าณMŽOq]ร˜uYs*ึณฯ>{๑โEfl๊ฟ/76ๅ(t:3cฐนน™œ๋šbฑ}Y^„uุุUžŸ~ ชq่๋z 5ณŽ/-kฌJ_์ษ็๓ข˜อŸGฎT$‹Eพ^๔Z`dLขGORฎ{๐ู๑4BP~ฃ๑ž<้v7ศณย<=ผcฮำO์u๙Žุ0_o7พภ?"ฃฌล,?Ÿ—ศยจ+า_ฅ7ฒ_๕WJจ4ลu%โ`ฯร?"[ฎนื_“่ํๆ!๐ี ทพ๒r€hG‹ํW":MS—ืX›์Gr.)่22W +ฉ;Ÿ/^ภใ{{Kw+L๕ฉWห โCINˆM‰ณหฎะYUคxบ›+ม3ฉ–ญLฏ”๊+b’UQI#9 ถค<Ÿเะ„’บ!s†K3"‚้”}Bd% ๛•ฉ2›KใCค๖BRซ:๔cu{ฅAค2ฝBณ=ŽผNื^–้๋ๅM.‘#ฏ;hฃXฺ:ำEyยฐ˜Z:uYŸž๘ืlํซคZfน™ตM›ำ้ส<ีLฎ๋Og/$ฉข…™ษfTฤPะฌโVสท0ล"Wาiึ4์^O๎—คlhฎz>HU#/’ไห๋™‹2giหึ๓ญtฑ…ถ#iนi€ฮaฒFcNฐ›EรŸc[ฆ;?(ฟ}ฟมบชโˆb}1ๅ่kลญ฿ฟ-RV๒้ๅ็ฦKา๐ŠๆO'หถ†oบ)u_๙ีQ๚x'_Q฿jชrถฅFn”FJทฅ+ฎrEด๕ฦIท†Gง&œiช? ล€GPฑฆg๎6Uฉž‹ี์ฝษจl^—ท,ถ{ษช รบ/ฦจฯ[OเfพvŠš]ฅํฃz%พ๛นุžWถi’.&œNิถDๅ๖ฝฒ…:~๙?๛ำพ2N@ฑžlล๚ฒฉ™Yๅฤฏ˜=0Zฎ}ห|ยฌห"':W,>Ÿฯญถ2 ฯ?ผฟ๚ๅ/ษw‘๐๓๓sIฑŒต2๚d>ๆม‹้ใบ <รdeล9_๚ว˜Stjš*1ษ? "){GN†x€]~ฃo/ฯ‰งzข๎!ฒjIRŠ$๊[฿P"Tม!a‘;>'‰W‰i“ Iศ฿#๕งVคHห้ฮ๔ฤฆ#‘–ฟC&ขๅศWด`ŒœR?†…Gส’$ิYคฝ^“$ ๒ข~๔Mณทˆ้y{ลWฯSฑNัW๔๘ฎ‰฿qฐlwl0๕#?ฒ๘:=ห‹๎1{ฎ‰OฮษO'๊โaZWs]พ;!Œฒ,‘8+๗ูฎ้ฆ๏.  
|ƒ#Ceฤ"Fj2)‰ ็ํQรผะ’ฺŽ:J#ฉ ๙…ษvๅหยจฺ#_1Vฦศ†ฏ(,(4^#ข]ฮ;4Fโ/Š—ลาฅ๎‘มบ1f•”›—Hœš!‹๖ ,หะธ‹ฎmก$ฝ (™พhPU™G$<ฺ–[ŒS๙กฅj‘Uๆ™rฺุพ#ˆพGQhZI๑ŒP?๚ฅึŽ,šbัW๔ ๑„ขะฐŒ๊ag๕_)ej,(&…ิžฏ0€พkา๒แนหqฒQฒไ๚Jาs๒ห[จซH๕ฺ๗ี9นy(Q—{่“Vๅ-ฃSล2^;ž)อ;eด๙ผญแาิไ—{ษฃ15piถp้พOตฮฟบ7ฺtxำฦฤด•ฺŽK๛SทFf+4๔E{ฯๆFnVxฑ[3คฝV_™๙ั0(<‚ŠuoบKหํa>ื^ZT9ั48ญv,z{ฯฒ|’c˜๙ขRต$iุ่ฬ„w0สž‘s=Feํsg๚_‰Uฅ}{ืกbGฝ๓ฺ‡c_๔OซoŸ<ฅyeห์>(ึญXฬ*,ปพd้WIู้ไ฿ฬ‡W๛%๙ไ฿๖๖ฝฐTฌทz‹อ"**jฮ7ฏ]ป–ูƒDbbขKŠ5\แa6ลT,๏ˆ#LOฺ Hฅฟ ๔`บ†Zฺไฦก ี”Ÿja๔ฯหช๋ฬ๔ฉ้‹=์'L”ทพ‚9ฝ‡NaฦI:NdฤฤHcv1ฝpฆ๔฿๛๏M0ยใ!ฺqล`KqNฏง~์ุๆfeh*จ๊โบ.+wลmฌ3์ๆ›ฦบŠDT1“?g—ฬตœ*ฺ}ฐBม”๑J>ต$‰›(ศV&฿?ฃa„[“J'Uภ _hห%ิuk ิŽ9๑โ˜๘๔Stอwะ#`D6z,*S˜ก :๚๊โ0zHฤ/EN[A9=ถi_E†ชจ9iœ€ปŠ˜sฅช3wญŽ.ฮxญL@AR>ฤMfํf&ณฑร›’2ํ%ุš ˜˜ฑช.ฯXvญซฌฤuu‹คX์DE-๚9๋™ŠU>“™$สนb9Mึqร่8fี๕ูิŒ_Ÿน~อจ.๏ฯJe๖ใฟ“—s่ฬงmฺฉYŠฅนธoำๆฌฟ™šญ^๕{ท†G~z‹k$Šด[3ฃNฟาžฮ’FfฑNEeฃญ2nใถCT๓P•ฟ',h5}ฅ<žล€GSฑ†Gƒbปƒjฆงgฆ๗๎๊^ฒ‹v*fr`วอๅฑ}I฿RnชR-แ& ’žGฟAษญเ2LFฟฝJnpจXิฅ๛โฺธK฿i๖ถrŠ๕D+3$ewm•]ฟโึnู๘ฒTฌ={๖0™ื้t/พ๘โœŠ๕ย /pปปจXโGฑ<ยŠฎ[Q๘e(Œฆ%%๎พa๑ษ๙eีu]#F‹6{Šล“T™์‚Œฺถ_ฃํฉษ๐uŸฅ@$A=‘eLXแfืัืš8O1ฅ(˜K3ำ๓uณ๋๓ OzยฤยK˜hด.K q†ฑjm;>ขˆค์ขJEฃสb{๛ŠผใŠอ-ศNฉ๛้Jจหง:฿พYึr๏M ั๗ฺคyldฌฦฐท,†ญ[Vrbๅv†๒๊่J๖”›ๆRฒRส( ]Eิน๋Z:?]ล”`xวœ7˜WXTs>ฬณ”„ซIŒปšm„'ขาิูัผ`jฦๆb*V่A๕๕<.aŸƒ•ํธขXฮnŸณ†aGฑe๕ mvAฟสFี—.œู_'ฅ\kSมฅf•:฿P™šW?jwt‹๒จ๗ฮ(น๖ำญ๛ใคqu;๛๊๖ีผอา„ใ*‹๑4๊“4นv๚vc^ด4กjภี‹กX๐h*ึMฅXŸMO&ขโป_;uIPัVฑศ"฿Ž&ํSฟ–ึท<ฉ๗•คž%oฑZe_ฑ ๚ดํK’5qgฦฮuMO<Œโ@ฑuลbถฏpฏจฟK+Vii)“๙มมAขOs*ึ’%KT*sJeeฅ‹iฐ3Ykฺ่‚b™:—tฺ9;B"ฆ๛๋*ำบnฐโ<ญ4x๚ฯํ†Žกo๓ฺฟศ๔ญล2๗I?x ฿rQŠ)ZTŠ1"_พีWVŠ\ฤŽS1#i&ใb‚Iฤฦฃ่~ญบrQฑึq#cuY:ิP#๒4ฏ=๓๖-cูU,n‘7ฐ6ซˆผQs๓๊JbBx–ลjฑS™ฬ–tฤธ,WI1Šฅ`์ห4ุยTน}ฆฒ๔“pJร๚๊,˜J๋ฏค็ FT๔ีล๔ธbๅ์ชz›n`‡๏˜ณฆf6ฤ”: ฝ~Qห[ฬM•tRฯCUๆฬ0wXŽS9U,งทฯIรฐฃXช ฆค ฿บฌ:ฃอว๓ฤ˜ฑ&Zฅ6lLํ็žyaภกbYŒGMOwJŠต:๛jr&ตGผ5คqวบงGฉฏ(ืโฮ๚kฑเQUฌถ›หc{ฃ›๏NOŒญํ~.ถ{‰ฯฝีอ?ฎทUฌ#ซถt๓่}Fต๎Žzx":ษฉb‘ฯว๔‡ŽฅQ2ถ$I]ฃืBฑžBลr๎WŒb‘+ึมƒ™ฬ๖ทฟSฑ~๓›฿ŒŒŒ0ง;vฬตํ.ฦช้U๒ผฐฒฃีvEกq6ต{„ล2า๛ู๎ฦLp2udู>ฎiŽท„•ร๚jซ%ฏเ1ำีฦ]Pฌฆ›ฮ๗Oจ+jŸท๗ฯt‡Rๅ๒Uื57^Qคฏypล2ศeV=iKลbFrธMว1^ณK`บIŠจกGฑธลฎฦšชขœ๘ zu\ศ.—ห4 "> PXVB‹ึ4gOš#W467~^ฤˆ๙*#ฬ๎ฆ™์(–ิ<Šล่ฅๅ(?r๗๙Zห4๖˜I†Tn๙า๒6zา —ดฺถ"GฑŽiญGl่ฒ˜+นฮ^~Tฑ sื3๛‡๓Q,ณ1Z~๋$Yง รŽb๕,tkzJ34๋wิํึย8้ฆ;MŠ•šฃPQ ฑขsOtb๕šำl-”Zbู๙Šล๚จตW=`‰ๆ–qzสvkชฉkฑเั๎Bqด๏น๘ฃc์˜ีk•“ญFK”บป6Šีzๆฦ’-Gน™~S๔‰ฮ‹›|xS๔ธๆ•ุจฆ(ึำ5QpNฟrqข`ff&ทkอš5s*ึชUซธตXyyy.nฺฮพ€ศ;(Mย๔w‡ห“่ ๘๔ะN‹ฺ๙€๎๛I‹™ญŒฺฦz้@ฒbฬr๙Pศ^ำj“ณ€Š๔๛ซŠ๒๓‹๋Lczต3ไลnล!เึขฬ๎ณ๓ูbLอž†๊ณ ล๕„zxmˆTร4๏kŠel฿์ฐ หn6เวพวฉl<ณ•b™–{M๋j“…LทปkzผK^Zฒ#_ฮ).ำ{๖Mk0+_Zญs Xฆล<”ถFy๕๙ฺบž1nFณˆ‚ั4ษะuลฒณ อf-–hึZ,ำ:7าBZjไี5 ฌJฑkฤฑิpทถสลตX<ฑyั=ท3žชmา>Rชo฿m™Ÿ‡ฅXNู๊ดo‡o3ƒิP—eนหP-๓f&ธาญtฌ.ร๒[วษ:m=รl7ทhdืb)ๆทหุTœศํ?aฑโN‘-M>ซตRฉ้Y[#฿ซบ65ืZ,ี…ไ[3/N9ŠZ‹%~฿ผเjzfJ3@ยœV•ฟcนหxํฃmX‹ biฟนI>ไ—๋้ฉตXหŒ[Œ/อจ๏0ฆ+‰UฌฆใๆS*uyเง'๕'ฏ6';ญOJcw&„b=-]ฬ้Wฎowสๅ_ญV๎wฟsโW/ฝ๔าท฿~หฯm1งbQซ๓Y)b^=,เน๊aสjชฤำ์&A€ง๛o$ซ=ฌb๘‡ส2’%๔fz๎1็ว่ํิ่ทr๙„ลdไ๏ฮฯOS฿RCjๆ]๘ผ`ฉ,ฃŠ่1ปฃ ทฟ$#=+%”ส‰ โH—นS.Jู‘Ÿ- {ํkRŠjบจXร๔ ฝ}ผzธ^ย์Lš’œ$๑ šWฉฑW๔ ๐†ลคfฤˆL๛vจ˜Coช๋Sv์*ฺ‘B•(@ฦjOYฝV|›\ฺ0aGฑ่:๔`weLOฅ7Hไ‡ํn1P –hฯ๔ษ฿J›€LQu›aแŠ5ฃญŽp; ฦ„๛ั3 ูฝ u9๔ ’ R–•ŸCปzPถBgฝูฑ๐ผื‹อQน›พย`ชL›@๚g0ป>Žษ“่ ๐ƒฉ„‡„‰ฬ๙yhŠๅธž้ฉ‰tก!ฑษฑaคฉ{š%Šฺo๙6ˆ|#๒ ๓๗0:Lึiร`ซศ#€-อK ฯะ๛[TQ๛[*:{ีอฆฝ“ŠตX๐((–้ีรS็พc_=œ?าdhา^\ทN>ูtsZฏ?zTฝ์mอกA๚๛ว๊%ฑ๊ดonทฮhฟx%ถ/๊ณ๊aใ—oํŠฺฝ์_ฃสฑป๖๋›!’lิว“Mƒำ๊AƒโณA~l_\3Fฑž†Ms ,tโWำ.oฺย /0o๎๏๏๎ป๏ฺ๙|พ]ฟrwwoiiแ ๛๗฿ฟ๒ห.+ ฌฏH๔๗๓ฆeไ›]ึจ{-๋9ตลi’!๓๖XŽสv๓่„ฉ[T'g฿‹% “แ0tœส'bOZ<…ขˆดŠFำไฑžณ!~D|C‹๖๛ว๒1aฬkต|ืHำM—นR# เi ’คŸ่šPษeAิ;ฏ‚v5/Pฑ่๙]</tA2B…Tnร2ส[ฺ้ั vอtล|yM๓&.฿5๑E๕ฆ๚j(N•1 า่ }พหไ6ZEN$ตขฬK”QkWฑจŠช)b๋/ZŸQฬ;z%฿ฐ”ฒ+cŠljฒ™Wprอุ(ฑอ†ขXu+D9Š–๑V+อŒฺบƒ)กA๔{ฑ่=),s;,{™v/wะ m฿‹ežcอeฉ‘ิฝ&ษfTu่-๒รๆGœ_SCoXฒฆˆyxŠๅธžฉ"+vI™vล)ึoพ2jๅป$Tอ„ศส{˜}Gฬซ&๋คa่šIKbžV๕vฦ๏B‹E›h=qจ09!1’ZตUœ˜›๓กขY;{อ…FฑOผ1ตฐijึZฌ 
Mฤ…I"[ลฉ๛Nด1ฟ|uฯ๒ฝXแั‰qูGฯw˜พบ=pฉ”x#:1แฯ4]=“@K‰.ฤŠลฝŠy?U\อ”ฺ`5oPูคe฿‹ตฅ—Ÿ?ด๗[“‹ฺึC>|ญ’ชŸ>WฉY฿ฝ$พoี…๎n๋gหท๔,฿7ฆถ?Qpฆต๖ๆฺRษRืŸT{๏ลz๊^=ต_:๑+ๆีรไป฿Z*‰ฤฤDโK………6l˜˜˜ ขีััฑz๕j๎ข^ไsหยfggsธจX‹ˆ+xชฐ•:0์ฎ๑์?ย๓่าf๛rแE‡Ÿžู๛™ำB;ฟฏ€Ÿ (ึc Xฬฺ*ฑ4ั๎ึํ6G’รm๒N˜ิˆ-]บ”ค_๊WญญญฤฒD"Qzz๚ุุุ๐๐ฐๅ;ˆ๓Ÿ฿ฝ{ืฒคJฅ๒ืฟ5๓-I‡คFา„bAฑณซง— ่๗t-†biป“Rd ๖Hส—ซ/ลbWyส#3P,Š๕D(ท"‹่ำนšฯœŒ_1~ลwอฦx็.‘ขUซ9‰๚ใจำ้nผนrๅสeห–อ^‘u๋ึ-ฎ˜“““๚ำŸ,6ภ๐'ฉ‘4กXPฌวn Kž@ฟHบaฃXs\Ž^่#)๛ษ1((k1-‹Y—๕u_rใTไไG๒!๓ญฟbธั?˜ณsงฅD๙๙๙๕๖๖ฝ{ทฅฅๅ›oพqคXF(Z~Eา!ฉแ)Š๕๘)cSฬบ,ปฏๆœIHะOv^๏ๆฏXa๓ZแฃG’‚ปwฯFฑt:๙ผชชj้าฅฯ<๓ ๗9IคCRรS๋ฑT,†/›š]-฿žSฐแญD๙๙ั๎Žะ ž9+็Vd1A๔ษ["‘ุ(ึ฿w??ฟgŸ}ึ๒Cr.IคƒG(ึใญXŽ๑ฮ>u?q$›ฑ,ƒœEฮ%)ฤซฐ€bAฑหา v^๏ฮูนsี*›-ปAŽ!G’ใษYไ\๘P,(–ํบฌ”hน9๋ฏ€bAฑœhM๊ ใ“z็c0r Š€bAฑP,(Šล‚b XP, Š€bAฑ X( ล‚bSลู (–ซŠีชTžrlฦฉœ๘ ฃX€yOtไWP,(`!kฑ์๚ ŠXˆbaป (Šล@ฑ X( Š€bAฑP,ผz๐$ลrฆX? @ ฤbช @ @  XP,@ ล‚b!@ ( @ @@ฑ XP,@ ล‚b!@ ( @ @@ฑP/@ ล‚b!@ ( @ @@ฑ X@ Šล‚b!@ ( @ @@ฑ X@ Š…zA @ ( @ @@ฑ X@ ŠลB @ P,( @ @@ฑsล๚๎๛๏ั"@ (kกึ}|ฃ.ฟ๙๐ๆฯทฏ•K}އ‘+้dฤ›็6ฟu)+๏jฉ\uฉ_ฏEE ฐ‰‰ษ)uเ๕žพฮ๋< ค!‘ๆD๋1VฌหCWณš๖๙น„sNm๘็ื๛ฟผูŒ฿ค@ $๎ฬฬจิคC<:>e$?๛/x|!z๓(dc๊ถแึ่x๊iZคMAฑ3ล๚ฯp๛๖ฆฝฏัVฎใUJฮบ:า฿ช@ žrฟบำGzรค[|็.แ๐C๋‘ศ iHtsิ“๖P, Š๕c(ึw฿Wฎ”ฟy.f^reI๐ูMๅืๅ๘Š@ โฉ •บŸ๘้ OฯODฑฉฆE,‹43(ึc Xcw&ทนlมrลมฏๅ_=|kz ฟa@1ฉ๎„_Aฑ›๎>ilPฌGZฑˆ_ํ๘๚ภƒ๛Im๒๎mžE ๑Tล อภ่๘ดŠตุŒ% Š๕่*ึw฿๗PฦฏlศฟzxๆE)๖}eม›|7wพŸ๗ดwxTํ{๊๐} ผฮฬนXž;u!ษI#ฺ\ซ hu/้"/\Hร่>๎฿xui]>~)gf…ว ๅˆฯัขถบGทช‡ง์ก\aื{๚ ำw %PฌลfR›46(ึฃซXๅJ๙C๗+†yฏหบœ๙*[rย๊ฉ XPฌGขฮI๚๚%ีL=ลš้H๘+ตถ‡ทR•m‘8sdำ๑ฬMแย•>Tž.uบs|~UแDft_๏๚๘๐ผWK๗*-๋Y๗ี‘Lษ_ุ๊๒{3๚ร_฿ฒR}หฟ๗H๚†ŸษU3๋โ๗ส•Ž{บฌ5ีีŒๆbม–p?R]ณฎKWษ]oฏ[ศ[AW_b฿=X?0ืxคณŠb๚สš‚ค่ีB๊Ž๐|WoH-๘Xe~G๚ฏ3…teใ๔-ฃ๒dv์๊ืm+S๏c3ฌส“žvqขเŒฆ~{ฆซฏ|ƒชซnบH๗/JWXึrm~-V/฿โรด„Nำ=๚*+Iะ๏ฝzSๅ –ฎฃณฝษ6ฝู-๎9*ี่{O็FฝIn ฉจwญดzyGลqฑีQKศmŠข*ŸY(’d=๐C๚๓ผ2fVฌ”‹Sง3!๒๓"9pํ–ฝตƒvš€๒ธtญภอ#0ณู… ป(9luึฟ$]ฝนŠล‚bAฑๆŒฌฆ‹๊W zb)ึ›น๛Sฐ๊ฬyE•๖9Pฌๅul˜๗z ๐uำ3s[[ซŸฃ๗ถ.้m!้ะx˜/ไGบMฬAถ#%Lษ•๘0y kผ>s5sฐ฿jบwE;PzŽํฌOfH๓sWS=~S~Hโ๕\fเdฌŸ้บอr—[ม•ฺguฦล[lปjษฒuน;! 
วผ๊e[™ตYo๘ฝ.0%"๐# ษตํ.f”ญ๓™uuRWอ3?ฏ๗ฯฆช๐y๕๕@ฟ?งส๕๓nฑGข่‹>ฆ3ตตฆ+พน—๕.f้šด|๋=ํท’หUดu”ฯQิฮฑึ๓x\cถ~<วฅV๗รL๓ตLตaX'๒[มว{HFcžoฦฬJ๙TษJซŒนฝน็+ฃM“ห|—ฝืŒbอ•aื%gญฮ๊—คห7@@ฑ XP,(–๓P๋‡‚ฮใGPฌเณ›๚๕ฺEQ,/ŸW฿L=ึคPึl`;พซ •๖‹]่˜๐ฉIrHŸ‰๚ ฑ@rR็ผ๗ๆๆ!z๗cjjช๛8๛๗t[ญz…pหGตJUo;50า{0œ้bฎ;BUYW#]i๊MZ๗ยVˆt้Eฑ’๖ถœ)๒๐้(ฆWG:ฉr™ำo๛1ญฌฏg\ฎ ‡_๗ฅีv๖6}$aญ&๐ห๔1ญ{„Œุlฉก ไ`้฿่.ฺŠ่cรlg]B็mu๖ืlทOW๎jZ)อ][๛ื%โด6Ÿ“xฮฟ0=?{Tฅฅ?๑”[9EŽ1ส3D<ำธŠ}{›ณข†Oณผ2z3uปg†๋w•ํิ&|ฆทjWค[QงžIPผ๗†meบ0QะVฑ๎ซXYZžy‰บ๚T๛GQVํaFฮŒYถ๖๙ถุ๖ฝ๔€*ฉIบ’†้5c+Ek…Tบ”hฮฅo๋;[ํอoT2)ุ›(่๎ร{=<๓\[ฏF)ฯg›๎฿>rฐYฌฝโธา๊ธ, SL5ว%tEญฮW.!}๐ฦ<Œ™[ปปฯฺ]๕ทfจึพŸ}6}ขญณ*๏ ๆ๙U๖v+๔.dุ•Gi๎Vทฐ›‹@ ]ฑไ2o7ฏนqึW็ใy๎‚ไฯฆถ\ฬw -๋Yฬ๎~]Fษถย๘*‡บ8Œ๏ถพขŠล๚ ๋ใuฎO้_‘คถ~‘๋ฐ 7๊Eฑ่YvึK>OZ3cOฑf>~›5’1ฯ™qบื{{5พ†;ลไN|ึธพฮสT…E>งบฟชะ6`dปlว60}ฉทๅ36‰sB2ฃHฐ]รv‹q ฎCF_OžŸ™’๚nล—ซยa็ฯ"ฯฆk™ส5ฌค๓u็0›F๏p๖ฏ์M?˜{๐๎|ฟคำฝFฎ>n๐ภ]๗ฯน_™r6u2šgy๖=lๆ7พลhผ˜ภt _ฯฌq<€ใธขธาญ=จ2Ÿึฬvdy[jฌบัคอpฟฒ๚ุYๆส\€b™ eqฏ่<™Kฒ—๐๔8›'™w‹ฉ—ฎฅตจ2Nัง๓6?FM ๔‘œำ›+je*U๓S,6Y๖Bฏ3ํ'ณ๖ผหYซ3aพ9_e:Y็ๆาC๚เy3—Whnํœ;‘ๆ4ๅ่๙u%ร๓y”ทบ…\๑„+VKพศ3ฉŠล‚b-0๒›?ฟ"ฑ๑ณ๗œ™wตtQหฒ`ฌ‘ฌ0-คฑ;ŠลcPsX๛ิฬงk•:Wk]นล๚ำถ‡~Y_[๕u6ถบWึžhแžปนQฬ๎+b™*ป‰3๛น™&  Hb,M9ุ ืชยแ(–ิ"ฯฆrYtถtŸ.=ฐ—ฮžwhoไ๔”9แ_cv}tฒA5ๅ‚bฅ1w'2,ฎ{๋dดโี#ชTc'แน*j†;€]่ย~\รฎZM๗’-*๓ซ๛ณŽแ*sŠ5u2–-ิแม\๖™w‹ีหฅ>ฆ:SK ฉFFc^ฅ–cอ0kษxL[—byผฑณ|#Kf1'vพฃXŽ[w๗_}3Zฒ)–ajซ  xHธ1/ c๖[;๗l2ใH-๐IUุธณ๓ ป๐(นะ๊vsฤ“ญXฺฒ๕‹ฎXF(๋ษUฌอŸo(~E1็ม’‹้‹ขX–?ๆู;R,j;„คoฺlWเ#”~ิitฎX>’ำ9`งZ™zุ๖ ปu)wญ—ฝ}็g)–ๅŽฆ-ลb{ฬๆ๙NNหiU8์Yๆน)ืฒ๖ร`Mยj{๛ๆs3๚ศ1wใ žอ๖oฆž์›๙aกื8ฬkXŒGYW…ากZ8ฎ(ษM|;g‡}^งสลสœฟb9(ิ\N2ห Qร&สoš”ฒ}ตHi๕žฮvๅฯ:ฆฯ=?ลฒtฮdผ S,๎พ,‡็>hc^@ฦฬํไฝz‹&w1มหžี9ืj(lฮ /QฒซX๓บนโ‘Pฌ‰ณ๑<ฐข๚†bY˜/ต›N@PL‘Bรlhฉฬ ๐<ผ=ืHw+ฺหฌKXš@oว$ษ.๏0ะๆฃ™ป.1็ูƒ๋ฆ„ัปๆDiรŽห'E!O_์ษ็๓‚#ฒไzๆฎข5|ฯ$Eหฉฟ?hW;๕กฎฝะฬ˜์ฦ—V๋LŠE1ซ+]c๙6พš*‘A‘เๆ—"7 yMิg๛๓dgวฬW้0]ESแมสo‡bAฑ~<ลr.E?{ฯฎD-ภฏ~jลb—ฆธฝ๖Iฉ[คซ๗ฯถ]ปฝ7ท"fKฎ™๎ใ’ืญ‡S์๖uLซ็ฉ้@ิี๔-Šzีร4ญHใชb0x/Fnž gl฿์x-VKQ‡wฬ)รlล9%ๅyˆv\1L่M่2พoVƒ}ล๒ˆ,V™?้9fา*Z~‚‹Zฏย๊)4ŸN)–@Vc.]ฯA’”คLC•eGu้lึb‰J:ฬฏ•y๑}3~Zลฺ๓>GณอษWPฌ'Jฑพ๛๛?Œ˜SฑlTjม~ๅs<์งล๊ๆ^ใ๓ช๐ Wใ4uฺด^|S๔ซิfำๆีย,ำ j์ฏล<ถ{ฃัซฉ๗Gํ?ย.F็ฝ.’[ๅ’b‘Oสอ๏ซแ^๗ไถ:Sก›GU,@u:นัzฎ^-เy$G>r๏zฏfสศmอ็ญ š_…$ˆ*Wฐะ๋RัwZ"4ฟฬ‡{/–฿ฆใฝŽ ๆจ(๋๗bYJมบmS๓iW Q,๒I๋Šxo(ฺูฤภm‚Gg/œZr6๛ƒล}Vำ&™ๅX๎ ฑ์–—น‘ูFœ[X/Nq\ป๛SM{L‹}^}]๐Pา‡า˜็›1n(๛ีฟลฎ๓ฒjษผฟ˜fข  vญ2็luP,โ1Wฌ€๔z‹o{สB=๘!บฆ๕Š>฿3ตึz ลj(N“† <ฝผy|ๆล}b{ŠีAษž๐bๅv‹/ท˜C8rB๊ๆœh#Š‘–ช๔˜0€ว๗ๆ1/6ดT,ซา๕Œ4)ต`ฬบtŽทป0ึ&!Lk๘ษGฑ์Zึ๚๋ลz๓ๆyM\ฐ_VŸ‘๔ŠE:}๗งDฏ3=๒ ึ๐จ”}{ซทอ,f$\า๗žฮ•ฌ%}\ž0\๚ฏ๚๛Nต๊จ|ฝ?>œzใ0้#๗คRO]ณrป˜Šล5?๚๊>Dณฃ’๖สป-ชสจ,Q5นBเ๗—T๙เBZฌ•MญดุเดŸปีq๖ส;๐YnิŸฉ ไฝฦบต-ผ>ป8.฿}ๆ6 W๚P  Wพฑ.บข๐>คฦ<ฏŒqC‚ยถ[ญว฿๐I™ดบu)}ฅsฺไ\ษฐห•้ผีAฑˆGTฑ’ผM“โฌ ล$Œb5Z)Vˆ+Šel/yป๙EฆWึ6ถuu\๏๊8Ÿแ๏DฑQŠwด๙‹อ-ห”‰%Mฺ.jsษZi๗o๓“ฅI็'mŸ‰ลŒ‚ฟฤ๊1ฌ,z๏q๎>kbbUถVฅ|งU*"Sพ๏๔ึฆฃป&S%ฟ~ุฮbนศ=ิคoSฦkชณU5ฌ3y;ฝํY•๚ž2ฤฅุtฃbูuiฑQ;ๅตJH๘ฆFg AX*“ส์๊็Ž์•‚ุง4รๆๆาค]qาBwgจuŽŸYy Ÿ:šฆTl‹OศปZW8Yb9๔5IปใBCๅwบ?[๎ีํห๔ำ:๐~ฅ"ญั้ธz$ิ๛ึ2๊ฎ=ง@^=ฅB{ดจฮ่ZC๗าR•/ฝ๑ศ๐bKZ[พฯˆขXgg5ž3]๔ใ—5๖Dม‡า&๛ƒxJ๘ฎด๚฿uL%๖รyS'ๅ7NšX:ฑeฦžpkฉอP„&๋9x’ํแรแ‘mk;•Eb=ก๑ซ>ƒุมฤn&v6‹ฤš~bล็ะคGFฦซ=G–NC๑ศุ’ž๊4ob9Z๒a ้Uzำ@KmAฌ2rGaงร-ัส๐]ชค‚ซ-:ƒูแtดE„Fฦ~sUgะี—&์Sฦ?>ฑŽํ ูŸQฃ—ณก๚Xœ8Bญ๕^h|Jeงhh๘>-6&>TฌคnโQ}ฯฉฝ‘Šฝ•š]{ใฉC;C"ณ๋๎:ถฦ”He่กามุืำPœ,=ฝีXนัJฉ"ิ๑วไŽ&s—Rqจส8ฑโ">N+ฎ๏ิwwV#V).ฝQฮSMา6eธชดก{ภจฟ๙]j\ศถŒ๊‰s:สอฃุ–เZ–ริ˜น;R๑๑YŒถงอa1žKU&T[Fล๏ญ ู๑!JUnmงั4 k<› i๊Us ยR•,ึyGฺูฑMF{Š๗G†Dฦวงำ‰<ฺyjฏ๗Oพ2ฮแสฯฤVM๛Nc0ล–ฒ3pbตหzี€ฑ๛ฆZl“ศ์”‰%d›๊TmงN|ะyาพ—๊฿{dHT†Zำc4คอ[f˜,ฑค฿ะศคหQ2ku๊NลsFž0๗๏฿‡ฟํ]บnฝi๐ณ_Xณขy๘ฎ่๖ึุตฤ&vณ_9%Hฌg?ฑtโ(3Nฝ? 
K,6บ†ไฤบ{5)sL)#Ÿ0–]gsGKศR๏ฌืdว…Dๅi<$ๆช4ล{“$Vxš๏๚7็น;J:กKฉHฝ๊2r4dฤ…J,Gcvจ่–ฑ&35[-ฝ‚ีืc๔พฒC“ขTฦ–ผ‰โwเซ+ศฤfV๕ฝGj=C1rภ„ฆ5:คงฤKk๎mชัฦ#ื—XQส๏Vu:-5iŠะDiXc&ำ}ข`ผA๏ึ$…)c‹}ห2ซUŠPีwฆOดTฅ…ผŸ้™_ิX–špช๓ฦฝใQSฌฬดhฟ๖–๊(pbู†๕๚หCโŠs/z๒ฤrฺฎJูใ>yoเปวๆฎ|ก`Rีฝท(ๅU’๋ฅ๒3eHคwVT*Ž^•u๒~ฬฒKlาp๗)j=๊c™—ลS“Ao๒ŽOส =Te™,ฑD’ฉ"}๋?z๕ˆ22กjุ ฟJeปwฯxิก๋ึถถ‹ctฤวT์Hbw;•ุตfฅฏHฌ็0ฑ\วฤ‰ฎ“šDuศ๗/ฑn„‹ใH฿ฅŸศyใส๐N: Q๙]มๅ:L˜XปK}ไ:(ฟ!Ÿำ%*ขฬW้„ซ‰%•@˜๋ณ๑XZซ2ชvDว…oบmงh<๗ฑผถแู7ฝ”ย#ะ‹ศซช๒;yาZ-Žฐ๗žี‹7xHrะ2บแสCๆ‘ฤ—%วFdJฝc&slbศ ๓DiปํL้7แ‰Rb…&ซ๏๘ฟ#฿ีeาฯ–๘+ฃ‘็WGฮ–๘INด๊.ูŸ'6xจ2Rช๛ ฅi&–ฃQ|๎; ๆ๊y(RD~ง{oู{ึSŽบT๙ๅ๘๑์r“~๔“%–๔fใ25t iŠฟNŽ๑ผTจา”Iฎลฒ\อ S}''ตฅ6#4ะD๐„x๘๐กรแGรV€YB์Nbงz๓Iฌ็8ฑไฑ#้WฉวธN๔Kฌ๚l้Š ะฑพ'Ž์=ั๒7$ ล{ฅ1(_ณศษ8ฑ|อ~‰uทF:/ฎย๏K๚ซ%/>D`สฉ?ฆŒH=ืะ=`พ3lบšฒmlb}ำ9ฝฤ๒Ÿ๎B>(฿Uค{h(ุuIฆ3ZๅŸX~ฃCโp<ึK3ุžcซ>[1๎ฒ4ืkึ:&>ั•X•ฃ๏ศ๗N}‰5ลสLXdำ]˜ซาBCw&6๊Œรา6ืD„ฮ0ฑไฯ7šไmิฏ5๎ฝๅcฟฤŸลมช‰5ลG?YbMค๎…ˆ?Jลฎlตฦ`oj@ูฯ$–ำq3=ส5ฦ(%žกq8เWๅัฃG๚)Gฑา'Žbน๗ใใT‰%ŸDชุ]ชฟ{๕ˆ2>๓…$ึ๓Xฮพณฑa‰GRใ=ธ๛%ึ่ีคp๙0คkoŒwขลัฑ3$บ ลsnพ<๙ตXKFS†๓N}-V\ฆwจเฮี๔UนŽq'ณ‰}๏%ึ{๒ x๎^’Oร;ๆนห๏b3)fL~—!M,๗hฎL–ฎล๊›ั๖œp-V๘˜๋พŒ฿ซBยไqbMฑ2ฆs๑cฎล’G๐$–\MGฝC—V้#๓ž(จ?+"ฐแอž”m“\‹ต์ตXR›น.‡›nbM๕ัOq-V˜rGฑ7ํ ๊ิฤ„โNง๋J?๏^ืw.>ฬ“‘SLฺ.!Jศ,H •<๙ซHฌ็<ฑคc\ijฯmˆห้hษKPl“'ฒ6ถ_=upงboฉt9แศ^”Ox่ฮ๘~hX฿x6iBฤŒFฑ\สโ8ตถว8dะจ3โwO>ฃเ๎H้Tฎๆ]kc๑gqฎฅห`BใŽT๔˜‡Z.$ส>ฒK๚Y•ฎcฆ‰q0ปฒuภFbMฑ2ฮ๕กHiFมๆฝพณ:?yGิฮ@ณƒHำฅ„(“‹oHคฎ 9>#[jณ’‹M๚eบXษิฃ๘w›ก๎›ไˆHฅ"ตัตฏค(ลฏชZบฅ)อW3ยCใ’สค=วจปš๙ฑ๘p‹Zฮ้'ึT˜e๙ฯ(hีdว‡„'f^พฉำฌฮด<5T•ฆŒ๘๚ชดkชR๖งฅ ‰สฎ3YSkXญ’๙2ธlยวษลํต@b‘Xฯcb‰ฃp•ยwพbIทNj(ษ๏vฅT(ใใำฮ6ธฆ‚˜˜NkKYฦŽm‘ฎ;AUwหืbษฃำL,งญง2#1<\ฉ‹ฯจjฉฯ ๗› o CšโดD๙&H"‡๒<๗งจหSEˆ†วลฆ–j๎8๔ษแa‘แGkฬำO,ต*DพฉTส^ฯ}ฑ*zผ๒Kบ“Rrๆๅž็„นฤ๓๋D™ธถ†ชจม;ฦ๔ท็˜ฤ’oโT˜ๅบYSbRAฃ๛Qฟ<ฑ&_i 7ๆชโฅ๛b)ใพนฺข{Hvรฤw{๗ๆ)๑ฐPi’๚„|ฑJรีวโa;ใๅiKฬ๕EาฯคUฉฤถช<้๒rดชคJSKw “nฎ•ฐkโjL๛Zฌ)>๚1ห{_,ว@]พ๛พXกปาr๗gs่ซฒcฅ๎Œ๘8๛ปvซฃฝ4~[ค"*ฏฮ:ีญ‡ล"i\ั{ดๆผPื…m@b‘XฯEb=–กa฿Aธ|ฟ ”ซ3น.ลa5฿๑ Y.ง)ฦ\๕Œเ?ึฯ าธ_จ๏–$‰๕๘%ฯ&Ÿ/…Vฏ“๏•ื0“ูซuลฎ;แิ๗tอ็ŽD+Cึ˜Ÿน Ab=_๛ฐฑปณ๒›DEXbq7›H,kV6๕U๎ำฝBยใvจ ช๕3œZอwโ–Rฑ->แ๋ช–g๑B$ึsE{ัฑปF'็ึฐ1€ฤ"ฑ€ฤ"ฑ€ฤ"ฑ€ฤ"ฑH, ฑH, ฑH, ฑุ.$‰$‰$‰ค‰@b‘X@b‘X@bฑ]H, H, H, H  €ฤ"ฑ€ฤ"ฑ€ฤ"ฑH, H, Hฌgkป4i;ง/‰5UbYmDDDDDฤ้Kb‘XˆˆˆˆˆHb‘XˆˆˆˆˆHb‘XˆˆˆˆˆHb‘Xˆˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰Eb!"""""‰Eb!"""""‰Eb!"""""‰Eb!"""""’X$"""""’X$"""""’X$"""""’X$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆHb‘XˆˆˆˆˆHb&๋dsyhัฎ—ฟ\๏ษƒ8๗๛ชุcล~หW$""""‰5ทซ้ึอ '#~์ญ?dฏ๙ฯผฟ.=นq Eฬ…ี๘ฯœฟ!k๕โฯ$๖^ฑ๓E‰ˆˆˆHbอ‰ฤวฆL_/U_ฬ_๗‡ :ฑ6่ฤฤ)‰57ึdญุc_<ฑ.่ซทล>Le!"""’Xs"ฑึ็o}ต$/โฅงฃHฌ9ต>b๏•%๖dพ+IฌงœX'›หฅ๓OะW๘ '–๐yY๙Ÿธ. 
‘ฤzส‰๕^aLPึšŸ๕ฤ}ฝZ์ฯ|]""""’XO3ฑ–งฏ๛\ ‰๕อjฑ?๓u‰ˆˆˆHb=ตฤฝgวส‘Gbแo!ฑฤž,VŒฏKDDDD๋)'ม€ฟฤrญ_—ˆˆˆˆ$ึำู๛ชž,ฑึ+‡œฃ?DUd• น^งข!ๆ•ข=)ฆ๛๒ำu้Nz",๊งตื๛›tนสbOลีึ๕f\H-2‹อ|ฐP๚แ๋•ง+ฬCถvhGyรž Z็#mJ‰๛)ฏ#ฝจฟWZส๑๎๊6 ‰๕ธ๖4_ฟา6ศ๔4หี1rQo{™๛๎^lณšษsํต—ิณำB~/eฝจฑtYู‘ฤ"ฑf๏Dม5อ6ง]šธๆิ$9ฑ=cภ๎ผ฿[p8๊โแรmฺŽแOŠLXKฯ‹sšn—ฎIŒชI?y{ศ๙จไy๙มๅงตœฆพ3ซ•—RลหŽ<ฒืี+ฝฃXฺ^ญ๎๔พ๊ร๛š์ธ๏ด ไฎ9h่iŠEˆ5r:-ฝฺๅ๛*”o–ฦ์k๏ต9‡ส/m ๐พZต#ถI๋ึ~็Ž?>ฏ|ณ์“Œ~‹XJำmmyž5%JๅOอbƒิ] “ผ)ช]lก:mzิ…ฤุ๚๒&ซำf>๓พผy—ึh{ิฏ5u4ตfํซdจฒา๔:ปำfฉฯธ"ึ?ทยฏ’6เaW••g‰฿Ž ]Lฉ‘ถRFทXหฅฺ0 ฤš+iำ}cยฯญณ๗Rˆˆˆ$‰5;ืb“aฒHซb๏oาpผ!UYถi|ร8ฺŸถ{~๒๎>ฝxดkMล:ำลƒ%QฃฌฆGN}๋žฅโ‰ืšz3สฝ zWูp&ๅ’7ฑœ#๚Tฯpำ๚จN฿˜ฯ๘Dœ|ฟ?ก”๋๖pฉ็ทgไ฿”~เ}๕๘W€ฤrZ*ช7๙ส่ั%พโzภฉฤr›๔ŸาBหrตœฺ๏Xำ+—.šD]yื๗ฤžรฏx~ปแงiž_๏]บ๛N)ฑŠไZำ 9m๕๛ŠฝนซL1ูรg'žใฤ้*MRฏXถ(hู’7~Qำ#\“บqัๆ๔oUŠ—_\น็ฒ฿ใ๛K?|iู–฿h•ฑ@น่ฅํ฿๖๛Ÿ(hi๛Nตๅํ•โ5qีๆ… C๋่๕มห6็C^น,๏•ฏ\ตhKaฯธu3 ๘B๗ย๖ึyํห?7z†\lM๚Vวทฯhœ9Pk–~รั“zญรงบ ่ถ>ํ;cษษ้~u_ว‚ซKG าs๏em[ž?”‘j|วสFดอฦw’ฤSฺ_8p+G๏^ด๖šaCBวญญ๓ถท/O้/q/Z~๚)sN~ฯซ๛:์keชv•‰ต$ฟ;hGผํฏ~eส95ํฅี๎Mm4…‰uุีพ`WWtญี๘ม‘ิฏบคืŒh{แ@๏๋ึฉNœu 7LaŸvˆอ%6cะง๚ิ6iMvดŠŸด-๘สlึ๔ฝ ž[๗jt๒"‹๕๚ญ=9fฯRำ:z๚Tฯ๒่ถy[ลึงถLx)-๒ƒcคฯ฿ี๕N้p—k–๊ๆ%Kฮ฿Z} S์๚ ๘|ื‹ทฮัญฮ7k-}ˆˆ$‰ๅ>pWQ{ฆu6šฬˆนxตา o6hว4€œUถ้%ึ„ซWุ#RŠ,ญ”ฮสำwg)Kึบห~้Šฏ๔6hzฅs็Jงs“o๎ฤ2Ÿเ ’S‡+๎;G:ฅ฿Nพ$–wdIฌa…”guื๕,tฯษงํVชxูืฏ‰/ปi้ฉ๕n‹R/wšฺ?Y๊ykuuทถiŸ๎ดๅB”โJ,๑ฒงญ/u๏Kฺค๘—ด)Ÿyš‰๕ลO''ŸลฏžpbYคถyc{žFJฆž+้›_Zถ๙x—๘๗ถ/7Š:ฺ’tกญะ<๊”มาV,๚ฐะ่น+๏ƒeKขิFฟkฑฬU^{iใ~u—Y{wอก+^ŽฐTํ^))Ÿี6๘ํG+‚฿^๕ึg”_ค%yฒu_w]ทัิคถ>7]์ท๚Gจฺ็ซด"ฎ้_ˆ่V;*ฝx฿่m พ’๒ฉลดตmyŽY:Kmx8:^ฆw๏n–Nฦ3ิ๊DtงJวถœ”ึy1=ช6้็]=๓ทถ/jP:”ทXhugO฿@pD[ฐจ2‹รl‹n›Ÿ2(็๔่ฎ๗ไz'บ-๘๛Q้ฅฮ๗ฬ฿u@sO: ๐บ!Xtย4K^ํ /š†ๅ3๎D2E฿R[ไท/๓๙@ํ Q•๚ ํ]ชŽษkฒืฑŽD๏n{ตpคKzVuQ๗‚xรEฑ‰๚Lม๎ก'้นํหฟ4ŠMm›4ฑ์ต…๓โ๕9๗ฬๆั’|่ำฆฑ/ๅK,๛ลฮyปz3:ฤฆถnW๏h VKJ[ึ%Bิฝึัิ#b๓JŸ ด czszไL7ถป}ร…{}ˆˆ$‰5ฎO6ญ9๘ฐ๖GญMฌš๛„4…Vำwd”๙%ะล‹ำ2ฌญธ™Uถiฒ้.คกžGkŠEธห”๛ๆ‰ฑ&7Mพ%VพS‰%"ะ๓ศ“1"ฑœrbI5`ฏx฿ฺ%๏"ยRLbuห฿?้[–B€RbษAˆ“๊๕Ow+`e=f_M+ฑF.ฤ1ศฟmช๖ฎ\ธ.]#%–bแ‹ฟ5บGปไEežkคโVแๆWฦœท๘Mw!๒iู’ธ f฿ใ๗.‘_ส]์’7\•—๛฿Šไฝ/ํD๛E6&kฦ.จฃ๙VoT่๏ไTˆอI‘‚วเ}๑{็G๔d บ:กsw›๛๊ ำ_ดอK2uนGV‹ะx้๓!๗ำuฦๅ[ท5บฯS็tฬ;2 สn03{N™๋Rwฯี'e‰๋้GฝKฟ—qคuAๆื่ึ/ฬžทl;ๅŒซรw–ฆ๏…ญ]ฉ}๒่R้}มฉ๚ดmyแ)+ะ๋X๎„‰ถ)ตŒฟ^kBbyทรค‰eำฌ๖ ฆ [J.˜k'K,ฑ่VWบ>i๓0j]‰ตC_โกj*ีอ‹๏ฏตส?฿ฅ?mโ๏xDD$ฑHฌ้X’~ษ๎tส—?H‘KำLฌMQ"q†šnฆGQœพแY—ผCLฎ—*މญ?Sค—'xะ_^๎ kสEฬ4ฑ.อVb๕—ื~๒๙=c<๐ญนKแŸXฺ1‰e๋?ญ๗R็cึœ~๚' Žซฌว๏ซi%V๗‰u‹W|ค๖คํkลย?๎UJ‰ต(Xuet’0[ฑlKมuพ฿"W5๙ซ%ymะย฿u๑ฦไ›"u ทผ(Kƒ*x•ชแV้–ž๔šฐ,sm๏ผญ~๚๎+…๎ุืtjฤ๗oY‰‰่–ขB> /๙ช]ค”ป+ฬƒ"ฑถ5ๅFj[ํ™สOoz5ยWqาจ‹j@ฎฒ{ฟฟตz_ว‚่vษํญsKํNฌ6นฉ)%๕WwฤZํ–ึ๊n€๒™NbEtyVŸขฌคทฟา xฮาิ.zD]6tฑฯฮ_๖ˆˆHb‘Xซz{ิตรp๊ '๒่ส›ใOจ{ฝ^;Z,฿oๅษ๗\‰uRบBษึŸ๎›มฏ0ฝ๎ม˜ฤ๒YœXnญtZD k๊EL™XWณ๓ฑ๋u้lรกข ๋ง7ฆ}ท์Nล(_bmŠํsข ำ”๕ๆ\ด[YณาWณXซ’FŸ^Xตwีข {Fฅฌ Nr์7&ฑ^;x=ะปŽm^ถ9฿ ^ei˜ซุ๋ฦ•1U–ชOVMœํ&Vฟ_b}๑ หpฉw~„.๚ฺจ๋้]๓~6ฑโว$–tBใ'ึฤทosฬ<ฑ\#Nึ‹ตrz–๏h]pd ษ ฑ|ฯ}Z‰ๅ?์ธ“SfK๊˜ั}ฦ฿๗ˆˆHb=๗‰Uxธย&Rj]†—–็6—ฮm{}ั”Sซู๎‘pฟ›"ฎfจจ๖ลฦ๑aง;ฑ\>u๖ฮาฎ๘ฉCบหฑ๔“๕Š๚*n~o๚2~mหE;ต๘NT•5Yl9Ÿ9QะP-(˜38‹‰%Ÿา–dาz:ช$ณgFฑไ3O+๙˜‰%Ÿ'ูญ๑&Šะ`r๖้$–ูd3ูช๒[ž"ฑDSmํฮ๐|d† =๎ุ“O|ตฬs๎ŸลrบิTขŸ๔Dมmั~[5ป}ง&๗‰‚หrฯ0์}k๗คฺทIIฌ็yา๖k๕ฆGN›ตฃข๕tJ}๚ม๚ใํ๕RkhS\วขšฬN็Šๆิุš๔Œ๎^pฏ๏„:‘"rLgขสร,sฐSซ๖ŒbPพmwฺ;Nึ~๒~ฅ<{"E,Vn฿|ญYฤF‡๎๔ม๊ฤจK‡k๋๕ฬง4%F๕cฎ\ฒๅD›ทอX’"ซS>๚>vษ*๑Ÿ/๚ฒจ’ฺจŒ๊>›กoDuด}พœ=†๋ทค้.ฎYำ]$ดฝ๐•ู=ล,biKปๆE๗ๆ๔ูญึ{ตท6|ฺ9oป\q“&–Cซ๎vMwaถฺš~์›ูtำHš๎bงตtพœ][ท|{Gtณ}ฦ‰ฅ3พบฝ3ฌฺbฐสำfT๔.ุั“c’†ค‚ทถmธ`5 ว?WJฆ๖wชๅฉ&‡w'ต{ฦำ์Ouฮ‹้MmนgUvอ[iK๙Ow!ผKŸฃ“ฆป่า๔๏h฿pj4ฑlโ#[pฤ(ฟ_๗'๋์ฏ9ถW•งแ๏~DD$ฑž฿kฑึฟY™UtซCoณK+$ธฃฎ๓tT™/––ฅv฿ขทฃ'w์-zืฏน๒C“ลโ”:ญทโงOาs›นFรR‹ค{๕บ๏ํ[ธ~CCณ้}dไ‡จ“›ฏกnXญผะ Mโš“3ž๎bชEœš2ฑไ๗uฒ_z_6ืsฯŸ้xไNฟ0ฑฤ†Eiท–ถX_๙พณ“ฮไ!mภš๒&้มา—‹ฎลผ/MŸ่I,[ปnะำใa=๓“ถv•ช”ม+V,zqู’`Eฬ๑๋ฎฉ&ฑไ๑จ…ฟ[ถ๙ธม:!ฑDi๎šด=hัKซึEeU๒ฟŽ+hแžWพUธEฉปปยิฎy๓งบK๕ฅ๋ถŽ๙dฒฦฮŠˆˆHb1ลฯธ๔‚bแ์*็_ฮ=ก’แึรˆˆˆˆ$‰Eb]๚ฌ๒[๕•žybป|/ฏวฝน0‰…ˆˆˆHb‘X$ึsiัแ 
ซำ9ฺ|ฒpTujFOฟอiฉซW>้’!ฑI,‹ฤ๚m๊บฺjฤuตีˆถผ~ฯ+OพdH,DDDD‹ฤš+‰…ฯบ$""""‰Eb‘XHb!""""‰Eb!‰Eb!"""’X$’X$""""‰๕%ึ๒c]œ๓6อ€ฟ…ฤ:ถุŸ๙บDDDD$ฑžZbYฌถwN๎\œ๙อ€ฟฤZ๕Ÿ฿9ต“ฏKDDDD๋iŽbexz๑ฆ๐™Oฌี‹>_•uฅฏKDDDD๋i&ึํ๓ฺœMeแณX‹พณุ“ลฬื%""""‰๕4kฤr๏ขถNบ"‹สยg4ฑrE_I์รbOฑX๙บDDDD$ฑžfb อ#4Wึๆ|ธ่‹?-ฮ|‹ู/๐™Iฌใo/๚ฯž๚ฆุ{ซnิ‰=™๏JDDDD๋้'–p`hธ๗V_ฦๅSŠ“;—๛ซ8€Fœ๛Š}U์ฑbฟํั๗‰}˜/JDDDDkฎ$–๐ฮˆๅถiP฿g่๊น…๘ฌุ{ห ๖[ฑ๗๒-‰ˆˆˆHbอญฤryw๔ž8Xพ;Š8วY%9zฯฮ๗#""""‰5w I,‹]I, I, I, ‘ฤ"ฑ‘ฤ"ฑ‘ฤ"ฑ‘ฤ"ฑI, Iฌนยlb@DDDDDkvส[{ฯ~Ÿ]งฉ(kRVœlฑ฿c/ADDDDฤijw<ฒ)Ab พŸnทฒ— """"โ4ฝเaEwH +ลํqีGฤVbGADDDDฤŸUดร#งSY๙‰H +0›ฮFW๕\ๅŠ,DDDDD™พฒ฿๐๐ั้ึณ""fฯWb๊\qzณจ,ฦฒqŠ๑ซŠพ๙ "‚ฤ๚™สWูOท[-๖{Œh!""""ขKQvวƒa›ฅฒงNY๙‰‡™๖ี๓˜X๋ฒยฯ๏I๚๛ผีˆˆˆˆˆˆ^E&ˆX˜ั๕W$ภฤ ฑH, H, €ฤ ฑ€ฤ ฑH, ฑ~?&,Ljo%IENDฎB`‚xarray-2025.12.0/doc/_static/dask-array.svg000066400000000000000000000344651511464676000203310ustar00rootroot00000000000000 xarray-2025.12.0/doc/_static/dataset-diagram.png000066400000000000000000001154521511464676000213030ustar00rootroot00000000000000‰PNG  IHDRกQDุtฺ pHYsรรวoจdtEXtSoftwarewww.inkscape.org›๎<šทIDATxฺ์|ิV๒วๅFฑWL5ฝŒ{o๔ ฝ๗B่ฝ‡!@(กฅ‘ž า{%ๅ.=w๗ฯ%—Kป๔Kน๗ฃ}Zหฒ$?้ฝล2ฬ|>๓{wgตฒV๚jฬo jPฏษ๙H๊3œz๎z44444ฏ[<๕eิซ^Ÿ%‘๚l๊ม๘gE๓ˆmฅพŸ๓นทP_ฮ_zฮื…POั}‡็Q?ป อ๋ึŽ๚ ถฦห6€๚kิC๑ฯŠๆ๋Dฝงƒใ7Ÿ๕9_ิฏฉท`?gQr‰๏๗ิ[บx]-๊“_[ึ฿ฟ(ภ็๏›ฉ‡xBf๊ $ลƒdม<= ก]ฺ'”ิค~๕ฺ์๗Iิ๗R?N}5๕XkQoM};๕{ฉ_A=†๚*๖์gํขฺ•๚h๊wQ?ย.ฦmุฬbํค\๗ผ.•ฐฎaฟkCj๖›t๘d๊wRœ๚&ล—šF=W>฿Zล—1 e}ึaบ“๚ต,><7“‹ฮณใฒ#จ๗Q|Kุš5gว|+๊tฟ‡L๛๊ฝจW3ฤ„e๔์˜mช๛}}ฯI rเ฿ฑิ ุqชYถ] ิQšม๗n๕‘:ภ c๓๕A์ปวžซY๕l‹mฏฯพ๐y‡2xผฦฯฐ}ฬcSt็๘@ฝ๛yผแpjp#ฑ?€ŸŽกOจWน๚ฐBทP฿(ฮฑว๐T„†vi[]๊จO}ป€eP;I@Fเ ๊ฏฒ‹'ุ฿ฉฟN}๕๙์„;)มI:`„ ้ง,!,7‡ฝ”•๚ิ'QฟƒAd3๖๘ิŸb~9{?ฉ฿ส.W3€ฮdpz=๕c  ๙<๕นบฯ–๚_ึ@๔๊Oณ“aผว>๛9๖Y#๑Pนจ พฒ‹๗)vฬผฆปแธ‡๚v,ždฟƒ›ฑฏุ…๘C๊o0€SฐผKM๊gุ๗i<{lปYRุs๎ึฝ/|nะeฑเ๛ฒž๚L๖ฝ๙‚=ถ˜๚ทิOณ๏ร์ฦ)–ล๚๚KŠ/๓6๚c์uนฉC!๖/๊n,ืฑฤุ๗๊=ๆ•˜wกpYฎ๖p๖ฏย๖๚JกA์๏k—อ…วขm€ณz9๏ป‡% œXธ๎;fกีุใAxŠBCปtฬธ/;ม้2<๏๋.ชก๓tฏ‡็฿ฆ๛.j่.ชำ]ะ‚ุV๖๓zกบวา4 ำm๋ฅค๖-”]๘'ณŸ๕ห๑มJ_&Wณป hŸฝ:ƒไIx˜\TวะO์ๆEป่พฮnศดcŽ[my;O๑-wงฒŸซ2๐;ส~พ•มฅvOcY…ใฺล๖ฏภKe`ู‡|D—]Zง”,ว‡ฑื6@ภ}์ฺrผ–ฤพW`€ถำภ๊๖๓zถ ญu๛buธˆ 4„ํ““์|`™ฤƒ›้„Oe ด‹OeณWุ๓ข<ีึฝOWv3ฎYwv‚ฟ๚ํํมnpc~ฅ"พาb„ะŽ์|3;ฦoั/cว๊vvƒ๕ ปYัŽGธฉzœ๚ฏŠ/ณพ™8ตb็DธYkฮŽ๏™฿ล๖ี฿•าู๗ฑ์;ขํml{~f7A้พกl›พgรM^o;\“๑0น่ ิ˜ …์เ์ุ1B(ิ;฿nx>dtู'=ดEฐ๏P-ฅL่บ5+mห@wˆ.kถNแฯ„ซ”ฎณƒ›ทw.!…ฯžศ&ฐŸ „์]fุŽ„Fฐ`ศz/ำ9œOK‚P8W>jx<™็"„พcx\ƒP(9๙I)ญ,’ภŽ;Q…๒€Q†๗หพ‘์=^eฐช๙?ฑฦ'44ดJก_ฑ“.@ิ1}ค๘บส๋0ƒ,P— '.จ!…ฎJจ‡๚’๔ภบฒ๗ฬ ~†ฆจพ ๛ป@แ|vbŸญƒะwY†Nšฐผt;I6b'่?- Ta '๔|ถ=ใุ๖ตลรไขƒP ]FJ_rb„ะN 4ตcŽ:xผŸ}ด›ฃ‰ HยM  @’Oe๐8ะBoาฝ7J‰๊ภํmeก=ู1NY/(ฅkB/v…Fลฯูรn4กŸฮ`73a็›ต ะ๔ž! Bฑoฝ*ย/์XดƒPุ†Ÿ•’Tฐๆ L@่l„พฅ”ญซ?ยพแ์o็๎ฦฦSฺลoq์ขฃuฅร๖*v'๚ƒบAบ็รบP๗๓ฅtร€v„ž`'้/ู…|ซRฒ\ฤ2Pณ๗‚“),Wiฒ5Wฑ บf %๓$‹wืฐ”y”]€ณ”’ฌ๎฿ูลด€]๘แ๙แลถ%œร† J–๕๚Œmฯ_ ฃ]|๚&;๎ฮฐ›.ศ ืณ€ะvม…ใ่!v,ฝษ.”`ฐ$CฐD’ใD๗CกGฐ‹ฟูcUL t๛naะ๚Fh,y…(ฐฌRuฝp“•c€ะ0๖=‚ม๒_ฤ_" ็3(=ะ/๗ฆK„ะ:}ฺp„ฑ››JIc^๎ฉ*ƒา’ ด€m{cใู๋฿;ด…Lุ้๑ค่nฆ„ฦฐ’ฦ~†dฦํ:ฝž}oดl|๛h฿‹'ุM—ข‹๗๛ y๚ˆ$กKา[0xโ€คqทnยq`[šะ8‰๛ วoุนณ๚9eฤŠอห#&ว้1w.‰ฮฮ&ฆMŽีuๆL‘–FบะEโ๔š7ิˆ‰ั_็y @๛ดอ"๛Œ๒‰‰dD“ แ8เรง“I‰…Rb MH%ณำป วY฿k P'‘,ฬ๏'k]QคWTsฒข๓PแXซบ#k6!+%ฤ‚ํ้‚ฌฅ'gYทaคFีj๚cฒJ ไ"ะ` ฐฯ„Іๆemฑ#Y๙่ฃdํนsฎ}ฺmท‘D .‹~X(๘˜#GH+ TหNŽ5pื.’2>YyๆŒpฌถ‘ขีซษ๊วŽUฐrฅO4lKฺๅ—“a๛๗ วZNึำง“Q‡ ลYCท)ฅG๕Bฎ]็zเ˜ืtpJ>๙`๕๒ษฺ\๛™ั+ษ•ญ{ฟ,= xฟนdwฺ ๒มสรยฑ๎๎3›-K>^sฝPx ฦ“;{อŽ๕แช#ไฺŒหศษแK…?฿{หช๛๔จยฑเoทญmo๒์ิอBq^~€7OR๕ชมกฺ1?จ\เๆ๐๗ZUtQ@h๕•ิฌBBƒ‚Bัะ+‹|š›‹Іๆ1ƒlะ๊ ณS'แ%x/h๊‚R2 ฒ ฑxอา๋ชซ„cม็J_ธPสผ #ะดฝšฟ{7้xร ^€P8ๆO)A*€พท๊ฐ”%x™PzO฿9*€~$˜แ•  ๛ณ‡‘‡‡.––ต๔า<hวํีใ;ฉV]๒X4๒zว•B็h๚โ…ต•B5๔อฬLB:vDEC๓ €ฅ่ Aุำj@eจŒX !”Q*@;ฌ]+ @3ฎธ‚ ทฯSšง@7&๙ปvฉ๊…cพ  €uๅ!!่’Q บ+u 4ฝพxœ0€ย๋ฝ  ๕!Ž\๎ImO๔LT@+„–ะJก! @จ_ฅ y3+KP„P44๏-มซะคฮฅhYณค5!ษฬ€€ฎ,1ะj@e-มหส€Xg.Z$@ต&$™Z” 4ญ`ะg@e(,ใ"€–฿„ด/k่E  oQํ` ‡P๘oะJ ก€ะ7X! 
อ[: ๗ศฬ€ถ›=›,z่!a{๔จ B^PY]๐^PYMHะะๆ_‚7ะ „ะp€*ไฒ”OจŒ&$YK๐z•ั5 ^P๘AฌgฆlŠ๓ฮ๒๋5 I&๊a…ะ฿โย}5 zญคฌะื ŠІๆ &ต ฉfณfค๛ฦ๊ณ[๏พe ‰<˜t\ทN(xบ-5๚๖%]7mŽหจีํ•ท|9ฉ3|ธ Žขฑ nณแ˜1*ิŠฦ‚l1ศVษˆีxxต”B(Žถ]บุhAจ  ๐žiต๊“[{LSv๚M&’๙‘)ไๆฮ“„โ€.MDฅ’{LŽu(YŸญ‚ฃh,จK]ำจ˜ี{–p,€๕อ-ป วmYืค#ฺูฆง”X๐๙เsŠฤนญฯLRะ$ั฿„t–ี€V…๏oฑZ !Tะ?ฌ! อชf@‘‘จ™/ทyราh8u๙V$x๎ฒeค้ฤ‰RbตŸ;W- `–MกIF,ุWึฏŽ3โบ๋H#T(5d๏^๕&ข๏ŽBqFาmŠOIQ/ด –ชญ`๕hv9ไิจๅjำญ_—;B!ศฤ‰ฤ฿‘ิWอฤ‰ฦัฒฒPo)>ื๚ฆศ'ว๙ฅๅ๕๒T(๕ะEdaLบš้๕ภเ…*ฌรฟ"q๎ฒ$Fึ๑ห0=V0@=กsTญn  • Bƒ€YงJKEEC๓ศ|<%ร$Sˆ~ะ๎*„ส์‚]‚ืd˜ฝ(D/Z : š SตฺตIฃ‘#-๔Ch„ “๓„…่ฯŽY%M๔พ๓ฅิ€jK๐ฒt@>๏่9CJ จ,&Mˆ^–จ4!๚พ%๘:aไบv-ิc:ฒ…ฐ??ว@+„j๚ฟ˜ะ0ryF–ŠІVqๆoB๒"€สชี„่ฝฆชี€z @ฅuมC (ะˆ„’ู๑1cผกL†ษ7 ้}A&PXR๖€jMH ฒt@e(|.YMHฐฟa๙K2Lo้บเึฌC& $G“{Bƒ4…&ค—ฆ*ไชฎ•Ba ~นยt@OI%๋๊5GEC๓ €ช2L๊(Nมlฃl•ี/sงWe˜d๊€B™A t@sฏนฦ ๊“aR‚ิ&ค๗›d๊€สPY: ฒ2 ฒe˜evม‹6!ฉ2L-“K้€™6ฒ2@จ@_`]๐•Bห่€~•_ˆІๆU๕šจ ^ิ…๎22  =ทn๕ค(ิ•BิZ @etมC&N†จl•%รไU•-ร$@;้T๋‚ฏZ@+9„†ฐ h)P„P44่r้€ยผL•!ร$s ^€V6ะ †Pฟจืd˜ผ  ^ี=5b™‡–เ”ส€๊e˜<กะ„๔ป™h%…P @ห่€"„ขกyวช๋—เ— ยž ฒt@ก Iึผl•ฅ*+šตxฑฺม.@eิ€ฎฆPฌฯ€ๆd˜*Bร}P_ ่_%,ม{ @ม๗›+MTP/้€BษƒL•ฅชี€‚ ำiƒจ‡!2 ช “@+)„ย2 @_3tม#„ขกyร@†ษ฿„$บ5 ^P˜๏5….xY P/่hŒาื€š่€V„ช 1%หPX‚๗า$$ @ก If จ •ู„YKฯ(ห€B’™ฝG!ิ ีอดBจ@๋S}5#ฃ `"„ขกy@ี hbq1Y!ุ„4ใŽ;ิPK๐2t๐ž=jผฬ&$Y*ซ j@ฝ Zบเญt@+B@๏…xฺ็I้‚‡&$/f@Aุ^FผL••}ล!eี€สะ65โี&$3ภ๔ „–  • Bกณ)๕?๊UฉR&ŠІๆ%x5Z;1‘ดฅ๐˜4gŽkO;–ฤ ขŒH๐z#G’ฺC‡ช@+ โภ๔"ศฮŠฦŠฝ์2uzŒ‰ฏ0€4™0AJฌš๚‘ๆ“' >จ” Žฅˆ-J„่w๏ถLMฆ€- Bš฿Œ\ีฎท*๏ึAX}qํ,u^$8ฤYV7Wอ์‰ฦZ—IV5,TมJ$ผ๒่4ฒถqแXฐ`าำ†f…?@œIdS‹ฎยฑ@วฆYœ–Hœญป“ีcิใดuฺไœ=„nhำBA่,ศZ-ม;ะ/+$ ะี+GˆžB฿ฯษ!ี‚ƒBัะ$[ธ>*ช PYC2gมหข‡ Aฒปเed@A†Ifผ P-ชะ‹@มž=ถ€Y„ๆ_{-ฉE/$ š๎ bB๔ขะวฦฎ–ฆzbเrMส)MHฺผฌ (Œ•ีSŒ„…่ *šี7!EW5]‚wกงs'“œ๐z†ะ  T Uศ๑a๖€Y„w•B6wVHhp…Bจ–%U‚‚ษฃํSlณ<Oa™ำ„!„ขกI4ธ฿ฉh5 –เeจL&PY: €ส˜„ิq:ฯษ0มmอตŽำ-„๊ปเyfว@huEขW|บ”G† TFdfฝ ฒš@Tซ….๘cฉรษีญzบ†P}<ฯ์x!zU2 ๅฃ„BtWO€๒ฬŽ„j๚;4!A๓7!%Jะ+|PZ’ื2 ฒ&!{@…›`ืZ๊ำ*3€ชK๐m …u@A†IถจŒ (€tuหา••• ฐ- D๏Ymุฐ”ฝ๕๋€๊ิ%„jะ๕Kส!๏yHT6€ส’aบน๓$iK๐2e˜ ๓,k<ภฌLะณบ.x7z:ืงjข„N7P7 : L„่/0„j]๐j ่+!zง๚ƒn<ฯ์x Z‹z&ป>kึ˜zkไH!ึ๙ปิฎไŸ%™๚Šฏกl9๕I•u ^ะ‹]:ฮฝฆ* @e๋€สhBZญPะ5tม;…ะ‚}๛,u@B่-ฺฯc:  ๅณ3นฟ'u@o๋>MJ’ฌ%xO๊€ฎฐืu ก๙ดา• ก –: N ๔w V2LB2Lฏštม;P@Y (ฯ์xI ืๆWจ/e?ืกก"wkeท วณฬท๖๛X๊ั์šมs~ฅžค๛}ฺบื๋3ฺ้ A„๎๗์๏bณ=/า";_‡ล4{ฌžแฝฌ>w๕ำล 2|ฆ`~ˆาgุ/ฺcั์๙A๊X]Ÿ• ^PX‚๗šฝฬ&$U๔่P ižะ3gJจ‰ฝ… จ  : !๔฿ฺผŒ ((@ŒืTๆ$$™*ฃ –เ!๊%…&ค. @ญt@@่#ูษธุ$KPIฤTีตข็…POX‚7ห€^@ๅาๅ…Pศ|B๔i ะ/วgQŒz:๕ฉ4ฅKู |d๕›\@5ก ๕™Ÿขแ_เธ`๛t 2` ุs฿dูg X฿fK฿)พ‰aaิฏaูTxอGิ๛๋ถgทโ๛(๕ฯฉฬžฏมj6‹ ฏž๚ฦa`ฝุM†๖ุv#bf็Ÿ๐ฝฅw๊จocK๒`cจ฿ฦ’-_P…๚๊‡ูฯฐฏ` {๖|฿“lฤ๖_ภ3๎แl#Iำ์lr๙ฝ๗ชKnด-Arฮ=๗ล}๘0i2a™{ธp,ะด˜]๘ภยฑ@ำ.ฟ\-3P˜†ภ.^s๊Eท ๊w!‹ šขBDใhMHฐoฅ๊Bcปuณœ„ฤ ก)‹kdาฃey}แnu๙ญŸฝ’ฌmผ6oงPpX‚‡ๅ7/฿-๋žพsศต—‘ทํŽucว ไhัXแ8ฐ-ฐขขฑฮ/ฃf@ดeฤ‚~zิ ก8ฏาcฉK๋T จ•(/„ฮจ“Nฦฤดณข—ก~ตำๅ…ะiek@/0„๚u@ํ&!qChำD2ฌF=๒T๛tฎู๑ช ]IoิฯSA๎,e—S†๚Uิฐๅ๖ืีbpu?๕G@6g–ห๏๊gคึคพ๚ง,YสSุใ9 Xaษ๛]๊‰์๕sจEฝžฃ,ษั‰—]๊๋x&ฑ๗šJฝ%{$ถฉิ฿g๏gfกิ;2˜N`ว~ถ`Sุ๛eอa็๕ ˆ‹ู~hฯ~~”}฿b™฿D%C)ˆtฝ6ชAj*ษXฐ@ญ%t๋ S‚ 'ผ! 
D—+~ุ0าh8u^บhฌธ!CH‹)S„ใภถDS๐‚ŽsัXฐjQ@ƒ:WัXะฌังpฌtz,EดjUฆ ษ-„B4"+ห@y ด่เA—žN@ดCB"ู•9XO๎ึW5,Tั๗ค‹ล_Ÿญย,€ฃhฌลตณิฬฌhุ–หฃำTุต;m™™ข6ˆฦ‚L๑šํีIH2bมvํJ(g[J?าฒjtIRK(ไPhB๊Xญๅ™o๗œีญบุA่T @Ÿ™$Ž รดฉณB๎ส7;๎๑ฯ*คบ;…‹ํ"(+จS€๒@(,มOŽjHh“ฬ5;๎9อฮ&ี‚ƒBh ป/ไ2i%‚ะwuฐ”ลฒˆ5tฯIb™ฬบlเซ[z7L}อ่ืิ;ณ็Cฆ1_—a+๕ับ็‡ฐ&ง™:ฝK๗8ฟุ฿pหŽ๊ม qหŸ5dน!ห๛ชอgOฃg5B่บใ%‰eoฃtๅ฿ฐี’ํŸฦบุ๕ุ๓j ^P˜„$*รคuมหhBOaAV’ฬ.xศ€ยd%K๐ึฏ'ทl‘าc=e4!ม>’"D: จฝ ชUIึๆอ\๓ํ„่ก พf~>ื์x+-ฆZ›^‚4&I: 2บเe-มรฒ๙}ๆKข‡&$X‚—ั„B๔ ^(eูเ๓‘aK<ฃ MH]YดVprOๆ[(,Bต.xศ‚๒ฬŽทƒะcIzตx+…%ฦ฿ช…*ไ@1pิt@ืuไ›o๋วๅ YUคฐ`ว๊ะjAมไฎฤvๅf9ํ ๔วขbu ~Ftื์x;ถ ฬŒkHย‚‚@h(๗ณๅืLไฮ2๚„๎็ัl้๙iฟศภดฝBdพhx>ภูXก?ณ์ฉย–๓!ฮ๋†็ร฿eƒBฏต€Pอง,n$Ž2@ีว}‹๚{,i่B๕Žลฏnก]Yฦ๗Y{?หภดK :รn“  2d˜@[N*@— ึธj*ST&€ย็๔L (& @Aดn฿พ*บ…P@{๕"dyfว[Aจ  YYDซPจ๕€‚€ส’a’ู/[ˆ^€สาี&!Attl;r:gฒkยd˜xfวA่}@Gวด%‹š›Aจฺ]]!ป)<7=8j: ็›o ฒฉหแrผ ฟ ำ†„fไI›ๅ๓๒ T๋‚šฦเ™oก฿Q]฿„<ุ6ูํrD#1 tžท„PEนŒ-q74๑*mฦ 2ไน& ๗-๕~&ฯโ€ะ9l‰;ุ๐xVืzซI๚6ูoYZศ๊D[›ผต€(4!]ฬštศŸ• ็ฦฏ•ฎ*@e ั{M†Iำ•!DฏeSeh‹ˆธRB๔n ่A๔ฌAิ „๚ดmฉŽz„ž€ๅ๊่๊ฅปเ@(่๖neปเ@่ฯ+ฒชX!/Ouี๚gผAˆ „XไƒFcผีฆ*ฝห'&ิฺ(e'ณšฮ[๖ั- 7dแ๕Gุ๓'ณ๎t…-E749iศ}์๘\hธ)ศ1ิํย๛ fจล„&&…ฺร`Rณ(ฆ์p'้dร{Žาy{ใป8”iบŒnO๋#ูจ&ฦขbu๛l๋Šฆ”ี@u„ไฯ€สPู: ฒ2 ฒ„่eจฬ&$ @=%DMH€ึฏ_Fิ)„ฺ ั;PผŒIHุผ‹Phฮ‘Y*sาญฆJPYMHšจฌIHR„่——ิ€'!นP˜oฅ๊Bต ่9C,„’ุ๊ee˜œB่o@ฏ๎n.รไB@W•ี&ๅ€P-๚ฟ:&B๔N!TหZšษ09…ะฐlช@Bั.U๓ห0ฉฃ8%,มหž„ไตQœ^Pูฃ8กtุย]๐Yฌ Iฟ๏Bํิ „66Lะ V๚ 5 ›[v“’0ƒฮnั จึ/ณ I€^—;BZผฌY๐ณ ขPข๏ฺ: ั—ฬ‚w กฐ?มb’=’4Hะฑšข3šไช 2Lฯšศ09Pต ž5!๑ฬŽท‹๕๓ _ิLฟีj@Pิ0Šำ)„€.ช„hื์x;…&$จ} กh—*€"ญ ^b จ 2`Vf’Lํผa้ถyณงบเaIPึ„d…ผ sเcaผ(„ๆl฿NBcb€ส้‚‡&$5 ^ะc]&K[‚—  ฒบเeอ‚ื(tม?jกส กห ษ$uฎู๑vบบqฑ%€ฎi•;P^…ฆ!่ค|฿์x;)งีลึำ™l ด”จ€:ะiั U}า@@(Œ๔\ฆ6!eBฌ ญRYu @ซ6h@ –-Sณrnd…ขTaH$x๑ฺต$ผwo’+ธMเŒ5๚๖UปืEcม๖€่;ิoŠฦ‚้E l๛M4,›7=Z88LณJ;V,Lํบw/ขฟ๚jK(ไะ์mHต””25 N!Tญอฬ๔ABตXr]แhuิค[‡ Asj$ฉKห"qภกfsVx[5ฆh,ุžQฉไ†ใ…c€ขธLu"’h,ศ4ฎlP |CณฮdMฃb)ฑ`4“  &Žอ’สP^=ž1† จูผT’[=’4Xe  RL๔MHn!2 32๒ุ8พู๑vฯ.๘I๖ำ™, ิ ะ„๔ฒ€๒B(่€vญR@y!๔r๓IŸj๑ไผอ๓Bั.+%ร4๋๖ี1šnf‘ร4ค้‚qภ๛๎ุAฏ.๋‹ฤ™}๗*,B (ิจŠntภ„B\ัX•ํฒqฃpุG-†์ฌhฌiทฆ‚,dSEโฬขิผจHฝWตPฆ˜.]H~ธฦvZจก๔๙ูศ+svธ๖;{อTA่…[…โ€__6ษ฿„$ก'2วฉฃ8ตP=„ฎmMญชV{n!ิ "๚หJ_’พ,ภ„ย—?ใรย5 n!๔งขb”gvผ„ศt@฿a]๐กh—ฒUำ–เ!*@euฎBˆ^FึR€j2Lฒ2 ฒd˜d(ฬ‚ฯ8ฐŒฝ[๕ห0้š@h™IH2  2e˜d้€สะ[บN๑”(@:d@ฝ$ร: ฺ|K$$ท B๔ใb’Jษ0น…Pศ€Ž6ัkMZผฑ ษ „fล)กฟ0PcRyฑพ*Pช๏‚wก?•Pท๚=ห€พฃ“aBEปT บเ)’Fqสข•KฆจL•%ร$sง 5•aสฆpจ˜่€บPซIHN!Tะ์lU†ฉŸชzPธ–เฝ&รP™5 ฒd˜ ึีK5 ฒ&!€jPขื7!นPXzoข๊BOฐPc,€ะqmHPข‡%x=ฬ9…PhBRe˜ฦ๓อ{ทƒPm’Yผ]ฌ?ื(d[7…Bต@_14!9…PM๔ “.xง 5 Kู,xžู๑กh—€ถ‘ะ„${ผL•ี„ไE „จt5tม;…ะ˜K๐&2LN ิ ๏ฎ8่™&$/จL!zช5=AiR ™ฎาู€j~ฆ2 ‚Ž(ฏRซ–ญ=„‚hMz‘บv-ื์xปIHZ<:z <พj [!z…ฬgฏ๐ฦไŒ1\ณใํž5 ซ6ฐœชิ!ถ RศSํaฏ<….๘-]rค?฿ผwIH+ฒค@!๗w+ทกBยƒƒษำi๖เX„ยŒ่r{หv\ณใํ‡&ค ‘ ศูvฉ\ณใํžs>3‹T vกแŠRฟŠฯฅ๚?ง)ญโ้*ญ49๏ผSฉ!4ทS'rโตื„ฝpzr๓3ฯH‰ีyำ&r๏+ฏวน๛ฅ—H>…tำำO“ฎ[ถH‰ต์YาŸย™ŒX‡ฮ#S‘kใ‰dีw ว9โ‹$44Tอ‚ฺcyช้€B#ฯ์x;อูฑƒิ ฅ zqšิ(๙่ฤ9aฟญ๋)qภoฬM>8˜pœŽŸUcษุฆ7E๎๎7WJฌท@™ผVJฌs‹w’'–ํ’๋ก๑+ษ3k๗K‰BR#๋Bayช้€Ž‹MโšoกZ ่8๊Vb๕…1MิiHะใB@กr{7พู๑vช้€ะ๒ฬŽ7‹•C!ฤ่ฟ-,t ก0Šj@Wิiส5;BdS• ฯ์x;*็ฬ๛๏“ะฐ0!-ุฟ฿ฏส3;Bsw๎$1บ‘ไฅKI…YCวoฯฝ.์๕š+%๘๑ยIไ๋ณฏว๙๚ฬ+ไxัd)๔ท[N‘G‡,‘๋วษณณฏ’๋๕ตษŽJ‰๕๔๔-ไํmวคฤ ‚ะว˜(€(ฯ์x;ีt@!ฆฤ$Qีd˜xfวAจ^”gv|  Tำ…Pžู๑v ช้€๒ฬŽทƒะๅqศS)i"ห๑๕B+ตมิฏ ิช๘ดoฏฃ^!!!T"„ชชำะ  ฑ@แ๑œ+ฏDE๕$„‚ฝ^TB: ‚PUดG‰ “„baช(ี2 Z’„~ฏJ:5&o3PG฿ใ9yข5กก?"„ข!„"„"„š@จ z&ทช(ช?#„"„zB!๓9!ถฝ*Hฯ3;Bอd˜กšจพ ษ-„l2 ษ-„ย„ค–ฑ๎ tq|r.)kvผ„j: ็uะ้Bฟd๚w  “BัBBBอ TญีโP@iฌ]cB(Bจื šฬt@@่™ใLu@eCจฆzf,฿์x;p4ฮ‚Pะฅ i้Ba ~qํา๊Bฟ+(2•arก_ฐ%x @BัBBBก D_t@7mโšoก จก3!!ิK ๒Kชจ l:…ะ๛u5 <ณใB(d@ฏ5P7 บพฃน6ฉSfฉฏฃํYฮ—ใbK๐Fuก ะฬ๔FZื์x;…&ค…ต“OuŠІŠŠ*BA†ฉv๏ช =ฯ์x;…กT[‚GE๕"„€Nก๚จh:P;• กฟฏVศžžๆ๊BA†iC'ki(' บ8_!๏ฬr^๚ฃ €:…PX‚_n N!๊I?ษฮ•ญŠIhกกก„Fั“ช€:ะ˜.]|MH&ŠŠ๊ื^อ€>j™ผ:ฟ~ถๅ(N™บŒB่ฎž๖B๔ผ ๐นกฃฝ6)/„ฎ(*  N t…=c จ[ีPญตข็…ะู1,3กhกกก’ 4๎ฯ๊้้ถส ก™0ซgd˜f@BBฝก'ณ'‘žแl'!๑B่ฉ#ศ€šอีฮzžู๑n!๔5 
™‘้ฃษ3;๎9?ฏPศ”๔๒ล๑y 2 ฃฺ—P^… ่ˆ๕l”BA†ฉGีฺถส กŸd็‘แu,! !!!T„B|Lืฎj&”glง„ยc0ึณNถqBB+BA2 Fq๒ฮŽทƒPX‚ึข;ืPท 2Lรฺ๑ํดƒะ_้{ฌ้ YY|c;ํ ๔[ถฟฒุนD“V:‡cิfy u›“"rอŽทƒP่‚ŸPญ) เุN„P4„P„ะKB5ฆิ5kธgว[Ah.ำ…XๅอŽGEญ(}Œ้€>@แQBีะ่ถไ๖ิ\ณใํ 4฿BกtW_”wvผ„สt@A†‰wvผ„jMHuช๊ืMJใžoก?จ: Mศ[ู้|ณใm T“az=-+ะณใฑ& !!๔า…Pฝ(ฯ์x;ีwม—7ถ!!ดข ๔ฌ_t ื์x;ีห0๑ฬŽทƒะc)รHะpSษ0ห7;B: "๚ “az{–sฑ๚Ÿ : "ช้€jK๐"ชืๅ™™P4„P„P„PjิPฃ(B(Bจ!๔ ำ=อt@E ิจ*กว’‡ฉอQ‘ ส@จ™ “[5ำu ก€พ5ำ๙ฤคŸL„่BจYผ[5๊€"„ข!„"„"„Bอt@Bhฎ‰(B(Bจื ภำุ๏Bอd˜Bจ Oฬ,Sช่้ฑ|ณใํ ิJิ „jK๐zๅ…PMˆqC’…šอื 2Ln ิจŠІŠŠ*B 4ะนfวAจ_ิ DŠ๊%… (่€ž2d3@จ•จฝ%e™็Pccิ€šจุI!ONเ›๗nก฿Z(„€.‰/  n ๔{–}Dิ)„€šษ0!„ข!„"„"„J„PX‚7P7j'DŠ๊=ร2 งLj:BจถฏŸ+๏BoMN&ฦ%๛Tก?,๗อ‚ทขwกv๊B“f๒วา ๔๒๒T}ฬB†ษ „ภj@_ทขwก€~l"ร„ІŠŠ* Bซีฎญ.ม›จSM]ปึt'B(Bจ— ดexฌZj%D๏B$ ฒPงzS๒2>6I}2Mม นฒ‹ฝ=/„v™o @@่ษัek@y!4Žžfฦ&Xจ=ู6E]ฮทำๅ…ะำํRษŠŠvIBh:=จox้%aฯูด‰\๛ไ“RbnJŽพ๘ขpœรฯ?O2)ษุฆO<กญŒX[OŸVA[Fฌํgฯ’ัGJ‰ตไพ๛ศ‚ป๎Žsไน็HHh( ฉQรVˆ „ึ*,ดPง:นว@๒ษ  ๛ฝE“คฤฟ3c๙๘่ยq>:๒น3sด”mz๛ช›ษƒ=fK‰๕ฺช๋ศูQ+ฅฤzfๆV๒ผํRbบ”ผธh—”Xก๔ุชWฅ†ฟ IBgSp\ซฅ%€:ะi๑ฉ๊rsี 48H!Žแ›_„^–Xพ=/„‚ฝ€ฺAh๕ rช] ื์๘๒ t`xrพ‘y.mšHFึฌg+Dก ด–๖ Jวjˆ@hก๕SSษ ๛…ฝแW>{๖H‰ีx๑b2PBœ๛๖‘ Jู&ศ‚6ก%#V๗;I‹eหคฤ˜MYปVJ,€ผ-[„ใ ผ๖ZLO’ pอŽ/ogxfฆ*้ฤ3;๎9)tŸ“A-2ศฉA‹„†ฦฝฅฤ?Rฟ;99๐ แ8'.Tcษุฆบฯ"7ต่/%ึ}ง’’‡K‰uW๖Xrw๎x)ฑnMBŽL”+$(˜{vผsศOบTkจ.ล๓ฬŽท{,มwจRฏิผู๑vบJ!ห rใ@พy๏vC ่ฬฬ๒มุฉD“S…%xฐM2ื์xปว_@†ึจGžOษเšo๗œ—ำ3Hzฌ @่OQJ•*แˆ@hธห๑—๔์x;ีt@ใ๛๖ๅšoกฑ5(ฬ„โr<.วWไ์x;}€5!‹Mโšoกะ„5 “ฉ‹ฮŽทƒะ_ุ$$˜ฌฤ3;Bก–เAฃ”gv|  š–ล7![ดเšoก  บถn3ฎู๑v๚AV.UŸT ฦๅx4„P„P„ะ@Aจฺ„ฤt@yfวA(่’Fw้BRWฌภšP„PฯB(d@@a žgvผ„๊ปเEgวA(ฬ‚_ืัทฯ3;Bฟ]ZRส3;>Pช—aโ™oกRu@“ฟๅไqอŽทƒะ)€ย„ฆWา3ฐ& !!!4Pชจฎ IB@aF}ฎ]ุ˜„๊YU4บญฟTB@5Pัู๑v ]๐t]๐"ชPžู๑‚P @_KหไšoกšจV*ก€BV“ะBBBกf: n!T ุ๊Uีt@ฯ๊t@BจQ4Pj&รไBอt@B่—‹า"ฦ„~oP5“ar กca\c@ฑ; !!!4@j%D๏Bก4ฆK?€"„"„zBKt@งpอŽทƒP3ะ@@จ•จ5f@E ๔฿‹}3๊@่ฌิจ๊Bญt@@่G4ฦๅ@แ๗(ั„†ŠŠ 5.ม‹@h&…อ่NH{กN(BจW!Tะ3&’NN!๔๖ิj3ำ3&2L2!žgฅ๊B5=?“ovผ]ฌฏ—(dE‘Bถภ๙rผ6ึำ B(ิ€Z้€:…ะX๔+–EECEE „ๆ๎ฺๅห€๎ุม5;Bณถn%Q:ผ={Pฌ!ิณ๚`–ฏ ้Œ…ฆจKa๖Y• ก รดถƒตฝฮ@B(tิ/ฃ๗๗หื„€.ฒขwกฐ\YK+!z'j\‚— ก?!„ข!„"„"„า}YTdบ๏B#‹‹I$ะkฏล‰Iกž…ะQั‰ฅšD tz|ššตPY 2LvB๔ผ ปฤ@@่๒"฿ถใr็I(์มXOปIHผ:1ฒ*ร๔ivื์x;ีะ4ŠІŠŠ*Bณw์ ี32l”Bำึฏ'แ4–Y!!ิ+z_ฦXา3ผฑ-€๒B่ ํ/#รฃZSฐษ5;-„‚่คด๒'!๑@(,มCฌ๓3๙fว=็ซ% —R’uกะ„4บf}หY๐N `ฑgตx[ๅ…ะw2ฒษˆบฆP\ŽGCEE•ก•ั;ซฮ3ถำBก ฉfAฉำฟ?ฮŽG๕,„ยศVณใํ š.ซี’loูƒkvผ[ีj@‡ทใiก฿ฒe๓๙น|c;ํ TmB๊เหฮ:•hาบเ!ศ3ถำBฟฬ๗้€B๖’gvผ„‚ ำ๔่ฒฒN3œ†ŠŠ(ีš@B‰wvผ„j]๐ฉ๋ึqฯŽGEฝะชึ€FทU›‘D!tR\2น'}4ื์x;อ‹nl กZ๘gว[Aจพ”wvผ„€ย’'๓๋„€ฎค๚jZ&๗์x+ี7!qอŽทPhB‚Xษฬน ณใBัBB/I๕7!]}5ื์x;ีb๒ฬŽGEญ} K›„4…kvผ„j๚Tม ฎู๑vzc๛!ค^h„)„€n๊ฌsใ๙fวAจฆช-ม‹@(่ Nล๊`ะW™จ„๚d˜šจ๐ศ3;B๕MH<ณใBัBBB]@จQTBU& <ณใBB+Ba’^†IBoKN&ฤต๗๋€Š@((ผGvTBู "j&รไBกt@5uก?hi!zท๚/ @ณrนfวAh‰hื์x„P[kB=ฮไw๑ˆzกก—8„๚u@u2Ln!4s๋VŸจNˆ!!ิk Pฃจ[-ัษ5;BoJBๆั๘/อ)S MH›;—mBrกV: n ๔ฆ๚้็c;A†i‰‰ฝ’-มdarกf2LกBถ“๚อบŸซQ”๚D=„P„ะKBี%x€บ…ะ,๚๗Tu@ B๔กก^‚P+P7jฅ๊BoNช"Pcc่€B ่+S๙fวA่wหฌu@BจQิ „๚…่MบเBจจS˜e@]๐B/V‰ฆิ?งอ~๎E฿ิซ ๊!„"„^ขชฏๅ™oก™LˆL!!ิ+ :*บm™Qœn –เญt@B่1  ณ๊f”สŠj ะ]๐ฏNใ›oกๅ ั;P˜„dิๅ…ะหอS3 oXศ09P€C;P'j กยAcŸ`ทP฿‹˜‡Šz‰BhีุXำ จMZฒฤทoกŠŠ๊mZ=š ัOแšoกปZ๗Vk@ญt@@่ัคมeิกมพฅ๎ืฆ๓อŽทƒะ๘ ๑อ|๓ํ ๔กQ YY์ล้$@h=ฬ‰Kฐีๅ…ะ{[ท/WˆžB๏ฃฑ`9฿J—ใ…  ๓z๊1, š˜็qM*,$ฮž๖”ตkษฺ‡’+sใFฒ๕ฬแ8Tํืฌ‘ฒMk|dmฺ$%ึ’{๏%EŽeฤZvโฐoŸ”Xำnน…Lผ้&แ8Wž:EBBCIXdค*Hฯ3;ฮCj”Y‚w กใŠzชเ!๊wf‘ึคกไญญ7 วy๋สษญํ‡Jูฆ—–์!๗O–ู๋9ศ#/—๋ฑัซศน๑kฅฤzจ๗\๒ิ”MRb…‡๚UjZŽโtก3๋dจ0ซ5!‰@่”ฺ)j<ณy€ะเ @๒ฬŽท๓็'+dH[{ๅ…ะำc29ญl (/„Fะฟลs)้\ณใํม6ษชx|yB๔<zฐIuฒิ•๒ฬŽ$„F*a๏๏T๒ช_„Lำ‰๚?จฯค~žz(bžว!ดIfฆ ข|้R2๎๚๋ฅฤjฝb™z์˜pœษ7฿Lš.Y"e›ฦาฯ%#ึ๐C‡T8–kไแร$Ÿย•ŒX}๗์!=wํŽ3ๅฦI0…Pู๑vฯฆช))j7<ฯ์xป็ด[ฐ€“aํ ศSำ6 ๛ฑึฅฤฟกI๒ไ”ยq ฦ M๛Hูฆ3ร—“ฺ“๋ไภ…ไž Rb฿ey ๛,)ฑ๎สGAtž”X!๔ุโo๗X‚๏Tต>y(kื์xป็@R—ช ,วz:™o๗๘–)dNถBnฬ7๏๎qašMc=>],'ณใํX[ซ>9ู6…kvผใฐ?0ผy)5“kvผsON%aAABฅ„}p‘Bh ถ$๕•ˆxธห๑ุ˜ไB3™(LBโ™oกษซV‘Z99*„โr<.วWไ์x;—L&ืNแšoก7ต๗uมO‹OžoกZ ่‘|ณใํ 
Tข?ฺŸov|  d˜ กฺ้ฦญนfวA่'ูyj ่ึ-ธfวA(hœŽฌUTฅ็1\Žทด}ิก!!!ิ„๊u@yfวA(h=ฮำืฌมšP„PฯB(tมOˆmฏ ั๓ฬŽทƒP @A†Itvผ„jฃ8a žgvผ„Bาj&Dฯ3;>P๚%Pฃษ3;B๕MH<ณใํ ๔ตด,๕}^Iฯฤฦ${[M,๕D<„P„P„Pวšลt@5!zMYฝšฤาcผ`~lLB๕,„มt@ตPฝYีอRTtvผ„~ว๔|ณใํภd˜V‚่|พู๑‚P˜C2!zีt@ฟdMH" ณcฉcc’ฅS๊๏Qx‡ŠŠ๊Bอt@Bจ  ป“B  ุ๊Uฝƒ้€>ฃซt กว˜จ ‚Ptาจ„jูT}ผ[-ัฦQ๎ ฒ–๚IHn!๔ @uMHn!๔<}อ๔่๒uA!vวP๊?SŠzMฤ;„P„P„PG๊Pƒ “MYณ†ฤt๋F @‰&„PฯBhษ$คY\ณใํ ๔–”aฆ2Lฒ!Tข7vมปPศ€./,+รไBAKty‘Bฺฤ9‡ะฏ™จQˆ „~bก๊B฿คฯŸ|WPˆMๅtย7วŽx„P„P„Pว 5 Q j"รไBUํฺต€"„"„z Boฃ:>ถ=yฦD†ษ)„€Zษ0ษ„ะ๏–Z๋€:…PจP3&งชhก/ฆำๅxMˆ“IHN!ิธ/ก q:3&|SPˆ:กhกกก‚Pต ฉsg จ„Z(B(Bจ— T๋‚สBิ „ย(ฮู6 ) Bอ–เB่W‹ฒ†5!๑ฬŽท‹๕w–uZ ูสอิ)„€B=ฉ•จ…&ค94&dhQฌ !!!4@Z3?_•aฒƒL^ญ™—งŽ5P„P„Pฏ@่เZ-ษ$@gpอŽทƒPขŸ[/ห? >Pบดะ| „|‚ ำว๓๙fว๙yพl๊WKœ7&ญชL…F} จ[Sซพ๋_6B๔ผ ฺค™ตšช„ІŠŠ*B36o&ี32สLmฟ|9ฉAOฮVŠŠ๊…e๓5›(/„n7H๋๙‚ €ส€Pศ€ŽM.„,NIทP^…:R˜ชฺขNปใGืฌ๏๏‚P˜ฆิงzผํ$$^}!%Cะd กhกกก –เ#‹‹U-Pžู๑v ]๐ตŠŠHpvovผ„~หt@ก”gvผ„B ่๊b…k‘sPศ0.ช˜|•ห5;Ba4!AV•gvผ„ย๏A๔ฝฬœ 2;! !!๔’„ะฌm: <ณใํ 𐠄่yfว#„"„V„€Bึ„่yfวA(,็ฯf: <ณใํ to›พคnh„)„‡5!€๒ฬŽทƒะo™ “ึ/กะ5 _.v.V5ำี–เE ๔S€๒ฬŽทƒะ๓ู้>Pบ}<ณใBัBBB]@(d@#uB๔"šบvmฉ.x„P„P/B่*€–LBะ[™ ำ‹ (E tiBษ‹n\Bcา๏BU-*ญ๊Boก/–พ”BAๆhพAิ-„~สfม๋k@B่yฆ๚-“aBECEE „fnJข่ Tฏ๊BŠŠ๊Eฝ]ี-P๕้€ฆ—‚Iทบ/ฑŸ  ฏt˜Wฆ&๔?2Ln ิJิ „€ยv้ปเy!& ™ ัปPm ุ„ไBแ3 : กhกกก’!ิLิ „š(B(Bจื :ื]๐n j@อ&!นP=€“ŒK๐" ฐธฦB†ษ)„j“ŒสกPชŽโ4ัu ก V2LN!๔u,xžู๑กhกกก. บเญt@Bจู(N„P„PฏA((ั?i"รไBoVgมg™ส09…Pm ^P=„~ฑศงj N!`t@?šว7๏Bฮj@2L<๚~vŽฺ„d%รไB?1ิ€Š@(่\ ะ กุ†Šz๑Ch•่h[!z'ฺz๎\ต šxfว#„"„V„&Tญฅf@ŸดะuกWท์Iๆีหฒิuกืด๊U@5 Vศ‚\๋IHN ๔h{uกw ๕uมนุY,€ะ˜ะ02+6|•ร5;BonVอฆฺ ั๓B่4<๗K Pฬ„ข]rฺถ €lฆเ!๊ษkื’•< %V๚๚๕dำษ“ยqึ?๒iทzต”mZ~โษุฐAJฌห๏พ›P ’kั๑ใค๏ตืJ‰5๙ๆ›ษธ๋ฏŽณแแ‡IHh( ‹Šฒี๗ไ…ะ„QฃTI'+u กc บ“๓›o๖;3FI‰~Kหศ› ว8ƒX2ถ้……ื{๒'J‰๕๔Œ+ษC}ๆI‰ufฤ rv๔*)ฑ์9‡<1qฝ”Xกม!คQตH[!z^…๚ฯ้๑iไ๙ขู\ณใํ|"…โ% ๓หจกมA ye฿์x;nฒBฦ$(/„žฃ™™>&งฑBkา‚๓Y\ณใํD›duRyB๔<บฟI2>ฒ%€๒B่KคJpฐ„F*a๏๏T๒ช_ŒPฉ„ kขDl้งิGฤซฺ:/ฌข๐(๊mWญ" ๏นGJฌ ด+๎ฟ_8ฮ2 މ+WJู&วิu๋คฤšum$g๓f)ฑๆq‡š9–kฬัฃd๘กCยq–฿wŸ กผณใํž: am’์ํนfว=งํ๙Dก'๏ั9]ศซซ๛ํ)รฅฤ?ึzyeล>แ8ฏ,฿GŽต$e›ž™ต••3NJ,ฝบฯ’๋ิเEไ๔ะฅRb่<œตRJ,€Pู๑vฯะNU๋“Gฒ'rอŽท{,มwฉึภ2›๊dvผใะ?+K!wแ›๗n๗8,มร$คง&บ‹ๅdv|y“`zัูvฉ\ณใํ3=‹tซZ›ผœšษ5;๊๑๗srHซ๊ี@ฃ>!ดดUSBF+Aฟ„)ม?7Tjล โแr<.วcc’kีt@ ส5;B“้อHtQ ขŠห๑ธ_‘ณใํ TำQ'kvผ„€.K(,ำU/B5PX>็™oกš่C๘fว B?อ๑ี€ะฌ-ื์x;}‹้€nง๏ว3; B?คฺข@gPยๅ๘ซฎ„Œ ;ไ๚฿_ช(!ฃ\๎4„P„P„ะา]๐<ณใํ ดN$kำ&ฌ E๕,„š2\ฦ‹็pอŽทƒP}’่์x;ีt@A†‰gvผ„~ฆำๅ™(ีj@yfวA(ัk: <ณใญ ๔ฃ\า\@/Zฅ:œ๎2‰h‡ŠŠ๊Bก์šด.xีด๘๐alLB๕,„€ฮะe,E T•ajXา„(†่ชb฿(Nžู๑vเ๘™ฆบ˜ovผU,hbjๅB๕ส3;Ba ~LBb: n!๔c  MซU/Pป่บใ€œฬ2 hกกก๎ Tอ€๊TB@๋@Šย๎x„PฯB่-†IH"jะ@A่ืLT฿„ไBีIH…ฅe˜@(hnC`อ„jฃ8ฟะ5!น…ะ7: \ณใญ ิฐ?Kย5ขส„t "B(B(BจkMPX‚7tมปP#€"„"„zBฉ: ™e‡@่~ „~mก๊BณเB่ฟK๔O๊ฟ;…ะฟๅ”Pทช้€ปเB่ะ„Ÿ้w๊3%]๓/5ิ€ŽFœCEEu กj’…จS5P„P„PฏA(่zYฆ2LN!2 KLT6„j๚แ<พู๑vเ]๐ j&รไB บ๚+N ๔o†%x๕จI,' ]๐ญ}*+zQA(ะ‘@ฆ!!!TBบoื์x;ีื€ขX=BจW!ิ@B่>‹ จl…ะ56B๔N b}ฑˆovผ•ร”ง@ว4”๚ ผ๚ฉ €:…Pจc N tRL#€!„–ะQt‡ฤ2 ธŠŠ๊Bkะ;~;uก5rs-!!ิ+:ฐfsuง=/„ŽŽnซvมฟl ฒ tiกoูใy|ณใํ|E‘/๚ล"พู๑VK๘ k๙j@€†ฐ๋„.ojบ๏B‡ืจGfฦ$˜Žโtก็3ณHอเะ@hฅ‡P–ี(ส0!„"„"„บƒPศ€†gfฺ(/„&-]JjY(B(Bจ 2 #ขฺXŠว;ะ=ญ๛Yu2ศหล๓ธfวป…ะ๏–๙&!i]๐" MH D_$ค๒ –เ บ] \ K๐0 ้‹r&!๑@ู่คWตx5‹ษ3;๊๑J ัฯ `UZPช€ฏ กกกย 2L‘ลลj#ฯ์x;M^ตŠDw๎ฌึโ์x„PฏB่ํฉรษ๘ุ$๎ู๑v MH“โRศŽ–|ณใญ ด nฉ/s9)ovผ„~ฦบเyF€ฺA(hพ5€– กZ ่ช:อธfวA่[@งF7Tล่yfว[A่วฅu@gฐ*ฅDSu%d!ฟ‚h˜2ั !!!ิ5„j: –<ณใํ Tซ…ฑžผณใBB/4„FfมŸหŸ& ก๛ี&ค|uฌ'ฯ์x+=”:„D„Tฑ„PM๔ใ๙|เhกš($ฯ์x+…ื็5๔ื€šจ-„๊k@yfวA่yM4ฟkvผ„t@g๘š_้ ิ ร„MHกกก๎!Tฏส3;B๕]๐<ณใBB+Bo๕๋€ฮแšoก€Bฯ์x+=˜zฉช‚)„u@E ิจ๊Bu๚'ะP‹๋š)„e˜D `t@ตPทะ‹j9u@BBBฅAจQTB5-b2Lกก^„PmผV*กzๅ™oก ‘a*€~Cc#„ช2LKห0น…P3P7jะ6j กf: n!๔ “.x7 ะ‹BQ!!!T„B4ถ[ทRMHn!j@a พHงŠŠ๊5ิwมป…P#€บ…PX‚ืhw๊๋!–เ@?˜ห7;Baก๊BฟZ\J†้๊rด „-ม‘—ฯ5;BA† €ำ(้ไB@”ศ0อพ€ืJก, ]๐ฟb!!!TBU5‘arก*€๖๋WFˆ!!ิK๊ะ,๒œA†ษ „Z ั;…ะร@ูผ ŠBฟe: ฮๅ›oกฐัuกj|#?€๎เะRZ’อ็šoก็YิLSิ „€&–ะ „ะ2P@ร”ฑˆiกกกฎ!ิ 
{๗rอŽทƒP?€šศ0!„"„zBo5ษ€บ…PUˆt@Md˜œ@่ไมคfhUŸoฉwำ]T ฿ฒน•ฝ…PU๔ พู๑V๚เH…6๒wม_mั„d กofd‘…ต“ฯMิ)„พ•žญ๊€Z ั๓B่๑ฤ$}tŽrแe†< กบ ่/@ว)(ร„ŠŠ๊BCkี"1†%xทฺjฮŸฝ!ŠŠ๊%mPต–ชi%D๏Bื7ํคNBฒาๅ…ะQ๑ํี.xzพžzร5เแะ`…\‘็๋‚็™oก๛z๛`ึNˆžBญ&Qฅ4ิมuํ…H๚ท˜Aกั @@่&mศดh{!zืด5‰ ซH๕4„2ฆŸY๔Rะููdมw {๋+ศŒ[o•ซํชUd]w ว™{วคลฒeRถiฺ-ทvซWK‰5žQ๚๚๕RbMบ้&า‘ศˆuูdภฝยqๆ~;  %UbbLgม;…ะFฃGซฯณข็ะไ… IPp0‘<ฟ`งฐ฿าnˆ”8เ75๏Gž›ทC8ฤธฉE)tnย:rGฺH)ฑฮŒXN๎๋0UJฌ‡zฯ#๗] %ภฉม‹คฤ !อชวุ ั๓B่๔๘tryIH<z$m(ฉแะฉw2น<ค๗็๒อŽท๓g')dBj๙B๔ๅA่WKMHnT…ะ( กe็rอŽท๓ใญซำพ.Gˆพ<ดD๔๗ P BŠRย>ุฉไU๗ ˆิPช@๔WะKBญงO {๒ฺตd๕ƒJ‰•ฑaƒš‘ณ้ไI’DมQฦ6ญzเ’นqฃ”X‹Ž'…[ทJ‰ตไพ๛H Ž2bM=vLdั8›y„„Pๅo Ž๔†$ฌ];๎ู๑VงCœ5ิ ธยไญญ7 ๛™ฃฅฤ =ฟ๙แ8็7_OnฅฑdlำK‹vซ&#ึ3ณถRpœ/%ึูQ+ษccWK‰ู์'&ญ— ”wvผsดํO:VฉOๅœo๕8ิ€ฒ%๘XจblLr กผB!)€ฦ7;n’Nt‡ƒ%xหฦ$ท K๐0 ้\Rื์xฝะMH–™ะH%์}ฏ@hu%dH๔;[‚วP\Žวๅx\Žฟpณใํ T๋‚O9RBำWฎ$a5kjY\Žวๅ๘ oก ‹ๆซฒNง ๔PIิ€vตนCจ6 ้แQ|ณใญ Tืงห จ4…&คY1 ไฆๆmนfว[A่G@[–่\๓=ตOthOคt""B(B(Bจ' TPฆส3; B@ซิชะ›[„P„P/B่D€B<ฯ์x+=TZด[9ื!…&ค•L†‰gvผ„ยผ$†ะ7u]๐<ณใญ „่[—่€ฮ๖ศ5฿3สj@ม (B(B(Bจง ิจ๊Bu ล๎Pgิ!!ิ‹ชPžู๑Vjขช B5Pญ ษ-„Bดภ™hภ ๔Mƒ “[5่€ฮ๑ะ5฿ชืEEEE๕ „ฆฌ^]Fิ „B จ@กะฝB(Bจื ิLˆ „Ijฆ๕๋€^ม7; Bกิ ร*แบๆ Bฯงgซฃ8๕: n ิ Z‘MHž„Pƒ่8ฤ0„P„P„PO@จ•จSMง หj@ัจ‚Š๊55“ar กGำ†Z้€J‡P+Pง ZT ;%จ+};ร\ิ)„~X6๊ต.๏ …PิECE๕$„ZMBr กkึh]๐ ๗1ึp’CE๕ „๎O์ฏNB2ำuก ,jฆ*B?[hญ๊Bฟ*•  Ž!๔  ำขš๊€:ะssตPฏh…B(kBBP4„P„PoAจูผฬษั2 ฟ15B(Bจ' ๔:ึo%Dฯ กc๋คh๚“ uก๙\Vhญส ก+‹J่5’ิ„พKtjTC๒•…(/„ŽŠฌGZ•4!อ๕0`ฉ:กB)€fMH ใ@ัBB=กญFq:ะ”%KHP˜ษh‹ใ!!ดย!ดWxcD_ฑข็ะฝํ’ฐ `ทPGบดภ็_.ๆ›o'DŸ็oB‚ hHฎk\ 3ๅงG'X(/„-7—ิ ๕b’'2ก @รP4„P„POAh๒ส•$";฿๏BuMHฟZd@BB=กP:>6ษ@y ๔HIw ’+!๚แI๖สก %2Lศ€rC(ัร$ค/mFq๒@จn 2 ๓*ม5‚Bh %dhp‰xD.4„P„PO@(,มรR|lทn\ณใญ TะศHณ&$„P„POA่umูu3ธgว[A่แ…&ค‚ื[ีj@gd๒อŽท‚ะฏหhXฏkถ ]๐sb‘Uu›qอŽท‚PขOฌ\zA!T_ZM ™‚ธ…†Š๊ Uk@๛๗'๙{๖pอŽท‚P€Nเจ3BEญ๘|2†„)ญฺSย5ภBต.๘ฯฏเ›oกe5 ปj ก*€ฦ6"_ไpอŽท‚P่‚oก-มฯซDื Rส„่าี€ขก!„"„V<„๊›xfว[A(ศ0้&!๑(B(Bh…@จ6Š๓ๅ๓ธfว[A(tม๋2 =%]L!t@?ฟ‚ovผ„P†ษ1„‚ ่€~มd˜Bจ €U2 h&”่ @' fก!„"„zB]๐n!d˜t5 \BB/(„€.Qปเ็rอŽท‚ะ๋ำ†‘Z>ะ๏$,มB่gบ (ฯ์x+ฆโฦํ‚็†Paš“PJˆ „๊–เ5 ฎdื€B([‚ื(vมฃ!„"„V<„jชoBrกkืj2Lฅ๎T๊!!๔‚Aจ?สิ-„^_Zด›ไk@)…&ค•uกฐ_AZB฿e: ฦ&$ง๚ ะ6ฅk@ƒ+แ5?`Jtˆn ๔าณp๊ฏQOCE๕„ฆฌYCโMt@Bจ@แ"เF๊!!๔‚@จVjิu กพ จ(7„j: fB๔N ๔›%ฅ–เ/4€–‚PMิLˆ „-/Oฏ:ฏV@jBu: Eฝd-‚๚งิณBBฝก จะx !z'ฺv๎\ฝ—_„P„ะ€Cจ f2LN tk›žฺผˆ(„~2฿^”B๖WHA้&ค ธช๚LJบช๚o &^=ึฒ-iQฝบv๎™[ษAAz&ิ ŠMH—ฆีข*ปI๛Bฑึ๊FEฝpZซV™%x7ฺh่P\ตชV*"vŒŠPญVรVˆžBaR5Ÿบจhนฌ…y๖: <๚ไ…ิญแ—aฺUP?„ึ !ฃ”ชuกwตI"Q!ก•M†้‚A(ะห‚|็eิฝด 2฿ จzW๊ีธ!4!=Œฝzaoฒx1v๐ ”Xอ—.%cŽŽ3๊ศาhั")4„S‹eหคฤฐw/I\นRJฌA๛๗“ฌฅฤ๊ถsงz3!g4…ฮ`zจGŠ่1ม3;Vˆ'รได ษB‡$ๆ’ววฎ๖›š๗“๚„žไฑัซ„ใ@ ˆ%c›N\HŽ%–๋A ์wfŽ–๋^ ู๗u˜*%ึ้ฃศ]gJ‰LZEฤฉ]๐<ณใญ\ํ‚•o ก!A ๙วๅ|ณใญ›ฅ~&ะJเe˜l!ภ๑ใœ\ฎู๑vB๔บะ๙ ,จฅT๙๐ ’.จ†Re˜nไฐKย]-วงu่@?ผฐ]s๎œ”X๙[ถƒฯ='็ภ3ฯŒ คlำี?N ทn•kำฉS*๐ษˆuๅ้ำdไแรRb]q8™{วยqฎ{๚iส=;@A†ฉDtข„:#B'u๋O>:|ฟฐ/˜(%๘i#ษ‡๏Ž๓มu๗ชฑdlำ๙อ7จ€&#ึ++๖‘3ร—K‰ูหgfm•๋ัห“็\-%V(ฝ๑โo Qa฿y€ษU•oหn*)$ ิ@ˆ€@H (1„€z‘*ˆ4)กˆ„ชข‚ Š„"จ(UฅW‰„* ๐G@zฏ๎ผ9฿uฯฬ๎ฮ๎ฮฮฬฮ~ฯ๓>ปsหนw๎=s๏{พำ๚e๔+exq=w|s๋^k์„๔฿*0 ซŽ/f๎๘ๆึฉ˜8 “ ่!5d˜ะAึ๋ษู6นo{Q'ค:๋๑žชแ1 ะ!Ju<ี๑•œ;พ9บ`ฆาPชใฉŽฏŠนใ›3ก๊„”Lล9ฅL/Ž™P๕‚฿จฑ|5ะ›ะฤ€~์ะZ๊dำแŽI >†‰™Š ญ-ช^๐ษ8 {”๐%€ ล„Vฅ h@฿*ฃํ ]`@Whb@{Vษหฐ&t^Sชh]…ต ๕6 ™ำ่˜PLh-™ะqณfฅใ€~ญฤ9L(&ด๊Lhb@฿ฑาD฿i&๔ต…# =ซ่eุ.๚l0 ซ5ถญEฺ!๊ฝเ฿ร€BณYฤl^ะ๗ƒFaB1ก]ฮ„ส€z๔“N0 ˜PLhี™ะฤ€๊ๅๅ ผ8ฺlBี )ˆงUR฿!๚„ ฃฃ4่ะ6Xํ2ก‹XVษ8 PhŽ]ƒ~ด&ฺฅLจฺ€vp zL(&ดK™ะั7F@ฟ\ก—F›Lจ" —kา ฉgพdBŸ tTดึ:!5gBิ&ิ ่GŸฅŠ ญธ ง^๐1šUมw˜PLhU˜ะร05ถฌ‚๏€ขMh2าgUl@dBŸm: ำ!เ฿ฆHh?ซ฿ฺวล€&Z{&tเธq้8 ฅ์„„ ล„Vก ฟa๓A#ำa˜ฆT๘P” =|… hฏ*~ฏeBพ|รjต9 SILจwBz฿ซเ1 € ล„ึ– ]ุ๋cณ™ิึจํŒ0ก˜ะŠšะsืถกง๕(็8 6กช‚ณDUฺ่“ ?3Z5ก†i™}2zh7z็eBX ๏L(&ด๖Lhnะr5tว„bB+fBgWfะ™P ร4y…ช๋„ดjะ๗‚^ ฺฐญ&๔้…‡a๊NlZm*Zg=ฒ™๖ฤ&&ZS&tม8 U๐{•๑%€ ล„Vฤ„^ฐ๖๖ ƒ+3hปMจ" 7ะณฌฒm@๛{auŽŸฯฃ’า๚L0 cซเญ๋f๏#ก> ำ‚q@๛X~F/xภ„bBkษ„ฎำ8 ำGeŒ€bB1ก3กั€.hช—๛ๆU๖(hBs๔ฬ F@W๓จ็k^hฝ,h“VžM่ า6 
ั€ถhBฝ|6^†L(&ด–L่:Mวฃ9L(&ดฌ&41 tณ*|,dB฿˜ูค ฌ Pอi>#่F?‡วƒf-^ไ ™ะ‚]ตqะoucƒUะ„บeX0ก˜ะš3กษTœ2 ปW(ฟw z&ด*Lh…gBสณzะ๐ึLh2=Zฮ*๘q;>ฝnฑญx๕l+MLh2}w๋„ิœ mา&4ะiม€~dั`B1กตcB“NHyดR ล„–ล„ส€zดาm@•็/๕เ^-™ะดะA๛๏Qฯว<๊นXาŸ ?~|ฤ# &‘ะ`@ง๚Aภ„bBkว„.0 ƒg*=&ฺ้&๔Fช^๐S+”ื— :อืelถ:ลรสn@ว๙๙ฝำมจgณ&t๎:๋dฝเ?ล€.lB‡Z๏‚} ˜PLhMšะ^คtoซ|;#L(&ดSM่}Tฺ€ณุ‘Gฦ๎.k}็k๋๋ฌa2e1 ƒ<๊๙@.๊9คฤว™3ฐพพadใTœ‡m›˜ะ>V๗n}cะฝน,P1บฮคI ฟพ๕ึkQG5โ๚๋K’ึคcmธ(ขŽฆ3๛ฆ›ึ;๒ศ’œำฯฎปฎaฃใŽ+IZ?ผ๒ส†)'T’ดฮธๆš†‚9.EZว\|qรaณgw8 oผฑกgฯžYว†j1 3ก;O™pฏฏ้ฐฮูhื’ค#lm๎ป๐ชงs฿…W†ดf”ไœ๎๘ั๙ ณ7฿ง$ixยY ฟๅศ’คu๕'7๑SK’ึov8ฌแบ#X’ดzใใyพtq7ŸŠliฃiE๎wญŸsg๖‚ฯขž๊yถ?ฎฏE6ŒSVMLh0Ÿ๏๖ฐ แข4,oฏฬ–ฺตฝฺสF์฿‘Smm#๖+UZฅ<ฏmJ”ึๆถไ๎ำmฤืK‘ึิึ–ถิ>ฅHk [ziถ๔^ฅHk‚-~ภบ6xร6™ะบบบ†}๛vXuฝz5๔๎ำงบา i,Hซ็คด๊ซ4ญžฝ{—$ญ^!^ฅH+ฮ‚”ตปŠ^ LจฬB฿}:ฌ๕=K’ฮ‚ด๊๊ซ0ญ%๛Ž}z๕n่ำณWiา ้”,ญ๚^ ฮญiyž—ั*็LH<š(ใ๛ˆล^ๅm๙ฝ]m3่ข๕œ๋้฿็Ÿ”แšฬI hพณ‘ญm้‰ฃlภ'2 unD*•Bงaผ-ึฆฮ฿Eจคˆฬ๕AGUYbhะ)ิIบ!่eสห‹Xฌe‹CํึNรutะทKh@ำจ็[Ze+ขปง]ˆmน!ฺ๋เs๛[oยว‹*ฅ๊ญ๎โA6h1~iตIo(พ4฿๏YแsสขžU ๊ ˆŒฆขO[Œ~ช พO…ฯ)‹zช๊›š*€ฎชุีฮ๓ษ W|๖ซเ๙ ถๅ|ุšF=๛sซj›ฉŽ=๊Qฎvึ‹V๐\ฒจง†๕ษขžŸ็ภไ ]ญ๒ีsะ๕ะ<+ZๅBS๓yฏลฮ=2ŸC*t:ฎขœ๊uŸF=แ@ฦ!#j+ฆ฿หpI Hึwƒ1KQ๗โ‹C›)าธdฮA=ส'Zcิ๓ un4‡ช๊4dหผ ฯ‚h1ขย0%ะS„~ŽKQ1ึ๓฿๋วAYŒL—›แ›>eD= ิน๙ิKMำ๐=แๆ”แR ปธ้ฮฅ(;ซYœ3Sปr…ž—น~bิs,ท:Š^j฿๓—K6p๔j\H8ะM(ํ‰หว [ฤ ‹ๅ๎เฃj~E=ŸถฆQฯ~(5ฌqXUี฿hqุ—z.Mท็;วy„ฮgูฤ|๊78ฎŒวฮG=_๒๊ส(Eคกฺ)"ฒ8—ฅrzะ \†Ne˜พ‚๎ ฺจŒว^สใ๓rะจKอ zล_Ž๊มธู‡เาณธ›OEš็M+ำqำจง ›๖๓ษ-€jBmน‡izภ^\šnมUAwpJŠšฟ(๚๘ฆ|พส1Jลา~gฌiิ“฿2T=้ฌ(/ZŒžŒเฒิ4ท]อe( ฮH#Qhผวฝ0WืษวฬG=ณ฿ํŠ่Šdcฮท8p๖eขƒฺCี.เ2tตฏื เ|ฟณg แฟัgจ'ิ ๊=?อ_ps”ก\j็ƒ~ฬeh2šŠvjจฃ—๖้ไ฿c๕?‹QฯธPหhFŸXœฯ๚ y.K—G๗๓x.C›PธขŽOวอggฬ–ฑฦš‰4ู๊“[ Mชh่ใฦ๔ ]Uช3ฺA\ŠขQ^Ÿ๔ถล(ไ N:N๕ิŒJ/๘๑JU๐ฦšญ;ฐฆลiaSf๘r€ฮfำ ษ\่ ๊r/IE…˜ดkฑ„›ะ]นE™ฯฟ[Œห ้คใdณœฝไฟซฮŠzjD„Iึš็ปAG็–]t —สภ๏‚Žไ2@g“ฝ8_3ฆํJŒvบ—ขYึบลb=ๅ๋%;แฝhfmฏŸ๗฿ำr๘ฝ.ท …ฺๆผ }rห~๔K. ” HสF:=่้Aซ๑nB7เR,„ช0ีิไc7Ÿ1Tู(7›๊ิ๔i™/Š„\๓hœฺurหฆzA‡&Tะ™ ๖ย;รลAEHงU๏aฆญ>ฆธ %jศklbขฟฅž[=›"‹z>็Ftู2ฯuƒžฐฮว*‡:ดฝ^ Pำ—oฮ%‚Nd?‹ํ็* ำƒV/;บ ]ŠKฑ ด~ถš]ฃฤ้ฏโf๓k:๖n%ฃQšJ๔ซ๚šEี๎?jf!๑๗ๅ2A' (จjxถๅR@ตภ๔ ีว7^t็ัฒ‰๙Ttr\'ไ๙,๊๙O‹5KTษw— ึฐOC๙)ิcฝภณL3๋ีัํฏ^เ๊อๅ‚ข๔็ ๋ธPญคำƒb„h.Kู9ึbtบ;2ฬ๓พ]A•0ํัžถฦญ–จgsœtงu^o(?๊<ง!๔voe;๓‚nๆ๙ %BM๎ฎต8’ศ .T;Š1=hๅPว”ปแCR๑ฝ {J˜฿MNฃžOzVๅืCmBUe๛+m*ƒš:=ๅฬbะธฯZl#๚ญ Eธ„ะฮ็ศvว3พ*h1. 
tต œNzฟล^๖<;—_=ึMพ๋7…oZ์1,รXŠศไชnj_ ๚ะช;ู๊ปXฌ•8วb๛U่Zd‘}ตรฉ๛O๕็ฎ๖?อฬŠ/ฤ์๔ลi„๗6F]€.Žช2๓ำƒฎภe้ฎฐุFท–QAๆ`นชŠr7๋x๐4๊ฉ6ตOธมํ๊m+%>มโ@๙ท่ฟGจNิ๖N=g[ฃ๙็ึฑN†=<ฝK-ึ<ใU๓จ6ฆด฿ด0ฦŸ ืXlR7ื?๗ใ๒@-ก๖$Іชš0›/{ฅฌ’"ฃ๑ง~A+จนมณGgาPVูค ]9๊YŒษžๆๆFืNm[ีัเt‹ูถฒ8ถ์*Fปฏrะ?hคล‰๖ฐฉผ-่/DชตD'SัQ5ู๙›› 5™zฤอ‡‚š G‘/ฐฐฑลฆš`aˆ1๔WWe ลฉื ๚’@พใฯ‚{ฝpขv๔jGฌ™ธV็’AญShzPEsi:ฬƒAืุwRดFัNM๛ผฟ ๛tะฅQฯวญ๛y;ย ษแAฟฐุๆKนิm‹Mh^wฏ}{ฅ๖Œ๗นd~ฎท8อไ™ฃดปAYfฃณคฟ”ฟthะ‰~Nฟ๖gำ๕žGŠัพOชk’๕๗๘๗ฺ_๚YT๒ฝด™†rกุh/€โ&๔๗nF๐๓{ห#y}ไRi~Gด#4๊™ต+žHvจI๔›W~๕่Vภ€ชMนฦ้ฐ0๋zP{า›ูๆ›nVO0ฦด€ ำƒ~ศส„N๊็ชNBู่—นn ๊ซจ็ค`ขฯดอ๊ศ$›DUซถŸgCพAห…UอซvmTnIวำฅ ญ’M๚ฌ1=hฦฆnศฦV๑9ฎ่…ต๗UtvBิN๋-m~ๅŸใ‚jtd„โPO{บ‘5ีัh ๊ผถ—ฺำƒ6ฒฝ›ะUxnห%ๆ๓FkœๆYิ๓A#๊ (ŸฟๆRM`ภ ะ–๗†žClฐ็ฃ-น,ะบ๛๔ ๛นIซฆqณ๙ฏีC]mzาY$‹zชV๕dkHนศ๓ี\ h#*ดจY—& ธ™หฅขปNช)ื>ช’sYอง ค†9)ถฉฤ`ฟw[ำจgฒ5`ช็“}ธะ์๙g—JMw›T๓Oฟ\แsเืXSฤฉ๓Œ"อฺแ์MŸแธ 5ิฆOอpจЇ๖ ™Œ>ค ฆหซ๕้A54อ:ถฺเ์&Xsฑ๏fญOง่๗uณšF=ฺย`ฃG<ด๕–_”หๅ"TฦGํษญ๋Y฿ํซพหIo7Ž๊๒ฌ฿ฺตLฃžo๘k5 ปMชา_ฐ8;ำะ.}ิธบ2๓gAฯ}Zž<`ธลj๚งŒจ'ภ–rƒ$#šMฺ{akบสKส|L™๖ๆfงIไส่ฟn1๊9–,ะHWŸT๓#]็ฑค›๚งญiิณY  e˜ดmไฃžšNใดŽแาดฆ-Žญ,ŽษzCะถฃสme‚Uw“‘ผ ไงU๛K:ึ4าหฮ…ะPlฏปžณ8’Gก&๒qruย;ท*…J้ฟด8=่ซ eฐc€ๆ‘๑œๅ…ี๑A;bqrŠu“ํด~้*šŠxUn'T ’}จล*่t`vRupjnธง ƒึk็1'บ ๏ZDTแ/AฃนP๋yกK#Hh†ฎฏZ๋“ts‚&'็๔H‰ำ_ม่Hื&๔ภ2ๅM97Yถ•?#ณ๕jต\ะQ์;ฤ—๋(่Wถp๕xิำs๋๗๒`ย๎็Ž?ีอeF/฿ๆ  ƒŽทุแRฌ้๛ฅltFะ9A{Xำฑ’7 šโฯ๎๓‚ฮ Z+ทฟถ๙นลัM~ด6ฺู‹๓‚Ž+ฐNี๗o๙ƒช&Tฯ?ตฒองนH@!๔ยภ_”…แ„ ƒฎชภนจณเWQn6J‰~ฃ_ไ–wบ 57ˆ n*EZ฿หื=่…๚5+™A๗๎หี2ๆWณว=ๅ†๕›๔“ฌ,่Q7˜ๆF8›y๋๛^ฐูว ็ƒ&๙บ|uผฬช:ฐฮ๒๏7฿ i2—šษ๎V?๗ฌ1ฺซ|šE‡ฟด?ู:ย๑ฬO'ธKะ;็Xฯ(fฺธึrgŸEšYŸšะ~VxธฉๆLh?k~:D•๔s›ป Šจk นๅส๙(”๒๖-ไ>็z^`ฯ[ส๏ใฏจ๚\bTRšฯ‹‹ธtnaฅf๒๑๒#R๙‹Aง๖าฯg@nP~…"ฅƒ ้๛ัDง8บšอZ0กG$โหาฮKŠฆž่+฿จ้ฒษ๚qn๛'&4SYPีึ?ซR{ี}›๙ฉ U{%่ษz๛ฯS,๚L๎๙˜งcnข/!{@)YD“rหo๒’ฐy)]—Œฉ"Miีๆ฿ผ๔~ฟ?tUb฿ยโ”ฃ;๛ร}๐ชด=2gB๕€žํวQ$๋นb„ชw๛“žฆฮ๋๔ฤผ๊…|ฅง๓–—่ๆvื<O๒mKภอ๊?<\lMซ&๗๗|๘คฝ[rRFเป#Cฯ}ูbค๓Ÿ'“ะ2 ๖RbB7๔|šกYผ.ท8๏ำ~ผkศf†๖O๏!_หฤจ^็ฟ} |นชkoทุฑๆฯiต์nพ์A7๏$ฆš7กŸ๗็ฤL่’ํง๛=Hั3ํผฤ$*ฬLt”฿ำ5˜7™j›บฑ?ห๓ง๒6Ia(oBW๔๓[>—–๒ื‰ อ›ฬซ-ึ$˜็๑=XฐทUw{X่Bจ'ฏZ0ฆำ“‡ส#ง_Kถฆ“ลฉ(หWEœฑพGzฒŽ๗—œ ฤอlฝิgม\ศ„ฎ๏ๆaบฟ”—๑u‰P•ัœไผณฆี`P{๔๔cฆ๙ตN9+)$แŒ™ดlbBwหฅก‚Oี฿ฤ๓าY์€:ก€ ใ็ทD.ญว=h™ะ๓r๋ฏ:)๙ฌtN๔฿รวึ4ฒ ะ.v๒าtVtœGtาชมมB็ช‹rฒSriๆMh๖ ํiฬ๐p}bBŸO>›?เูŒ U[ชูน๔m} ๙wƒƒขฑ•ํ.,๑QฝรW;า๛r๋e^ฮ™ะํ’ฯCุซ%ห†๘v-™ะksว๙ณ5m(#ชฺ่6›นอbถ9:ะ9ัŸ้Žฤ0(jชˆ d™ขMh?ž’,๋จ ยŸปƒZ8—bLhšW.๗7ก‹x!ณd๛ลผ0ถiLhŠฺฝฮ'ป@Gั๖ ุษx>•ผ{๘หRU*j๚/‹=ํS๚ตVLจข7j๗ฐงqง?ด‡&&๔ๆ\{iปGช๊FEB_O๔ถ5V ๓ใ(ข๛ —ๅVื4už'Žheป› ผX๗L๒ซ^ฦฟฯญŸ่๙ตobBว'๋ื๔๕๙๖•oดbB๓M.OŒอ`/T=ไ.U๏ซ๙ภ9-˜ะ5<ๆนูL•]๗ษ9*3r@5›ะณ่)๚๙m‹อ2T33ฒ„&TMิ\โz‹ํFUXXหŸ…ลšะ™ึุ&Y5M๗'๛็;&ฉเ๑จ็‘ๅI ญ™PตทV•โ|Umึญd(?๗R†n๖ฒ™๕ะyี<งนมKM่.ญ˜ะไ"TูK{XbB๓+O๗ฌ€ ฝฦฯฃ5Tuถo๒ฐ‡ฺF๗๘ฺVถ๙ลถ•)Š~ฮ๑gYŒ8ๆ๓๓›ษg™ะt๘ฒถp›ปพn๔Z2กฟjม„์ฟ‰ิ ^ูŠ ]ยฯใsE\ซกnTtƒๅŸ๛\สŠš๏S €ก็O]๎้JึซyาŸs๛ฬฬ™ฬฬฬฉเฎ?KฯHึซ045—ฦ-I๛ฑิๆ]5Iš‚ธO’g/N๖[ฤ๓<ฎ^™3ีjš)Emง๗O jx~ล๗_ž์ฅ`]a”3k*…งU๓}<*ะฺวำN;Zภ„~’{(ชสชfL่LL ฬทW๎oฦ๖†ฺfข5ถ M›“จทฝ%œๅฝž‡ณˆฯžว๘็zMพชc)สtbฒlkฝMhK&๔X‹ห ต้|?gBe(vศฅ๑ฐง›šWีv ๒e๙^๖๊a}$Y*ษ#าœ‘,แ‘’หผ”ฌาk{$T‘ห๘‹Uใ‡ต€ }ยฯAฝ1ญRี๚อ˜PฝTU%ฏjฦ๏๙>WZใุคzQ_ํ้ฮ๒s9•[-P[MUYชฝšz๒ช๚ญฤษTเ…EŠIškMๆ๓฿ฅฃ?ฯ็ Hy*&๛qไfR๙๓ี˜PUฏช:ฯื* š3กว๙๏Sอ[ฒถขc=ฟ๋ท4{ฏนนึ๏F=œก;ืำV๕๓2dจ$๊Uฉช๋>นๅ#<*Rค๖dj๗45๗า•G‚vJ>๗๕ดe]์Ÿณ6vjืคูึtถ™ ตปR„๋DO7k6 jขฝ….บ ทท[1ภอŸ๒˜fถษ'ฃศฅFXะ๐4ชฮ.4.ญ๒น†6šb มน‰๎Pขˆซ:ศmํฟ#ฯ,๒จ<ฟqฒญชฬWหํฟ†5Ž"!†{ogOG๛ฌZเ˜Šฏ•๛ฝ}ัๆLฆ –๘ต™–Z@รiMif]6่9ั0cKTมuัะdรศƒฆฟ<%ทlฒลI>tคˆtdX5้มคž&-๘R\฿<›์Pz4ฑๆqฯOoชYณ4๙E_7ญˆดต83kฤ„jา†ท-N! 
%Dณร1ทLณณ)๚นYำส›PอVu„ล*mZEwwc›2โดชง-Wภ„ :ภทั,tc“u;๚)๛ไ๖ืฬ]฿๗5kXฺผ`จ_Mi{vะน๓ปฤ๗€r—ล<2eปlฏ ีTฑŸt\ะัn0H๖9(่M‹U฿‡=๔~b"Uลhะ A_:ีำ˜เ๋ว๛๖๋'๔ekœบ๖ะ W๘๛=tQr‡‚ฎฺีฯ๋27แ_๗๋%ไฝ -rหึp#๗DะํA“u๊ภดVLhƒ5JํฆT๔ตุ†t๛dด>วŸ3†฿๚s๒Y&z~ะF›l๎ห‡}`M› ๓๏<&h1?ึฒ-\Ÿ ƒิ‹ฌP๚บ ›˜,SUดขƒงธฉ{ะอA|ใA[ถม„~–3pอ๓ว๘๖‹&๋{๛>™ ฝ2่Ž ™‰ฮ ๚w๎œฏ๕พ—3Ÿน๑M๗ษb5พ๖ปืฯ็ฟ๕น๏ดบŸใpฒ @i ๛ศ#‡b7]ซ๛gE ็ZŒ<ฎ๔‹Uไลšะs|ีbฏ{ฑฎล(cŸ9}˜˜ะ›]3s๚Vฒฺx^๏iš,Ÿๆ฿ofญ้ศ\๏tนลj๛r฿o‚งEศ.ฅใ‹m%-1eŸMM–ฉ๓ŽฺEช๛ภาjซ •มฬอhFอL่Yn0[bฆง9ไ:พ|%Okี"ฏ…ฦK}%h‡dูNซ๚ „hฯ sหิK9‹mEีมGฝหj1By•5฿>ฒญ&T\twะH‹=ใopœ™ะฑn,gY์m/C<ูb๔2;†ึ๗ฯ๊€๔”5F3ฏฑุฆtผ/mฑ”ฺ†*๊{pะŠnพ'บัŸœ฿™ฦXก]ศ5ใD.@U๒y‹ฝอ๛&หT= ฝcฑŠz?7ช?ช™ด๘ถYdSœๆไถ‘กM‡„R๏{๕ฤWyตอ9่Vk‚ลhจข”oYlวนป›a๕l?(ูถฮํLฌj๔Xlห*ณชจๆEn˜ีฮ๓O—~เ์“คฅ๖ก/Zว฿€ ฐ•—ŠWใRT-ส™/ˆlgฑSTWเวAL(@—a”5ป"ช๒_ฝ‹œซšK\ร-ภ„`B0ก˜PL(`B0ก˜PL(`B0ก˜PL(`BŠ0กZœE!ิ}ค—โกฒ่ML(@S4ํฺeกบž%Bจuถj‡ฏXใธš๗]๔ƒ uฺ‘ึพACyฃŽๅ–ิ>ปฝดIะิ o]๔qะwN‹CŒk็yœ๔rะ‚zs[j฿„พS`๙t7•_NLๆฺAอ ฺ9จoฒพฬ A|๙พํ๑พ๏จวชz!h— —‚ถษญด~ะrA‡ํ4ฯq@ะ>A'๘ถรrว[%Ig๙ iŽฟAะZd€ส›P๑PะฏลƒtVะ)As|}_]7ก—mฑj^\๔ “ƒ.zbิ5eŠล)๚tmnษ~ฌวƒ~tLะฆž–ฮใข s}[Eq—;ฆพnDะ'Ac’ดeค_uC U`Be๏๐{ธ2z=ดSฒพPu|]๎ณ"™ทๆ–>1‘k}ดtฮ„~`1’™ฑฉoF+ว๛Vะํษg™ไŸ$Ÿ5}มฟT uะษgU‹+โฉ่ฆๆข.่ฤVL่@‹ีโ็๘>2„/&๋a0hRฒLQฯฃr&๔๖\บ2กŸ0:7“ใfฑญi†šผn1๊jž๎ d€๊1ก บฤ‚o7+hk‹U๊2p?lม„ชญ็ฝ{+โธ™ล6ฃo&๔™ลh่eฎ=a‘W™ะ+ ˜ะืrหU๕๕—๊xGๆพŸาTณ‚‚>็FvYฒ@u˜ะufีง]šๆกœ ิ๗หXล้€dู9:7่j7ง™ŽqsธAM่J~ผAษฒ |?Uั฿๔ฃ ?’*cB฿ 4ฺb„๓่ 7,F'ณ6–Z๖จผn&?MLจx&่ˆ ล‚๚[์ม™5v ํd&t-7Ÿ+p^ŠfžืF:ฬฯiJb‚็0ก:ฟ๗}๙d€๒ณล6’™TU}Uะึด#าขAwY์ คํTmฎ^้'%จ‡๙รng๛2™Rต๙|ู กขฯ๚:๕ฒฟฝ™๓ฺ1hพล๋฿ถุ5eใ ง ์wXr<Cƒž/ฐฮ]mZ๋ษี: jใ>|~ฉœฉํLฒใี5ณ^็กถฃวq; ฌ๔}‹M†r9 œiฑ)ม$.@ePc๏?p จํ๖ทŠูP=_แz@ Pgภว0ก€ L(&0ก€ ภ„& &๔ํ }ซX{อ@•U[m‚*ซ6‡Pืkm1ก !„B•HE™ะUEจZตะL„Pูurะ๗Beี้AgทAณh•Pj~๔บ๋๙ ƒฮ Zง้Œ :พฬ็พwะdn!@ืใ‚ [-ฮบ๑… mƒฮ๚$hฟ6ค๓e7ฒๅไฦ ฃธ…]ำ„^Y`๙1A-้Ÿณ8i€"ง็}=จWฒ๎ผ ๗ญq€๓ฯ๛บMƒN๓ใœดb๎8ฃ-ฬ|qะOƒถศญJะ‚~ด]ฒ|Jะฟ‚ฎ๗ใยญ่๚&tQ‹SมํแŸง]t€๋ ห‹0ก7XŒV๎eฑšํ ๅ}าฃง? ฺ-่๐ Ÿ$็pJะ๙฿ zา )& FMจx)่„fึ-๔iะrนุ๊๘ห‚N๔งอofป‚> Z=Y62่ใ ฅ3ี๑5hB_ตฦฮFฝ-F#ฏ ๚[ะ}Nnล„NฐAฝำ๗๙wะ%พNUฏอ :,hีdฟm‚ฑ…็17่K˜P€ฺ4กร,Vว๏ไŸีฎsฎล6šc-F%Uตพy &tฌosฐลŽOฺGี๖HถQT๓ˆ ,F>ฯ๗ๅ{zz3 hL(@mšะ3‚ โŸฺ=Yฟข›ิฬ„Nvร™r ลhสM9šฒžงฉ6ฃŠ ชบx ็~]ะฑB€ฎiB๏ฺฤbฯ๔ƒ‚๔^ะึษv บสM้Jฃ'&tY7;ZŒz ฺาฌฦUGงC-v^สL่ค ํ-Vห๗ณ8$”ชเ ช ๚ซลฉ:9 ดX]/ำูว๗‰ฏด&ท ๋0หb[MIm=ีๆ๓8k์p”ฑrะ‡mRฏ๔,๖|Ÿlฃ๐7{Z2ฐ=‚Nทุ๎SfTU๑๊šoฟAะ]Aoธ้•้œœค78่AO๙q็Y*JU๙ณƒ๎ฑ8ึ)ไ๘ภ”าผษ€ฎoIENDฎB`‚xarray-2025.12.0/doc/_static/index_api.svg000066400000000000000000000066771511464676000202370ustar00rootroot00000000000000 image/svg+xml xarray-2025.12.0/doc/_static/index_contribute.svg000066400000000000000000000047401511464676000216310ustar00rootroot00000000000000 image/svg+xml xarray-2025.12.0/doc/_static/index_getting_started.svg000066400000000000000000000076111511464676000226420ustar00rootroot00000000000000 image/svg+xml xarray-2025.12.0/doc/_static/index_user_guide.svg000066400000000000000000000144351511464676000216100ustar00rootroot00000000000000 image/svg+xml xarray-2025.12.0/doc/_static/logos/000077500000000000000000000000001511464676000166615ustar00rootroot00000000000000xarray-2025.12.0/doc/_static/logos/Xarray_Icon_Final.png000066400000000000000000000406061511464676000227240ustar00rootroot00000000000000‰PNG  IHDR ฤ ฤญ pHYs.#.#xฅ?v˜PLTEฏต!l‰I“ชk่่๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏต!l‰I“ช๎๔ใ€ฏตI“ชk่่๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏตI“ช๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏตI“ช๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏต!l‰I“ช๎๔ใ€Eeฏต!l‰?‰ขI“ชJฯk่่‘ํ๑๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€ฏต!l‰k่่๎๔ใ€ฏต!l‰I“ชk่่๎๔ใ€Eeฏต!l‰$n‹&q)s+v‘.x“0{•3}—5€š8‚œ:„ž=‡ ?‰ขBŒคDŽฆG‘จI“ชJภะJฯK˜ฎMžฒOฃถTฎฝVณมZพษ^ศัcำูeุgเiใไk่่n่้t้๊x๊๋{๊์~๊ํ๋ํ„๋๎‡๋๏Š์๐์๐‘ํ๑”ํ๒—ํ๓š๎๓๎๔ใ€ซ”ำ{WtRNS 
[binary image data omitted]
xarray-2025.12.0/doc/_static/logos/Xarray_Icon_Final.svg
xarray-2025.12.0/doc/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.png  [binary PNG data omitted]
xarray-2025.12.0/doc/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.svg
xarray-2025.12.0/doc/_static/logos/Xarray_Logo_RGB_Final.png  [binary PNG data omitted]
xarray-2025.12.0/doc/_static/logos/Xarray_Logo_RGB_Final.svg
xarray-2025.12.0/doc/_static/numfocus_logo.png  [binary PNG data omitted]
kB”FkNg5ฝ‚v&์?ธๅ`}๊‘`dc๖8–r™ฟฤ›p๚ค]ๆŠถพ'็?โร‚xชŸgzื4สIQ6hoŽเh็จ6พษ›Gส๗[i ๋๒ส๎ๅฑŒ?*๗qyg @{ดะ3†ง…e#x…=ฐหSV‚คฝฐY€,€ึศ:q๊ข๑ผ้*ฏ๛{—v*œฮUI๋ใ]kวGศŽๅƒ9 yฬ&ržา9KEM– xz„€Oแฐ—s?>๎นMฆๆ’NOˆะML‰๐xูfŽฯฅ>>y%žๅ๛๛ๅtu†งzX่tฺษž'˜ผฒ๑! J๏D}ว,ผOj]‡4 -ย`ปงะ)BW +๔อœ†๋MNรEa๗ ]#t†ะ ฎ9฿l&เUั๖ึโ๗zPำ๖Nษำ$*dYPเ;=สโชหAถ'OH‘บIฅ‡yGz{€l$-o#tฅะ‡ƒฺYœฒ๋fถEŠ;™3 R‰ีay=ึrถŠU9Nt4{Ÿ็ค๛_ฒงp k*็wo๏rˆy\ฯไkฌ.ด@1QBววึržUŠW=œOR#ฯเ+๖ฟ๚ASN_pYพภ+g1เฏล^ล†<•“ฆีs;Z‰W;ล๕นฏ๑DฒPFSฅr"}ฯm้}Nทw็o>€๓ๅ.Rฬh‘ณถค5NYEKฬm4"จj$š›#˜ผฒตšwชฯโ๛hผตrตs„P8ก{๛ˆ[Ÿฌภk5ดฝ<–…tNxฃ&™~M‚Fgw4@6jๅ/œ ็8Žnvยˆ&ฑวกํ<ชษ๛ฐีp™ตg0ภ'ภญ,i%๐N๚๖ลrฮธื5"h+จ Oฆ F -ใQNฝ+ตœ<สจŽ๛ฮวช*ฃ•ูฝ8O~ฺ๚่JkKๅเด฿ศขGะKล˜S4nog฿ว๋๙๊+cเZMm/oeแœH๔˜SRŽญP_aƒุฒ9šO›z‹r {.งฺ™ฤภูหพููUy.ึผ.รช‚หๅ„2‚ม`0X๕y๘ู#Ež—๕y๙๙Dก๓85ิ คฯธ๔0uœเ_ผ“{#๖์๔)ฦVฒ๗ƒม`0 –ิบ5?ไึ3Œถใอ5ฒฺ๑ฟี+a ƒม`0X& K0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0Xฆฌํๅ7ืด=๏*{‰฿]mมฎoyฟศ๎Cื2พถ๋oญžห็ทYฏJmKฑ3ๆบNต‰ขŒาจ“ผŠRj}^ฬHข]๛:ฃฒื๔'q๕Wžื ;6๖ฉ—y•๗๓ูุsฎจi:๕šฆœKZHhCกร…ฦ]#tฃะUBg  4Lจ—๓๗โw๔{฿F,*W}ƒะšB› mๆ#๚›…Vๅgj4น฿งžgrี}Wชฅk)ฎฝ๋o‡8+žษเ๛m*=็ ๗}"‹๒บํ ด’ะBง ๅบฝV่\กฃ…6Zะฆn=ฺRwกกB๛sนยuฟฃ„6๊Kehู–j…–7ฌc*๓u„–๊มํร๊๘ž  ุฎHฝใช๋L๔%ญeDuฒ…aylภ฿ฯฆ฿ทโ~}ึลๆึ;r[ญแพQˆ๚u_5ก:กถBร#ผ.}Sm mI๚Vฐ({Y๔›5่Y5}^/N7๗y.๚›ต… uๅwท๎G"หแg;^่"ก„ฎบ@่Ÿ<ฮ๕.{F˜*‹๚“~B[ ภืฟž๛บK„NฺQh)ช7ำ๛Icๆ0Wูoฮxฎ?แ฿.b ค๖ฺคiธ์6g s‹ษฃŽฅ฿ฎ๏•ใญซ!tŽะu\ๆ4ฎ.ด_ปฝe™ทw=ฃmู ฎไ~>{ฯฅ7สb †๗…j๑ะBฏqังุHฤ๕|Hกg๘s 4KhŠะGBท ํ ิษ๋~RC|ะโ>ฒ่77yt๊ปอ“๖M็ƒ2๛ˆทๅฒ-พ™€26ฅพตnฉ็o\w/*Ÿ๏ดึ.ด9ฏ๛qูำx๗บซ^[xํฒ1๔ํณŸๆ•Œบ8`ึี c8ต6ว;wฟ~ t/*๏}ฺmหไmA?ื‰๋}๑ ๛ŸํแCฬ€(ีŒ๚…พUŒท๚w)ฏ฿อุัb๛>ฒ—ถA™ศ~ข์ฮศ&5่”ผuด„๐Oลฬ3่X{อศ @vซู ก<ลหF5š-,ศŽทYา‹B= >D@V๚๐;๒rš_z•๙3~g~7า.‹0๗๛ฉ2บN1"-่7กC แy ƒี /m5า<ก1™†YZ-XZูฅ Vl๔ธฒŸExwœ0•hA๖น ,๕~ื'๐ูt…'@ปซa‡สG>ŽฏgŸณ๔ํี”ฦอน๏z/š0miะฯฅฒŸRปตูŸœ๒๗๏CํAถTๆรBฒส,g๓+๓ฐ {6@6Yo,-ใ=ซ๙ฉณผ˜ใO๖ใ๘ษซุๅฎ๋š|ˆd?็ๅุปIqFง๑2ฬg๐9฿Mื|@v2วiR ำ…]ฬTญศฮโ%Nฟ$-u์๛h–ปจพKhว<ฦe๒ŽฆฌO๖|ทึކาห4mใdๅ:>Œ๋๏ui9FึหE๏ˆขS๔ู?น-สu<–ร๗š็ฃฅฑ๕ &b:}Š฿็Bัฟ/]ฅ ;MŠM,”์…Ÿฃ๑ฮ6U ;›ทW\ฤหฯ H ป)M๛กeาK5ศ]t:QsK๙ฆ}]๗šคcdฝA๖๗๐๋๓h2Wo ฒทsณ?วœžฮหฤiพ๋๏yDM”฿{yทฆo~žรๆไ:9Wผs,ซ๒๙คq“`์g๗E^๖>˜๏๕1Q•฿ ฮ์ฯE ฒ)พEีทAcpg mแ฿ึ๚ŒV +•9ล\?ๆแ=พ„bึฦswํืฮ^ ๏2ื์G>ๅV(ปMฒษ 8ะ™ฃาEŠณ็ณ//4ฆZ03A๑›฿๐n :}พ8.ซะ9ไŽxžขS\ฮ}?}ห‰ืข฿Œนย~วฆd[ธLบ๘ผ2 –ดฉ้4ฎ‹›สต(‡nจ:ŠE•กฅŽfUB๗o_โ8ฒ:ล๚๐@>O1‘:\; xƒ์Dว3<[j'ด ทฅูŠ฿๘ด๓้„u {@ะ6U% ๛7ดๅ~ๆ $žj0นP์Œb_dR'^S\า ™๔ค4Y฿ซt&พ†}_UศZpž'ศพ๋๔แ๚<ศก้ณฉNฯะ|+7ฉ<ฟดปu๔\ิ ;ูSรตศ~c‰v [*๓4a]๗๓5eภqฑ฿)~s†O™๋@๖๔๓ูpฺIขd}ภYœ๏ ถ^ƒฝฟฝภง่@๖g๖7ๆŠ"นv'๖ีฤ๐้พŸศ๖,'`นํ๊ฑIj;็2)•ภrSล@าฬ3ําN๚่สหz†]ทWc;PŽ๎Tž฿>๗[Pฺฬเ๖rv ฒKxดฅฮฯอง]{wย:=(h›ช"TV'๓ทƒ฿yๆปู ขช:[SีwQ?ำ>ศฝ๘บก๛*ํฒฯำ์Q๓ีi9 ฿คi Kฦฒฃ๗บ‡'K5ESึๆึPŒcSœIน~EศฆŠ1.Ÿ0”ท๏&~•ำจ.aฒ=่@ถ‰๚10ศ6๐ๆ6ีJ๎R>ใหhอŠ_ื {#๚๙์ 8K(T /ป(w˜J0KๅฝŠ฿พฆxŒ@ึ Iฅ๛ี๒X๗๏nvF ฐ๕Yา}บซฤ@ึ,*zฑ๕uAใฉ8d;2xช`ดƒฯผ:!AV฿–ผ6mใ3ำƒ,–’AVUฎฅzY”ใ๒ๆkฏA6ข:แ๛xƒl€{EีWlธ>Oฒช:-ล4ŽT|/ไน6†>Oตเจ"Xสซvฅ6ืศcˆ{ฒG_ท†&6๖ภน&า‡+~๗†ฎžcูpํม d?wุB?ูพjไพื5ลฝ ๚๑e„ขํj๛_E?Ÿนg#E'3ฯ‰๕ุน)uRงh–ญ— d]๗;N3ณ๊œ!ฦw&v7)mาxฉC์๎๋Zร;Pฟฝีc\?อ‡‘glmฉณูBโBหn;Eฒฎ๛ ิ<๏uช8A€l| +ตป๖Š8๚žึศๆ dKซB4฿ูq1๔yท+๎s …=๘๔u*ฺฯ% ซjo;+`^๑ญ๗อ…ิ ฌ mแUdืไะE๗}iภ*K1ธสฟฃ”ld+cภู_๑Ad0ุ)\่=๙}” [บ฿ŠH;ihฒฒคŠ1s๓ว๎&ฒ]๒Uปจ› าœŒีถQำนSจษTลoŽ๗อ-ฺุูฌฉู-~ฌว•5ศJ๗๋สTqฮํฒ ƒl้ฟMคd๓ฒาwF‡ ผwz"~ฦ๑Š๛œคอ7^๊๋ึ•Vgๆ2 Qๅ5•rณชฒ_ฌl0nฎซˆ“%๏๎Jฒ๏9^T๕dทSไnve>H'K๕ผ3@ถ2œc4;<ื๓Rฐ‡bg* (วฒ{)๎Gป}๛e dงช’C' ฒ=5K1๗zu~าtž๒t ปฑf ;ฬ0(€๔ซ2œAฟD dK;œŸำxฌ;dSู:ลFC€l^Aถ๕w4๑คq€์ๅš>ซณf[๛บv)gซYžใ่uูK4![K0\ฒ”eeX…์ผโf] 4C€์พŠoœ๗Ve>@‘„~ป+@ถ2œำ‚€ฌิ์กIฑฒcL {ฐ&ื]ŸŒl Ÿฐำจ๘ˆ“Yฺ5e ีท'<@vค๐3<บWฒx (a@ถ&˜ผด]ฒฒ‘๑๎ฺกคSdoีZูzdๆŒ'๓MPc่๓Ug&gฤP๏๙(•[‡โnwyำzวีถํป Aถฐ_ฆท์ˆเ4qA@๖( ,›€ฌช d+ddKGCฦฒcŠฑLง*~๗บดพGR ป„ๆฤS=Cณqหdอฺ’dฏ๐ศ็[ฝ [šั&ˆ!œiยFCtŠ$ ฒRฒj๓)mY 
›/•๊tEM.ฯCchw+kเ๊N3Y_„ิX2-ศ๒_=บ*ศฎซ่?จ\ืฉร•ึp6{RkŠ™-ฦ\dO ฒชีษฒ•ฒggdฅิฎž”Bๅอคฦ”ำo5sš™ฏ๊\y๓ฒsูยขN๓_ ำใูqนูำฒ๐ณP&s8”eฒฅฆฒทป.Q๕Nฟuดข?นร+็hb้ท*dcNฟฅ่ณ žดfEšยbhwmฑ้ทK๑j]ก_ฝ•wฎ€๔[O)พg‹นKc`า cžo“ส&Jาodร‚l'MฎฟBG:U‡ษ#๋›ิXณ๊Zpข"ฎ๔[>+w ๋ฉŽซv=D:ีHฅฺ(—๑ฅA๑ส {m‚ Kหป{sNว ๙ฐŽ]๘pŽ๛ฉufqโ6จม๗๑ฌำwฯ9ศพว'่…้๓t {'ซoฯคzq8มฑŠœภrŸXำ!0คอ4ง9ษcอše๑ฐvš` kูฏฦฒฟT {7‡:ฝค๘>:ฅฒมห\ฒ7q๙ิ);Xๅƒ์ฯœ˜๚:ึmผ๑ใkอบ39—.Wnp˜สห9ื)t๘M@๖xnฤ๎ไ3†ส€lภ๛U;ศพฤ`จjWืsV‡8@vพu–บžณ€ิ&ฒอœโhkฆbษนพ๏MุูNŽฏณู9œ“VU'ใgR% ๛+๗งบvpถม้i:€w?ฬ€๘ฯ& œำ>™Lตฝ๒OFzxf ูnvๅ2ซ๑ศ†๘žขูฟ:u฿eX$ศหจƒ\\0ฝฐฒ’4ศ†(sศ~แังภkฒี ฒ6šษ1/ํc€ู…8f–G;ธฑุ?๛ฤอV ศR:ฐ‚l—๛‰ŠŒTศญnฅ@IbPะD๖ผษแr๓์p|‡Yฑ~ษหKฒพแeฒ๖!@ถp แtUrh€lล‚์/š6๕wˆ๛U9ศN็ๅํฑ\ง7๐ท๖†ๆ๚B,ใržG๋Aถ™หึีษ'6้นrฒ~}“๎\ ๛#วเพฯ๙ๆ‘žA7XEณํx"9ี@ž`ภ2[๒ฒs๙yu฿ฦ;^‡;๘€l[.๓ล9ฌฎ|ฃTk]<ว ;อฃ&rญbฅN{m)วๅXๅ๔eม๙นtƒร‡ผ)k1'Vซ\y_dฉใyT‘๏ถ‡ะ6ูŠู“8/k?Mป๊ศ.รงFZjSแ]›`๚ญมฬตผAจ-รlg*~๗จ๓hผx Kuด๗4j_% ๛ฑ“ำZ฿็๕Uวjฒ…”Vxณืๆ์ัs#โ )P>wy*ฐZv"|๊ณxsqO{ห;ศะฏฏ๙6๚s;idIg*ฒ%ฌฦ๗ศ+ศใัŸ๔็#™ฑูซŠA๖y็tฟิKRƒC๗าoเ๒tํโ˜็๐€ป5@ถ"A๖ภ”าo]ยํ้/Kอf๏Aขyd็+ฃRสฆv๗>Wแ=าzณ}ฒฌ๔[ล๔[=cJฟuค+ฅZ/ t™ึ๏๒ฮะ?ํณw2”+กปB@vฒ3>DŸ~ห ฒห+<๔c~ ฟ {าodญD๙nIˆะ ฒๅzw~'๙ohณร ฯาoแ@„  ;.D<๊ีiˆP๖›sŠ'๗๕Vค๎๑Eˆ` ฒฑˆ ภxฑโ๏า+&้‘-{‡‹ฏ—aถ/งQšซฃSŠูจ7็=V\"ธAถŽ3 ธ๛ๆํ‡ๅ%Dศ;ข6ญมมdบ* ษ] สโrg#๋fN1†“ฝฒsDmž๒ศ๊จ-ฯzขฆ>YQต็^ๅฎวญ“๕฿œญ1o๔*†(V๑\ก]ธUฉโรt‡ฟ„9aI’ฎLๅdฏ่จuƒl ็๋u} Om'{ํฯ(ํš8ข uƒlกC/ล/.ฤ0ธs๘อหศŽV๖iีocู+ฒž Kษ฿O:ีRง๓ปิฆ ฒฎๅ฿ๅK1–๋d3 ฒ๒ฉŠญ๕E4'z+ฎ8Yอ‘วสอŒา @'“๑”a7€์z<ธว„u*dหW&08บใืg%ฒ kbฃwศVศž dฯปJๅa:ึภ3ศชRœุ€์išฝ๖ ๋3p๙€์ฅž‘8@๖\€ฌ฿’™"Œ็ 5ะทฦJŠ’๚ญฒ9ูาs ้?:Fํ•u๕ท์@ฦ‡j,9_฿A฿N xไฐีฉ‘‹ฦฒรybๆNw5ฌR@Vฑ29‚ำoy~ {BH@๑]ฒ•ฒวูึA ซY—‡i€ฦ;7ศ๖แ๖ Zท็iv๕Zƒฌ฿ภๅฒgฦฒํ9fY•GธsึA6ใJ˜~ˆโ˜gZฆ ›-ิ‰{ู~RGสทฮR1ž3ำ^๔บ้Rท•<ณป๓ๆว๙6 j๚+uY9ฒd็5iฯd ฒ9e@ถBœรวDฮr>`/šิ@๗QžMซd]ๅ่@ถ็vt฿๋ฯิ*ฅบซ๘ํใ ;\3๋>ฺoเโrZ]“O๔dMa@ถ ทUบงvูิ<ฒ+2๐ภ#›G-๕ƒ5+B‡ลฒด„ถPœSzึœOืtB๓m.yœlk๛Q-?ล_ีya[วฒžRไู5๏ถนFiาฝฏ๖>ฅ~goM๔ฉDu}P8ฮLAvi~๕˜ไ]ๆk+ผเS|ผเู 8=3QาƒN็๊‚W๚’>ผร[๕qตษ<ศฦGึ'ตะjŠ%ำ8A–t€b€Ÿษฉตๆ;ŠQ๚๐ๆ83๗‘Ÿ๛x€l=งsRใ>@u"›tฟF>‚ุ๗|:xouีฑ+1๚hอ๒เ>ง๚d"lnAึ]'ๅ}J;๖ุฉ6 u@ู WYJิJŠ>oฎฎ‰ ~Vๅฃขึ(Ÿn9$ั๊ะ†šพnqEิN้T๏qŸผชฺทะ&q)ฌG{่ฅษmฒtjณšฤ|ส@E@aw Y๔๓™pHgiฮฝ๘๑ำ)cสาšิr:ŒนŠฐ„ฝ|rพ_/๎ ำt“x๙ฟ&ศ~ฯ๐๎Lฝ8&v(w์_jžqkฯx^o%๘๏๊ำฆz:๑ผ4ๅฒ?๐†พn\=ธ- เ๔<—iB=แณ฿mA๖/'ึฟt?z˜ูœs๑w๗)‹nนSํ@ถิ็[ณJำ/า8ูาถโ$Uˆมแว๚„ๅ๙XไiŠฟ?ืฃ'm(eF๕{œHํk!GxU๑๗฿๒Š$AvŠ๋[ิฉปวž;-9;ีlB6ูAš} ?1ไสฌB+ฦ๋hBไฆsํš {ป;๔t"AH`ภ)5,wต'๔DฏrrษŠXฦ์์[› มฤkขxฉQ ศ–สบŸ์?ศ–๏k<ม๒ ๗*OBพีดƒษNง`ถp+ ฌ:ะw๘~oฐ—Nu?j_g—้๔“"ศฮb/ิซR}~แ‘ห๎w*๏จูฏฅ๛้DพE•‚์,žDสe๔ทน้šz!Pุฑ่mัทศฮcฏšWผฮ)ุ†T8ศš๖yิ7- ศ–๚ผu฿lง^#Œ“•๎ืฦc?ย\๖ˆพฦๅ๑ณf,{ท่ฑิฏ>5i2ป๏kn_ฏr;M๓ทง๋Žรd็(พEีท๑…™Eฒ.ฏ์(+-ฯ|ค&ฮถ™oVyฯc"ํเSๆ:โ๓ฺึ(€lRƒN)Žค >๓u9ƒ7€@ฺ k*๚ปEฒฎxKล€=ศ–kbบL4—v๋ Sฒีฑwrzภ๛5ณ๗ทปnCšศฺ่/๘บ๚:ไฒฆฺงJAึV๋xจ*ๆะdm๚ฎu*dME“ฝลขYฉฯ#ฏ+š๛ข=นิว๖b0 Zz:dJ๗YHใ๑3ี=ผส้็‰dMEซb"ูา1ˆหูd]ฌาA่"อัยฆใ๚2eฎYS]Mrเ)}”4Ÿไ“&J๐RJฏวฒช“T^Šd ั฿๐ูTว/z‚l้#๎ฬ›Wไ฿žGฃwผๅฉ[”มGeu๋ใ=qmฺbภ๘žOํn•uš f&๚ำม์ฤž๒รƒถ๒ุykข}ซdฏ Q>ฟrฎใEO•Ÿฒgศ‰Œ)ศถี๔]Eฒiย zY‚์ !สโKX๎ฤw„Tฏ‰Aํำ€B˜ำ=6ุถ(6ม>Rg๒tศ”฿ง๏œูฒ_ฦ \cเ้ฬ^b๗u๎๔ฬ ๚SCด‡ษ> ๋บ฿ d]ผ1F‘ฆั๐ท9ฎ๘KK‡ฬˆ5(๓šKS] Mrเ)?ฎฏ–wpฯะ๓)วKN“4™—J๎ๅ˜ฃ~ชก คwlO1ะ=ม‚ ƒ‡๗dด๋>ฆzŠw—vิ|ฤ๋1"รำะ 0T๘“ช|…1มlWŽผ†AGWNๅ:—p˜ึญๆ~}yฯ<แ™คธ฿็\'su]1รGฐ<ืC-ธ`Rวธฃฝœ฿mm jT<๎น&'^Ÿ ]‘6ฎฅ:yฺฐ<&0@#ด๗6}ส>ฌรถN mb9Cmิ๔]ฃƒฦ>{๔UOqzร. 
[X y:@ป|’รฉะ๔y+sฟ0ม๕Œ€์pO&HๅN›ฒ–c%Jฯ๊Lห‰ๆP่~แฐซ[9,ฎซqป›>ซp8ิณผ๚๕ปtŸ?๙/r๛Yง่๐้wค‰ฮXE๛8ษkำvwฎ ํaผGจI_ร า๓œ๊ึฎ6ฑ4{ค'๐oOิmพาฐJ ว}หวง ึำฅr‘วน+8&ถฃIJ'cฐ์่๛; ›n˜AAํ80{๏ถ,h9Nฯิฦ\ธY Qฅd‘๎Sฏธฉ|ŽFuฝ๏2™โLpใ฿Fุนžฟ'w&ซHu;„๋ผฉุ, ึใ~ ์}XVqฟ~Q[/`70–ฺ-ํ.5Œำ“เน1„๊ชd๋|kuA๊ลใ{ดQญ!ศ๊๚ฎ๚ }—O;6๎#์๓tœjmณkFC’฿‚b<๋ยKฺ+Kฯ๒ฮๆd9ฃŠแXๆqŸŽœžk%้>ซ๐ฝป„่็ฌ]฿†_{hฐ\c`CoHQๆmฌฒ „ะsพ>฿ฬaกkณฆชศฆ5€๘ฅ”pหๆฃงU)+Œ๎c*w๊ฏk<ปถŒHีw&yฟะuเฝ"kW•œ~+แzIชNโ๊ปขบnj}žฯ๓๙๖1~ ึOภพ.๕~NS†q}aฺƒoyถ‡L-Hฟƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`0 ƒม`ฐ ฑ?vฃMื•๙IENDฎB`‚xarray-2025.12.0/doc/_static/opendap-prism-tmax.png000066400000000000000000002231011511464676000217700ustar00rootroot00000000000000‰PNG  IHDR &${๏sฮ๛ฮyž๗ฝ็รยยยยยยยยซฝธaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaGภ.พ๘โyฐ๗ผ็=‡๑…๒็Ÿ~Ÿeวฑฑฑฯ๓ฏ๋็ฯŸ๎wฟ๛ก‡:ะKrห-#\vูe๑‡……`………ฝิถeห–ฎlซVญ๚๐‡?|ๆ™gvีถ~๚ร๘B]3ํ๋_บ่Ž;๎ะSzก๒ฉ_๒—'žxโฟ๘E=ตw๏wฝ๋]งœrส๘ร‡~๘฿๘ฦซ_๊%K–์๗%๚ฏ๚mo{[yจVซoqXXXVXXุหi_|๑›฿ๆ—เ…žyๆ™ำN;mฟ/ํท๏๘ฯ=๗\ก•uื]โฐoฟ ดื›๔ฆ็ž{n๖พ๚ง๚™ฯ|&วฐฐฐฌฐฐฐฃ ฐO\พ|๙‹zกO|โguึฤฤฤ์งnผ๑ฦO<ฑปป›‡฿๎wuษษI7๘ีฏ~ฅ-kืฎm์(ไ:๕ิS๔ฃล๛€v4–ใ‰ ?๔Wy๒ษ'EHทz๋์ง„\งŸ~๚%—\โ-7t“๗๔๔xห๗ฟ}m™%ผyณถ๒“Ÿ|๋[฿zาI'iน฿— ภ {eึaฑ}์cฟ๛ปฟ๛์ณฯฮ~๊๚๋ฏ?๑ฤ…JฒcวŽำN;ํ_หู=66&ฎ:ใŒ3Rทv[c฿;๏ผSฯ;๏ผฅK—.[ถ์ข‹.าร[nน%ึฐฐฐฌฐฐฐW4`‰Š๖๎ฯfททoŸŸีบท ก~๒“Ÿ์๗๘gŸ}๖_ล_46vuuฝใ๏ ๙๖ทฟ็?นV๎บ๋ฎF3กุ<ฐkื.oนเ‚ Dc๑ถ†……`………ฝข๋ะsฐJๅ…R๎แ๚๋ฏีซ^%š}๐ตkืช๑โล‹๗๛าถm๋๋๋ำสาฅKี์ฑว{มห๙ั~ค–รรร๑ฮ†……`………ฝrซฟฟฉู์t๕7๚ูM›6y๛\๐พ๗ฝoฟ๎wฟ;ปw—GFFnฝ๕ึ;wzหwพ๓N8allฌฑ๛ใ?Hบ๚๗พ'˜๏laaaaXaaa/`ฝๅ-o9rว๎น็N;ํด+ฏผrฟฯ~ไ#๙ณ?๛ณฦฦ;vงœJ5>>.|๏{฿;{๗ซฎบjผy†นgŸ}๖sฯ=๓ใm ภ {™๋ˆ&น๗๗๗7DญJำK๓ŸŸฝำŸ๔๋_๚_โK—.“?๙“SN9e๕๊ี<500๐ไ“O๒งื๐๐๐gœ๑ึทพUวฟ๗{?๔ก|๒ษ]]]๑ถ†……`………ฝœvษ%—ัฐ„;'œpยƒ>ธ฿g_๛ฺื~๋[฿šฝ}rr๒K_๚’เ้ท๛ท?๘ม๚ืฟ๖S฿{›ๅฺ๛๚๚>๑ฟแ oะกฮ?'žx"ำฐฐฐฌฃะ›ถ}โŸx>‹ฝ๏}๏›?฿๖eห–ลง!,,,,,,,๋EุีW_oอฟฎm||น็ž{็;฿๙oํฟฐaร5ื\#ฬzๆ™gโ€uจvัE]~๙ๅๅ–‡~XP555ลร๗ฟW]uU| ยยยยยยยฐีฮ=๗ฦ๑๏~๗ปๅ &ัี>๐๘@„………………`’=๗sฏ}ํk?๕ฉO}๖ูฟ๗{ฟ๗อo~sฯž=_๚า—>๙ษOบอO๚ำsฮ9'>aaaaaaaX‡dL>ซฟ๚ซตkื.YฒไMozำ—ฟๅฯ~๖ณ_|ฑ์g?๛฿๘@„………………`ช{}๑โลฏyอkพ๐…/4มz็;฿ˆฐฐฐฐฐฐฐฌ฿ฤz{{็อ›wีUW•9XW^yๅ?๘มƒ๏888๖2๚๐pš๙จๅห{qํq๙q๙qํวภๅัQๆ#๏ฬัƒNฐŽ{เN?tOๅ/๙บืฝ๎‘G9๕ิSwํฺลฦ๗พ๗ฝ/8‹๐ๅํkฺํTNNหใฐŸ=žฏ=.?>๙๑ึ`…`ฝBmbbโMozำง>๕ฉ7.]บ๔ฌณฮ๚ม~๐์ณฯžsฮ9Ÿไ'ืญ[wอ5ืถฐb˜‰หหkภšฃ๙๒ๆ่A'XGSX๐๏)งœrๆ™g~็;฿ac__฿๙็Ÿ๒ษ'ฟใ๏x๘แ‡_๐ ัีฦตวๅวๅวต``…`f‹ฎ6ฎ=.?.?ฎ=๋ํใ้?œฃว€€€]m\{\~\~\{VVXVV๔ณq๙q๙qํXGา๒?๛‡s๔pฐฐขซkหหkภša็hŽnVVtตqํว๘ๅ๎Uฦปื€€€€l\~Vผ๕q๙/'`}ๆฟ๘Gs๔|‰mถ}ใ?๔ำฯ:๋ฌฏ|ๅ+ปw๏๖Scccgžyๆย… cิภ ภŠฎ6ฎ•{๙gํ21ึหˆY๑ษO~Vรž{๎น?๙“?๙๐‡?ผ~๚ว์ณฯฺืพๆg?๗นฯอ›7๏ึ[oQ;++บฺธ๖ฌx๗ใฺภ๚ซ๒ฯั~|q•jวŽ<\ดhัYgล:ผuฦg```EWืJ–๏~\{ึหXcccห–-๓รฟ๛ปฟ›?พVv๏ถทฝํก‡z๓›฿€€€]m\{Vผ๛qํว`]๚_ใ9๚กฟึณฯ>{y็๙Ÿนึฏผ๒ส‹.บH+XXว `ํฐ|๏ึ5{๚ื๎i๕์่=ุทซฝmชำ‘ห&&;[vŒŽN<ฝmt`็ฤฦก1ญศ{ถ'ืสšัี#‡ฦu๋วิFwŒMn™ะz{\ปใ›†วุ"๏>ชCูi<2>ฉปZ#ห6ฏบSฎฝฺใ“}รiwฮ–ืVฎuV t&ฺQgขmัvํฎ‡ญฮณqี:Žž๕3#ฺK-ี ืmวต๎ํ]ญชๅc};๎[?๔‹งžY๘๋gX.Y7จkฟงwP๋๒Ÿ<ฑัใ}๒๏<ผ้[ฌฟ๊กrญ๏ัMZ๙๚าฏป๎หw๗h)ืCตมต~ลฒ ธž๚โ]Oห?๗ซตŸฝc–ฅk‹โEซๅŸๅฏๅŸEทW>zห“ด๒ฝ7ฎ๘ณŸ<ง?^ฎ%+๒}c๒wเั?๚#๏๘ร๒ท]+ยF๛ูปฌ๔๑ํ๛_๗ๅ%๒ณพq฿Ÿ฿๔ค./nYๅhฎ6o๚ึา3ฟ~ฏŒฏ#Wc\฿|ๅ8อด‘–r=ิFœ6>fใdส—ฃe@F\{ึ ฺ?™ฃ๚k}๕ซ_}ํk_›ํ oxqรฌฌฌฌฌฌ€Œธ๖ฌ฿ฐพ๕ฏฟ๊Uฏบ๋ฎปด~y็tำMl`ล,ยฌใฐึ?บ๏้e`ึ๎ํ›’ทฆ&'pภ$ˆัŠุ^้อK9Pฒ.๗ฝ\ิ T6ฐดาjCBb)HK ิ˜ร h†F'ฤL‚5อ๔ZฺKŒลพ=™ฎt4ำ•^NET๒ฎhฆAGำF๙Š-m=”kนvื™XF+ฏฐQษปjภ๊ฮ๐wฯบมฟ[= YwK`ท~Pผล๖›W๕หร“7ฎ๚ว6 ›DKf,-'K๋ฆ+œ–ฺ.ุ‚ขŒS ฦ๒vนั๊ ป>๖๓Uฌ xใ–v4๎จM›์๐d6*[าภm์ฆ(Žoภb%+ฎ=๋ํฏ?™ฃสซ|้K_]~๛ํZ๏๏๏Ÿ7oฺN8แ„“O>๙ย /Œ;+++++ 
#ฎ=๋P๋ชซฎ:้ค“/^ฬร}๛๖mฉญฏฏ๏o|ใ~๐ƒํทวภ€uฬ–ธJหg{ูทๆฤXW์่ณmริฤ1ะ‡<`้Peแœiแ%Bฉ9Iวi0–9ฌD15fP่๓ ภŠkภ:D๛๒O็่?๚๕๋O<๑ฤ+ฏผrhhhฐถฒAไ``````dฤตk€๕•ๆŸฮั~kฎนfL;แ„ฐฐŽภฺปjq๒๎ป…V{Ÿบ7ึ๚G๗n^Eถ{งำI\5>ฒ{ว€่J๔ฃ-ปฺ„8P ่#เ ึfภZ—qG$สN๖:ฅ’วีž"0d=•ภ/>”Žzจ%qF์#ุ็ไ๗๎ŒG:C๒฿Yฮ^ืiหห4vAVHfฟg  Cb,น่JXฟzz› ฬบo}X<”๛) ์ๆUย,a“–„ฟุfถฐri…ฺ.~}๑ฎงฦ~pภr”ีFฺปฃ™ฺซไ๛E2โฺฐŽ`…`````dฤตw€๕๕๖ทๆ่1เ``c^ฅบoyjo_wๅ›WML&ดiA<ฤิศืC์ihๅ์u”ฺ9ฎGž{+S”ถ๗ไœtญŒNthำสฯถr.˜VฉSUฦ้€!4฿๎ณkšฑJ†+ษoฟ!ฮ๒dมิv๎:av+‡dฤต```…``````ฝป๚œ7G7๋xฌ=หoC*eฌหณฬี ฅซพns+Iษr„น ฉ๎B+นMv„ฉจ!์.z Jฺุ™๚šฐ†๊ะž๐ˆ€`.œ˜์ jฅ6Z—ซ @“กz5Tgธหง: ณด;'ใTzว(ฒV]๚ฐU—>่kบ2cX8!<ฝzg:˜Eธฐซศ/…%c=ึืึต?ฑตญรjฃX5ž}a;$ตšฑ|วฉ`Hฮ;y๎ิ.ิŠ–W,@ีBVภ/"†V=ด€Vฉ/ฬ‚!,‘g”คา`)็’ฯ,z/ก€ๅƒDIห:UฅT•sแหŒ๘ฐX‡็ 5›๊Dขœ^" &G!ฬhฅ‡l๑z9ฺAจฬ฿`”ๅ„—€€€€€€€€ulึ5ำผ9z ธXวiˆpฯส;cXฆ ห“oZ)บbโƒ{W-N-ล^นฅV*าฮrpj|DิBก@๑\E8‰ไ–V๋คsิFฐ ๙๕v,r้Pข%Aร๑ผ.cฃ(Š€ ัสมDฺk#‡ต8KฌVw๛งOnีๅผป…๔ฦฯบ[&"ุAx:#.•X?๘Ÿ็อัcภ ภ ภ ภ ภ ภ ภ ภ ภ ภ ภ:,€ต6‘“ IภT1S๎์ณmรžตปF†wo฿ดw๋š}W8> f๊ฐ,๎ํ๋ชลBNNผUไuฆtย,E Xฦ,ุhคฮj',ุ้tpjrbjbL[hi(ฐ\ู„w90dฝSก‘Aไ$ดT{T–e SzกZXฤฒVพRR๋5๔š†๑:Œ,@ชแh=2ฏฦœ‰หฎ‡ศ“:แคeม"ฅ%cแ~xลฒ pฉ๑ฅฆƒ(b(้1R€ฃ$ญhปsภอXฌปEซu๙Zz/mิa8ZR„ขส(กถ่๘%Š•๒คNooH0ฬฮว/ห'l„๖\฿/`5๎ ^–V`,9ท๚ฆUบ๘๋คตฑ๐ืฯ๐ิ ชณzฏƒ0โ๒_i€๕ฃๅ„9z ธXXXXXXXXXaX‡ฐcญXdHาบ\ค•4v ˆ™Xx/่*V3ถธั=ŽZ‚MTก!ZWbS Iร3;'Q eป–ฒRัุdไ๘ษ3]Mฐ…ฌv™กŠรข,jกQ€‹,u=+|้…A‚•4@ฉ5s๔Nrž>€ี_KชBo8ผ…„๓โตคฯฆแ$ฒ*ฬ" T59๒V)= ]ษญƒJๆ;P… )ƒท–b,0 ฦ" n๖‚บD__ฺKH ภ‚ฎ%” สˆกชมX,XDK“ส๎\ฃหํซKvั Gด„ฌ์เ๒8ฮXŸ-่ะศyo4ฬฆจFเา,UโTITaฌFpบ2`Yƒ€ฌnธ๐wษบมŽฒzƒ0โ๒_™€๕“๕„9z ธXXXXXXXXXaX‡ ฐHuงŽV9ญZLย๛žVOŠ๔๎้_›ขูŠQ๒ูšูี=ีa ‚ˆgค,-E$B+รภH,ท!า72>œ^J0ฐ;Vั>16^K3ŒไŠ:ฆ4`Žส<ˆ‹ŽไW๏ฉ ๘hฅ]+‘๊$Iฬ'†hึ!b(ฦา…่Pjเ”yl*หช ค๙๓ฅฆๅนŸํอ™๕ึ#u™2VHtŠ’[hิย PQBm‡ฉ1Œ…”ฐE่P8ๅœw q‡ษy/ๅส๚ะrคJ‰AErj์˜ZJ!ƒ’B 1Kห†b'ฯr"€Uฃ Kู”กรFˆฐD+๋G๕Jฝะ2|Yช‰–8ีchx™^ช382จ‹šF@FึQ X?฿Nœฃว€€€€€€€€€5รn๚฿Oœฃว€€u\ึ๎oณbQ XU(pหS๛6ญLtU+‘๎ธ"ล ,qฒh7๔uƒVB ขcขQ‚€รzกํ\ฐyจ–ipBบเiชึ_ฐš(เe8ซข„๒ŒVปฦฺcuฆC„:†HrฝเIฤ •eค \ w!ฅ&ด\tฅs~น„ณœข:„3ฤ&\ำšŒ~ญธ~Žร…ช๒ิ‘ีMร‰ฬบg–ึ๑‹Zคิขฃ"'ั&%|้ัฬํwฤPร9Uขตฟะ#%็า*นสŽ6)!-šแัCa\E‘H"มฤฦฮ™๒—-^ฌtศ1ท๎่์๚6่2P$งไชRด‘าฎƒ๛”JP‡ส3iะี~‚,ำ$ช๛ื€€€€€€€€€€ีดŸฏšฃว€€€€€€€€€€€u๘kื=ื๏~dก‹”,ฆ&I๗5๗ฃ็>]ต0ฯ:ฌสfู๗$ต}“จETpˆi•JะปขŒเ๖<‹ฐ=^อ๛cž`สฌย๋น$Wนผ`‰Vำ>2œH+?ฅf่Z‘PE๖•แฆLœbRก62‹ฦาRป@„ขtฐศž}oตงซฟฎfศ 9‹ฝ|ศำศศ?f๕ๆT-;JZฅผำฐจc(ื -4// Šภ,๘`นd!™X ี gปลฑœŒ…ฐป\๋he5rณ(bBiiฒ7`k6`AWศฆ{Š_9-‘u'f•๊ํธ; w‹iƒNkฐQ#ซ‘ fx๒Lษc•—ๆ,’าtst็>ส๊๓ภ\T*ด ย8๖.฿_W,`-?_5G7+++++++k†๖4G7๋ธ,‹jƒ–kOคตbั4T•ณkO7ฎHำ z:LDSช•แ uOตณPrอe)8MW5`๕ซ$ฏ&ฦ*9ฎญ*ุาฦƒzˆฮ;V kAN๐ศ…ฺ;์ฅ๗ญ",ศ…๔ิณ๕”F;x‘าŠ์Xy‚ฬดิปi ์I€ี฿žเžŒŒO฿—5๔˜JDิฐด์ส ๏ฝ9dษC๗ฮฤ]ธฐAWธ%ฏfsี์ˆกeฑJญf3u -๋˜ณ`ฌ๏ฎงฌr๒ ๙ฦ๔ำZ{"XฎE่…o๚ึาณพqŸ‹ุข5โu:1-}|๋ั7ๆ ฒn`‚™fƒ”ฃ ํ+$ฏt“็Ž ฆ!ญˆ‘“—ฏ๏/uNฝ…นภธึฐยฐฐฐฐฐฐฐŽAภ๚ป๛ีs๔pฐŽwภ"ีฝส^Gซ=ซบ“เช ญ2]%ภj๕ศง2ูดณ๚ภ1’‘G ั†ๅ NM%ภJQมษ*ฑš๙™ดสํฅฐDWํmฺ’ฺg),PuAฌ‰Jผลน"คlขธํ+๐ศโUVฎขX! D ะ๘E”ฐU{‰YN=š^bปแœเ/ฬ"ค่zŽ๏\xจฮคEแBm!K2พUๆ#ำ)‹ฑ๎NrŸ-‚5[หศELะ^V*ดPV้0–‘ ู๗’ฎ„/„ต,ฑฆ›kิ1t}ภฒ€ 3฿A.ื%ิด"ท๔ฐสH_Cฮช!e„’kX๔EแฺR6ำั๛ัใ}s„ ๆ…@๙TฅŒ,๏c๕๒I`žŠฟ‘Jื–ฌฐฌฌฌฌฌฌฌc ฐ?'ฯัcภ ภ ภu฿ ,โƒำ€ตqE•็^'นง‡9ร]h5žาๅฬ`-คN@vฌ‹'ดพซฌJ™ wWฌำุjฏโ€ข(นp '\˜ท๏nbHฝร1ที• aฏกZฑฎชU —$บB™ซ Cงบ†ˆ?–Nข=i‘๓ฎWษ5+‰ ”์ ช(’Y๏4g9lG=[ิ/ำซw&พ,\8;ษี k@ตมciฉˆฟ_ฑlNฺป“ญ2tXฦ ม,ส@›๐…ก–ภl๎)1 'Vo๑p6o•นํ1hvิoถ—€eฎjศูCW&-]พtฟh…๘>NตมA†ง50กซ~วCงเ˜ผ|โ๛LO‘3‘ฅL‚ฎXพ f`…```````M€uว?xอ=ฌฌฑฉ;ฏฉ jๅปY(O˜ต6*ขZจV์ป‡ถŠoD6๒R๏˜ `‡vจKห]“ฉGfufŠ/T‘มฐ บา U˜U’ึฮม;ดDฆŠฏ%ฆjฦBนTฦ้มO„ ษ\Zoส ๙}7L-พถชŸCถ๛ส;]mXci%๑ึฦปทo4dตL ฟPŠ…RปFDฒซ“๚-ซ๘ Uงฎ—™์Brbฉ„S0V‘๊^ษf}^q|rฆ๊๚9œƒณั!$K-่ด!›"cฝ!%*Gˆดw-ฝ…d๔ ้ข%-…ADไOท ! 
K9ppŸ’S์5พยR_๎;ไฟ#ึ`บš Xข%ั•–„ A.=dปูKฌ@ฌะ‘ฤ2๓ฦ*ตส๘xAWฅ0)ธ`}๛^tฤLZ–ZณuJEK0“–ไผ#:ฺ(‰C๔v$OแชRXกŒ๒rๅ7€ซ,Kัˆ{zฃn…๋แ8ะฃ7BทREูฃ์[zณ‰eˆี^ ดzCQแึ[L4Bฃฺ๐๐`ฝŒ—ฏ7ันํ–&@์O‚Eƒฝ|Aา#:X๊ทN™ฃว€€€€€€€€€€€uธK>น๐ry็—Wฅๅขซ“pCM8E]็\Hด.Mหไ”€I4ึfแxฎEจ‰™Diฤ๛h6ฬžฺˆรxจงrƒด1^๒jDkz๗L]ฬaD” ยณr•งje"G0ล+ŒChSA?‚˜‰-™o(จƒ๐๗พษ’-j@{ "Vำ-๐ญ™tล–BBฅwี'C;J›9Pศ๏yjŽ>X_วs.dF!PŠฟ—€U2–Pร ‰นฤกMO/฿ผ/อ"ยk + j tU๊WDณ+ –JZ่ย{z`ใ€%ทอฉ2DXjธ3าแN ’K3NyJ&มA‚ฐ0—€,€๕ศๆ&*/Y1๑4"ผ~7๕้ำUB6‘b‹ฟดLอdlิ^Œ฿|˜ช€uD]wž!?‡t-ยถFิ:๏๏หX๗ฮ?}ŽnVVVVVVVึหXปw๏>็œs{์1.]บ๔๏xว๙๓่่แ‡Ž!;๋จฌDWท|U.ดJพ๘ZนHk๗C?ณถ1ผAJr฿๒TB™๖6โnHดS|PPีส๒NTlฤ๗ mM1มแญ:‡=uT๗ฝ[ืTภ4tฑZDา]ุ”มฮ๛ย^Ud0ซgฅืล๊J…Nดw8h…บึXฺศPื]ว๘zณบ•z:€I #สั:๙ๆjฏ–jฏ1LOฐt‘A5&:X,Š!zล*\Vrถ;กF bอvฏeฤา"ๅ8T9–ป๐™วr†s ชแณc…’๓ฎ%aAฒฟหฦj—เฑอฯg%w0บrDฏไกล๕TIะก+C•qช|X*Z5ช(š๊ฒ }โบRำ•oZ)*ฦvคฐจ ญˆ9 ไaี!ยF”ƒx=่šซ๔ฆwีŸXสr6oผๅห—๋aษ'Ÿรp๋ึญ฿๛๗„Yถm‹Q;++++++ุ๋ฌ๛Oเว๏ํํรlฌ;๎ธใw~็wเ๔ำO_ผxqŒฺXG}ˆp๊ฮkไU†๛=ื'ดสๅฌœซฏ;ัUๆหดLC'ี๛(&ศS"ญดNขj,ฃ<ล€ฺ]ฑ๔ศN~wญใF8\่ !9˜%Dp’;B‚_คภ—ย dykIฌะOมdฺtฐ„&J๎0V#œg6"ธ฿J‚๊๒…dอ—qภ2{ฝLZGซ+eฌน;ฺ ƒ๊2ug"ด พึ ’ไŽรFOlm3สšจส๘ ้Šฅฝไc™วเ2ˆ๛ฃ[:ณ7 uia๗ฌ#ไzใž์฿Iˆ/cฉิภ๛Kทร{สื8ฌ |ๅ+_™šš2`mธ๑ีฏ~๕]w๕sฯiฉ๕M›6ลจ€€€€€€€u์ึฏ{ร_ศ€%ป๕ึ[O8แ„Wฝ๊Uฺจ๕ฒฐŽภชb…ท|5Eผ9…๏_ ‘––)Dธๆ”แ>ะ มT์’ษF€‚ึจ•EตDี“b…ผ]ดgd่๙Zhด ไแ`Vฮy—๏๒”\+&ก›k"พ0;/",8ดี๚ขUN`Ÿถk฿ตฬ8จsอˆ]†ฒJŒฅพ 0โ฿{†1๚5ฒ€ตQmz๊$wญ‘iX"]}0WZคุbYโะ ์ฌัœkoบ,B„–Eก” ฆ zฒฝป`DMK\S„!qp|cNx้Vp |แl‡$ศ|ท)ยFXWKMˆ (bฌฟ]ถั* e2;hๅฐAงŒ๑ํWmกL~ฐ$ปPŠฃ:๑ขxฉ:1ฐPjf้]ไฑqำ-c|บoI€ฅฑึ!Z=eษGlตnส2ๅูธ์‘ธLc็3ะpq๙` G์ลZ>*ejผwฦฐŽ`•ตy7๙้ฃขทพซ๘ษไ_q/{’๛KX===๓็ฯฮwพณfอšo๛งžz๊† bิภ ภ ภ ภ ภ ภ ภ:vkูgฮั_,`]z้ฅ^xกท_pม_๘ยbิภ:๊k่ŠOษ'\&ดJDuฯ๕{–฿–…+ํYyG1\yวง๎๚คจ1ปLขƒ‹ฅ๖n^ลK$จชฅJ๕P{YฆALฃ3$„‡Xƒs~ตE/ัปฝส"ืๆ๐!B2…-JLƒvิ:+NTG}”ยAฺNิ™†๑ฮ็™ฐ@1 ์XP‡’X%l‘น์,xFG็>ซ'๓wื๛aGc–ึหI๛Œ๑#ต^ Dม\WุrLฐแดDลŽนnyŸ.๊,=ฺเ*=4˜~ฟ+ฝฤ&ƒWYcJ+“ึฑ๎#—ชกœr ฅC‰Vฑ%tิ๔้X!€…”ซต^Irhำ0ฅoไจEGลs@dฦ บ‚ฐ‰ําโ๛ŽรXFฎ2ชX2Vึatฝ}D‡ ๚G฿DG ษ…Ÿํ/X้ฌ}่C฿๚ึทผk_๛Zษ[aXXXXXXXG=`=๔ฦณๆ่/ฐพ๔ฅ/}๔ฃ๕๖|ไ#ฺฃvึQXใ7|QNp0Uz๎๋๗๔2qUดา ๏ฉ~NNuฏr€4s๑“ฃK‚‰Umœก-ำ€Uˆ…๊˜]ษตืH‡[iN ์Y๎ก*€Sgึvi—อซจ๎ฌ“้ิEฆyVN˜’ศ a>†™๎ฌฐ v!ุรืFืฦ`ฃŽ๊ฮฎซฃ]„D"$Wถ&P$G•–zJ' ฅ jฒ3ฅkืฉฑล2 ฌk฿2JˆะแKt% ‰๒ฐโ*=ฎจC›žL์žU"ฺ๙๏F.‚SŽU•bคจe 3:ผสnsU้zสJ 7<‘๚<ถูIๅ^ip+า”Eฃหด๔Fr๚l]†VY๔ฦภคuช8—Dี –h…˜*€ๅIณC„p•ฃ„ึƒ‡ฝ:Ÿ U#hk/ƒƒฅ h)+บ฿Zฮ–ฎๅao‡รzทพdqสศ4tตFส฿6ๆf'ถ๛‡S๙;๊_โˆฟ๙s๔ Xฝฝฝ'tา~๔ฃ-[ถhฉ๕๕๋ืวจ€€€€€€€€๕›Vzั‡>๗s็ฯŸฎwฝ+”ฐŽภยS†๛# U)rทmCJl_จHK+ฺŽ๎่ณฝ€’ๆqฝญkช อd—็šอี-สเl฿ด_ภJGศDU1Vฝ’2ฉู,t1Pก๔f!ำ,กืT้Pjฏ—€ซ†rโ9Œ0E<ค1€%d–EฦำIพN†ทVnฉํ(/PZGWp™ฑ้๊ธ๋^tฉœแฑtnดฺณ๖ดฤ(F‡๕๊ไ&;[น'ำZฆ0˜Evผa‹nบ,c=บ#ฤ ห๐QBปc^Vvณ\๖ุฮ-"ย”Ÿ>™4f…Ye‰hgš;*วาฅxฬX%`ัธ &–R ฅ[Jดด2ช1‹‡ๅ๋Z”AKดUK) .ญฌ“MY›R\„‚ข"๔@ึZ ฅ๘คบd ชœ} ๏2Ÿ%?D๓ึh{ะ`bึs=cฦ€Uฮ2๑ป ไ๒k{( Gtฐx๔๗ล=ฌฌฌฌฌฌฌฌฌฐฌฌฌฌฌฌฌ#iฝ๕อs๔pฐฐฒ’;๕๏นžiƒb,&่‰ฎ๖v฿Rฏดd"แชลiฃธŠ){(ญ3ฏP+(Qeดšฏํ›๐ชกgึeำSญž}›V๊ำร๗Q$>Q]…บ7ฐฟฐ ้SัุยœV๊Z–ŸหhQฬ:K๏ฃเI_Rฤ๕ภจึKz–[F฿{|*cธ|šžพ€ตโฟ7G7+++++++++์ธฌ /ผ๐’K.aฒห.›Wุ7๘›ึไยหๅ_^…S|po๗ษW-N€๕ะฯHrO*Ywง$๔‘แ*lWKRฅpe s}@r+ภส> X6ธaEfcํฉฮtีย๊bฏฬaเ”^BุกัBK4ท&๊Z‡ฺ"ž#2ˆTภ—‘๑I๋Ki˜q~:ฌฃžซบF'H๗ฆ_C7จ•ซฆญ–ผงZnิาw—.๔๚hิไ˜™cR?๛ฤึถ๚Y‚’œžฮำ่FF<ฑEผดไ)-๕ฌ^หš[†"…E/ fอฎ@Wฦ†HณHuื้9xAช,gขฒฺปC‡2|ฅ(”…มซฑ._˜eŽม‰ส5๔ฅJr<"x๑ขีณƒ€ณกjฟ๚์eJ{๙า~uฝ„SlฑZฝนŠ‹r!Bœเ)t•ฅบำปฏ%๗ึ8UึดV;5S้สtu่"Iƒ๙ร|#ไฦGฝdnยRฅธฺ๚ฌ้zC]@\ๅ ƒฺฎ๗”Z„Z&ฮฮ฿2 -8,ุ[—:ๅ› fyษ๗๚eฌ•๔๛s๔ “ฌฃฬ-Z$2`]pม฿๗‡k›šš ภ ภ ภ ภ ภ ภ ภ ภzถs็ฮ7พ๑๊_+–>๔ะCsNr…EW‹ขv?ฒบ’ฃัPลW-ทั”ไ>2ฮ™ใh $=!QNfG1!ญCWYฃ!-7?Ÿ“ำฦj&6าœeอwB~+)ๆุF;ไ!pQฏคฮhtข2PC ุ9ธFTŠjีD…z{W†!<]†r่ฐ1ชก“๋$ล7ะเE๒;้ํ 5ีujEKSปง์ๆ_tไ็๋jt$ทRdPE’;ดค- บผ(\8U฿๑\Zำณdม›ซ(tOํฬw‹;pb]5]Š*งŽ›ˆd1r“kูqฒน5ŠXฦ]ห22HXะ "‹ึ ่๒ต‹ฃo ํ8ฦโฤี ฃRตกฬdชœ๐~ohฒ—tๅ๓q@ะผUŒei{œ๔ฒ๘ ตˆ 'z<ฯpn;๗Ÿท[KฦูŠŽ฿ก'r๐ู๛‰‰&๗ฮ wตฆUŸ7;์ฑยcฐ๔.CWผใ|.S‹PK“4}๏cฯLŠโ[Ldwวi๒rโยKXOพ๋์9zะIึัdโชห/ฟ’lz8>>>oผฌฌฌฌฌฌรhซ๕ฬัƒNฐŽ{๘แ‡ฯ>๛์]ปv]|๑ลึชUซN8แ„K/ฝ๔o|ใแz๋ญฟy’๛’๋ไ)ษx ’^Qp ร}o_7๙ใ 
๓๒ษ.OสŸC[“6ฉ|รrrั#MŠ ึเf่ชJkงt๘œ/Œ Ft,eฉ็งˆE๒Z่…ฆ˜`ญ;jำพ”TOdื๒ฃ“า0ฃƒจƒ#‡]‡๊ฮ=k{บ!๚ผ"‚jFl…๚Jญ33ฟLUึxiญrษืๅ-jๆ!๚(Ž๊๔A‹/p]:ซ“ี•:t๙BOไv7m/ร…ฎZ่2…]Yƒินทฌ8>H@ฐ,Ÿง-–มิบภ๘€6ฅ `Eั&ีุ๓ซงท๋๒o[ เRๆ’;็cษXฅจ•HญŠฆCฉe๊๖ ้‡ูhต_ฦ๒น•ลuzบ^็๘[…ีS#W*ชB„Ž’้ฬ=ึIy.ง&๐asภH฿S~„ฎ,i~€ช a;HcBฬึ้ญ๗:๖ห๒ฦvO๑วๅDต๎โžPก>iฉ K`ฎb ๚ ั;ปvณ[,ดมO– „r<ทค+ฮŠn-˜ ภ ภ:ผ&ฎ]-[ถŒฑฌ… R{ผงง็†nx๕ซ_ฝdษ’ฌฌฌฌฌฌ9Z๗ŸพmŽt€utุ7ฟ๙อO}๊Sฌ๛,ข„nsูe—}เ8๘q†‡วีำอ๖ฉnม๗<ตt_kํžี์zxแžUK’ฏ\ผง๋žฝ=๋)-๗ฎ{Lห}ซwMŽMe๋hนgdhฯŽึ๎ฑ{ทmท๑ ๙gQ้แgึi%้‹ถSœh๏pฟ6๎ioKู๎7ฆงv๎ž•ใ`ใฉ‘$วะแฐzSฯ&V›ผฅฝซำั’•]๙4ดใุdตใ3;'G';#“rญŒKžu๚†ฦ6๏W๗งฅvแ!+Rื6พ}tR[ดิA„Gj™ณืวตฤึLn~hS๊@฿า–kฃษฺ๘ศๆd—ม15’‡™ C๊%'ไ:tๆtฃ๙ดuีr^N็ฌ6ญ:uiเLgฎsS๛๑|‹t ƒนฅฺซ๑๖bใk_\;nฎ.๓ช๑คZฎห'ฦIๆ€ลจึติ%h‹h@ฎ ัๅ่JL!ญtzJ ดฎํ๗‹ฦzๅ๗ญOฎ•ล=๑%๋ๅ<ฅ•‡6%‚6‰ฑใฏ+ywKKmฉฎฺ‘‡‹ึ$2S›žุ๒ำ'ทji๑Šไื-๏“k…j#ืรเู$ืณ~ำช~๙อ]-œ—.ฝ้ฤJgฃฮJ็ฉ 0Uำ.Y7่ฮ7Aฎํjผ2g:ฏjํิ”๋๎๑™ัSr=ิmื{ก ฟา7ี฿Yฏ~ํผห~วq„กฟqจzช•ขฬ|/ผๅเ/๑b}t4‰า๒๐๖ืk฿L?๏Ru๊o„๏€พG|w๔P7Moo„ึ๕แ็K๓P-้่;ฅใ๋‹ฉหส}N้y-้ฆ๘ — tlฬฎ}๋้%|uXaX‡ม๒–ทผๆ5ฏ™ŸํคlงœrJฃอ7xฮ9็8ฯ=๗\|bยยยยยnฟ฿>G{€utXซีฺ’ญฏฏ๏ใูดํo๛}๏{Ÿ\z้ฅฺƒต๓ฆow๎ผ6๙ืO=pK๚๏j๕ปŸธ3w•=ญ๓oV+กต~yสU)ื๏ช๖x๚?iฯ๐ึ๔ีภzœญ๖>ำณฏต|๏3๋๖ ฌำ9์ธo๋S๛Zkซถ๘+Kห‘!8ำo5~ฅษG';“๔'ž^nt8ญŒ๏i?“๗ฉ6้ฎ๏ืd๗K็รยอ๙ื| G?๙ใGฎ_ฝฉ–ศ?1๕P๕ำ“f์ซŸ๛ฺw๕ภจh]๙)ฌ]ิ’ฟ๘มสŠ\ฟ\ูBๆกมฺmc}๙็f{’ฮ็œ‘๊ไ?tฺZnญ…โฯ'Z๐๒ๆ2u-บ็ฉt๔X๕๏ื`๑ ˜Ÿเ\‹–\น ผฎˆƒ๋2๙Ÿƒ?จœ้ฯE๑‹7ช๒ žฟ๑ส;๑,IUwถ?ฐqศ`โฉ้‰6P}E{๙ข5ๅŸc๑SนAใฯ-๖๕ฟV๖ฦŸU‡liwๅ:Cyบ?…Y๒'V้\‹\ื๕p]™?ๅ?XMจ›ฌOo๔บ™5๕ๆฟฌผพ๒6~ว๙sL;Gfล-ทิp๊cs ƒU^l๙'\mฎ๊c ๙๑ฝใkจ›ฐ%฿yฝ;rbLช+›w๐~ญNรQฝ}|๒ฟ/๕j{๛LK_Rะ฿kใV๓ฮ`๐฿฿nพฐ8_๖—ๆฌี๏yว=่$๋่3‡ปปปO:้ค๐‡ขฎŸ๔ง'Ÿ|๒ชUซพ๏~ณ†ฏ๚๔ฤ‚หไ“?FgัีS‹ฏณbQJรz๐fœ”,2ฑ˜T˜ไz‘ฐB‚œ™ƒ)jหSxJบ่M 7ฎ๏ํ๋ทฅ;Vkํพ หSๅมพnตI๚XYษ}๗ށc“ญœ>…ทs2Sั…gFa*q˜%เ™ษศ\ย๑ฌ‰…:C…ฅŸ6ฺ7<ŽŒ–œษ€ฺX฿ชด๑ C4c_๕ฺ}๖๎Vส฿"BปฅD9Bญฏจง†1๏๑ูAb ‚ˆ‘ƒฅœ๎dzfM2+p$+ฮ“VUN)hศัC.s4U%ˆ…˜ษ[N"ฑ๎—–ฅผ;*bิ=ไขP ทบ7‰Aบœ‹ชลœชjฃ‡bน๎^ฃ[ฌ•ล,B-ญํ๎$'ง^y^IรRหF้@{97ะ สน„.;ธ฿ู‚ๅฦูาํๅมฒVt† u๛ฒ\c™’e™{1–.ม‘( 2+“™งm๏n”ำ ๕ E\ญซ๕ฺWฝง3ทŽ๓๎.ฆวrpฏ”๓IHJeŽฌ๒b|f58ฯฌt›ลไ๘ข}O5R'ษ้รฏท๗‹ธฬดีงค:"yลฝcฅŽwt’JงW’eE๊'_^ึ๙b’qลท5U]์แฅษม ภ ;หI๎ฒป๏พ๛๏|งะ๊œsฮyม ๗็œไ%๒ั๋.ธๅซSw^“๒ูณธhJxฟ๏๊็h=%ยk‘…ฯ๖>"HขZ3‰ํฉNฺ”ฬž 9#"*fJŒีืญ๖ ง†ถ<Lร๖Mฉฎ๓ถ ผ6ฏฅjอ“"'’ู…5IT5ž3W‰จฦณ`^ัเErฝ6Ždฝ๕JtOฎ8กSTŽƒVิฉ—ค์Œถดฎn‘ S`…š์T2ฤซ\jง๐ั>€ท X๙ฟข$01Uิจ-C๕™ใ๎ˆ้่yQŸDwhืdFkา™ฮอ•๖ืuุ‹zีtู(5๎-W]Z์Vsp]ฒบ‹'ฅ#ผ้™๊)ณ–ึ-โเดqืœฑ—ฉ๎jLๆ]9uฝฉFแถXSดกn๊shิรq~#ษฝ, KยUฎ]n7`™/ฺ4์ŠฮVฅฆธ?i.ํ์Zฮผ#|๐ยฦ)+‘๚˜&6ิJ์(ำ ว`๙๒ํฅX‰็ ภฮทฃฟ])ผ๐ฝะ}เมฯ*ฝ01฿hซณ๚+ใ>^‚งดฬฮ๔ึ๖ัIkพฌฏ u๋Yดc๘ส๓ไ\ฅ~จp~vพdI๎kฮ9zะIึ๑eXXXXXXXaXXXXXXX/5`ญเอัcภ ภ ภJ9X;ฏฌ<้น/ผผณ่๊ช`†ชX๗\/WU‰Ylฯส;๖mZ‰ฬU*,(จ2] ๔V ณภU‚!แ”ž๊_ปgxซฮ!%ย๗u'9x-7,OชZ5cYท‡•žปAj2ัVา|าํHf"L+ฺโ<ไ:ุ้[ษฒB])ŠY ฃฐ ๊Y๊1ITข“ํชA‡D%jฦU–•*ณ—p๋`miO€ŒพŽZ๊’ฌX‡ฒ†ฎ๚Xบ%ื™ซซE M,J%–ี๛๋ฝฟศ๐ ืฆใถvผ—(oนX!ฮ’ˆx‘H$x* รชๅธš9-Iฬ„ึโžํข €IK๑“ซ๘A$ฆซFV O%]•Eห ƒe•C”ดดœ-p…ฎW&Z5$ๆํษมZXdฤ—๎ฌ๒ู๛7คG6๏ญHb“#๔ฏ๋ป]ขญnฉ?c/”€ๅ!ฅฦัJ`$็$<4ทT๖๎(ฌRH ”1`๙ท„ๅฆรใ‡ึะhี@Wบ๙zC&’ฑ่ะฒ๒v[w๎Rภ8Ž %€5˜ x"๛Sž”ูG'œLษw“,Rใ”ฅฏE€€€€€€€€u˜ํ้ ฯฃว€€€5ถใ๊ฯTณqE็—WM-พ–8 งข๐ฎ๕}O/Kมม๕2สƒ"'ฆ๕มXUpPŒ%Zส่0•c|‰&Sฐ;ใ‘ˆj฿š๛uจt@…}‰ร2ฎมRLฌސ#€Uฤ0?ซNงคซXีฌœ’Q฿;• ™@ฤิ*"nฺจ‡สƒ๊˜4ค‡t…D Aว็ต>ล8S˜๙ ่Yฝ;k’รf๒๊!๙žˆ๓Sอว$"฿“้JวAhtฐ’{ 6ขจv]ZtฐR›•’ฃ/xฝGtฐX๗ั?žฃว€€€€€€€€€€€uธซ ๆ๘ k๊ฮkชr„5`ฅJ…‹ฏี::XV ๙ๅ$๗”แN’{ฮgฏขkต…x_JKCLŒํžี9$ะวชะj๓*ฺ“๔h)ัิื๐^ล 3opษ‡[3สๆx็wyฏก\๏า„ไฎ:ฟ•๎’X‰F;งม๒?J6ฝY œ*ำ‡–ŒVึื้ฎร (jjำ›{C๕ณ$๏Wื2‘าี+:์T์ฅKv\Œu]Ž.|„`4fkฝŠfฦฮส  tฬX–๙แ๘ฌX6ŒสZึฐ+ข –ฃu‰dm;ม_xA|PไDŒ์มรZG๘Š์o‹`ษi<;J(๎1ๅ”zWf,ŠยXeมฒึกัŠ—†~8>ธ_ภโuA4=KcB„f,k€™ฑx10’ฉE๘dฮ2„ŠœRc&Ayๆใรv๐ฑ&@ ถN›aห๓0๏vย{™~4VY“/xoกtUŠ`•˜E*บง€๐ญแกz ผ{wW…ุ ไ๋ภ{ิ›uๆ๚ำพ%ืฺz>—ส™Sžcถ ฐีนzyซ๗czŽnVVVVVVVVVXึแ,ิไ`!B–จK[%LพieาYศ)ํ๊DดศเKX๋?q=ฌฌฌฌฌฌฌฌถแ/฿=G7+kl์วŸcษวo๘โฤMำYtตๅ`,ก•ถ'สY๐ฺฒท๛n PF๚”Ÿžƒƒ0VŠf&zItRูื]SSฆ%Jภไ(Vๅถ็|dPฃ%U “ ้„q„ s]B^‘xข:/{็hb,uvH)zทcyj]#œฎ\๙PฃUYฒ9ุeRญ็~๗ไ D\๒ 
ภM่^,๋5€YจQŒไ`1 g"หซ๚ุ๊>”Yอ’จ<์ญสž-ง[ุ Vs,ฉฑtOฅ €u,๖ห่Nš๖Š\(šหGžฑBฉ“๐^ึ%$)UรnQG ตโ˜ !?R์y$Ÿฦr}YmฐฌFš|ฃxข1ซŒ ’า๎ฺ”๖ีฃ"rมิ‡ฐบgญ;]]ฃ๘A๐ยoMO๓B’Xm#ร)ํ…2ผ![ŠE€ลฯ2ษฝฌžb{ฯ๖ižeพหP%ไ7Ÿsจ—ศ E฿:RzG๓อO(ˆ’ไ><ึœ)J2;Zส๒๖‹แชฌฐฌฌฌฌฌฌฌฃฐ6^t=ฌฌฑ‰—ฐR@๐ฮkะhH้ํ‹ฏuฯ๕dพหEW๒ค2บa9i์)3=ว๕เก rส6B+<เZ7{ตA„iฎชƒ†U2;h•ณ้IจฏD"Duย,ญ‹ฑt&“ีฬg‹ภg:f{ฑpJ#"!PUVภิeร^ฮ-หณ˜ภี)#[@LZฆแq‚Dิ|X=›ฎ 3Vพ- บiฆ„ฑ&xๅช;Zšฉ[HWNšH1!้๓ึh@šม…w}’7อM+c ŽฑยUฑสQอ*&0ซฌทlhCั€ผx#‘แษsw์ฏฬF/#†๚6็‘ฮัJsJ•ัFœqฟ๙๒œกuJน Q”1Kื `iฉ Gkิี28h*EA.ะPƒU \ธษSส‰eคํKด๒›ท๛•Xฎ+ลGิ?lฌšK ”๋j„ฟuํลฏ 1พ/ิฮโsห]%I 5ลทi๒„ญถ`อากฑ!ยม™t…่Œ|.ื€€€€€€€€u4ึฆฯผwŽnVVJr๛๑็ๅ)(–Tฟ Uzฮrไz )RS!VO L<–H˜สโŸ#™ .œ˜)‡ว;SzจAVM%'ซ๐”ำII๎~• ญ2Tฅ๚V)๖]้ฉ%„9ด;‘ฒศ3€๎‚้5ti่eผ$+™`VฮIOEv ํ3ซ„ wููฝ<3๐ฬ่ ีYฺ๋๋k๋๕,"่D่ :ทูยฉZ|กd#:eข‡•ิjงcบmO'า: aBbฆzาธEGูEหฐถ๐ๆA—ย ลจcกหžZ)ภE`ด\ปm ยp! 5J่58!2Xฎมl<าา-MW0[–E1W•๑Aงี%t„‘๘ œ}]YํH$XV™Y %”)ํ๖E๔>ส,ชI’ปฝไ6‹2”ด†`ฉcm‡…ฑ^™Ÿ-—๏lt_ฃ!จ™>ํj)ƒ‰๒ทC{qปz๊i+|{jดrP’O>_ฅม๚{็7#`น๚M#8ฦ ภ ภ ภ ภ ภ ภ ภ ภ:šซ๏’๗ฯัcภ ภ ภ ภ ภ ภ ภ ภ ภša[ๆ่1เ`๏€%rJhๅZ„‹ฎ&๏jjษuZŠจ*บ๚ล)=kษuปY(พัp.> %_"ิ>›ดบŠ™ƒ‡XžืIฒ?บZำ9X,ึIร*S )_‘4่ส)Gฏ|ภBŒœต๏ๅ-ห’๗€ๅZัb๎0oPO๑}๗๏ W ี^ฎ‘0XคXฑิ #w๙XaXXXXXXXG`m‡ๆ่1เ`๏€ี%โง๑พ('Xัี}7LyอŽซ?#Oกร<ป0ี"์๋=ด5ษ‘TzT(ฐg*ฒไ\e‘๑Š๒,B-'j๓C#๕DคLด„Œแ`_ฺจง๚ื6ซVxg"EไฤยwขDออ‡ษZY็]ฉ;b Y๖fbL‚ฅเฏฐ—)Mฎ!–ฝ,†^ƒ2ัฒš!•งหC˜ŠเเP-น€eณฮŒ–/ัฉO฿ษw†ฃ๒`l จสะา_Mณภิพีฎ๎๎eษ^ŽXyGlCโ\ pฌYx`คeq,O39ษ ฯ•ก:W<๔lD„ืiฃK–2fy"!+ =wO!,c—ฆ7ฯิFKุ๒ำe:DH์OืH&Zz กๅลหˆ^)Oต_ถ(Eฐฌํdก2ืŽ<`๕qดฦป้ คฏdภขr(ง]‚fw-ย˜Y^ฃ๋18P๎น~|๛ฬa๘๛Žk;ฯj;ป๓†ฏ^๚ut$/?+,++++++๋hฌ/~xŽnึ๑Xโงแซ>-ฬ’W๎ฐ:‹ฎoM,ธ J๒๎๗ฐ๏้e{Z="€* ฅIอฮjL•๐:๕๕huK%wƒTEษ๒–“>2ภU๑ม,…Uฉทgีซ๔A,žอ9์Dฃ$LV•ฉp){๛C๓‰.ฒฬa/k™•zะ i๎^๑ฒR4‹ม’ Cฌ๚_=ฝ- sNL_l>แNญฦnw๋Aำใsถผtจ––•๋"HaัฌNgFL–r‡\5c0? ]"[ญ™Œลี้!QศV-3ๆ I๏ฬ1›Ž j`mฮ1ูr$2๋ฬwยj–’B๓u.lP!คN?tภฮฆ‡: ‚[T!ด–กญTu/ลดสDxB“:1๓+,=$ี9ๆบL๐ZŸ@Š+…xJ),Wสs|๐เtๅB„๔๚ใg‰๖’ดŒ=…ฐ™ฝ‘^๊Ÿฅ์๏#X๛™๑Bhr!๔}เฺKŽ$ศ/ พย–‚๓“ก:|ฑ•/ม}FCฮ?uฌุ๘็ๅ‘ๆห#:Xด.๛่=ฌฌฌฌฌฌฌฌ—ฐv๏}ฮ9็<๖ุc<|๒ษ'ฯ;๏ผ๙๓็ฟ๕ญoฝๅ–[bศภ:š๋๒ฟ๘๊_ฐi˜บ๓1–@Jh•ฒฺqลฤM#ภฝ๎า‰[พ*ฺ๊๛ิฝฉaฎ˜u42"Lk+ธn`ฎˆ๔I๎ป:ˆ•ปeงฦ Lา‚ฎาณz*๋ถง !ี๛าq๒.dผึ‰ ณ)J=๚Jภ….าPvึf่ฑฒฅ์พŠสQะีy่4๙†(<ิ…C40ก๏เb…e ฒฑ้‡ูte%z๔>tๆe&;๏๊ั๒2Kภ๊ฎ๓ฒน9.S๘‚€UŠ(ด>{Yา๋N๚ž=Mม“S*๒#X60Q•žามFื๘ำgฯมพโ#๊Ÿณ๋+˜\{๊*ข|€ฌๅื-Sbb๓ทž(!๓BีลXฉ้j|ฒ๓าDH่`๑ฬ฿๙_bืฎ]๛ุวๆอ›ท|๙๒<< žvฺi—_~y__฿ํท฿šืผf้าฅ1j````````*`๕๖๖a6ึ‚ ๖ทปม็>๗น‹.บ(Fํฌฃฐ๚ฟxกหYt๕ไฯฟ!–’O.ผ\tตํŸุq๕gฆ_+ไฺปj๑พM+…8iภN‘ข”3-4Šผ‚ ส)็9ฉมž๖ถiภ"8˜Iซ”{จbน=AฦŠีˆ๒*57จ;KทoJบฃb,PŒฦ9๕พช“่,๒:๓ฝ,zฬ"ั•šดn๗ฐ๎Xหะ@Y•ฌ\ืŽDdำดLƒZ œไqำ’K`Z@;sฯ~หฮxSัm{๑ภB 4˜1ท ึq˜*"†Žล่๘ึKd )นศทXห6b-‹9ฯฺ7๏H€EFp๏ฬ:q0N~7โœไe“๖Nธš“$ล ษฉpC(ขqส๙๏^)E๐งJˆาg๋‹‚tzQฝงhเ XฎEศEู Rc!•iัCk3 ทฑ—ฅ3ห  Yd๖C3 ŸŠ#X#5< ฮ๘œญฯ้’–q=ณWซŽh—tUzoCยAฟบ'”8ๆhŒsHฑึท’3,ุ้๊ฌm_๛‹9๚ม/œ๚สWพ255eภ๊๏๏๏๊๊*๋#๙HŒฺXXXXXXXX‡ X6Viรรรงžz๊ใตฐŽVภj]๖ัก+>%ฝ๎Rโ€V fภJห—้aช™s฿ {๛บล1ฺ5ส&%…Bฆ!Qฉ๋9BWN–Mžฉkฯ๐ึ”ี8ึึScฑKๆ*๔R<1“ฯŠฑา๎1L$5>งฑ๊J่6ะปw๋yย,r }‡2ืพSซw"€๐มl”iีA@ะชUซqคถJN—ั(EG)iกดนqh\๋D…hฉมฏg pJNfwศาaNt๕้95q๎WธuZ†ฬช1(D•ิWTฟ(ู๔ใ๕ดs๎›ลH‘ฉฒK“ฺ’‡™vUฅ7Šม8iเpIYHว๒ –ฃฎNC`ฮฉ๑์Js\ฯ๊%0YฮิhU*—ั์JDSiฃ•ฃŸeฉb ~ึ€ลต—#`ฌCkGฝE{CทกPะSDภหฝ๓6๘l!ภhJr๒wญฬ:/๑ซิ3l9พ_ฦ7hต>O pุน]ั๘}ยัะw(V"์/JโTลpฦง้jb&]ี€ตŸ˜ฃฦ€555๕๎wฟ๛เvํฺฃvVVVVVVึฑXƒ—ๅ7ฌ‰‰‰๗พ๗ฝgœqF___ ูXG+`‰ซv^๛ู\pBฃ)ษ=Wtฎb… .ำ2 7wร๎G๎ด’*หศ4จC™†˜’#ซบสR๏tR๎yกช๕=ํg,Gงห9ฃ šาฮJกC‚}๙ฉ๊P1ิบ4ึษPBG็fํ๒T‚ญVOrํ๊!j้ssFน5<q k7๔ทง‹ษ€>Tัqิ€™‡ฑ:w}='์ D(ฬ2Rธ: %f๕ษ˜๙ UNคี:ิ\;@นำ2ญEvZผ4{ง3Cธมษลฎ™N•๗มiฟDี€Tzย(eล†pฅทs+๚๓0ำฺ9ม๘Wาs้Mข$ศถ …๚œคภCT,Y1cX†0รศUฒภไ ำh:เ%“5"ƒไถ7ชโ ฟภ —ug'น๛ฉ๒ย๙ภ”ต™ๅ‡Xฦ‹žํ3Š45\ย๙@oŸ๗-ฟGPฆatข :—)ไยไ|5ส™ZงึMOV้™@หฉpํรแ‹ฦงฝ,>ํ๚Rจ%? 
ๅฌ๊bE†vœชฟ’Xsฌ๑๑๑๓ฮ;Otต~๚ฏฐฐฐฐฐฐฐŽ5ภบ๒Ss๔ Xฯ>๛์๛๗พ3ฯ'WCณซ๖0Y>)Yฎi˜ะZ\5\ฮฯ,โ$-2?๚kอ'๗๘.าgี'ธ ฦอะ Kญ6 Xl๑ฌ6Kฝ'x๘qb–ำSt’ ”N•~฿2๎ำQซg_–“ แT๖b>”็ถkbไๆ’a;'ฏ”‚CAฺน/๘๖ัXƒฃ“ฦ ๖ ไ๒zOMZvำ˜าYN‡๒ฤC },kฟ^&i•า๐loฬ@,Sธhฃƒ3ะณ Id\XซF-๗Q•hUฮ:4]1—ํEึlดโฝ(ฐB็พ๓ึmทvฟ'๗9,๒๙๘y*.฿/^šฌฒ๒ฤ˜ฤJ&Ÿ‰ผ”S{ํHช(ำญ~วืJŸv}mgเPPNsK:๒๛XVฉ*XไŒRช9ซ}๕ลs๔pฐฐฐฐฐฐฐฐfุฮkjŽnึq X;ฏ์๘ _D๕J๎ศเิืL-นNh•ไฏ๎_b…+๏ุทaน0E“ื\:ฐ, beฦข฿ฉคี3~ฅ>จำI!ยัaFwj Vำ๚ืษ"8hจฺปๅฉ๒#ุc!Ÿ'าo $i.Ÿc‹T*D …w˜ฃŒฃeU-KLนtdหป;ฺeQwฤึพ‚“ฑ๕5Rผ`ˆfuทfศ>yNYoM:Tถ0`ัฟsyH}@-u&ผ:ค5’฿Sะฐ~ณ˜’™‚น๕ิยRห7ณปb$C]ฉฌร่กทกึิMขCc้ญืZ%ิะ/Uššด˜คูPุ"6„๎นiฆ,h่•r‚a้ฅศ† ;a>แํ„ฌ2ฺขYภแ๎Yอป฿ˆ BZlดถปภ‘‡8ึ๚ž—rฐ74[ท}z฿œ!เ@„มว˜๐4tๅ๙€,Vป๋ญ'’ฎfPxฆ 6t๐ีุlไ20ๅGญ๎V*ZYฏฟE3ฺแ๛Hxqjf•O~ฬ4่*+,+++++++kฺF~๐๏ๆ่1เ`๏€%ฎ๊,บšกpJ€ตท๛๎$ˆ๕‹+Z ฐฒ๏~dแง๎%“‘*$ฌP]ฏ "4evษช๎)˜˜ ฆด๗‘ก็s’๛P†WY`ะp ซ}๋ล๗n^•nZ™ส ชฑศ%œจๅgด(”ดŽ<วL ๏[ž,|ถำ%อXน—ค ข –~–Žี“ก/ ๔์ศ—SeฯIูฝƒ3B„ ฬb@eGBl.zh‘žมYbึ ๓i่ฅ ฏ0ฎPตะzcีด]#™.28;๊jภฒขทUๆ‡๊ พSrUIWœpฌ๖xว‰๓–ฦ.๗5ธถฃธ‰ฯบ‚ก๋:lไ‰ƒYฮ1ื: Uฮ‡ฎ@"ขŠ,์ใaC@ซ :ผ่C5Š ๚ึ๙ ^=0jฅ+gdƒb–๛jึกื"4๙ฦšntีเญrІcpุ ภ๒ฬOO8กˆBญ V1ท?!Zืพ&ย๎๚6–ๅ๛๓ท๒ฏ+ฟsx๓ฏ ”ฎLWฅฺ{™Ox‘.b:O=ึ‰ฑc&ษ=+,++++++๋0่uŸ›ฃว€€u\ึุ?Ÿขrา๏_ย‚Yท=)2ฌXดoใ ๑VŠnZiบ"ฺ'rฯกเ&ฑKUญ๕‘\k/ํ29`Gฌ…™tœฤU–“_ฏ๕.ฬByI ้d€ผ,๚เา”Joํxb 68ฌ9 [9^Vqa{[‡dี:+@ก#vฮ/A gv%d๘gเ$;?;|ฎEHฃ{&Z‘ฌMPฬ<ัจƒๆ๒ˆ& ีก ถธ=*#%'9]5’๊u๐u)C็๕“^"ด๒5#วซsสขuใŸ’๑'`ii,cœ๋Ÿๅฮ‹/3่-๚@พณnTฉOธะqC็wื*.ไdM‡†\Fไj0V™O ‘ขษŒศ )๊NT๏.Yื"lจด[ฏ }๋;x฿Cัh(‰‡P]8ฏีž&W฿ูกC‹qT˜  $ ๔P†๊/ัPกาฮื‡8`)zb1”ฒ(่`1S‹ƒ”๕C[๕'ŠฏOฉ@กg๙UึฉK;ะฬQH1‹y'lLฟว่p๒—ซฬ—8fk์๚/ฬัcภ ภ ภ ภ ภ ภ ภ ภ ภ ภ ภ:|€•2s|0E๏_โ€wkejษuBœ•๚๔u DD9โD>+0ส‰ไษs;˜E๘ฉโ•Œjp{vดt{ทmT7”ิD9š(3Cพ`(1ึš๛m^ต๏้e‰ฝ๕฿๒1t˜‡EeรDTYšก๒พnœ๙*าฤ7‡[dป3๖8ำJปžย]ฦ5ˆ5@ั&้=ุ๗nŸH‰็นŸU{็ไšฎli4๕pRzฬ๊lAษีี’ื*ƒศ3โI!BT[3Zฑโ(jๅVf—ดว ล|ฯ'ฒpก้jจ#zZ{™ž๏ูใฉิeื!ื2ๆาุฅŒj9็Y๐DˆสผlK9ฆๆ-W๗๋jMKP:hXJ!”ฑEž5*นโกDe>sf}C8ิrr*QฎซหS๎ฐ8&[8%SใA†ุVฆ๛…ิF”ฐ๑›ฯI&@^xW'}ƒ๘0@-–ีๅCB”ะกแrg๎™%พข2‚\}หvฬˆ2;cฝผ-ฦJ-“สษdU฿“฿QCY—ค”ต\‹ห•ขว[๕E9ฉ J-`Nฯށcฐฦo๋9z ธXXXXXXXXXaX‡ฐ’ึ่-_Ek4…๏น>i4hฮkD6‚A•ฦcBAT9oฝ ฦีฒUˆ0'PW)Ÿนฆ q๕Stต{F†ชํu–h•“ž…@๕*๛6ฎHQB!ั–งชธๅwHE/ ๒Tž™ฏ’๎ษฆ]นZŽV2]Mงษื 8cฉs์ำพeคฬีc,ฟ R#๚เ!ำฒ‡ฮU๏>ถฐ๚)9ืaส<่žบ4G‘?ณ\‰ใ)ิ๊งˆมY}tจŽๅ92ยฅU€U๋พฆ@!o(๏Wฆแ-&bX)(2๙ผฎซใ„_FCE๊ฝฃ–ƒE 3๋#)DจeYŒจlเLซˆŠญ<ถ Uˆ†HฆSถ฿€ฐ.จโ;oศ&"œaฑPะวกภ2ฮ่P (ฦ๎ๅ‘ฝฎe9ลก>ฟŠฉ‹๓ไ”t–b๛ เ0|XSภZธeะmฟ:ภฬฑ0Ž฿N้“35ๅ่๐Pก‰เสTˆูRส†7ท>]_็๏๛๔สt~ฮœ6Uก.o|6ิจgi*๖@ยPฬฉ‰P-ซฒT5`UtU๗hแ&คฎiฌ}”V*ค677+++++++++,๋ฐŒEช๛ไฯฟ‘2๚Y >xsŠฉ๕ฏUGYUณ้_+FัFข„Wี lไะ[5fืน็)-t|d๗ฤh LMMไก:Fญ ใ ,šsาSMžLH•”€ฯ๋๓ŠฅLW๙๕9ิ•๔(ย“กM˜a$=ˆบ~ฮtˆ‰|(๕›๔ฺ๊ˆ8wข‹่!Bงึ’•L^*gๆพ>‡Œ ˜ตดภ#/9รูโf([kŽi€๐‡งPฌึั@๎6๗ก.(”nยp Uา†ฦtAก\๛ˆ[Aัv์~่่‡R ™†ฦู–uv[3ฑาsๆ ULgƒสป[่‘6ห^†yฌTYB’สh ัส๋eดั$Wา3I๎†ฦสj?[šมŸ— –ญ?0]•๑พฐ UหˆGฐ,บั˜Eั8‚ƒำีฤ๋ˆณ็7xพH_.O€_Y…ฺณล๗hผทˆทฺำํฝ—QNโ:‰lฉษ€ล้ตว']gฺคV!v๙้!3HโนWฉ"คน>ุัX“7อ=ฌฌฌฌฌฌฌฌ™€uหืๆ่1เ`๏€ตใ๊ฯL,ธ,ฅบ/บ:ี~๙7๖ฎZLย๛พ5๗งhš๛!ญ”rพq€•FbFkโkEIœ้ผ้ฌก@ใ$ ‡๊šิ๙ช๓šสSฃำv ฃ‚™ฆ(ณ“มซTBžิง5]๕(~LKzภ6ข;ก ไภ/๑"แBvAช ช€QรDปˆ€Lแfฝ,ศjgTp„ขต3]{+๏ฮZ2๘ip…H!็€๐dx่ฏƒ#žธ๎โ9ศ$:l7X'เki9Œ*œมหโvืjสXไ[]J`๔™˜œŽข–KูG ›9ส6หteาš.ีRW•ถfc€๑หลU,8้(กA w่ศ ำษญ_๊™eล›K5"}eภq๖ฌCใ%๖5ๆ:,๑ย€ฅ‹:4ร~gE”tumถ@ง9r•ๅj๙„๘ญ›œž?Qj4๔ื“6Œศ0–็.”"4pํ็R–‚+-ุ๐ๅ๊oWะ[3ๅC ,หCต๋‰,ํz^ มN!tŠฮก”ipง7ึ€…บrVXVVVVVVVึ๓ ๆ่1เ`````````…`๖,qียห“โŠฮ/ฏช&yMฃZฑ(qีฆ•INฝฏ;)Tm\Q‰W‘า”หV๓ Qฅิ๏0ณ/ MฉทJm๒|ข]‰ฏ’LU@prˆั^๊โซยyไช|ไ *ฮคซiIงฬX.+ๆ}WFฌJ J$W'`U๑›Wi…d#$ 2u49แP฿Mืœ&vๅฑ_ฯยฝZ9ว2pึภศ$ƒYฉฮภภ`ะ?3๕ส 4ๆ0R|4๐0ฅฎ‘JR๊ฐ“‚C ldตHXU2็`U๗ฐžSู@ุ๊Mษ€ีฉห–3ฐส;oฏำV&ณึxgสษ.ณ๓ฦ<ฉฐไœK4[ฉ!qิG"ณญLŠ๒zO/–ooOศธ["ซkึ$AUฉ$๎ใ8•JO ญ€UN3dฃOฆw๛hY˜ฏœ4gžhภ„ห8ชฬ๑ๅLฬR ซ1yะ•้๗%w/XŽฐ,a9’งŽฬฤkK •_L>ฅ|€U)Kไฬตโ†pซIถใๆXAญฬE๓ฯOญžiห•า๓๘ฌ|!uฎ?ฎฆrลfwฮฅijหี=าžƒG)`unฝ|ŽnVVVVVVVึLภบํส9z ธXXc;ฏ,่Ž฿๐EaVgัีป๚™ศCฐศC’๊๖ฏีCญ์ฐ\รs'Cฮ,ผช,`&ชj๗œพบV ฺ'ฒษ:X#Y=™๎L;\ bศqฆ็ กPrŸœจžห ๚ตฆ ํeevuพD๗๐tฑ…ฮ3ว+ำ์9Q…็ะษ๓ไG8ฬฐ*ึ™ŽzจGVgอ 
1‘'บH™็ฒiฐd&":.…–{๙ษ็๋iteล@ขZŒeศร“ๆำp‚’\ƒ1A1ห_(๗Œิั:‚˜้Z˜ƒ ชfŠช๎€–บ|BฝฅPคe]~‘:D853ด_ŸœำJCTf๋tƒ๋-—5cฒa๋4•ฺโฤฐZ3ตม\rฎฟ(รd๔nŸAK๖Bฝ1–{ืฒQ๊ฮำg—ภ๋)&ฏ9ผี[ˆŒo‘kำ๐xy(วMu–O3d๘ฌสxh5sˆฐ)ฅ~ใกiฌ!…UFfห‰~๛)/จO~^)ใไ I๓“PฐuCdŽ—38๒‘&"์‹u๙หRอัCช๑w๗—‹‡C…T›?fึธ๒ฯ€‰"ศๅ้๘ \Eธ<‹รMงสิˆb.แัฎไ€€€€€€€€uธ๋๏vŽnVVrkj๑ต“ฟธbฯ๒vwร๎ปcm^•คีsqภ}O/ำ่+RG†`R"น!*‡๐ฌ\•๕ฅา ž๓=5ึาsS้ฯ๖ƒ:fuจพ ฺgO~}UฤPx—ฃiŸ๚ตั Thษฟ๚ ๏ุE—`…๗\ฒฺ|ำR[ต์tฆณwu’ฎะWชyดำIภข์ะ Lญ”ฺ่QZ/uจK}ํV=T๔ื่ะ*tก€ปZ#ˆYx 6eฬŽaoZHฌVฝŸ ฬฆวส๒๎†Šช๓†JOฟฎษฌ…Rจ,‡+Z"ฬ๏|yi7ีณสธ’eพ=๎บไ`ฝฝ‘฿jO‹ืซeซVkdy›จJiจr<ถ˜VษO=EึyoฑฅฌšgํฅR_ž6[2`mQ…/์™ูlvBทUพ—เ0tLส,ษฉdฏ™šjๅzษฏž6ัPซฒชY9กข* ŽdแƒมืDK’ว ถ)ส8r็sะรฒg{|6๒ฐงธ™|<+B‡E+Ž#8๔ž™Bเ“dฅ fษ7๋+๊BŽฤ๖"พจ’ผ*uk1๗ฌฐฌฌฌฌฌฌฌฌสฆnฟzŽnVึุ่u—ŠซศpŸZr๎oณ๒yโช หS•ภ}Wค„๗4ิ–„&ญk W%โw๙a๕GzSึ๐Xฆซ๚w„:่2ิภDZ๚tˆpึ฿๏zน*๖Dคrธ•vษXฆ.5QB–c ƒ[LPฑT-eN~wUฏ.ฏฒ๒-bN๕รฮ4(06ะปื. ŽNgj# €^ธ\ •ตธฺkPฑF๒ึiดฅŒัpฅ*๚iHy3งต™เ @+t&ฯฝ–wgเadjH]Oฮุ˜ฦงฉ€ๅ8 ghัRV€ีš)p๏กทQะะ้าผA„t=0agวศ&ณ:ำS(†;n่8`นn๙ŒๆJZชtwŒ์ฝ‹ณUี•๏W๎ฏ~Uท๊W]uซ0ัจ1&&้ดฆ;jง์ฤฒr[[ฃฑbูšŸzฃ’ด1ZรmB‚!ะAmD/ŠQ ”ทˆ€ศ8œรyqž๛ภโ๏;ๆwฮ1วš{ƒฦƒ๒ศX5ฯฎตื^{ญตืZ{Žฯู฿1ฟCทcIeพ‚Ÿฌอ‡…ชž,b,ฅากe)ปZ!ฟฺqีdpด^๋เ฿m๊๔้m บ7–ี {ฬีWำ“Žชญ?—ซ๒]$LอJV\HƒHd5ตMแDฏ,^โงะBฤQk1=๗KF Œ_œ๔๏"๑]hุ—ๆ๏T,้…บฐ|rภrภrภrภrภrภrภrภ €๊งคyภuภrภภโ๗h54{ @Š™์x<ธc555กซีณ‡Wฬ”ะLGs6ดฺ๋…,i๕ู่ZะฉแoธsŽaM=Ušะอe;z]2ฅ:lมRฦ"oลDu%@RO'KืB!ยXวฐฟGDฎPู0บŒฆด๎H&"Zฑ)i1ก;‘DQŒ ฝpKศsืn:‚ sZธ0™ฤ0#ล #$ู,fา•ช3=U/F> iXั๖N<า kา`KฐZ l5พw(œmฝ๑z1ฅ#ษULaดM.ฒVฅีSคฒ:YบฒTชI๎ธ๔jฯa๓ื›>XำTM‡ืqึŸBฅ+=Q*]้๚ล0zFฑ\Rิ7ดvฏ ฅFฝvT่jyด™m๎PำBฐณƒ‚ ]R๏,บปš๕ฟปŠP๕:`ั,ชZฬฒC41ผงPตo+*]ล|’วีa฿` ›ZEIด=/ข6ถ9๕พ*,ช 0ฏชOขwZ“ํ“์ำฒ๒i‘พDOษ•ๆ่ tK„ณa๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ“ึฑ,ฑiFฃ๛็=ŠfjZล๔vโหกํ+oYŠ—ฐะฦER?‡ O›V‰bุผNsฯํไาy…_เู็ฒTฮง้/J|ษษื!=j‹๊เทฏ=ฦ–M๔ปพ2ึุกีBW+:Pฉ{Rฺฑำ˜ฬ๒ูฑZevz:ฐ8“๓>จซช„CษUkepPบํฏฉ8Dgย>ฌภจPฌF,zึข=)m่8jqก2N>…บcŠˆ๕ืท๖ฎฺตฬˆgฮฏฎึA็‹tT*m(`QŒ,EDV๐Rภbฃg,ฏZcดชขทฦก^ŒฒXฦๅ!Fป”;<”ภซศyืฐZ„Žj๘ท•X์ปTซU8S7N†็B;ซวหdVหซ็!^อ๚Fภฺห‡Zฒ๖๕๊^CWี๚ฆ4y$Šjฏšป^ฏ๕ŽฏถชŒ]Šžซ” Xีแ('ย๎Owฉสม:฿ั;P๏กGซ^v( ฏฃต>แ๕ํ6ธ้2N~Šฦกi`MvึตลธิB9tžN^ภฺ?็๑6ธXXXXXXXXX>9`kภ๊œ๐ƒgว Xก๕ ๅตq"ซ่kmฎ+ฒเบ? 
ำ,~^ฒเWฬฤB%-แžศXยdกœ0)J][ŠฒขXQ&PšQgหh๛ษXnฌJัะKŠทยฮตฌ#y๗8*ภ@A+‡-เ-<*iaผ]0eAY9ˆZ6G-RณPhฉ‹เฅผลิืวo'บ"aะ“ DxBcˆ ๒ ณฐUbl`ŒQ&ภ๙ัPg‡อซžฒฑญwi“Xฒrฝ1ง)\ฦ!ๅ้dฺz8ึh”g/ึา)@™๚ 9!๕•ผ•ฑl‘์~ฃซjฝ,ฎo€lศTา%œ’า€€ขŽตชฅชLฉd‹๖ฅ{ฺ{K“าu3๕ŒUHrป๋|,ciฅหF\ฺ3hหrU1sคVvดิมVฝBCญoํZ‡๑ํจ‚WฝzhMb๕œซ8” dล›$U าฌD1”่ฌวฏ๏ต9๒ึ˜—oืŒ{j{ซ๙จีBฮ0ธtlMร๖—2ะI X๓ža๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€Uฌ๙Oฐyภuภrภrภrภrภrภrภrภrภ๒ษ๋˜Vว๘ปบ&34{ ฺyOIตมไ~h ™ฉณZ6ษำญหdต๙Sๅเ…ฯสซ2ฺ&…Dจชัร8>ฆิฐง“จ8”,FSJคซ`I%{iZลƒš009Œใ IQภ& W#fต^ฒมB E~ &`1ŒF\\MึLƒๆlฦX ŠZ่0 ฆ‹๙LฉX!‡ŒA9ฆเท๚มใ{ญ=[BoุัWkOคลุL@bcก†U†7– ด%€nุ…ึญรำต-=x๕==rบภLค%mj|•๘’ง~`ูไ]‰3%ไQ„บ„ถ@&š ™dต่$–เ๊“ญ ๔jฺ–อูฐeเ๘^32ั/uำ,๋ ฎhลPส๕๙Tวฌ‹’5Žยiฌว<จ๙RšฐU–Eฑ–#คFฑๅA๎HžฌหWร)eš†ฃZ๊ภte…’zฒVลYต|o!ฦnส2™bฎ…Z;ZPr›Bๆๅ~ฝ= `ฉฑœฆa๑ฎVHาก —f@าใMอ๋yฅฌำ๗ีo์ู”ฎbฎบT2ีึ3=V\u ึ7k„อฎ–––––––––OXวV"wวภณc„™ๆOฅpJศ 3b†WฝvpŸ€,˜T‰็๛Œqด}^1SZห&qi›•ฅKQ9+AาีึOfHyP[๐aาแ~ั“หแQสR™Bl“c Elู$ ดE๚หG้‡ิำ‰—C็"rํ c ้ZNHHqDภ ๕ ej…8˜p8ŒPg0v๋ดbฟฌCบ๚ญ#({‚?ป๕[R๗๎๐vๅ๏5j2ึšฐ ๋ฮาlŒRับ=แTคจคฦๆ๙ญฅ*ŒŒC`™๐™ฏฏชซ@ฺ+0]H„ึ],ฆ” ’ฉก:fี“Aกrตpง)`Q๊IfKUw1eฉvใ!nฯpฝืR7šฯRฑP ‰Wฟณ/šณ#๕ฺ๋8ฝึ ฉ๊๙ฉฝ}๊Iฑ์ฎฮร-•รุŠํ[Jำา~VŠญˆƒผ ๛ศึZศRA\'้้%)q์ฑQฅร–v#ต0€ท#y๗ำ์Šo/๎%ห๋qœ ๚ภin@๕žWงซ‘@ี)X‹ža๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ“ึฑฌฮ ?่Ÿ๚@๗คั”%}BัืšVญUืฮฌ๕ มR€ชมA๋{๒iX:M2฿5IB€0_ฆB๗๗ “ภŒๅฝด"SัฉEา:‹œxฒwตฦC่Eวท,ฺฃYWเ3Aซ@"นB-B9€ม์zއฉ๎’˜ฯฎึ–"E๚, ตัโฑ๑๓ถ‡B„ึˆœq‚ฌƒG*G‰บ๛k๔›ถี่(QํฎVฃ,b๓ฒญ,ลธ‚๗‚ฅh8ฎๆLxบฒน{iSื๒]XH_{๙ฐZgะV!ฌ*ค•HSE+5*‹ถXแTdบาT๗DW1•8e่๘r_ผ%’h+'ึR 8ชฎZ3Nฃฏ•ข ฃAk)_1โ,ห็ฉŒฅXฃ้ี๕Evื%›[iOYG5พuF๓๊ใ฿;uฬข_ฝHW$› mฑK C…๛‡ฑU+0จฐ +Žว˜อv/Rฺํ™,ษ&ธš๑*ฐ๐]ัส“ํ-ฑ‡ก—>›Wี*ƒ!Xhท7ฮ9ELœ๕ฅซhอzsโพJ๗pดซปํ?:‰kษ #lpฐฐฐฐฐฐฐฐ*ำ๐ฒGุ<เ:`U่Šนํฌ‹Ÿ•0 •pVนฐ|hฮใBWม๐€ี๛๘}hX‚†ีXPป3็LฎR‡๔}๒ฝํ้เซ1ฝ:€ำฯ๙TRื™ 8i์์m1ƒ^žา‰)+ ’“่!ลCb>ๅห[jL€%ญ0๚J$ง† `…`1ฑป-ๆฺS$c เจ:ท4Q—~ำปMฮ๒NW—ชฐฐฐฐฐฐฐฐฐฐSlGะ>X…P ชี `%3ฺข7/จึภิ’n{‹J๑ ๗ —า~ฦ.ŠิJW:>C้*5ฌใX ซฏ}ํkkืฎ]ฒdษ็>๗นiำฆ9ะ8`ฯ้•W^5j๋ฯ๓•W^y๗woถm๒ไษภฌ={๖8`9`9`9`9`9`p:ธ๒•ถฃoฟงง็Œ3ฮx๋ญท๘!์G?๚‘ึq›๖ํwษ%—|๋[฿"`๚UฌD้†n˜0aย_Xh ฅhšฺเKใฅNลฎ3คŸ๏_๘ฌึภ๔ฑำฆผˆ6ผ๊ตCVHž;:ำแ"]ัC๐Mw›Dูฎ–˜dJ—K:0ณ๏Sป?[xoNฒฦK!๎ส๎‚Itn*$ถำn”มžโ#]'ธ0+’XYำแC |ฐ‚ศ>ZMณ)7CดะธBนJe ๖d*bS(•ำ= นํ* าkTSช1฿_pJ๋pS ํปรhv*ผ๑๛ลo\๐Cั๋5P'ŽตยXม%$*ิ:>C๏y๚ ‡๐ฐŽXซfฐ}๛๓ๆอป๐ย `ฐN” \5nธัaยำ฿๖ทื^{ญพ บบ๑ฦฐฐฐฐฐฐNpภz๒ษ'้Ÿ้ล_ฺืพ๖ีฏ~๕7ฟ๙อŸgฌใ3-Yฒไฒห.ฟฝ๗Kภ๚้Oz็w๊ ฯ<๓ฬๅ—_—[ํ•‰hชbภาภฌCMซุYฐh%€5ํa‡ำั—ฦGงฤฟZ. 
ูพroSLฏฆ‡BGณDูŽf‘๓Dwะไณk6‡2;cj๖ษTง๗€0ึ๊ ฉ๋LAิM‰๛= Bกh๎šบค,IMดE๚5ุชฯIฮฐ๙ไ… บ R%กไA9ANร๊๏ู7ศฒ6ฌlCาฒv`&ต C5ึyฏUœK—6uQภขYƒnS7‹u–๏์‡ีpz+€ฅ3!ภT`+๐PE"$NU๓฿sŽY[`Kหe!Buฏ๘>เฝh๚๖ษueฌูี‚CBใ 6~ฐuซ;ย๑jviฒฺฆxX!l<ถเ/ { ๆ๗ษใ@ฏฬ ๔›zคˆ5&t์b&‰p5ๆ๛ะ้ื†€ศ๏‡bฯก๓ ZkดRG…kv†WET’€T“9lY\mจน{`MKฯ[อhก–ณ@ถŒwฐฌะฒo`[G฿Žฎ~ดฝุhm)งh๏๖ุยฉŽg;œบxฮใjำ+ก๑<่™ฉ,ฑ—o ็$œœสๅฑ_ŒF๑7ึ Ÿ1ดA)จ’฿>œYSทฦ7โrpื้2ษrl ฉ ๐b้สyตx™lใ%ŸN2#๕Yมสbผ œ๏•+;˜^๊ —[D@ณ_’%Cr๕๑hWˆW96„+kร6๛๓ฝTฟ๗Œ6=˜ข๑ี~sC้ƒว ง(ฬงซฐ/-HN/ฏQ8y^๏ีฐZพx{แแ>.ไNy๖๔ห2˜ฎB8น่ผต๐v{Et2วwmฏš๑+ฯnม|๘-8JgxฌZoo๐˜ํญ}Xƒฮกw็Žฐ}๛3fฬ8๋ฌณ๔้‚ ์SŸฐ>น้o๖oq๓ฆ3ย„™I“&ูฌ_๊W7tำัทใ"ทO>๙ไ“Oวฐถm6jิจฆฆ&>}๑วฟ๕ฏ๛iwภ:SKKKs˜v๎y{˜0ฟt้าsฯ=—?`๚—๙—E่ฟ`๙/X –‚ๅฟ`๙/X X๏อa๛ภ]|๏{฿ปๆšk6lุ๐ฦo\tัESงNu qภ:ฮ“J„‡พ๒ห๏ผ๓ฮอ›7Ož<ฐีฺฺz๔๗]ั˜๖pํ•‰ƒ3ฦษpย๙S™•upํ\บaษร”€ี?๕4fb Lหœ-ฉ]hฦ2ใAว”‰u;อ่งผทILฦ9v/คLล”,“$oฑฃ’ํx`ศผแš4ธโ๘มh้ฎ๖ๅmฤg+บxT’ƒลLฌPะ0์KวMผBHLใHฦ]๒๖4X’ฃŸ˜๐ม4)kอP((GŠ๑z8lŠ+wW“~บ“-Pญ–sฐาHดM{{็oํ`Vw=ง6Zx ๑3ซvํ๛ร{ญ‹ถหฑีBบ?# >ๆไณไeฏ๙Xš‚–Mฟtภ`a|ฅiXล๘Aอ‹JXy;ฦษๆEF•้$kau–ืAูˆหnAsผ์@E[Ÿฮ๘ศ3Oฮf๖ไu์‹๊f4ƒ*:หW*ฆ,อะโ%ฮูf&ซ(Kำช6์ฉ&~ูมtผฉtู‰n&•0'N™q •l~๘์a€๚ kฑฑช`{ศ๓ี@((FY'โ"KAฏ%ฟrยp tตฆฅg}k๏สๆ๎W7ด-m๊ข;<9Œ@{AW่๕ขฉฝฉฦhซ@Vไm5 w ^๕tฅม|สtV„*Iฅ +ณfนฐJ 6ข›า‚.ซๆฉ)นึ=,คภ*Hๅใฌ>‹PโE^ซEฺณŽ็„ฟกŠDุ;Pส้ํ*fC๖jา}ร|VvT‹vซฦย|๚JXZ€Aa…่lIฺBvxช฿U^g‰–ก์้ ฌO!•๎๓4pB|JŠ฿ŸRขึฏช4Sดฮi๘/ศึ$ศ^ํึฎGแ+dAถO†ฎNnภฺบ|„อฎ–––––––Vฐถฝ5ยๆืหซ”มX“F๗=y?Zิ๖ฯŸzเอ็ฎ‹J„ฤฉฐB ผdวcีณm\$ผzษ(ข™ ม&j ์ู’PUvำAkˆbœf7‡๒dŒ฿Œ|JœŠ๙๕ โะq&ศ‡p(ู”J&’Kฯปs-;b‘เ๒`‹‘k2ลžzX‡[‹ฒcCย>ฆ๚โณk†r็!|FrชŽEฬJ˜‰ัXF๛" ‚ดXk[zศyอ]‚_X™Ÿ‚v"Žh˜I๚i6งะฐส˜—Zฝ๔œX†า’ข•เ™ไฎฒše๊x{ขŒ‡_หW~ฒZU๒๏ศu$•$ฬT`อ*^ส[ฯR`:ค ึ1_QH‘ฒ].Yต5q›์7I๎V(,‹!’ฎ"‚(`้–ซwNf/“ัo3๎ใญE•-œซ29๗jบึ%I[๊2ท„]Y•ธ(:s†_็ฮ]X]-‚cณ5:ใตใ7%yตฤฑษๅCฒ†)มs!๎+ตlGย‘+ษ‘รt˜ท๐Iา•–OXXXXXXXXy:ดํ6ธXX•ึ๖ศ๗X~ภ4๖ม—ฦอš,>ขo>'Fฃ/oXU๊โ[$}ลฬCVˆ9BศX฿jญP&ฃ๎Fต.ฦx๖่1)Wฑปgๆฌ ™L๚fx#]ลxdมˆYีbšmRu 7#… ˜Œด^;ElP@Mƒ\.*dศ—อดt๗ฃI,ฤมค]Dฑƒฟง“Ÿภ@/ใฅ|aะชVอณฆDHCQ์จฉSด?ด5!ฑฝ%ฬƒบึท๖โี-{{฿ุึัึ#ฃ฿๙ROP0ๅh“ฆษฤ‰:;Vว`ฆVฎ‰ ณbx$ภชขp)กฺฌš#%ยฬL ฐfต—V ฝ%-:ฬ่ ขฐ9ษ ภสภWจ“UOหสำ—ศXP‰xW˜2€•qแะฌlv|จ’_}">€ชล€Vฯแ I5า[LTงุงvป้~ศ฿ˆ#แT๕fศGย=นงฃYซ{กะฯล่@N๒๕0Vา–๓P5i]Gi$คจยLcง; ๆั(އ%Ÿ$]9`๙ไ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€eซ้6ธXXu=ยคั๛ฆH๒9‰ฟ่Œqฤฌฺหฐ04ิV ฑตแณ"าฌ!:Lw•rฐฌ ้Oฺฝ\๘RฅBNŠ—่š ฬ™ฃ๎CS ฑyฒq\wˆ ดu` Ž~ง‚@AAํ (fีร&ฬ๎\ซแ4C}Pฃพ†l-F.ฤฺ26`“ษ’bE5›Tฝืฺณถฅ‡9์TAQhx AW -ฌ†™๖เDสั๏TaะHrŠk1—?ฤยJฐิ4aผfฬA%ˆj1O‘โฌe2“ƒz,)ะ[อsฯœd-C-c%V(+yd‹ไQqยTYนa~z:Y่ƒู„๓}P๋3๗๋+”Xฮ…tT%T๏PZu๔˜J[Z{บด|1๙ไืกฆ@fญS๙YX@F ทhP๊I3ผˆYDVœาo~[ iE+„dco$J„zภม‘Ud_ฌfรํ^{›t@Fว€็^’?p\B ฅ+cMB_†OžฎNnภย๙Y๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ“ึว Xญco๋œ๐ะ•่ƒLZe"่J๋ๅ hฒ๐ู1RQว@UษXศผ๙+็ ๛cิกๅเPู&–ฏ ฑ9๖้-’ižดBํลL•ูภ1๔ %‹0œD๋Q“3หฆCปนำ,&๒h้๎GN—ัศ !Hk๗…;GฎUจ™€ย˜๚4fฯF-ฐƒ@Bภ ƒี5>>๏๋fLUUf๔7w๕๓ใDญปญLัภX,ณตฝh…๙•อX+hแl3๘ฐใ‰=๚€vำ2HฅJD™ฎะ‡ฒ,RRjโ๚ ฯI"Œd%B+SำLB<jใ™ ฺT42หdu•ylนžืี@Aอ6mc5ง๚ดตธไE๕Sห ˆีbษข†$w`–ตZฐฐULd/=’hฃZแึR0)มฟRBปศ‹Oต™๓มถ aะIv ตPฅ]sุรีทๆ&๊ƒ‡P Š#Wบ๗ผŸ dลkNBJeโพ–ฬ"ฑiข:oHE(KAW ค„ซ6.bq0ผๅ“งซ“ฐšืŽฐyภuภrภrภrภrภrภrภrภrภ๒ษหหหหหห๋ใœ๎Z7ยๆืหซl{บ•Œลt+ฐ”œ๗…ฎB–Uƒ4,2VZ(c gOaiBa,@ BฮP๔มŠh28;๎ึ-่yฅS}ฎ1–0„ฎพAtม๔=วปˆyภšz2YฦฺืฮŒญiศฑKุZ๔m็{ำฎ™˜U`8JXฐu โ–ƒ0˜’ฝ ๔8–ชซE‹j๒}ผp$1u,|Rอ|Šฃทฺwโcา,yW,Aภj VXxซัI“o˜mฦฑiเ0หช๐HMgฑƒณาK,ฮXTv‹กNใ_QะุปGถ๎ํฬ9R&Sญ:h!ผ๛๓เส ]i.”f™๒ˆภRCฌb,žพห8‰gfา์ฎภ"๑๎ 7‰ญJู ดŸฆ'น†ฝไห\โฐ0ต่k์hฏ›JC2๓G+ผืIจึ',U>h<โฒฎ„ŸG[ธ์hUmฑ‚grำ…}ฤ{†ใg๛๖aืสŽq˜0OฌšใเRหใ“‘[œฉOฝ ูWฌ…whยƒ๋„v\ะสหฎ–––––––Vฐvฏa๓€๋€ๅ€uฤข„ยXำ`ตปฃ๏ษ๛kฏLคh˜ 6ฌฐ\ฮš ,;ธvฎุฃท๏D€;ˆc่ท*pIิใ@Zตu@สv —šนSzS„[Y- *ไรH9!0GŸะ๘T(J5บ~'e„q+บด‡ทPูฌ ว- Ÿ4ƒรh๎๊ง๔ฦcPี&zฑCKƒ๖ุ้4pxcŒ| แต คPiRฯ-๚`ญoํm๊์ฃJธ%(†ฒ>ž=WD „7aว*ƒฐl+†tด ๚fฉ–คฃบ aัZvZ†R=฿5ฤชWธ:Z)ฉl—tบFเ‹œD?'ย™โ้™ŠžU๚ <)0ๅ•รmSบ™๋ั&Œ‹oฏR]naw ๅJ๋fฑ, TY)„clcUมยxฬฐWฦ2ME4ฒ”ฑ1“๙t•ํฝฉาภ@ตชส๗Oะ๓“p'ฟYT‡{;๙อป8p?˜›ฎBgว.•˜+: +๐SEไ8Aี-Wm\$ตPAW่|VฯฦฃึGฌ–#lpฐฐฐฐฐฐฐฐฐ|rภ๚D ญkโ= 
+`ซ{า่มใ„ŸBEา*ห0]เฐVฯF๗‡NSBBH๕ํ๎ฏํ์”duBณzcŠqญฆ5๒Xb ƒP#`Dpuขrค๒œญ‘—=ส“/|ิ,ิe‡แ* @„9ฺ๐„x†c–=า๏*1Sญ ป๛ืถศณbข}O#›ฏ …#กDุำAŽฤ–๙มcฝ9VZ์๋Ži๊S$•>์ข'๙}์p๏๎้ก'žโQ์้ƒฟ<ึgส<>]ฬ๖ีˆลr„ŒLTXBPŒœT œNฒ˜U ร๕€U‰รวแวo฿a‰9ฐ&[ถะ!8‹ ช ZกPi‰H‘่ช"ฎ>า2Ziถธaดy฿†+คฅๅน}๓ขิจb%sภ|,ลธฯz}U4ส‚5uƒถ๘ฃด๕"๕~VK”™Y9X๙“Œใ–4lโ8ฏ.SF๕ฃบZฅW0]ฝฏpซœ\…Aแ€6ฏŸ•:‰ฺ$E~–ฮ)™ค็ฐธ;ฝค(แงuาd๖ธ$Aฺ๐ชืะŽ/cฤ€ตg๓›\,,,,,,,,,Ÿฐ>)ภขPด๊œ๐ดพ'๏BแQฒ[U๗vๆฟK ฦ,๘ฺห่็.ีฤฯบF‰ˆฐฑญ<ก4ั;J=ขm—sอ\Vษoa6ทๆถวข{ O!8ศ˜6[x”k,ษRษ,งุ้ๅBz๖m+pดซv‰สƒใำฐŠฅทšฺ4จเยlq5Lƒีhฃ_|ว.~F'์…,3ถ.#มdฤ~X็ชฉณ 8ตฝCfh์ŽƒaษBœมฌเมฅ“๕ q ๖ฯŸซCฎ_˜วฎo[ก‘,{ืภฉ6 ŒM์ฬรใU"<’ศHDk฿!€ีั\ฉ`จ๚”Vข4Wงb… ฒ`บ7ิษ]ฆฌฆฅr„๙ีชน|ๆช„)™ชtUVปำv›]^ิjTๅK%H•ษฌmฝ๒’F™้œฎ่z๋ฝR}ษ8ค@ J&ชใ‚D@๐O‹,*ชbqไlดบŸ!ž =J็ix„~๙_ซ7|S‚{้ฯ2aศใ}kK๕ม8ไฟˆS€'4|eB>;‰ชhXม%ย0 ทna๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€Uฌถํ#lpฐฐ> ต=๒}H‹e ป&“ี@›๐ฮ™ด@64{ ๚AQ “N„ฎฐio๏๎ฏ &ฏฐ[”่ YWLŠDf†จ2๔tR[Œใ็Mีฃ!ลo1ƒQ}K%ฯ$“]cFะ์ดjaิ/t๘zสบํ่.J>8๘่ไฉ‚cdิโNƒ'๋0โ `wV๊ขณฅˆV-ู๏วึดJฃ5ณƒœZ๗ €ซ˜าฦย $Zํ 38ฑtqฤมdีT"อฅ3†Wพ‚Fa%2–€ฏ†OเฌZ%Qล ]X่D…kฺ^้m๑˜ฅCs–*ถถ&S ‹ไyE%า‰าŒUสjd๘(@*๑YV“•๊X)๕;ำƒ๒Ÿ๚MูัุศัฆD๏สbธ5ใ๏Z9-VยใซšNฌŠณั–sใ"\tvV„ํ…3Špพp้ฅป๓|๑v๋๋aFo๐_&!ชX๒ู“ํ‹xV%„3†]ไ{ฒพั‚a ฌ“ฅ@๓F0อK 7yœมซžไ๎€ๅ“––––––ึ Xธ!Gึ<เ:`9`}(ฦ"]ณฺว!€ฅ ฅ-,‰ค‰บศXตW&X<บEŽี๏ฏ ั*s}ซh[  ฉ…YไT๔˜๔MฺˆชPศฺ–—B‚0Hขผ4uœผึZ‰O9Bžte$ Mถ‚te)i“จq”Nโqˆ;"โ`๐ะไhSmŸ9ƒCภaะU5‘œบd# ŠJAโ‰ฬrขi4 ฎbVปšŽขต๕ศเุ0/ะูืMแ5z6๎ZjB๊)q(;[Jxฏค3ซF“(ญ^!*ึฌเยฤ-dผJ๊ซส^ษญด’PŸ2ปใ™Wšฑ เzŠิั@mิไBEFS}%–u‚ญฆ“็w \ # ` +ฮคๅUlfฉ;ำฆม–ๆใ—v ถUM>ณ2ซ—ภ’ฎซ ฃƒช?ฟTัิจฒีW• MฏUcKฤSยt•ง๕ๆg๊z-๙๒ณ5ฌ”)๙iŒ‚ฤpœnหะข๓‚ฑ`ฐhAJัjลL`%>8ซu9`9`๙ไ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ubnฮ‘5ธXXะv?p๓ž‡nW๕>~_ว๘ปhูrส,•–ฐ๑)ึงหรเŒq่้ไ‡e‡ปคKฑbqSงคf,ˆฅvS‹ึฃ;ืฦx2ๅY2VภG*า๗๊ษj3คซศYR:vYผ–fคFdŒa>X$pง๊‘Žก;fะXV™tจว•ฝcต< p)Fฃฌจ“F้kํ”œmักงณ?@'`ฎ%mh7J}Kp$DU9ศภฃเ0j 4’;*‡ฤ4แ”,ฬฆิ†7๒[))า้ัš=*i…๓ฌธ&›ณ)ึฆฌต™ย,ถภŽš5ฤช>% 6ค)W]ญ8+–ช–ซชฐา€`Šbฦ้Nฐ๗F™ผŸภข"qV5ฒœฟฏ`p`ฤk›ีž๔มlc๐“2จjlq˜(ีฅรหๅbถฏ”ฏ[H๚g88r"งล-^Dๆtนฉ<#๔SศวFUฌ M*ข๕ำy$ษฃภฌฌซชUG๚๏E๖่?‰๒_ธcใศ ึฝก๙B)iœืงคซ7Ÿ“๐๋8า•–OXXXXXXXXy*<แ>B๓€๋€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€Uฌฎ–6ธXXึ‹€ี๖ศ๗130},kข5ศวJผEภ’š†ำว๎_๘์๐บ8†C฿Kชถm@“ํ}๓6ทwˆ`bอฎะG ฏ„4–-‹ใ้")ŽFรก๖‚)dN˜ฌ€4ŠAQณL*q4ธฬซษVŽm)+ˆ#ฮ€/!l€fxภB0ษฟJwŠgปํŽ]Œ…ฒ)๚` ๔ ฑ ๔ษฆยGๆ‘Gk๏0ฦŠษgตZฬHร:ุ#ะ TGbH๏ฺ{cQมm d†q`ฆ5s่a6RcŽ fKฉZš “#ฑB˜ฦใข™xsฐฺถวƒ1XS๏5eำกb<ถล ซใตb`พ –Nธ#›ฅFeaŒคKผณteRŽฒ!Sแfฌฟ*๕ “ ืp6ธ LŒ€ฅ^Vๆใ็fmซ ภ"ฟฆQ„๒g{ใ"€พn?ศ™˜คNสUผ|&แI—™O9!ฏศปฒtฅฅ™bศrŠมล~˜cl๕$eๆ\dqE„t0จ\U •Pิา U–ฑ๘฿u|้สห',,,,,,,,Xญ#lว1าญYณฆ~แ3ฯ<ใ เ€u"[ุ๋Xภ,์šxžสhมF*ก …Xน๖สฤแUณ%สn^.#C/ผeo๏:dศ[ ช๎0œ7xะภ#8#ปฎณผ ะJ ญอคซจuม.(†ข„่$œW ไ9HงŠƒ”็ไH๚(ร_ภ4rDiภmฑe``cไ`๖ตใณ๏ ี #Zฅ„ฺ่๏%_#ูŠ๒ฬ๔ฯ-ฎ@C๙ns<ํkxXAสjลh่”F`ลฑWม.+ฎ“ L‹พลœBoผ6Šกnฦp๛Žย๘๊hE sไมƒ–]’็Yฅc•า*เb+a€งฺ๗ถ๒ลฝ(€ฺŽ…e๕ฒb!วสฐบZ*๕–ใีบ]ASeJญWxPUส”A‰\ส ‘ ]ฝ๙œH„ฐx้cmJฅdพ+1t&ชภโB<„ญ ้iQUทมแL๒?"uบข8~pฯ&~ 3Uำ‹ŽwHุŠƒ่$K้*๑“จแ^˜๎X–|๊๐าqง+ฌใ5qฦ“'O6ฏป฿๎ง>๕)g,,,,,,ฌใ X๛๖ŽฐวH7mฺดsฮ9็†nZอš5๋‚ .ธ๒หฌๅ“ึ X–ฑXhอ?พ ฅtb<[dฌ้cksŸศX6S:๚เึุัวบ~์ށ ~๋[{ฉbmต-N(‰ฎQxDCa,:`ฑda่ฤmฎqSho4;FฤXpXdฆ๘.&MทnมnฺKwช{ฬ:'โศ‘ๆ’’l๋25Žขข$วย 'LฦํkI;๊\ิVXฎค5ะGฟxฺฒ…r‡bตลภ" ;ดBส‚ ญŽ่ F+|๐ฮ‰„^Vศ#ŽzŠ๚U%fฃ%ฯ๗่…ญ๒บgi3ล +ผe(‘7E‰ะšcอTวณU๓ขง๔ฌษ์I+คไว4๐ฌ6*1+`Y๐25*…mF9›ขU฿}นŒ7•WE"์U€qบช$ิu‹โ€…™I(hjขฮ{jh๖ีณๆ3š‹˜ua–๖ ิย‹(oมuLส |L{ƒRฯฒ ฤฑศ`๚์|ป dkย{’‡D๊ๅธ ukฃ๑UapีฯpฏjใB|p<žturVOว๑ vMMMW_}50๋Œ3ฮ๘ีฏ~5<<์เ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€5ข้ํท฿พโŠ+.ธเ‚ำO?}๔่ั}}}X'4`ํy่ึ–1ท ณ0ฯ…Mw_ทํŽo๏}=–(]ฐ๐ศี7ูx๋ŽBŽ๐Tp6ํBฬบ"{…0#6 fd~Tˆิฌ<ฉ*ุ2…ั˜ม 2ส~“ซ;ฃW Rฦ3์ท'Xฝำ_~ 49Ÿ]ญัy!R”Z๊œฏ+ใู‚#ห))št๘‰ซvfUิร๏แใjูPิ:,kแู†&P6สธฌ ”าฅU.฿kีฦ๚–ะ*+ƒฦ?ฝย:Gเ›Œ5๕€Uฟฏ€ืรํ;ฌ3{E ๓ึ<ฑ… r^๘ศLิิ‘จถ+คd_;ะA=b.yH ็(Rš vบUฌy‡f๚G%= ๒ฐ’.3ๅ|†2”bโŸŽS$Z~œpดู+„_œ@WQยฦ๑s†๚u๚`ฉาRx$fนธ<,qภแดฟทk„ํ8Fบ1cฦ|๊SŸ๚ืืฎฎฎwy็าK/ยพ0w๎\g,,,,,,,ฌ8}ๆ3Ÿyแ…๔้เเเใQฃF98`ธ€E฿QV๓oBQกฎvw๐3ํใ๎ /ธJ้ŠMk๖๏D&›ปฺหผ%๛{ๅ+” มzT‚J0HO`กืnูฤฮš„ห๎\c$“ˆ้Fj‚านำร0ไ˜ณง~ฅ9ศ?.Dจ8 
๑›ฎกํฝุละเ€บ3P$fiๅ;๒YฒฮIq0ู3ฦ๐฿&af8”STะ@ฎ~น_Wk”Mี@•ษz E™ 0aD์๊๊dๆปyฤ3ลQMoฏz'Te๑%ไGณš[,ึ.lXužš+-อkฐvฟW)~W[zfˆ) ;ฟ”d;[ำฐbยูHy,3ว cUŒ94 &นคU็•P_ขัVcŒ๛m฿R[ซฑ~บ}ฺ.gั๚U GชG(ฯ˜๛ฌึฆWJฒk๑K ‚4ฌท„๒Žเท)ช™f( žฝ^ Xญ[๓โูฃ์จhฅNถม\ดrZ †PZQิฯ9ณEจRš?๋œ turV฿พถใ้š››๋ฮŸ?฿ภหหหหหห๋ฌ๛๗๐‡?์g?{๑ล?๘ใว๖๘<ุึึึฆปwo฿พ}ๆฬ™ฮX'4`5}๖ปนsยUlเ*ะ–ณจXชkโ=สUh“Fฃaฆ6kŠZ๛๊$๑q˜>cฤฌ ฒ 6ํํm๋‘4mRB๚่7ŸณQ‡จA]ƒพŽ’rฯ้)<ม˜ชU8BXB๏ฬ#~@.ฦi้อWฬŒ*I๓:ZBะั}} ด๋ HNฏfถuว.[า #Gฺoฬฅำ&รL#ค Pจ>œ4 ˆ; ผ•ู+๔ด {g&๐_O,๚5;CŠ| ‡‘ฝญJh’squ\~WŠคฝ๘*žถ๏dจSSaะp&ฃา—\Fc31ฌ~I.B^~orUQฉRFบฐถฟธyMV๕\ูรATต5†ต†t•ขิ้–๎hๆgฏ\ำ”YŸ-Jkพภƒ‰นํSฉล% ซX']9`ez๛ํทO?๔ƒ๒้ฒeหภ[ว๊เฯ<๓ฬ 6`ๆ฿๖’%K0๓ย /|๛฿w`:๋๓ฯ๒—ฟฬห์€ๅ€ๅ€ๅ€ๅ€ๅ€u‚–ุ้ฌ}๛ใ/พ๘b}บu๋ึQฃFuww“ƒ?็œsv๏™ัฃG?๙ไ“˜ูตkืy็็ภt V{{๛mทvฺiง3f'5`กmธ๑›oฝzu—+lmนฬะnิŠƒ [ฐ๐ศ,xดม—ฦฃC”.~๗†กžŽถžM{{ป๛ัGษo๗MใEg*ŠU้;่ํ+ตึoLืeL q‹9ณ‚VVpwƒ3ฦqPwํ•‰่—ฑœ›ชฝ<Kdแ์)ัn1e๒สฆ’๖ฝC๑ฐฒูBC`โผฌค”!อYซaู+lู4็ฅ่S`รฐMjVm‹บ hำ@}PEL๚LFฑ/lSdึS+ูฤ!ผฉqC [ฦๅกRNG}ชโTใ&๙๏โPำ;cI~Š๚ิ๘๚2า ฝ Xส7Š๊[}‰žยาSตBี9ถ๊ศPูฃ- dŽก@้ณJภ*ิฦข๊Nฝ๏C:๊ชขฮGhำีธ๔ฬkG๐ ฏJ>{ะภy~;@6๑]ช๓VวW TO/G $ะWr•๓ช$ญ๓‘จDb๑ศ้ s%œไฟ )ํ)๙’โซพญไ-~mO(ดrภ:๚๖_|๑ลฏ|ๅ+๚ดนน€ีึึvL[฿๚ึSO‰ฏ๕ไษ“oฟvฬฌ\น๒ณŸฌำ)›ƒE`๛ฟ๛ใXษหหหหห๋ƒ+dฃŽค}๛ฏฝ๖Z/Xฝฝฝวไเ็ฬ™๓ฉO}๊ฟ๋ฟZZZ>้O฿|๓อ_โIZ>ฒI๎๏~ึYgqวฃอไ€ๅ€ๅ€ๅ€ๅ€ๅ€uโVmhh„ํรไ`>|˜O—.]z๖ูgรใ๗w™†ต|๙๒๏}๏{>๘เพ}๛˜Neภ๊์์ผ๗{ม้฿๚ึทฎ5ำษXl,ฺ๚ฎtห77|ฺž‡nmwG#฿๏œ๐ƒฐะX=ำวwŒฟKx‹5 ง= ‘,๎ถ=@ซ-{{% กฝu‹๘Z-~>–Qk–&Iš๓ธ`z๙เ—#uHy‰CY@-Œ˜‹ฮ=ม“Kุ)ฃƒฦN™z5๐์L|แt๗ถ_ด›Oƒณโ0CF…”?ฤภVมพคI?ZP/8นjู `ิภศ`AŒO6—Hอฏ5œง\%ฑ ฃรึฮต@aฌมhF;ฦะ—Yก<“Lฝ*ช†แ„e>VCฬ2‰Y6‹กtxร"!Œอห๕UuXธ4e๓๗#€Kลชšํdถ,•Cชƒ5 +Žฺซบ”๑าGšื3GXa‘Œล<ญ– ฐ6URวx< อึัชHด*Pฌฅัฉ?gƒฅฑ„ผ.๛~ใ/ฅ@=฿รฏx-(๎,ภ,=^_ะyJ\Kนj ว8€7%ฑE ดภึูƒ5.—๋!5Lบ’,+ฆXญ^ž€‡๊aห๋$ฌZญvึYgญ\น’O}๔ัO2๚tชึsฯ=GGตใXษหหหหห๋งมฺะ๎โ๛๏ฟโŠ+ึฎ];gฮœ๓ฮ;ว๊เปบบฦŽ{ใ7ฺฎ g˜NAภjjjยี5jิw฿}ฌ4ๆใXฺX@ซํw30  +0H 3ŠVฌZธoฺ/q xไSŽ4cกgDท+๑>Xดท๗ll๋ล้[๛บ›๎พŽ–๑ยX$๋๎}=‰U”ค/ž๗‡Aษภข๙S9NP† jโ^˜>M0ฮฎขฆ‰GŽ:ิศG‹๙lฤฺpHุ)หฦัำ1รXTk˜ฬCศI๊"Ÿ d“˜)ด>ัŒVVฎJแปœ๊๋–ํ@T•‘ƒฌฝMJrŠBX#(D5[ษ8cญ$-;ภะV>uŒ๑A"ฤc๛iไญยˆ*ธ˜๘ฉ`‘RณซžœR=,Hห:ชWฌิJ^วธ้ฑลัŽ g๔M-ี—1‹๓,ลจฑ๋:ฃ๖๚‚ƒGbฌr} ฃ,JH๐%ใ*WY‡t+๑„5Œย@Hค13๖Pfx‡ณ6h†]ฤ’!V8o+[g– ;U’ใ่kf8B >นŠMpjฦ8ไา^x3X'` {๏ฝ็œsฮ%—\ยœ๔c5tำM\pมwqฏ™>ษlฌOn:๋ฌณพ๘ล/ž…ฐฐฐฐฐฐ>8cxฐ6ยv# mูฒeŽG€p๎๋๋;Žไุ~ฝ7฿z5ฺ†ฟฑ๎บห1ำใ›ถ~อฮ๛ะชu์m-cn!HuŒฟ Kะ:ž+‰hO2ํ่‰voZ5u๖-฿ูตฆฅG œ|ŽaŒ:#๚VžตsY๗D‹Y{X5ฏBดภ|ิฝI่jลK่ปžรญิ(†“ฆƒdFนส ฒp๛J๑—oูฤ4vต๊Žฎ?๊ฤอbyษ(HK‹๎’ษL‰ โ™\ใYI0&ƒฮจIอ๋บ๛ฃ&Bฟ{Aฎ เ0ะJฒ0NหŒqคาจ ๑ษXGฌ"ํฤ–+Ymeณ%z/Q"`.\๖๊_'รU>ธ>Mช\&ญgWฎขY|1NWy มฦ มฌkY_ฑ’žฏ€ีฒ!๋•๚AUฦชOu/หžณZ”ฅ Q …<8ํ[—Ea=ะd”ฑอ ช’ๅx3X3ƒึึิbŽั".ฟ‘+ๅหื\iZ=๋`ๆRƒ)ฅ=ซ€ช‘–f๘/พศtืCsภrภบ์ฒห}๗]วฃฟฎ$w,,,,,ฌฐ๚k#lว1าอ™3็๊ซฏ^ฒdษฎ]ปZฬไ เ€uา“W^๑Uบ6lบๅ›Dีž‡nภอd,๒ึIcไฆ<ค>๏ัะ9Š3ย๚…`ึฆฝฝ;๚ฤu}๑๓า๛S• บึ้Ÿ๚zglผ้๎๋DP˜5Kฐๆช‹๔CŽGt}Rญo๑๓Xซแp`„6 $+fŠ[Dห&- จำขuภๆb)รžcŠดf.งRzโๆภ…ปข“;+้q1ื›Lฆ เ๖•9bฅชy$นŠUg็ี7ˆsฦ1เฌ}'จ‹Œล *๊›ฯ‘ฎ„D—ฮˆ ๛Œช€ล๓ี๊„yญRWMฯž| €…GMŠ/ตEหX๊ฬnqJฯ@ 5้ศๆš#oษFำฯrฯคีv ๔ …•;2ก0‘V6G`ใN XฌวW๏__G~`าjŒV.L๖•qแจฒ5หZiTmใsล้ๆ…<ฃๅูีไ›ย%ผฯำ๙ŒTฝ แ[W่ฅง0 โชl K ฏvฉd– 8•[งโ€ulงžฺ๑ฌsฯ=wTu:ํดำœฐฐฐฐฐฐฐฐ>โ๔•ฏ|ๅึ[o?ฒ๊ไ เ€uาึ–ฏafงŸ_งeรŽัืƒฑ€Mxrแ€%SขDศB„xdป2b 3zฤq(ร]‹ฌ‰&ธ๐Yฦtฉ€9rฬMfโ๖ฮตT ี-"าOe˜ไฎ–1ฌโ Y9Z6$lฒ*O$6ฎ“Pฌ’o๋ˆVฅ๊?Y/†Z๑K[ตึ็#`ฅซ`ูหjyถ”f ดๅาxvž;UญะฌY?_๑Sล^hU*Q–่‘เฯ:q(_r\E*Fษ๛3“Ÿฉ?˜อ๘ี๛TบอW9\ู|}๕>!@ง…๑“งZทศ1`w๘ฏ€…2q๛้K)๓฿0ผn๎‹ต>5ฅ]A[XP_Je%ทษ์ำว fM{˜8ลฆๆฦค+ฌc5ํ๋a;Ž‘๎์ณฯnnn๖ˆ๏€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€uฬฆ›oพ๙ฅ—^๒ˆ๏€uึบ๋.Gภอภ#fธƒฑ4แ9์เ'ฬทป4ำ3}๛กTŽ,e,ึฬ้้ฌีคฬ Kไญลฯธˆ^›eN:ๆฑG๔นbพ0ํaษXg;},zvlฌ†&H4*๖ณฺฑ~ภ)๔ั`>ฬc8<ภYข!ีW-9>OๅC๑Q ป$T '+ู๋!`G10XM๏ฮ<จก„!ฃœ„]Wซ D วเ๕`3ฃฃง@ภฌŠตA้8 ฤyP,รGฐฉฤVํะ}[Ÿ$7} ร-Kั*i:ำj X\mxอ<๙๘๏ฝก๒ขอ‚/<0๓ษQf-ศ#‰t<แ•บ+UpQฝฌาlบzุธบศfMMษ/อsใ%Qู,๒T๏ฅ,ูxำ;๏ณ\Œถ!ลโi๙’Bก ‘:.มH™๚13฿TGx+น-h๖:' I๓-ฒฮmz<Vจxอ;ผ2pมZxเB๓าฏ™ววอxR„ขƒคด“ด `eบJj J 
ซrฑ9`|๊๊a;Ž‘n๒ไษ็œsฮw๙ห_r‚™œฐฐฐฐฐฐฐฐ>zVรษภ๋ค,ถ7_Ek@าฆ[พนๆšXร•@Ÿ–1ท\Zวภย<‹ฅr๚f<ส฿๓ู'‚ฎˆ_ฌ,ถ™Aาุ่•ธ@ !PญB'‹ทฐฤrำื‰ท๘๙ฺ+E]….ฒิฝุ้E3t่่”ฑ์วƒ}a508‚YYCJฬํผb9^b„หsSซฤ6๕ฅdฐต๎}ฆ9[%+‰YขญP๓ฒ6•ชฆœ๗h=ชy๑ญxZb์ืƒ †จCตš “‹4ƒ8ฤ0"-1ัXี™็rKมธมส”xRชre๘ยg‡Wฯษ€•tCžจยณ”fj›Š”pซฌ%EฌrbซV/i์๙0ไษ˜+_*oม๘@ื,INwฌ ค\L•‡ฤ /? ะyE๔ดป(˜ฏจYdฬ]ELxฝ*Bpธโีืฤs‰HŠ*]yฃ Iา”ญsiค4jสฌีs๒n'แ'ต ึ•ทraœ  ๒{อa1OIํ‹u Œ.‘.ลkฤSg฿เ๑ฌ๎๎nปคญญํย /tpภrภrภrภrภrภrภ:ž€ี;8ย๖ษธW_}ut˜Fu็wŽ6ำตื^๋€ๅ€u๒ภผฒแฦo€ฑXh,qŽ`แ%@L๗3ฟ Dจ?้3ฅVะkD€kFซCMฆr„~จฤ:6t4๓…เ:( /G_ŒuRพ}ฅ˜‹B15>V”มูS๐ฃฏวฑ‰a้ห(oiธŠedln5cX๒e\ ีถVฬฬu`l'„4g‰์~โT(i"› ๙ยฃKญ*r#`ฉำchโ/ฺูยํ๛๐๊ะ@Ÿ$๛S”QฅฯŒ€,-h@JใV Lั๏1ต˜•ฬ0 +ุ๑๖:€ตj6ฃlลว๓&)>*Pฉbฑ„QํUภ ”h:นขF‘^,๕ƒฐKlb[ม:Ih‰˜:9ฌžฎสlwึ๖ทหt๘FŸ‚ี@ห$๚BไaซXI.$wชcBXงb้iRษ)Ju*๘Š ˜โ๐‹`(๓โร F`ๅลx้Wอถ๕›ฃ>€m=œ\ฮ9•มฑš fะพKหณ๑inX}€ีาาrm˜Xื\sอตf๚๗พท`มg,,,,,,ฌใ X{{FุŽcคQ๕๔๔xฤwภrภrภrภrภrภrภrภ๒ษห๋u [ฦฒ๑ๆซ€V,PˆGpฒ๏`ต?๖3รพiฟd"Eภzแบ1ลJ!แF๚Y0ำณcะ‹kิ›ฯษˆฟ`แƒปภiๅŒ‰V๒หaศZ13–3›1Ž=5WฦvKH$ดหb“SQ˜ฦEbCฤjZ%ฌCง+ `สh‰#เ๊‚จ„IuB* ั+ไHลแ]ษ|+5ถ"าโ้ ]s์[SF฿v‰ฤกฎbL$ LสOโ'dำ\ศX\มไ$‡!™ภโภ. XŒปแ]๙U•๐+n$Eอห€วŠcVร‡hr:ยา?;žฎ1`%4ฑ Uๆ'%_(žaฌ˜eลทง๑ศนP)+ห—ำ7ZnซะRp3วc\Mณป๊ถŠ–ำ๛ช†aš๓Wฌ06Sำญ"`ูรฉf<๙™ฎL‚^;ฝLxJ๛+๙๑จยี‘TญpSๅqˆˆ ภฒฉxoฯŠ—žTง๎V,ดภ‚กa ฎVฬŽํฯŽัแฬปฒDลฏlำๅ‚_X#›Z{Fุ<เ:`9`9`9`9`9`9`9`Uฆ=๛Fุ<เ:`9`ณถc๔๕Mw_ทๅ๖k€YJZXˆŽๅ;ž+}อำ?'Q๑G~y_*ฐBœxˆ ๅC"„NฒXฑฯ‡…ดฦAžwฯCทโ[เซ่ึหะง3ฬฃQ…คึ€ฝใฝคด8ดj้ €๖๘ซ‚lซง1&…8”#zaฃsk5ย 4ฏภj^kฝฒ8/‘)DkY™rค้ภO”ฃŸ;‹žX)‹™๛ฮต:ลSฒม๑gE/ ฌ8Fฤม—'ศ(ฮ ฦu’jฃ ม’ƒ7_’HNฏ*†๑‘YKfเใ๏_6ณˆ฿–ดlMร<3ธ=ดTHfŽ”ผbt[kpวZช)—š])‘ุ’ˆt็NiผpถT_e †€ตyyฎ H,ณ.๖๕\ฺจTฺท่ช๋XซŠ !2ฒ๑6.ห’V”๖^ใˆBะๅ>4๒(ฯUxXฉ4ย4‹—>Wผยmฟqแคะฏ–W:rPญ๐Wจา|๗ตYฬยŒ––OXXXXXXX'`ํ๎a๓€๋€ๅ€u,๖ปŒ…G`– …๎๘6ˆ!0KณSู-ฐH?่ ๑ˆ๕ัี2๒กŸUภboKซัรO™GO;.๚`Y&ภšŒฌBHฐS‘Ž\b้Ni#Dqภข6ด~!J๘ j šdแ\๚-ตจภยcฮืVO๖dฐdณิญวRE๐Jn๏๒ฦตs™๛O๙2kF[—1์‘s‰@#สhFฒiK nCˆpTฏ‰-ฬวธHKEโ๘๖ฐศXูไ}ูL‰ฒKfุ tฺฒฦZ/rฉปX•™ƒˆ…•๊+ดi.ถd|๓‚’tืŠGAคVถK7Ži`ก^-kำ’J‚ผ-Xo"o?]aธ•ฮ€z฿[/~Bj>N5rำ๑)ำhเU@`๋s(IฎK˜F!ุ37ซnj๊เŸผเฉ๓า็Lvาnช@T‘ซย๐E+Moทcb๔_2ฅซ–1ท์~เf6ฬฃ)i9`9`๙ไ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€uึฎฎ6ธXXวธญ่ไŽาbcถ{ห๘๛p {'‘พฯ–๖•ยC_ุ€^X`‹ช“_ž@ษ@‡m `…พX`kฮใ4jฉ๋x{?ยU3ฦ1ˆ,ะMใ`ˆV)๐Fl„t•ษƒถH~Š#ฯIฌํโ.“แ j™อ่ซี่8P?ŠƒjMnิ%ัeยฦ‹ก๘‘œRฐ'ฤ๒!ธ’ขt >™AoC`T มO3ฦi0“™ไฌXฃม/บi‡&โl0อท๚ ,ŽํO.๐Y=-œฃl2‘ท†๒6':’ขšผ56g—งไ๔ศ7&3ฝ๕ี ฉฦy!RTฅRsํ5i=0J”๙–4”A๙†7C=ภด4ผa>>+tฅ๕ฬิะG’ZHg|ปูx–(‡๕U„U๗่˜0{Jึdม๎™xz9–"พŸ[vd ํBตฬeฏfKฒฺ ๋ษŸ=~ษซอ–ญ3ศ11* r\ลไ<๒ฟ,–ސQ5XX>9`9`9`9`9`9`9`8€ตณซ„อฎ–ึฑo๋ฎป๙๕อท^^ฅ7|๔',๔}”ํpk๔•่=ํฤb ฤFhตi@ฬ–ฤv๊ฆฌดBํFŒํˆ๐glฑB6ห4އi๒$3๖๏Dย™ฦUO$,ฑ9†jถิฉผd๕ะ|ZNาœ“_ฅ<_DP›˜G)4G—Uตข@I3Oฃ‰šำผŽ ขย =$ณ8ศ๐–+ถ€Prrงจ!’ขฌ”C๖R%‘Ž‘ษขpŒฌอ่้•ฺs”#S๊ฝย+ฅจR€Sfชึิ๓\nOฑ#น†Fสฌs…ˆ๕ต&ฃ"WX๓ตฉ|ฑธ^hผ๚qpCB.ฺ๋  ’แ฿{Ck‚JแลjฦŠงษ ำ‹•MEVฬŒล 7.RO-F%BuMชฎ—Œ† ฬaN๑4๒Th‰ษpซ[฿ฺŠ[‡-g™ิแ( า…!B๑N nขJThz๛ัฆbXด_pฉข{aZ{e,<:`dฺัู?ยๆืหหหหหหหหหงฟVภฺฑcว7xฮ9็|๙ห_~์ฑวธpฬ˜1ฃฬ๔๔ำOŸ€…ภfmผ๙*๖}์™ไ:๑At—์%m ฌ€nQ ‰๊ฅ@๕$D็‹๎X‹Hโ=C`ฉ6Š;รฉ ฦ สม“มCโk’‚๐'ร๘UำYขœ„WU$*bdU฿žุถbx๗ึ‡‰ฎ›WาeTฌ+’(ษฃย Dช gะปAPืฮีtf›V+ฬXP3฿นˆง FชฦๅJQด‹ Wส6j‘ตืŸ `iš|ล!y–Z)RN@„ˆMzข’นGAฤ ลพHfบฒข’f›Z1tผยŠ™บฐ^OTปŠ(ฮฆ@Tอr! 
ยšyr๕ืฬห„—D[3ล‡ŠmRพF7๚ำo‹œฎึฌค+๓มs…œิ์วTภฅJ(Eซา Q’&9U>2R›หฏ2n‰ภึx$‚gั„PงB้Ÿ-™ษhTซโhJ;“ู™ุŽNcว่๋UdใSฌ‘L;๚FุœNฐNŽ้๐แร—]vูw฿ฝs็ฮ œwy๘ƒd6|็;฿๙ฯฯฮ4 9`9`9`9`9`9`9`๙ไ€๕กฆฝ{๗y็ƒƒƒ|zmท=๘เƒ˜นไ’K/^|ขI„lkฎ๙`ถ;พ~p๏ค1” Rb๚MVหiกoe?N.ก๛%%+"Da+Q้ คDˆžW๒“ุวdyfฌใฝ่vY`GœBฯ.9๒มพๅข ฑ์1Sใ^\๘๒SิP ศ1 ‡PฏT?dx ฑ๏ฝa"ฃY€Fวค‘ฑฤV4dXณฎณ” J }>็ฤXteผ)ตž๕Lฐr๒E&ฤถJeฅ.ƒYั}ิฺ7„ท0ฯ][Ejถจไ&ภฺฟ๘kLZ2…ขx‹ตQ0$QJf/๓R^น`‹jŠท สNถโd๎ถY9?ฺ ^F>ซ< f›รซ็จB—!ฯึ ๗CผสiดDfwuฉ"šeG2ฅŠzbไahR•ะ|6าูๆaฃห๛™วำ*็ุRKEุ\V—;!\๚ฺ์฿EAูฆทง›฿;+Eาข‹xVGbBๅ?็†6.Tฏะี R'็คฌm}#lN'X'ู๔็?yๅส•^xแฌYณ๚๛๛Gต{๗n,,,,,ฌc8mi๏as:qภ:ษฆ/๙ห€ช[nนๅ๐แรซWฏ>ํดำ๎ป๏พK.นไ๑_|๑E,,,,,,,Ÿฐโiบu๓็ฯา—พ4fฬี้งŸฤOlฺดi๊ิฉŸ๔ง็ฬ™sโฺ๊o}m W‚ฑฺง<„cุ๓่O่€ล…Bศฎs๓ญW +,ึฮEฏไdkํsY8Kฐ5ศaŠFฬฏ ๐$^Y!$ ฏ'~1!ŒีeGฌ็=ล„ &91=…  cC‡ใ1MงRr.ลคrLขิ๊9ฬยษ C6 ฆ^ูท)ฒว4,ฎfไรhE&`ฉ?VทHO#ผ7ฝ1&EัWฬŽT+mcˆM ZJtล+ย™หไrล<ญด)ฤW‰ฒ๓žึ ๆQ‡6`ำ"K็GfซwurชX9™6ัJ‡jพTšืjŒ‘ํŠไคฐrฤJR…uิ๑qสX ณJ:L(vเญWๅ๊ฏšG}6J{*n…›สS้ฯRzณลœ3&Šqใีี๛ŠFC/ผคฦW,o‡|Wช!`%ดฒ๎hธ่r้_ยBผตผฒ_m|ีบ]ฐ์๘A&`ชุ่‰Eฦ:qธ๊ฌอ{๛FุœNฐNส้#p๊เมƒy(,๋ฦo<๚;;๛๑Uฤฺฆป•m๛Oฟ]?a๗ ุ>mR็Sัุฯ๖M๛eวc1/ลsฆ<ดwา˜?ป rฐu๋มอหั/ผ[ถฺฌ)hƒฏN๊Ÿ๙ด~G‰ึฏNยF๐ดg๚๘๎g~mb;ุๆัน๒e3ฅะ์ยiX /ํลฟu?๓=ฟพ๓ุr฿ŒGัj๓Ÿ9ฐุ…ฬฯดืŸุฟdฦทg!4".โํตนOํ_–c+c^^Zท@ผCC;ธu{xร"P๚ึซ|ฏ@–lZ&!+ผ๗†”Lูด3า๐*๊9x”ๅแ๑Pำ;RบyM^ \m8๙Ahแ449i›—ce9ผว~0์N฿(9ๆoLว‘๓>cnXˆG.ิyด๙ฯ ษ^๐^œ<๗ดผฎˆฌษW๙ž๒]h˜g ›Z๒>ะ๒Wuƒ๒๘ฦtœdน@xd[6S>šฮฟ๕ช\…ๅ“ฮœI4=ilXbO&Wฦ™7rx9า<ฯž์g’ืฺถฐ2_โ-ฤ&—Gศ๙ฐุาป๒5า—p๏ฝ)Wร๙‹ซ้มผ=+ด๊อ Kp;๑sูฯเa ฑa…p็ศ-๎@ษฉ็ฦ‹ีธ_=ผิไ.ฺบ/‰™ศ&9T|FlDNT•*ZX'6ฝOะp•— \-๏4แปฬV|ต๑ไฃ6|m๑— ๑ฉ—ภc๋ฤ1ร&฿๚ว~&๖ล$;ทูz{k๘๘x86๎€ๅ“ึ1˜:;;ํฏS[ถl5jTwwท]็้งŸพ๒ห?0…ห๏Ÿ|๒ษ'ŸŽ>m;ยๆ็ะ๋ไ˜y็ำN;ญญญO_|๑ล‹.บh๘๑ื_ฝฎs฿}๗~๛ํ'ิ/X›๏ฝmรบ•ฟ`ํู=k`4บ็ัŸเัส?ู๋ศCณฆ`p๓3-๛๒7’YS^mg7๏ซ?;๑งฌž้ใ๑ฎส๕๚˜็[๘3๑]๓žฦำƒวๆ}/Ždฯฏ๏วปXoฤšXปฦผw™;”ย฿ไ๐๘รฯฌ)X-ฎ6๙ซy-[]Šฟ`ฅ฿Bไ7,ูบR~รXฟX~?เ/.5B’Yท@~~ุผเึ Š?Gษฯ›—s›๒B๘แ*v๕ฦt„ฝศoฑษšแZฒฤ‚%?-„ ไเ_"ค?8อ†็OM"มฦฅธฅ/ฅKฆฟฦ™YS†ฯŸ1–ผTT&ฟ!ู?๘KI๚(Vค?hู฿l๘‹NOY๚ = ๓ๆW%n*Ÿ๔ซคฅ?zๅƒฉ-=ตoฌผjึ‰ฟ`ญ_ฌ?_•ฟu้๏X๖Gธ๔KUเu ๕—งxแZ๓ฝ้็ซโ79ู—›๊O<$ฎ,/S๑3•\,ป0•็๖ญืโ—้ฐ_aฬ๓'+~ญะ8ฏฟ`๑ว้๘ V๘‰š?bล๖ุฯธํ๊dหหง๗z|ฐพ๙อoxใ[ทn]ฐ`มล_๛฿~ํฺตgœqฦ๏~๗ป;w>๓ฬ3gžyๆ๊ีซพO>kอ5ภl๗7_ี๙ิเะkำLœล%‡ทuหฝM‡ึ/<ฐ๘yqฅฒ9ืำฮYํ)7Vฝž™ฒฝoสบ&รW[ฦขe ™ฅ„W›|“:rmนfbmบๅ›X๖ศ๗%a+๘fแ]ฬะBใ6ฅˆแฉ๊็หจอ‡๕iโี=i๔ะœวcmAบฯW-šฒู๗ชู๘์ฝิไฉ>‘(™kึผu?Šy3)ื‡ษL๔ตวSImV'-E—ŠF๎ูSข•ถfธฟ๐3อmMฌEุฐูฬ*BM%gฎšjถF45i7๊ณ•s๐CBฝ&JkJV>ถpดัl]›I`/’อตฤ!O†๒ †`DฮL,›฿ฦ๕ณฯSสมวc—จ }2ˆ*๓๗iŸ๊๑ irฝf‹๓ฝ)ซโื•r๓+^\Eส๊SŽ7จi•lฐ”…VIำ:›ๆไฤห‘ฎu้y–lฯrำ๚•ลพฟ1\•’ู;' ฿/6>ๅH.ืง22รwฬฺไjš~ฆ^9XฺzGุœNฐNši๏ฝทvy็๗…/|aาคI\8w๎+ฏผhu๙ๅ—`†ป–––––ึ‡™kํas:qภ๚๋šŽห(B๕kุ๑ะeeธ\p'žใVฑZ<ึ/”ั|กฏG|ากj`ญYฦp :4[gˆEว—ะแส`ขDHh !)Cบย<xƒฅฐVX ah๔šงƒCด7รฃฐSผ‘oวrฒQฎ—gฌr\dEถe3ี๛ป2บ>ัU–ถฬ‡ w:เKี (ุพเฉฒฺสWดโก ฃํ8วอC|KHdฌ2jVƒซ ฦŒ5ํa๐ลเgKห ฯ%/ฺšnDณ&๒ฌo˜v*่c|โุC๒:๕งฑ{y์กŽ ๆ๕Š2y๔Ÿ™ฑ•„‘ งโEn*0Z็Gฯ3ฉƒ.๕๘๗/™๑:‘n4ผ`๙หP ฐ2าŽ‚ฌึIฌxL$ฐหฃD_ž`วขZ;‰๘ู๋ฝ*า๑ไ ’u<ปจ๙zYจJฬ‡šถ–4๖@Wb๒>Q—”b้ฉV)ตๅJ๙๏ผเฦsหห',,,,,,ฌ“ฐึํ้a๓€๋€ๅ€๕๑zฎฟแJ4–&lŽaืฯศT‹ธQยS๚…Rเ“ามหJAูS'คPqP&qE„˜๓8พ๙ึซYqำ-฿DรS`ทฐแฦo์}=๘‰˜B_M ฑ>ศxb™3๊ CฅF‰F๋Q่Xำ-ีิ"ƒZทNขcฌ(ฺrx:oๅB๛ิ )XฑZb.่ฦ‚Œ3ฦ‰wQ)5…b]ยXษ6)‡ฯ„GQiMฦก๕-.o$f3าTมeใ ๙ิ+R๕ๅ฿ึK„$0ๅฐ r้~i—•|,ณ/WpbใiแRkPqฑš=EฬfMn ฏัžสฺAฉlGใ4ข’ข'๑ยโฆฑTUCฒ#ตก7ฆฟฏ>ซมSMO&฿ีบzญ“7Fก๋GKผ วฌฅิšใูH•4ตฺฃœJขบBลล*9ำึใTvJ ต&ญ"ฌhลyV฿ŒGyoจ,h@+๊S2Šุ8ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€ๅ€uฤiํžž6ธXXŸ„Pศ™ ๋V)nปใภข ย0k\ ฑšJfฑฬฦE˜กามจฉZ;๗๎IฃY๋†›<งะ€w๚n๐CผJร#x Gยท0 ‡W๑v๚ย‹ร๛ณcิฑh์l /e๑eS"๊…ฬtm.ฅขiฮี\์์ไ‚โƒ…ฑrN@%สFจ+ฅย>Œp ฯัดิ็‰๏Mฑ™ฒT.‰ช ฑX7•EบzŠ2’bcภš>คาC)้JK2ำiฃomปKฬJGR้LMM„ื,๘ยu๐QVผ Rฉ’ฯญ%Zฅพ?oจ#Tgมau เ๒ๆ>๕>‡wXีUฯ31ซ๊_,sณ•สมW)3ืม-ดbf6^7้๖1ฯ…v;Y,tแ[•2J‰ขpีิ„]EaฝพT‡Xช kSบRYP3ู-xล๙“ ญNvภZำา3ยๆืหหหหหหห๋Dฌ_๚ื_|๑๙็Ÿ฿}๗8pภท–ึG้kภ^ซฎบlๅ_ล#จ Lำ2ๆ€๘=กฆU‡ถ.cสถTIคฃ๎%๕ปyม]๋๎\ 
[binary PNG image data omitted]
xarray-2025.12.0/doc/_static/style.css000066400000000000000000000032741511464676000174160ustar00rootroot00000000000000
/* Override some aspects of the pydata-sphinx-theme */

/* Xarray Branding Guide:

Primary Color palette (Hex):
#17afb4
#e28126
#59c7d6
#0e4666
#4a4a4a

Secondary Color Palette (Hex):
#f58154
#e7b72d
#b3dfe5
#8e8d99
#767985

Primary Typeface:
Acumin Variable Concept
- Semicondensed Medium
*/

/* Increase Xarray logo size in upper left corner */
.navbar-brand img {
  height: 75px;
}
.navbar-brand {
  height: 75px;
}

/* Adjust index page overview cards, borrowed from Pandas & Numpy */
/* Override SVG icon color */
html[data-theme="dark"] .sd-card img[src*=".svg"] {
  filter: invert(0.82) brightness(0.8) contrast(1.2);
}

/* https://github.com/executablebooks/sphinx-design/blob/main/style/_cards.scss */
/* More space around image */
.intro-card {
  padding: 30px 1px 1px 1px;
}

/* More prominent card borders */
.intro-card .sd-card {
  border: 2px solid var(--pst-color-border);
  overflow: hidden;
}

/* Shrink SVG icons */
.intro-card .sd-card-img-top {
  margin: 1px;
  height: 100px;
  background-color: transparent !important;
}

/* Color titles like links */
.intro-card .sd-card-title {
  color: var(--pst-color-primary);
  font-size: var(--pst-font-size-h5);
}

/* Don't have 'raised' color background for card interiors in dark mode */
.bd-content .sd-card .sd-card-body {
  background-color: unset !important;
}

/* workaround Pydata Sphinx theme using light colors for widget cell outputs in dark-mode */
/* works for many widgets but not for Xarray html reprs */
/* https://github.com/pydata/pydata-sphinx-theme/issues/2189 */
html[data-theme="dark"] div.cell_output .text_html:has(div.xr-wrap) {
  background-color: var(--pst-color-on-background) !important;
  color: var(--pst-color-text-base) !important;
}
xarray-2025.12.0/doc/_static/thumbnails/000077500000000000000000000000001511464676000177045ustar00rootroot00000000000000
xarray-2025.12.0/doc/_static/thumbnails/ERA5-GRIB-example.png
[binary PNG data omitted: ERA5-GRIB-example.png thumbnail, generated with Matplotlib 3.3.3]
xarray-2025.12.0/doc/_static/thumbnails/ROMS_ocean_model.png
[binary PNG data omitted: ROMS_ocean_model.png thumbnail, generated with Matplotlib 3.3.3]
1|E"าŽู8ย;ฮโœจ(flYม +ฺ‡8o*พื=็ฮ๛Š‚™V€ปwฐัน&P‘ฌโŸ_YูšIฉ€RทY *ทl9:€สนฝ8)€สSฝx_๛zฑน‚สn€C“มYดสๅŒ ƒus`…,ŽžOทY•RxKณN๕ๅ\zพXb1ัmP|ุ`,y9จะ!ลPBiะ‡ธ/GฬจXโw- ณ“D„:จธjiQ*๐Ž *๚›rUฉLำิ‘ิ+M^1Mพ*ฃt5๙ฐ๘1‹ฯ Qชส$.NUD=T„f:ƒšŒร`‰ฆ†ั่ะ๏๋ำป้’‹๏g‡ผ…ะจ-*~๚๓UYD+mสtศคิญ‚สdลผจœ~๓–‘ฃหk๋m‡P๙ป^ฌ่Ÿ_กฟ+จ์จDแ( 2l*r›ˆึห0ฅ๐ฌJฬฌx๋—@ล3.%ำทRฐ0๑w—R้หu*€Š๔ว๗C`)ภŒ๏™จeBฐŠ;ฒย็P!b๘๔^ˆฎษQ_ล฿ร“ษจ8ิ+x/,>+ซiึฐ"KŸr๙P]J…”บอZPน้™‘cAๅ ืท *;*V(ลVฬ8x(HS7N„ _{๎ฏก"zญ”ฒ+M]Bqก๐วYคฌx+5๎ดญ…o4 n๏% ฒฅt]`%.qnŒทพบ— ู-ึ์๛ฌ*ฑ๔ำRฺ|?ฺLํ†•บฺ@%fOX2†ˆ^&‚ ม™‹8จาgCกpงWบ‡ผุก…p`q!8๏ๅ8gสK@บ3!๎mค๗)6 2=ฅ์ŽžKฃ๗ฌJ>mู” (u›ํ rฦฯŒฝื๖z}wUP™ขฌŠ‹^Ztฃ]~Iาฅ}ุํ๕๕Eฌ/a/ณp›’ฦคษฑถษRฟฉ-™9.*ั๏ล3+@ ž๚[ฯ จเˆ๊†oQซข่šUiสC‰g•I™ชlJ*จLP9๓๚gFŽ•งƒ˜๖ฉ *;ฑ•„ฏMเ2j&ลMฑะทhมw ฅ&~9zถ"–—ผใก4(vE}@„ |&L𠅐๎ซลPัใjกขผ[”#ค๘ช”ZkGษฌฤึe๏Œqฃ1oOv1mืจ์ HiาฅดAสฐN<๗!ศ~)$๐qƒ7ฯ’0!น4`qHqฝJ PJภโZ ลณ"zฏ๛๛ฮuTัฯˆŸื3+jGฆำgชฒ)uซ 2m@ๅบgFŽ]?_๊ลถ^k/~ู‹;zฑจฺoOึ้TvTบ๘ ฤQ Rˆ-S๘ฦย_ฟMดJs" เkf]ปzšภคด฿< ‚ a+–่ร@…์E—rF›^%‚Jœ ฿”ํ‰]?t*บM[FeT`้ )%@‰ฯ๋Nฏ ฤ>ž์G จหศ„0บะมๅ8ิbาๆแ.ถฅาOS๖ฤ…ถฎ[!ฃใ Bx6ฤฝ}Jถผq/#๑—น[iเ`…”บํi rึตฯŒี๐mภJ[ซqwzUMœ‡)๏xเ†‡"โWโ&pร‚,Žฯ๊ "Jš`†ฌe*ށoSฆร(`Oฯ฿t9ฐtอฌ”wฯชD a@ฅd? ฐŒาล4,<หโ(ภwเ๘$aภฤณ,L๖ฬˆgHNฃฦo๋ฅ t๘)bZ40~y Rธ\๏็า๒&ฮป”บ>‡SัSท *ำT่ส5OTฆ!จด‰_cหq]ฏE^ม<‘ฟลŠ<_Xั)@R xั/B/็ดู์๓e> DJš”” ฎำ๋ึ"ค“ zฤด*œ'tฝ lt+m™ฯจ”@…ฎŸhŸ๏^*t&uัช” eWม‰ฮ b๖‚ย;qศˆ8x้ขMgJพ4qฎPฒ4žYaฟขๆ$Šพ9%ใ!ลAลห–ภJtMR๐โ3‚hWบ9ฯF๗ูšEฉž *gฟ๑้‘ฃ‚ส4•ฆn.ข๓‚‚P^๎รŠtนF#t9ู&7ฃUแ”็ื—/šื2“ล9‰ถ‘ร€ฅ0,ฎP UAxษเน8ญธ)b7P VฺJ&ฎ\*x9สลร฿˜ี3 จt”’ฅ4ปHลฏ”PJฅžจ'i`่แ-อ"ฎE๑็ร์ ๋{ษบ>ฤืขg ภโBZ`mUษt‘๗"ŸL์๔™ั็OQ3)uซ b r๕ำ#G•iฒ k). ‹ BwB`๒ณพ€Pq8ั฿~Yฬคx—!่"tDร6‡๎๋ๅืว๘œŸาฦ&P๑๋๔ุZLดHฐฐiมิยWึFHQvCก๓%Šwด™ภ5eTดGGฺ‹ฟPŽ ฟ๗ฮ| จธ•~‡ฺษœx้ลห,ฎ)๑๒‹ธ€@Y]ณ'1ƒ‚.'CŸtM๘ๅ’พ…๒Aัฯ‡๒คC‡ำt]•{œ๘ํฝDIHญฯ˜ƒI Tผ›Žหšf๘T@ฉž *๓{ rฮž9*จLcX)Jคเ|ฉpP!ณา*dZ•&H‰ภQบ8๑(AJฉฦ฿+mSาภ่8h‘แืทbŒเขีแ„๓28ซ–บ`ฺ๔)€ ‹ฟnฏวPะ*็้ปฐจธจถ VFั˜ธฮฤEญ 2ฎ?ธุ”l•g<\s‚ฑ]ษ{%๎3mอภ\l=ึใใม4ลa๎ัCF‘iฦ 5,อrH๑ŒJn๏ฆ„๑3*ŠW๚S‘'ซSทบอPูU:็ชงFŽ *ำTฺ„ต%H!pmŒPTb ]qH*DI็ยc๘cล,MฐฤLI—L‹ „ฑ7gj.‹0ภฐ ภ‰ แž&@Ž/๘:Oๆ †g(#_ B…จ("จ่~ภJ[จ+ค8ฤฮ๏ža‚ฑ/ฺ*ซธ–L‘ร™—ybูฌZ์‹‡kT๔๘ฺส8ฅ๗{,}F Rํฟ“Rนง+^๖แผO/งณ‡9>“•Eฉ[*จTP™Pf…฿(*Zผ๕…‰F%‚ ตsืญ8\ธ>…๋“ุI4,t{t0ด<—Z<›~ฉถŠ#f‚+ˆYl๐ไ u9jV" 0อ˜ฟษบ่>Zp…6ŠAฑำg‘ฆs‡,J„43‚ŸY[—Gุ้Tศ\ฤ เ-๋ษธxi'‚IษZ฿;’8พฯบŸ;๗๒8๚[ฯฅ็EไpพT6-้Rbษง Ndi่`+M8๗๙S:ฏ๖ใษ๒Iฉ[f*จlxS#G•iฎS)Eจเฺ*คhš‚ฬ Y–(ฐ โ‘ถฮ 2ภŒฯ(,m% aจPฦr!ฐ`ฐ#ป‚–B‹!% €ลAH๑Vศtะ>ฌEW +0ก(Mmฆไๅผ๏พ{;P‰pไB_ฯ;‚บ T€+/ตธ&ฤ|”y8vmู&•’ญฟ.Rศส๐ฬขฃ zp—ฒŒ1ซโ~?Tข`ึกลKEžE)‰iน\๏๑W๚>)๎•RฅnT  rๅS#G•*]&๋ ]‹5PจYA`๛rT"ฐ kcXJ\ย๓้๙UณWจ]S—37eX๙ง)ง็{ู ˆ6l…@FฟˆตุiTX™ปฃล฿ต#ใ็#ธธKฉtฤ]4+8‰FwZืัธ ู€eุไ+่?€wvฅใ>%‚† "’€Tส๊4มŠ—ฮJฅ็TะMF‰ฅ”m๔๎ณาเKRBXKiุ‰Yพ&Hแฝช,^$ไ (uซ 2Tฮฝโฉ‘ฃ‚ส4บภH[่‹h๔kฌF Tผ59fGบdR(3๑ุ<ง Dค8ฌ่น=ซาฅ๖dลŸำกHแบฬเด๓ žS4+ Tjฆ+'‚Œร„ยกค”AQl๘๎ร โ%ยJษkฅ จเWB็ Bนงไ4๋B๖#fsฺL่—) ้9] Nฤ๑^mƒ–h’จห™Žำฑฟฯข&ฅz“I‰ RฅnT  ฒ_T.฿$1ปH‹"Kฺ–-YใฎฑdPภ€IŒ )%Pก๔ใู€ฉ V8๕๎ –8<ัAลm้qFNฬฌ tu JวFL„@ฅฉ“‰้yด๛๑q๑jSfลฦฤ็cyZœ„Ÿง P€`J>ป#‹RทบอZPนl๓ศQAešmK๐6L;rVb6Py9dTธฬM฿ฺฤฒฺณ_Lฬš)œ8 }๑๊Xัํศชhฑ–I๑9+ภ ฏ1ฦห}Pั๓่9t*€AI7J,ึ๎*๋]Aฑ|ใ`R‚–ุษใ`ฃฉaฅค—‰ะโb_ฯฐxptขZ:}0Q๓าฎจDฌHค”2,žYAgใืใงขrภ@๏G•X˜๓ไ%™˜]QDถwฒนื—y๙Lช+~YฉxW€J๊6AๅผM›GŽ *3Tšเล‡แ๓ /_t‰mะ๙—อฮAF็ฝƒพ$ถ3๋qนY A"B…`„/] …ดธN#จ0Oจ V|!มM‹๛๊ž1ฺ?I—้5าvหbH†Pมษs6ZŠc ศณ'ฅVใX๊i /E`Zโ฿%ฝ š™ุไญีžMq๏ŸtL‰Œ๐๎&2)M"ูaCฝไYฟ^ฯกŒŽฒ"Sทo8จฤถ๗ุr_ลฮžD.œีพTvu&ฅnu#@ๅา'GŽ *3 Vโ๐Aฒ „พ”ษ6xฦCแฐBษ่๐ฮDฑ่@hs๖R  œ”ฒ(Qdจ๓บ-ๅ=/@ธ6eTb)ˆT=ภBู‡ืC™‰} t9€'PQ‰Œf‘e!E;‚Vข`ถฉำX"ฌ”ภ(–‡š:„X<{A{u๔BqŠNฃ@ืฤK8‡CLX›+\จ่=.จเ=ซ๗›ท๚๋} 9^ฑ“M—ว’N ฑผb6ลƒาc€ม…•บีmO•๓7>9rTP™A k๑๚EKโฟัŽP๖‰ ยๅ,3ขฟฃcm๔]q} "Y ฅ้K8~‰๋ถ€Š‚&‚Jฬจ”^;N1หAลCฏ6\e”Y`Avอ mฬ>ะอJ–6}J—rฐRาฤxfง+ั‡hแ๕8ฐPj๓e‰PโtฑEฺต0K๗•i‚/!้พJ.d๒ผฤจ๗+็ท™ ๐๕ฌI›ส!ฅ)๔9ำใ๓|ฅ’O”บUP้* z rษ“#G•iธ)๘OœxM™;ฎXศŒ ,ฅิ‚Vๅ+๚›l‰เฤณ'/‡n๎/เˆ@เež*d[(๘s ฎฺ„ตqฮŠ_ศผๅ3=Nบ/Z 
๔+ZXตp๚ขหB?lfmศ]3)MY’€7ป`ผKฟ—Xาeด<€ ื๙๕๎Œ๋E๎ฬK—๋XFญ‹C‹ jUขำึ฿ซž!tุ๕V๛RงXS‰gค0Ž‚ฯฤฎศขิญn{*จ,่ส?1rTP™†[S[00Bบ+{ื”Dฃต๘ซ“LJl&หแ@Cช30พ@xwO›—D,žYyน!ซ แJ>€Jฉ ี–้‰ภข,•[ห€ ๅ‡/ ‘้ˆ%"Fœ”ฒ/Mาฑ4T๒bมœŽwฃ:‚r—;ใ(QKฦฤg!ิu ํอ€Jฉ4ไGpฎญ ฑััๆN&%N้๖๎เh{_ ฒy<ฎฮึญnTz rแ#G•i *”[๎X๑RFก„รโญ/Z‡ รEฐ.B‹รD lTฺ ลภk[ฟ๔ป€bว…kidx=ฅO[9*๎Ÿ‡y0> G]0€Š.ง[ˆฬ mอ>ำGนœx]^๊rกฌ๋\ฺ@ล3*d=x’†ฅไรˆD}‹ท>ปฑA%:ำ R| ฒ`?† ฦ…Q| จศ-V๏m/ท๐C็D๖QถมdTCœ)ี)>วGๆฉ$œT๊Vท *ใ rแTฆ้A%ยŠ ^๕ทท!-nถFvฤ#‚J)ฅtฝ—W†ŠรJ๔Uq รK–ๅ โส˜ENฺ๖Sว๏Œฯ( Tcฤํๅร„…œ’’nฃE“97Šๆ"่|ถšAz‡“ฆะ•ฤฑzXฺัkPpนฅอH!3Uš ห?Xำนๅd^:ุ‰ถLJ T~y บษF}_W@ฉ[•! 2ฟ*็?>rTP™A a…6dJ:ฑtใ%ื„4AJ4h๓๐…ขญณgTP‰ฐข„ั~Zสคธ_Kืด๛Ž€ ฟ}* E#4๗ัe๑:ๆๆ่rŠ^อ™‘?ˆN)‹h‘๖, ฃ๓%P)M;ึ>0"€ฅ4):fO”ด<ƒ8&>!ša‚€‡ึsxฒะUแฆipกBทhDอ๏็WฌS mŠรJำะห’/ q๘ผฯG”บีญ‚JจฌLž๗‰‘ฃ‚ส •R'ฮณฎแ—ฆ๋Jผฆ Tบ่:vv๑KN+ผFLบh Ewำตด#๛V ผXด `ฤณ:ีโฌS€ฤgอ*htt‚e€ เiหสฎ่:€ ฺ=Ž๖‡ฌ†ภ…๛h๑วศN โƒ=โ< ฦRฮAs‚[-# ๔œ ๚S& s=ฒIžUBซ[–z.ม… T่cl‚›zFฅT๚๑P๔UกฃMŸ™yืญnTบสE็~bไจ 2CK?,ุพุปุ๐•ฐโ“ถ4๗ฎ”.:ฯ๚x‡ว6{ ;šษููN ฿g๖g]f )o!วƒ#š Tดp๊ถZุt฿*@ e“~Pผฯ๋มฬ-ำ๓DHมรDฯฃ๛กฯAcA…์ˆnฏ็ีk๐a ]NึGใs‚šŒใ๔ธz?”บฦ๔ุึoณ)_ืฉx)ˆหP|xf4-ฌpRท *ปT6<6rTP™ฐ[ˆ๙R-ูำ{fฤ5&อi—าษฎฬVD@ˆ๛X•.&rฃ<็ฮx^ฤ}phม5 vลถ0ข4`ฐยI*จTPฉ าฐ๑GพLK iiAm๛{2!ฅคSi‹าู๋Y“Qม…}ค E+Ÿไs’ถ™๗ †e๚[ จh‘ึฝ€๏e.ดเ3\Q@Vฅ( $ปจ h%ปโe@K ๐/y)Hฯ/ภย๋วฝ๔8‚*ํnO6วh๕บ๔\ส.้ธ&ช)[“)y)ศฯ“y!+ฉห๐P1,ซXทบUPู 2oeบ๘์วFŽ *ำxkrwําfป;u&ปT&&v>vXฺฒD๎์ู-22Z8ฝCE0ขล^‹<% ล€JIฃBP‚z*t& N๔ธ  ฅ)ํ]cdด”y”ลแqw ๖๙;€Š๎C๙ˆRฅŠ๎รbZu\ฺ|xผ‹ํ›cๅSยq*๖aœ>๗๊kio*๗ิญnTvจœ๕๑‘ฃ‚ส •]•M˜ @ู‘}žฬ,ษฎธฎm‰z ^4บxPHH๋3YƒfE!0 ำ‚ธญŸ]๖3k๑๖,ฐโะข}ะcซ„;ฒpึๅสiŸตŸส’(€ฺทu™€‹iวบ฿0]Hฉไ†ึสปขถ -GณรœึญnTv#จœ๙๑‘ฃ‚ส4ฆ#LLFฆe:รH—ŽYฃถืไ5Z8™d-8ะจ(ใ! ฤขลK@1X-เัใ*Ujq'c!Bฐ๊Nศ.Vzํง† า๙คSๆe3๋kสข4•ผไVe{{๔๒ึcขnuซ 29 rษ9*จTX™าฌษt/Mฦ1`uวU†ฅo๑.e/่ฌกป(ก5ุE+3จ`–วb๎™ž’3q์ฐ‰]hŒw ไรŠw=aจฆวลTญt<[ๅ็]X๎บ— 'uซ 2ล r๚GGŽ *Tฆผผณ+@ขห<Ÿฎ7YฅฑRf„๛แ๘h@E‹?š•่ีฌะD™E๗Tข>ฃi„‚k@VชฦyT@บ—>ะธุU:ง8€ฒI8^ทบUP™"P™•S?:rTPฉ 2i‹qi‘i๏jฃน้||ผ‹จ4]ฝ…เ@บ@E0ขฬ…ึ\A„ €้ฟLL0l2บฏzๆม[ล)น(ต4ณ+>%8(๏hืฮp๊VAe๊cฟน+าฦS>2rtymฝํz๑ืฝ๘ษd‹ *{˜NeOิๅ”Zฬ@… ‹|ม๓ๅ”W,จเใ0่ฑข๏Nิr*lŽ”—๐0๑์Š{”0‡ / า\ฆy\๋Vท *ำTN~tไTืŒJ•“T + nคk…2;๊4‚Šw๐ธ้œ!ˆ.จผFผ ๔ฒนยบ?‰ฉั’]š€ผ;!ฅnuซ 2AๅคT*จิ˜†ฐโ๐เe @ฅ )1(ผl า–I)‰j•m๖xdXโ๓E#5 'uซ *๛๖@e‡GŽ>„ฅล]PyนกUบพ‚สn฿mGึ}ฬจเย๊š‘mPุึ'%Piสฆ”f/•ฤต@‹Ÿ๗็ošฤฝ;ฒ(uซ[fจ\zโฏ3*+๛งK{๑{qn•Iข.๎ณVผ (ฅุึอุ@ลu)mบ˜’๏ˆŸwhม0ฮcwตzืญnu› rย‡FŽQ_[o๛H/ฎ 2E Re๖CJIŸโpะV–qXqŠƒJ›&&fZ"ด4M…“บีญ‚สpPYž.=ƒ#วฐืึๆ๕bณ^lช 2ล โภฒ;ภฅัิฬ9Š 3+ฑำ$|u=‰๛–xฆํ๙›†YjYมคnuซ า*๛๔@ๅธŒ@ๅตrโoz๑มT5*ำT†AL€์ฬcิุ5ž2รZ“#ฌDป่ตRสฎ*รf'uqe0f…“บีญ‚JTŽ๙ภศQ ฿f9ฌT(™Nผฅ’ส0PiบzืŽ{ฅLำปบีญn{.จlZ๛‘ฃ‚J•Y*Sฺvd๊rจธfค/qŸOž.ำฒ๋VทบUPฉ RAฅL‡๒ึt†”ฆาO—ฒPุ์ซ๚ 'uซ[••5๏9*จTP™5ะ2๖ฝIƒ2 จDะhตN†_I“บีญ‚ส.•9=P9๊ฝ#G• *ณXฆ๋~๏ฌtนmืฉาN๊Vท *ำT–ฅMG>2rTPฉ 2+`eบ๏๗(ฐาฅf*2'uซ[*จ์4จ๑ž‘ฃ‚J• *3ํ5Œข]ฉ9uซ[f%จ๐ศQAฅยสŒ•ู๐บฆ ิญnuซ 2ฉ ฒwT{๗ศQAฅ‚สŒ”*ฎ@Rทบีmฆƒสาด้ะwŽT*จLหผjo*ิญnu›… rศC#G• *ำr๑ฎpRคnuซ,•ื๔@ๅ wŒT*จ์ฑ%ž้)uซ[๊6๋@e๕ƒ#G• *ณช‹g&ยI๊Vทบํ1 ฒ๊‘ฃ‚J•้…2“^k๊VทบUPจ,I›V?rTPูรaeO” uซ[*จL1จ์ี•ๅ๗T๖PP™ฉฎฒ“๑๚๊Vทบีญ‚สn•e๗ŽT๖0P™ษCwล๋ฌ[๊Vท *S*K๏9*จTP™ั Rทบีญnu›) ฒ8mZ|๗ศQAe•šeจ[๊Vท *S*ฏ^œ.]t็ศQAeJ๊Vทบีญ‚J• *uซ[๊VทบUPูPY๘ถ‘ฃ‚J๊VทบีญnT&Tธcไจ Rทบีญnuซ[•*ฏZ”.๏-#G•บีญnuซ[*จLจ,ธ}ไจ Rทบีญnuซ[•I•๓๏๘ฌ"ฯ๋ฟฆ็ำ‰"ยœล>ฤ>ำ>ํ๕cbฟ่อพหูํ-ๆฐขƒวุฒžฯZนC;~้ๆR:‰‰&~Ž฿rฦญ‹฿๋6wSˆ๘…Žฑ-%Gุkฝ—jnnn1;sๆฌฯ๛j๐>ฺ!ƒN!(๖9ษ๎ฟ‚m฿”ต˜พ‘ีl4“ญ฿Uฦฆฎญน~๔ภŒT€dู๑SZ_๋13Pƒ๐dทตาjsป€<ฮ‰ว๓‰๘%wŸ[ทlห~๖้ˆ,ํ˜R>ธ „@่:ท@๋#ฟLYฯjw]ฤูwYD฿s_›น์฿VฐAKถyVJเ"D‰“•งตH‹บ/ึf๑Š๘1ป†œขCฺ1๗ทงญิฒสg"ฉk๐ฝA+=฿ั:5—N,?!Z`้ญœXR#~zqAhŸ*–๊Mงnะ>sO๋นZl?^;-BKyBrืึๆณต‰๖ˆฆน?ยีaoพ๒ิ๗Q์>AฤOฤOˆG8Y฿ห—ั๚Aตฎจฬ๐็แ๛๏ฟhซ๖น๚ืฑŸฦ็ฐ‡ฺฯื’qj5MำBA ษ‰์ต~1!ซ(๊๏:pด‚}3f6)Q4%-ˆŸˆŸ`๐ฝยญŸ)ัยฯ€าoไ–฿๚›วำjุ 
&$JสOz๒>ฎn˜สสŽฒ์ปฑชฤสแฎV้์แ”๙Iฟช$โ'˜šๅ+‚ฦRฦ&iZ<ฆฝๅlๅถศ@‰7ˆ๗'—O๒น๕'ๆxฎ?"zbตสฤM†ˆŸˆŸˆบ๙lไjmv”บู=์๕ห,Œj@ยžํฒH{l๐ซkุตlฤŠํ1#ๆEข#บŒˆŸˆŸ๚„+„ฤิ4ีl8“ยg๛x bXVyuฬ๐$`๓˜เnภฃŸL–้S&ฒ€‡-+$โ'โ'B›ิ\veƒT-†Z ฬม|๖ย˜ฺฬ5ƒ="~์ชt’ ฉพูnธแข„ช'??!KอึัrŽูx$ธ‘ ฮฯำV๗ื›ญ่[ฏ๔Š฿f~K4)"~"~B<ืm‘–ƒ$ญns ,ฒ‡i›ฤฌ?“fntuิ*2žภ>n8DD„ณฑ;๙เ@|}"P~โ”ถง๐rฯ%t1\ŠY๋…ฬGึ๖๘ๆlจค.h๙๑๑tคxGOvfโ’]ฆ_ฃ>i๚ธฉ๋้฿๐หฌ€kฑDบT๛Œ๗ ‡ˆŸˆ฿ึ@\พฺpƒ฿TV0vภ’^IBLฮEลe€Œ2ฎํวรฒโฟ!ู€wแ"~"~‚‚สส…ๆŽeณฑัbฌ้฿;u๘ฝ‚oศ๑’ธ–๛ *gลs฿H๛ค\ŠDD†hžL-ญR€๔a%›L} ’ผฑสโ$$๛ไŒป๗‚ฤอธ•์3*ล๑๑'=0หFQkคฯ[†“ๅ^โฯO3๕QT๏‚Ÿ+ษ7ปC–FŸ w2บฐUf +!ธ™๋๖h…ฺฟV]–ˆŸˆ฿uXปShๅฃาQิ8Xศุ˜36ฟต—๘—๗2๕ช*$๎’^์ฑฤœำฺ1Œฅ\ลุ๊กŒ-Haฌ๙๙ŒZKธ+ฬg๛ย… !ธๆำ6๚T†ƒฝ?x?๒`ภโm์>หฌฑJัK๘ฐf็16แCำ_ทำg#VำrP +8ำ…U:]๏—๔6๕‹ช+ฬ3แ๗ฆ _?’ว๔ฤ"~"ค€ชŠรŒ,j`เuฟห;8‡ฟ*fgฬ_3d๐ึ้พ˜.’รB;฿Ž]ฃ๕1P ฌ6Nๅ:ลุัพD?์eฦv๐โ"ฦv๒YzZCฦŽ”ˆืๆ4 D๕จ>Mฃ™ฺP‚6๏sทk‘ˆŸ ขUj|>2สู๕ฉŒ yA @,ัืณ0 ฬ–ƒฺฐ฿€๐?‚ณัoกW5l4O ั@๐Š็s[0ึ๕6ฦ๖ไ0vโ0'๗_[ซxฏ็ฝโฑ•Bห‹+ ^ฬEล๓ร@๚€ส&v{v"~‚ˆVAผ lP04?ซdlืj1ำ_ูฯ;h๕$>^?f.qFUOJึะ;ทเ1™hH M:\ƒช฿ูW๕ธ‚9ม]@ฐq๏ปไ‘ก—z.aฯws๗ “ˆŸเั(ฝO๚๚‹:‰6}ฦ?วู๚๎ชวlœ"Žูc.bQh_ฦ&1๐ฑแ{๚ iqฐ™zEฌใlƒแผใ?}ฅ๓œ๋2–ruเใn๗ ภฦ/"วเ๚QฏŸ=„ bHG่ัGJ:๛}BƒŠภQ0=„ ัKฤŸD€ีw)^dpฌ๔’๐ไฯโฐู"’นิœU_<๎ห ๛Q๘WๆNบภ ๊ีโ:tš“ฏลผ‡ย*q๗ฎoC‘]Žปด›แฬ็“ (ยศ@Fˆำปˆ๘“jvฌ,hโ…"แ‰ลฏXcCyk†Œ์Y๖#ะ~มo้>ฏ@๛{๕Žƒ์0้๖ว D(จ>๗ฎb๑–ญ!ป"ภฌ๕{ต†~Fฤoย8q+แถA๗Z3nปนญ•V›ˆ฿zภ7‰ูสm็iE1‚%กงณด;cๅ D@๘ˆ‰<p[‹9ฺf๕ใึ†'ฤˆเyนงAˆšuฃ†C"€Hตq‰่ฃลา๕3`๑6G_ฏDรn@?ะŒ?vภๆ'fdŸ ฯb•g‚/U‘ีˆจ‡฿%ฎฑ˜•!ชว`H็ญอg๛ฌd”a้lุ-6=™bะmฃชfm™Ÿ˜#™kเEqLœข.OšŒฎŽD๑ิก™Vo๊™”—%ถมุTVRฯa0mํnm%3bลvญ0;HKrB|€2…ฆ๓๑๎r‘T•Lฌg8\8>ใค๒'โท†๘ทs[']A็…๘l=4Vฝzuuย๕†e?…S–๕ลT"จ‹k9เbjq‘(๒breย€{ ๓๛Jพ’์fฐคโ™ำขz4Y฿ัBี๐P' •น4ฉ้ิ\"(‰bnฟ็๖;nญA4ใท˜ๅฃฃ๖ %m ๊แย:ิดGรA ƒษ6 ฿พ~7$ฺุ;ˆจช1ซv๛๒AะวฒG$ฐั2คsใิˆ>ฉ:k7<งF๗ุ‚๘พGฤ–HฅA- U ;ไ€๖˜Tj„LภMMำดLKิ๋%ฤJลาPžŠึ‚•$P๚E„ 1๕‹ˆฟB‰nฺ[NฤลŒ๓oน!โทjiŠŒ ˜๔‰Nˆํs{4ผ(หซdธษแ7ฃ๘˜ขัไu์๊†ฉก๛–ยิ/c/ศf˜ ;<ย*_๋w‰ ํ9{ˆ๘ ’hn{นUrลญ.ทแึK4€ˆ?z|;v ป3T9D๒ดซ!"ๆฯ๚PPEทฑ1 ่ีโณ“ึQˆผ‡ฑ”ซ„็~UK›m`IpQm_ฮึ’น&eiบQDD รพ๒้Tง า]M%ัฦ› จห ๑6“@}Tœd๔ฌJโB๘/า๋3‘ด•่lp@ JO๘XเฎOƒRัH\Sc BuDD รLY#tํฮCกD,5:๚จทDFฅ]กถ |๛ˆผ่<'Ÿ:…ี^’นZCtKH xOjB&มn€ˆ"}8€์ืŽ#”ุ”;๖RO็่๔๑ปจ ย ;(Ua๓}6ŸฉŒ_l๐F€[ฬกฮJจwทN7@ฎฒl็ฆY๖๛จ‰fไx^“›ะ/1นบF>CนC$ม‰๘]DX ZะS„?a’ศ`ŒEIE+ํt PตOqขL˜ิ๎บˆฝ7ˆdšญ*QAŠ9,4"=~ศฆ&ภไ๛&j Yp๔ส๙#ฑ‹ˆŸˆ?๎@ฅ t@พ -š็ฦFผnฅศ34ฬ๑ ๋็‹Qู์ก๖๓ฉcXไF|44S“๘n1}c๘hลอ/J|ฆฎ`sท฿ใฆ>ขŠ-ศ/qฤO$โwVnpaธˆ๊Ve%ฐnผฟ3‘๖ฏ^GN’fU“ ุ-อgk›็a1้cฦ:฿่ฌš๚ƒจ lโf…Zึ8/W๑๑ว่x†”)‘H๒,w@จ#–ใmช16๊Mขˆ฿@๊๖Žƒฺ๙่8{uŽh๙P `cื้ซูs฿G๕CW ๋ะรAษIœ›พ ท8โ'๑ป j†6ฆอ๕~sปC…ชBูxDˆง๖รs]ใY์(อ๚ฃb๖An†VOˆม ืiย‡ฮ๚ก;Eป๓'f!OYโ-ธฟˆ๘‰๘ญฦง#ฒุฃ2Œiป๓ริ ฅำ ค.ƒล4Vlฏย\ ‘ัQ†ณTฑƒ๋ำษฤ๕1 UxIS{ก๛b๖ฯพห๑‰๘]คั‡ฌดฅะ็!ฦ†ฟ๊œVVฤ—37๑ู˜Œ$้p”r/€bฆชDโญ~ห5r ?็ซอf็1ึ๓พฤ•WŒ(F4๙SSi6mปถ๑LvฺขmD.; ธNFูจyŠบถN2A&:ษN X!™ิI"โีงj†WVaธ6๐•;ˆ๊ZวิGFฎ!(DฤOฤ7 („กโ'H›NฮฑJ\ฮเr~~ศ่ฬ๋ฯFฌึ๊‡ @…-ฬ๒‘pง๙ศำœ๛ฃว›ฑ๎wš๚ˆฉjdDDV` *ิawกงoน๐H็ฒc๎0,จีcพP)…x]kษN~/jC้5์*Rฏ}c"*ฦv@ฝ็V—˜ ~(>,ฒxเR$โw z/Kฬ6rงE,ul+ ฃล= jอฯ็น1ย;$ต>ัภy‚๛๐มซย๋๎ทนฬK๚{ึ:๛‡/้*~ว s™ธฉพทscˆ๘]‚‡fฒวŒD๓@ฆA NปฆัEๆ ร3KฝฆJุย๓” ็๒ญar&Nืบ๖eEฮแ๋'ˆ฿S,๗4ฒ†ˆ›A cะช›ˆŸˆ฿ ำz.๛jtv่ƒn๗]Š;…‹ล๏ุbฬw #H``NฮEฦPO๗๚_fฑŠส0ซ*ๅ>Œข”กญ€Zผ๘=Rˆฮเ˜QลYฆญm๋ŸGฤ๏””‹ขm } Š…‡าภ์ล๖@ึฑVฮฏฏกรฑIYv์”ฑsEะ๐Hส|ออหzŠkQบู?ซHžฌมพฤ&'RเHlrV Kl;3:NdmrC–†?xุหฮJ ‡ำ•ŒตฟR$ •๛ฎ” ๆ„(" j5IณuE."~ณW PV ,มัiณ‡ป๋ว๗{LމB2p‹AOž("nH๊7`HO๚ฤ]'`yoo‘=๑‡นมอูXฌวU…๖อX&โw ‹z6T่ชlZหpI9ว`UY:า๘า๚นn‹Œน/’ฐ‚=฿อ@ฆ.ฒงq PสะM@Ššฎษซ\ฏฺืHฤ๏X/วYv๐h…gi๙XวŒะ่ฬIw"” –’—†ฟ?ŒŸ๙+I$9–ˆšอa1ไy>ใฟšฑ“ๅ๎; žซใต"ˆ ื†dฆl?Oำอ"โ'โทะวฌแvจ๕๙ฏหƒŒฐMงg๊ร-โทญ%ยีฌ,oFะ|;v ปฟ-e๑†8ัฟ๘ึอƒYqZCwžฃ; ฺ‹ฟ+Bห2 ย๎ฆฆiฌฐ๔(?ฟu่4'_˜จ)‹p;„… { ณ)ๅH(๙‰…ช.ษƒ Mj.ปฆัฬะฎฑ$Fๅ้3์“แYZšต>ŒวัRqฎแw+๐๖ไˆ็๋'Jืb่:ฮศคฟนูlบ‰๘ ,ม๕ IฃV๎q5 c—ส#ฒัJฤ:๐ฆ$Šeเœ•S๖n`C‚vช_ํ<Fp บ8ืง&ว Rโ€ธ„ม็#Wณ‡S์Y๖“ˆ฿กxบ๓Bญุต B-)๎rt๋ WiฟซLKไ)เ๙๎5Ÿฒf—vฮ6๏+g_จ7E้‘“แ?uTœ๋ขฌไ8Ap๑เ๗ยๅฦนดc "~ข์๘)ญสVืนšฏrฐ!‚ุ๊”r+ๆต๔ญร 
๗C๓๓๒Bภ›]ึvQ—xฆ}ฤ๔:(ฟ>,ร่นt16>++’็DaswBฐ‡ ’๒ & โ'โ ไฦฎa๙WธAPqหญ€Nฬธ๗›๑ฝ๗ตน-ไ `Cีีz™ะํฬ0ม‹‚โrํผ`Edถบ7h ไอ๎Eฦ๚<๖0Uœ >Iณ~ฟ's๒ตฮdจ๖)–ฆจ†"LชeQQะMGจL๖a'fKA6d์‚2SŸษ‚ิk}ฉ!ืฉRอต›ป‡ˆ฿๘vŒ‰pฤ+ย†7บ”ะS‡ฟ?r๗ึdX7Yกฌcญิ›Yxภ'‰pษŒ[šฯf 'ญ3vp๎tYล-I5Tยเษ#!*Kขฺญฟรpเh…ถฑ%ฝ ๔ร_›บ‹:&/“ฉ๚G‚ฯถฦgyˆ%ร–t)เืว9่6ทภุB‹u๊xr๖-Oohq@(ๆโผ\ผ-น‰Ÿcทntฏฯ-f๙x`@)o๏แเ>%jัbฦ›ฬุ4หPmaฌ?ฟ89“บ6ํกœ“ฒ –M„๛ฐ็ฝษทvg‹พ…๚ี!€‰&jgซ{"ˆanท๛ ท๚๒y}nํ‰๘ใ•^K5H`‚Nนn|rฎ,q6…Žื/:xŒอห+6&JๆRŒหi,›นd“pbb1ใปไํ[J๘ะภ/‚ ปะ์๊แจแG๙.‘ฯ/ม฿DU๑ฌšfฒV3ยlB‚่ะ)weฑค†^นำ๊๔XขY2ขujฎึทBบบฐWข2ค\&ฮo2ฃ] ฦฆ}๖ฐว;fhา้ง๙นตK"—]ˆฟฬ๏CDU‘/3+'ฎณ_5@ŠฒํN๎‰๚จZW/C‡Cตูผษขฺ‰z˜D c๗#น!ชmคคชฟA$  ฉ๕z๏eฺุํf—ใˆŸฃ ซ^ฝzR๕3%.าฟฬk%b๗OW&๗ภD9Fิ6†O6ีNTnร๏ญ?1Gห y1jGํ™l™ฯฬ[’ฑ,tฤฮGC35pตไhโ็๘-ทjไ๊ฑPEDŒ9j้ยPhQmfŠ้ฉำgB/ว๛?มX‡kh`jำ๘ห›๙“กCU„ฯŽ๎—qภlพgGRๆk1r‚ Dˆ่N”QฟT๕ทI‡< }}ใgซ-"~›ป)ษLX๊; …V00Ÿ๊ผ ๔‡ห‹Eg„3A”œœXฯะกjƒ7{วAืŸ– ๒&๗ูศีž>Vb˜ ศ๑ˆ๓I๐ชทƒj‡<*บ๚ฑ\y:๑uฏฃ%ž๎2I๚ฃนํๅVษmทบ.เ6O†sโ๑d&‡eU!~eo๗_๚ร๐ฝ3K๔}”ฑa/:„sŒ€ยGยฺฺ‡<}ซฯ‚0›ตศT๙๕)=Pวบ๋ญ!ฝr‡ฯ†:์jืŽWœNXณฃ%\nงนmๅถŽz<2Jเ2 ิฮEบ<8*uก๖๓ตŽ‚็ชำ|=:;๔ๅงษย™4(1fฌ๛†…‹็.7rมจ็PwHฆF>ชo…ฌOฝ}ฒฅ7ฃ>ๅ3pŠ์p 07ทธสไ ็N%หฟyจNีHlA7I5fภอเ+ต๗ร†rf ‘ส;iPณ•N‘:ุิล9ฦ&ฏ›ฑtsฉ๖;—สzช๏•”‡PูคภXึร้$ฏ๛s(สฒtKi•ผฃˆŸใoฬ›q[ลˆ๘อกข๒Œง#`3#Vl๗9ๆƒมซŒ-วมา๔œไชˆ ๅิ"/ยฯโฦ‰อs”ฬs3ฅ‚Sา๒4 ‰œฑโ<๎หฅ>h…ญ์`aะC1Cžˆž๘U$&๒.RโŸ! นmำ™๖7ฟ9(!'ุ?๛Šx_ฬ๔%„a#Nฦฟฯุฏ7ั€TPบKป:๎ึ้์ว๑k]}J ]"S›Œsš0ึ!B๐BU!SถcEุษ๒&T\A‚$BขŽใ—ณ{ธ=ขŒˆ฿เ[๕_Zvจฝm/เใ๑šกC…ไตป๗Gพ1SศGฺ<ฝ๎งพไไŠ &มๆฆ )eุDษ5G๋ใPn่โ–มํขrˆ๘อA ฏaณถŸูyชNDRฉ_2ึฆšคa€ท].๐jฏฅฺสา0 nŠ|ˆ‰Q_ •ีผzจกรU 4’œH ๆถV}ทฑDๆะ|ฺF-ด๎ื๔|ญ3„•\dข๓ญ์GQตฃฅ„nxก,Tไบง๕\WŸธณTๅฑฐ@๖ทrc,๋I})PฺDแyMH๎ Y๋๗/™โฯ”kนI='โ7„kbถ Ž ทˆตb6ฯok D=”Ÿอศฐ‡všฝI“ั=eƒ$›X ๑ไ๕แ สฤ6L๖?EŠ…ย:Mฌถk5IcMงnจ๒บชฺ๕ํุ5ถ%ษฮๅึŒ"nSนอ$โ7‡';-ะ๔<ขnHฎซ็ฤa„z Xˆ U“ย@%ุญ8ถ€๒ฆฺCฺSfฐ€ส g๋|cศh‚ฤ€ง๋๗ธแร๋ก%า๙ซ\ฃXฎ<-i“ปuธฟนมhHj9Tal2ชบำฟ {˜ฺtK]ทวU?ษๆRvปิๆ™`4A 3|๎‹ค‡๙tพแศ'„v"?ว*ม ๙<งcTŽJ/&ˆ฿Wป(๘”๓มํ4š*ไ9Uซiš๕5Tๅ๙jtถฃๅš!๔งf๛ศ5U^pw6๕#ศ!ฮ—ม: ๘—{๚€่ฏmฏฬ7 ๚ ˜ฯญาอ}d?จ„gdP๑lu &นf฿ๅฺ{ŸŽ๛KVฉว๑วธhj“!Zท4Ÿmญ‡‰บฒ&”&Mฃ‡ถ ฎูมฃ‘ธYฒู[}iุr๋“์๋๒ ๔†ษŠ๘<ห ร’ˆฤ-๘ฉA^ปVS1‹-๓ฤน+ ญ๒ช/{น+H6ไM^วJˆีร๋r/€ˆ฿ภฦM A•$+’jP๏…W@dไxฟ๛Žฉ ‹ื ณดˆผqkw{ฎ;๖ฌ†ฦCๆ1“ ปค/^๓Šฒuจ)ขกๆ€*eหTข_์2‘ผ‰ู3FoโD‰ฒ๓๔ู X’#3/*_ฏJงWถข/ 8รำ๗ฺŒ ?SYTPโIฌ‰Cๅ>}มj X,V$kvา\Š“ณwy๚œแ ะ๖WŠ„๓s์๕•Hpฌ๚ึ๒ฏ†+๏v๓ข"8เŽ–s๘,o6+ณฌ๗โฮฆJพtภ*ฉห-ๆ๎ณปส4Mฐ7ขฉj-|8TlไAงลJํ์#ุ$อ็;๕%=อ2XGธ๕ฅŒฅ@}$เดผXฌศ-๒~n’I_D66Q฿้๒–bฦ๗Œตญฦุ๊aB4zŒcVกnโœฉ์Wิ>Žฟศb๚Fgm7ฎณ๏๐ ถtsiะ๗แว๗w่e˜Q์',ƒnPพš˜X`‚a1ภ#Wิ>ƒ—ˆ?†@bFรฺภlด๐…Q ž"y"\-*ฮD„*๑ฌH;|ํpล(2ž”]คvวj0TYฮ`Aจ้ V8VQฉ`๖ช‚[ึ`๊#ัu >c๙ื*—^คAD๚‰N„ี–@๑ ๆซW๑นัธ‚”ฃE,ฌŸ K1š“หธต๙lmร4ภทๅ(!o\ฬg๏ช์ž๊แ Ž๛i|Nฯ‚๒E่fTุณ–ค?ฌ‚ๆNผู๒ฏE๔ƒh’ž๘กЇศ‡™& o`ฐa‡‹€M4๕„•0“S%๎”ญ+*ณ๎๊C๏t^K`ัส šf‘ฤPcๅ็ฟZภMคมคužษ29+LB๗ก๏Kณ=dไ๚g†G„M3ลyูน’๚Hด€`"„ร$qEยWธึ๓๓๖๑GD;เDพa26VI.ภ ””Ÿ ถ Sqธ–`สgb€ๆงั‹Gล๙[bฺ]๓B’ ธ้ฃ่'่o(ฝง๚‡*ศˆผ tษ_ฐ๙%>๏ซPNืM ฮ!: ”}lิ[ึฮYไ^SดแIO์+b์อ๊ฐ@ปล- ฃ*Tไ )๓v—ฑฝe'ุxฃฅ๎Œฑึฎง hะN~_7๕๘โ1S7ปฑ6qu‘ึ'@เ ย[๘๎`ั7˜ฉY#รฐa\ณแLmePกฌ็ฝิ7ฌฒvUฤ…Pต รAฤ๏(f]๐‘šะะะ๘fฬ๖ูˆีฺฬM?@Qศ#ฆภฆ.dัญšฺ_a๊#*{+;31C้Sฟ—ดlห~โTฮ?P|™พx“ธ%[่|๙่ณคฬ7ž  =๏ฃ~e%โ/-ฐ๔k1แŒถ2WR?ไPี@A!5๘ฐ้fภ5ไ]W_LaฬชึV Uแœ„ศฑ0EœวSฦณ&—J—สMยฟ ฝ% ทP@]Uฌ ๕P…^-zg›™นฺŒแ™ ๙/Fykเ๎“ลˆ๖ˆmชQ ,=0 3 _์ฑ$๊$ภค$ึฉนฺ ›d4ˆ6yƒ๚ธ*ท๛7ทุg–Pฟ[ \Kาnํ.งA-6งหŒ็ๅโ๏ƒฦ„ำwสiงS๙ lี—ฎn˜ชEv้]1฿nภw๏O˜เ๑ˆŸฏ3ˆ๘H^๊นฤง8 ตaใว:fh›z PATแUุmทLc?ฐAtT๘!Dญดเ…พBwS>๛1ฝo?%อ๋พA`๖p๕ฎ่๑่7„CแฟขฆชwฏIm์ชˆ3<๊o2RPQeQaฬŒuฟ‹๚…ี€ŸฟูนBพม"โ_%k๑Fณฏ“”ฤ๛ƒW๑Yพ๏sฐ ืรแนn‹<ƒ ณ6„า›ม(พญBBีyTZ.ฑฯฯ/ฌดT2า€ฒอฯ๗%~Ÿจ~H4M(ขžณQฌ.sŠ…๗>ษ ( l =๊2~aˆŠžๆเ9๊ฑ@‡k|๛Y”@-^จ์;|"โ๏ฐ๑sl็ถžฺ` c1iSณ3ฝฦŽ2l่ฉb(žฎวืฃณ=วกฦฉ%ศ›!: ฉซžญ ฤk[ๆั`ฒn๗5๚ีตฯ฿gฏu์ช†C|Oษี๖ ๏จžืาฆฝl^DฅE MM๒๊ฑ๖ไ๔์ฬ้„7ษŽฤ!K:งЉ†!AkBV‘๙#bณ/pMuร€ม-}—oGQEลž?Hƒษ ์ฮ๚4GKลเD6ฏ`#.ม@ สŠปaP5Sก’iำำy1ช๗๗Q&๓h.ฤ้ว˜HŠำeŒ_"~ฟ™”"๐แหทk๚ ฝ`ฉา๊จฃ+F2ฤ›ฃ\ 6w 
ึฅ+ีฐlีจWD8งัฌp๘-ฝ…ัl?}5:;เฑส‰”ผภ๊กิbิ๐ห$ป!ทlnซนีc ะใWƒี?.๒ H Vsแ}าํ#ขv@๐]o๕v–ูยoวึ<Ÿ๕2ผXทj ชRWฟ…[5—"\Œ๚ผ=Tโ!\แD_้<ไํc;WQ?ˆTต[ท‡S†Uฏ^๒‚]T+๒Ÿกa&ž๘๘๙` c+๛y%”$กฅBฑjk–ัีMภชQŸะe–๘กบู่`3z์ งนay/฿™่ฉิb์ษแย•†วm‹$F๕p4ใ๖sAอ]ำภฬ1ๅธ๙œๆƒบวrpžc(ไPณ8`€†“ชษ`ฆ์f๚ฦโ*ฤiนวฐ˜)cj26ดccM} –8ผGW่&๑r+ถ!~Ž?s๛ซ๎๙2nฯ$ ๑+งใ‡D็Xฺอ๛ฺ1Yล๐วˆทFกq‹ล๏๙cF๕aŸฤoy๕6เไฦ:^หุฺ่ว pญูคฆฑˆJ้mไึˆน ุบ!€ีพ}5+ุ8ล๗่๏ใuิ‹%ฤจ“ฺ‰„61ฺฤฟv็!k ญ ุผ’ฌ ฤฟึ’๕žหฺ ชนk(=˜๒๋๛‹:A;žDดb Hท฿„7รŸ๘D[`ล่[่K(ผBˆ/=†sฟ9ˆ?้‰ํh/๑Oฌ'๙s๗žŒ>Dƒ'Vะ"{ฌีNHเBุฐูaรภไฟsW]๓xใ่~้สํNฤŸ๔ฤฏณ`ศ&ลฌำ฿/ Ÿl๑F8ฑ๖Uดe๘a[4กรzม6ห V•ๅ{้š'ํฏdl๊—DIOำพ๒ ซCุ&!ศ"ฮู.wฮ๔ฆBง(ม+›คE฿G๖’( โObโGRV{ผฤ?}‰ภ๚‰โ๏หs๗๏œ๘ckั๕NFผ&๚ูะ‰๘“–๘์ฒญณ๊{‰?s ŽD  =92XAfPŽ „ึ"ฅN"~งBํ๒๏\้ํ ๛ทะเHTืๆน๎š'ญ*†’MŒตบDL๘คิIฤŸHจ0N•5 1'ฤWŸ=Kƒ#ภๆนcกDช*B‡}ฤu8ZJฤŸtฤฏ๊œ๎หฅ` ,ฎt{Š~‰฿X”Iื;‘X?!ก๛ID u๓|CuNํฌดชKŸ;]„ ƒlFผNื:ัุถ0ก‚mD‰v๕ฺEฐPŒล‚eQ{HYแ„๘ซ|\ D’๑'๑ฃธ5ฉ"ฺ๏f wˆ—*ๅIH<ยน๗ฎiž"ัถšะ‡!ุจ;‹8๋ฦ3V0ว=ฟ๋ื›จฆฎ€hศฌฯk€๔ืIฯ"~ืฉใถ‘h%่Yf}5PqL(Z:ุปhqcณำ๕ต 0ํ๋ชฏoœ*๚^ ซํ๑' ถ‰‹›=œ€€˜ž๘++๋tƒ NงยSใก;]_;–นz—๕ืซฯรDฎ#หลล-Hง`',๋Yต0ถz๎ิŠ’|ัœqt}ํ„~36์ๅะซN„}๑ป€๘‘ฐ1๕ ฦฒ‹ ‹M‚}ฐf”/๑o]เ}Ž 9'!ƒ๊ท์H7 xช๊๋จทํ๏n$โw8๑ฯY\LUQ+A™{„ ุ•ๅ;่ iฃžใฆเDx’…(QะV๕cฝ๎ฏ๚zืtuถ‰๘KงN06ทน(ฎข”๙`$kO 0Nแb฿@ณ๓œƒAšdุ>dฌหอ^Oภ–๙"ฺง๙B:[ี‡8f}M"x@พ๙๗yศK&)WQ็ท3ิu๓6cŸไK๓็๙;ดสb็$LŒศW0๚_ขฏ.จ ‘;M&e‹‰ศ๋ย;‰๘ใ„lแโฑใu"Œ ทนŒ:ฟก/Œ‰š N%ิl&ุ ณ QF@I5crกI9,dlOŽW4PMBฦพcษ €ˆ?@ศ.fXฦฉŠ[T์ฺ€<ถชฦ…›6nุvวษr๑ˆH1†‡,q‚ฝัVึx>ํๅeeEbย็‹:๙พท =ฟ#0ดŽ๏…Cศ Š}เยœ-พœ่สๅaฏ:Vภw๋๛™Š๔ฌ(พBฐ๔5žq๔ืM…ทซม'ŒO๘พ‡ฝ"~Bl๚ ฃุ\B ก๊๑,Œ;@พ*7 Pั;z[ี_ฬ๖แ" ุ "PาฬšnืBดMvํŽชืิO<ะํvขฅ.ชmœๅMu.u‘’ จY#V˜š ๓ฅŒฅ(๛จ๔FฐŠ7ˆ๋ี๙Fฑšœ๑]ีc&*ŽYาEbฝ”<r5c“>๑^ธรปฉำ; ‡๗ˆk‡ญH€ฅป…>ฺ€n$|7๒D<+อGDy?M•ณ5]Cปแ๔)฿™๊aU†rB/*X์?ฟ ั๒๏b1ปj3‡เ, ฿ขล…Œอ๙%ฒฯซ’‡0dn[ %-ก'‰๕ผ`?”››N๚˜ฏj๑๙าfy)Œ<"–wgาฦ๊F๖Y%ต Chจ?เCh_คฅ๘๐yM๛I'%=๕K๏D„ม๙€๋r๎Dvw์/s uZง>๔Hซชิžฟ‚\๐ํ๖~ :7ะฦ)โ๓ˆVภMa‚*ฤ38u๊ถmหออ%‹ฦฒ—ณeณY๎ฦ†Žว9วน'โ7๖ฌYxgณ}ฬ๚ฃ!fตษขW“ฝ{/ฒ๏Wฒ 6’•––ฒณNU8ต ผ…L^กเ8ื8็8๗D๑„^โwว ๊ดNวœ&"/}%:ีชdฏ ละ{=Yา›ูZ๛ ณO"} p๖Œจ™|x—ฑร๙9วน'โฏzf+ส4ฏตŽDคR?่๛๚๖e"!ใศ>oส?๙XฯฌzŸ๙ฯชpผด†ม‰"p๐๓GB๛ƒˆMˆ|ขxฃ(ไลน'โWƒqฯฆrWY%็!ฏแVx ูvƒฺษฅ๛y3ฤตตฺ็ฐ๑Š*(|ๆฯNช/"า#ซu€ƒ=(Hฤ๏~`i"€ˆ?”L2fbฆ>๗บwภชสF๚jMž”๙u| ๏ฅฮ๊ภทษ$A๕h v{V}ัgิ๋ศฺฤใ ๙N›ฝ็ฝ"ฦ›ˆ฿Rิญ[—mธ1ไ1๏ฝ๗?พ๊Mนฐฐ9า๔ ๖}ฺaช€ ย 73@!•q~Uศ-๋‰AูฆYD–n\š๘แAภ$ำเF>ฟ$vQ›$๐ปšฬ‹; lU7>Xบib๓…เ œำL%.ๅCผพ<=f๖ุ๔๕ฌ(Jฏ(W’๚=€Šcโfd๓ฆž|šMภ่ณฬRรw†ย๒ๅห5B|๐Avื]wi๑ํอš5cํฺตc=๔;zT์รแ๏ๆอ›๛๎ปูๅ—_ฮ8 }฿ก'~|๗Nฤp ]uีUg}๛๖e-[ถิžŸฃ'#Fx๛ห_๘_}๕Umล O5ด•ฦื_อ๔&xพ๒หก‰_ล๒Ÿ:๎|โ็๘=ทญฎไ๖_rธโืวืซˆf"oTRT๕:Ta@B”€ส%Rๆx่;Vy(†bF‘q]n๑} Y฿Zทฏผ+อืˆ็ Sผ…;l ;๘๘แa๒ห/ฑถnZ#฿iำฆฑ7฿|3เg๑Oš4)$๑๋‰๚ฯs@โๅ•WXZZZ•โ4hqโ‡ซฤ๒ˆ+ˆ>nณu7€ล„๘•ถ>[ฅc ŠงขuขศZ}ฬลbc๎"t4ฺT#rt3–๗๒ฎ๊เ*T[ชdžฟ฿๚๊šๆบAโรjI=ชf.&ุ[า็ W6฿Oฒ๑7mฺ”UซVฅงงณโโbํ๙K/ฝฤJJJด็›7o๊cวX~~พ๑๏ฺตKs๕‘ฃ+™็—Qฉ2vฮศU฿:บ฿๛ฤึ:ิฯ‘คw“พช๗ˆ๘C฿S็ฮe๘ร<พ|ธ]:u๊ค=Ÿ7ožgำ6u๊TโWฤญ6w?๙ไึฐaรฤ‚๑วูอ7฿ฌm๎b AƒฌVญZ์ฦod>๚(+++๓ูล’๘‘ฝ ท ม›ฝ‰๕ฤ฿=ภq๕ะhX๕๊ี#๋[3ผqีpฯ`ฉA_)$ิ฿ร_๑;้gEก‹_ๅf๐š‘ฬ\›'ั,’ท๔Eฑญp‰Ÿ[ฑ‡ รA9#ฝฯกล๎?{GฤจHฬ๘ŸyอใซGE+P›ด ˆบ@ฅ"ฬจิ๒nง[vฃ”ๆgี๗&ท _๘฿(๎‚^?]o? 
\LC_P”Š'@๚ ๕\O๕|๐sŽ$งแ๛๏ฟื6eฏฝ๖Z-|ำ)ฺCv'?pฦํ ๆ๎,ž"mKป๛๊ฉ €Šถ์.๕ƒP>lซ›๖ด์ฑb`A!d‚๋ฎˆ7ห0Y;Dึ&ๆปนพิ๊ใ฿๏ห‡~w0์%๑>VชŸ: ž.I6๑‡"ฺ dtO#ouNษถีฝพศ.( f_?รยฮบ*ดขฯโ%ธม2ณ•๏]ฟ๊ำ๛็@นแ๋”Œƒ๚aœpใม{จงKฤOฤo_Yf}•$ b#ญเpมMคj๊โ†€cณ‡SOKโื=Q$๏ฏฉYnท›[Q ช} ๛ ‹:Š๏ƒ{าะ๓G@โบ‰๘ DBeNชŠYPH์RK้์โฝŒvพŸSแ 6๗๎%~}ไ  ๖๑ง7ฯUfทแ๏?วซ๒ŠZนf’ ‰๘ D&ก๗฿BWP~T•Dƒดฎ[ๆ้ &๏กžๆv๔yX\kอ=๔”gTQ^ชO˜๕มซฯ!‘kสgBˆŸ@ฤร \jะ)ูdc"์S_%ษ?4OฏžHยl๎2‹ƒHจไ@(eช>1๊Ms฿Ÿ๚ƒท6/ยIฤ๏(@ฐ-†าˆ@ฦ๏าฅKMฯ`฿Gฤo๐ซ"SROเyฉ’๔/ ร $7ๅฃ๚๊๋N๙œฑ’|๓฿3ๆm‘Pˆ„ย5‰๘อ#Q#รธC‡D,ั5wก‚7ะusึฉ!$'เ๊Sฤฏขo"”ผ7nDQ…Q1ซrษ"๗งŸ~า;‘™ปhั"ํ}ศ#็ไˆ [oฝีฃุูธqcึฟํyJJŠ'ใทI“&ž๏Vูปศึ๔ำOู 7 ้๖่%—Aิ๘ ค ‘อ›——งต๑โ‹/f—^zฉ–3€ถ@R:?๘?ฐ%Kฤ>โ๛ูSO=ฅตญ^ฝz ญDB"€•"ิ57D™ี‰d@uArก›ˆnฐAตญ5EhBw฿}'ผlฉฉšb'ะถm[ึฃGv๘๐apกง@jaำฆMšฒๆG}ค%rเA์ .๔!~<ศ๏๏ปืG‡ฤ฿ญ[7ํyฯž=ตj_foฝ๕[ผxฑXP๎ุมฎปNิVF™บอ˜1dNฤO 8ˆSฤฟฐƒใމ_อข!ฆ๔๔๑ฺoผก p๗฿ฏ ธAั@&/ศ[ษ,ใs(ึข'Pส›๘,ฤ฿€+Vxn8ฤัEy ซศKใ๙ึญ[=วwyD‚ใกˆ?{นz,@QQ‘&„ฆ€ข(Š๘•ˆ„ TTTฐ+ฏผ’ีฏ__qƒZ—.]4]}ซUEหŠ๘ก๛Š๘QใฃˆตŽฏชฝโG!"~มฤp"จตL(|ใจ€…าˆกˆภ{ ฬ๔วŒร.ป์2ธz๎พ๛nxf๏๛๖ํ๓!qใฦi. ธzฐš9‡#Ž;๚์ภีƒฝ…5kึx\=ชขืฬ™3ษีC ธŠ๘™๋โP@.”:„œr8โวFฎ อDYD๋๊ีซ=๏ใ&€Yj๖nูฒล‡๘A๘ฑGr๙™gžasๆฬ Iะ้วfฑฺล1p9แ5|พP›ปุFaฺ%(~ถญ๎ŠŸ’ฬแœjEขฦ๊›ผ‰>๗D‚แ’„ภd&~ฬไU-฿มƒโ๑"$;๗D!.ไs–ไLฐ`ฺK‹๚๕/"*3n—ศป๙nุ Mฟ—_r๛ˆ[ทZ6hื]บ็pj“๋๙;nเ6˜•๒ต+นmๆv๕/๊cิฟ’œ๘9พเ6™ทธ˜~๏ต็ึ„&จM฿pปTพ†ูO*ท e›0z…E :W—่^ฃ|L็๖l‚ฎใg˜}้พ˜ n7๋^Kแ6.žO;๖/๊cิฟ’š๘9^ๆ–)—Gธ{๗ะ๛ลpQแSD็ืฆI6!฿k.฿โVฮ-฿xฎnั-หฯ็6@mพล๑•[nลŽr๛ƒ฿@ไ7Sมํฦdํ_ิวจล”ฟBํธ} Ÿ_.ฃ๙๓ตผkb้๔s‚ฺ4@ikH'6”†ษ‹฿6:W๐ทUƒ Žื๑-๙8†[_??lถ๔Iพึ3^›^v์_ิวจ%อŒ?ภ–ฺฉ‡ ๋ืqหญŽ๎ตฟs)ต๛;Xuว4ูฆ ๎ใ๖–oบ๗fqปอF็๊Yn๓bE!ฺ๖g๙๘9Kญฉ;ๆMI`อนต_วiฃ-a+‚vลญEyพbฺวจน‹๘+ศ๋X:NC๘“Šว•;แ๕ๅาw๔ัา๏yIโฯ3œXtHฯ•|ํน1๘วx^G์Oฮ‚๚ฝw”ฝcแ๏„O\ฟAj‡Aปโูว":_ฑ๎cมฺeƒUรnekโ็ธ“xn]น=จ‹\๘ฮo๘>ท4]ว๚‘[3๙V๛7ฃhSSี)cฑฤถ]มLฌฆk฿๏tว๏”ณWฬะ๎‰ล,Gž‘kIY"๚W4ํŠYณข]ฑ่cแฺ•จ%ฟ๓v2:LปูฟlM๒„ด“ส๗\!sW๘‡ ~ฉ๔c๖—ฯ‘Œั(ฺd็v™l_๔๑สŸs;หญ ึแl’0๒1ำ๒'ฆDœ3jWLฺื%๛}#ูžญ<}พl;ใ—พภ๓ไ๓Kไ…‹๎2๐6นdBฺ่nฝb8sต]›์.mรŒ็!๑›ธuŒ•หI78ัž.r฿๎น6ธ–ิ.kี)ี๊ฆY)ขdว6ูน].:gฟ่^;ว๏{ฮกvQป,์๗Y~˜9ูนฮฎคo9๑๋โถฏQzr‡1่W`g๛jIpฝ‰๓no“ๅาs๖tฉ๚ฟฃvQป,nืหฦU๚l\ืฟ<9mคส#rใeจ฿]tฏ,ญŒ๗"…•ฎท๘"ฺฎMvn3jต+ชvเยทŒ๘ๅ‰ส‘iะHั^ฤํ™5wท๎8ค(g่~1™ะ`uVคํฺd็vั9ฃvQป์7&ํNˆ~G๗w/y’?ฏฮ…ด้q*ดO~๎ก‘˜ํฺd็vั9ฃvQป์7&ํN#}ปฟืลด*9VHฦ~ฉำqงะQตษฮํขsFํขvูoL:-ชeฤพำฅ/๗’ฑท(w๖c‚d!lื&;ท‹ฮต‹ฺeฟ1iKโWJRj๙vผฯ•ฺ‰([gป6ูน]tฮจ]ิ.๛Iปoๅ’iธิโฦ]rh"“์ุ&;ท‹ฮต‹ฺeฟ1้„8{ฅ ๊num๑mุ&;ท‹ฮต‹ฺeฟ1iwโ‡j^•da“‹iป6ูน]tฮจ]ิ.๛IวŠด‘‘‘‘‘๑“‘‘‘‘๑“‘‘‘‘๑“‘‘‘‘๑“‘‘‘‘๑“‘Yคฯไ๓KนM ๓BFฤOFๆnโฏมm 2"~2ฒไ!1NH‘ฎ๑๊& งศZฏ…ฒ๋wฒ๊า UwUไH“Uศฝ? 
Œ฿๏๙dแฟrปˆaิW•๏สํ๙%๛jส็๗p›O็•ŒˆŸŒฬนฤ฿_wN%ล๑*ฎ[-(หฃ๓JFฤOFๆ\โ๏ก;nป*ยญƒจส๔ัy$#โ'#s๑_ภmG$ฤ/Ÿ/Ci>๙ชทะy%#โ'#ณ?๙แุ5BWศอ]ิrอๅึ„ฮ)?????????Yุ‡T”Š9ทฏIENDฎB`‚xarray-2025.12.0/doc/_static/thumbnails/monthly-means.png000066400000000000000000012476471511464676000232320ustar00rootroot00000000000000‰PNG  IHDR็tึ YQ9tEXtSoftwareMatplotlib version3.3.3, https://matplotlib.org/ศ—ทœ pHYs  šœOIDATxฺ์ผ\UตฦC๏ฝ…–@B !คัEค—'ŠŠ๐EEมฦร† "Šข"Š"ฝ„"า;จtP้ฝ๗:ก็ผ฿ณพษบ›3๗N๎@k~ฟ๏žถO3฿]฿^e๗ซชช_ @ ๗๑@ @ q@ @ˆ๓@ @ !ฮ@ @ q@ @ ฤy @ !ฮ@ 0=wฟ~ŸJ๘Wยำ ฏ%<”๐๗„>„ฯโง๒ฟฒอ›ฐwยํ ฏ&<ŸpKยก ‹๖๑5อ˜๐ป„'&๑Lฃฯ๎pž]ยo:yถ-๖=ฺŽึพณ;'l\็@ 0}๖;™จ9"แ>‘๐5็„8ฏm3Sยuึ™๑„u>™๐c่c๚๘šถด๏่{ k$ Ÿ /ฺu>•0sM›ฅV๏ๅ๑ณฏƒ:ึ,ึฯ๛!|gL8>๘,!ฮ@`๚6์N8ฝ•ว6ฤym›O˜0|j>ท๔™อฆ{ู๙fœ†Ÿฺ5žcำอzyœ™๊„}ซ๏ฉถำแ;8๛-ฮง๔@ โ<แฏ$ฉอถห&ŒOx&แ„>]ดY.แธ„,D~ŽŸฐ@ัn•„‹&$Lดv‡mVMธุฎ‘ะ๑KXW๎hยุ„หํX๗$์Xด[ฤBฮ๏ถ6$œฐd/ฤ๙gMฎึฆp:บf=~Z#6G&\`๗|†ํ_†nog๛V_๓X?›pigฺ๎ป็7lzœ`้3:แL ฯ็{ป2แcS๐qอฯูนxพmงใร๎็็ ปฺ;๓฿eoฤ9๓ ป%i๗๙xย ณป6หุ~;&์Ÿ๐dยหˆ„9ํี๓ฟ7แK-ฮปbยevฏคฌ์nขgs›฿ฮ๑ีฎอ&ไฮทP๚ตญ_\›Q&Hcb๘3 ืบั…8)แŽ„ึ7ัอฦu\ปa ูqธฎญ์xโญq>(แ-uN”Š๓๛,<๖\่t8สถ)t{—ใฝ-๗‰ง:แค„7yv๎ธ Xg ฿ตญญํ<ึfœu~\aฯz๊ส•ฺx‡–0Q'[>ม„mŠ๓วฌc…๏fฃ„ลz)ฮOฒ๛ุ3aฝ„๏˜ุ[8งฎย1๖ืพฯc--a'{Nณ๗{…฿ำ๎ X@๙}ฮkb๚aท\ฯฏ์9}งFœำar€ตศถ๑<๖ฐ๏vญ„oZgยInฑึ9pพ{?๗Rœ?f๏฿มงl/์ูh๗๚ekw-Qมก@ โ<}'ฮ‡&์ผnxฦNฤ/ฺaต…Š๕ษำืโ๘x3?jวk๋VถๅQ์wช ซ๙ มƒจ?ญ็ฅŸอ๎ใ/=„O/m๛~zJฤนตผซ• ธL|-ัก8ฟšถ๛ถ™?ณ ยƒ๚Ÿ๕ไถˆ:7f-ŽwG;ล็า็Gvํkุ๒†๒Nท)ฮ๗เ)I?8วหoห_,ฺ}มึ)ฤ๙ฅEปำl6Eวฦคิœwืbรฬ?ฟ-ฤ:(†ิด{ึ]ทฤ๙o{ธ฿์๛ฦท…z k๏…8?ฝhทŒฝ;{๋ืด๖Ÿ  ็@ ่[>“y“๗ต0๒ือ๘รตyฬ<3๘ตืฺอjž฿;อหํรmทฒ6๓Y๘๔ี&6–ฎนฆงปŠๅWkฺq์๓‹u฿ฐ๐WŠฺ๋uJลนoˆฟ?›ญ์พV่@œhWœ›—๕2๓Š๛๛9฿ตนtss˜Yอw{0!m<:&๎.'—ซฺ็G๖ถ6€น?7O\ล=,bํv*ฤ๙‹ใํg๋)ึำqpxอyํึต๕ตeาY๓L?๋;ฆ|X{อ=า๕K๓าฟY|วซOq^vl|อึฎน[Uๅ@ˆ๓@ ๔X_ยผ้o)4ูๆปยjYkw  ‰=,4{ nๆJ[ป1„zeแ๒ŸqŒฟชน6ยl'•9็m๏ุyžหuญึJ$๗๒นmnžฦS;็ณด#ฮ-็|– …ทšE$X๗=zjŽฝdร“อุอซXพ—๙ถ๕Cษ9๏q~X๗ฐw!ฮท๏)LพN๘บvsํFฺ๚ฯป็๕ฌSˆ๓!5๗๘7มgaํ+[h{—!ใ๚Pœฏ_ดฝ‡{8&๘2Bœ@`๊ t ฑถช-?iซVnูœ‡๐โX๋”โผ{_ŠŸ!lG:ฯ๙q-วณฝ็x2/.ฺ,—โ๖ง@ํn™‚Š6 v#ฮgnSœjฬRฌจธ๏ซ}พอฑ็ฒ็๛V฿m๗๛‡พmˆ๓}๛@œย"5ZฝŸK๔ฑ8๏ษs~ฝsญฎgžBœ/Wov๋ ฺฃXๅ)็Dt<^ณ–โ|ฝขŽํ-๎aูเส@ qพแKทXˆณ6|WOนมึงbฑญฤนk3สฺ|ฮ–Oฑ๒y\›y,„๛oฝ็7$œWด๙YoฤนUแžฝ…ะ%/ทŽส฿7ํพุโท–ใ์ ขiˆ7฿{›๘อรBฐgœยwgVหŸพฦ^‰ฌณ`†๗@œK`ฎร~}%ฮ{ส9ฉฝซ‹๖p=ญฤ๙|ถGล๚ึˆ๓ปoยญ฿อพ๛…บมqัŽ8l๛5ธ2Bœ@`๊‹sยfฮQ–wพ™ sŠN์ฺ 0๏๙๕ึv- งร็ [1น‰~ปy๏๎-†ฬชลUฟะB—rbO่uVตz ซ]Wญฝqพฟำ-W{?ซ"฿qYแ€๛ค=ทmํZ3›ึx:kี๏น|ˆs];Ž๛ ห~ดธ๏๙-ฤ๚ FภoiCโ๙jํฏXqฟญ์ปŒy็ัอsุยฎแK-ถ๏X„pO5qn๋NฐฮกŸุ๓Y฿๒ฆOWx}ŠsUี็ฟถu{โ๚ฮ;บ๗œ g๔$ฮ]ิรำึ™ณ‰IผทFœŸnํ63๖2ถ~9๓พ_`ฯใ –>๒x;โๅโฟf•ไ7ตwm;{ึ  ็@ ่;qพฃ ๅ‡ฌซๆ๑ลW๏ถถKูPK™๗ํ t^ๅ“L$=oF*…8gXณ“m\๋ืM8ž[Žnyิms†8Ÿรฦœ~ฦ<œg๗6ฌžรึY๐ดๅใ๋>Qดั†๖zศ:-.0dGโๅัk<๙๋ญำกห}[ปEฉ\o฿ื›6lื1ล8็ห๗๖ดV{ิ‹MบygX‡สœ-ถฯg๗|๔{$ฮgดษขอ#,็๋cq>าŠ๑ฝfV๛ิŒsพ€uส<`ฯi2n็6ล9ืzžฝฏO[ มฆ5โ|ธwข็ถ}ส๙k๖,6่&็|ฝฯz[‹Žxี~‹wุต,!ฮ@ ฮฌ–(@ „8@ q@ˆ๓@ !ฮ@ q@ @ ฤy @ !ฮ@ @ „8@ @ ฤy @ ็@ @ „8@ @ ฤy<„@ @ Bœ@ @ โ<@ @ˆ๓@ @ Bœ@ @ q@ @ˆ๓ภt๗๖๋๗…„ lป]ยS๑ZZ?}–I`fๆ๘ภ4สงG'์๛œ็ถ„ตl๛`ยzSp์'฿g ˜๘sยO๒7žJx%aก„5๎ฑๅOล3 BœะHpท„s‹u๗ดXที{!žm๛?ถŸึลy๚4แญ„— w'!aqืfํ„I๖„6w%|นธ–ว]8แMŒแxO๗•#คฏู๏W๘ร๛%ฮแ-ป†U‹ŽัชfSแYฌืb<๗hผ3@ MNลz!แช„fฌi;‹ตํึ]’๐๑,็T’ค๒ล„™lนฟ็“ล:f–q^+ฮwDVH85แq toดฆฯ ๔๔&ผ0ย] ‚}ค;๎Nถ.ฤy ๐’๋ฝ็m้9OŸ‹~่–Mธฃf_Bœi•Sำgพ„O&<pTMฅJ{-}๎ํ-'Gฤd โ|z ษY&&ฌdห[B ,ึ๋ˆ๔ˆ„'รxt"พ‹ NŸ L`"ฑcn๏&:แy#ๆmฯIx{ฉาgธฅฯูqทt็"ิ้ฬ„—ฎKุง quัห๗]Gฯc!ื~ฅ„g฿‰sทnฆ„›ธทVFซ๏ณ๎Z๖H๘•๏„Cœำฎ8ท฿:๖lย ฿๒†dนoษ้sŠu„ย‘ขsฏMq“„ณ๒ํฦฉๅบml~ณ„—jT CyŽ„cŒ“๛ปx๎ฒถ?Hธูฎ๙ไ„ูๆ2๏ึ$]ฐDัy)ฎ๛Rยร๖ฬvwว๎๖@เƒหฉDŒ๗% MxีxNน4แ>kงhฆฺูฐKฏL๘ญูŽ๛ฺ>ฟ6zสย่็๐๖6aยำvฬ/]า๘๔#ฦ฿,Oฐ๖p๐๚ถผH|็็†fŸn๓๔๖ IุจX‡Aท˜Ssธ}ทฦ ญ็_Lธฺต›มฤง็๗บํs๗o!ฮ?Ÿpyq๑^Fˆแn~mˆs฿z^นฎtžฑ'}.g›โฃž"็3จoTพว“pqย†fค๎โ<˜f ษW์๗+|อถ]*q๊ข…ฺ็ลyๆท}็kCœฯn‘Eฃญณoผญฟฦญ{ภึ‰Hขb:ึช็x7tํถฏ็ผ๙็)็Kนํื9.์๖@เ/ฮฏ1งmqฆ]๚pa‹โ์ึญแ๘rm๓ส๛๓=m^๑หwืๆG 
ว๋.ภvŽ๏<โ<ะข„yŽ๑X'q˜Rฒ฿pœ[ทฅญมญ›ฯBษตP \ฮ`ูCน‘๛ญjํ„ตoฆ8_ร๖%ผ๗.d๔3>'Xพ็ถHยูฝฌึก๎RำŽฑูFพ[bชํsˆ๏•q^l qL†d9ฮน๊qฬl!ไฬ๓[Vk'5ๆZ๎๚ฝซs›Q๙ฒu<~q ล๙ฦ=g๋oWzRมลื[จๅๆ]ฏ็๗ใฌ6Šฤ}ํˆs[>าžล Tko%ฮป=w ๘ภq๊หf^m9S/ลyvฉK]ฯ๊\ผd|ณS›โ\ๅsฃlจZ๛jVิ๒9ณSแ๑BœฆU2žั„๐:ำั5_ฺษX๋@เCวs-;ฆำ๛๙FO”ฤs@ qA4T7ด<สูฬ ๒„ฏจ9_๛*ๆม™'พห@ ๐a็–ฟนฆuฆฉtoยฮ๔s@ qa0Tj!Ž/[h็jำษucแJล๗>DโœQ"nตย6L˜๕ƒ~๎@ !ฮ@ @ q@ @เ}็ -ดPตาJ+MUŒ[zjฅA้oู*H๗ฝๅรqK5พ๋wแยล-u^|Xrก๘ะsกq^อx.d๊๘P\ุเ”A“นp@วi >,นP|(.ไwฮoR|XraW>œฬ…[œเนiล…โภ’9ฮd.าไ๑ก็ยก-ธpX\8ฌไฝ’‹ํโ*๑แŠ๖`ดc\ุ>ธๆwqกใ&ว…โรถธะ๓ก็ย& ๎:?ฦำs!๛y>,฿mใมถธ0๓Y 0>,ธ0ทุ• ๛ŠUd…~ 03{pเ{hŽH๏๓ รชqฃFฆws„[^!ฝหรซqiธแƒ๓ถqฃWlดcฐeซqC6ฺฒ~ฬ่ษ;ถฺฒ}๘ F๛ดMวfŸGญฦาŽGชFฆๅ!+ŒNฟอฑyฐ‘ฃซA#FU†ฏX M๓cา๚FษวQ›FษSึฑฟ๎cุ1ถ้\๙cFๅGคuCาฑ4"wพรซ9๚/Wอบ๐ Œูœ—็Zrh5๏€aีารGV -ป|๚kรฐA๙ว.ทtบีธๅำ๏sลๅฯNเ๙คg3z้ลช1๚Wหฯ?o5|žนซ ฬ[ญุกj์ ฅช1ฏฦ.“ฐ์ีุมKๅ๖ใ–—G/ตh5r‘๙s๛ๅ็›งZ~yชasอ] cฮjศlsTC็œซ1?๋ีr ƒgž=O—Mำ%gœญZ|†Yซล๚',3ำ์ƒfn`ภLณ5๔wํ˜.าฏ1OsนYๆศ๛ั~แด}ัฮรuZb‘|\'`9฿Sบ๎ดz๏ธŽฑยย๓็ฦ ์ŸŸ•ภsศฯHำAKฆใ.มณjฬ/e฿ษd๘s47ฐZ>ไ;L฿้ธๅ—ห๏ๆJ#—oผŸ้=g฿gžท๗šwhิ˜ฦปง)๏–๛ส6o๎jำ<บ^pเ๛*ฮyIฆ๖็ƒ?Wฝื/U๏\๒๊หฟWMบๆG“๑๏ซIท๏SMบqฯjาuป5–oปš๔ะีควฎ&=ฦƒฟช&หษำ;๗mLŸ>,แา๊ํIVีkgV“&SMz๖จjา3GTีฤฟW“&]RM|๋๏ี+oญัๆํ ซ—฿<ตzแซ็^?พzๆตฃซง&Q=๑๊_r–x้ะ๊บงฌฎy๊ศ๊ฌญฮxเธ๊‡ฉ.โจ๖๑Wœฐฬ~ฯพvL>ๆซo^ฝ๖๖™ีoŸ“ืำ0/<ฦ นKoตzํณ3^|ใไ๊– ‡g๙aี“ซ|็ผ|Žวqulฆื~Sjแ#วT๗ฝ๘—|Ÿี[็็๛โ8lใฎz๒จ๊Ÿฤe]]hcส2๗"ฐM๓ฺฮณZ.ื'=๏ž="Ÿ๗๖็ซ๎~แ/๙บธง_;ฒš๐๚q๙ฺธožี;“.สืหwVฝzZ/Ÿฺ๓ฌg๛;็๏ฌzแ„ผ/็เ๘<‹I๗"ฃ๎“฿-•็ห๏A>๎๋g7ๆวtNฮ๗ๆน]ฺLzโO๙}็oeผ}า›็x๋่/ไeฆวmS๛๎็}ฯฑฑ๚dฐ_jๆa[Uo๔™๊อ?พz๋ˆญ๓:ถฝ=~ๆ๒;g|ตz็ฌฏๅi"เ๗1:วR‰€๖›‡…ลƒx฿;>|ใ7[ToฒeƒฏA“฿นz—ษ|่นy๑aษ…โCฯ…ผทฦ‡ž ™z.„cฤ‡pฟUxŠy๑aษ…โCq!ฟsxK|Xr!S8ญไBx>,นฉใBถ‹=ฒ>ไšธq ๗ส}•\ีqก๘Žฉ็KอkปxฏไCฯ‹œO\ล}p]ฝ|h~<7ฎ‹{ๅน‡}ภ?|ž =7fN3>,นwฅ;>์ย…p็๙sฝxาไszพL๛e>L๏ญธฬš<ุยgโCว…6เร’ s›ดฮsa_๑แบ–ช†๖›ฟ๚Bฟกaพ‡\๘๖-W๏uE๕ๆ“๗Wo?xc^~๛ŽUo=z{๕ฮ=WWo=rk๕ๆ5ง็mo>Pnว๚7ub๕๚EG6๖M๋฿xฉ^xฆzํีWชื&Nฌ|โ|ฌ7ฎ:5ทgปŽอ>>๛r๕๒ซซ7žyคzcยcี=OฟT็Sีณ/ฝZ=2แๅ๊_๗=[|ำcี!W?P]rฯำี ฏLฌn{โล๊๕'TR๖ฟ๓ฉs[Ž}๗S/5ฮ๙๐อี๋/ฟrฎ|Ž็žศ๛฿๚๘‹ีEw?]ํxส๙ธ›vu5๊G็TฟzRฦะoŸ^ํœ๊#๛_Rmง+ซƒฏบฟ๚ส‰ญ&พ๖Zใฎ89฿ฤณPฝvกี[=ฏz๛๒9๓๓KSถฟvมแี3}ฏzะชถุ บfฝตซ?ทQ๕ะฎUฏž๚ซ๊ล#Rฝtฬ^ีหว๏]M<7๙Yพvฮ!ี+'์S=๕ซชปwLu๓VW|๊๚?Q]ฑๆGซKV\ฅ:ศุ๊ฒ•Vฏ.ณju 1ีนGUg๔_!Oว/ผ|uภ\Cช=fTํ2หฒีnณชŽZpxu์Bรซ“Yพ:eัี!๓ kถaปฺ1ๆ ๓ก๓k๓์ฅVฬ๛า~‡~ซgZ&Ÿ็฿›ญW=ถฯŽีณ ฿ืส2๗ฤp€gP๗แ~ธท;ฟ๖้ผ฿ ‡๏^=wศ2xf<žฯ‡้ซe5๑Œƒ2xVy>''<[}8เุ ๎รwศw๚ึฟฯฮ๏๒;๗]_ฝ๕ุ๙=โ็;}๋๑ป›๏5๏ะใฯฟ’฿น'^x%ฟƒผทฅๅGŸ{ฅOธ๐S๚W‹๕›ญ๊ŸŽ7C๐เt*ฮ๔&ี“_ุจzfป ๒๔นํืฯ(?ำ}็œชw๙ษ)ฦ'†็อ{5Œหป๗kˆt-#ฬŸ:ดš๔๘๓4ู@•‘สŽxระภxH†K6D“๑RMบ4u oวเHSŒNŒ9 Q/ฮ11ž0ฆ0z}ฅaHad!>ไT์aศq฿z็lCBƒใ๓ะ–ํ:žภ5ซS€}$€1าtฏฌใzu|ืŸžฬ‡ž โp๐\GŠ“0๊ย…^ฐ‹ Qฦ‡]ธ๗8ยs!ฟ1๑ก็Bฯ‡ž ™Š="0ล‡NY๏นP\ๅนv๐กธ๓HHร_œพr=โAฯ…๐๛•\ศpพ’ =z.ิฑผp๗mผx/yณ์ศRtVrอ๏ม…ดz.คณว๘PŒๅžl‹ ้จ็7เน0‰wธ”\Hป,ฮ๖โ5_ถ฿ผี๛}ผขร2ผ็}ร…ˆ€Hฝ๏™—ชW'พ–ัฅ3Qr๏ต Ž IBค9Ÿฆ,#6฿พ?ี›O=E5๔อ๋ฮจพํฒ†POโม๚+/eƒE3ŸEkฺมƒฐA4#tธ‹rฯฉ b็kฬ๓ˆtDโaฤqุ็ฅt>M97โ‘๔‹ฏๆ๖ดป้ฑชqOuุตV'๘hต๏ลw5ล๙j๛\Tญ๐ณ๒ส?ฝ ๚่—ๆm๏ญ/ศ"œ{F"ุžupอ๙~MpJค"8‚ ‹q„!วฃC$y:ข๓๑พY=ฐหซ[w“,๎D1B\ปแ:ี?W]# u„;bŽ˜>mฑy™๕g->2/#ด๊ˆopเCชฝg\ํ•๐™—อโ[@Œ#์9mzึ]๙๑U๗๎๔๙,ž๏6ี_< ๓งณsพOuN๎ฟ๎รฝฐ/}๖Gdฟzา~Y”g0าzž๋—ŸŸ!S:˜ฯ๛8qฮ‡๏q฿JœK ณ?ฟ…Y•z๎pก#‰u้=—@็็7ฤwอ{E‡๏ภอฟ฿ฯพฐ —N๔๗™~‹Wร๚อUm๓ž8a‡๕ีฃลล•ว`|า๋/ฃoFภำ‡M่ๅฆจํˆ/๕๎cDะ๋oFฦbํฯ•ศ€ฤ0’งใcCว{|1๔0eไ ฉฌ“ว ƒU๛หล8“†!ฦqไ๙a?ฮ๋=ฮtrฬcxโU’ก*ศ๘ฃ miƒqวฑนฮ-oŽ Gy~dlโยร$ฃSžrข็๔\่๙N’8‡ฤ‡โBE y>ิoŽ–\(+นPpฮ๋นƒ)ห%ฒพŽ แ๘ะs!็ีqJ.T'dษ…~๋ล…ฬ๛}K๑^rก๋ž ้ฌๅบ่ฯJ ซณขไBAฯ+O3ฏgrdD|่น0s`ชใฦฤy“ yxฯา๛Yว…Yœทห…โด.T‡พ็ยYฉ(ข>็xอฟีoduไ ŸHF้ ๐žOeqŽภhra6ู[ˆ7ผฮˆmyาšZฮž๑ดL;}๖'H#dเใง^hx9Ÿถแ]ไz๐8"ผ™G๛แ็ณธ฿Gฒ็zOฝ๙ฑ๊บ‡žห‚o:๋wล}ี™…5๋น'ฒŽs๎Oๆmฅ๛LŸ้ื?Tvหใีั~8 )u ๘7;๔ชjƒ?^Qญ๓‹3>๋หชuพ< ๓ต๛ฯ|ฏtSฤ?ืG6xุ™?ใถ'ฒภFๆNŽ$๊yพxู{Y๔%ม‡”ุฯi}~žถกˆว๗ัŸ~={ะo฿๎๒๔žo}.O%†๑Xใื๊ษยแใฒ วฃއaŽxgศ Ž@Gxnžกีพs,—บ<ใฌWญcฑO$ข๑อ๊๚˜งƒAp—ฝี‡{Cไใ็žืtNเืq€„{Ž4Hน๑๚%วd๎๚#^๔w'ฮ9Q"๒ข๓]495}?ผt.=๒ซ๙ใ}ฆS 
Qฮ;ว๛ึ)โ5_ฆ฿๙Yoo‰๐žฟโ<}พฮ  ะqx ษพฒ๓zู/kฎaŽ'ศ ryษTX๓œsB2™Œ›ฯ"\BƒB=ษุภˆม˜ฬ‚ฬฅก*/˜ฆ2ยoqซย=ีษม}a๛ˆูไ๛„x๒Ž€๔u'ะณ๑ํ ›'u”ไpRป…6ฝ๙t่เ)B%„Gด ั“ม™ะซ฿ะ~›7ŒO [ OD<็ฤf๐vJภ๒šcŒ‚๐ž๗-Š ž฿•โz.„๗ฤ‡็Z'>๔\(ฑŠ ลƒ‚q!ขNกเ๊ "๊$ฬแ?q!ฟEฯ‡โBฯ‡ž %<‡d‘A๚อ—\X๖\ศy9Nษ…โร’ แ ‰]ฯ…บ๎vนใัฦsฑw58N–ž ูŸถดQ;–u\ฮUฆ๙Ž-๋9้y๖@i[ษ‡|o|งž ณเ†ณ@ >๔\จ(.ะ… ๅน‡=๒๎๑.N-.ฬ๚%ยโCใยN๙P^s:)แย๐ž๗ฝmˆทไ๐$ฬbขqNX:็ๅุœ‘อตpO๒YH \Cซaํqฮ*ฌฝ ๗—G=งคgฆƒC  ี‡ะโe‹ส ฃ‰N'พ;':–๘m๑ป่ิ6”ื\!แ=ไœc„๒ƒ๔ฅo4„๙ปฤ9นๆ๔๊#ะ๑aโโŸฝy'30D 2B๔\่๘ฐู1้ม:ว…สอ.นP|<๒[๔|ศ๏Ÿฉ„&ฟiฯ…^ˆ{.T'œข}TฏCขฮ๐\่;สจ!ฺ3_rก:HK.”€ฏใBฯ‡โ+ฆดำเAqกฤน„9ว`?ึร}ดโBึณ]?|XผlRวซ๏ิื๓ŒิIขบโBๅ้gqNg‹๊fXzO|ุไBๅ’๛zสC๗|(.TGบ๑แ{ส…๐ฏผ๒ส;๏ฝื\๏y฿ฺ†g๐(# ูs‰sย9x s^y‡D*กใˆฤ!ผ็ไlK`3ลcื›s t™โญฦปผำi7gQhF่าั ุ†ืzหฃฎอ"ฬ>ˆs–ิw„6กๆรw>ฃZ๎›งU#พ{f?ทวyy=žk9b™eยำi‹'œmxยูฦup]\B›kBTs-ˆ+๎Wa๒„ึ๓ธฤ=๛I ำ๙@ว๛ณŽmส•ฯ๙ส้ๆ”๒๖๑ˆ฿vY#ล Yƒื_z>G#ไ๏"ตอ^uBชI!HSๅซ#F%ฬ๙ž˜xขลxฬO:โ˜Px„.m1G#ะะด!LNH<‚œฉ–„บ#ฮ%ะษOgยข+็pvฮวพ„ึsLD?Sy๙5๏ฏก;qฮ‡v็>฿{ฮYฮขŒƒ๒ณPŠฯมZ}฿Qง๚”6มwMžsฅ5t๊9๗^s!ผ็q๔7ฬ…\น์rมฃWฟท~F™g–s+1:F)F'&Sๅหs๎ฝC„mฒŒ!Šื\ฦ(F„ฯ›K†' † a‚9ผL๎(/ Fฦฦro1ฦ”ผภW2ฆ”SอิP๓ขำ“์O[ฮงBๆฝ—ใLฦ›ฮ#ฯทrฟe˜b๔a๚G๗3Še@ส๛ร>2"ๅแ–๖žp ัCoo@โ\^&ฺ{Cิฎ2Jๅ-โ˜2dๅ‘๒Eๆสu๏5’HWผ๗ขหE`ไผKบxkz‹๐u'ะ๑R๒n•)พำGฉไXRH q”\/!กฅทจEๆ” ห)ฦง็ๆ…๊ดR้5ŸR๏9ฅ„๋nJธ-ao[ฟ`ยE ๗ุtซA ๆยWW๏า…ปˆs๑ก็BMฤๅ๔6นP‚]|(.๔|จ: *๎e|ˆ ๚zŽเ"~Wx›UฌMB[๕2”f#.TTŒ~ร๊„Tžvู็S{ิน้น)หๆีษษ๑=ว๙( xE!ๅž =ทˆ u,xศ{ย=–\(Oธ็Bํิz>d›๘ะ‡ฮ๛B›ญrิีษเCเ}Z/ขว๗ุไBWญm.TัUu‹ -jญก&.ไs|๘žrก๘ะqa'|Xzอ…vฝ็ม…ํ}žRฤ&;{w$ฮ89ผ!™Dค๒ฉ U8โ‚ภว+Ž็๘ุ<’…*วฦ๓-๏8p<ูืxฐw=๛ถ†}ตrD๗J{žŸ็๑N#ึๅ9ง=๛6>ๆว็Vƒv8ตZ๚KวVพ<พZpใ}๓t๐7–E8kD9ข6 mถ5ฯZปไvyฃ 9ๅuฮ… ็๐ภ๋œ‘ฮฝ!ถูF[ฤท:ธฎ‹v'๓<B๑Œ็็ga๋9<š\๕$ช๑่ๆฮถ๔Œๅ*"'a› *8งฮBฌ9B/5‚๏5B™\t„9AŒธ%œ๏5b]ยœv*ฆBs์‡„ญซXžs„99็„ษใi็œส{Gˆbฏผq ๏=tp]œฐOซQtDx3ฟœ"pง&ฯK˜็uู้๑ัฬ฿O๓=Š๓"ฝW‘D฿S6๐˜“ถม๗NGŽา2:๕œ—^๓xฯำgฃ„ป๎Mุตf๛๐„ซH๘A;๛NK|:Vไ$ทRU†•?6qM&๗‚ำใM('F'F( c“๘๒ฑM(†F….7…นช{KœใำW7'ว’u: =RNค„นŒRUiวp’a'ฯน๗tะF!Ž2\ีF๙‰์hหq1ฌไญ็๒๙๐G t_ฑ Dy ่\เบ}˜ค QŒGy$žๅอไ‘วC๔Oท5ฆฌ“งI"ข2ZีV†iา้บ๏h๐Fฉ๗”)šภ‡ี*ฬฏ+„โช๐T3SŒ0HkTข9TpKเ“‘Š—aNฃ„f$ฯย+)ง*์ฅYธ้๘x‹œ—จOQื|Jฝ็๔ &ฬm๓ณ$\›ฐzย"Tฆ ฟ0คp!ลซ<Š =vแBฤ’๘ะsก็Cqกชฏ{>Š<ยโCฯ… สวฐN\'q๎sช%พญฃฐqฦ}ิ‘ธcŠ ลลœืsกขvJ.T|ษ…Šธw๋>์ผฯžล…Š ‚<ชSณŽ }าฎส๖ฒ:ผ๑Bธลข<Š 5๒|(.lฆใ๘Š๎Sส…*>ศ{Xว…LvแB๑a_sกB-rษsa'|X็5Ÿ๏ypa๛น็ไฤ"0ŸสEG*็๏c๖๐Ÿf^-BD^Bฤ:žA<็r,b[น aG #tฒ Cวซ FtำO6ขัผๆ/.m†œ#†่,ฦŽhฦ๛ท|ฑ-~—ซจณ฿’{xตฤV‡ๆeyอ๑คณ~แOPอฑ๏WณŒ๙r๓ฎณ[ตฬืNi y:ด็@ุำ‰ภนๅœ0ฟ๕1ื5๏‰ํ๊@ภ›Nพ:ฯ4นฃ(Y‰นx˜ๅ,gแMฎ2๙ั–อ|N#ธ็๊ฆ—๑๗นึดอaบญuยุdฤ5๓ˆcBุษF˜#bบ**‡ˆVa92ก็ฬ#ะ๑˜ใ 'lqฎ|tยๅฌวฃއžsธD7"œ๓p tฐžkd=ื@๓*b7ฅž‹„7ศยœP๗ด>…Kฯ5Wnทฐ๕ผŠํiจ+ื›~DXจ ฟ ~D[t`ฎ^zองิ{ž>3%—0(aV๋ดQดY4a•„Ÿ{qพำŸNืรeเ)B์s†1๊ าœ‡†ท/‘๑หPตa๕เ+|S๙ๅ2>}ศฆrKแw“m%ศ1N}ีs…o*ฬ\!‘๒šK4{oถŒQy‡T`ƒK˜ฯฯ–‡Dโaฌ๗žy…€{๑*ม*ฏ‘<๗๙ญๆ๒(ษจ-ฝ:“#ฏ‘ฎรSๆnm€yค๒8y๑ฎถ(•['ะe๚ขJ ๏๔แฏ u๗!๎2J๑ฎ)๏ ๔โŽ@Ÿt้ไ๊ฦผ/ฝ1Jmขๆ<๏!"ˆๅ<กอ๗ผFœ{ฃด#ัAŸ™์1โธ>Œณƒห๔๙ฺษ่ฌ3FมŽV ั/ฆเxs&7a5๋๑\ึ/ฮ๒‡ู อฝๅ฿[ฟษ‡%Šปpกยืล‹โB‰ฃฤ‡M.T๑หV\h๐\จœs๘ะsก็Cฯ…7J8{Q.>ฌใB~๛jใ‡,+ฉ–๙—\(TงธPaโ%*็ฝไBuTzทธอ๓กO๋ห<Jh{^๗•|่ฝํ q๗\XF ดช๒..T็ฑŠjŠ U€Oy็๊xษ^t๑!yใxฝ{+ะล‡ž y >ฌ็S… ฺ^poo๘๏ รH–^๓า{\ุ7\ˆทpt‰ssk†์&A“ร“หžu„"ฏ9แ๐ˆ ฌแวsŒ '„‘ฮ}Yฤ3ฅข~`„6ภSญ0vBยณืูBภU%/6m%สผ่ๆไkH6ชฝK S<ŽrŠอั ๐u„ธ<๒L9^z uD9*ยฦผ<์์3Eรำsษลฺ,๗\ขฎล9ล0:๑ MkำŒ.ฝเไb๒ฯ†i7’*oนŠฯง7ๅ{cTกœ็ช<;ฉ1ฮฏ\^_Nุ€ "…ถ๛ฐสR +รKย\๙ูำ#8๋1ะไaฦ๛ํอ“ญ H–ูง,,วตฉjฒ*(ซ ฒL]žsŸ)ฏผG2Ve\tKฬ+ด]ํhฃํพ7bฝAชg#cบ•ืศ t? 
›๒^U…฿ฑมโC฿ฎŒ๒\ฯฐ_‚’ ลอ%๊๚ๅย2W\\จศ’ ีษูŠ =z/บ๏hU“ฃ์ดจK๗–3แC?ฆ|S ร‡V$ฐษ…Sภ‡อ:%๒~|ุ"ี๘pชqกrฯ๛FœZน฿"-น lD}ฝ;> .lƒˆ$Ÿœ‚dxศAq~MqŽธ$ค]•ฉ#ไ˜+Œฯ1Bฏ9๓x“%f๑8#\ฝ I˜Gp xฤฦxฮ 9G,#ฐY gžu„ณ#ธ๑œ“cฮพ„ฏD9  Bฯ๙l+๏๐.a>๋ธํsจ;๚๖เวคƒแอ}0U๘>ภKJม6sM4แฬzˆtBzฮ#ื๘ุไœ฿w}๎๔pV๓ข}า~อกพๆูKŒธผไ˜ษcn““nEๅ๊ดE*Ÿฯ4โ—cๅp;>ํ$Šีๆuฆ iG˜“Oฎj๏„ฏk่5rฬูN;ށะWุบ ภ)„qฮ๕ะ) ฑy๒ๆ@ืงbq=}ไ—๗ažsฮznใŸ็๗แNธปm๓๗N ย๙ต ๘=เ=Wh;ยœwข๐ชํ๛ h)ฮW๋7?า~6แpทผmยฺ็-๗–๘tบ&เ๔ร ซื๖ฌึ อผ)‚$ƒTใฆโ-โฟB“X eฏซศ^cˆJœ+ิฯช M†B81่4ฯMV๘ศ{uไษ๖E0ชXขฌ๗aŸ*.ไฝ.qขฟผ๑๘j๏Œฏvปv2พ{ี๘๊Wฏ๖ธn|3็[ี”๏./็มSคผv_ภN็*‹ถษ-=C2.}Sใฺห3ค๋๖(บผ็ewoถ๒Iค+ด]กต ้, "yžฟoห์bJ๔๐ต#ฮ•ฉbS{บห{|ษwaศิPภpตbI9'2กใRยž%ฮ1Deœ&t"ฮwšqTu๔L๋ึb฿Wฃั1Spผ๙.Ki๗|Xว…๙+นP|Xpก๘ฐ ๚ั*<–|่ธP|(.Tฎน*ณ‹ ี ฉ฿ง็Bฯ‡*ฐ&ศ๓]rกjw”\ธ฿ ใ3—ˆ wนฆ1ฦๅใซฎฌ็B_@M\ศผชภทโB‰s_๘ฒไBuR–\่#…Jคm)า%ะ}xป:+K>T‡…/็๓ฯoq๙็โBu4ร‡ž ›‘Dผ–๒ะq,็#:œX๐aๆB N-.,ลy|ุ็‹ถไBฐL็ม…}ห…y๎gy—8ฯ0Œ้ภ ๑œ“8G”RKใใy—๗/3a๊x‘™*$.๏ทยา฿xห้iถ#ช5พ8๋–ฺๆจฆ`FณŒˆ&L} ๗ฮภ๓Mh:ย\^w<แไ™ณฝฮc๐ฒใUGœ/๒ฉ›\ƒชภSะMนรtFไ ๊xรU<|q<ง ฉ๕ฒU์$เŸ{"็‘็s=ง $มจ ไ^ #ฬๅiV๑3ฤl3๔ะxพ'„~:ขŸmูำnyูนมZวฑ๐ไก๛ผoD6a๎>็œuD8 „๏9žt–ษ1ง๛ฎ8&s„?นโx๎UะอWYืuา แฎ๋๕โœ๙;ูm˜3า฿๚^rบภ%“S)s๑<๒๓ำ๖f.บyA_‰s@ช3ผ7ˆsjt"ฮwHโ›3 ฌล qพUว๘\ภ>ธMqr฿็}) ย5‘งชyL_ !yŠ00ศ““ภBœ›aั^>|SFจ Qrํ&]ฺ^UB…Fœ+TZี‚ๅ)ยฐSฎขใึy —”q%Œผ42ภ”[จ\mŒ4„๘ž฿4@ๅยH๗ฟใ›Fโ+—œX}๑‚“ซํqB^f;  Lyp0~นfŸ*ใWFง๗dk_ึ{ใS๓€๋Zฎkฃv~›ฆๅ9๕LผA๊รWห!†d”๚pNyลฮ้บ†Vk†ณcPšŠQชBIอNžžŒQŒP‰$ณผ›ทฟ{|J็฿eTu์๋ีbYWŸ"qnว ’Pฮž๙ฐ๖๓\(Qฎr๗.7๙PจB]๒š‹ ฝ0w|๒›ั๏ฆ,บจกหTi]oค‘ $๎๓•ฯ}8นRZ4dPงคธ~“h๖\_ย%Š=r\…‚KไŠ }ีu฿Q)ฎ*นPbปŽ %ยหuดซ๋ดT4‘็ยR ืyาหฮJ๏=๗ฃYˆ8้tยtI๏QM‚๔.4๙ฐอบM>,ธฐไรฬ…ผวtnย‡S‹ 5”šใยNฤ๙ช3.ฺ’ มฒ3ด/ฮƒ งฌฐxืzDM€ iGtาฎŠิชฤNศ7…าไxสษฯF”#t}Xบ ต!†UYuสๅF˜ใ!G(#บา๓ฏฟg.ไFA7rล™G ใ๑Vx๛โ[’9`[+QN(;๙๊ซ๎}a็Sรžแ%็~U„๚#ฌ”7Œวœจ\EžซณธV(z‹*ฒ#ิณXๆ‘m *ํY˜Sญฦ:๏โ-ท๐uึ!ZนชจŽ๗ปBฺN‡Iฮ_Ob]ร†!49V๕&ิูWยฏ9ย™ใz/ท„:โœPu ๐v†fcธ4<็๒šณqŽว[E˜J”#ธ›c”s๖œ”Gฯ6ภ‡๋“ื;็ุŸ๕‡fu๖y(=œฃŸžญฦ‹ฯB=กK4฿Oznนภž…พ7zBวฟGoฯฟ ย!ะ5„๏ Wˆ๓oฯ8ฐฺyฆej๑ัhGœGX๛๔@ภ-sาษ#Sล$ฬ๓ ec@ž$'ุปๆ2Fำ:Bฺ5–9FŒ P9ๅๆ*XTๆG{รhYF๓;ขชๅฌรpรŽ|๋Šฦิ‡Wช=็ใxzดม(ๆผฟfฑ8+ž&๖Wศจ .)ผ”๋ภ0,ฝ฿ๅ2>ฝAส1e๘jžฉ7N™gึ{ใUFฉŒั2|ิWp๗iๆ.ฃิ์บ“@๊R@ข๑„แ‰wˆโq ู ลxUJ%ผFืํึนwACmaR„N•‰mฌ฿Nฤ๙f]<ฯ๚ต๘ีœk๔(ฮำgผD*0—pyยf ฟ*Švi›5:<Šฝ—\|่ลนถ)œ‹rl4_ฮ‡ฑร…JัะŒชช๎๓พ5ฏJๅ>zˆฉยรล‘š‡ุฎkpEษ…pˆjbx.„/เ’’ ๊LK.ิ่ฃฏ(ไ ๗\จh ฯ…ŠhzN๔|่นฐๆyตL๕)บo๓a™๒SหTน๘.l1YนฺŽ ๓๘็Žปใยฅaฉ]†*Š="ฮล‡S‹ ใโC{8_mๆE[r!4ใๆ~ผs˜C”{aŽp'?t<<๒พ๘œDนŠตeฏน iฯtBำํVNc—{๑^๛™ฮ‰$ด3ˆ `,xพ*ำ™dU์ณ ง กต•—็ง๗ษo$‰s ีb่Kqฝ™—icWbญ็3'Ÿฐฌ+๊ถB›โผๅพำŸ~  8ฟ๐้k:พุ‘ย1\(^—aำJ#˜แTGีู•—จ0N UV~ŸŒ:/ฬๅ-—Q%oตŒM…ˆcˆ•9ฺ2ฬ$’ อ”hซ3Oญถป่คf(;ฦ๖“ื ฯ(๛bศb”ย=Yงขm\Ÿrน~ ?๏๗bป4&™ส‹ฅs1•—฿ทW;ก<&วใ~}nฆDบ๗ฌ•B‡vึ…น+ดS•๔ๅ๑“็กAt„Zจ9žณŠฤฉฒ‡wMJya=๙ฝb;E‘4ดF)๏/บ’๑›Eำ5?ส่“ะฟ฿lั0Hษฉ$4oFir,w"ฮw™stuส|๋ืโภน็ฃnHธ9แVŽi๋Jธฤ†ป`บ`คSศ…ส5Š ฝ0/‹ฟ‰ล…๖‹แBผฉ„>๓[QJฟqกxPEษ|evrํSU$ผหผmyล‘^ณพไB€ุ† แq! 
}ษ…„ฝร‡์#.ิ๙Tฏรsกผไž ๋๘u:ฯ”raษ‡๊eพz]สFฤ(=่โยr,๔ฒรR–โBŠƒ=พ‹ปแย?ื†%-นPUเ3z.4až๙pjqกฦ9w\ุ[>Dœฏžฤy+.ƒf๊Qœ๖5&ˆื *Jีจ5ถ9fŠ ฉ‚นŠฟแ-งz9S ต!ย๑˜#ีxพูไ†#ส๎xฬi#๏6โWZGœใ9Wˆ;‚]XW'ศ๖ƒcql‰xฤ={ฎO9๗‚งœผqฤy๔D๒ศ้dภหฮuั‘@'ฟ<\~ลYpZnx+žผs›ฯก้i…e7Ed๖YdS/ฒ m…ฃ#x5”Zฎn]cgก๏ยฺ™—P็xๅ9๓xที*ศฆ!ฯ€Šผ๊๎๓าYV~9โ^แ์ˆ~มฮนrey†5Kฯƒ็ฒ@ถ{ฬฎs!ทมsn…๐ิกP+„‰\H๛๒4R .=,ฃ’2H1,ฟฏ๑]ย5ู—๖*Žฤ: Z๖ง-)วPจŒWภu้๚1๚Jq^gLzQ๎!CำฃlWืฦ‹๓rผtoถ*Žไ+๋{๑…‘|X'฿ฃ๏p‘Qช\tD:9˜ู“$ัš>\ฝฏ๔ะPPษ{ šรตน๙ฝต4ŒlŒฆwถ,ื1 3ฒŠฮaˆRp้๒๏ๅ๙Nฤ๙ฎ๓ŒฉN_xƒZn5ฆ8ฌƒˆ๗\œ‹ Uw\ุไรฒ†ฅB\จNส’ ๙}๐›Ay.๔|ั7ŸทอoU(Aฮoุs!œใEบ็B‰ึ’ ท๛฿šqฯ…pŒผะž ูW”ž ๅ!/นPJ.ตห…uพ–โผ,งโœญ:,๋F๛๐รN–sก็Cฯ…Mฯzษ…ž .z.$tพ ๊ฝๅ}5>œj\ˆ็ุp!€{ษ‡ˆ๓5f]ฌ%‚ม3ฯ[พวถaQŒ#๏šœZ„^dผอZDฌŠภ‘kฎ!ว4lšBฺถxญษ' \oผไสฎ*ํ„ป#ฮYGุ;m%ด CG”๊^Š๐บs<๊ไฅsnuจโ;}„7<๗x้t@\ฆŒ”*ํน๓รฅq,ฮEGb,‡M“ฯLจป ่f1=(ž^‹9 —ัž}›p+^q7œZฬ„w.gVEะฒท"_a์๒๐7 ูฅ๋ษy>ฮฤxรฦsงฃจอ๋“€f]ฏ/ 'q๗#ว~ยc9วŸˆrMข:๒ณD์งใฒ-g—|N+`:>ะพ/ะWŸื_œะ,˜H'‘%คE0ๆ}'โ|๗ูU{อ>ธ๋อž8 ใOภอ"2ไงแ%โ;ž#…v๚๓าKTsข2$€†ุ->gƒTE„dศS„aไ…ค ?ๅqk๊ร}5` 0 3ผ.2โGว˜ฤิษงW›F6L•K‰่–ฏ็วจรP%คใTž%ฺจบฑฮซq•gษ2Fข้ฝAพห๒ีy”JฏบเฝEšzq๎‡eซ๓ต2HU4ชUa$พ;B:หaึผAŠื‚็Hีฉปคˆ'nxwxgจt์ว„fžm€|ฮlึษป™ูfศ(ฅ๔๖U๑ฃfฏญU(ฦE5๘,ฬ™&ฃดqใ๙ฦVg,ถa-~ฟะGBœฟฉธPz.๔QDฅว\Dีdoน:)แ?ฯ…*w“…ณ‹ ๙ŠC$ 5Fx๒ปึผ†SฅuํSrกxคไยฯž๖ทZ.„เร’ ๅ๐a๑บธ\^๛:.๔|่นP|Xzฺ๋ธP๗Wrกฤน: ๊ผ่ƒ๎๙ะsaY(ฮwVz>๚ิฯ…สIo๒กธyใร’ ล‡%Š5บ@ณก๑แิโยœ"ฬ {ห‡ˆ๓ฬถXK.!ฮ฿๐๕W^ส9ตˆs…ํโA'ฌ]"q๋ว4—@วใŒ็Q‹๗qŽืศลŽ'tœ๐w ซFhปผํ=…ซ+Ÿผ๋„ฦsฮŸ๋เsฎ•v–เx:น?Šธๅ<๛‰sGžuย้„ภJ.=ืฤufฯ9เ$rwฮxยณWN4๑$Yฮ๙ฮVˆ,{ฬๅูEX"doปฌ‹W9{yํXชB.กญpuผเฬgQN•rฎฦ๘&ฏฺ๒ซณHg๘0ซ/O:"›vDถrาฦ&๗‚\นไฺŸyๅˆซธ๗ศuK็\{r๒ ๓Oฯ6 4q ศฯ๗a้* —ร๕ญ€žฤ9ต8Fร้ไ;R!> 3‡๐W h…\็Kล{ ๏ซ็กƒ€)y็ˆs:w:็?Iโ|๏$ฤ๋ฐ~ˆ๓8ฯF)CPฉ,รต(ด‘็SคะอยC„ 4ถน†•ม{€ฑ"aฮCง,ฬฃn~8Rพjน๗ƒ”œZDบชcŒJดไbIษ U๚ษ๛ขaฆx‡0d๑8iL่lิ๒Yely%sgzosจeB_}rฎ%# ]yŠ:0F%ฮฒเุ๊%7ชล!‹ฎโ๊ฌผz—Fฺ„๘PโผŒ$ช gทcqกD9เฝึุ o'ฺsก†6๓\ศฒ:#๔ โท.n๔|่น •\(^rก๘สs!๋๐งไB๑aษ…ชณั.J {ฯ•<่—\่=็uCฒ•Uฝ8๗\่+ใ—Eโฤ‡egฅฏๆ..TT‘๘P\ศ๛ฃwฃŽ <(.คM“z>œj\(๏yyฮ?:๛b-น,7Kˆ๓๗ƒ O Uคฆb;๙ูxฯ๑ข#ะ"x–๑D tผาˆbฤ7นๆxส๑š#ฮ)๐†7:ผR :sึ3๔Y;ƒUrืๆ„ฒใ5Gจำ‰@‡yยxสณง•๊๊&ฆ€+B แฎ๊D ไ#:zV„p? 
I ำ\‡†dhB:ฎ่ะ้Dœ๏3วเj9—ซลฦณ,โC%ฮ้ G”ใ5๒y—f6‹๙v'ฬ1dXจ—_‚หฯำำก‚!ฃaษd|สkกะu’1ๅ N‰qUร“็ƒLขže<<[œrZ{๖)ี'O{ต๑ฑgv๑œKภำžใษ„‘ฆสฦŽxเYฦ่Tั$qu_สฺฯ“Yเx‚๗ต๒ฒ—b‡r–ž"฿yQ'ฮ๋B}ก8u8ดสท”ฐP1+ฅ~x(พoŸ{ฉ๗Dขƒuz_ไidชํ‡ํชg ๗.็qึnE’๚๚๓ฦมŸk>Rฎ%ฦh2z;็{-<ถ:ภFต๘s็๏›87ฏ`“ .l๒aEd57$ส=zฅw›๗˜Pgฯ…ฦ๘-jdๅj๛แ%ถหB“ฺwมชŠ ยโB<็โBึืq!ผฤ1K.ิyJ.๔ตB<ชฃฒไBŽ_ว…l+๙P”‰yฯ…u|่Ÿด็่ญŠfz.,รล…eD‘ธะ็ขทโB–=๊๑\ศR๑aษ…ž๛\ฐฒeฃ˜,|่ธฐท|˜ล๙‹ตไB0dึ็๏ .œ„Vฐฯ?•กยpไi#\"x˜5น๒ฯU์0pBฬอxตๅsฌ&_วซŽ˜ฦ8'œ๕ณฏ๚อ.ย›}}Ny)ฬ9็bx5AEๅุ—๓BOฎ9ย1{W5†8^nช~ง{Dเั!๐Bti์j๎ฏ:โœ‚x9ค™โgIgฏ,žbซฎผ๊œƒอ๐kท]ึ’ˆz<รpช›w8๏gข6“ณ1าณ@w๙่้ˆk…บ7วNำ<›ๅs็qพ๑[a6<๒ /มฆส™GˆณMรนI„็}๛๙œไฺขOŽy:>็B`7=ๅ๒'!ญjฯ:เm/™๗›v* ว๚gญ : \œŽพๆ 'ธ'DžŽ‡oฮž๏ŒvYใ1G์[?่ห฿ืฆš ๚M๐nt"ฮ๗›spuภ\Cjฑษ, ‡80‰syŒฒืจฆ@\q.Oัคษ9ๆฃ>ฮฬc„สุ`#Fรกi(3ๅ1•ก)F–๒ UเuŠ2ๆหjรผ†Eร๘ฤลc(‡ |ีbŒN…สKไkศ Žห9ธ :ๅ†r>\ี‚ฝ0ื๕๙ช๑‚7NKรำซๅฒ/’ไ๓๋s๓c—aํญŒRyŽ|w_ฐส+ไวB๗Fฉ7L5์šŒRผHšสว๔แ์่สีฅ†"าถผLจ;‚๊ึฝงฮ๏ƒpNฅˆ๔„Nฤ๙‹ซ.ดq-[๊ฃ!ฮ฿ฯฮJRฤ‡5ล23ึีฐ๔โฝ๒๊ฌ,Gฑ(”cŸืUpฏ+็‡X๓\X tuXJจืฅˆK.–\(>๔\H๑ก็B=ฯ‡SใำRอqao๙q๑น๚ทไB04ฤ๙๛ฦ… ฝฮyำIxQั\ABฏฦซชO็)tš๐=ๅtrูำsTa6„ผž]ณช๛T็ส;็๗ภ;Bง ่ด ฏ็^ฎ๚YœใTXย8[xฮ๑šหป)#Tลo„๋ดžeย๐D`a˜*d qŸ7ญะp„ฎชชc„)—\ล0*%XฝpีพeตuBBธk˜ด2คœใ”C 1ฑ†๗ƒอ‡ุ๛jฤหใ Qฎรฃฮ8-แ=Lส•งศ๏ห๙}a(๎ฺชZฑG™sY ๔าct‹้sไ S_มXแ2HฝpQ^&๏ฦ&Fง#Šc{.ŒDucBญ:q—aฯุไiJXฆB35฿–`รฅR1ใ'รดqพฯโ+U—ูคG ๘Xˆ๓i…=โAึˆs…)๋ฝไ]๖|่นyึ!ุ๘๙( ).T‘†…ท๘ส๊๊ค?ˆWJ.ิ%"ะแCธuโBฮ+>๓\(O4๓pธธจฅB•uขผ–|ุŠฝ8ชS@ฃiิ…‹[qกO๙ฉอยืโPนธฐ;Oบ็ร’ ้ภ๔\จซL=๊ž sแLว‡ž 3ม…ˆjว…žฮ=‡๖–ณ8ŸปK.Cg q>-paฮ๋ุ1๖ก฿„}ใFด*mยศ็ย กLๅ๕RTพŽp&๐s<เฅwœ6nฤนฏึ^‚๓ะŽส๐ฬ๛ยr„ุSa๏&‚q€ฬ[Ds—ชPค๐y๕ไŸsŸŽใb !สxภ›ีิ QO"กอqŸ0‘ฏผ 6,ฯP œ;/+d;MA.lฦ้ —ทi‰'ŒยดณPถ๘o!๑ฌวรW\Uีsม8 ฑ—ภฯyู&ๆUa `๒†sY…๔ †ŒฃŠzบ๖—ฬSŽ\^o3?ฯ•yฤตว6”ะั‹`็ธOูจ๒:้บ0ฺdจ)|ำ{ˆT^^ฌาธTตwฎƒฉ:ดพฮPญ๊็2ธ}˜ซฦ3ึฝ๛BH็=yะ}Aคบ1ะKฏQ+ฯ๐ก ๏,sา™าiรzึ้=FŠภP่ฆ†lหyมขŒ~ พ็GC u(ฮ๗[zฅ๊๒›ึโ˜Aq> ๑a“ U ›B„~\sึ^r!0/ฝคฌ๓\จŽ1~›ž %ฮๅฉVŠ \P๒!|!~๑โU\(^(นP๙็%ru\จฺ%ชP]ษ…บฦ:.๔๐\ุ–ž๓’ ลwบ๏žผ็ฅ]฿‰:N|๎น†ผ+sะหจขฒรฒ๔ข—|่นnz.๔iž sํ ๑aษ…ฬ๗8ฯกํ๊ฌ4.์Dœฏ=O–\†อ>_ˆ๓iศ6Dส3Š—ั ณˆUq'ฌALก7๒พสˆ๐า๋๐fนโ@โ!ŽืAOX<๗žrฮษgงฏ9y๎trนผt"0/'"1วฝ ๔˜"ฝะ"ฏž๒‰YFาa–ฝดxสญB{“O‚Q)Š0Gภq>๖ล;Œุ”H็ผ ืF ส‡ฯBำยๅ…สญ!ส˜žฯ๚,อหNN89ไyXถ$ฬ๋หŒŒจทcๅ{ŽO;7vx^ฯ0q้ruฎ‘{Qฤ<เnžฏฤด แํE8ฯ‰)ัดG์"ึ•ŸฮqีQฎ!หxถz†tฐ็˜ฏหข”F 4C_ˆs:&ธ.ฎ[ืิiX๛ๆR:ฐZl1๛"!ฮ?์โผธง ›ฝ๛ๅๆฆ! 
ใcขN!ํƒW9ฺ2๚0ถ0ฮผw…„๗เ@ 1uJ๑›ัoญ ๗Q<*ย&ฏธธP“๊œTu?$Zซ!ห<ย q๗\จ%‰eq!<z.”0Wˆฝ็B ๒vนฐzม๎นP9๊>’ศ‹๑:.ฌVญŽห‚™ž =ึu\าMธป/็๋sx.”ง๕ž K>๔\(>๔\่๙0 sq!รา๙d|ุfq.>4.์-"ฮื™ทK.!ฮงO.Dค#ฮ)๔Fศบผ็ex;กํlC˜#จูŽว๛M9๋˜ฏ6อ |๖ำpk* G;ก๒ ๋Fธ๏ฎธ/wศ#N‡Bม‡ Dlใ๕Eเ!บดžฮ๖Q•z๎ ัฎœiฤ ‚ก๘ฌ ษฦ>์4๓คˆwyŠ%P›tpŒ,ะอƒžC๊„ม๒๎Šฯ)_=_ƒy‹%H๑z็๑+Nn์$sŽE;๙๖@๙๒๊ ๑ohๆฦs.ฮc9ๅ+ืศณBซp ๎K‘tj,๓ฬล‡็๊ ๒;Q ~พคฤ9ฟq…ก‹๏เ-…คร‡โๅ‘ร… Ž U›รsก๖-นP%สฃ\rก*ร—\(a๎นฐŽตŠ[yิี™ะŠ ฝX/z๚ั,ส‚™%ึฮ,บธฐฎรฒ์ฌ๔\่๙ะs!๓โร’ ›โsแuปuแร.\ˆ@ฟ}Ÿถน๐อ?~r4’zž๓OฬฟxK.ร็q>=r!Dฤ9™‚oํ%ถ:4{ภK/8ห„ฟk๛๓(’gคพE#(๒€{ฃณ€)๐โ\a๎J๐a๎ู[nCฏI˜ื‰s๖หCปู๓ใน&ฎ๛ไ่Dœฑเะุ๊…†ืโ๓s†8qvE„F๑#ฃไผ)Nใตbpx๏โM9็v*คŠป,c˜ษภTnธฦ&วˆcส6 ‡F8&7<๚ฌ „ฤำWv๖รีD๒ฉP'ิ}e]คฒ`œ'5คะเ๒ก†Sธฑ๘P…3V[z‹ใB~ฃโ>มผ นItรY๐Sq‡็Bธฯ๓ก็B f4ผZษ…*Wrก:*K.„[เœ’ %ฬ%ฤลƒๅผะ*ชจUผย๗ปใB๑`)าK.ถห…โรบh"nแsัK>”]|่น๎z.๔ร๒•\่๙ฐษ…ฌoม‡p๋๊ธฐNœ็‚p.œ]ฉCฝ็๋&qŠ ม๒s†8Ÿนฦx็๔uพ์ฺ๓3็YpดSQAyะ™g›ทฮรz๖๊PQี|ฺ๓!เŸu๗`ึณ้’#๎"4ฌ]แ 7^'ฮU]_โฯพ:]ธ>j.t*ฮ^hX5~แๅkฑ๕œ‹†8Ÿ^ล๙ฤoœ๑ž}<ทYŒ&‡ึQ ) นzq‚ฦdี[ฆ$2rพะย1ฦd€~๊ไำ›Fง๒รๅ5—`ง ำ๕<ปห˜ฝs2ne`สฃอ:๖ใL๒HหŽกฉกไ•‘ฑ็+&๛‚L e—้Oพ‚7Pe”ึ…ujูฅพšฑŒy?Nบ๒>}QฉฒRซ๐vU*ฎƒ่ƒ^—‹.ƒด4L๑)„]ล“40†จ/คๅ๓-}ม-ฺ๒~ๅ ฺฮsฮุปxˆฒ1Šqš๐ฎ0uฃท•8ฯ๙”žC”๖.คฝSqกซT7ฎษZœ:jํ็}ภ‡๏9š็\๏kๆBว‡ชดํ‡ไw .T:ฦย โCqก„7ผลvธK\จํ๐aษ…โร’ 9xP\ศz‰๙’ 5<[ษ…๊ , `ŠรJ.คSกz>lQ$.,รK.๔#Zˆ ล‡u\จแแ๊ธPeธ{9ฒ…O๗นฅ.ไ]z.”0z.–\(>์ย…ฌ๏[‰๓wqกWแ์Š๓๕\ผ%‚s…8๏ Q๑^~MTmgsผล„uKค<ฺ>ด9žn„4"/;๙โuy๊yํวฑ๐˜s\?ยŸ๓|ีM8โ‘๋แ๑ภ"์xฬ#‹๐1 tylUลทช‡s ‰Mr9‡rุUTŽyถ3ๅzข๐อแูV„OC‹ัเ ฎ‡ฬ{ซyถk?ต๒ม9ฆBร™8–†8S><ฯ@hๆูฆ๛ีุ๊„เ๚%ส}าพ{ึัูม๓D\+Š@C‘้y พ'ุvดกศ :5ไ็‹y๖งญ:5ธ>Uo sขLœ็ะ ้—คก๐]่ญฤ9ฯ†๏€๏J\๏)๔Nฤ๙ ‹ ฏNYtD-ถ™+ฤyˆ๓)๐œห[”ล9’‹ž Œ RŒŒฤย#ฮB๓Uƒ1*U c‘้:‡ž›:ฦฆ7H5n/ฦ*™WNบฤฒŒ9z์‹˜g๖ๅธ~(!ŒRŒ9o{2|ๅc_9^็24ex*ภCZyŽส0O…ฒ–iโ๎‡jeช*tซ|K‡Sซๆ[yะ1F๋l‡ y'ฤ‡ž ™–|4„$๗..4>์ย…๓๘ะ‡ตg.คX<).ไxpก๘ะqao๙qพมยKดไB0b๎็ำฃ8Gผ สขˆWเ?xนU•]รฉแFDใ๑ฦc^Wฝ' ๊ iGœฏ๓‹sีxฆ ฑ๐ค“ŽXD4#ชT๔k~ศฦ8G่">5ฎ9"›กโธ'ๅN HŠ#—[รz!8” ํๆ\œ‘ส:๒ื™r…พsN…ysLฤ4Zแ฿ึ๗™@fY…ืไ…f›ย๓นUBWว‚ญgยล#๎Yp?๒sญSๅาซ`๗รฝ!PyN<ณฝ/ผ3?7€xe=ฯB uWAAž็เฺนฮ!qNŽอq๗<Ž|L–ูฦตฉcƒ็ก{AL7ลน แo%ฮs…z tผ่V _แtXจฃ๐,๛ม๗‰8?uฑๅซ3๚ฏP‹/อณXˆ๓้Vœ๏ฑI{m๚žzฮ ตSQšฒ@††( BCƒ#ษ็!JpcPJdcˆ–?๖ว๓๓:…nb” ์‡QI[แห๑TตXขฯq”1 ๐2qLyไู—T-YšไyRฆ„นBDห๛๑9 \W‰า‹^๎ฉๅ2ฬณnŒtyผa*ฃิW,.ร9}ฮeYฉุ‹tŸYV/n•s)ริจxŒdŒb "VT4ฐ๋—๗ `Œjธ* ฬฉ ีฬžข๗ix‹=จ1fuB[ฦ‹‰๓ๆพs7ๆu6N๑"ฑ=M;็ฏฐju๛Z›ืโŒ•Cœ๗‰8ฏนะ<็]ธะ๑!๋ฤ…โC~CโB~ฟณ๊คz>„ทึ๚ำyMญบโAํห>๐Ž8Dพ(นPโพไBqrษ…โร’ ล‡u\(>๔ขผ––^ฌ+วพ์ฌฌ+็;,=ŠๅยV|่บ็ย๎า}Jกzq^rก็CuTjX5๘P\H C‰ปp!ย| ๘ฐ‹8/ธ0W‚‡แB‡Nฤy+.+„8๏qฮX฿žq!ยั‚h“P๒Ÿ+Žืo7ยZึงิcฎBpพˆฺฟgตำi7็z๒ฮษ๕ฃsr˜=๋ŠˆIๅKk์m ‡G "ƒKUo๗รญ!€ู—ฐ๓ &.• ๕์š"U{๖G,+๗‘อณcช‚uรดSGภF#pี†๖ˆv€hๅ*ส†ๅ^„5ืฮ}า–ใ"Ž„ฎrผiฏ๛ิม o9ฯ ฉวs่แ๐ีIมsQธปภะโ<}พฮ  ะ7b2F฿ƒtb#Œ]๐•โœ๕x0B0X0d0x0„0ไ๖-CM†จผๆ๒แแฦSDŒQydฌษปฤz…ฝำVUๅe‘AสผB8}ศผ๒ืeœ*ืจhฤudD๚ยLส›/!รดฮฃ^็]๗$คuy—‚฿ธฬนฤSไซืๅ\b€zใดNœืๅŸ{ฃด4Hา‰GHaํL๘ฯชHฌqฯyวไ-จfˆขG †yๅ…‘(~džžv?อแ‡๐8QD้ๆฝภุล@5aฮฟˆ๓?ŽZฅบsOึโฬี>ผaํ}อ‡๏พ‹ Š ล‡V๘]๑ป“ทูwRยM๊<๔^sฤ3\จ๐sฅๆ๘ZJ๙ชXฆ8ฃไBถตโB๑ฌ็B ’ •k^ๆ˜—”ญธฐUt‘Ÿz.๔sก๒๋ศ%–กํ%––ฅ'‹s_ ฎีx่ญ†[z.%"ะyฟ่˜,น!<6นq>…|ˆ=๓aษ…ท๎เCธฑฮ ฝ็.บxK.+ฬ๓แ็}อ…๓๗JœZŒHB0!๑h฿Bœใ5ง€B›ํŸล5`žaัฆ$”1Ž็qŽžv…ีiชต~๓์Ag,u€ธ“PD`"Uu\กใD„)โ1+!.ฯนฤช<ฟLฦช,.ฯ9๛ ๊นฎGu‡ฐV๎ตฑ๒าน6 &Ž ุฆpภตjlvถฉ .็eชbw\ƒŽห9t^ว`™ฮG›Ž ๅZณŒHๅน~๗๏ทไ็ฯ=os๕ฬ#ฌw?๗๖ักk‘H็|๊xสฦ›g;Eจ_@'ห'๚Wฎภฎใั ภ๑ธožฯg้ลน่ไัkœ๖ถ์ย๛ฎŸ\ฉjะ ฃส๑œ—๏S๗ิ‰8?cภศ๊ฃj๑•๙๛๗(ฮำgฆ„๛เึ„YnJQดู$แ<้ซ'\โ8O& tโแ9๏เ๓ฺ›eผ๑›-:>^ป็๏\ศฏ$ฏ’ฑ~่ม*G>b 1‡‘ฃqอ1๐|‘ฃ2—า{rึู[ไ "ษ ล8“ฉwolz"]†š๗สะN^' ‹๗๖-+ง+๗ั็—หภ๖†จผ\๒t•^ฏ:Cต ๛๔ก๏พ ’ฤบํ,=G*^็ร:5ึฏเCหแี๊ R/ะKบzซq2D5/cิ็›{ฃo‘ Rฆช„ล„&‘&ฌLŒะษศl‹„อK”ร?๑:™ศงจ†ฎ„y_ˆ๓ปึ๛ŸZœตz„๋+>พ๋Owฬ‡๙๊3Nšฬ…ผซโCq!ฟ๑ก็Be๓ก์็t*\\H๑กธcฐฮw^EIศj*.DิŠท<Š=Jไj?_5]|ุ.–ผXถซใBE”…4ล‡%๚P๗2ๅงไยV๙็ญผ่>ผฝฎ‚{k็eน็ž }‡ฅR&ฺฎsq!ๅž›\๘q]๘ฐ-17~F5๗’ 9๛vแยNลy+.Vqื\(qŽPช; ‘๏.La‹ฐBดศƒ๋‡C โ!G˜ใ%GŒ#จ)ึฆ๑ษ็ }๏ Tsg_@ีw ฯ!ภˆxW…ˆq trฯ๑ค#๊—[:ธNๅ_ใ๙V3 
๗[ธทBสYV˜9fD8โU9๊ˆdŽว‹ฬตbHEœ"bูŽg˜gร> 9Wnป„ถ๗*+o”วฮ๓ไXฬสฉ—Wœ๕œp-๒เ๛คS"ฝณ2"eผjป/๖†๑†ฑฆ‚K็วs‚ฆkยHS๘ฆ 7<- g๗9ๅ+ไC4um2uพเ’ถ๙œ๚บ6@†ต7T8๏๒Piค2{ะหKyŽ0kพšป7L}ฑธฒjฑ๗•^ฃrx5 %คชี„t๒+)€คpN๏A—QŠว‘ŽุQq8/ššก—ฯUU/œะ2อ ตmส[Dx(†ฎ;็ปru๏F›ีโ5c(ตพไC ๓^๑aษ…=๑กใย’ล…F๘๐[า๏Ž฿คฤน๘PีุUฉ vธP'ศ;.๎T>zY๘ฎะศยUbีsกrศ๋ธP้B*๖ๆSqJ.ฟƒ:Ž๋Žฤ~พไB_์ย…t9>l[ะม‡u\๘๘'sก’๋ญ8฿hฑล[r!9oˆ๓พโB ๓ˆsrฉ›‚FใcีฎG•ผ„›D œQ ๐–ใ้–W›ผsผ้l#ด‘XFX#)"ว:ผๆดศ—ไโoˆoยูยO5ยœฑฮ็ไœ+ทฑŒภ”gฯ,๛ ˆ0 ้EenVำ๐Yˆs ›ธ/ฤ&c„(็E๘Nว๘f็‹†ฺc฿฿5๗คฮ9::T0/k"ƒKะ๘๔Sไ=ฟ๗ฺน…ธ็} ๓IMx๛๙๘@'โ์กcช ‡ซล๖‹,ูŽ8lยแny„?mฮN๘จ[พ$aๅขอ‘ ฿.ฤ๙ƒฯถBœ๗FœนŒNฤy.,“ ัnัโƒ!ม00ส•ใƒ!‚ใ=ฒL1Š0œ0ๆผั%O^M%te๘)๗Mแ๏พ=S Yๅ[–ฦ"็๗ยU9“สQิะF2@KŽMืSv*x่:ฟฮฯ๛}aิะn^—\่ฏั‹qฯqํpกGษ… ๑ฏ๋ฌ,นะ๓กq/#ŠสฮสV–~,t๏I๗\่zษ…พ@œ๗šk85๘.Dคร…ชฺ^ืYฉ๗.\8้’&z.คฝ'>ฌใยผพไBฤ๚ x.์-"ฮ7๎ฟxK.!ฮ๛Vœใˆ๓,ะk†ข๊๎ƒ0Dจ"O:นๅxะหtฤ3"ŒeBีูฮ๐hqผ๋ไŒ#ะ๑ธณŒPงะขวa1โ‘sqn–Ÿฝ๗€“ฌ,๓ถฟ}7|๛ํปบ+๋ŠP1.* c ™Q’"ขฌb@]AQเQTฒ๋+bX1$ƒ‹HR`˜Aยณ ฬ ็;ืำ็ชน๛™Sีuชkบ{ฮ๙๎_ๅSงNW๛พž;‘ อsx cีznำอcฟDgy=ฐ คฬ@ขMำ€qSฟhSน2่d!เ๕_?/๛วธ.@ฺฬ,vm7•œ@jŒบณ_แ่5‚mšบัlเื3๒ฮ@,็€ฟ‹วฤ}ภ-ฏคgŽQศฦ€๖xŸp๎็0*ฯนๆyฃ ‚น)๎–๐\L็x๙ฬ.เpŽ_œ7Ž็;" ssห฿‹็pู d6ฤศน้์Dอฉท›}ืธc ธ/๙ำ)ฝธฮ๙^`–=ุฏ œŸZB๘Yซฟชึถyฺณxา๗-wฉll[ึภ๙!ูsNญ๓Yแ6้๐.mๅp฿สUช๛*mฝ…๓~เเw'›ฐ3JdจZyื!-:พ็ืใ\ะ๑ภศtBญ๏รนม๑ม9"โS1;วE#ศ:d:ฉั๑Œก๕™1 :vSบเl๊ศลyภFห๓ฑ?q ˜š๏h7.ฃ9‹=๗>ŸิE7ต=ฆนwkŽ=FŽขSปน๋”ŽW™w-ฎ‹žวhQ„sPพ3\็ฮ๛อLูฬ๋-uHcค2ฅ`๑ิ%;AUทัAgํฐดnฒดิจดQ€~ฦ๖#ฏงqนฯdๅำฌ฿าšภ๙กฏžUธูฆตv๚[8คฆจyฟz˜iแD๔ศ'€ฅ๒เ’฿Špต฿ฟK~ฟนF=ŒZ(ซ…jeิC๕I-‰Y=ฑ–}ไZhด9ืยจ‡๙biช‡uึM ใ๑yuZhึ@ฎ…fๅs@ฯต0‡๔:8EuZ˜7‰หฃ็Q ฃF-ไบz่bฅ#ีิC"็ฃดฐารจ…ลใฦึC๛i๔ข…์งาย4ฝ&8_ๅ้]ต{๙?ตp>08/กผ฿ศyš›  H'Šุหคฏ5qiฮ8'rN38€ ศฐyn‰sว"H ๔ค=s ฌ‘๎ฬ~"œ3B็cภ9๏ว~ธฮพx=ทy_>Žฐตแ˜ใยธLšพ l๓"ฏแฝIŒŽ8S™ฅ์หq"ฐ€ฉ9v๋ท^ำรM)็1/yœ๗เธp๖oM9วลy1ปภHพ™ \็ตœ'žk„;ึ‰anู€ .วใ<_@ศฝฮใผใุM‰7r๎จ=ฌฺ้y>วฦ9ฮ๙›๙]แ๓ฐ ย๏มBŠe E๚ ฬ Oไ[สซฎ์ย๙โ›f/๓"ต…เฆ–4๓ำืxUq๎+_Sk>ใูCIk/ทw–vๆ๏๑าะยyŸi฿ต•คญแ„๒OGหFคฅ/iๅ`n88#šQ›8ฦ ‹pฎณ้,๓ผ=:ฃ:…1*ฃ/ŽํnL%ผ˜Fฉsg“กุ่ล9ร1"ไ๋u@ฃSษ{ฺภ.šวเฅฯเ๓พ.:ช๑˜=<ญณะฑบจ‘Ni้นSZW‹žสวซ้”ฦZKGEg€มuฬNi์VฌCšร9ฉœคvฆzสk๗๙๎า ‰9ฟคpAข‘ัฅป/ ชเ‡ว4wHq:S-%ั'"Fท2๒ป˜๓ฅt?ณะ›ภ๙^;ซธy๓Mkํฬ7ทp>p8๏SGiแฃฟ์I#œซ‡–whVะCแ\-ไ7งช5q1ืผ˜E#”วi1"แ Qcิย๗Q -ษตะ,งลฯ๕ฐN ป้bพu0ฟญึF-ดผ‰s”OตศGOšQิ-‚-Š^ง…๙beŒžื-VฦF™Q MmฦีBžจ๓]๊็หhaฅ‡ฃดจyข…Nต้]ด0–ŒZ ณˆดฐ_=ฮ7yฦำปj!๖๒nแ|PZุ/˜ป1$ญ›oภz๒อู฿ŽOXpRัrkสญฺ‰’ \€4€ „qฟ๐ฮ๋€q@ใyภ/€ฦ๕5็า.แ€๛#Zฯˆ6Rๆ~๖๋(7›ื๓์›๗ถq`*”E@Z ๆาEX|ภธฮxxRML. 
cBu„ 85๕ŽะํHฯ๛็>ำอ9|6ภ–cjฝฮใผ†่4ŸลิwLM็’cฒน๎bCŒŒGใ ์ž3Ž›ฯeิ0ทๆจ9—gš;N๚>ฏๅ8อฌเo๏฿ศใใ=่ภ8; Op^5฿฿?Wณะน๊ฃWpžfก—ืs8_rEลใwอKฅ D่ž[–@ฝ œŸฑๆkŠ๓fญ]kŸxๆชฝภ๙฿”6ฏด็…†p/อžณiึ๎า์๑cJ๛hv_„๗yN ็“ผๅซ้8คฌด;ชe้ผsGาๆJ‹+๖ั!ใโุœำqtœ+‹c„3•Gผc๚zž6n๔<:จu‘œ>š'q›๋:†:จฆKๆต›ฑ๑Žh/Qqn๓>ฃ฿zฑฑž๒จQMป๋ๆ3€cŠ{๎”ๆฉ6H๊–โ^่:ธปHcj{Lkว๑$rŽCJ๚ฏi8งF‹๒KŒ๋ฆธ'GT8วyฤeคัšHQrHq01ำ๓>“๊'ฑeา…x3Jจู.DŒ?fDžK‡ฯฤg0บk๔9v+ทใ9ๆ์sS…ืุ`MPf_4—์฿จ?ŸมFl<—๓รc ู,p`' ๓™น๊szฅF7๖kซฎํ{T๗m‡KGฉ}งz|nฌ7/ท(ํพา)็ีsฉ9e@ฎ…๓~WLCSฌ”ยFช,ใ–ศฉSรัte>ucฯเ<Ž{ฆp"p@Œ ล™ฑ:Ÿ1eะดvœงX›h๚ฅ‘‘8๛c$‹๐jWใuพyfฒ97zฃพO|ฯ˜ชnZฅ๐mทไ<ฎ3ษ{๒อใˆ๖ฺoœU{ฺ่นCjคศ‘q๙<๔5+ญsฌFqjะ๋าญณฬาXgแHฆถใ€b^งึ2ยนจใชฏ6สา๘YQฌ \yๆ~`œืs€8€O๊= ึาฅg?ผธ7†kคž๔อฮํFމฆ›Dพc็s`›} ฮ\๗ุ<_#€อใ|ฆุDใๆถํ1>็ฟ็ภqeฆผป@!ฌsษy๖88.M€7Zžืฉ๓น๘์Dว๙œ|^ม.๔|ฟ์B๏B ฯ1ุนํฆล{๎ˆดณ0ไ(5 š๎๏d}`๓CๆGส”ดษ!p^•mะmฌL๊ี‰žS{ฮw9๒to็็ฎณnqมz๋ืฺ๖ฯynOp>ำmฦ0ซฺ:คฆฐ ็iีœี๐0':ฅq>:2ช%ธ „‘vฬช>Žฮจ)N‰ูฃCƒแุ่ืFdฆqšŠ™ื!ฦ”nฏGGUำYิAฤฑ{๕>#Œ#7ำ:…t_Ÿื}{ฟŽ$`๊ƒฮN—˜Nจi<'>ฯ๛fpN็ฑ:‹ฯNuพh๋=M{3€ฃS#G๙฿X‡žช๋^ฃ็ะ๓FHยyLk ฮ๙แ„9สแ<๏ไžบ/9ปล1<ฅq-ชš"ฅtLข<:คŒKรฑ์ฑ‹;#ƒอFeฺ฿œ/5‚๓ฝnอโฮญ6ชต๓6^oยiํๅvRioตC•ำ๚งึ!ญืB๕O8Zุ๓ …QิCต๏ฎzตL~FฤKฃฒ๑าšsก:ืจบ๋Fึw_gํwvาด จำย˜23ƒ|จ…๊aฎU๎7ฌ›Žฅ‡qฟš  ๙Sียุจ.–+ๅโ๒๗บŒข|ฑ2๏โแ<ฆทซ‡cฅตวฮียจ‡1rŽช‡หhกzตจyะรQZศw]๋U ๙ ‡A ๛ีCเํฯ^นซbkฌ๔ไขียI๒ ฏ>?ม9 ’๊kวsขๅŽ๒ฒ๙•€Eh$(  ๐บEอฃรz;%ะบ€m€ุขน8*นฯ่ฏตว€ตl SPภวTz ™ืะhŽภฟ ์ร{ปบ{›c‹้๑3๛Œ)๚์Hงณ<๛ไ}y หBฃฤHi7Šl๔yไœ7ท๚oขา€/Ÿ ทก DํIัธ๙ฆุ;?žใแ|้๗ผ™~ฯ๋ฌใ็’๓์~๘;ฉฦŒช๎ย;ว%œYฬCn'xมœฯ—Gฬ ็๘8๎ใผุล ;๎sN97<฿ล ปฆ้|W1`8ขiG๗๖็คฏเะ฿A/อy x;๖ๆsำศ๙ฏ฿ฐ~qแ›^_kŸz๓Z8ŸqpNญ)lSฅ๛jU–jั๘gฮ%ตตU%ซ๒88ž8: Nฐeฃ/ œg๗๊ิ่เ™ี้ัลiยนา๑ำมำนŒทqzวศŽฏKP^:vฏ๙ส™ษ!ล๑ใ5B4ฆร—ื~วhx”ณฏดฯาŒ๗ัqŒ2๎ฯ^[๛ผ๘สaๅX๓บ1QkR…๔88zl๒ิkํetHฃSฺญ9\]อ9ั"Sฺ๓š๓ล ๅปๆuพsฑๆœวx]S์'ฮ๙ฮโ,R\ฅ!Slดโy@:ฟf๔–ึsบ3ั":ณ?ขGj#8๑๋ื,๎๚ภFตvฆ ฮ/ซ#gMsŽ[J{rid฿:ค๕ZุYœคqVฎ…๊a็h!฿AG\๑]T๙ช‡Q ๙=Eๅ๗bฦPฌi๖บฟต็1-<ืCฏG=ŒZ่B#ฏ๋haฅ)วko„X๛ฃเฑn<.D ึQืิึฮcน&ึ7ŽFXwฑา^yฺlช ๋Q วห(ย๒lขบ^upžงตG-Œ%>jaLi=8๘žaFัฃF=ไ๙iฦ9Zˆ=์h!ภnVHฎ…ิรคง่!ZhzŸz˜เ|ี•ปj!œฃฝ่aซ…}ภy5"J(IณะošLH!u$ฐcTภฒ{8(็€ข (ภบ—YๅDฯZ@X7ญ5€A™KขมDฎน6y)๎ฆจฅณ๏ล~Œš[ุํf!ภ๕>ž ˆsษ{๙‚}nฆห๓Z@ุ๔x ึ†kkIๆฌo`xๅาNํŽFฬ#œ๓ูe3x>Q{ŽัšwฎsL~6ฮ1ฯณๆ>vฝ็˜9ฟ|N.xฮŸ้๏ฑ|@3บฃ่วšƒ9 ฑห<฿%ณ„rมœห๘=ฮ‰ภ๓๙อ`฿œ+๊ฝ‰X›ตม}์hสwšถ้ภนอเาhตฮน์4€ใ๛_ZOlJธ'zฮฃXท&๖ง๙>>‡๋<ฯวน๎๋r๓1^M่ฮiL…าq–9_.Rฤ,‚X/ฺ-šA=ปิZณ๎ง4œืฅrึ9ฃ๙l฿ิย˜:b๙MzŸฮฅ…jื=ฦจQ }~๚XFPาีรธธ๊MKžb&Aิยบ”๗บ้XžๆŽฦ9๐qšEท“่aิBฃๆv๎pฎF-D๋ข๒ณYฆzตว:z˜iกzุT ™mž๔0haฟzœฟใ9+wีB์า[ไผียM3๏ป=ม7ˆ*rIญn์สnƒ-G†ูค #Š ภrภ1 h6๐0UวtภW7›่.I๓6๋ิู— <ณ_^ t–€=ฏ:นอู๋?ฯๅ6ฏwqq€ืq?ภส~yœ็ิฎ›qqt็ยzoื๚mGชฌึ”ว๙โF—ใx2 ืษ€ฑใใx@œc#šฯข€๏Y—ตภ๙‹‘~><ณY็“s ณโ‚I,ึ๙๛›oWzม<6ใ๓8โ  h…jอ1uฮy3ีูŸอ฿86—ใœY›Nท o4›วธ  Q€๋ิบพ NุŸฃีšFฮ๛ถ7ฟ๘อตถใ‹Ÿ฿ย๙Œ…s๋ะ>า}˜5 fYAล/Sชฉล8@นใัpFq4„s#8&81Ž!๓‡'ฆF'ศวq–pฆp…ิกN œ9ซ8iFท;๊5 fwUวูใ:–ีสนŽir๗Yšzฮk๒7”๋๋1NŸ#lว็F–;ฉ9จ็Ÿฯ*œ๋”ฺ)މหG ล(’c†๊=Fั๋"่ฮ๋๊ฮcCธ˜ยi—๊˜าnไ<ึWฦyพ:ก8 g*;iล6„ใ>๖ล>‰"ัŒ‹๏,‘ฃิM›9็D‡บŒ๊ห!ฅฃ1)ฯDžฆต๑–W๗วjํ‚wญำœ—฿22ฃดฯ5#1ฺTฮq\ˆ๚ฉ‡™ŽารJ ๙žฉ‡~ัBพ฿๊ก‹Sh!‘U~C–ๆŽ$3jซ ้\Z˜k‚Z่bข๚ \G=ฬตใ6:คึ5g๋Dท+(V็ฐจƒci!ืัย: ฌำฟบหeดpฟฅQ๔˜Q`cO๕/๖+ษGp:B@G c-บzต0Fัs-ด@Œœซ‡|ขฦฑ’Q #œG-Œzต๋๊aิB๖ก&-$rฎXฃ6Ik฿์น+wีBฌ8oตฐูF,šc=T9€Nd1ีแ–7เ ะrLFบถอุ„Oป—คฌว‚[ ุHตe@@ฬฃี@&ฯะkำื๒พ<&ไ ๎์ ˆอ฿Ÿ๓~€จ`ฮ>s ห8Ž˜j ธ์๋ฆถวQdžK('jn=zLoท๎œsอ๙ต๙็˜sฤgยx?`™ใˆc™  โ9ไs˜ o}บอ๘bฝฟcใœ™Nพ๏ฬ ฐพ๘ขe –อ€vข้|~ปีsŒœ{Mwฺyœh<`ุ ๋Žฟ#šNฺ๛Uw>˜Œ ุ ~;์‡c'‚ฮ/~๛[Šห฿๙ึZ๛ฬ๊/lแ|ฆยyJgฟ้ ขX๐‹ขX|ๆHืaR๊ioฯeภzJQ๋ไL฿4…งgG็&Žฏม้cดย‘_9ภี8Mลิ)ำqฮเ$Gณํัv8ฤ7๎ไŽร๗ฆฯŸ2ช–ั}๊P๒zP_ปมฮฟL—Xผใถฦพใํ๊ั‘ํํuฎฃ#ff เ˜ๆใ๒FzBz5ช‹ ็โ,?ˆซบ5…‹s}m~„C็›วHy:ป5ๆFหuNนO8ง31๕ฟ8ฅผฦL๖‘šs‘lแี1ตฝชต์[€๋ฝ#)ัิg)*า&p~ไ[_Qๅใoญต ถXง—†ptii฿ฬ๎?(k‚t`๋๖ ‡นๆzศยO๙Rฃ9ฯตะžh› ฮ%ฟ9~{y_~ƒ๖๘ ๒<ตะ…Bu@0หG=ุG•๘T๚กขO.X๒๚ไgิยจฏ เ+-ไ๕ci!ืใsฃvำB-ŽZ˜รนŸ'–๛ุ/jaž๎+ขวลJ]=T ๓.๎Q cj{์ุnZ;zต0)i7vงV ‡Dอีย\ีBงŽํQ 
๙ข‡ฃดt๗L9คฅF-์Wœ?๏i]ต{ๅSŸTดZ8๙Zศุ(า{it…1ื๔๖ุ4Ž๑Qˆตยิ`FI&GxqIPดxŒ๋hฆd†Fqนํ82ฎ็ |๒|ก๔m.sฃแผษ:8๗ฝQภž๘~Žน.๚๏g1JฯพŒBฐv[เ |sพ8Ÿฆ~ปBไู9โผŽrGวฑ?Sy_ฮŸhๆ=๋>็X›ูqฐุH8งึ๗vd‹œำธ@ืฝmว“ŠMท๛Erปํ‹๛E วแh‡‹ท?!๛แ6๖Žm~ž๎ำ1ๅ}ขใฏ๓šŽjt\}฿ฑาuJmโ„sjอฉอ๒๒๎ะ๋œา|pŒž๋ๆ#…byง†3jWbQอ”KQ๋+M_7ำัiฑู‘฿7,ฉF4'”๗#ลwั่grJ1nำ‰8k„ิจf™š๓ู{5‚๓ฃ6\ฃxเิฺ…[ฎ œฏ_ZQต˜]ใ3ฅดsช๑A\ฎิ:ค=่aิย๔0~7cใBต฿Žฅ;ฦbcF*๓&ŽฏF-L‘์–Bณ ˆฑฑš:มmหtธŽ–๘๐pjˆ”Aค๕rh(K ๊Hเ Ž ๘‘ภ0mƒ5.Dเ๘c๓5 ˜dฟึˆsˆ7ฐmgwห ’ผ๏ t๒์วX ฝฏๅ=a—}๓ึfa6 „•‚9 ฮ๙‹t ๘u„๛ณ„€๗0jฮ็fกaขŸƒฟ ๛qœ#๎ุ—ฆ็“โnF€ภ๎็"บฯqr์,เุล3ตœq{|dGœq?p @๑œภ(< ค๗;ข๓ค;ยวYุฤญg็:sŒ่ฝS€iฒD„sบน7ู์ ฯwฟ œ~หทsทqญํŠทp>ฃแฮึŒ[กถgดJ{›่ ‘J‡Cbส&ฮNŒะr้8ทœKžgWจ็9F‹"4|ญำ้78jึ_F`O๗—ฏ์cว'G็˜ฦ‰ฤ๙ณ(ytu:y>Ž็ๆ=.ํ‡๋\rใ6ภฎ๑|^ซ๙~๎K๋ขIึcjปscงy;.G‡4z]ZglŽ;น1ชk#E1•G”ศ!"Nจ๕•ภธ5”:œŽ สแฆGัล15ใ;gz;ทํภ>:ต็๗Ž8งiผ๗ัK!tl๏y๙แVล’3ถo#‘Z<๗์dฎc/Ÿ๔a  ศ˜*ฃ฿v6็2F{‰ไธDhมบs<‡จผ อKž8ใ€,๏ ,’gDะd฿คtŸ<ฮพ๚s'6[ใXธnDจถ mิœH00  ๆŽY8SวชQ6 ณŽ8'…`ฆ”€†qผO~ž'ฒฐ  ูsใ๙ต‹พ๒8"ๆ,ยๆ/{ขๅ9ู|ฏชฦ๎ฑธรwํ๑KO๙ฎ้‚๊:ฯwa‡ˆน๛ไzl2'œวฆ„œC \0ฮ‰žs‹H. ญง›;`ฮฮฟ?ํƒลช&p~ู{7,ๆพ“Z๛์+_าย๙Œ‡sFซเ€R_I3$๗็DpL€:มจ„ใmb“kf๐\Sy=ฏ1%Gั๔ILง,ึ_Šต‘ฆqๅkpq๚Œขใโ๘นม๊XŒž›ขฎฆ์ขjั‹Z˜šf:j-”๒ห\ข]h>ถุ๒G'MฌำBŸ3\ Œz-๋ด0.ฤR Nyำ>KวXช…ŽิT ๔ุ“ร้ja,๗Q ]ษK}ิรจ…du›qฎF-DฃF-ฬ#็.fช‡jกpฎF-าs-คlC=œ -์Wœฟเi]ต[๓i-œฮSว๊y—‹.?-ม9๐„๕็D(,ปธx@ šQณ#;ภ—ืฅŒFrนฌซฃˆ1เš9gaจtŽ:ท…t๋รm"็จด&pฮู๋—ใศฌ_r],pv8‘fSฒ‰ŽๆิG็\r€Pมศ9ฉ1Ÿรฮ๊Ž;ฃœc˜hJ{๗ˆ๕๘ .0๐น๘[pžน 8~๋พsJ ˆŒ/8๎€โžo์T<~ืผิ€fjœํdn€ปฯi๖Aฺ:QrSืoู„อ 97ภ; œ/วq )๓|'Mกg๑ˆ,Pฎัt#z๔7๓ฺูค๘ใGQkŸ{ีฟตp>ำแ&H้ŸtตชžาเJh_f{hnบm88'82:;1B„#„ใไ0ำฐ๒โ0๑<ฃน8คi Pฮึ/โดuœอ*ฝ.ชbช&)ฏรy4ŠฃรวkpFc ป"N#Pฎ#Šm๕๏G%@๗6ฦ~s๘ŽQ๖ธ/,:ฆัAฉฆ1Š^—โฉSgวqKvm๎=ฏkง3๖ลัBบั๓˜ยŽ#jคˆ่๕ไึใ’ฒม<•๏–ฃ]ฏตฮํDฬuSใ8คฆท/yโฌtจฅŽ0N0F4*u/^๐‹Qธัฉœฟฃ„๓6จตK>ุFฮ‡ฎ‡Q i–YiD๕๏ปpฮoหEJ,คเe,I๑7อ๏5j!ทMmW 5๕06TS๋ดอQ›lึฆฎค่Y น†ไ˜ZจrŸ๚ฃๆ.n๚๛3โ+}~ิยะ๓›ฆธซ…ฑผ๑Lyฏ‹ž๛ทศ#่qzิB๕0j!‘s๕0j!ตๆ๊aฎ…QวำBแ<ืย˜ึฮ>ขช‡Q ัF๕0ืยฆ๕็haฟz˜เ…O๋ช…X ็รำBฦชQC›"šŸP,พ๊ผโฑ ŽM๖่‡-๛ท)ษบmDA‰๚“ึI{ytดvขืฦ9โฌc36๖M9ฐ@๒~F–MŽ3ฒm๗x lั&pฮ~œืฮ{9าŒ…;œs>Lm,;R ธN‰œั=ึร+Ÿs ๘ีฎ๛…sŒ่9๛aฟฆ๛ฮwœo 8gก๘%=ๆ‚@/QjาผY๐นc฿ํ‹[๖๘‘ หOK฿ง;ดทภ9ัnฒpำึ๙ฮNฬtวาqณฉž™Dฯ}›มฆƒ่<๏Cช;๐>@็๓7๓+?๒๖โ๊ฝณึvy๕๊-œฯd8O)Ng้„ฆดท๛ซt`ข้}ภ9+8%8/ŸQQ;‹›rจ๓Sฑq˜x ๛ฐาดv#8kฑYœxŽอˆxฎ้๎N'ฮ็}d๑มw‘Œ๋8ฆ˜N)NkŒqŸ2ํำtะ˜šG–๒:๕=๒ณภษฮสvfŽ€ฮ9ํล)อห L้ดๆ2ฏFฤˆH‘5•4ร%R„3Šjณ7#ต)@U„'ฆญวTNOใ:ฮhฌ7' Dคˆ8ฃฬ๘ีั%:„#ฬ%๛J-บv฿Ž3๊Hก~G 5๓c7_ณX๐๙ kําฌย๙๕p”็}๊!Zค™E$๔๑;‹ๅ<ฮ#Gียจ‡jกpฮcv\ ]ฤหปธ;6M€๏””ฯ˜s-4บ\=Dง\ T ีร่haิรจ…ผง •นๆ™Gะฃฦห\ ใ8LวcฦlขAZSใ,t๕ะF~ฮ-๕qฌšzต๋๊กุฑnZ(|ซ‡u‘s๔M-DืิCดpั’ำ;ื“-9ป“2๏t๕0ืย:=์g๋ฮ฿๕ข•ปj!ถๆสOnแ|ˆZHz1‘๓ว.<>ฅ ๆภ9iส4ๅ–€O€์ฏ&œ๗า ุคๆผ[ต5๋€% ”Oฤภด!อห€p฿ืฦs@z๘ฑ‰Ÿฯw`ึt~  ถ>ภfแยัc@&PIj6`ic3ขัฮ<ื๘ ภ=ัsเศษ*เ๘9Oฝv˜ฏ;ฯ,\๐9๘{qฬf๐>ฑฮ๓F๔ž๓ษ฿๚ฯU8#๘Xœถ๓‡๖ี9_pAล#'\Ÿนqใฉ็i๏ฬ(‘s;ี{E ฮƒ คตsษ๓Mว๗ฮ(:N-; tƒงฆ=vvฬ…sR๙ ๓ซษC๔ /ร6›ืn๗๎Z๛ยฺ/kแ|FGฮIgป๛ะ๔<[!Š^ยฉœioi=ฏ•Nh‰"8&ศ&p8>ยนQ้ใpp xฎu–:ค81ๅ;ยน‘"๎ทy—ฮลIธนtM‡)›:Ž:ข8Ÿ[ฟ๓งหX๋\bผN˜็’โ่šaห้1อ3Fฬ๐บ5คqฑืใt=v-ฮฤ9ท8ๆ)ฏAว)ๅ๏ฌCส˜ 8"ˆzG๋'MMัM ‡าUึSF87JฎCชƒ ”!๒z๊ิฮwธผไ>๖น๐ขtƒ tแพxืง4ƒsำ3๋R7ณu2บcแ|‹ฮwจึ.๕Z8ฒŽาBา€3=œจา(,6บŒZh:{ิรบ์ตK~ว๏beฎ‡jagฬXฅ‡นฦ†nฃขะUI๏จ‡Fหัต\ ีพ๑ดฝ‹ฉ๑นๆZ{wD-๔x…s›zF-t ‡Zgกี,3FฮีรX๊วฦ่9k๔0ืยจ‡Q ฃช… t*=ŒZแx_*฿ภศภร๙v[ื}jหZ๛ย:kดp>ใ#็๗T้mิX’าnZ'ัข ยน้{Dp^phbฤรมฤ!uy„JqH:†Sๅ]ฃA:jึa9ท+;Ž'ฮ›ฐ.๔ฦh’‹wกœHP๗c8žฆu ฆ„ฦ๋ฆ‹ฦๆr:ง|VำVM#Žโ,bR;ฌนิ)อปถว‘vFหcฤHHะuJM็ด’ดN"FŽ“Šตๅัฑ4-S‡TgS@ฯแ็z[g3:œiฦy้Œํค RŠ|2ทบšwh%c\฿yพ฿ื|-M,๐7nฯkฤ1ฅษัIKhฒ8ošึ~์–ณŠG๖คึ~ทํ๋Z8vไ๖ู”ฺณร(|ฎQ c๔=.Z้ทY่จI•ฦEKฯ%z3‰„sK|ขขs๊^ิBo›QฮํนF=T ฃถE-T์กปฒวด๖ุรื iฎ9เ]ZGฃขƒQฃฒ`Y๊_ญb™ๆpŽ6‚๓{zW-ฤึ\ฅ…๓aj!‘๒1?่‘จ๙ฉ฿M๕ย)โ9A8g#u˜Hงsนํฺส€ไD#ผ6‰ร๊Hตร;Ftืิy –KาูI;็x8๎k{SX๚๙ฆตณคถS;ฮข‘pภ“บm€ัšmภ$ฉ'@ ิ;Nอิ|›็๑นI;ฯฯ/็‚cฒ+=็‚… 
›่ฑ€ยฮ"ะk}<ฦผ'ภธ๓ทฬ9`ู™ใ€ฎ๐MFFg8ฟš‘๑}%ุา,N8Oำ*#๓‚”tฟ9žฯo๊?‹˜cึ8Vgฦ›๒n*<ukัm็ฬu็ก“ฮจ5แœxภฃ้‹ิอwƒs^ำฮธรฟ7์ดUญํบ+[8Ÿฉpฤœ/-‡ฬ85า6™y^ีœ77ขฌF‹ŒH˜ฦ้0R‡สั8Bบ ฯ๓qฎLqฯป๙ฦyฝ8o6ร๒zGฃุฆt๒)+ZA2j#I๑บฉกF๗ใศ6Ž[8๓;)ฌม!ีpเc*ง‹#63:คำ |i?Fฯ๓ดNฃ็v*vŒBYƒs๏Mกิ™ไ:ฉšค๒—8ฆญ“ЉฃiWbP๋สนŒMเLiวแฤี ลpJqPหว„๙๐๔V rt#๕่DึซจzŠ epพไิOคyุKNฎX๒๓4ชฑ์ภ๙{K8าฆต๖ปOพพ…๓a้!อ1ัCยฉ…Dฮ ‡&7@ฮ๚q.-๕AใRo*๚›ภบบฃฟ‚ฅษc6Qœ๒ &ฤR Ÿ;ซซ‡ฑิวfqFษตAiกz(๘ืia์๋a*|ฎ…yึิ(-oiฝ3>ฎ*๗ฑi\,้ษ'ฃ ้น ็ja>ลB-$‚ฎšNŽกƒ๊aิB๕Mญ๓๙6}3kศวีGมฉ… ส‹j12j!฿๋J;ฯU ซHzฎ…d”$=ฬด๐‰Kwัร …<ึ7œฏ๔ฎZˆญนส?ตp>-\xึแ ฦˆณoŠ”/<็'#ทKkฒt€)iwqญŸŽโ6,#u<ฆวk€&0 <:›l &2ฬ๋X๖1@ พึj๓ูmHเฺฑ]&โKิฤษ:Fาv็{วน็ต5๎\ิIฃะ]tเ˜จ๋็˜<6ycมะpู?‹.Pห{~ฯ฿‘(?)์7Qunณภเg0ๅv‰€ง‘|ฅัวจ%’่‚‡๘ณิS>ฟ‚s 8ูร๓Sํ:วC$;o,RๆZจฺก]-ภ๓…J๕0ja์ตšs๔ะjกpฎช…Nฌˆ.ฃ:ฯฯ๚t_รmฯ1๏ล{ฺŽsiไœKD„oฎ“ขฯmŒE‹ว์Jsu8ฟnืงฎ๖u๖๊^เ|าฮทwวฒ็ZฺVแ6Q๖Uฦ๓๘œUธFฮํ’พ๙D๕Oง”Tถn‰๛€sœ"vŽณธcw๑˜ส<ฅฝ๏ทดษทใsQSพ ฮถvัจyLใด;0๗O&œGgำฮฑห.๐. ๘™์P์์b;3ปpม๋)ํ@ŒแPฦ1uคcโ„ฦ(sf: ฮ=ฏ›๗kอนiํŒ"…cรั$bDSคหJ‡ใถฆ“JDษtเ๔v€<ŽJ‹ฉ้šห4?ฦc„จŠ %งดrP“3Š“ษwฝŠเฐโไฆ+ข้8 ิ็ฏก˜ิg วด|}#8ศk‹…_Wญ]๖นทด‘๓a๋aิBๆะC"็่!ฟ/~gŽ๛โ7š๋až2J +`ฮใงฺฃชh0ซ–ธ่^ิB.ัฬษ†๓‰hกzตPอๆsๆZ˜&oTzฟฑฉ‡jกฅ=l่aิB๎ฯ๕0šYFผ.6…Cs-Tปi!๐ฎ ๆ–๛จ…ยน๗ฉ…iqฝารQZ๘ฤนK๕ะ”vฌินิ๐=,ur”Vอแ–ัB4ฒิรZ-T+-์Wœฏ๑ŒฎZˆญ๕ฌn#็CาBาุฉ7gฎ9 ผˆ˜๙]’5ู##ณึฌภ!‘๋~ปŠ๓Zเžศp๑ๆ6๗์DŒI€€4="ฺ๎ว๋ƒHk'ุeŸDฏ`g›ธŽใ•€#` @ฦ>เดo lญ;*ฑ่D†ฉฉฦุˆ&๓lDลญรgภจ9 Dำ๙ป๐<ฮq™6˜;๚อฯมsธ๘ต วม{}ฆึ<ึ™•ๆ๘xฎฉ่Fภ๙วnํBy๎Œฦ&Hผ”v"EคobัฅK1ฉ‹qJน/ย9‘"ก‘ฉœ1]ำ1A ฮŸ)›tืึA5:k,)ภ%*ŠCŠร‰cIสฆฅS ช}$gG•ืฑฟช†=E’xฆSZ:ญเใ๋๎Zป|ื Z8ถF-Bิร>ฃ็h!‹Wf,๓Q 1ภ‘5หuiํ63S/ะQูG!›ศQi๖ัเบ๚hŽบ#์\W3'{กR-ดฦ|ผ็็Zˆfซ‡jก‹qฑฒNs-dCฯ์’Oฏจ‹šฃ‡Q cCธ\ #œGSัB"็jกzต0ืรŽ!'ำ#j!ทีCต ฮนžkก ต๛ซHy/Zุฏ&8ๅ3ปj!ถึณ[8jCธ O้วคดๅปปwดcฒป่oฤภItีTv"ศภื\OF#6็ฆวูใภ&pˆ’šmc3ใยH๛ฤ๛๙ผง‹D๊ESภ= PE›’-w.7dLX&L #โฬf'sแœ็ฤฟ€Žฑ‘ฅฯ๗~™‘ฐ;อf~bkฝ‰p๋ผฏ้์ภ9๛ฑพœKวฌ ์งฦyไ˜ูฦ{˜ข„1็ณ`คฒsœk_CC9๖รฅX8ฟyŸOv~oน}q“u'=ญ={—KฅMk2œ'gtัH็ึิฉีUu‘๓Oz‚NŠใ‚l96HgิQ_ฃšรtvง™‘ษใ่0อ&Hฮ๑ตูN›NŸ]Mƒิ๙ดฉi“ยฬ่d๖’๚ว q์ฮ$ฮแ์แœ๓<ฦ4"Cv+ึ๒B1gG่สOฟๅ'ษp2ญฏฤล9ฅๆ’™ฟš)แาษฺJา8Mmw4ะˆ๓Yt"BฃขBึ–k6ซRู;QrS0๒ุ‘๙พ8”4ฃf ็น\]ฒป1๛ัก%Jd4‰1C}กœฟ๚QDน]ล [8_ฃิิB#—Q๛ัBตโ(I๛p˜Žบฐฦe/i“ทNใวฌฦ๋๊‚M0ญ+W1kถั>g:ชัq’ผnzจšฺ>Q-ด~>ืB๕s(œ›ึ>fึ:›eช…fq_ิBRูฃZ_Žึฉ‡๊ บˆก‡Dะฉ='jฎช‡นช‡ฃดP0ฝ6ิD๕0j!PญๆZhใท\ อ"สตะจyะย~๕0ม๙Zฯ๊ช…ุZซถp>L-|ไคƒSc8็RSon๔žo์”lขpŒฉ1ศrฟอเš‘qปธขิ™s<, 2‚ฯg#B/€่€"€ศลบk@“จ-pˆฉr๊ฏป ฌA็๕œW^็|๎ธผ/)่ฮว-ล*!ำๆภ7เอ~ˆ8€0mบ;๐หqPSn7๖%ื]”,]ฟ’tภ9QeŽ›ฯ@?ว็6Žq๑Mณฤำ}^ต$่Ž,cฺYฤ ;€ŒGญq<.>p|ฮ>7S็z0>/ฐm}ฟ5ไซeผ7ฦย๗๓|"ํf-ุ1ž๋€9 +Mเึw่ฮr๋ํ๋๗็Sฺผาžยฝ4{ฮฆYCธKซ๛wiO ื/ค๓{u๛ ฌ!-œฬYํ.ัฒOญ›c U`CJ๔ภ†Gฆฑใ€อภ ี2zป อ1๒mคfEq>ฏ3อญท๛/ฆŽœเp่z‰า4ีuๆ}mnd‡ไ^;ณ?ŒืtœyŒณNš,6‘ำ7ใ,_ฃ๎1RDMฅ Ž๎ฤิึโ žz๓ˆ“สœ_Œ๋g฿๖ใŽ3 ˜;ฟ— ็ำ(‘MŽj7#7•“ุฑ8&(FอuHu"xใ„๒ว ฝxืฅ้Vณ๓ศc,XูๅฝŠ<%‡G–ธ"๎ ยฉฮ๐พZปหตp>d=ŒZศB“zุoืvด฿•zจฦฑhXL]‘p๕0jก๚่ผrSิน฿ลG'RจGQ }z2Œ>&S4ียฑ๔ะqjก˜kaŒœG-ไต๊aฎ…Xฎ….Vข‡Q ๙ฉ‡=kaิรจ…aqq™…สจcA ีรจ…ษจำB๔”-์WœฯzvW-ฤึzN ็รายGN๘Fฑ่๗งtšย๊ภ9`  cูเ‰h"ๆD—๓๑iึœgDฏํ ”แผท๙่ƒ2šำ็‹Dว9ภ&|ิ2sทp€œ๋Œ้ข๖:อฯ.a•]วc@}”ว4zฒHSทๆผง]%dฺ์Eฃๆ?0 ฺษใ ;ฦgaaAุๅ1ž#ิ›†ฮ~ุŸm€jอฑล๓.+฿xE๚์@8ฑบอๆl็ะHถs๋๙mH[`*ป%œ{๋T–วร็ฒ๖ุ9๏.๐~๎หn๚Mเถฑธ๗เฯึฺ—6{]ฏฃิ่ฦ~mีต}๊พํฐb้(ต๏Tฯ ๕ๆซU0]ๅkซวฅJฟฎบ\ฉ…๓†[š\ZrDg๏5๒Oน#…ส ŸIว)0 ธŸHD…p@uFuึV!๊Œ@๋ชlp-ะีร\ ีC{o ่ z่ ด‹zˆฦL"g ซ…ยyาน\ ฃช…gOต0้_ขซ่aะยFp๊Uปj!ถึ๓Z8œs(ง[;cิธšsกœqNท๎๕๑ ฝQGเpฃฆ™ˆ.ฉํ,ฉไิ็)ๅDŸ‡™๎๏87ภŸ:รw‹='๊Lฤนุ๓ชšrาพSt˜jภ๘ร๓ำๅใw฿ุนtn๊ฐ ?4าต๎*ฝnณถŸT}๊ว'9g#  ็4ฬsF<0mgy"เึฮผDำํฏ9วผฏ7Šn9๛pI๋‚๛…sฮ')้ทi๊๛ฅถ\ใฝ€s๎คY0แoะmc๑„ใbA…็[ใฯ{aŽ`c|ฯY8i็w}๛๓ล‡๎^k{o๑ฆฮgtZ;pN”zj4ฉQ๓~แœhjอ๓X[๎Xดฮฺไอ(NŽ˜ฮ‘hแ.ํรv:{qJuฆf๕y"}“ืv‹ๆž ๛‰n8Žภ7)—1ตำj€น 0g๛F‡วg”ˆQt๋1นz๓8>ˆZK!ู๐ญo8วL_ื๑$สํcFปqF‰v—ึฺฮ๛ฬHeฌฯฤมeๆ9#ซๆ›?q้๎ษšภ๙/vycฑ๘˜ญkm๖A๏hแ|ศzต0W›h!T๕0๏ณ๕P8'าk๏ หw์ฆฮฅั็ุ7ร์\G3๊๊ฦM _:ฉบะ0-์ฆ‡iผไ๎ฟjค…hšz "๎ฯตP=Œp๎ยคzจฒ 
F-า๛ึC-vRทววb๖OฏZ(‡Z๕Nz;-์Wœฟ๖9]ต›ตฺฟดp>$-$JN8:ถำ๎แŸ}5ฅำา)๚ๆ>าœณq$ยJ๔’4f"่ฑม๎ฬ@74ั๖&้๏“•:฿Kืvขะv#๔ˆิ’พจ•@8๕ึ‹o™“jฎนNฝ6 œsใw^Ÿ ่:๕ฯใม9ฉ, ๙ฦ&ฒq\Dพr๊๒ู ๔จ?ฃฬ€q>Qj`วX„กŽ8ท้€ŒSO*ธu฿ฮk5๗Fั๛…sาอi์†๑Y8็\'œ๏!Qn๋รY ธ๓๎๏eวvkเsมžvvw~;๏ำฮ๏)!มรฟXk_ู๒--œฯh8'›_-<%ญโำฦจดธ n8(4ำ!zฎ“Gฃ9หืั7idPีะ(6+rใะโl^!HQ]ชๆฐšฝu3ว5iš„:VคH‡—Zี‰ิXฦฺG"A@SฎIฒป1NitDIkง07rŽ3 ”`8จDŽˆแ”ฦšK%9*mbCHู4ส]อO฿mn8ฃ]R8G9ค'o“~1CFขฐ่Œ–๛X๒]’5‚๓ฯฟฉX|‡km๖ื7kแ|ุzดTc๕ฐ‰ฒ๑AีA-c‘ ๎ไแR*]ฌฮใm๔ำ:๒nฝ/–ทrœ่แDด!oiข…h˜zตP=dก2jaิรจ…่`ิCตะศyl–‰นเ๛อ๔P-tQ2.Vฦ™ไ่aฏZXํ{-ไ= Zุฏ&8_๛9]ตkแ|xZH๊,@ญ9ถ‰ค“ฮcz? r4Jsฦ9อ๐ุPงFวy XŸ่˜3ร๎้Žmf:ขp ฬrDb=าค‘Fอ6MำR3ตสษ\H $>šFŠัpK๋ป}]Lๆ|^ฒ€bl"`Žี๙๔ภ9cึ?g๑i@8Qv@œวˆ”็Dุi*ว˜:vDแ…s FxDฮ‰บวฮ้d`ู€fS๋ษR`f*ธเ๘วYๆคฌ็uๆu™7T๓|๗ม๛ูC8w1 iZ๛ฝ‡ํQฬษ—jํซ๏ …๓™็้oๅŒ52ฝ.E‹l6ณ่๔ ๏gว†๎ฤ:ขšPn๗aว‘ส™ฯ!ืณใบ€๎( ฎwK์u†๎ฐ"๕bฑV~ฌ๓ี™^•๔ณแภq>rั 5ำzskฮqJqHq<ฉฉ$}ำ”N ็Tง”ˆQJlBu•1’้^kหq<ํจŽ๓จ3J๊eiใ๎Ÿ็ฤภ1UิTะชy“ม๙no)–œ๐ตvๅ7฿ีย๙ฐ๕0h!๕็า‹Z๕0jกฉํ.Tช…้ฅฦž๊`ฬศ‰ฅ>่!zั-ฅ}บjaฌ]O 9œŸฆZ\ซ‡Q ]คTียจ‡yจ‡Q ีC#้hแ} ่_รbeŠtซ‡Q ฉzุ‹šสพŒฒoดะA:ต—Zุฏ&8_็น]ต›๕ฮ‡ ็ ฯ:|d ึi‡ฆศ9Q๓็X?Q^ h์่N]ด5่D˜M ฦฉ•์€B`S@งกxุbฎ›อkmŽึdTฺD›ั๑DŽ1"ฮDWIต&mœKขฑDmIWOpNว๓ฬฑEณฯH5ๆKn๘]2jาSm๚‚๎Q^=€s>+๏oW๚‰n@2€ xำežจ9Ÿ…}๑Dโq ฬwŒฟ+@ภไœsn‘w€ศบํึn'๛‰ย9 ภ2Qqข๒Dฮ‰ศสPอslฃ\ฺ‰}ฌืp,w?0บC@E'Sฝฮ๏+!œ฿]}mซ [8Ÿ๑ฃิจฏี!]|f๊KฤˆhQZฝวuL #[Ÿู๓พ5R‰ž.Ÿื‹jz;ู้ฮโ5:nิจ‹#S=วŠย๔“:9U Gvผอ๚|ฮ“ธ~7ฦเ\โdโtšชฃไึ–s้๕)@ซFชฆใิ„tทiœ |œW>g'T8ทI0ำi {5wทำจ๊&ุงำ8 r8฿6 0gฑVY.O&์dYuOp~วต)rN3>เœ๋:ดcใm|nปด๓X?`N†็sศ฿‡ฟฐฮ๛เ@;ฯแœ๓9นฮ฿sฯ฿ฯY๖๛แ๕฿‹v8!ํƒ๓๐ณO`Ÿ:ั็‰คต๓<ภ›๏็(็๘tฎๆ˜rขๅDัy อ๕๎›?6œ๏ทV ใx=3^ ๔ไภธ๕ํ?ต้Mเ/G|%•–ิูื>ฐQ ็3ฮำ?xVำYฟˆไ„2าŠ™ซ@:ืSsš็4G"ชฤcฮงฎh†„#รL_Gงลฺส”ฮY:คDRณธ*ำุ‰–เGTฤฑAqžyท‘f3ัๅ\ฦ๏vQย5pq.MUทq์ฮn%ื‰ยู$:ค#Jืา3ฑ่”โ„โ๘:ื7ฅ ำhkแ)ฝW้ปจวย8ชฆrโˆ’ฦŽ๑ฦ'ณใา(,v9&Rไ>pnู็ฅปK~๓ูbษ9Ÿn็{nX,9๕ตvๅwถฮหํ๐า๎)ํแพ•J;ซuมๅSZ‡tโZศwS=Dใ:Z˜มนZhวํฑ๔0ืBa<-V๎3’™lQ:+ฦ†&)็w”ื้แ๒Nc†r>'็9„ขe๊!:ตK๎Zh9ะ›ฝกu๊!Zˆ ่D่ัCต0อ-wFyZุ้ฮ);p๕P-DีฒRรzัยด€oD>jก๓ัƒ๖ซ‡ ฮื[ญซbณ^0>œฯt=ถoH8ฦงI็:€nว๖ฟฐ๕2p๎sใุงบฺiX"z ิyฺ๊ฅถ%ยi'ะbD\=žG4–(ฎ๕่D{ ˜wฆ:จ@"ภ8ฌtvŽ…ฯฐ’Rฮ๑ุฑhฤ€; /อ๐~เ็Œย]~Zส`ภ(1 ฮI็sSหฯyแฝW5ิB@—(7็…R๔9ฯ|.ฮ' d$p้นๆ~.๔`ภLžรu๖ํ๙ป๊คภ๋uฑ8gมƒ’ ข๎ยนฉํFต‰~ำŸืฑภ๋จ฿วฦฺhLวs…๓ป+8w&:@๎x5๎k ็G๏ืiิ˜ื>ดi ็+œ/ณ๑๘‘ำ*พŽ)้v4Hฺล:œวzK"่tอ5ฅ=ŽฒาTwU.ำPnใฐbc9žำ9Jฤๆeพ™๎ŽCช-j%•ถณเฟ’3‰‰c‰๓ˆฃ T˜š‰ำŠƒษฌrข=vŽณz“ใX‚้™t ๆฑ{™้›:…œ8%ŽtbqH#Dcฆ[’ฆฬ๋€๒ชYQฯ๏Eด‰จ“N(ฦ่Aฃๆ็}fคใ๗ษ4ƒ๓ฝ6Lc‹๊์ส๏{/p๚าึสœัKญบพ[iดpL๙E็๛หt๕0jแxpžka'{จ๒จ…๊aิB›ฝ๑ปW YจJgขชyuz่็ๅ\˜YฅR™,Qง….Fr?šษุ=๕ฐ๎; ช…4Ti!ณวีรจ…,.ฉ‡A ๛ัร๔\๔0ืยj”dิย~๕0ม๙๚ซuีBlึ ต8Ÿัzธผตp็>˜€<ย7@ธำEšฝภน‘b˜r5 ฐ โ4ฃณ7้ฯ4“n‰่๋<่y๗ผ–ศตตๆ#ฐ?ฬ&pDMšคqิYcค[ค_!Sไ[G"็%€9Oฃ์œc๙f ฟpฮ๙"}žล luใ (C ๕œ่9‹'”ฐ(‚qžYฐ–“ฺฮ9ภ\จเผpœž#ถœฒ ฐ‰l4ึ#•lฮ็H็าN๊tL•ง–?• <๒HO๏ ›Oš;‘tปํkฮ[ไ›ภ๙ว˜Fึู>[ฟฝ…๓ฮ๔ว•"G8ฌGธzt๑/“ƒสช?ฦ๘!ฌn#R@dNทvf'e3F‰L๓tิใ†ˆŠ1ฒ“;ฏซ~q:GŒฦƒsm"็8็ฉV๕€s็็o๕ฤฝฤ€qg๓๖ดแx.9{คมS—qF‰–๓zRฃๆU=pJทค’KœIŒDP้ศŽช•ท”ล๙๙GF:WŠม๙›tฦzๅvๅ๗ท๊)ญฝž›9ฃ*m•๊๚*nแผแVi!๚‡ฉ‡uZจŽง…ฬ>ง.ฺห\ ีCบท๓˜Qr*ีBข่c้แtอ"๊ฮีB)&[ o5=สฃ๒=่U ๕~๕p”šมกฺL"๕ฐกฆ‘jQีBtชิรจ…๊a‚๓ื=ฟซbฝภ๙Lืรฉ …ื}jหิ ๐6š๎ๅพผm‚tRโ5žƒีmŒiJ ๐ฆ1ัe€Mะถwภ›fbิ$๛< ศ#ZL2 เsฐHˆ$ฅ:v††งภฌ๓ถOŽh.0Ij5#ผ–ภ๚KŠว.86ฅด็B9็ ๋็ผŸ›๓e๗dlœo2X`Q„ฯhGxkฯyผ—อ๓ฑ`Mt„Yฮฉ7ใฬ0ฎ›nฮ๓๚i2ื๙พ.x8EIะQะ‚‘y๔ฉq฿Cค<ทœโฆI uถฯG7oแ|……๓Ž'pnr<œ๙K๚NQ$R–๑เ๔?#F8•บ‘ŒคUว)MซFq8`DEx š MEgด—ศ9:6Œฟ1ฦ฿–Kข†๖$Hiพร๚%ฒA•ถูqHqFฉ$อาห*•ณ/‡๔7ŸฉQงู)›DoJ(ว๒ญœ๕ํ#‘งป๒๐๐ค ูฐm{pFศฟฟ…๓มh! 
•€šzจiXิร๑ดนูQs-ŒณะัB๊ีน์"ตmNg’ๆZXง‡.8 S ั>Œอ ต๏AOp>h-ฬ+=ฅ…คผซ‡MตํC{ะย~๕0ม๙^ะU ฑY/zOvEึรฉไผsR‰ช฿}ะŽ)šNบ้๐เ<ญ{ssJ& ภจ€4 ซ‘`าณI›&ยI]'ๅฺ๔ํ๑ปt?ัXาฎvส็รŸF 8วฯBว้–SJ=ึg›ขAฆฐใ”ํ01ŽZรpH…t‡4FษgJmๅx›ฉ์+ฬ†ŠใY9œgิๆo˜ณศําN}dŒ~—ึ—CJ"U๕Žcmเ|Ÿอ:”rป๒G๊7rย๙คm็&ํCัBRฺ1 ํs^u/ZHณE๕0jalžuHzศ%ฏ#ฅ;ฯZ^๓ฬ‡ฅ…lฆณฏ[ๅงF-ฌn/ฃ…vdฏ๔ฐ‰&=$eฝ-l็/์ช…ุฌ?ญ฿ศy ็“ฐ]๕กทืn๗๎โšmUฬy฿ฦล[ผญ˜๛M:i๏ Ž; ม9ฐŽu๋๎žขฤ% LภN2‘f@@ง.™-ฉาิ=SใLิ–H9@N๚6H๚8เฮsฉuฆฦ(็r˜ฃำ4Rฝ] ’Mค8g˜คVธ$โ œ-wพใŸะ5•ฝะฑaปกšc๎๘2บฆ; (ย}Œฬg”ปyœๆoc๚ฝทฆศ8 ŽM’oxแ'odZBํ๓๑๗ดpยy๗่้}ึ๋9ุ๋6ฦ ั วฒ3VmŸ‘hธฃี0ล๑RXfึฆบ๕๒qGอ$0๏วAŽงišcม9ื—œ=าจ้šอŽฌƒฬj+›n๔๕า& ฮ๗฿|ิŒเhW๑แ6ญ}š่!บ‡ฒX‰F-ึฑ|ฃ#ธz˜"โี8ตQz่}U-บzธ้‘'uด;t๏™Aฯ#ๆ+’ฆZ๑=็่aฅ…ืัรจ…ิ‡XM[Ÿ48ำ‹บj!6๋ล+๗ ็mZ๛ท๋w|o๊่Nไ(O[ํ•ษ–ž๓Np 4‘r P‘๚M6ภM$ธาIกถ1ขาภ8siร8:ฒkถO@N๙ฐก<ฮWgAศ?‘eRพYtเ๓ค๓“5@J;  ศOvА;ำšr6#ง+ยF…้่cม9‘rRู]่0mฑi\ ์ƒฺฟ๛ฦŽMœ?xฺaiZBํปํ{[8oแผ~ณ9อrh †SJฆ5—ภ9Qค|ฃ!าฮŽDŒ6?๖„Nืv๊)S ป3ซh:PŽ# ˜sI&๗ฺ!\ฟขยytFํ>™[ณJqD‰=ซฅM฿โ฿I€๓^ทFp~ภป–Ž"ส์ส#?ฺ/œ”5@:ฐ…๓ษีB@ๆa่aิBแ|,=ŒZhษภ.˜s[=T ‰ฎำอ}P:4๊ำWT8OZ๕Pซ๎W; ไจ7งืFิCตpเ|2๕pฮ_U ฑY/้ฮgŒN8'ญฺs›ฤ]ŸฝๅF8฿๛๏Ÿฟ์โฯผหRบฐ#ฎH๛ะ‰4'šH9้๋DฦIYุIโyœ่9‘๊ๅใyJ;หYP idPgMWo>#ู6%#KไœH๙ƒ‡1™ๅ+œ“Uแœ8lcคฌใDหs็ฦ฿ZA8ฦ๓ํ๘>h8‚oxแƒgจXx‘ตถ๏'ืยy ็Rœฎตsวศ่,Žหย่๐อใ˜๓`‰}ื?+ฤŸwfr „›ยŽณŠ3J„็•วpHนœ)PพขFˆ–qHฑฮ—œ)กHc†pFฉ5'JRSqHฑ๋๗Ÿ6p~ยA๏๎tNฮmฮั๏ฅ[๛ัฅYฺขาn+ํcฅKi็Tฃƒธ\ฉ…๓ษีBเ›๎t๓F๓ิBa=๊!7๊ฯs-TฃZ{Ž ็h!81Ÿ*ใVd-L๚–C:๗ำ ตารQZˆ๎™Eต๋๊แฝ_8ฯ›_าU ฑY/yzัร~fดN฿ดINไ†ถ*ฮy๙ซ‹Cžขๆฺž๏jษ˜้MM/ัQRitฦธ1œจ8uึ;QงC9ฯxy ฆk๛ ›”xข฿v#๏๙คZO—D์ฉ{ง๎ไ4ตcมˆด ๓ว๎ป=]1'ใ€ˆyฌี็\ฎHpND\‹pฮw8'•]3 ้ฆฐ๓|แ^จ๏ท๒€๓๙็™J>๊l฿ํ?ะœ—FUๆะ๕.PfUi฿ชŸรค‹๊g—v^iW—vUiŸ ฏ๙riท—6ปฒMZ8ŸB#ีˆ9๖๘Ž5๋G5ย9u–?ำศŒXŒšห].:ช๘ศYว$'ำดu.qBsา)ยล่เ>ู`>ฬัๅฝุrwP[ฅn5๏DฮๅR€ืyๅ6ฆCฺฐ3๑ะแ๋๏้ฬ_ฯmฮ1๔9Ÿ้6ี๕P-dQ‹ZxY๙ฝดรทzˆ}ศL์]ณฌ Zhวv๕0j!ฉ๎M๕ฐ0–.๏ลclJi!0^้แ(-สีรจ…5#%ง<œฟๅ%]ต›๕oใรyซ…หฦmจๆDอ๐O?ฯIvเ~แ(8?๊ฉVœย5‹ ึ[?ูใwอK0Ed”:Mฮ€oŒ๎็ค‡SฟM๔#Mœ)ฉํtx'๕ฝ)˜3—›๙ิณำ๕ู๋Dฤปฝˆง;ผ3ฺIg7โ˜ำภŽฺp:ิำอ›še>+pพ่๖k:ฃฒจ;ท๒ˆcSaธ…ธณhƒั‰ลฆณ1็{ด๓๎‹ใฮฆœ๚่ิ{ ฮ๖ิ‡ฦ…๓r๛๋าn@[K๛ปาฎ,m๕์9›”vZ้k—vIฑดGPRiื๚ฺ ฮwi#็Sx#•“9ฏ'xDq๒M?ํฬ|ลๆVณaนฐซL–ร๙^ฟ?*Eะฑžv\sกBŒโ~,nค{านผ_yมฝ8›หฃ†3nco3ผA4Š›*pูชHQฒชฮ29ก8U$ฉ่:ฃŒ"ญณ๊Lฤ์ฝฆ>œ฿๗ฆใฌณ9วoืย๙4ช9'“H=ไR(g#‚N$]-ฬแS Isฯ๕P@7“(๊!5๋M๔-œชฝ;&ช…,24ีรฉ็ฃดฐ‚๓djกc#ีรจ…>FŠ๛ตฐœo๐o]ต›ต๚*-œO-ผ๚c๏,nยึ ฬi๗ป฿\hฅ—$(g๛R ๅ_๛^€ไU^–ข้9œำlธgd้หto฿๏k’โNิ›ะ™9ํฤฉฆY\0๊fฆฺuาไk๊ลIก'ฅžFsx]ƒ9Fตั  e8 ึยณ˜ภg ›N฿t๘ฬฟ๓๚b๑-sF5€ใaw์ป}'๛ ฿ˆฒ/8 e๎๔"ษคฯc3ฮำwซn"้Dฮr#์O6kอ…sš F@งc๛T‡๓‡.8>•9ิู~Ÿp/pพNig„ปcูs-mซขฆ?G๖ผ“J{๋ ็qDศชซฎฺ่ ๑๘—l‘!ขโั๕~ำ<17fโ€2F่?ฮ9:9ก˜iคฏcD†0ฯบื๎Œั™จcูห๓›8ซสAD๊ŽSg”ู่x คH™[‚๏*-Oใ#O๎ำๅ>บ[oY9tSฮ~oง“rns~ษฮฅ‡‹~ธUฒalw=๒ƒๅ่!ฉ์นNช‡qSัณ๗Ÿ๒฿-TีAตฐ›๒|๔pฒดฐษ"eS-์U' ็hก๓ไงผขƒ๊aะย๔ ’่aฎ…๊แตฐœฟu๕ฎZˆญจp>H฿Xฦ&{;k๕Wฅฺ๒C๙ลhนNโs๓ผŽน‘ส l2FŒqŒZYนเคธ3†Œศ3VทQฃN”ป—4๔nั๏7|ใื ศFgx€เ'=h:qšัQ'…๗ใyGM<ฯq๎7cแ07RฺšSkฟ่ึ?คK>๛ร?๛jช7g4]„s:โc6Jุว’~—5"œวNๅuวMค›๊ผ*‚เžW}>าฺญ;็1>ป)๑<hฬงœ?|แ ลข฿ŸRk๛๏˜๚}ฌฑ’ๅ๖žา ท?Tฺทณ็œRฺ๚แ6e?ฏ*–ํแqKiOp~S•xiOi#็Sฮrัฮว‹Lยนํ๙#ŠqBน|ฯ/–ยนŽ(†ใYทแฬโ`ฅน“9ˆhP?5˜u๛ก“2ๆํ๑Ž-w>วsp9ย94ว mบ/RM*แœzVขo,’`Sา!5๚“9ฃฃžcน"า8ฐwฒฉ์Œvเ๗/๔g6็คฺศyC=&œ3 $kจW-TฃขkQs-์ฆ‡€=Pง[ƒจ'„ขM๊aฏZ8V๚Xz˜k!ฦŸ0J …s๔p*kaฎ‡ใjaฆ‡ร๚†๓ทฝดซbณ^๚Œ6rะ7œbๅี‹ว_๘็-็cmิำ˜@9#ซh–xQ7lใฏ~$Šี๚\%๗;ื8'๚d'"N9๗ wฎ:ืu็ฏ๐@9๗๑ุฮ'ฮMฦ‚Aพั .อษžฒ๔๊ถ?ฆE 'tฦั1:‹&{9œ๏X็Dโฉ_g฿๓ช๚kแœิzŒฯย๑bฮษ`แ l*nึkur?ป]นผ๕พ้‘ึ๐ฅ'‹fŸQk๛๏๔๑^"็[ึภ๙!ูsNญ๓Yแ๖?–vYi[„๛VฎRๆWi๛่-œOมอฮ์t*ฦฦHๅ$Zด๛%#้›Ÿบ`$jค้”bใ9ฃ8R8UŒฺ`็_ฆฎลDLRœ3ภADร'โ”๖ฒMœใ„bใม๙v=5ณ)็?2ˆดอ~ึyŒิ˜_๓ตdƒฺzิฮฟ๓ฮฑ็6็—Ÿnแ|้!ใ$‰žซ‡ใm”Žึi!ต0Fีป้!ภ๙ๆา้B Y Dาจ…Dมั ~`{ขฏ„๖็uZ่นˆZศ”๕pสhaŸz8YZ˜FJNฒ&8฿๐ฅ]ตkแ|๚hแ7Ÿ๔ขb๛ฟzNฬวƒsjาฏ๛ิ–iLิ’๋/Iœ˜DA‰€’สL 3Qแ๔œ1เœqe€f| ๏e>9ฃฺž๑พCSWxป๑: ๛Zjั‰คe'ๅ วธNc:"๏€0`^็ต‹ษ%œื€6 
~N"็ฬวฦ‚sภดv7hฌG๊|ฬ2ฮฉpN๔Ÿภ|*ภyท1jูnพo๐0ฮ฿G›l8_pูฏŠลsฯฎต?ปอคงต—฿๒๚า>[๔8ฃ…๓)ด๛่“C:8ะฑ|๛ยล#ะž[ทอfrDƒqดpLIu฿h‡;uๅ8hฆ:NvmๅD6ำn)ฃถ^R้uJ๙ฬ8๊œ—u๗:=9ฆŽชณมิr็“ต ฮฟ๗กฅM์2›๓ซZ8Ÿ†zHsธบฑiเ\@ฏ๊ดp,=คY‘`ดะE9.๋ด›Jอะรฑา็{ีย๘\?ณpฮgŽZศศ:วwR0%ดpŠ้แะเ|ฃ—wีBlึหžูย๙4ายๆฝย9F34,฿LGfเ™"ฤฅuๅบจ8Qโx_]ํ8ืiฦ๓_V๊i๊ฆษ่4|wjส‘†QŸŽ™ฦฮˆ7 ฐ‰lŒVKซฯษขuX๔eเ๑๒ำะ3n-e ”6๊ษ‰s˜cD๘9V๊๗ญญงyYvฤ_^ เ|2ถกย๙•g”vิุป|ฒ8›าๆ•๖ผะ๎ฅูs6อย]Z,ํโำาพYณ฿Uย๕K;ฆ…๓) ็D‹ฐ็#&œR}ฝ=OK€N”H'z<ฌ†GMถ‰์ซื็ไpบ•"F8ฅvม_ฎใFlE…๓Cท‰xีุœำvnแ|๋แ๒าB2btข่ฮัCดMD ‡52mZุหbe/๛‹pŽr=๊aซ…หฮ7~yW-ฤZ8Ÿpkj๛ 7Rา#„ๆDม‰ึ*dข์Bป‘u๋D’y@N„hฬI›็>เ์']ug๊ฮุ7 Xฯ๓ฏOึ oCชษฯฃ็ตฯญ๊’ปmvฝpNฃ=8vjไ'ํŸŒm*‚๙ฐแ‘น็KtAญ๐๙ํ{ฅถIีiฎํ{T๗m‡N๕๘\๋อIu/ญจ๊สGL+ท#ช็๒ุ/๋ศตp>ถ๛‘j-‰แŒbฝDะฝm}ฦฑบฮ(Ž(p>ŒฆGƒ4่qBฆฏ5"‚†3ŠSส์d๊\ฑvkถ5‚๓|ti๓ฆฬๆœนK ็ำLImGีย^"่“ฅ…D‚ีCตฐW0oบ˜9ต๔จ…Dฯฃถ๒ัร็›ฌัU ฑY/V ็-œOxบ้-{ฯ๚เdฯฺ๋๔ฮฬtRะIแฆ~œ๑ktiๆMk'ž€ž่9uไ6่I ๒pษM dต๑Œ€c๛ ถว๏พ1ู 7> )๙D๘้$ฯuๆลs,4`ํถ|ร น๊7ุu‡žเ|ฆ[+ภ=lิž ๆหฮI๕ค™ใ†ˆ มอ๋ฮว›†๓ฺหHข้ฒ๑๙Ii%bไ‚Nฉตฌํถœแ‡้ถ\csฮต…๓iช‡h c%——าแZj๔p2ตpบ้!ZhF•€๊แ€๓M_ูU ฑYk<ป…๓iจ…หฮjh9 Œๆ€3—ภ:ฯแ1ม ็๙Dลญ#งพœTo๊ษ["๊ŽJฃ{๘อU—๐้ฒ9ž… า้„O๔œ™แXป-_8_p๕Eา„๖฿ํ3-œทp๛F*็๒pH™—Nƒ9š&QC-œOฤ‘"า๑ข่lำล9ีตŸ๚Jj๕ฑv[ฮp~๘6#ฃjlฮ9ปทp>M๕-œศ‹Ak!฿IำNzุว t๕oฆi!Zฃ…Dะ9?6เkทๅ ็]ดkแ|๚๚†ห ฮ™{.„าภKjช‰œcDม๓zs`lŒจ2)฿ค{๏vสU้า๚r๖ฐ“ย๎lท…ฯiศ๖ะิ†\ œq,>๐9นp^ช๗ฦฺm9ร๙5—‹oผขึ๖฿mงฮ[8ŸุFz;v๓Cรซทฤ!ฅ“1‘"ภ‡ซ—fGF•๋œา‰tŽฏ๊5๙คpb๗Šฃ’ตฮดศxค›sž-œOc=ผcม%[^Zh‹rฝjกใก…S]ียA่!L˜}dฒV๛„๓ทฏีU ฑYkฌฺย๙4๖ — ็คž่‚()DฟI[ฯมœfoDั…w"ๅค~ำต›fญ=๛ถโป˜ œŸ~อหฬ]pn*๒Tจ•#ืšl๑|ถZุฮฏ๛}๊บ_g๛Ÿ[8oแ|bอpF‡้5ว!ฅ๓.ฮh/p>ˆ&qั1p>ศmขpเcว&k8ภ๙?92ฟธฦๆzฏฮงฑF-$’>ŒmฯKGขๆคhง1“๕็“ก…+’๖็๓?.Yซ‡œฟcญฎZˆอzE ็3ฮ‡ ่Dปฉ5'ยMใ3"\คsำ}ๅ-พูIjฬ‰˜ำHŽ๔w€žืุDZr๚lภœT๐sฎปงxtมรษhGผuฦกMq8ไึœชqL‚๓‡ฏฟ<}Ÿ๊l=>ยy ็฿pD‰ kร!ตฦ’.ผ4ฑ๑Qท่OlŒฤ๓ีอิŠฒํ}ูQษ& ็ Ÿ’ฌเฮบC๑ฤฝ?ฌต9็๏ย๙4ืCงX ฮ‰œปPIไ\=–๋๔0oว๓ีฑฝีร๚แ|&ia38ŸีU ฑYฏxN ็-œOhจ้ชNดtฃ่t+ณะŸปอ'`g>9ฉํ<`คk9)์ภ:อา~๛[˜ำŽˆ3pฮˆ7ๆฒG8_xึแลc็ฦ›ฅg7n…ัย‰ภ9แœ…ฌ๕ K8Ÿ7งXt๛5ตถžŸoแผ…๓‰ot+ฆAฐ6"็ภ9FJ{]TH๘ๆ‡1:ฉ4”#บ"9ฃฝnึา$‹๏F ็ฮ?]<๑็ีฺœ฿|ต…๓iฎ‡่ tlXZœฃ…tkฯ๕‹0Ž^ล…Iฦ.ถz8นZHOเ<๊a ็%œo๖ชฎZˆอzลs[8oแ|B5ใภ9‘rภœ4yส้เ„ไคน“ฆฮsˆœๆ€8‘q`žfqิง๛t4ุ้fœ:ด฿ภรi.yjุuำ์b๑ณœ?~้Iษb4ฝ–nœ{เz๗ฮœ฿๘‡bัืึฺ{~ก…๓ฮ๛)ฤฬ฿alDฮinFดจ[ิวๆp48Šอ‘๒š๓&ๆพญฮ/ฝ็๐b๎}‡%`มpLWx8?jงโ‰๛จต9ฟท…๓ ‡่ 6L-|๕>gี๊al”™kแ;ถ๙yซ‡C€๓+รQz8Sดฐœฟ๓ี]ต›๕ส็ตp>อตpุ€N„ฐ™้M๔๛Io๘Bฒ•6ZชAด๎ฝNฟ:A9๗ัษX$bN„จ:ใำุ‡3ม๓ณฎฝ'ujฟํ/w{tข็‹ฏ:ฏx๔ดC‹GOnŠžฏhฉํs?8Ÿd$0/ฃ|`E‡๓‡nพบxฎyตถ฿wkแผ…๓f)sะ's;x๎‘ฉ๎™ตฬ๖ํVkNค<:ข&J„ สN#…†นแˆ^v๏‹‹๏>ผ8๛ถ็ใไŒbO&mฒดฐีรeท+KG s =T ‹โฮ_ำU ฑฮg† ็ปj“๚>ภ‘n:ด 'bN-9—o>๘tPNŠ๚~็^›jั1เHส…zบ™s›;5่ิœ็——4ื‡‰j’v\E79๙ลฃgึ‰žทฒ๔ษf`„š๋ๆ-œ—p~ห5๙๖นํทW ็-œ7ุpDฑฟ,œผถ9œ“’Sึcญ%Žช‰qD{i7žำู:ฃฝ9คฮ่ษ7ด8๕ๆŸฆ\ภ<ม๙ขำWT๋ฤฃ?_๓ซตน}นรyนui;ท้เ๔ppŽRo๕0j!`ฎถZ8\-$Z๎‚%z8าเ›ๆZุฮ7_ปซbณึ\mนร๙๒ึร™ข…€9๖z๑คร9ั๏wเข‚4†#RK๗u"ไคฒมPง#;tfœโผŽ}๓Dywง˜คs;@9ม#ษโถ๐œŸด`>ฮฦ฿ƒ๓ฯโˆpฮbว๎xpิhบฮ็฿v}๊cPg๛~ii็ƒะาV€๛๎์g ฬ1ฎkƒvน่จb็ GR9™้;V๔‡ฺJัผ า œั้ฦษqาฝ๋e{อWฮL)ฒX“ํ๙Aqแ]?*็Ž'ว‡4u*ฦ]กแ E๑ะ๑ต6๗’oL‰ศyนบ…๓ษัรAoQ ฿ฐ๋ฉฃ๔0o‡N–บ0•๕ะcํUัยทเิฦzˆๆ่แo๎ัŒายฦpE ฑฉ็ห[gŠ๒ไ%0วNz๚K‹K6|ำภ฿ฆนp`ทฎœ๛nเ›ิw ‘.ท‰’ฏ๖‰ใ‹W์~jŠš๓8ใิˆภ๓zาฎig„ ผ๎žั#ศ_}~๑ุ…วw"็ิŸcSuc<ื’๋.๊น6ž:๛๛ๆ/่tฉ๏wcก„ฟ‹pฮyeฦz>7~…„๓;ๆฅR‰:๗ห{ฮˆศyS-o็?ื? 
๐ุฮ)๖ภcƒsุvํQilะๆวžPฌ๗ญ39šMำœ๎pพžงฅบTf!sI๓("mํpb๑ฆฯฬHŽpฮˆ&f(cๆV้œคถ="ลwฆ4†k็ว๎^œXksw๐T๓}J๛viฏ+m-ญuHฃ…ƒิรจ…o๑คIีรฑดp:ยyิB6ิรจ…ฮ›hกัs๔p&ia#8ืบ]ต›ตๆ๓ง œ/7=œ‰Z˜cw์ป}ฒAm4#:s ›u œ๛_ณ๗™)ล™ๆ€8ืฉM๗๙/ู้คิ4Žxเžื๓ภ9Mโ€๒+o ๘3€๚ศ#ษFม๎MณSj;uๆำฮ9nšฅ%eŸฌ:าc@y็|G}4ูD62จ7'c,๋อ๔U ;p~ื-ลc๗฿]k๛๎ฝืL๓FZ:ฮฏื/oธ๛ฦ-ัA9ฅฬ—%R„Us>Yต“8ฟึoN‡ๆG6„ฺx๛z‚sฦ1mv๔‰ฉt„sjZนŸYส\~๊‚ฃRงhฌn#ฅˆอ๎\๐&ด๖ฮ{ฯขXxJญอฝ์Sฮฯซฑs[8ŸZZศต฿7zุmœไdiแt้าไก‡ใม9Z”ซ‡Q ฝ=O YคdR=œIZุฮทXฏซbณึzมT๓ๅฆ‡3Q ๘‘w7์ดUฮS#ตาšnคฎฆ#;Qo@{ี•`œู็ฐNล3฿Xj๗ฏ›gj๚๖โOLเN๔œ4v@ื็@ฝห~}ฝXํถ-๙ำษ่>ีทด€p‘)โ฿ œ/xไัbแ๛‹[๏{จณ8!œ_s๗ƒ ฎฉษล;Š+nป?ีๆืm–|๗ขำyฅC>p~ีฎ๐pเ=w ผฏึ๖ส—g œ7าา๑v~y ็ฝoค๏ๅŽiพ1#ฦ๙‡v:|“๚‡‘ˆลนฒิXโm}ฦฑษฉขใp์H<ŒฦFำ%JSZ9^"Fิฆพ๖g%guปjฉƒ๚ฝำา8&NS.ัฤๅXp๕+—ํ1@8œQภœฟ๙‚E'Kž8+ู ็วฑฯ๘ซZ›{๙w{‚๓rจด?•v}iปMwม^‘+5~ui๎Q ํ๐M ูFiแ๗F~ฯƒ์ภ‹Nu8ฏ๋\ใjแฌฮ้่aฎ…,| ‡ha๊s2œwำB†๊!Zุย9pพ~W-ฤz๓V ง็v๛WทKถเ˜};p^Wซ๘ล'ค†Xภ ให่’~gyน๐ก’๒ ฯผ&ี“:อ4เ๊fOูp๏4bKๆqkถ/๑uŸKtR1^ จ้&8(Okฬง:œง๑oW—ฮmŠ๔฿y}'๊ฟ๘ฦ+ฌงt๊ฟY,บํ้qขๅ;็ๆะi ž๓ั ฮฏพkYเๆ|’า˜9gf<ฏอหVH8ฟ๗ฎดRg๛~u๏žเ|<=,ทฟ*ํ[ีใsbิบkหmฅาฮ*ํบ๊๒)ห ๎ว๙๐๗T๎pฝcญื;ฅ1ฅ“ด>œPขDฦƒ๓ำo๙Ig_ฬ>ฒใEว"”ฮAŽI›ฎXc”0ฮ‘!œาื}็๔Ntว“๋œKœำ๏฿:#9Ÿ<‡”ฺuพ9’๊IT“ฟqF๋RŽ=๔๐ใ?O6SถFp‹/—ฯฌตนณ๖ฮ\ฎฒ๚๚๚้'Šชz  tP‘คHS๑‚Šˆ H(ม†"D>j ”4B๏PละI B %…4า!็;ฟ=ณ†}฿œนw๎™{gๆพ๏๓์็L93wๆ•ฝต๗ฺZ$็ySทInำ๘RฏคัตBปš‡ๅ›1!ญ,`!ฮยร–ศ๙ร๏h๒^ŒR#ภBT]แaฤยlb๎gผ{,ฆ๏ฐ มAU‡`!ค,„‡!;x,คœใa#ญ6“๓ƒv.Š…ฤV[mtึ 6:โpŽ‚+#5zถEydDYkศนๆ”*U‡|ฃšCะ!ใ_๊v”€ผCย้3งŒrN:ํ๒=5ฮ”^สฏ[Rฮk}Aภ)g‡œsอ?}S๒ษ‹๗ๆˆ๙[ฯุ"ฮ9ธะiOษ9๗!โ(่น(่๔ทงฯต฿หุฉsอŸ ส!ๆDจˆsu=Kง๓bn˜’๓™ำ“ๆฯอŒsฯ๎ำ"9/ำตwไI๚vi<ืาkำuศ:ว4ฮ๏(,m้‡ั\D.žBฬq/ษ9I'๊‰'ฮwM” }+—ไ@ฬ \n3“ฏ‡n*$R(!๙j™œS๊“Q๎“RบษuB%b’”s’Tส=9%ษ ๗†iI(‰)Geด๔นr์—‡,“Œzฒ๐“;-"CฮฯJ’O‡eฦจWฎ.…œoŸฦC๎~/ขBษ่๏๓ว3ฒ"’๓ส-.„…žœ{,๗„‡๑fแ!<ไoีcaนe๎@ฮๅZnT น-<๔Xศ&‡๐ะc!› ยร ณ๐ ‡‘œCฮw)Š…ฤV[m˜tึ v,„Œ O#$็(ธฟ๑) „ฤกศzSถbj6†p(เจ็ฬ7๗„|นญoฅํWE?suGmงOฃฬโ่FๅUtนfhตBฮ?๙P˜3—kN<„ศ{lˆฐ๙AูบD\6Dา#กขำฯ๏†H;ฟTt~/˜ปAฬณสี™;{sฟฟฮHฮg8๓ณž ฮ9๛์Rศy‹x˜ฎ~i๊๎ฃ”ฏึkuN6็พูQXK—ชด ็๏ฮ๋W0 “*๏ฦเไŠืrG’›[฿ฮuฟXBสขฌ„‰ %„„๋งฟพน"ฎฤ๕LฮรเzPึฮ๕แถ'๎~2}ซVูw˜tฎ- ‘สIFู!~๓่ะไาัƒDt–U9ฟณORฃฤจQืpาp฿ลัม{œFwp 6’XสYทX๘ŠรCฐ๐บ19,ไ๏J๘—……อแ!ป !™mmiržl`ะ ๙p#SpํภC…ช*‚ฐ‡X๑ฐDr~๐.Eฑ€œƒล๐0baใ,•XSVmชํดw“ลณsJ"…Rง9ใธ|C˜E๎Pqณ=่ttˆ8ไbพย๎ฝํ>ฯ3"OY;.ํ(่๔›Cฮ!ๆ2„ใ๓๕บPวู๔ ุœฯฟกตฬ|ึgฝ็๏Œด’vpŽRะ!ํ”Aา!ๅ๓,,s9„›@egC…€ฐG,l™œฯš=ปะวฦ9็œรIW•›ฆ๋4vr๗Mc๋ๆ^›ฎูม{ฬJjดฌž4๎.ทค๑4ึŒœฝf,` ัฐ‰ื[r‰*D2๚๏Ws%๔”Sฒฮˆน๚ ร…1็˜รa ‡AzธอJy RŽ.๎”ยใิ9G='˜oN5l๕บค–3๒ํฝำ~“Lฟ๘“ฉ}OH>ผ”dแ])/๓ูำ”O›ณ ™5oANUOงิโ^๓•FูฅฬM•๓˜๚ั#9/œ8kถ{ฯŠณsไๅโaบ๎ห ็[5๗ฺJ’๓rฑดฅ7฿ต™ุ-าxฆ„Yุ๎าฅKา™ฅ}๊รc6j‰ง#J4I2!้<ว9Y )็ภชœ“ฤ “8•$†๊pgIFQ}ุค gu๕Œ๛Kฝ\?’{3;ั‚%œ๊ X)gเeษ๙ํwœ™,Y๚pfŒ|ต_‡–ตป๗d“ฑOพ‰v‡ำธธช;ขมBtแกวB‡ C<ิโyฮ#๘[ ๙ )๎ฌไcก Dมมึ`ก”vaกŒโ„‡ยBŠxX9?่เ‹b!QBฯyีฑฐ#๐ฐณb!ส-$e5R(ำ1Tsdทo๚มqh‡ ซ„ƒ1ช7์Rฮ7ใŒNใ1H8คฒฮH5ฃผ]ฦpz้‹ถ๗ผ1W=/๚๛ฺืT๒ูOณใœkfสน]๛‰ฏ9็ศ๏Uœ฿*xก$€&…=aฯoจ๐˜]ง๙sœs(_'"ถLฮง8ห6Dฒขฯู็ิuY{ฅฐด5?hU"ใ๑qwดๅEฏ%ฆGจ ”ฑฃฉฌ๓9ลˆฤ%‰$ี—t*"ํ9}03Fผre)ไ‹iŒOcgฑi…’ย/&n\$nž๙ใฃิฺ=‚ƒยC…จ็tแกฐPไฯฯ Y ม_^ 4ย ฑ|แa9Xัz,DExX"9?hงขXHtk™œW k;+B!†iL.T๙0{บU=ฐฉ‚;ม๏-ba๓ไ|๊ฬYถ•/œทˆ‡้๚I`๗|KฏMW฿ภ๎‚ŽยาR~M์3า˜‰ฤŸฦt๏ภญ_rpงฤ“Q3*็$HpHB9’tขฆŸ๐฿\๒ษ}ฮฅฟ’ดจ๊‰ฆีศ’ง^๗o)%กY —<ั(ไ\e์$ฅี˜/ek )€D€›'็ท~zฒxษ}™๑๒ศหJฅ†ใๆุ๎ใiLF_ฮุOฅฑYซ‘œท ๙{z,ไ1แกวB|ณฐะฆฅˆzˆ…"ฏ€…QหUMUM,ฤ .โaiไภƒv,Š…DทญJฅV,ฌ<์ฬX(ตš’vH7n์ม%ฃ6ސ๔S๏}อˆ:ฮ๋'ชนญSฆ!วฉ]fp8ถ3๏œ~sJูEย!๓su:๕ผsTC‘sฬ฿ ่จๅ”ฑ›แ^Jย™mn=้ั๚สS"n=ๅ >sล‡tA‡„งD^๓ทy๕ หY<=ื3%๏”ฤฟ๘ฌไสg฿ฑˆXุ<9Ÿๆp๔๋กผr>)%็๐eล%’๓ZJaiK?dl?ษ.qธํ‹ัB็_Yp1Vษ&ษฉ/gG™ ฯ\ )Gฦ{1oV*:F=6ฬ‘s๒J9๛ฤF"็|?ชสฅิœ๛;j$€฿ƒ'็~6}เฯศ๙Mท๕*Œ– ใ…w49Ÿ˜1ƒ๒ค|ฤ9็ํŒ…เ ๐ะcก๏7๗XIขเ6มย๓†ๅ|#๒ๅยB0Bว  ……|ฟjเกฐwแagภยrศ๙ํP ‰๏w[ฏฃษy‡ใaฤยฤHžL฿ w=ˆ8ส๖_๎m3ถqsงพ๑ox‘9ฒใฬN|ใ‡ฝ #ิ ้vž'0†ƒ sDmGMื(5\แ‰F ่Fฮ๏นิ๚อ1‚รตFฆ‰œงdUœ#ค|J~Lš)ใ(ๅจ่๓ใิ(y็wƒ็Rแ€;;ฯyc?”_๎"9/Nฮ฿›:ำL๕ฒข๗฿๋žœWK[๚!ฃ๒\เึ-R ’ ้ฝFเ๎N‚J P šฃVั bDBjๅœi2JยD/ L‘|ฯก; 
›ซืEr–์Wปฌ]I)}ญ8C(ปีl๚ภYไิdม'wdฦ๓#.๊hr>…ฯXde๏$’๓[ฦ ๑้ !่sTs๐,$Tฺn๓ป๛[ …}YXXฏx&iำ!ฤยถŽ“+ ูv,,œo_ ‰ ็އ —]”ธcG/:j:๓ต e@้๖ณอฑฏlwผ)้ต†1ๅœ๑j”ษฃะืณz'0‚๛๘๙ปlž9.ํ ๏ธ0ื?ม๘ฯ๑mH7(ถVฺž'ๆ๋…:Ž#;ฤœ๓^™4ฎ?ืS>žฃozะ้_O฿›’w^X˜Mฮ฿Iษ9›Y๑ท๚'็มา’ไ๙ึ>ธ๕‹าNย/f? ’QJฺ•ˆชื’ไrN)'f=˜๘œฉ\‰จ%จ๕ฆ ‘„#็|Ÿj’s‚„—ŸE๒j‡ัจใ„ส!็Co=9™๗๑ญ™๑หึDY{GGฤรefqDธ„…(่Œ ‚rGอๅoSณฯ……9),ไ6!,ฌ7<๔X˜Eฮ๙.ีช"สยรฮ€…ๅ๓Ÿธ]Q,$พ฿mŽ&็/G,ฌอล85ย/ˆ๚ƒcฆZะkพRณM- zV จCะ!๕ษ์บ็Lว„N=ำ”ำื#1/s๓‹๗š)cีฌฟ"ba}.\ฺQลK!ๆกฒŽ!%ํ”วs$ ๆ๕Hฮง]xขLC)ว๎ำทžI&œซไฃว[Y๛’ #Lแ6ข9วnแB ๋5Wo9%๊y๕๒อQ๓็UฎNI;ๅ๋{+—ฉ๏“|‡˜GržMฮวNžQh฿ฃื™}๊œWK#wเขœmวทYดธ{:aฉG=qCก฿„”d”17Rฯ)๋d ]๎JN5Zจ’Q9อซTS#ƒดู นพ3žz๑์Hฮ#ถ!็เ!ไcกศนวBฤ‰ฐ๛>ttฟqW๋ไ<ฤBฉ่ยCa!$\xbก๐PX(\ใu2สjS7ฤB๕๊๓ผวBฦ฿y<Œไs๋๎{ภึEฑุขฺ‘œG,,,อ|.•œ3 ื๕]”‘๋Nธณีไ'wTtˆ>k{=s๚ส!เจโn๒ลO 5N@ส฿;ํ7ๆพเๆ๓ฐCะQฺํxืล9าP{=๏#"M”๖๗^LQง?=OึQฬ Qmณฆšโ.ยฮy‘œ7%็ฏพ7ญะฟฦ_z=’๓Hฮ๋kัืLBJeœ2B*$ค)!WH9'QSRส‘$ค‹#,8T“Z+ํ๔%˜*=%1Dี!Yไ;)HบฝJnฆiาˆช†qeฏ*๕#—Hhน\+KVำ๋i›็|VซD^‰)?›Ÿร๙ฎx-7JHcŸ^๐๊ฮภฝ||มy;Œ'_{$็+‚‡ยBZM๘-`_~ƒาท๛hไควB‘ื,,ฌE<ช,Ÿ๛ยCŒศcกศ6ท=‚ยCตฐะo่z,”ŸIˆ…žจ{,ค’มใaฃ`aนไผ[t[+’๓ˆ…m^จ8ฎCชQฮืษJw๙็ใษืฉฯ1ษิพ'FญAส9ื๕=.{บ`W๋kฤ{๑F ‡4ำnสxJฮฟ5GฺS’Œ*แๆ9”v โ ๋๏žzdแฝฮส๚ษ้หญ›๒ญฎFุ_E+…‡ฤs>็RBฑ—ฒฮฯ0ƒน+zFrษy#,’)fฬขฉ—P ฉ'็๊IW‰ป๚1k?šY‰ฉDJจ‘Bปp’RIข˜ฺ&rฮ‘อJdI@Q‹n?ศ’~WทGยxŠคTฦslHต"&ๆ}y%ฃ$`œ_sใ1ษฤ๙Wfฦฐ็ษyฤรŠ.ˆ&Xจt^ฉ่ยEkY9oX!ค˜ืสค-ฤB0Px่ฟJ8XE4‡…เศ๕9 Qอ!่˜๏‡ln ฉ:‚๔›ŸรB‡ Qะ;#Bฮ๒ำnEฑุ๛]"9XXัตย๎ฝ-ZRอฟฒ๑6'BOI|=s”moสีQฯMฉ๗œฉF ๏นิสัUฺ.b )็5rxฯZฟZW8น] ๗/๘๊ษY_^/yฒ๛๖ฆบCฺyO6่q‡จCุ๙นbž~&Sุ!่(่ำตrxSิีืโฝ EฮŸ{๛ƒdฤฤY™qโigErษyc,$J†J7 ’Sนห0N&i*ใฌrNช>sนหฌHใดก„”rLส3CฅจุตC"๑$ %้ไ5ONพพ@ะQ”H^ •ฯชผSืUฝซ\S> ืŸฯAฯฅWH:9ฟz่’w็๕หŒ‡Ÿ๋ษyฤรช`!$ิใกฐฟW‘sfe–YOไœฑ]x่ฑฌ–‚…,)่`"ฤœืะF๕๘ค๒ไ€ิ =ชํ<๔X–z<์Lไ|๏”œรBโ{‘œG,ฌ๐ยnล=ฯ*YA_ใ—“อำฟ_Jใ‰Z'็จูจจแRษ!ลp”rn๗9๓'Ÿ{ฌ‘kขน%R^,๚ฏธQ๒฿]vถŸ7wภนxLๅ๒๊o9ฟหฝ0ช2|Ž”฿3๒ํžK;9ึษ‹๏อสŒZ9gฮxค๑Vธb‘๓๖Jใอ4ฦฅqช{ผocาx5;าX!๘ฺi,Jcd>ฎŒไผัึาว’ฅKตจิRฏ ๚งe๚Cbค2OอพAฏๅิส Šฎั?rV๖ฦmๅ,ส8IN)๗$9e,Iจ’Uิ$๎Cเe,‡rคkซEๆ€Ÿ’~'๔ šูฟS’๓+o8ถPฮฦฯฤฒ๖ˆ‡ีมBิ_yI@Qา…ยBอ=ึ"›-.uaก*ˆภŸJa!•š#เก63มBช„‡ eถงtaก<9„‡‰œ๗ุฟ[Q,$6rญHฮcnX่Iฎิb๖yHยฟิํจ&ส9๊:ณั1…[ุ-jqa๖†*อธ3๕“หะ Rlj๙SCMIG)7ยœ'็ๅ.ศ๘ฑŸฯ&้๗~๗{ษ๋G๎kŸƒา๙7žkŸีzJผM=W๚›O6ฒา๖๔;๐น;9๒อษษ3๏ฬฬŒใzY.9ฟ@d›c็gœ๓…4›ำ๘Rฏคั5i|1๛|ฝ>OฮG'Q9 ik*GV้"$R†q>!ญลสbษhHิ5Fˆค”ฒN#พมwญิR2J&A"J‚‰’ฤsI*‘˜lˆ q.๊ปH๙ŒE,:9ฟ|ศฑษุูWeฦฝร#9xX,”๊›Uฦ ‚…ฉVหฅbกJ9ส9ฝาX(<๔X(S9แกฐPD@นข{<์Lไ|ฏ”œรB"’๓ˆ…ี ็ŒW[eฟ Œ|~ญิใ์&nํฤฆ'cQk #7‘s3Z›0ยTr+#OU\ฎ๋FŽำ ไ…ำ๛•\žœ_ทาฦFะ1•{ฬAฆศc,Gูป}๎๔gณAภ็็ณฐi ็xT๖zํ=/‡œ?>fR๒฿ 32ใุSฯ(—œฃ†ฏ–ฟฝ๗3ฮู>‡^Dฦyค1$’๓N”&‰ขบ‹Lฃfฃrฮดฆ.I)ฤ‡ส:q)–๛ผ๚อ+|3—|’„ข(ƒ๋’gง^kฅ›ๆ๖Kf-พกำ’๓‹—ผ’^‡ฌธ๕้ณ"9xุฎX(<”r.๏ ™ฌW,Rๆb!ฅีX`๎qร8ƒ:z,๐ฃม‘œ๏ž’๓bXHt"’๓˜&fึ^ r–ผ3Rจ•%ฅู๚ธ็ฬ4rNIธEJฮeจ†ขŽZ=ๆw$ovsq‡ฌsิHดjฏษ,๎uทL้บต๏_kss|GEŸp๒ฏ์ณ@ึ1ฃฃGขNtVr~฿่๗“GฦNหŒ฿ปEๅ<]รPฑ3bษ๙!ไ’เœำ๒=็Ÿฯ฿_.•๓ททJใ4พษy\mZ”’ ‘จ) ลฉš$ถฝ’Q๕^โLy'็ีJFรE)"Šว˜YWํษ ฎด ds๊ยk’™ ๊ดษจศ๙…˜Œ˜qMf๒ŸHฮ#v Z_t^-ึ ถ UIคน์ =Rพ.<ฤ™ฝQฐฐrใ”œรBb“Hฮ#ถ๓ขท9_9G๓ฃิNื๊i๏ฮ;ฑyื๖ำใใ๒%๋MFฆฅ๋ <ฦBไ_Nc฿$๖œ7ุ๚8๗๑ษƒนh‡d”Q7K#ภ1ไใ?ฝ้Ž&ใ†l N~\Y5ศ9=’ๅ&ฃ8“๒๙อๅฝ)๑”๙]sไ<ห8‰วดกธmไ๋ŽO3ๅบฬ๐xŸrษ๙&il–1#žหๅwE฿ึngLH;7bึาw.,dฬX่๑PX(ว๑ ซAฮ+…š{.,ไณ ฉ,ถDฮร K™หE[ลBbฃอื.—œื=ฦ0ษอฟNฃฺ ‚}ฤ—’ญS\#Xk1ะf~m|โ]ษ็=š์ำoxr่€็ํธีฯQฏ9ว”˜Sส>%%็ๆ็,Zธ0Yแ”œbž’rzฮmฦ๙‹๗)ฟ๗Qf G๙8ๅํ(่ธถ๓็yzม9ถDฮเ๛M>W8F-baศ๙ะว'ทพ:)3๏yzYไผQ"p' ็ฝ–ZนFrชG?5ค0 ˜DŒ$„ๅ…ไ'`‚ไดRซd”ค–๑A1ˆƒฌsยฮQe๏„z2้U็ศ๗RโIYซอ+Oƒ๏“ั๒ษ๙9ืž`#”ฒโบวฮฎHY{F2๖=DQ$็ Eฮ‰ !้บอ6๎ผๅvรร …ต†…Dˆ…ยCa!d,็b!$]ํOยBp1’๓๒ษ๙ฎ)9/†…Dนไผ๐0ๆ†ํOฮ‰cnimtย›ฅธ๏เ๓๎g=l้|H:ทOป๕ไ๊็)ฬจFๅฎิB9‡Œ/\ดศHบ)่lฬ™™,ygdฎ'ึพๆุNจคต๗tศ9๗1ŠƒŒcสFฉ;ฝ฿<Y‡ ๓dขฮ84ˆ:็@ฬ ๑“พธN$็"็ƒ_oีY๑?‘œGrH ฉ\รรEYปๆั๚วHF‹‘s’Q‘UT’R:’8’Qๅฅœฅ๙ผŠJ$ฆ~/I:RำIJ!๊<วm฿s๊ษ9‰ช’Qํ"—Nฮฯบๆ„ยฟห0ฎfไ|x[วe4“Œ^ชั๙๛ื06#’๓:ภย๏ฏ*2\x่7/™ลํษ9XHu‘๐0ฤBmVึ๚y่ยB~†๐,ฤ\,ไ{‰{r.ต=’๓ถ“๓]~ฒuQ,$6Lษน๏ฟ๎Œxsรฤa+ืž2ฎr๏™1G๛’แใM†h‹œo๙ื๛“ต~{ใ2ไ|ร?‘l•bๅํ<~ยํฏ&=๏U 
๗ง๛Zrอ ๏y&fฬ]Pึ็]<\{ฅ’N๙ผ mิ=็šqAgŽ8&kŒ.ƒtCฦ J!เน๚‘๛šQŽ๊~ํ๐}’งถม\ึEะ!ไsฬN_n&ไผื—ึMŽ?k๘4โ__ baศ๙๕ฯŽK†ผ~f๚งำ"9ไผƒึG๗ๆั*'ฃR‹˜;KxrฎาvŽ*๏ไ6eœ('V฿งจ’N‚O›|N †xŸ๊v˜TVBMโ}EิQ•%ฆจK **˜฿ญ,4MLIฬรภฅ“๓3๛ษF*eEฟGฮiQ9on\F3ษ่eษ่A‘œืถBะ=Bฮ!ไ๊C !๋บBP=๚๗rฑ0$็vฅฑPฎ๎„FQ C<๔Xศh6๐v'ๆ—Grzrพ๓Oถ)Š…ฤ฿kY9ot<Œนan”ฤผฺไœEY๚Yฑ`ัƒฮxต.ฟ’ฌ‹~ษทผศJ)wว<๒q฿ถฯ#F์Qฯ้E‡ฌ๋ฉqFะ฿ž>ืขญk } ไœุำ„ขฎkใษ9cา ่(็๓ŸeณอEฮน-…2ŽJN;ฤbMœ๘…ต|Cฤฯ๚๒zหจใ ฮฝฺุ่๋4ฮรˆXุzr~อ3ใ’/ฝŸ?ไ<’๓MH•”Vhฉ<ฎ”ี๛ลœrŽQK๎2…ƒœส@ฮด|Ÿ%G๕+†ฎ๏ฅ’๓bษฃ_•T”Œ๎๗[ s›วธOE 6ษท๚.IDน\'"p๋ษ๙้W)น๏™qูร็ฦฒ๖ˆ‡Mฑฐ๐,„œCศม>ฐPช9G๓็`!&“เa%ฐฐ%œซBะEศ9๒˜60มC๕ ๓ุจๅ{ #9o=9฿q๏mŠb!ฑ๗bY{ฬ “๙`ผEล๓ู;,ZZnฦซAฬ!ไ๓ตwK๒ฅnGูใ๓อOนฯิ๓ฟ=:น™ ษqทพbส9D๚฿Kn5ูศ๙ฤV93uN๒ฮŒyf‡ZŽ‚N6ฅ๓…๗\jไœูแฬง„เ9๚ะy e#’๗๋Vฺุิ๏ฌr๕Rโ์ฏฌoฑฐ๕ไสŽตอœฌ8ไ„^‘œGrkันคดj:ฅ›$Z*Uคฟ’$ๅH๊‘ˆ:ž$ญ08๏Ž *uQZ้าอRRT"PŽบ9G%Ry*I7•|g%๋>"—NฮOป๊Oษ=๏ ฬŒKช9฿40@ แ๊„ w‚…Tัg.Rฮ฿>Xศํ,,ไ9๕ก‡Xฏ$9ฏFH9WนปnCา…‡Rฬ๙ž฿…‡1!m9/†…Dษyเaฤย๊(็ฅฎํฮ–lzา=ษzธอH8*บณฦ]๙x˜ใฌ ™ข๔ผวฦZฟ0ไbNLžีz‚ูพH-สใyzฮ +uŸ;+W๖ม๘dษk[Pา g9ส9Wv‚ktสฤง9}่๔–‹ CฒJา;k๏y9ไ๒ผi^Yq๐#9ไผศy…ฃฌ5๐อมIฟื[I็ฅฃscฑ#)D$avœyœ$ีDIฉิtns>ทC็๗ๆ$ธRcƒZw)r.ลˆDTส‘7Lขฤ๏I2ฮw$IWB*ื๛ภ-“๓S๛˜>~Pf๛sหuk? ‰i,Nc*А{๎ดผ+1ฃƒzิ2G< ศy;เaˆ…๎9|H,ำ๘็ฑ EVฅฆ‡๗[ƒ‡`M{c!%๓ฺœTY;Š9ุงRwm\RๅDฅ€pฟ3ca9ไ|๛ลBbฝอึ)‹œ7F,lJฮFTsQžN?9ส8คใ7zฬืญษWถ;พ 9_n๋฿';ใ1๋Q‡Cฤ‡Žœh}ย”#ใด่[ำ’g1'šVz‰๛ข‡๚[ฯ:คŸ๗DA‡œŸ>7™6gAœ๔ก๚ึ3ๆาษหไJ๗Sาฮ{0† Rฎู็ŒNฃ์y็ n<ืH;%๏๔žซผ‚N?y[ษ9ฏXX:9ฟ๘ษ1ถม“wj$็‘œw๐ชBฏeธH>ณศ9}—(Ft-%ค$_จH(F‡ jฐส•ฐB๎5†ˆคถTrŽrำไœ ๙”Aฝ•$ ๚<2‰ฃื’2wฉb$ไ$ฆ~$]เ–ษ๙ษW๖Ln}{pf\xyQฮ๋="ถ<.‡}! ……Rฯม:ฉ็ ฉฌแศs u^ญ“sฏš{,ช๗๊;‚…๕ฮˆ…ๅ๓ํzt/Š…ฤบe’๓ˆ… …”ถW‘œำ'Ž ’s แPศ)m็6!rตO2eืf่หf G)๛ใฆ)็๘ป!;uฎ‰R{ะ!ึ/พ7หxˆซ“g›‚Ž’ฮ๛๓#ีฬฑ=%่*งdำqฯๅfŸงd็v:ฅ๎J:Š9#ี&œ+3‡ใH๏๙=ซm–๔[a#‹rˆy$็ญ#็>๑†™fล‘œGr^ŠQœŠ|oภgๅรye่‘ƒ“sG ฑ ”“ว4๓ื—ทK‘cฑHป๚5#j’ฮSIh)ไฅฆฎฤ-\“[ป’Sz+u_ฆq$คrb6ฃงพรฬ rฮw%"—FฮOบขg2๔ญม™qม}‘œGุ^)_๔M‹่gRN?บŒโ4็œ‘kฬ2วไํไ๖’vฬแ\#–Fฮฯ๖š™fล>8%’๓Hฮk$mƒZ4ใ,Z"็ส% JFQˆ0A"(ๅD%W™"ษจJ;u[c†H@IH1าฒNฒJ’ๆ{ื[Z$|>,5ฉdต๕<sอ๙ฅฏ’ะX5?rsHF1yขคฅŒ„”๋B2ช๋ธyrงหzZีFVœ{O$็›มร bกศ๙เฑƒ =ž๕R.๘{F ๖X( ไจว…เกฐไˆช,ตฝT,dU รsFฅFdยfmVชด]#4ๅวแฑgวG<\–œoณg๗ขXHฌฝi$็ ›’s9•ท–œ/™0ยข9r~ุ l4$—vสี9Bา)iG!็9Lแ<9WฌผฯyFๆwปไ?VๆIว๕siๆžCฆ!ๆJxKkมยEฆถCา)mW2๏ i็(sG™งไˆ๙ผ๛BฬM1ฟใยB;ส9ฅํ(็Zฬ>' ่~o‹c?ฟ–…๏!o A‡˜ฉฝr็มั๖o%+z}r$็‘œื—^านxษ}%‘szูHFQ…คž‹œ3rN@ฎ•xชฤ]q$™$ง๊น”jN‰งWŽ8_Q0Žหฯ&P] ฎฦiขGยงq=ญIJ๊ญ$๕‹ŸOฉปŒ‘HJนฯg“rฮwT)ืKไœ ฎc9ใ…žzญE#’๓ใSr~˜ม™ั็๎Hฮ#–…ฅnT‚…ปzrฑ _ชฮ฿ฟ6-u?ฤBn mฺy9<ฬยBž ฑฐฺm?*i=๊sh’…ฐ/<๔Xศf†'็ๅbก๐ฐษ๙ึ)9/†…D$็ ‹ๆ{Œ+ฑผพ๋ๆศ9๊6dว๕WืิoโG?edผkŠAsJุ}ฯ9ๆ+๕8{ƒ8”๕ฮ{ิH๚—=mฃีpnW0?ๅbNp%ณ7œุ9า?N ‚ำk้ๆ9=คำ9J!ๆ(๒GฦNKžz{FกทืSฮN9ฆp๔›CฬQะe‡aฤัjov#่ธถoต]“ฑhmUั+i'SปF$็g?*9{ุ›™ฑ็๏ษy$็5จUะต˜„ดŸ๕Uข SึNBJฟ9๓ป•”B4•˜J)‚ค+ูT้:ทy E™ฤS3~Eb ๎c$DB'BŒ"C๏"ษจdมj–w๒*๔‹๛|F’ชŽJฤ็ฃŒSŽลrฒg‘˜zrฮ๕ไ๚’_๑ฺ`ป๎Dธ wMdณm!"็ร?ธฎแศ๙.้iฝYั๛ฎHฮ#ถ๒7'"ไ[}ดY้ บฐะuฐP› yLX^€!Bศ้-g ภ-aกสอซํุโ!Ÿ๛v?๏œฯ/< ูจ =rภCฎ/ืบ%,คชมใaญba9ไผ‹b!ั%’๓ˆ…-(้ฬ๚ฎฤB=‡@SฺQ‡˜3ฯœ>๓U๚ฏd…{[๙:IU๓/w?6Yeฟ š<ฦธ5‚็ ๒ผVฃสฃˆR&ZO9=Gิ{ฦฐAถ-ฌL]ฅ๐๔•ฃข๓ŸัซWZJฦต โ2…bฑ‰กrนSๆIEืข’s~$ชz ง^’ิว']_ ็ๆ๖+ฬ™ ฒ˜ณ๘ฆdวท& >นรขศ๙Q๗ดkงษyฤรฑฐ$,6๑๚L,Eาeš)%]žšwNhlšLแ„…r…`&ใิ-ล’†X&‡ีn๓QOนWฮ…‡|py‘l‡ไœฅD]๓น.(G\7|~๕K]็zฃเ‘ฌข่ั‚๐ไไ๋˜—Bฮ?ZrฏE-“๓฿คไ\฿;Œ“o‹ไ<โa+U๔2ฑr†Xจฑj =๚๖ฐขฎชฐ@}่`x(,„จ‚+เx่ฑ#!,Tล็‚I<^อ9็šLแษ9xํ๑PหXซ๛W๎ูdl•žทFr๑ฐ xXB๘ภC… )บ ๙ป !˜M๏๊ฎi`!ฏ a!รQx๖x,าส—๏BดA/‡œ3๔ซ‚ศŠํ๏ษIW๑.Žnล๛ฯ๎ฯส8็เ4๚ป๛‡งqฉ#็๏ค๑jืช,ž็ำ8ฬฝๆ'’๓NปU๊าวZ๕าWาค†$Gฝ}๊น$แT95ที{I‚Dข‰RD)ื่!อ๐•I’\ŒIภHDีgiฝ‰)8ห™Xฝ็$œrE—แJ(ฅเˆศ+9ๅฑJ)E:’”zrŽbE2ชRT๎ซg^}—:‡วUฺฉ^|†˜$ฆ<ฆy๑2Jา๕$น็z“œJฑใwภ๏ๅ1q•rN2:eมUvไo6’Ž;5ษh-“๓ร.์Y(ใ„[ษyฤร2ฑ๐ฑVa!ฝฬ๔ !„ช.rDํ๏๔ทซ้เฃ7Œ !กพผ] บ'็เ‰วBp<๔XจML0๓XXRw…ยC‚ฯnฆu๙žy>ท๐PXA๗x่'xp๘ฟวธ6l๎rฝ<‚…พลŠk๎ฑาvแกฐp๒‚+-„‡ํ‰…ๅ๓อRr^ ‰56‰ไซไ}๙O,ธมs่^๓la; 
>ฅ๖”ดณ1€ขฮFs8—ฯŒแไwœ๏ฬ˜gD—๋„c๛ไs5ต™ฒrH-๊๓ห๛๏n๚ฑ-ปทHฮO_nไ‚ฏn`*;}๊nbeฃ\uœMอฆ}๎๓o่cjธ~gVj?๑ukYXbQซไะ๕,ฬ‘ใุ›"9xX&ถQ`มร …‡]Bย……"์`!“ยBฮีk ชๅ‡ฟ}…`Šฺ|„mยB‘sฐH*บฏ*?EๆมยJœ๔fsยBฬ&L1?oX”kำTx(usภCพkˆ…ยC…2=สค<๔X8v๖U<๔XYูภ ห!็]w๋^ ‰ี#9XุสU(}ฮ=สฃ‰RŠนฦ’Aบ!็(ใ†ช๏‚No7d‚Œj!Fอ†4Cฦ)aGA็ัFE‡ดkF:ฅ๏๔š—าŸฮ7:=๏จํ}: 9œŸ‹šฮgAแง Ÿs้Cgำ€๏ก9๋|พฃ–{ใ)#ล”“ฃDCl!ป”ตำ{ฮ(5Jฯ!ื”ฃgอ;ฒส&ษรwณsปหฮฆ„CŠ1lSฉPำษจศ๙ฯูณ0>ŒcnŒไ<โaฑpษา‡‹โ!ี%YXศ฿’ส……š˜‚…E‡๒๗ศใrr็๏X3ฝe’)e,ไจพk…๒ๅPiป๏=! ี๎ฑจ„‹;?CxจMHU:ฑดนชMJ‘s>ปฐPn๓ยC…! ีŸฏ…ยC…=Bฮ…‡`!์฿C;ฌถ’๓MRr^ ‰ี"9Xุ๕<%f๖]r…(โ๘=wภ…€0, ำPอQศ้็†|CฬQสElน 9GQง˜rs9ฤย Q็ŸำWNy;dฒNษ:D;œ•๎ƒ๒vJๅ9ฒฺN  Cฤ 6 โ”ท๓žx>“6 Pิตษภw _ฝP]’sฎ†p\H.ไ’YไG๕,bŽ๓:%๊œ๑…”๓zGฤ฿~ฦ๘—rค๛ั9ƒ๑ฆˆCธนๆozโ!ใšํฎ๙Y๓$ำๆ,ฐ#-œOฯ<ฟO‚ู์L6ุ€˜ฃศ๓%๚T ฐCิk Eฮป™‚@[Z69๏ย]qฮำŸ7v“!ฆ"๔๎ผži˜ฟฝi`7>ยลตฬZบ๔QKD้AViŸTV’ิ๚๗(๋„จฃqŸไ‡sP!P$HHIPIšิH<ศ๔ˆ‹ค‹๛v%c„J9‡๒EV6•‰“\ชด]ๅํ(้Y '“ฐr,7๑Wู(A๒IฒI(มTHฑR๐ูUž๊็บซฏดAWy;jฅฏพคSIฉWฮ๙=๔T๒{“[1I)„ร๖Zๅ๓ƒ.ู่d\Ÿฃnˆไ<โa…คฅ4มยœ !v]ea!ม9„ฐRจอK๙C€…rn็๘;ชฝEญ-RUฮฒl})žh‚.๕œ#xnH +‡"๙|)๒`›Hธฉๆ5]# UMไFภB๙odmV UMค–แaˆ…,๐ะc!_y•f G/wJขูฤะˆ2•‡ฃ8ำ7NI;BNxrNP๚Nู8ไ˜๗02ฮˆถ”ˆ›:Oฏxz›rtˆ5ญ\฿wำk‹;>›!ทวNฬ1ใ;๎cสGpฮ๘ผ์๙ ญ?`ก’ฃ˜๓{ิrฮใŠ:ศ ‡ฌำๆq[๘rษ๙สi<šฅฦqฅใซงqฟ;o๏4ฦๆ]ืOsJcTพ็๎€ฌŸ–?uพG5๑ฐ}†ฉ๎sGซฑฟK—.=+Hะ้ณ#Iม[Q๕+“h’ฬ@ยฅษ—็qพฅwค‡D”„Ue›~V7j’zฬีWษ‘$‹ฤ‹๛f~”*Iฯ‘’ภZ?wž ห b๎ี๓0!…ฐW"e3@}ไ>้DษRฐH* •ๅ๛‘oRอegsฺ๓๏%ฃ8 y’๎ษ9 : ฉFญ)ๅzkๆฏOH๙ฝ@&4Zmแ'wZ"ฺ^ฝ•• ็คไœ๏šฟาyษyฤร๊b!ี%ณ฿ฐ ๒wไีsaก๏i๖Xq๗NโยBT_๐Pฃร8าพข๙็!๊ya!xไอแฒช‰†ฅฐPmAๅbaˆ‡! ๙<ฆ (_A$r.<๔X(‚ฎ2waaHฮ……žœga!หc!ฟ+‡๕@ฮ7Jษy1,$พIษyฤย๊,JกQW)†Lrฒnฤ]ไœrjJจQu.LฆคDBˆrKŒ™:วzอ)#งืœ๔}ห ’œฅœห๘ N@ขQัQบ1œƒ์SŽ.7๗ๆโ๋ปž\ธตOฒŸง>tิy‘vสๆUN/:› จ่l8@’)Gล†คใˆNษ9D›€tธชญฆ%rฮผrˆC%ušuKFbF"จYฟRlคค{UˆDT!%ฉr.ฃฃp)‘$ยe๎์๙žyฉD>%T ๏หC‚๎แd ฅ’Nรq}้yฅฬV๊‹tŽจ‚ฑส!็๛ฃga_‡Šสyฤร๊,‘ta!BฬณฐPxb!็sฃšห  ……๊ฉึ$…ย๐2 &hž9˜6๙2w฿๎๑ะซ๋mลBอWo Z฿|.ฤB‰ำด …ก‚ฮ๗ๆ:y<BN…‡ยB*ญ ๋ !็ฐ{Q,$พ•๓ˆ…UX^Y•S๙‚›ฯ7bIน7D’ฒk+ฟpJ.fOทRk)พ(้(ะ]ิh”oˆ5’]J/9 8D9ศ=ฤž๒–^G< :ท™ณฮ{ šC๔!ๅ|ใ6%๐*}G…คำงฮg‡์ฒ๙@๙8%ๅiช ฒใฯˆpAะqiวPฮ*๎บุzอQต!Vาžืpn๚ก‚Cย้฿‡”ฟ2iถ‘q*ิฯ๕ๅˆ9มk๘ผจ็(๏จ„ๅ์พผจณp๘>—?iUYั๕เใ"9ไผq–z!้จ  'I GสQื9๒˜z3!$ฑ2ฃ,ล‚q6sสฎQภIFI"(฿” คžJ๕›๋ถๆ๒˜’05ญ๏ใ–SปT!$))ๅ˜UๆูšP้zฉKJf{ทvฉD"็$ูๆjจ็ พฟ ฃ|BJ’N2๊KฺUZซลฦ • (~ˆz#็=ฮ๋i›3Y๑‹‘œG<ฌ.Bะ= ๛8๒๗ฅ๛‹–m็fa!!,DE …‡))bฎMห EX๕๗ฯc2Ÿิ5Tt…jช จrZ,ิ&ƒzๆC,๔x่ฑ0kณR=๘เกฦฯ ู์fa!KXศ๏่จีVrพบลBbี"9XX…บJ@่d&†‚N๏ต๕KS’ญฒlFxฅฤ#2”uศ, .ๅีuzน1cƒ\ใฌ ฆิผ‚‰ฆDฒฝๅ_๏7‡๗ๆ^ƒZN้<ไž dž>vJๆ™yGฅGA‡๔SFฯ{Cเ๙Œ๔หฃ ำW†Cš๙n๔ๅทfIAว@Žพnฎ%ีltะ๋oe๎๏Œ4•žส~ีl@ภQว9า*p๛จษ…uu1q–ส9ฏฃๅœ฿_Rส!็D=aกศ๙๏qไษŠ86’๓Hฮieย(‡ฤ“„†ไ“d“ฒฯฐPj‰,๊Š‘ŒILIF1์!‘"9•[ฑR•ojฮ-ษษฉJ9—วค)!UYงt%ค> ็m%่*Ko-9'y๖ จ’ะฐฌ„TŠ‘บ’ZพฟJ95rIง$ฃดp}นึRŠP๏B‚^oJ‘ศ๙^็๖,™0~6 ’๓ˆ‡ƒ‡YX่๑PXH@C,d“ ˜วA tx่ฑะ+่|O_Y๚J"ฎ›k ห!็๋์ฺฝ(ซlษyฤย๖YH#’8ŒS†sJฑ5ฺ+$็~Qv กค\œฑWฐQŠŒ!›'฿ลุ=แ†˜*[o๎upt”t~ฑ)Y‡œซฟrŽ‚Nู<Ÿ #:ๆQฯๅโŽK๚โแญไ๓ฝ็ŒTรแี_๔0ƒ9ศ>ืrN?ๅ้”ฐCพq็บัปฏ™ํ(๚$J!็*k็3Zคฤ<$็užมฟ†YKDVฌท฿"9ไผs$ค2Kb์Lฉ†b(F”า{‰b„๒-ำ๕ษAะIฎPA4V๓คž“‘„qGK๒๒cz”๚่a2*cคๆ”ขbฯqฟตeœY$]U$œJPEุรd”sป-งาvร‘ะฃ‘่cN%๗h฿sŽ‘1€Ht”bT9฿ํ๏'ูฟฌ8 ‘œG]lก>3zํธ[_1UšRdฯ5~ู฿”ํ–๔U๖ปภˆ6็๓RJโ!่๔œ˜ยAฮนอ{Aสืไ๕ฃ1>/dx่ศ‰นฑfiดey๗ณฟฒพ/ฃMภฎ+#ิfๅศ?%๔l@ฬน^rอj?{ุ›vข.‚‘็5(็๓ษa6Qาะย/+๊‰œ๏า7gเ—๋๎{L$็‘œวีl>›’z’!F‘p’„ขžฃtp›Dิ'ฆš๕ซ‘j$f(%œว9$ช$rš๎Rฏžื˜!Oะณ’Qฏ(้6ฏใ็mYJ(}rJฒ้‰9‰จŽ2T๒ช9ฏ•s™ กยq]Eฮ5ืื'ฃ๕NฮpฦŸmC"+๖ํืท,ržŸe9&?๎โŽ4Vpฯ๕Jc\~ลž1!ซ’Xˆb‚…”ทCึี;,'wž็๏^Xจ“Qฮ …'ยBศ3•Dw&Lช=Tฮๅ์ฮะณC๗หมย……`Ÿ๐ะcก*ˆไV_L9ขš{< ฑฐศ๙Z;o[ ‰•7Xท,rxฑฐ~ /†pMTiOTksKDล‚Mi:ๅ็ฅ๓ๅถ}ม๒อ{ ขผ‚็yฒ๑‡คำ‹AGAงข,๓ปrศนๅํšwNฯ>ฤœฒy”pˆ9ฤฮ๕ย๙^มต#คคซ/rxk3˜2ฮชุ<๑ซศ๙NศอฏฯŠu~๒๛Hฮ#9ซล‰dณQRtบ’Q’-อE‘1ท•„‘”‘ิกถ(z๕'ค„œ}๊U๒0•ส$็๗b.ํญMFฝ;=Iฆ’PฏXลผ–๋A๐ฅฌ‘Sพ‰B$๓#’RBK.ำUe W9฿ต๗_ ญa์sล?ห%็{ค๑ลํ๓‰ํฎiผ’ฦriฌ“ŸI๙…˜ฦU),ไ๏< Qฯ……t‘sฐPีDยCa!็เxHฉนF•iณRำ,<.†สนHบ๗็๐*ปฦU‚ฃša^.๚^zHธวC฿oบต๋ฐP=็ยBชˆB< แภAอ ๏H“ฬถ’๓.;mW ‰•ส'็u‡ ๋kQข๚K L—j 
‡‘หz)ฏQ)<Wƒฐ๓~<.ณ9Ž!e‘L+ U_%กฒOฉDyฝ}%‰=ใ‚(ใคฯ’kŠjD()elืตีจษ๙N=ู~๗Yฑื%Vฌฌ=]ค1ฤฉDฝsฅฑ}$็qUjQ Aฤ$,dณาใ!Xศ฿ฝสทๅหกว„…๊GGa6:ลB™ร…žค{<๓|•‘๓XจืqไฝสลBt…ฺ\ฑะฯ7๗Xศ |o๐PŽ๗8เƒ…ยร,,ิ?ยBˆzฝ‘๓5wุฎ(+ฎ_9o<ŒXX U˜rqTi”jสส[*O‡LKa็vxช:Jyฉค=์gว`NDRwฤa@๙ฃ๏œ(gyrN0Vqt2ฮ{b๔B ;q๊ฝฏ™jฯตยแž๋ลุ7๚๕้‡คcวธ5อA็3า‹ฎ๑krพpก•ฯSF๑ิ ฆฌ‡๊zญ“๓ํ๚<`-YฑVHฮ#9ซไ…bDยD๙!8ท“Œขq$แ$้B‚ˆŠ”“ศ)!ๅy%ฃ$qJH)๋๔ ฉŽYษฉORŒt%คผลผไœๅ— >ท๚)รR๖œsพ/แ$iW๕ม=๏ 4ลขฎ’N)Fฃf๖7rŽ!$€ฤดษ๙ŽฝN)T„ฑ็ลๆคแผฟ‹ฃ๘ณ๎IใฐํKu;š4Žไ<ฎJใ!ท฿ศa!#xศ฿น6ใ8ส,ๅ<เyฐ|ะจ2๓ดp~roืฆฅฐP3ั…‰a <แ7)=VbฉDŸฯ.SปbX(r.R๏วJ ม?ฬ๖„‡!Rz,df}ฝ‘๓๏๎ฐ}Q,$V\o=N<บ3ใaฤย๚\(ฟ๔œC€[ฃ„W;รFO:ๅ๖Œ^c๓`>ิrืU6In๙VW ฬเก๖ฬyVšฉฆt…bŽJŽป=ส=sุ!ๆ๔Wsคgยฮyจ็(๎”บฃฆฃบCฬ Tuˆ:ไœkจ๊ๆ:?eœE=‘๓๎go†}Yัeฏ฿Erษy\ฅ.f“8กl rh:ษ(Iฉ๏A'๑R‰ปJบU๎ฉ๒pฉE”ธSึIIY'e๎$ฆ$”JLรไ”#&คต‰sต๕๏•๓t KctF์๏ฮ9-฿c๙๙ห2’ัƒ"9ซ’‹~็ ูtำธIอ๏QW™;$]ญ@j*๓พร ๓ฤม?๕กƒijำ!ดq้C›˜ มMแ!Š|นXศ๒X่ซ ฒฐPŠนฐPp-ธ6ยยว2 ไr||า๕/Mฟฆ€‡ !็๕„‡๓5ถพ(+ฌป^Rย๛44F,ฌฯ…โKษ8eํ”ชทU๑ฎF ภฃžCะ๙|จ็šง].AฟqีM’{VฬฦชัkQฦฬqi”ณCศqด' ๆจไ\ฎีŽศ™ีAF้ณ†คC๙Lz^ƒสN`Gพศ9l0@ฑ'๊‰œoท{ํw‘k๎~T$็‘œวU๊b9eํ#fไ&ˆ:J‡OH!็จ!"โ~|˜\Œฅ@C`5X}—2Š“r&ฆRt[$็C๐EฬๅฐN\.I็3‡ษhุWฉ๓d~คkเ็๙BศI่9R…0xlฮฉ ตˆkLBŠ[1๎ํ$ค”rvฤ*‡œo๛็S›T๘๘Q฿‹ส.kOืi<“ฦ๒IS๓ฃXึWี๑PXI …‡ดฎ@ส้งึฆ%x่สU๊1ำt aก9 S-ฒ๐พ฿ธไ6๑^ba9x(ŒSวะsร›ภ๙๙ๆj๓แ !็ยCซ Qะ!่\็ ๋ !็ซoป}Q,$J!็Ž‡ ๋sQฺนd|ู†ผฃIOx)ไนฺ :ฃื ่จ็|F ิsฬืˆrฃิ่งฯœrvFถAจ!ไ”ฏCศQษ!ๅ”ีำพๅ_๏ทเ6jพH:ฅ๏:W๎ๅผDS9H:๐จ๓,6ˆ:ห ‡w;›CŸ฿ไ<’๓ธZŸขVPZHโDRz˜œโกD5„€ˆzG^Ž"็Jุd’D"งEF กษ I‰ฉ’ะ0I๕๊:มsผŽื๓>ถs?H ฉJPUพฉP2~/Rฟ9ABJ๙+„œ ฅ,– 9ฅด“๋*c$RL˜อ\oไ|ซžmbฌ็c—๓_น†p{ฅ๑zซo †pqUc…”Z ูdมB)รP๎๓ทb!Jฒ'h`~บŒ3ณ๐2ิTต‘วDnkฃ,ดูไm$็แH4๎Cศ=†f˜ž˜‡๓อ=ข–ƒเ!ื๋x฿ปM=ฯ”๋ ๎ ๋ !็฿้พCQ,$พนNyไผ๐0ba.FชA(ป€•’WJ๕ฎฤ๛ ฮc‡9็8ธC)o 9Ÿีฏ—r‚…ฝๆ๔ŠCž)Gงœ ศ8*9q~6 พ๚เ1ซใ6A‡ศณภy”แ๓9y ’OPฮ{๓3(g:Qoไ|หS๏ฒ+Yฑฦnฟไ<’๓ธZณ–,}ุส9็|›)$ฅ$R$Uจ็จๆ )‰ *I!c$RŸธฉฌO Ÿฦ Aชฅ))๕ษจR%ช %ฃ”q๒z…„ฅ )ค€๛Eฒ้“Q>K=ขž”+ฒ’Q™ใกšฃ–i๓BฤœM n“˜rDA’Šj‘/ใฌ7rํO-”ท†ฑำน—”Kฮ ๔~#๓qeาดด๓ํ่ ต ภ๋w… >นฃ€…‚‡`!„œ ี“6‚…rsz๔x(_•บƒiยรจ{<ิcRอyMˆ…Yx๖†x(uไๅ๗อŽอ สึฅ’Cด!เ”ื๛y์ฦgymจGž}cำCช;JบJ๙|Nขศ๙ๆ'฿Uจc๕ฆ,ržฎ•าx$ท๒ว›ูะ|3งบวor8๚วใkงฑ( c#9ซƒืcึ{Ž[ฑส:)C๔ฦH"ๆr0ฆ'$Œ„ ลˆคิ๗-ŠศR๒ศ‘DฯtWฺฉคิ'งJ<}‚๊Us%ข$ตDKไœŸ็g–k<š>ซ–ส๑}2J๘อs”"ฉๆ นต“ˆRKrŠZไ]Qฮ•ˆาg‰K1Qoไ|‹ใOoฒแc๛ฟ_Z1ท๖zŽˆ‡๕ปภB6,-นปPb!๘'Gw มฐP$]ใ=:mV QัUU$\ 7.…‡Eโฅœ+xŸ–ศนวBLjI้z8ฟQ้{อUึO นfฺฌ……๔๓ มBT๓Žฤยrศ๙ทถฑ(__ซŠHฮ#6Rz †Xศ฿=ุ',ิ&&jบ94^M=ูยC…Rจๅฯัa1PแIธ๐PุnTz,Y—#;กŠ'B•Pฺค๔› ~ถนวCฐยBR๓†pยC0,ไ:ƒ…๕„‡๓URr^ ‰ฏuู ’๓ˆ…uฝ(ต&(้ฆผ๑jจ่กชำKMฟ5GC๎ศQkl Tณq ฒฮฦ฿lพม๗ก฿ร6‚นไ|Oพฯฃบฃธ9ั^ว{๐ุ˜`B*ys}๔œ็K๘9ฒฮ๋x=Ÿ>vœเู8€˜ืkY๛ฦว฿f›"Y๑ญไคซฺ:V2]ณƒ๛ณ2ฮ98๎แŒ ฮูลว<9_ฦˆ4žLc็Hฮใชต๐NSะIJQŽHžHคP;d๊#w0O๎หษXๆHRึEาU๖Nb๊ห=e<ิ$!%$‘t‰ฉศzMHx5…D4ŸŒ† จB๓{์^%RT’*RŽZฎอoG.ำ<ŸrอHๆQPŠค‘„ชฟŸพVบz#็uFa&|[žฺ/’๓ˆ‡๕…K.`!ส.xbกฦM†Xจ–aก&^@`9 ม_Bฎ2s‘g๊=.f’pa รAณฐPฝ็~lค6$U1ไหุ=1 M๔๐ฆyยB6- ็`!ๆ^5ฑฐž๐rพ๒–;ลBโซkFrฑฐพ„‘€ดขCf!ฎsH%„—j0ค‚‰mญ๊]I‚ฮ{B~!้tสQบ)EGฅ–ฃ:สบ\ี้ๆ;Q!ภcl8๐ฝธO9;๗QนQห!๔อ—22.ซ:€ืAุi Š€Œ์˜‡ฮfภิู๓-๊œop์-#ผ0V๑ˆ•๓ๆฦJ–Hฮษ ็—็\‘ฦI๎>ฆš+็oo•Wีฟษy\ตณา„t้าGMฝ ๏\๊นtฤ‘|ส•—วคฆ‹œ+1Uขๆ‹’ไI-’z๎ฦจ{โ&ขพทRชP%H“&5#kTšT!‘๑P!โs“Œ๒]”ŒŠœหฝ™k#eอ“s‰นžWY)rพัoฯl๒๛๒ฑล)‘œGณศ9Ÿ๗ีŒ๖:ฯ ‡ฏ‡››\Sซ์๐ซ/kOืำ˜šฦw›๙9Oคฑu$็qีTBŠZ„ฅœ(พ˜๖pค฿C๕ส ฮ—ต‹œkฬR_ึษm9ปkฐHณ’ฦ0AYWยู’*.e ฝqYV_นpะ‡๐Pค\Xจถ…\#…TbกšS…ะx9_q๓]Šb!ฑw7Œไ_๊v”E%ศ9›Dไ†รื=๚F๋ฃฯŠ•ถ;ผ\r70„ป ใœ/ๆวJฎใ แ6Mš:น?ผfUกฬษMย>’๓ธjg}๒ %ค”ทc\†Z„ยAB%๕\ชฐJ:e๚ฃ$ิซE*็๔ๅํษษŸ#?ฦL‰ฃ’TOิ•pz2.%จ˜2๎ืี฿ชD๊-“ะๆˆ9฿อ“s™‘bง„”2N’zฦ2@ฮื๛ี฿›*y.บ๖์ษyฤร†ยBLสภร Uถ-/•ผƒยC?]xbก*uภ#…>„‡ยอaก฿” 9ส SฝๅRษ=!๗ีC!1็ป๑=น\ฎ< ฉ ’jxH"ทย๗v)Š…ฤ๒kDrฑฐณท1Ecœ}ู”ทSาฎ™฿„ๆxCฮQาฝฒŒ:^‰ัiๅ„ZๅไlpAGมF!็>๊ธ'แl*๐บJ}U @ฮๅุไ|ฃ†ชยXiฒษ๙สi<šฅ๖จtบVOใ~wiŒอปถŸผว๕iJน.ซlS ‰7฿$œฤ“๏Žแ (G๎ซ฿œ2Nˆ9๑๑ง$Ÿ.}ฤขnษ๙!ฝ›ฬž๗ฑๅัqฮyฤรฦยB;ฆx๘๖œฆXจMKa!๏`ก6๊ภBŽยBแก฿ฌ EาEิ……!†›—~ำcกึ…พ…Gค;ฦ{ ,Fฦฦคน7ริฦ„วBa 
pQXศh:ˆ9#>Qฯไผ•œG,lœ5v๊\#่ฤ#cงู์oH:sน1‰;bศKึ{Ž)ณภ1ˆรygwBฮ๎(๋จ้rFง„<ซ\œํEะร@ี‡ฌี6ฏSyฝ ำธv๔๒๕JฮW๙V-‘฿ุ๒เHฮ#9ซี ้ไหrI)1๑โ$Ypปอž•}ำ*๋D="9ๅ1‚$LฃrDะๅ๎.rฎlก‚ฎRqtE˜œJARx๖‡%์~NนWว}R*R.ณ7…ส๒U–สw@!’BnLpT• HH™oฮ๕lrพ้ง'ปžr_fl›ห"9xุXXศJ๑,ๅฐPx(„ธ๓๗ฏ–แกฐP ปoัtแ !‚๎qสใกˆบ/qP$พ%,ฬ"็ก๑ฅWษตA)CPฉๅ”๗ktคˆ9฿›ฺภ qฝ‡œ ๋œฏฒัŽEฑ๘ฺwึไ๑ฎไฐA/Xิ+9๖ZEDV|{๛Grษy\e'งSฎ0ตHc„Hจ่น”j„š.EไK‰' D•คT้๊GWR๊#%‚๊EWฅOL}นง/{Wโฉไ3์ฅ๔ฃ€|ฒซฒM%ฅ^ลW2J๐Yฅฉoิ“s’O_ฮฮ}QฎJ ;ืd”ไๅp๏๒๎7-ร๐พ”]ธ็7(ณf—{bฮ็”b.๔}๖T จbˆk S8sฎื ๅ|ฺขk k ห!็฿ฺ`‡ขXH|[‘œG,lฌ๕ปิsศ9sฯ!็˜รAฮ้A‡„Cฺ๕_๎mท!่๊M‡ŒขotยVึ YGEฆ๗[ส5ไผฃfฃWส™ฝฅžwศ9=๙๔ํsˆz%็ซ์{พ๙ dลW7'’๓HฮใชHBš&SŒbไ ŠA ๑$!%ัโHBช๑B‰ หM.ฦJJ_Ÿjฤˆณ’R฿Ÿฎ$U$๗e๚Qh>D๐•ˆ๚ž๗,ีR"*ƒ'‘s๕W’|ซ”]฿] ;ืŠ๋6cั€บ/ใ9ฝ’ฝxgf์x่ล‘œGs%ข2t"4รœ๏&ฅH›IFQ‰ธf\'”6ฬ๕H๎‰zMFEฮป๕85้q์™ฑำฯ/Šไ<โaCฎ,,†X(s8a!ส1ธ ?rชH/x#,๔UEยCฟ1)< ๗mAa้บฟ/B.,ไy?‘"tcืศHa Uาฎ{a วBยc!๓?พญ๎๑r๕ถ/Š…D$็ q˜8ซ0ZฑjJิrสูญ้F)W9;D๓8ิv๚>†ฮƒœcGฉ;] :%๎WˆฌLฺฒิ์Ž2ซ9็๛1fฌสู+AฮWใฬยxธ0พฒัž‘œGrWูฤœžห”`Rrˆห๘ไWฬ||”zฃ8ˆธ”eฉ(R‘Hโ4']&q๊ป$DE˜ฝƒฑ’Q%ฎกฒžๅผ๎หึU ช„SI/๋1… ผู‘ๆ๗ชท•HfGŠไ'ค$ฃ”ยึJ9{%ศ๙6ปŸ’์๛ป2cืƒษyฤร†ฤร !ใ` ๗C,ภBอ>'T๖.GwฟY ึศtRX(๒bก*‰ย๔}™บ'๘ยB)ใlD๊y™ujป7ม1=สS*น๏ร๊P๛ไำ๋!็ซญณ}Q,$พนJ$็ kอ˜ป =yŽ‘๓ก#'Zi๛EOฟmส8$ใ7ˆท๚อ)k็y๕จc"Y‡ฬฃฒCฬ7?%7}อ#ิsZ9W6ฃฮPัErQื9ฏRe่ี.gW%วีัฯฎDฝ“๓o๎๖ื&3ใ}|yƒGrษy\!็S๛Y฿9ฤฅƒ>A’OR‚„T%$d$h$ฅ$p$n$hจHฏฦใ^1"๑“‚๎ว ))ีQ ฉส,}R&กJdEภฝ3ผqฉDJ|ฝป/๔3ฅ๑ุpะx ๕แ๛1K๊5์ฝ ุdUyํ฿ˆณฬ “สŒ S‹Šโฬ$ ฦจ1FอMโๆ&&1fPฃซcŒC&CLTิ˜˜DMะ\gลฤ๋จQEihฆ่ฆ™้s๗oื^งืykื๗Uw}Mw๏zžี็ิฉSงNีWต๚]gฝยง(;Dwl๎โฤ?่ฮ|๑9U<๙Yฤyใร-Sœ.TS8็BmƒUw-.Tํตjฯแ]ฌŒูD๐”.FzVบนซO†sกgE๔žโU ้\่M0knน‹rธP|ศxOๅ<โB@ฺฟาูiŠ™งUlๆ|˜ล๙~ศ…`วhโผqแ'ฮ/นveNmg๖๙9\™ฤแŠ๏ฯ ‹q„7BqชsฟŸ…9ฮ9#ะนณNw๗Gฟแsน็œฦpˆsj”w|ฺkzฎtv–=ฤป ^„<ฯ[clHฮ{โ‚z{๊๔๙Lภๆ.ฮwxส+๓฿ญ†๛๔ิ&ฮ›8oท็7| ๋n็ฎปใSน6ši Qฅr*“%A™:๕jค:Kดห=WอขR:Uw้ต่ฏ‡๔ิOฅgฦเ3>_มญq -ตM฿Œช.ิ]" FO)งฦPฏคš|ีWา•๘žไอ*ฮ๛”฿๋~๖|ธŠง๙ถ&ฮn™โ8บ์1Uธาิ tฅฎซฑ‘Ci›๊ฐฌ คบJี˜rŸ๗ฯล ฅณSฃJ์=)…sVq~ย ฏ่ž๗๓\ล)งพe&qžn”pAยw>—ฐท=๖๊„K~”pJ HmSrกา#๒๛F.ฤ=w.:บ›kาฃะž$ะฝห๚|\(>” —(ืลHe ๙…]LPSL]œ"ฮีsƒฯ„ฯG\Hึม–ภ‡ˆ๓‡>์ัน์ผ๓3‰๓-n™7s:๓ฯฟ~๙ŠœโŽ@Gˆ#ะ•ช-a๙‹ฏํา๋r9jี๒t\vs8b•ฺsา5j Ž@GธใŽหQว๕บt9าบf|Zwฎ,–ธœฏคฑ \œ[‚8W=} ๗็๑3‰๓t%แ๓ —”ๅฮ๖{_ยต ฿›๖๙“Kทยค—ฝฅ[s฿ๅ@4;Fคvฆ Š ”ไ]ชทฤ!‘KN€vaIsgษvี!๚ธ!„ฎ๊ั=(•{คฦqž๊้ฎ๗บ0ŸTC.Q๎มจ;CJื”+คYฝ๊ฮค%ฦ๕^5๏˜ภ—aฮE šeงh ็Ox๏tฯนชโ้งผyVqพƒญVย;ห๚a ็'/a„% ถ€ด6fž๘0r!ๆเC2hœ ฎ๐! ท\ฐ„[ฤ…pRฌIG +ๅ\ฝ:œ %ฦ=[ศyP\ู่Bฮ…rษยค7|ำ๙i]\ศล็CqกฒซเCธืœ”๖œึพ…ˆ๓‡=๔ัน์2ป8฿์๙ฐqแ–yปโง7gaฎw:"ั ิ,q‰‹ฎฮ๎:x๋ฟ_’g:;ฃืชคถ3V์๐฿Dwฤ๏Ÿ›—ธ็ผ๔œ\“๎ ใๅฑ9B5u›๕๙œโ›4}.(ธ@ฌ๓>xฯิ็ƒ-Aœk<\ ๗~ุ๑ณŠ๓ท&ผชฌฟ*แ-๖{bยฑq^}ฦๆาFภํถpA้ฅo๎ƒา5?๙‹{7‘ ม.;ํื-งlฎ|ุธpหฝ)ฝ‚ซnฬKฤ7๙ท~rC฿4C #ิฝ.qฮบFซ=๏ฟ‘ล8ฎ9 ›:tึๆฌ„::ข(ๅ}!Sี^ี'5‹“#ฌ”uึqหqว•ขิเNŽ9sqaจืถOk็sล๙}Žฅ๙ิฐmโสล9ฎ๖^e}/๎ฯฑ๏~q^}ฦๆาFภํถฐA้า?หik.(ฝณRฺ"˜R;•า้.บ‚6–'จSP*๗\๕็ Lีี]u—^“๎nบ\!AbMนCŽน‚QฅฎหR@ช Tณ‹5ำ| บyŸjล๛งถ’ฦG8EฃนS;hๆ[€8๖#8น๛ลgCว๑Bv:‡ใ^ฒŽฏ๑ว ห ื„—m๐B็ฝ ฯiโผ6"ฮŠ 5V „ธฑ ?ภ‡r_sม u‘PNv,๙‘ใน่พ7vชทF„ธะำึแCนไJฝŠฟ5BS!๔>U‡/>p!โ|ี9Y˜oๆโ;๏๐ฐ๎gผงส…ฯ9๙/ปv:€_ฒ5๓aใย-๛ถ4 ๔หฎ[™tD95่฿_~S›ธไคฑ“พ.GQN <#ๆ้๔ฮ>Œ#ัŠ˜Eุ*ฅQŽP—8GไjDบzn๛่|8gีอ‡\t=N†ภใ฿|^/ฬทq~๖ถœ4๑sf๛‡ฐำึ— ำํฦp†u็ี็ol.mn "ส—8็ห6ปEPœี „‚U‚Pอ—•`vJ๋”kคิrŸ‹๋#ี4ฮ]!‡าุฝั›\(]Cฎ”m๎–{ ช ิฟI”€k๎;ŸŸ ้์นถ’R4‚ป๋s›}0Zˆ๋^ป๎ด๗œSj,…คฌฆ}ถ›็_(fฤ3+Nัส๚;*๚์&ฮm“qaแรศ…uธP+ล…p…๘๎>~Q\(=Štqกเ\(>ŒะcบH)t.T? 
]”ิ…I88f น(๗r‹r5 ๅ๓Pญ๙€ ืŒ–[๓ˆ็v~ไ‹ชโะO๊žtovSp๊อ‡ ท์ย๒๋WuKาQŽƒฮNC8„79๎9ยœ๔w๖!žพsล YดSwNปœrjฮฑZgR5jอทK๐*}ปผฏk๊:8ยqŽ่VธnีŠ{ส:y=๓†vผ8X*m_s1<๙/#g€อ<6<`›ํั๛จ_๛\ท=๘๔n›๗™‰ 7 8฿จ\ฺธ6 ฝ๔อฃ:Kา+ฯสม)ม(5ี`ย”ํคvฌฉฦ’ภT/,ฎž„/AŸDบœt uฅxช&;ฎหI๒วตŽร>อ…น‚QถˆบKŽฃฅ‘ijt$‡Hอ฿xŸิUฒไ3Qำฃ~žฏR8qrึมๆKภเ‰zYWsฯqอ=์็บnแา8๗มถด๖vปงqaซF&LเB\b็BSส;|Xใย KF‘DบืขรO้บh/\z/Zถ‘๘ะ^Šี_C\จ ”<&1..๐ตFj๊"ฅOจเb„–jนฐ/ Hุฬาํk๎9ฎ๙ฎ#ื^[;6.ฒo8ๆิžเ๊!h ว,sFซIœ“๊ฮzRโฏน๑ๆ „๚ษ๏๘J๎ฺp”™~๔~* qDธjดUƒฮ:"Wฉใfํ‡`ž”๊ฎัl“fซ›บD9วี… ‘ 87‰sึyLฉ๙\`8๒•Ÿ์Eธ๊็Iูผ?า๙qฮ๙ฌภๆnณหม]อ=/ฎ๙ั3๒_Kkonc้E4 ฆ–ํจาš๓๚ิm/0j ฅฌ”แข#ZqP„z'c:R; ๒$ิ•ฉztO๓๔FIJ๓Œ)๋ตQ@.ศ•บ้Aจ:+8• —;ค&Gๆrฬ%ฬyŸ8eฃD9ฃ–pีrŸ>ณ<;oฦป็'ฟ}]๓)Ž}ฐญ&)๒e๐ะดใฒึฎ6)๐QัšQZปธ฿ฟ.ะqก.โE.ฒMต่~ัnr.t>Œ็GŸHแŒย\|่\ศบ>“๒นn๎iอ=?dงrอท>l\ธe฿โˆ๋Kฎ]™ืี$Ž™ๆt!งแฦpŒ^[žฤ8ธaี๊nี๊[บ[oนฅ[ฑru1ฯ๓ฌฬ@งQœ๊อ%xqขY"v๋4‹“XGณDT#ฒ!gฝVK๎ย].8B\ขœใใไใ€ซ›ผึy-„9๛pn€๓:๊ีŸฬ5ๅ\t@ˆsั‹ l็๑z^9๗=j˜๘์6๓ุpฬ=Ÿึ5Ÿโุo บŽโผ๚อฅ€maา ^7 ฆpŠR0*g˜  G„Y่€๛ิ๒ธ$‚RนDทฆuvyชมิ˜!5ช‰uฅzบ`w!๎ฮกžž)'Huไช…'8>ฟสฑฎœs&˜ึ^] ะล –ฝ0'“Žฤคpช$€†Rเฏอุ X๎๙กๆž/”kžn)iLŒ๚DยC์ฑื”Nš\}๚=U˜7>Jธ™~ืฮ…p ๘0r!|กน่ฮ…p‹šจม1บh้ฃแ)็B็CฏMw>t.ิc๗ใj๊&.Œb\\xaเC็ยX[..”kžbฎYห…y๓แ&ๆยH๎๙Bบๆ[6.ฒoJMG˜/))๎ˆu„๖3ฯz๊ะNM๚7฿าถ๒†๎ถ›Wfq~}ๆj,‡hGจใฌyษ๕Yะ“๖ิท9/q ธ8๋แKงsDฃ฿๐น์Tซกœ„บnี…ใšซ™[Lm็พf‘๓|ŽใBœๅฑฏt~}ฮฐNJ>็ฦค?๗,ศน8Aj?๏Ÿ™๎ฏ๙ิE๙ย!9๏W๛\{0\ฌุ\นฐๆž/„k^๘nื„/–Qh,w)๗N๘”ํ๗ม„ๅ w&\‘๐โนžฟฑนดpป-l@๚ฝ7Œาูiไ“O‚NFๅ€๋ถsGฉŠ,๏B/ิ X J%าqะีลX|นชห”หฎวีd-:์rู•ฉPoๆฆฏ+ุ$ภtศ๙แ๕u.๊บฌัhjzง ” ›ภ“ฯFG|&}Gb„๙Ÿ๊ปด3r)งติ๐›ฝ8w๗|ก\๓- ท|.ฬ)ูซฮ็B~๏๐aเBฅธ‹ แŸ‹^ใB๑กsก„ฒgมmโ:–โมศ…฿ช๗‹œ—ฮG\่|(.Tึ|นฐ็รึNฌศ‡เžร‡๗.\>t๗|ก\๓ฦ…ํถ9าsร!ดW&ม`วฆ้์ิŸใš#ฺIcG€‚koZ]uรอฝภGœ๓|ฦดฑ Nท๗ฟส’\ปNgwฤ,B—Tyyถแ@๊‡ฟ“ำฤพˆyœi {ผฤ4ย™%๛!๎ะฬZG4ใฺs|ŽIš9็ฏzp๎ณไœ8Wฮ๗อE›ฦศ๑~ir็อ๏4FŽŽ๕:oฮฑฮqูN-พ>›อ™ =_(ื|KB#เv[ุ€” ŠDๆœ“ž˜ฤ'Ag l+๓ฯ๓cธ#i;Aš‚Qyxเะ‘ชิxo,ง`5ฑ TๅถPบpWบRั}ำาc๓"น?›:oน]W ฉฦฃˆๆ:J„๘š๓๒}O๎Hฬ็@ Jm*ต๚ฃ4@ข,เโ7m๖โ๓…ฎ5oiปm\จ9็‘ s_ ๑$\˜„)œoภ#5.D๔F.็B vํึโCน๎‘ ็[Vsก๓aไB๑กsก_คt.TjๆCใย๎ฆญๅร{.P@š๓g๘ถฏ5o\ุn๗ไ‚’4voฅฌใ#Xqสณฦใnœ๕^œฏพ9oใyˆt‰\๖ๅ9ˆ}ึา๋ฒุEิ โ%ืๆTp0‚‘+กŽ[เฦฝgŒภgด`7›%๎<šใ๑๚t›WS;ีำ+UŸ๓W*>ฮ?X}หญ๙ข๓ฮ๑x๊`x/ˆv\ฤเ= ๚ษ: ณ`sๆยฮ๓…rอ›8onต`๔[ฏฅโฅ *œ ถŒr_A๊ฃ๚B9็\‚6u4W*จRม ๊y๋O$G*ฉ•Jฑ๔ๆJ5ศ๒tL9A—ืPเฉ๓๐ฦN D  u}บf‚าX๓ถ[?ข๙3"Sฮ๙eoู"ฤน๓F\๓ฦ‡[^๑๖.ฬศำถž 7ภ…ธหpJไB‰๖ศ…,ๅถ;j:†.`z6’๘p.๔Fnฮ…ฮ‡ฮ…ฮ‡ฮ…p^ฯ‡ฦ…z,๓กsก„9Ÿี=€ Šqฯ๔€škธpซน‘‚ŽGH#ดYGh/+ยQ‹Hๅ>ย ฮดˆP\sึoปiEธ€cฑ?S—ฮ1ถqwฃู†๐Eิ2†Œวผค‘;pูโ๊pชŽƒฃฏzyj็—– ผภyq>:ืบผป}ล•ํ7\ำ t?๏็๓U‹ะ—Pื..pQฐฮ9๑ยžืน๗|ั}ทoฎ๙ฆ็a~็ช„หร ป{2.o็ูฮu+>ื๋ˆž’๐Fบc|x[๛-ดsm็บูœ็u ๐๛฿>แOškธฐk;ื.๚ šญตุp;็ๅ๑ญอ่?oต๓l็ฺฮตก}ฟฺนถsm6ดฟY;ืvฎํ\š8o_๊๖™ถsmาvฎํ\น6.l\ุฮตk;ืฦ‡Mœท/u;ฯvฎํ\ฺ๗ซk;ืvž ํwะฮตk;ื†-Mœฟdsช‰j็ูฮตkC๛~ตsm็ฺ?lhณvฎํ\น6lqโผกกกกกกกกกกกกกกก‰๓††††††††††††††&ฮš8ohhhhhhhhhhhhhhโผกกกกกกกกกกกกกกก‰๓††††††††††††††&ฮš8oBCCCCCCCCCCCCCC็ Mœ7444444444444444qะะะะะะะะะะะะะะฤyCCCCCCCCCCCCCCC็ [๙—qัข๗'ฐrfุ—e๛/…ํO. 
l฿ฏlvุพ[ย —ทฯปกกกqaใย†††ฦ… š8oh˜Lย?J๘ˆmปwย• —VH๘๏V$| sฌ#l๛o•m„6.lhhh\ุธฐก‰๓†ฉI้•…€V๒xZู~ฏ„W%,)$๔/ ปุ๓5แ๊„›พœpธ=vZยEๅ˜๛๗์ฑ_-d๗ำ„'์m๑ฯฏ%\’pCย;ถู$งๅw.ฮH๘tยWœ„ำํๅ=|น๊๙จ ฟ6แmถ[ ฏi$ะะธฐqaใย††ฦ… 64q0-!’ฐLDXˆๅภฒ๒„Lxhย•๐A{๎ฏ$l_#๕็ป๖ุ๒„'”๕Ž-๋OMธž๛ๅygAเ„ฯMุ)aŸ„๋Np๎ฟpใุg~cยปWูฦ0ฯฏ๐‹ส{ู6แ U!แสgศ>(‘ุHธกกqaใยฦ…  6.lhโผaZ>(แฺB๗ @WKหฝ๎$ีงrœ !ํX๎$แฅ ;„›๐Vปฟ]9ๆ~Fย'ุใใซ6ภRH๘„„ฏsฮ ื$< Bย_เ?˜ฒ๒Ÿย} ฿ป์wJย›หีัFย  6.lhh\ุธฐqaC็ ๋DJฟPศ‡tกูีา[V†ซŽท%<ค\ |sImา>]]=.แcๅ˜‘p|ูNŠะo„ื'่๑FยEย$\ึIฃz9”๛= งร๎ๆฝ„TฆŸฉ๐/r๕8aiยพ„6.l\ุะะธฐqaใย†&ฮึ—œv(D๒rG"วสพ/*WP๗ง๖วฎ๖ปOย๏3แ ้ƒ*WHง"แt{Aยอs`Ÿ)H๘๕ kžT!แ?,็sตsh…„T3๚Ryฌ‘pCCใยฦ… 6.l\ุะฤyร:ี=ติ๙7แ}Ty ๒wฎ๘•๛NxfYuj‰ qC@#-วyฅ2ฝX„D:TI:บผๆ!พP[ด1ฏ๎Rฮi› ฐ๔ž†3nOุีIธ์(ปBHธกกqaใยฦ…  6.lhโผajB:2แ%-็งฅ้†า—่ส๙ŠrฅtUIUz“ี}ฌl_Zาwœ„?SR—ธj๘อP/๔kๅXzฝ‡n*ฎ<–I8แฑ%U๋ม•}พŸ๐ฒHยaŸFย  6.lhh\ุธฐqaC็ Mœ744444444444444qะะะะะะะะะะะะะะะฤyCCCCCCCCCCCCCC็ [ั,Zด˜ฆ+ํณhhhุสน๐ ฟฅŽำ [1#ฺgฑ‰ล๙ฎป๎ฺ-^ผxƒโศw่Žฺu‡๎˜ท๏Žูc‡XpAถhุs‡๎ุฝw์Ž}ศNyyฬ^`‡ผฝ_ฒZ?๖a;u‹พ{ท๘„vํŽg็ถsพฟ๘เw‹ณ[ˆฝF๛ธฺ๋๛‡๔8ห#ึ->๚€n๑1ฆ๓๘วœ๎งm|่่๑ฃ๖Iุท[|ฤCFฯ?dัkธ[ทx]F๋พsI}่N๙œyo๙ฝค๗ฦ๙ๆใต฿่˜‡๏=z็sะnฃ็sŒp,ว็ุœฯโt^G๏?บฯ>์ฯs๕ู์1~ซผกฟฟ็‡ฮฝ‡ฯษํ1z]CUนว่oฉ๗เ๏%}>ว{`w,๏™ฯŸ๗ j฿ปสgqHy-_๖็ฐ๛ฺ๛๚Œภกๅx/้อ๒wฎ|๔ญ๏8v๙ปฝื่๑ฃw>Že‡›่๏'4.o—Œš‘ 0>๔฿|็โย1n› ๗~ŽโBqŸฯ๚๐กSโร1.วk3†]}|x๑<ึžํ&ฟๆั๖šเจฃ้ŽTฟใฏ7i{<ฃร9๘฿ทีฆ ๎ Evงุฐqเ&็|Iึ๗๖๕#ฯ่พ๛ุ3ป wf^ž™oหŸjwใKO์n~yยo>mlน๚๗Oษ๐๛ทฝ้™ํณmo๙™๎ึ7œั๒บำป[๐้=Vฟ๒ิ๎–ืž–๗น๛c/๎๎ศ/uwพ๗๙ํg=ทปํฯ๎๎x็๓บป?๑ซ_๚ํ๎๎/fwื?ฝ(?ฦ>wว๏tkพ๕šnอ๗ะญ๙๎แฏอ๛ฒ\ณ๔ฯบ5Wผฝ[๓ร7Ž๖Yทškฯฮ๗y๎Ÿ๕๎๎Oพดป๓/ฬวหx บป>๔‹Ÿต๎๎ฏ่๘ ฺŸ๓แ=๑Vฏปีฏ8)ฟ/ฮ}อๅo๋บีํบ;>ีญนแŸYwวู?Ÿห’็fคu^7Ÿืฏ๋ึ\๓ฎ|^ผ๏ฏ๔|s~๏๗_ƒต๏“็]๔G#่8ภ็=ณไณเ}|ใีk๑Ÿฏ‚m์รพฺ?m็59ฏ9ภ:ไ็้9œฟHŸแš‹฿ิuซฮI฿จ๓บ๎ฮฯtk.{Kw๗ฟJืnwหศวะนๆc๋˜:>Žอ>œ;ŸAzO9๐wN‡[฿xfพฟๅ๏[[Pป๑žฯ฿œฟŠ_9นปๆ…งไ฿ลี/85฿ฟ๖Oษ๋lป๒yO๏ฎx๎ำปหฮ8=c้ฯœ-{๖iOžu์๙ญY๓ฟ“‚ะํี=kั S`ฺˆwa๘ะน๐›‹ŸQๅCŽ=ฎ๚!๚v๑แ4\ˆ ๓oก๐กsa.>4.ฬฟ๑กsแฅo๎๙ะน฿aฯ‡‘ หo+r!็ž฿oไย›>”๙0rแ€ แ2๘*r!็ล9D.ิ{ื‰ ล{ฮ…็ยศƒ5.ิ็8V8q์y {>Œ\8ธะk\ศž ูท๐a ล‡ำp!฿a๎‘ ฏ๛ฅ“๓ถศ…n".โš?tัv๋ื=|ัŽ]sฯ† o]}๓ท’๗นmี#ผฒ฿๏–[oอX}หฺฆc๘ใซV฿าC๛ l[™pใอฃฅฐสึyฬแ๛๘๑uLG>ๅœ๕nXตzืื๕ฺ+Vฎฮธ>แฺ›Fธๆฦ›3xฯ๛หำ๖e+Vu—_ฟชป์บ•ฌ ฺvี 7g,M๛.I๗xอM๗ฎบฉปเชป๏\qC๗ญŸŒ๐ƒซoสฑี_Ÿ๑๏—^ื}๑’kปฯ_|m๗‰‹ฎ๎>๖ๅฝ๐ชผd›๐ฉ\๗ีŽ๓ๅ%ฃc~๒=x-ฮมก๓as๒ัy]|อส๎’k‡๏“๗ดิ>ึฏ๘้อ๙๓ี฿…๏SF๚nีnlg฿U๖๗โ๓ฃ••ฟ}wŸ็๛ญ๏h๎7ฝ&ะ๗p_ฏฟข|7๘๋๏ฏ๏€ ฑh๛๎ฤEปu{-บว{xใม-\œ๓ํ /9)œ‡ t๖่ฏํCะvŸ>k$ฮำฒFฃ%(ะ}๓ เ€%ขGŽ“ƒืจๆเ…เ5"5QูX%v)ะษ&๛)XIn„^}ู/K9เK๛ณ็ ภš๗ฤsณp$0Ddณ”0ไBC:‚ฉผTฐษ) [s{๓๙ไ ืไZœŸQŽœ?เ๙œ ะๆu”*P\ G‘๎Ÿ‚Y \* ๅผย9ล`ดูOฯั{A(ฐ็|ๅY‰ีน๋Vห่๏3!ๅ6ธXข๓+๋๙ตKะูฃ๚๓น–c๓ทส฿ร๔๗’O๏๘›Ÿห_]Lš”f!•พใ3 rภํง๓คผN ส๏… ๔วgŽ‚Qขฌ/@0บอม)}๓ขใปw,zbG`ฺ๓'ฮโC็ยฬƒโCใBŽqก๓aแB8N|8เBxฏ๐แ€  %@ห๏ฝ็B๖ฒ}ภ‡โBŽ%ฮ‰\ศy'>ใยŸลh>0.„๓zไB๑a +ผ“฿c ]่;j{ํbฅธPแ\X๘ผ็:ฮC|ฯษ๘pp฿/V๊yๅy?ใBฮq^>t.ไ=๊"ฒqal?(.ิE๐๔๗ชq!ทฉนฐ|gk\ศ t.„เC็ย…เC\๓g/: {฿6OํฟhฯๆžoDq.๑$q ๔ า{Adp๑…๛J๐+'๓(คฃ(๗๛ธWวqม-p฿/DQๆ๛#ฤึ์ฃu€17ขี!ท$v8ย+oฬfษvภ}‰g5ยฺล9ยœ ฎ์…:ข\B3?ผfL˜ปบDบ„๚7–ดไ.ฬ9?ฤ9็„@ืล฿’p1ย/N๐น๐๙<%ž๕štใ1๖]นzโC฿รมŽ๏฿ื‚มo \ฤฉj฿‰sxœmืqฎ๏‚.RH /@lธ๗ƒท{ษข}บำํ๓อYœื1gt?|ฺ™9เ\rฺHจƒ<ๅฬม~'หยY ” TbฅwŠ[ฤว>H=M‚]‰ฯ๛'ไ`”`"”บสฯ•ิ)%เ‹โขฐN D@ช}ๅvศ] ีฑฉ&๘!ธร}วi็XGว%&("U`H`™ฮทแ—็คืเ~ฟ็Pฆ@2็เ|๙9*่sM ฆ “ืใ๋ @A*ฏั_ˆโ+WIY!ธฯŸ™ภ๙[ภ฿“R}ลUาใy฿t๎ฅ ภ'ฃๅ๏“_‹๗ฤ{SPžฮ[๓>hื>ฯ๔wเsฯN_๙.VIฒ<&ค$ธ๘- ฦh _AMpค๒›qืhกฤน\s‚Qะ๓…ใC็ย~๔3ช|ˆ(>t.์นฑ|G๚์ขภ‡โย์–>p!๋…\X\d๘pภ…ฦ‡.t>t.”ศื8๊X&$37๐ปK|3ฦ…๐&๛D.,b0rกถE.ฬข4ใBCผ˜จ์!็B็5.d9้Bฅ_คŒ|่\xm>.ใC}nฦ‡ฮ…d4ฬห‡ฮ…ผo๑กqแ€ ๙lgแB]”ฏq!ทศ…rั gๅCนๆ\ค„ dัc›{พภโ|Lt›Hฉ:็AฬW]ส kฎบ‹wwมWVเŽฉฐj>I\ปŽˆ’.7<:ด+Lธืึ`.ส%ศ.+ยWYฮฒ ๒%EผKภ#rผ‚ tฑ<8g)ื…:=":่๓ t็‚‹tฟ.ึNฒ>ใ^œ—๏คํ7\ำถ๒†๚wฎ†๐๎/*อq`’pฏ9์โ๛ึว้1ถ๑Xบ\tพณฦ†rอ_บh฿,ะ›{พ ฤyบฝ„?$ุgŸ}บYo็ษดงข€ีฃๅq]1'ฐ$ํƒส าy<˜้?๚Agy~XZ*EŽr`pึsืฆv&เ๑XNsWš#ž9ฝk@ขเIมา"ูฦ๓9ฎ‰ๆ>ๅx@ฮDqย•พHเิv๎(ˆ"E‡ˆtQ‚"ƒ^ฅ–4ลxrœHๅว๕z<‡sdฉืw7GAฉ‚M‚ฒห฿ถV˜ปหใ)`›; =(• 
W:hอ9/ธำั]า๑ไไXบ็ภล‘X(A๚า๛ŸS ๋ข๏((ืฃ%[ M=#U3“0ฎeqŒึ็ฆtw ิN@PŠเ[ˆ4Nwอ%ฮ›{พฐ|(.Dp8z‘๓กธะE๖€ qื Žฅด>Œ\(>paq•๛’็ยย‡c\(>t.t>t.t>Œ\Xฤpฯ…Wฝcฤ…+eฤ‡‘ 9ฮOฎทธฐ\ใB๖ญqกxยตฯœ\(ฎะ…<–.\JคG็ฤy/ฬ=พˆแม๖ภ‡Qะ.๓yฬร‡.ิE๑กsก๘ะธppฑrr!ท็ยY๙ะ]sakwฯ:6t…Šงืฤั E9‰จ ฮ(ฆLไ๗Mภปhฏฅนวิv่พŸตฤถDา๒’†ฎmา๎xณไy๎Ž_sใZท7ฆ5ณEน 1็.R]Kศz:ป s„1"Yขญtv€ธFt#ยไ>ส๎ƒ฿ฝ"/=ฝ]๋.]ภหQ—Pw.g฿t‡u็๔ฝะw >—@รืuทดbํ…ฃ}ำ>พ s9๔s9๘๓{๋{ยอฒลyบํ%ืqpฯ๓อำ9ื !N@ศž=้™๙พ‹sถgง<ˆsฅxซืRO}ห๛ห)’knฝ็คS‹hW๙ตOv<ˆ“V0CโฎN „z1จrน7๎rs\ ~‰Q9ฝฅnนwzHม$,˜ถšƒ’_[tZๆ็!์%ข ,Yr ธฝ๕’์Kฆ`ดธ%ƒQึ•๊_Iนˆsฏ[ŸไœปS๋2”Z@Z ‚{ฑ๏โกข ศ' ๔Roซc็sฑ‹ ๙1/iเธิั’"<8—pJX฿‚N.’ึsก8F`Žฎ๙บบ็ฅซ๑7ฮO๘~ยส๖]>ŸpIY๎ผ5บEฮ…|gœ]ผ;Š 3ฟ>ŒูD๏ฮ…>๔‹•ๅ{˜/H:ธPbVฟ%q!ฯq>‰ sก๓!ฟ5ฟะY๘pŒ ฏ๐QpWœ—> ณ4?า๋๔๋ J=๘*โ<ืŽงs่๋%‹ื>*ดภฌ–`‡#7ŒฃI-އ‚;ฅg*8%Eจฉ๛ำภ \๕Žั6S9&๎”หIโ๙%ะ8๐ "€z{lจšร ฮญใyp๊nylจคL…าผจwึๆ็“n}šฌRv๏šถๅ๏t้} TNน”r:ฝฝV‡น.7~' HqHๅ๔๔๖‚ั1ื|]sŽ‘ฐ]YฟOย%<6แญ"T– oูšRธฟก๓กธะ๙ะนพ:๖้๎ธ็‘ ี$ฮ๏:ๆ๔ย‡.t'lลyŸฮ–‚Q3UM˜nน!o>mไ่ะ4†ฎฎmบ๖–:๒Œ’)qž๐า7 $Ku—ƒฎเ4งาY‡โ:i=ง[*uqฎnร๊pN@ฉบ;๐}ใฃุ€ญ4B๊!๗Bi็^{)!ƒH Z98–ๆญ€ถฏ$H๕๓๛แN(A(@Œ—Ž๎ฝPgฉKฏมV@ส1ŒบะvgศY='ึlzPลธ‚ฮš8ฏฅนื‚R้แ๏นฮA)อฆRPŠ`ศก%า฿W๒๙๙๗<Š๓I ใf Hฉทtq>KgบRsอ=O๛๚:๏ ฿NxLนโน—ˆž๛[s@สฮาโรศ…โร๊ย#|h\(žหโธPB[๛;๖|h\˜ฯย‡.t>t.”@ฏpaฯ‡‘  ๙ญ๊w๊\ศ๏พิLธPฝ1"–฿ผ๊ฟ{(็›๗Š฿ฤ‡โB ‹”็๓qกฮฯ์่ฒGง]|Xห(ŠeGŽ(ฮ๕Xj~ฯ…ต –ฮ‡๋ห…ฦ‡ฮ…ˆ๔xต4œ$ฮ7ชiœsแ๚๒!!kฎytฯ. fTL่กธ๐sˆswี'Šsวwว›ฝi›RสW†ฎ5qห˜ข~น˜็r7—฿8.ศตOMด/›Gœ#พU‡-']ๅ”ซษ›ฤธงชKว๔s„3ข[ฮผ< sภบ:9$ะฃ0ฏมt^_]5๏ฑทษe\xะgเ้ํW5rห฿ sผืIœ_ทฌป}ล•ฃฅึฃ`7แ?Qœ—๕™G้=๘wY™๚๎อEtอืี=Oทใ>k๗_ &์๛๚ ฮ'>๗žฤง›ต8็?N3ๅ?Q]ู๖šZ57สŠJฐ˜sKอTช4N๏>,ว1จฑT—ุ˜๊yrrrญ"N€ค๑:JL-์ำ$56HP0สบ$ฅ‰Zส|/ฎIแค’QAซฮG8฿๐ฎปๅdไmJฯTฺขฟพ\(ฏ5tฌuwŠไ r“<0 ะTฐG)x๕VวŠ.บ;@ฑึ<ปrฯผ.ฝ–ๆ..็(Œสซ•*(- %ะ๓า)š๏ ‹๓์xA•๋zK0;WำธuM้”SคFHj†4๏—-zไDq/z4;}M๕†/™p…๔ป 7๋*fบ๖นakH#Š\ˆ่)|นP|8่ฤnc&#:zŽž แ0~œ •๒.ม'Qญฦfถข/>Œ\่อ2 ฏปNห…~มำ9Vำj\I +{H)#๚๓ž๚.x'๗Šk\8ฉณ{He๏๗๑zv่EzไB•[Mษ‡฿น0wไ‡ห๘ศž แวย‡Š 9ฎ/โะ,^๔เ‰\๖]ด}็๕ื5>l\ธยย๋sฃ8Ÿห=Nธ7ๆ็ž>_iวฎน[.7r>็\ฃญVTฤนื—+UอZชป๖ฉ๑.ิใ่ฐXS.เ$ห-Gœ#`qสแˆaก–Ž.Aธ– .q.H ณ‘.xส๛$ท|.ฎztป&ฮบjำฝi๋ผG.D๘E >ง๋ห฿ฑwฯ๙n(m˜J ;$ะญ๗Nปฤผฟฮ‰s5˜๓r๎ ฯ~ํลื\xฬขุ้๓pแsc๗_”๐ืSŠ๓‰ฯฝ'๑้fMภชน๔บ0ฟฉฮฒw€Lœ๗ษญ๎รJ๕๔ฺ๒Iมh๏ฌ—๑j HีlMC๏๒(จT งภFอ„$ฮ=๕†CZโ(ัe˜@ฉิj๖sิๅคฌฅ`3ฯ้พใS]ทๆผ๎Žป?~ื'ปีw[wใํ์nบรy=ฯญMAj$๔)$˜$ˆUฅœpwณ0zj{ rŽwАส w—\๛+จUฺจ twะฑ>งัAŠ|m,‘7ว“จ๐FVSคxJฌ JH.ฮ๙ ะ T"}ฏฒ่แ๛dอถ\ฤ/„cไŠม,โท๎ud๗mŸVล๏๕v๚๛u8N _J8ขคs๓a sฃ็ย‚ฬaฮ…eู—๑ฆVzƒLqaฎ9:*ฝ๔ตt::๊ย`ไB๛E.์›ฒE.$=> \๘ำ1๓แฦ๑โBถkปsก๘+rก tqก‹๔ส=Ÿ$ศ#ชฝgEพสa๗วCCธ๊4 ฏว|7เยu็ฮ‡บ`ฉพ‘ี็E|ธ!น8ฎ/"ฮตh๗‰\๖Kโผqแยrแ@€‡Y}๊ป=็Ÿ{ บ7~‹ฎytฮc็๖U!Mลน;้๎ดฏดตฤ๙5•๚q๏ผฎ๚`=น ๓8๏<6๒Ž์ั5Wฃ4ฤน\f„ญ๊ฟ=ๅ\‚nๆ็.ฤ็คธK ๎˜ป๐ึ2^Xw๗qฎัkชE—0๗๏Ž8]ฉํฺ๊.๗\nz๑์HBzZ~ว5? ๏.Ÿ๏ณ฿ตK‡b~ลนg†ฌ )๎ณˆ๓—&q๋์[ล๑#q๓๓ใน}ึ”โ|โs›8_เ) ฒsNชบRะ‹ณฃ Ui˜q๎`Lา7+ฮนสMJ'a5ภ! 
์gzใ"ฅ*Uฐิ6ช&ฐw…ิDฉC}Sฑ๖5โฅv฿P:฿๚๑ผฬi•)ˆฃS๑w&ฆkึ|1คW฿rvท|๕ปป๋n}๊ทunwืšฯ๕ฯํ๎๚ธJผ&ม%nฤป„yMLห)Rp*Idt’|šŽฃmฺ_Aญ\๚(ะ%า}๗ํ^ซ้้Ÿs5™‹ rฃ#ห~P๛ดต่๗กtƒึŒ๙(ฬ]œ๗~จฅล-šg|ะ47URฅdกะ˜ฮ"ฮ็>Gvxซ๘“๛>vฤy9ๆ๋ ู–ส9?ึn‘ ล‡ฮ…šํฅ๓5.”่w๎๔๛ฮ…๎ข;F>œTNE{ไBตๆ™`ša_๘pš›g›9ึ.Tชl"๓แไBเ\ธพ|ˆ8๔ฝvŸศ…`mฆ็ ื] ืถ{็kwัซ โYŸGฬKœณฌ‰๓ฺHดฺฬ๓š8๗ฦ\ฝ6ZอExtีฃ@Žผบฝ๛>Z Q*๎)ํธัๆžŽฮzฌ$า}นtฅถ 1ลนฤทdsxํนœs_ฦm.ิีแs> >+>?e=๔ฅj๒ฦw ม\๔|7‰m–๔|ฟโผk@œƒ็๙๗aฟ ฟถbFqฒ{ํฝ|ช8a›ง็-ญ}s เI7ฤนFจล๚๑Ÿ| D•พ>hV„๙$ื( ๒”LึีิH5‘r~lไฮ AeI)8J& s@EเฆŽม%0อฉš)ะ$่8C9๘Tฐvํู9(อฉจ)ะ$๘TPŠPฟ๖ึ๗uื๒๎†น[uว9-wŸ๔)ž—ำโsHPIจภฯƒฝุ(คjŠ4ษQฏ‰u๘1ETuœž"w/๒ฆM>g]iVqโ!6HRฝ๋:4Šหi›ชร-:Pฅjƒs๚f๚ฎf‘ฃัzV‹9๋€ิ!็\A้,โw๏wd๗มXล[0ฟ8Oทใ•๕$฿„3švผตค๓฿œ %ฬ๛ฉ… sv‘๑aไพศ‡นP-+(ป้š ฎว}‚…นฒ=z‡w็Bี‘๓\จT๕1.„๛เƒศ…”$>ฌq!@ผธาECไ๎žวFo‘ ลe๋ส…บ@๊u์บชฬ$ฏC—w.Œ|่\(>๔๛๎ฎ;บ8w.ิล–)ลฉ๙[;ืธฐิœ‹ ๙Ž๖|ธธฮานp}๙q˜mwŸศ…`{m฿5.x\8hลyHiๅ!%พ&ฮOOฺ%บcน;๎พฟ\sDsœUวฎ!]”;b=๚๒0ฆอvฟˆเฮ=Pgv5NCฤโB{วuOS๗ดt‰๏(ผ.์ๅยืs‰nญKœื ”v\sฅฎGงEนƒ๗sฤนฦำ)[bฌใฟ\๔)ฤ๙W_ึ;เ.ฮ‚ฝ๒ำคฯฏ“8ทา ฟ`4‹8ํ$ย๗๛W๑ฤ{M%ฮ๏pYยึิํ๐)ล๙ฤ็“๘t‹&เ2&-yธšศ๗ˆไI) >C7bŸ๋[sะณSคบDชำ๓`4vnWJ`้’žS4%ึๅ ษฝี}(‚2R6™แK๚:nNัŸIQฯFA(iššํK วzIัฤย-Zqฒ _บ๊]’›Fา ีWžมr‹<ะ๓zKoŒ"o’คฺtu3ๆฑXŸฉ็iRJพ;้“๊8c}บ_L ๕`ฺƒ๋8ขศวจ‰Ÿค^‹ž01Uน‚šXฉuIษ$ฮ๓>|gุงจY"w‹”โœ#ะY‚Yฤ๙<๐จ๎_w<ฉŠ?๎๘iฤ๙‘ ฿Iธ แ{ณl฿5แ‹eห]Z@บn\ศ๗J|ๆ\˜yฒย…ฮ‰‘ e๏นPผVพณ=๊ปงX8F.๔‹—ฮ}๚m.ฬ\F.d>Œ\X2z"ฎผใ_2ŽqแŠฟ ๛ศ…พŒ}2ฤ‡ฮ…~ัาyอลปDบ๎G๕๛1]~]น0บ.เฝqœงธื.Tz‰ยด\(>4.tลน๘pCrก.VŠ ื—็ฝ๗๎นฐํผโผqแB ๔ุัฝ"ฮวs\ƒ9็HœวZ๒IณฮตO็ฺ_โลxๆrธฝแ\Mโbวv‡ž+Q.๗^ฏห๓||"วก์๎9"œ๔๔‰๓š—;ปฐห-๗qi.พฝ+ผ7ฅซอ=็|=mฝ&ึ]ดkผZ็|Fƒฆp.ฮฉ;วษ–ุN˜K˜gTœ๓H—8ท”๖(ฺDœใ‡‹P๚N๓}œEœ}๖๏^}฿ชxสถปฬ+ฮหqNKธธt^Mู๖k ฌ๏™pEยJาีห๚“ž{Oใำ-_œทจ๏6ฬไชณ,AiU˜—๐วzt’่€Œ[Dฐ 'จ8G9ˆ!ภtื<ฮ$—ฐร "˜Rค๊&ีi]ฮ5Kฅ(ธIฐ*่Kc_gN&KJ}ู“‚9V‚Nœ!j0qŒฎธ๙ูAb‡=ปFเVบืLบcใต๎์(xVs&G 8๕~$ส๕5rา๛‰อ–|jˆมsผภ (Xต์>ฒอป๋ยKpฮ๛ฆWญบ๔$่ทฉ๛4ว‘D๊&bฉฅู)*ใ๕ผAาฌทUฟ๑ดdšฤQBณˆ๓Wnwt๗‘]Nฎโ/v8~ำฺทDl ็ผoจ%>4.4}‹\h|8/ชนคด๗\จ‘h๊ำ .TjtษB้นPอว”9ไ"ฤ‡โBฯฮ?"5JQฏraBไBs80r!J™#z ็B๑aไBฮกฦ‡Ÿฤ…็BฝงuแB๊~!!raฬ8ชิฉธ0๐แ@ืธะ๙ะน็>t.ฬMใ n0.|๙จกฌsแ๚๒!โ๘๛์1‘ ม๎ะ5.๔โผ:ำ๚$q๎3ำญส โ|ี๊๚œ๓˜ล๙๕–๏KฏEณสฝ ๛าฐอปณ J[_f])๔ฑ๖…=ฉˆX„ฐjฝึrม5&Mยลนฯ4sสพฯ5๗๚q‰n‰iฤทๆฉkถบDนงฉ{ำ7mผSปบตkคœฤ95๘|ืฎ‹+ซ6จ=ฮนงขgq-qŽศโ}฿&A^๐ "ฮำy{O็ผฯYฤ๙ซ๎ป๗ฺ๛Pลำฆ็[:ถxVญฅฦ ฉมั ๘ Šี๙U้Ÿ้œฦนจq\Zๆ’‘;ช#W@I๐้๕• <Šไ!าๆz`๑กs!ฟ-๑a… 3๖3บ๙9jธsใย์œ'žยep!ตไ™#~ˆ\HJ;|8/z นื๓๑!SฮyŒ ๅฬืธˆ yd8Š ฃภwฎฎqกRใbƒป์ฑVนPใ์"ชDกฦ…>žR\X:฿g‡ (ฮ๛฿‡ดž ณˆ๓/‰๐7ภ*Nบw็[8็ึ๚‰ฃƒ*ข& š'™ำu‚า|5?t๎Vs5,๊;ฬ๘ธ0–š1ซฆoru4'\๕€]†(g P ฉญฤํ๑:rR2 ,ีเhฺY9‚:žG '์Uซ฿™๗gฌ๊5y^Ÿฏ .ึ‚ป(๖TMG=ไ(0๕๛ F Dๆึผ๏ฬพ‚า˜Fศ‘AซปN5.ื(6HŠŒ%:4bHiพr|›อถฯฏŸ^GiูuT7y๖Iข}Cค8D,qJœฟv็cบs๗:ฅŠฟญ‰๓Mล‡ƒบ๓า‹cŒ Nล…AจซงGฯ‡ฮ…ฅdค๏ย..TjบZqก7มt.๔~ ี{#rก๚iD.ฬมรดน ธ็‘&F.๔fo‘kœจ “ธPbน๓Œ\ศ๙>์นP*ขHฏ›— i]`ˆi๒ต‹•q>|%ฮล‡‘ }‰ธP๑ำk ธP#0ำ~Š ล‡ %ฮ=&r!8่>MœoRq๎ฝV_ฎฺaŸŸ>ำฮ6ูs9ใซB:{l ็๕ๆฑeฅ“บฃDบธ๙ฬr9โrป%ฦุฌ&gXฏห๋้นไฒˆa„ฒDตๆ{s6ฅฅวนไั ๗4๔uXวwท[ฮถ„4๏Csฺ]PˆcแโใQ ซ)V็ฅS{ฃ๐n9V)[่ฌj,T๚.!xภB฿จตTชNลณŠ๓ืํvL๗™}Nญโ{>พ‰๓M%ฮuqาห|ๆโร9ธPBฝ็CKy๏๋ƒ ณƒ.>t.๔R็B~โC็Bm ล‘ U;..„฿2r๑> \ศEสfฎ;ข(๗z๑Z๙Bpกr็B.ˆ 'Žน๏5ผ\(๒a่ฮ…ัAฏ]ฐ EU.t>t.ไ๘eT€ 52ด๐แฦโยYฤ๙ ุc"‚ƒ๏ฤ๙ฆเยAว๖"ธข3kิวฤ˜mnธ ๑(ฮWVฤ;โ\๕ไืT_๎ขRใฝผ๎๚ฒ"&—ญฮ0—+,\๋<&ก๏cืx>ฯ๑NํfO!WZนฤ6Koเ›ถy}8๐ฑg8ใผ.ไ˜๛น{6ภCฬ๐m1ซ w ๓xD)ํะฏzมF฿ŸŠƒงตป@ฟโขฎแZDแฎTx[_ะ฿G:?น็.ะม,โM<ฐ{๋ƒฎโด๛์ึฤ๙ึ$ฮ๓-–“‚ฮ€ษXŒ฿๙ผ~@ SซYR;๓UฯR7™P 9AlWฝค๊›]๐ฑ^R๚๚ดB5R`…ƒ’‚3‚F\Rั็€€’ ”%išิNzืวsะฉดฮ•cฬๅ 5Ÿ์O*'mH•.้๓zฝ^ฒ&ภ=๐œเ ‚Mุฝเ๖m!อ้ฅฌ๛ใๆ€ฅ„z]ฆฝ~†คB-(3ใ่ต8bHB]’< ีhจ2FMAi.ๅ5ิปภ]Eถi๐†ธQSIGb–8Gณึœฟ~๗cปฯํ๗๔*ต๗ MœoB>ฬ‚zฮม…=.TใBoฬหœ •๊ฎrŸž uAL*ใ|neYฟŒAŸ ใBr๑กs!"~‹\˜9ฎ๐กs!‚—\ศ~ั๖:๐…เBฟเ่(…ศ…สˆ\่)๒ž๖ฏ ‘ c:ผ|‹E >ฆธ๋BŒบsก7Ž๓๚๓pฑฒž”Qu.,ูโร qC ซฮ\\8Kอ๙ธ็D.Mœo:.๔Š8๏Owอ-ญฝ็AŒEงQB=Šsฏ9_Yๆ8ฒโช๕พฌ8ร.ฮ%ย}ฟZน O9อ.pตM‚U้ํ—u5Ts‡œฅDด‹uฅง+5Sิk 
b}ธ๊พๅ”K+]s]bt‘"ึ๛็็ŸงCe:žy๘๊tฏZ|ํ๏=๘Nxƒ8๋>h๔ๆaพ์{wไ‚ัz๊'ฝคธ/๘๏รรน@ŸEœฟๅAvถมUœq฿&ฮท:qฎ›ฤ๖˜๘.sUt๖๋ฅc,5oฬ`ฅƒlqŽbf็คเฉ8ขGษqQคj'่h์M FฝF‘4ฦปFA$ต’š 8ๅ8?ช5“ค์ฯ?ง63œ9%ภKวๅ8Fน๖R"พๆL 6kฬ ช ๏฿๎Ž9’S0š฿C @ฎบ:ำ{PZ0nๅ.y@ZK‡๗บLOs๗ฟ“ฦีๆปP/ณ์๛€•เ4ฬ9ฯฉฟๅ{ขฮีr‰xŒั๚Nา‘•‚\พ}cCgฺ†H๊ึN@:kท๖?ฺkqwมงU๑}žะฤ๙=ˆวธะ๙/w.dด๘ะนะ๘ะนPiห=Š ๅสF.T๊บ๘P™Cึ(2rกาฺงๆยาL-r!œŽqก7ชŒ\่Wœ๑y๙0nƒ๋xฟฮ…คต‹ {Žฯลš๕ศ….ไkฅA!W่q$›gล”๋qb‰อ9WMyฯ‡ฦ…บP)>Œ<—.๔qชำr!p.œฅ[๛ทs"‚‡฿ฏ‰๓{าุ+ย| ฮ-ญฝb๓๔UธP›qŽ๘“ ๖nJgiูžพภIt*u[้ๅ^[ปบjฺ5:Mโ›ฒษํ๖นแ.ิkpA$ศ— wQ๎อู<ํผ6&N้๙สจอ‰๗ rิc'๛eๆšป0WJ๛˜0๗ฺ๓ส๘3wฮณ8G|q~ืๅ฿.7ธ8/]ฝI\ฟ=tu๏/ฌC๓ธ.~็f็พAY;<ผŠgฏ‰๓ญVœ๋ๆB;œš5˜(WZq?ลœ&ฅ<†;ƒ ‚ ฅn–ฬพรฏj๖|ค—:ฑ{—_Qฝ!Bต+M‰่D\\RIฤ5n9K‚S9I, X”ๆ็s‚ัาู็e7†ใชฎหXจภา๋kAงŸฅกœ ภxœ็ฉ๓วำพ“ฮEว๕ ทๆด๛ธถZjg์^,„Tพ๖า๗ใX›rสŒ฿โ•L‹^ฤ”๔]ั๐ูโœ&Hr‰HแœUœ๑‹ป8ได*nฟ&ฮ๏I|(~ƒ๏ฆโรย…๙ป+>tืพซc\(>t.T–‘๊หล…โฦศ…E๐ๆ฿qไยย‡ฮ…๊`Œ ูถsวธPวธะ๛fx ใ›ZŠ๚บเž– AไBฯ"๒Yw๑ญs๐ืt.Œอ่œ 'eี๘ฐ&ะ็เยพi |่\จ>ชiW)CZฯ|h็Š็๒ถ็tj‡ g็OJโ|‚Cš8ฟGล†YDษแ๔4ไ(ฮ-uฝ&ศ\˜ืjสwqฮv sDค๊ว—šภ๖†nจฬSฏๅ๒zฃ8ํ็ขV‚!ฎฺm ๏วVผRู]ˆ{ƒถ่x{jบ;๋ฑ†Eน roุปฆGx-ธ‹o ุ้%฿็ฮ_ฦาE๘h:๖[š๙ œsk ื_์๑ๆ†+^8‚ฎห{-ŠHฯB๓8jM.Bˆs}๏Pœฟ}‡ƒปฟู๑*žu7qพต‹sƒt_iร T๛}Š(ว้ฬ‚ำญไฎฒ๊NL€ขQ55r#$ฮีNAA A‚ z*ฎIส4ˆฅ๛ธ>”%(iฉžู R#!P\ง>ี“mลพ”sผ๓‰qŽ7ข๖˜gีVrnW`ฉ€ิฯUi๑ Xํ|ว0ลz๕ุ$):่๚๛y*ฎws/’๒ใล™€T•Nzg้เ7ย*ฎKw†ด”ฤ๗./๙๒ฤUZ‡ภฃš๕ปฮ๙›๗Y}๕ˆำซ๘ภAOlโฤ‡‘ ณ`\ุ๓กqก๓แ€ \(๑ ฿9ฒอณ†๔›’0Š %DKธยi[ไBtD๙ช‡EเBห|ธpะ-๙p]นp๊~'ฝฮ\\XบตxNฯ7>ฦ…rุ็่ฮ‡ฮ…๎ข;z™V˜lั๓กs!๋โรศ…โC]ฤฆWม'~ต็C็B็รŠืEœ;ฮ"ฮŸฒรžนr›8ฟล†Jsw๗0มn]ฺ]˜ œ๓[†ุใHต่œปP”@>๎LตาฑฑYะฺๆ5ี<ฐ,คzณอ;”#Šu ž+๑H/—รํย:ึ‚ณT]บ\q u‰๓8โŒ็๘่ฒ˜r_{ฯ~1Bิฏ/ตเบ๐ฑ2tฤW6รชp‘ฤGฦฉ๑ž:ี :สเ˜Ws sพ?วŽ$ฐ๛ิu9ใล5ฟ{ษ7ปป/๙๚iฎ'o๏ลนีš‰s{ม<๔u็zRฝ์#ฝงYฤ๙฿๎tp๗žฉโ9hโ|ซ็Sฎ ล!b=;Kž๊้i๒ˆsฦPlˆจ๙›fuพณถูๆcyJ‡๓1|Y0๊N8Ž)™ˆsีใ"‘ึษ๖Nฯม)'€อช‚ป>ทึšไเ๘๖่LปpŽมe 2kๅ) ๕@ผvN1@Žn’Ÿ๋\#Œ<0๕ดN[y๗bw”ชนA,ว‘ฤ฿˜บI‚OาF !f,ฃง‰`ำ๋…hjผ฿K๕@`ษwัิงO”2F€”tฮ…็oู๗Qื<ฃŠ|x็›+:ๆิb๑กงภธะ›a:ส-๗YJ}๗q_โB๑ŠDณsกฤkเB๕ๆใBนฯ sนfค;ฦ‹‚ฮ;‘_“ฤ๖ด€ o;wเ๎ghzล\|Xใยศžu4ฉw‡sก๚x๗ศ‡‘ ล‡โBž+ฑช\ใ๕ฤ‡ ล‡ฮ…ฮ‡‘ ษPNร…ˆs๘ะนp&qพใ^น๚€&ฮ7G.Dจธs๎ขฬ็Bวpบ?I˜วนๅ>-Šo W UlIhˆๆณ.7xฉuoGหนF { ฝkึ5jฬ็„๓ฉฟ$ๅ8ๅ ๗ฝ๑๎฿ท {ี {ฑfฅืsf๘2๓z๕๏Ÿ’ƒP sนE ‘ึงื}s๑3ช๘เaOjโ|3็ูQวก:ธP™A‘ #Š ฝฆs!ฟ๛ศ‡ฮ…โŠ5็๕๓หIiื์r็Be 9faฎZ๎:บK]sมBยฌ\"*ส!Ÿ cIP์%R่โรศ…ั]{๕ๅ€ •Uดo‡\ศบ๘0pก๘ะน๏ž๓กsกŸ”IนP|่\8‹8ฺN{MไB๐ˆ6qพYŠssฯ'มำูฝœ sAย\)ีช๑๖z๊่{ŠทDนw+ฟ์บ•cออ”๊.ุkฌ™๗๎ๆผ›ืำEŸ๎ยZ=]๓ฦ%…่ด๓\v๓ >G™yณ7๏ž.aฎฟร`Vฝe68$ิcฺปRพOtใวsซ7sี—/๛^=ไ'!~๗พา๕ƒ/ww]๘…ผฬ==V็กใปsะงจ+ญD๛Xปฅภ๗โ\๎y๙f็๏฿๕๎Ÿv{Dฯเ๎Mœoฎโ|ูณOหุhมจ:ษฆU็"]ฉ^›žkๆ$สTปงฦGJ๋$•ƒคz>๛ฅ ฑ4‚CDw฿กWAœค<ฆๆpšWฮŒฒิฌ_9Fฺ‡ 4zp z‹ภ๏")จ๔`ัื•"๏ะล‘q›oวะcลใฝ็เุ/B่yฤz๐;้ขAอQšิ0.ฆ1ฅS‚ยว;i›Rsำพ๙xƒ๛คwช–šL‚P"พgิ]j”๊rำ’๏SOžล WใคI%–ju็ฝ8ๅฉ#ฆ%iํิYาi๙๓OIœลร๋พ๛ุ3ซ8็ศ'7qพ|ธฑน0๓กsaqะล‡c\ˆ“้\จ†c|ฏ #–ฆqA^๑พ›๓Qษ"Š\HJ;<นแ]ๅB ๐ภ…™ฃp๖๛๓qZไยw๚sตOษจrก๖‰\(>œ… cMฏGw>*ซˆฟฑs!หย‡=โ ำC|ธP|จQโย๙๘pRŽ"าม+Opแ๚๒!โฤ]๖šศ…เฐ5qพฑ!Bhc‹๓~>zh็b/บฑ๊๐-จ&Zฉ์๊ฤ.H„๛mPซห–“>ฉหx์\๎ฝึMฏ-\b[ฉ๖<.ง›ฬ้\j้๖ฑ฿๗ฎ๓—…ฺy›บัซู›‡งฒฏ,ยผeๆอl—!ธ‹๎WbฎซVืz์าP+ฎuฯธๅธใˆ๐„^„K฿๔฿l^๑uw็ฟuw~๋8.บwo๏9Nb฿kั+{qnย=Š๓ใส9๘f็เCปฐ*^๘ &ฮ›8Ÿ6๕q/ ฅ1XอA*Ž9๛+•ณคb๖ใ‚ไ’ซม‘rฏm–cNภคZศ”)€$เ์›ถ™pวํ!ธิ~žขฬ๘แ gw๔์๎ชี๏์]#ข1_sฐ๎rอ(%TižcขุHฅ‘:ไ^9โ>z^์Žd๗#ŽtZ`๋“ปต 4ฆฟ{ ๊คt๗ZS$‡ๆัKx๐H๛ๆZUžฏ๔^ž0'๘๔Yฟ|pีซภา= .๛€๔ฌ็Žพo้๙r*ว‡ะ,)/eฦ๕-ฏ;}m j™พŒSW>๏้3‰๓ทz\wมใฮฌโ#G5qพูŠ๓ภ…ฮ‡ฮ…=:ชกšฤy =ƒศ๘ N@Tรc=ชT'=F.dœ|XใยA๖ธPฏธฐŸ>้โb #'ฎ–cŒqแ$>œฤ…‘ kโ ๅ +ฝฝฦฮ…*๛)๓ไ๛๑ขโ9๘0rก๘0pก๘.์ณีJื๗9๙0ra™(ะ;็… ๓W นp}๙q~าฎ{MไBะฤ๙f*ฮ-อWยฯoฑ#๛Š•ใย.ฬ5–ชใ–ํโลปืdK G+งyนน็๎Z"Zฏ!ญิz ๋8<บK‚“๏)๋๎Ž๋\-=ึ€๛ี๕{ ๛`nฝjงญ†Z๕ี7 บ๊1ำเcCธœฮ๎ฺ}tšนๅู'mง< ๓,ฦ]' ศoฺ9ํ_๙๐H งวฒˆ๗๎ํIŒ๗โั>8๏q‰๏๖>ฏsž0‹8๐๎‡vใฐ*~qป&ฮ7[q~ลsŸžม–%ีจอfM่เ๑าq;/้L์\œ{žGSภฃฆ;8ๆD ขสุอ๐ลฝ้G่”€TฮŽQR8 F/พ๑๙+ำ}็๚๗fเQsฉภ4Iโy{ภ๋ไ`Wฉ“ykื=จ ใ`฿ห1ุgM8v-ˆ-ฎนฮ/ฟ๏nŽืน“^K5Aฉืส‡Ž๏ฝจเ๏ใ#ึิKภ†7J"ๅพfลำะจtbฯ๕ๆ็;ธ’i™%ค้สา่ำ4?ฅq’7‹›7x)โœ%=;่,A 
HIใฤ!โทลolq~ึแ๎.zา3ซ๘ุฃžาฤ๙๐แฦๆยฬw ำฤ‡dˆ๐]FศIุ๙ls๏ฑ:–™}VOแBŸiน€#"ฦแรศ…š‰นPcูึ‰ ฝ๗ว|8๖๘ค‹™q=ํ[ๅยศ‡ชINร…ต:๙Iฝ9<ลณ‰โK็Bน็๊บฯบšฤฅ33Fqฮ๗‹Q|ฦ…ๆ๊uะsaนˆ>-ฤyไยย‡ฮ…๋ห‡ˆ๓“w{"‚รทkโ|กฤ9"kฃ:็ล•ะ๓›งDฏX9>ฦหล๚5ฅ๑›œrA้ไฑนšเฉเž>ฮv f้—Yw๖ๅๆšซพ็€/+u่ฑ๖[ŽธDvmœค‹ฑน]ํโ~~฿ปฆ๛gฉT๓•^_.Amู<5ปOu/๑ส๓c๚ช ฬฎyล1๏]๓า๐อ๋ษๅrศ๛eA็_`^fมž๖อu็E˜KœณT'๗u™}๎โ๕~=บ็ ณˆ๓๎yX๗‰ฝŽจโ—ท฿ฃ‰๓%ฮำํ%!ม>๛์ณ คจ+ู<%ลq.คภaฬY'˜`ซา๔ิeXฆš‹ษRsฃา้ทO๗TWvฏ9Lมมม#$อŒrMค5’8'%เpˆpŠ”r_๎–ฏ~w^qx.ฮ‘า&๛@rอy“๑ B\วเผ|ื=ฎญ:–ฒAi-XŽ้ข“R;kฉ7sškฐ_h๑ Tจฏใฑไ๙lc:้—ฅVR4ฉพKชลL๐&Hฝ@/ฃฌbฝ๙ผEqิีU;7๏JKฦ ‘ึN@:K0*qืGืเ)gV๑ฑใถ^็|ก๙pcqแŠ๗Œ}[ฯ‡ฮ…š นะ๚k8ๆฮ่ฮ‡… โรศ…š\นฎƒ"xๅป2Rใ\ŸŠ\Xปˆh|ธ.\Q๎งๆBO‡\8ฉgวคฦv…'Ž[ซqaไERำล‡โByไร(ะนX –ืเ9ฮ‡=๒uไCœฬ\จ้+ร–๘ะนpq~สƒ๗šศ…เ๐ํทNqพะ\(มตั„95็EะีฤyM”+u]๋๎\/)ย\5rสี@MK.“s๎u็ฌหU๗ฦjrฒ5ฏ\B\p๗|™9๋q„›DuL›— ฏฅฃห้Žโ_อ๎|฿oY˜ำพิ๖Qmนื†ฏช8ๆฝธNuLื฿อSี%พ{ัacฤ<=ึฌ๗โ\ฺฝ \i–]๓$ข{qฃฏd‡ผโ5|ๅร:๛๒Zjป๕พ)\้ไ>@›ฝ๎โฆ€Yฤ๙ฟ=ไ๐๎‡>ฒŠ_qฯฉฤyบš๐ฃ„K^Uy|›„ฟ*_plู~Hยw +^^{}ย•๖ุiอ9_ว›jภ$เLภผมจฦ^ษ5*c^๒>ลํว_yS7œ๎8ุtส/NmŒฒอำลSะEH *qงš— ฬ$@๐JpIเ‰cฤ๒ย๏ษ)๎ฉจ€วิฝXi,วฃ9„ธR-น;๓A๛วี๏๓ธFฑTฝ๖œ_tด&นFต†HตNหQ {:ง ๔8ZHฉธE,Kคพฆ2fcฤา ึIๆน้ตข8'€TํฏjะงฝiQŽฉdŽฉTNฅKฯ"ฮ฿qไqN|FŸxlkทP|(.คaีBpแผ|นqฅฑWส!Y|บ้๗d\ุ๓a๚m๖ฟ5,.„7ะโรศ…ฌณOไBsx0rก๕ศ…jนะ๙ฆ–นpZ>œ† ม$.sๅkBฝV‹>ฉA\ญ๋|ญิ'f9:ส=/ฎzๆB s๑กsก๓!u็… ๙^9๖\H#นuไร<ฝย…นณ{:ฎsแ๚๒a็ป๏5‘ มึ*ฮš ]Lmhก?pอ+.ฌฤนใr!kฆŸš!ฎ๚n ๓ฯ๐š_|ฌๆ[ยฒคgGq>็ฯื฿ีณิvOw+Šs5k+ย<ื™ำ- s ทผโAค็uน็ ฝ8/ึKœ๛x7]Pจ ๔„Yฤ๙ว๖?ฒ๛๔GW๑โ]๖žWœงถ Kเึ„๛&œŸpXุ็ด„O‘ุ„špœซ๖5q{-ญ}ฦม(T7ธ8w<%ภiฝๆ)ฅำw฿]ุ‹ใฃp†ิิจt)—@%ะฤูม1R๗`5‡#@%`%U0จŽํ›4@โ๙gษ๑๔~>Xวจ9฿ตภSฌรWƒ฿ ฦใ๓>T๊๛MLA๕tำiา:kฝๆEื/:่.ฮ๙.จแ•ๆ9G็RJ)มฅปuœAwjOวา ฿iRŸ›๋3 J้พ–คrา‰@๔'ฯšMœอัวuŸŒ*ฮ=พ‰๓…ไCu“ž•ง็ฦ…y[ไC–”nhlšsกฆฅsฎTwรๅฑje์Z/ฮ้ไพ์{q>ี.๛^aŸ“พj๗›8_ˆ่๚ฃ๚]น็€4Rฅ%๔ม(W้‹0Wz^FI#๘@Œ•Hƒ๑:)ะ้ƒR5ŠโัšyZ7ม(NโgGม™Fไ™ฝๅผT52rWœ็ณฟถ H•6้ฎ‘ p&k๋SPW๘ดO P#ุ‡`”๗ญ†x~>8 ๔I๚ค&qต๙ฟ๎ O)"iิ𠀕ŽฤŒ าwส๔Cไค๏Tnฒ• 4ฮ<6จฬฮฎ๗๚ธEโQทI*'nัฯ<ฝ[๚3งฯ$ฮ฿น๘Q’ำฮจโSOhsฮZœฯส‡.ฮ็ไCใยศ‡=jฆ9|่\จ฿YไB~cโCJฐJ Šล…}ฃถย‡c\Xฒ‘"r,9ๅฮ…ฮw๗.ิkNโB๕c=rก`› 'ฬCtoฏอ?โ<๒!bZ#ำ๘๐=ŠWแB–p่นL 3>œZ„%>Œ\˜ฆใ;ฎ/"ฮŸพ็^น4qพp\่kŸ[kiดฏ5pัฎ ‚<:สKm|™&ำศ1ฤ7"๛c฿_}๊#Ž‹ฮRโ›ลIจ#ฮูจQœŽฉr>ณนนp—ำ^หvYฅqาJบผ7l๓Fxq~xฌอW ๙ค\œวZ๑A‡v.”ศลณบk‚ผใๆ–๖7๎เฉํa|š๊ทพq~ษืืvh/เฒHwy_žภhต,๎MœgW>Š๓"ฐง2nฌึ‹8ไกวvŸ?์QU๊๎eงwซฅเ%แฯIxQย_‡}ฮM8ม๎1แQaŸ๗%ผ,ˆ๓หK<ํฤ๙z~๚?Oส˜Eœซแฬ\มhUฌ R H Lqฮ}ึซDบงชห5"P•#ไA’ถ• Rม(Ai™ท‡ภŒ R ์”Rูู‰ ืบตฯy.Aก‚@ฃJๅฌ‹ต SŽ฿ฮkิŸใฯีk๚๋+วาธฃšใT J็K๋ฌu1Žณ‹=้œ>N( tค|'4&J5B๏‘jxฝžWฮŽQฺ_มจืXๆ&Ji฿นFๅ4ฯR[ฎZsคฌ”J๘๓[เุฆ/;c6qฎG-ฮวจแำOlโ|!๙‘Oณ๒aๆย"ฬงๅรพฑaแรฬ…e9เCqกJzA‡ ๙}‰๛|ธJ{๎ฬ€ ฝžt.ไ๛/>ฌqก  •ึะฬนไB%|่\่ผ9L##‚ึ.>บจŽ\8‰๏ฆแษiนPi๙‘ ใลอ˜&?ึpnฅป;ฮว‡“๚qฤqqฤZูฮ<_“,ยwษะ่uใB๑!Nwฯ…ฅ!\ไร<= n8pZ.:ฮRs~ฺ{NไB๐ศš8_h็|}o++ธ€๑ๆ๎ฉฤ9ขQ๎0๋็J๗ัh.š%ŽอเZ ~ัฎnํžึิี๑-p\o็ฏ๏ฎปฯPWš|ๆ“ๆŽ{น8wzsหWšะ๖ฯXuใฑz[๐นFม๎Mเzq-ื\๋๊ิnณบฮธ๖Qฃฟะ™=žฃ7|ฉu็ƒ†pE@ซๆ\sอ]t Ju—0ฯ]ฺyฌฬ:ฯวAŒงc๖๕ๆฅI\ญๆ๗$q tu˜บห,โณว<บ๛าโวV๑า‡์ณQาฺำํ™ Ÿ›ใ5๖K๘^็๘ำ~}ี •ฮ๑>่ โ<ŸlฃงˆุF'W๖|(8ฅR้๊>1.๑ลน5?’ณ#งHฮ‰r่,’๚่คx˜z Zแ,8*ะไุ@๕š๋ =_จฌ8ธฟgwhืาO๛L‚ุ0ฎVแimคื]ฦQy๎้๛ภwC)์ฯ:โฦQงJ฿3O=ฮฉพทIDลfq}“คRณ™๋ษSฐZK}ฯ)๎g=wm*gz ‚TFชแฦฮ"ฮ฿“ฤนj5#>ด'4qพ‰๘pj.… % แ8๑กsกาฺ๙ฮ;Aึs ;ไโรš8/]ฺ=“Fcะฤ โ,ึลcฮ…พ=บืฮ‡โBํน0Šq็ฑ ษ…zศ…~j\8ฑ๔ง›ภ…‘ฝฬ ’^m˜)>œไž;Š?ำ*R&a\ศ๗าท๕5๊ๅยRญyฆบนOห…ฝ87.\_>”8Ÿฤ…เ‘;ํะฤ๙&เBื.ฮ] {1เๆอว๔|นบ^;s,qฎ‘c^4.ถๆ•หัฦG|ScŽ[Xwqฮqฎ๎ํQ\{#9ลนืŸ{บRฺuŽDnIฅ{tฬง็rฝkท&ะ]ฬวฎ์~ŒฑnEd‰๔8ใ< ๗ส๓(สใ2ขฏ9iํŒRปโขตฮyฉ9Gœห!—๏๓’สž๏#ฮๆ4‘c6:ูใ.ฮหxตนฤy-e},Nเข‚7‡c{/ณˆ๓ฯ?๊1<๚๘*~ํaS‰๓{'\–ฐฟ5„;<์szh๗๐๘‡~9ls๑;์ำฤ๙Bํ>=#ฏฟ๎๔\? 
ๆ HหศŸพฎ-ŠswƒJ ž…ผŒฮนฤ„เtบ8W@ไ๕}ฌ๓๘šแุ‚-09& โดนืใuฐZW๐*1๏ตฺ฿ƒEm๗T’k๐๑Eq_uTฎฌ N FษP'ฤUo๊ฎQM ฯ™๊ำุ0ษ›#•ัwcฉ7ˆs๗\ แผๆาSUƒฎ๏›พ_r๙%แฃTอ์ ช1แG~iะIฎQฎร$M฿qฮ่ ‚RR:qูำo฿ฟีฏ<5–f็๏}ฑƒ1DŽ/œ|ยผโ<–๐ฅ„$|?แทห๖]>ŸpIY๎าษ\่|่\จqS.๔ฮูฦ…ฝ8\่|Xพหcฮนsกœs๘ฮน0๒กธ0\จิH‰s็๑ผRใBw˜ #j'7๊ต#๚นฯ็ •ฤlขz9P7ฉQ ฑAœ๘ะนP<่\ˆpื>ˆs็Bพ3ฮ‡… แตž ๙ธฌ ๔ผยKP33oงŽ็็iD2โYฮ6๋rอใเฃ^ีรso''\‚kสๅlื๑ต nw๓•พ.h›บปGa๎โ๋่•า^็ส(VNH]ืใž๊ฎt๗่บO๊ธ>pยฃ 7Qฎt๗>ๅ=คปG?^8ˆ Ts7 “ ฮy็9ตย/ fœg!^ Gฝwอ“˜ฯข\s!‹๕2๗< t็ผ~5Uฝ4xซผcป7†›ี9?๏๘วu_y U๚พ๛M;Jn์—ฎํฏ)~ tkGฉฝฃ<~กื›งV$์Ž๙ฒ/5็ ไš8_อ/?ฑHนฺํโœ๛ŒŽคjฬb็aนE%๘์RอmUŠปืXˆส5JAJd\ zอŸืXฒญธช}T—aนD เิi๑*ท'}พฏxจzภฉ็p‚@ีน+Pิc@ขูทันถรฯูTภzŽฟO๛๔ิฮ(ะ็u๔๗๐ ิƒั(ฮใsฅtฦšKŸํซ”Nwาต”8'e๗ัEพึฌช›:มfฎ.5้9Mณtkฯ†2ˆq@Sฮๅ๗‘ง”’โI ส๓f็๏;แุ~wฤO™Jœ๏eณ)ท/D|Xย[5ื’eย[Z@Z็Bxะ๙ะนฐ*ฮๅZึธP|่\จ%|่\่|(aฎดv๑ก~gVW>hgใำ$ฮล…โ*๑’s!|ม~‘ ล_.ๆล{J๗mj8นะๅ\นo.Œ|่ย=rกฮ+raฬ ชฅนG.ห(Š+ '‰๓X..TณLอ?๎ymšEแร"ฺล‡ ล‡=‚าฤM|8เยrQsZ.ฬข?๑กsแ๚๒กฤ๙$.Sˆ๓ฦ…3บใ๓AทUApG.ฑ่]รๅšk ˜–j‡จEไส5ืผrฑ่–8GŸsม•=ไžณ?O๎ธ„นเข]๛)…=6}“@ืvฅฏป`i์“P‹&พข‰oใแZ ๙ฺ7–Œ5‚ซ o‰usส{w‘Z:นปH๗ฑiŽxgฅอ=็yUqฎQj็jงv่rกธ่Y qžyฃ–QŽ๋ตๆฝ๓‹^ฤy9D๙ภiท๑jžฺ>‹8า ๏พ๚ฤ'T๑๋๛๏?•8฿าฑE‰๓ีฟJ()jช#9•๑Q\Qื;@Z0(U“/– R *…9Šsu#.ฎx?FอSำRRำ5ืVมš Yน+n~gŸ~ฉ€%ม z๎ฦธซฎเTวำs5˜นภSฃ›W฿rv<ท=ฮs:็xพ,yว@5ึืา๕kต—c#†โkฮ•`โ|Poฉ`4ึZบ[dAh฿— ธAฝ˜กBฏGะห1ำ๗O)œ}7แ’ๅแhv‡๘๎~์ลฃฦG๋8\๕้<!G€;‹8ษŽk๘า้_็ด๖t๛XยI^;T‚ึต€ดฮ…็น`เB๑แ€ ๙8†N์ฝ๐ยqโCqก๓dไBพ๐!ฟ็B ฤภ…ใi_q!ยSโY|่\ภ5.”นPขนPวŒ\(>š– '๑แ\\จใ;๊=G.t>Œๆb_Žุ}b6Qญซป.TŠ MœWปทO็‘ ล‡w.,c๘tQT\จ๒›9Wฦ›ี›g.d\Xพ๋๋ย‡™OK3Mqแ๚๒!โŒ}๖˜ศ…เศ]ึ-ญฝqแ์โ<6o๓๔ๅ(ฬ} \,บx๔๙šแhE์ส5Wvอ,ืr‰sRู็>$ฬY๚(ต(ฮๅ” Qœซ6=vgwqพ$ta‚ำฺc๛ฒเข๛๘4ฟPQ‘Gจ]>ืฺถIฮ๙Xํธอ๏ก‘jEฌ๗ ๏๙๊ะฤนฤธปB฿ฉ==wpl็Œ๓%฿‰sšม™8๏ลธน฿WZ;โ< ๓าhฮrแ–ท๛็0ฦy็ ะ๎หO}b๗Ÿ'>นŠ—t@็[ZZ๛๊Wœ”"5ร๊ะrๅ<kโ\ฉn‡ศๆ˜๗้›@๎น  –ก  Jqฟ๒ฌต3~Sคyด9(๒tB-Kฐป Gwˆ%ใ’›‹mถ)x$๐sั…ฝ vgข?ผแ์๎ขŸžืy-›๛ร5,]๕ฎ~้`ฯค๊ธQ ณd?๖ฉeฤดฮX‡^ซEฏ:FžZซฎ๚ˆฦฺsคj Gซแ๚H€วNล|/ไŒ+๕]n:ฯ! ี๗ ฝ.วฬ ’€’@GH๑Tะุ‹๓จฦ™่ำrญ%Aฏ]ศšEœ“้็oGว3วN_›k\Fฅ9วOvHธ14.์ปช{ณei_็B็C็B–j'.”ภ–่† yL|นD.Œ|จ๛‘ ฃ`ฏ]ฌd=rก๓aLฯŸซ/‡:ำWkะใ…J็ร˜MคF™ฑI&|่\่ฮนs!๋–)4ธ๐อ๓า1 ณร.>4.Dˆ{ั,\(๗นp}๙qŒ}๗˜ศ…เจ]w`ว—Lร‡ ืƒ'qO‰žT[<ษูuฑ้‚\๓ฬฯว5Wวt„5B!qŽC.qม๏^‘—vPJm็8๎œซvฟ้‚€;็žพพdŽ๎๑ช;—@_bzฺ่๎ฃิ3‰.|ฺ๒๐XL‰๗uO‡คฒ{น„7"4บลE˜fจY›๊ฉำ}?F็ซ&ิผฏra^œ{็}ฝ๙UงดKœKŒ[J๛@”ณOY๒<5~หฮyšึˆ.ฟวyœ๒‰๎yE ฯ"ฮฟz๒“ปo>ฉUึ!6qพฅ6„๋kฦH๓-อa๘z0ห”ิค”8Qีš+ๅ*พถFพจฉ‡ฅ Dน๊้ดŽˆืผ๓ปF)…JฝHtว"ญ+ๅP+–า. 
ฬ$0ุษY!ะ๓€4z^ ้(๗ูŸภ‘ใ^|ใปs@ส’ WฏปHฺW=ืทq(ฐb]&ฟ ภR|c ฉ7M๒ฦH^Uƒ>่Z์ณ.ฬ}ฮฏฅถu)๖ZKฅ^_้แ|ฐƒว–๏JvSฐส๗RไL.-๛๏} LืปQอ๘ญ””ัYำฺแฉG๗”"พฬใงvฮำmป„N๘ูrฟคS๒ ็ย‡.Dl๖\จฮุโCๆโCqกjศC๐ุEJukื…*๑a๙~๋๗ู๓กqก.Pชฟa๕เF.„oเจศ…โฎศ…โพศ….ฬ แญŠื"Š"๊๑ศ….าใลU]„ˆ\่MBcืw็C}ึโC็ยฑฉ๎š;:สีฎqกฆXธ@)uงฦ‡์_๘ะน0_(|นP|8+ๆ๒ฤฮ…ณคต#ฮ'q!@œ7.ฐ\X่Tqvu์ ๎#พ\ šeฎดnญซK;โVอฺิ  ]็rฯ%ฮ] {๗v to,็ฺ]œKผปุŽยวง j ๓8\K[Rใsีแปจ_Zi(ทาโkโ| †kฉ์E|k.๗๎ฆWปฏม.๎บs5…[5ก)เ"](จQCH‰sอ9ทิ๕&ฬ{h์Z™gžล95ๆ๛'ŒฤyHYื๛^ฏธะ…พีฯ"ฮฟ–D๘ทฮ8ฑŠ฿zฤAMœoฉโjฝฬชsอฝิว3…โ อ=๗tw?+>4.ไœฤ‡™ )ว(\8hฦKฎ:ฎ/"ฮฯ๗‰\Žm๛ฎqแฦ็Qค๛Œ์˜ ฟชRg~9ฟ>ร|YจฟV8u@—HFL#ฦๅ€;ไŒ#พฃ@Wc8 xฅนKŒว้ๆjวRBpๅ.ฮ•ลe•ฮํš฿ฎด๗Z9 ๏eๅq]ธXRัถ4ิฎkyฝck๋ศ] ;$ฮ‹`žกN]โ\}ผใ|ต1ฮ‰๓(ฮ}_oล๙%_๏๓ชc^เ3ะ5]ฃิผ;ป„z|orปื[Syว๙’e0‹8ฏgžุ}็gOฎโๅGฤ๙–]KLฝI’บO๊`\ญ=ฃ„ผ๖kะ%ฮีI]œ#X”ฮ)๗Piฟž).งHiม%๋‚sเ=๑J5( ช๔ฝอ฿ู$ผb—โ™ึw>/ฅณˆ๓:๙จ๎†—œTลWŸs4 แ่ถ๙ ถฟ-4Azk H็็รฮล‡.,<ุ7„+\ศoก็C็ยว›€หV–gฺ้?้คฏ๏)ฺ11jาiฆU8jœำ&1ฮณ&šV“h4N1qQAัดั8ด#DA'ิจ!Š žsฤ*"9œ# ‚ิฟ๎uพ{ํงพฝj๏ฺป๖ูใ๚ฎ๋ฝVีชUใฎz๖๓ฌ๗}Ÿ7…zk,ˆ s๑฿ทB“ญxXc!˜๖ิXจ๘ฎฑP<ฬต่N,—‚… E… ๖ น๗ึ‡…โaŸH' ๛ฬโฦJมรIฃีj,,x8†…5Š…Yฎ0ื—@หหว1…ผf๑p ็ๅ{+ฎ$.็๗kฤ๙$,$็ฎช]ต๛ฤyŠ๔4ฒฤฺ๒kวคe฿ต1๋p3ๆม)ฬอ˜ื๒v:}็sMแ๐—ฬนยฒv/;"็๗ฒฑku&ฝvkฏวชM2“K1Ÿใู๒~๖ฟ{!GมyK่ำdŽฯฌ๙คŒu=F-ลv]าeาณ๔Q๎cœZŸ0฿ื๓ZผO'ฮ‹ป๙Xๆผณ๖ t๚ฮ‹H๏f›gึฬe$Iuฎ5ูข่=‡0A0 G:่Zๆ ๙ิœ"ขC1,{ฟ!ั}๛ฃO|๗mฃฯ^๘ึ– žzั[[™โ\ฎฉQiYจยš€ ๚˜\f๋ษ‰ฅว๓|๎ๅ}‚]แ_—~r?๏mํ๕,{—”Jฬk.)]ศฝX3คŽ”๖UKqžยผš๑ ๕ฺษl‘ฝฮe™ฏูDข>ฆฬ…Fจ๐บ,%œ€jณกœ8โนp2&ซ^ๆ๘ฮBH1@โ๗3ซ!ป๎~๓ัวQo|On;8ฟcฃ2ึโK%Ÿq&N)ใƒุ^g ค‹ใaba;6oนxXศ๏ฅรCพ๗|‡๛0ฐ›๛‰…๖’๓M,4kฎ อšง บX๖‡เ˜#‚wŠ๓ฤBฐOAžX‡5๒xโ\…;'`แคจ๑P,๔„e…D๚9$fUQ…โabแ˜9_฿HตIY๔าs>ฯ$ณ1™x่Iษ4 ฬส ณ์้Rฐ๏Œx(ฺฦ&ฎ(ย#f4„ปo^o"าขโ|ภย4ˆK1ี‰๔&–บ๋™-฿U‰Uฤp–œ๋ฬฎ7^ tKืunwนข’vGช)ศsœšณหศ)˜3jั'ฬ๓ุฑ–ู๕|Ž<)‘}พฦ|†Fv<น‘MW w%ํE”็ตyณฮ๛ๆ˜g?z ๕4„๓พ!ฮำ0Kวf›็จ6žง”ต๗ŽRซบๅํ]y่–ฑท‚<‚๛ถAะk^L฿< ะeิํ7_ q๎‰Œ3็g้F;์žฝ๑ิ[d็›]œwๅพฤะWูฤฒฤ9g๏้ฆ2 ม Žฃ•D„-ไ„n๔ZBhฒA‚0ีnบIFShฺknๆ\B D˜ณ…LBเ>uม๋Wgƒjs"ห1ShCFy,›๘ุ๙ooฏCN Iฆโ๕ๅ๗’Rc{dŽฬT๙\ถ์=]ŽSœ')]ฬอqkV,t=—fฯ๛\Š $จ=ฃ„บ๏ŒYฃฺ. 
ฒคำใฒ3หC|๗x )nIผV^cูhๅ€จ61หrามLโ7ํ{โ๖ฦ๚{Kvk฿Œฑฺx˜fYหฦรฤB~#ๅw0 ลภŒฤรภB+†ˆz>wžฌ ๙m#@๓Xˆ€ฏj,[ฬŽ‹… เ>,๔ฤฃ๘&๒xlk,ฏ&aa†ท{นฦB๑ฐฦBK๖k,ฬ“•“z๚ฟG,รร k<ฬ“•ฐP<ซ(๊รนฤรฤย<ฉ™Xศ1โab!๗ง"ƒF —‹‡ญ8ญ๋MฤBb1q>`แสŠ๓1aยnq^ sฤefณๆˆ๋บ<]ž<&ลyŠx๛FชeฆZั\‹๕“Dyeฯว์;Nq'& ?‹์ฯศy3พ6บcิๅ๓fืๅ๎U&ฝท=Kู๓ธาsฮc๎ฝข.{zไ nŒZีฃิŒy็ัwฮ6Rœ#ๆๅ8ด—๖N #ฮ้Aoฤyn/ }็๓๚Œโฌ?ปว่์Gป7žvศ๏ โ|ณ‹sKบฒ7g™7ไt^)g1ƒ[rึž์ค็X3 o ฉิ๐v๚ไ ต˜+฿ืชณ„โl‘Yขt6็:N2 ‰ƒผq€ฬy<„ฎ๎'ฯฬLŸ0๗qๅRสๆอฮKP่OHZฝž‘b=3K™…—ผBฎณฌ3{1ำ)9๛H๋t‹="Aต฿า๔yNล้^lๆHืbŒ๋ฌQ~_ฬ๕Mษจ'} œูcฉ0ฏ!~อวบ๗ันใ์ฯw BJ{โซ็;พœ~ๅYฤ๙ป๏s๓vฌW_|แแทฤ๙jใaีฑ\<รB็”ƒ‡‰…)ะ  ๙ญีxhฅKโaฮ O<ไ2X&€M5‚'fอ ๋,ต™๊:cžX(%Ї}X(ๆๅพ,|MฏฑฐvyWค'ๆถ’N๎โabแึS,ฤร ลรž ๚<,ฬjขฤBพOโaba97œŒา<^b!ฏฏรร ๛ฺ7–Š…หลรVœ๖๕&b!q๐๕qพโผฮบ๖ t3ฃ“ึ๙แศžโaฉutšขกm™zfฯ๊fึrฌŽํi$็๓ZœณMฑM œ๋}:ทืp3฿™๗z_vศ“fฦผFท๕Œw็=†ะa“ ็–~vวฉฅ@ฎ{ฟ๋“.๓ฒ้=ๅ๏}โฝ6…ใ๑/‰พ๗ฮ1พสโwY๓ž‘ecsฮษš—‘jYถ>fW—ฑ[ส^y'ะ›เฑ๓’1o…y1ฬ|ขyผYฤ๙๖ฟธ๗่ซพ_o<ึ7ฤ๙f็ํภจๅtkt4AธL%ฮ9+/Q Xฦg‰{5Oฅ7ณฝ_CjศXค๓0[ |,TdBฐR˜บ…ดA ฮ eณ1ˆ]ลkfŠ,ื42C”Bb •„J@%ฉ–า›%ฒ์2C"›Yง$ฌu6ษใy}fIตaR’ำฬžืN๎’8?g ๆW 3* kK>-qท฿ฒ'ิ๗แ๏]›dI6%ฃŽ๊ณl3{ฮ?•๛่`วL™5็y(๑,L2š#—c7‹8?๖พทhGz๕ลiผ ฮWวฐะ1iหภรฤยฎ บเa‡…Žฌฑฐ>QYฐP#L…ธXXใกXh{Oลย๗Ÿ๗ฮฮ#ฑะ๖˜ ณ_ผฦBขฦB.‹‡`•X๘มoฝฃ่‰ qฑ/q‘่รBbZ,$๚ฐ0zbก%๎ ๕ฤB๑p์๓‚‡cX˜็ำ`กฝ็šฟี™๓Z sL๖ฅ‹…ฑ ฏ <ฌฐP<œ —‹‡ˆ๓4โ|๒ ฮW ็ ๔ฤ9ูบZkŸyฮ2Gp+ฮ๋า๖<&๗)ไuiOq›FpŠcDm:ลืู๑โuyzŠ๏บ<=๗ฅ๗yฉž=๐9“ฝไต1ž•๖า๛^xMผvฤนฮํŠd๛ภ๋๑fuOzF'ึ๛ขสชwฝTZ๐\บ๔๙฿Š๓"๒๛V;Rํป_™+kWpqž๔N˜+โๅfuk7Š˜๓ืz๛วg1‹›Eœ๏xิ}G_{ฬzใทนู ฮ7uๆ‡bฒDq๖ผ54Šq+K*ๅtค‹=ใลฌอ8็ีQZ’O# ไ$คอํ›snฏ=ๆ๖Bคrž/ฤ "–ฦEK37–g™xŽ๚I2Z๗ŠKdอ๊H ใตศ†B‚!ค์ฯ๛df‰ื$ฉ๕r>Vf˜xLH.—%ิš$ู“g–ไ(6HiบงI\=ทž›}—cฃ„ ฆ^ฏ๛ฯ ฟ”่๖eะ๙.@Hu)ถœ3ณEf‚ฅฦuฒ%ซ=Fqc†sอ๖ป^ธ;รฏJœwผ*โฎxึ{ใดGa็ซŒ‡cXXFค-วฐะ฿…S ฤB]ป‹ืฦŠ…ฮฤรฤB~ร~ อ”ง0 ษ‰…Y'ฬw๖`ažtL,ฬ“Œ‰…'์ฺ‡}Xจˆฏฑ0+ณ้\ฏฑฐ6s4e–ไ'ึsาำ ฎฮข็่ษถ—ปฦB๑ฐ ลCW9Iณ &f๖<ฑ0q/ฑ0E{baฮ๕baณ`แLโฦฟ< ‰ƒๅ?โ|•ฐpRvuนฅํธถ็lpnฮ47knนzŠ๓zvนFo™QN!Kไ>…sบฒs=หํำ=_k_๏x]ฆ^gว}ณโž€ศ“ ๔์ณ๗ถ|~ี"วเ๙u–็„Ÿทฮํ—D่จŸsะำol|^VM\e๒๗#9ฎ๛ะล฿็้2๒•ศo๏:Nญ'ƒŽ n็ž›A/ู๓nkPยŽทOŒ9NํŠr๚ห›-Y๓n\ํR_•ซฤ9—WKœ๙q}๓‰้g๎ๆƒ8฿์โผ5Ё„๖ˆsวฎ,™”6๘;๒Q๚๋ZRJ๏›„ด" m6ฉธูvd2ำgžC)5หมeg๏ๆ|]ƒ๋PHdR™Žส?Yถูื#™Y d๖ZฒzนGท„ิ“ต 'nฝœไT‚{9GwูYปป+ะ%ง๔9”s๙์,ณ๗ tึ์๙‚๎ตIR๛Ÿ„ydด๛๛็ˆ5ห4ํ›”Œฺw๎˜*…;ใซ˜-HwK่f\Žm 8–LูNฒฃed–3ญท๓ญ'าvฎ/&ŠM\๙œ{ด1ณ8๐!cี๑ลฟบใ ฮW +q> ๆไฑ‘> &Ža!ฟง‚‡‰…‰‡‰…ˆ๓ลย๛ฐจฑ0ุk,L๗žณ‰๓?ึ๕jึ๑ลว฿y็ซ]ึ^cกx8 ๊ผ๎hด‚…๒H,Dฐ‰‡‰…‡฿h ฑฐnS knMbกFpYu“"พฦBลy…ูฟžX(^ีXˆ@็z&ๆe๑3…บe๒5j8g/|…Dba_ฝฦรฤBท๕๓y%๎‰…‰‡…&ๆ๓ฤย1™X˜x˜X๖‰‡‰…œlk,,',{ฑฐ‰yXX‰sฐp&q~ำ_™ˆ…ฤมืOƒ8_ๅฒ๖Vำ๖2ล9‚qซ˜ติ[qฺ—5G ง Msท4Iห^r…ฐB๑jษ7กจ6Kฮพz{fฯwUูtK๒ณผ=ฟBฝฏŸ<ณ็fณŒฝžใ๎g‘ŸI~}โœเ}˜=W ๏)ยผ๋)าู๖ฉ๓8ใฺ๒หVฯ<ฯ>tรlบัo„pWbฎ(V,+žฦˆsDw๖ g {๔–ท๊Šryˆrฺวfœณ฿qjอ>วปตทy|y]@/ข]็๗>qฮmณˆ๓ฏํรFปž๖๐x๖žJœ7๋๎M|ฝ‰s'9š?~๒ีๅvฆ\ท}ซ‰e๊ล้ฑ:M|ผLฟ`๛‹ƒ8_a๓ฃฎœjRœ[)ukbึีšฺhh.ตm&šnต๖Tฆ๋ท3cฃ”SBšNปบํฒ…\Yบ(…ดA๐ฬิhzd‰&วฅC{f “Œf้ฆฤPrศใC>!žฤั฿ุŸ!"๕อฃป์นฤ2๏“๗ใ2Dิ๋’Zษฉู'M็์Kฯ>ฬฬี'!ฒคำัB้`lYg–sึ๎ํfฯ็๕Ÿ'!M“ค-TŸiท–๕jŽ•B๖SZฎiฆศ€€Fฆจ7 ูค_ธ‰ฎ“วึpษ,R#ธธญ^6๛๏‡บ+uOB e?ฤ”๑AัYล๙qปU๛X}q๚qพ๊x˜XˆY<์Fง5x8†…`^…‡c™๒ ‹ศใทว๏R,ฌวลB+‚ˆ ํ๏ฎฑะ*› ลCฑฆฎฤ5…x5*โk,k,๔~KลB"ฑ03่‰…\ -qฯJข‰ีD๑?ฌฦยn๎ybขm โabแค‘j์ฤCฑ0๑0ฐly‡‡5๒}็…m…Qƒ{๓ฐI/อฤB\—/ฮฏ? 
‰C~u็ซ…fNkง๏ฬx.w!pSœƒm9wfŠณ‡ŽIC”ฆ(ฏหึ ลซ๐]!ธ9Y`Y;—3R ๏ชข/“n|fๆ=Q๎๋™9Oƒทz|œ๏?+็9[๗”=…นโัe๏๎gห}ฮฯ†วฺSŒๆœUCณoปo‹hqkšฦuฮํอm)ฬ[ซ8กŠ๎po KูKฦ|ฌว\qžsอS`ง9โผTtยqnv_โผ+™ว๙=ๆฆฯ*ฮฟ๖ิ9:๏™์g!‹Š๓fl็‚ญM|›ธiu ฃ&O."ถM|ก็ฅ็q_ฎะgฤQ›Zœ7๋ฑ!‰ƒ:huJ8ฤ?dK(sใŸq3‹sJ๔ ˜’BDวฎsู’?ห$2ื๎Ÿ'๋่ศโฬ†f‹์7yดว๑ญ_;บฝqƒฐAไ์ํฮฑ@9{W’—.ยfอ3+D@<฿๐•๑ๆฏฮ]& จŽฏ‹n๖ณ/รc’] ฉY%หๅำๅุ์QฮฮK{๒c์LxH>Ÿoš"IJu…ฮy๓๓2่-%ฅŽrคžโ#ว ALu'ถ2 jงbย,[ฌๆq%›Ž8๏ฒ็fฮนcJV…™๓Fˆตฯฯ๋iŽCœต^ <ฤ•rxฤAf๕˜GtAEœฟ็แท่ศ๛๕ฦOพห–็k†‡‰…+„‡ํ๗[๋ฯG฿9์Qฝ๑œ?ธ๕4โvM|4ฎJTวผก‰‡ลuฒ์ื_Dœ็1ื็๚9_้’vˆgี๙็ผาโ QฮฝN[หตŸœู๋‹=HŽข<ำIษu•dหRNศšeๅ9g๙Bึ n–š›%ส๖์งฬั>:Kอ~C%กฏ?๛่ัซv=๚๛ํ๛ท์gทe๖+y๕2ZฌKPอ@ีๅž–ˆฒ’˜&!ต’@7cณj)ึ!๙iˆ”คิlQฮVหiˆิ–Žๆฤyฮป๏็dŒๆบถ๋Tlถ(ณBdƒ$ž๖–!฿fฬ๙๓f ้dk๖ผv?฿yพ๋๔W6ณ}^ทKR!ต๖}ฺฃษm”8;fซน<“8‹฿่๏ะg<ํ‡ฬ๙jใab!d%ฤy)]รB)ฮฏๅั๘eฐ฿žฤยฤร ลร ตบœ'*^งลยฌ่I,ณภฒ ๓ไeVy|…‰‡ำ`aโažฐd‰‡5ฒ4ม -sลยฤรฤยyโ<ฑP< ,ฌ๑plไจx˜Xhi{žฌL,$ [ผ+,์๐0ฑLใไTƒ‡5ถ•$เa…‡฿G`แr๑ฐ็7ี‰XH๒๋yศœฏัœ๓V„ฐY๑จ0ฏวง)ฬ–ฅํfฮณ_ฌy:ง+Žน ažโ™-ฏ!jงนฝฐฉง7ซž™๔ฺล=ซ๊าuณฺ้๎๑–๗›9Oq๎็๕>–•ฮc็ซއ‰…~GfมCฟ๗โ!฿}ง่ฑม๏#ฐ฿ัŽ๋ ,ไ6๑0ฑP,๔DeœศZถ8ฟลฏMฤBb็k8Jอไฒ็<„ขโฌqําฎ0MQšณห-gทคo3ใูฯฮ๕sC@๓ˆO…x=nฬศฬz–‚gู{fะ็ูฎุฮ,นืoฦ\qn%A9ฏ๑๊๊M่8a…ยนap็ึ™sM๊Œ‘ใำJษz…จถd4 ฉแผj…ปdฒQgZห้ํํsุI๙ง™s้On~?3‰๓ว~/gOœy่โ|ตลy`a+pfลCอฤรฤBซHฬฆ_ณ7ไ๏ซ=ฅูb๓;k,k,k,ด|ฝฦB๑ฐฮœkWc!™oซ† น žีX๘ย3๖ใa…ง ลร ลร\–๎ืXX‹๓ Y๕ษJ๑0ฑPO๐pž0๕`a‡bกีD‰‡๖š‹‡‰…:ฎ็่4zโaœภlt1€๋ยส๑0ฑd[ca9? ไ…หลรVœ฿๒ื'b!qศoโ ฮW ๋™ุณ–ถ#๒ฒไฌrfฬkq~์๖ ฦJู๋ฌy๖›ง›๛ฬ"+Tณ|ฝ๎ทๆh—๔๔g[n‰x–ธื.๔Šํ,aW ปฏ.ษท ฟ็D=็ว๔sะ.Ex๖ุg(ฦ}oฌผฌx๗ฤCšฬ)๊=กัŠsLแ4o ืuKูฑ๖”qnฮ>๏สฺkืtKไร1=๛ปณฬ}^&=„zฮ6Ÿ'ฮฃl=O&d‰˜x/ฏ3Ot๗ษื')f็฿>โ๑ฃ‹ŽzRo<๏žท?เeํีq/hโ้CY๛j‹sˆฑBฒ˜ )f–ผ์-gtš„าAig–rZฺ๔ฎ$P2Z2Ff,้ฬฌยผ6>‚\ฅ‰‘e›lupwvฏไฌ™–™ขt๎๋Oย$K }ษ™๛ใ๐ำ=_=๗ดcZ้La.%ธ/‘ไ4ณH™=ส•JiTทุ"ร–๎๖R{XB๚ณŒ3O”dlk‡X‡Œjdฅฉ†p–n*ฬอูc™ๅœฅ฿ฒ- Nn?eŠ๓า_>/s.%๛cค%œE`ท c‹Y"rช๛;ม>‰,#†JOบ†qณˆ๓ใ๚sใŠช8๓นwฤ๙ZT‰…Vd. -[ [.VX8†‡fฯKปx˜XจQx˜Xhึผ เš`&ึ#ำ๊ฬy…šพ๕a!O,มร mš ๓D@ฝ๔๕X *ฮsา‡Fq‰…ถ€‡‰…nตX˜ฝ็ivช!\โกXจI[vx8){žXจ0ฏฑะt+D ลม [<ิ=ฑฐ/ฑpนxุŠ๓ƒ}"‡4ˆ๓ีฤB]ปอ&๋าฝy็Xฤจๅ์้Pžๅ์Yยฎœ}้fฮ3ใl๖<หฺugทด;…นแo฿cf„3ณž"~w”ƒ+ฮyณๆูKOไL๖tkOAญ๘ๅ๓๑>ตk}_ฦ<ห๚yฟ>N}BDใบZผ/ด๘ฌู๊๏<–๗~.s{ษŽ‘ฮ๏f<๗C9ซ1๚zฮsคูXนxใ๖ผg้Ÿ$ฮว\'eฮ}L่Rœ—า๖nœZ8พ฿sraVq—>aด๛O๎ร๏u‡iฤ๙ฯ5ฑซ‰…!อชc๎UยV๖M‡ธ|*ฮ๏ๅ๚+*Cธ—โ@ˆs๚ษ,s+ek]I็rล9„Riถิ์9โฬYUศส5‘1$๖ฅ8‡ˆ:ฮ†์nบQ| TJ3เšบIF๐9วw{e~$ตt3GคAj"*นd?$B =๔ วŒž/วŒ๛™มunณ๗Rj@X-ญ‰)[‰i฿ฒดิ~าiลนžฯล1kŽRจ;Nศ“"DkขUฦ eฯe'ฮ5ตา ‰0{จ;ฑ#ƒjq^bฌo7วซUdด%ข†„ิ,บ฿eMŒ$ฃfŠBlทท•ว๋๓„์JFหจ-'ะ 9“8โปQEuœ๙‚ปโ|ตลyb!BI<œ ล4Y`a+ฮ vYุ0ว ลรฤยฤรฤB0ฏฦรลฐ0Gฆ%š5ฏฑ0ซˆฤDล3๛k,|โ็๚ฑะ– iฑ0ลz฿Z*ถูขภBณ็้๎O๑0ฑPO€4‡็i”i‰ปx˜XX‹๓ลฐฐ:Y9†…โabaŠ๓,Tlทท‹…ฮJฏฑฐ๘uˆ…–Ÿ/[œo๛‰XHrƒAœฏ&"ฐๆ—Dฦณ›aฝDqn9{Žหัašก)า=Fkฦ8หธ่ก็†˜ิ.๛สฝ์ธฐz„˜๏[แ^‹๓ฬœŸ[L๎า>MเิyRAQžฮ๊lyO๙นไL๓ฬย+ฬyN฿๏น๗ ๏sหษซฆ็ผฟtซOs9Od์ กํgนง๔์๏พl๎sบ0zีปฌy๑1็–ท๋๖3๎ฌ^m๙นฅ๎u&ฝG ๗fะณG^ฑํsG–ฝปo–ดW™•็฿_O]ชง๖ฦ๓๏{งiGฉแฦโฺ~Xู๗8b47Jํสํ;ฃ฿7‹˜'ฮ๖พๅถ๋–๒๗o–ํuqพย‹ณญ8ว•:f“wdฅ?mYฅœJGE† s)ถ”3f™งR:ต›)rๆ6„Hใ#”ฮฤK2บฒK,ษ™a›Y"G ี3อษๅ˜ˆจ8หั%‰์ำฑ,ขงœบŸˆ>ณ๛ƒหfะนŸๅžud฿zคtาาP‰m]ฮนะโ3Lวbษจ„ิyฟš IHไ}e๓fŸ+า‹!R’ั–lฆ8/ัkฌ•๎ฮ๏อ์yŸXGTAF {+Kฆˆ๏ุ‰*œŒy2U`ย2N๎Wf๋Nfื ŽMAจฐะ฿\ba‡b!8&*ะลB…y…9Z-ฑ0{ฬ ไ5๊‘Qcแ_|ฝXh_z}8˜b}าZ ,L< 9"ึX่฿fๆจIฑ0ฬ2วฐ0ล๙ดX(๖`แ&š๑^ ลร1,D”‹‡‰…ฅ$ฑp–ฬ๙ƒq> ‰CnpAœฏๆ๋ช ซๅฬ9G ฺ‡ญุV+ฮฅlk‡๓์ฑV˜ZพnIน๓œYŽ@ฬ๒๕Kzๆ{๏ปb|›—ํ)ฮณ'ปOœงู]Šs_ท'๑f/ปŽํไ s3๒yBb!an๖ฌ๙นO? 
๗šๆ ๓l ศฯowฯ,ษื ๓4„‹ฒ๘œ•žl เ!ฒ๛สณ๗|^ฦผฮœG๖<Lุ{|-ะฝ\ฎฯ"ฮ/xๅ฿๖ผๆ้ฝ๑๛y*qพูcำ๐•ฯฟWmึ๓U๊ฤy+r ั”‘Uุ•ๆ๔RV›ต$า) ฉ3E1BศA๛หำlว’vณd‰ศb@˜pี…๊0l^kฎง8—Œš%า)๛ฮน?„Taฎqบg&;๛&นก$[!…ˆBBŸ๙๙นl‘ูส฿ ๖ืมใHP-ํ[<žฏ็ืišล็ว็˜ูr็š-rึo=๏<ห%ฅiŠ4FR!ฅอณ’ย b9)ดค๏X–u–žหn„Z–wฺูGH›˜ืโัำ>žไื‘mxœ’i'SdFg&qwฟ฿ ชใฌ#๏=ˆ๓„‡‰…‰‡cXศ๗ณเแ,Xˆ่๎๐0ฑะ“”žจ,XศฑฎŒ๎ ษๆЇ`!X'๖%&ฆ8O,k,ดฯผฦBKะ๋’soj,ดลงฦBฝ:–‚… แแ,XXŸ L,Lq^dฆ{{‡…ฃ4ˆ ,ด•h"‡h็Œ๓7{อW)Nๆx5_JYปฝๆ)ฮอ gห Qg„ืๅ์fฯua'ฒ”iต้[^ฮlyBr_%ึ9>ลน%ํ็VeํŠs๛ห๛ฬาผNำ:^{:ื็gค+{ sูำ‘}RLฬ‘ ,์DIšDF%ฎŽZ๊‚xๆ|_ษฉณ~๙์-กตœึู็Žw2ใ—eทšศu฿"K<้Eฬัฒฤนฃ„Bคทๅ˜f4=r๔ฯฃฑ:็vD9๗•ะาƒษh!ศ่}Xฉ ›EœŸ๐”167=ใK/ฤ๙ชgฮ $โแ Xุ ๑ๆ{฿™(Š…–ฒ‹‡ ๙ ‰‡fลม='&‰…‰‡‰…Žc;-š5ฏฑะฑi5ฺ๗]ท๙xL…)ฌ๕ู“žx(z฿>,\gลBEy…ฤ‰‡ูZ ึc&ลร1,ป,Lฑพ,q^aก^ฆ ดXXJ่ปQl วn๐0ฑpนxุŠ๓4 ‰mƒ8_5,fp๓๚ฮK_1lฉ 1™ูeDg๖S{™)BuAทŒ}Yสž฿ŽRฺ๊๘ํ’ศ์๒๚BQฉH๗ไD-ฮuO‡๖zฎyŠsoK๑ž%์Š๑,็็x฿ซ™๖s‹Aa™$qฎ‰[m๚6m๖\1^ฯAฏวDนู๑˜g rKลำ๑๒"ะอคwพ‰e‹sล4ณสหˆถ:๛]gภ'ญV๓x้ศฎจ/&xc๏ฃน<‹8฿๚g.{๓aฝ๑ขม ฮ7ฝ8็_#ะณเฬ{ฆrืQsฦZฅญปY(ว๗ฤย,โโF„๏}๛๓{ใล๚Gƒ8฿์โผshฅ—–Yฆ๔ปaBร™๒BF๙งฟ,2z๕\น_ื‹ง๛ฐ&9ๅบฃ€ ข’Ÿzฌฤ2ชซ8ย ๑ิMXQN@,ษ9Zญ›–ขุZึฎ8ฯ์Pํ์|dๆว,‘๛ žิ “าw๖sœค”Xh๑ธ๖nา J,giฎGXโ๎e{-[($จ๕ศตœล\›๘y_bฟ™า'—\ฦiiๅแŽ”N9ซ%ŸŒ-โqt:–เฆ8งวr™H8ึ์ ๖ฤ๖}ฟAœฏ6&๒*xุaa๓][ถ8ฟบยย๔f,ไ๖4y ม<ƒ5Ї‰…ฮ๎รย…y"ฮyผ ๛ฒๅถ๖ˆ‡‰…ŽXซฑ0๑ะ ฑ0๑p1,gลB?[MแฤBณ็“X่e๑0ฑ0๑P,Tุ{ฑŠŠ™๑Pq‡…โXมรฉฐ~sNPีXศeลyมยY แtปLฤBbo โ|ตลyŠฺ4†3รผœ…ธฬ์4N3kฮ,ึm\ฎท—ZมjF93บ–]๛ๆ๕@G้ตbฒ่›fgŠsGจีFp)ฒ-ลฯs9ใาr[‹rหุณ* …๒Bข:M๐–#ฬ}œฬ”_™๓l๐sอ6|[‘Mๆ:Kส๋ฌณbทนŠเธmฺๅstY๎ž’๕œC^;ด/ศ4‚‹“ccใ,ลทR ‰Yฤ๙žท>oด๏่๖ฦ‹zืAœovทv๚ลฺพ3J™R=[ร/sO‰ฉHQขi๖tŒ”rนูg&H#ศงcา,-ิMื-ไ‘žp dKฆศqA9h๛„Ly^ฯฒxศhฅ(ฏGAF!…HŠน์ฌ_‘$Ÿ s ฉฑุฒ๒ฺ7|) ‚!5Rจg6ฝ๎Oทœ–ฟdณ#$๖R จๆrื s ฒ(nbId43็^N๗˜๏Y+ฝ–‹า“ท๛๏\uฯ‘lอmm%ใ}V`”ฺ ฯ๙ฃนy๋Ul๕qพฺnํ‰…|ŸฤCฐ๏1Zูำ "ๆลB—์O,dโกX˜xXc!‚ผ  ษภƒ‡ำbก๐ ลรฅbแbx(ฺt ฐฐฎ.ชฝ:j,lณแ นOšmึX8-v%ฤภzyšม•H<œ y|~5–์{bแL†pทฟแD,$ถึฤ๙ŽR#œ๗];›YŸv!6ณ\แjo5‚ิ^rฬ23žay:ณง๛บโ|’p์ํ‹ฦ‘~‚8฿}ูฬy_ึ<…y๖ฺs›sฬ3็ู[ฯ็รI …น=เำํ๒„สR6u{CŸ8ฯR๗=ฬ๖r๔^›5วUฝ๊๙๎f™๋Šnนy–ปO)ฮuzฯฒ๖yูy3๔ลMฝžmพ 87๗ฑ,yŠ๒bt็whqƒwพhtลป์—ฯปโ|ณ‹๓ฎ|€ภะKฦ?|c†ฤYs`tฏuิtL]C:อXึื7&’‚Q‘ฆD\†xB(ษI’œอ แ4ซd)ี่ศฟู[ž=•R๛+อAt็–ฑK@3๓M8*ˆฌ‘ฆpP“$ฦ ๎ำ0i12สใCH!ฑ อบฬ9NฯฟŸ๗แX2H๖_"*ZQๆ๘ฒ…œjเGึจsvัI๛K†ษL.E˜;าชa[า่ ถ”ณ#ฉef๏ข„๔ฤGํฏแปฯ๓~X'ฬ์๛{bฎ๏|Vq~ุ]ปmulํƒ็อzK฿oโหฑ๏:M|ผŒบ`๛‹! 1ย uๆ%.Wœ_๙พ1,$ฺ฿ŒxXฐ฿Hโabกx˜Xn‰‡‰…fฯนญฦยณŒ]LL,ิฉ}Z,TxืXhkO…‰‡นต&rŸ•ภCหฺœลยฤร๚•x8†…TD&ฬK้น๋ฮ0Ÿไฮ>)+๏˜ทฎ—^ืyช-ส๗%O๘ฬ$ฮ9btลqG๕ฦK~ฯAœoq>๖B๊xJ{)fŽŠ0oƒึ”฿แฬ~ษsdฏ๚@›!’”]vต็MศNฮ่…8B,นอŒทADฮ๔AJ"!–Hฎืsษ่๖ช”S2joฅd”LQŠ๓บ„3๛ห!›ui:ีฒNoƒ|PGฌM"ฅ}หว‚ฒen๐ฌY๓๎๑฿ีL้‚ฯ็๎ไ$กฮฅ\ณหBD฿หw‚1RQฮั"[gbศhK๚~*ฮ zีo™ม์๓i3็!๑ุdMyl2M๖jวvณ็3—ต~ทฎ๗ณŽํฏ“iฤ๙›8ค"ฃ/oโูๅ๒ณ›8j็หศข“e นœx(๒^ rrŒx˜X˜x˜Xๆ‰‡‰…‰‡5&ึXX sฺ็5Zช^cก' -gO,$ภฑIXธึXธUDmYo#ขลยฤรฤB~ภร1,dLžxXุ s*Œfมยฤรฤย<๔;x8 ถ‚‰… g*kฟใoNฤBboา4โ|SใแZaแพช\sฑKยpMัžฅฯฤBๅํ๖k็๒zธqฃvทฤ:หท3cl~-ฬ;!ๅhธ:s2~฿{ฮ๏๋97๚ฤ9%ํˆrฤ9aฟ9๛9>{ํ ฒฯ|"ีŽe™_๊ศปพ์นๅํY&ฟ'2ไc๎์~†๓าk๖`๋b^2m้9b{฿ุ?|ื๛วž•Œ๗RV+ผณผ๊#๏Jฯณ'<วžMq" ๕ึ๑ฌจ๐Z]‘•ณˆ๓K{๙่ส_ูG<๒ƒ8฿jโผ่„โ€ƒฌ3X!ฉำŠ๓ถไรmฅ%}HŒ%dRน …e›Aข3ห!คft!”ฮฺ%3ไXฆYsณNi๚–3|๋L‘โœ๛CF“๐ๆ<_ณF–oฺ[ … ZŽ™fFvI'’ฟฤฃNyื่ฏ>O1%$œ}‹ว๘r2L่J,๛\59‚œฺ๗oฅฝ”๋pŽ"‘ *ฦFํ|{ฉหฌ๓๖ถ%–ฒทฉ(‚r9วฑๅ;[สŽวฤนHSdŠบ็r[Gิ>โaฮhฒE”rฮj๗ย{Žฯaุฦ‡MUึฌVd๔๋M\ฟ\พ>ืq>#&‚{โabแโ<ฑะ\ฆฺ\UๆgฑPs7ฐ*ฑโabกโ9ฟ\,^7j,k,tฌd…โ!Y๑ฤBณโšผ้ผ๎mbกธ‚‡}Xุ‡‡bjฎ4&&&vcำX8†‡`!"]‡v๐p,ู&ฺ{เamdธT๋œฏ}a<_^็๖Z”๏‰,z-ฬ{หฺ{สSธsผiF๖โ<บฆw–ดc‡Gœšม้ิฎแฎˆ๚ไฤผ‚ฌซฎ;ฉP#[,Œ“้`ฏ(ๆXดช์ปห์‡k{;‹ผณZq๎ศ3ณห็Qข>ฏผŠฎ~)zD~ษ’g5Fํฯเ ก™ฤ๙ {tๅ_Gๅqพลy๛ฯ฿ณ๏–tา๙”ฒ…„’`n๏BคยrอวฺR?อ,3๋สeอ4@"CนิmุฒBH&DB a$ซร1WฒY‚ญ 7{T๗XJL๋LQ:ต›-Oq๎8 ห9๋>I๛ฬ๋L๙#?zlQD:ฤา๒vE๘$qnv ’หsฏ$กโ ์่&vฮ๑ํH(1อwษ™ัl—1รท}ŒbJิn๙eฦHมn–“3คeeM5>สŒy)g๏œ‰g(ใ์ฤ๙‹๎ีๅ:ถฟ๙ฯ8่T?โฑSัหช/ฤ๙ เกXˆะ{ฐP<œ$ฮลBEŸcาาม;๑P,ิœK,ณฤรฤB3่V$%*ฮ๛ฐะูๆuึ<k ณŠ(ฑะา๖ ีŠpฑ๐/>๎NœืXุ‡‡>ฮjc!b\<œ‡…m—‚…|–3ำ<ฑJŽฤCฑช๑pV,๛4ƒK,,') g*kฟ๓ˆ…ฤถv=|์Vฦร๕€…ศคใzB"ซ๋:e9:อ~๓o—lนg฿{žะฎ่ต 
J g้๚ˆ๓‰…f+นXุ ๒ภรลฐ1ึi๘–Xˆ ๒โabกYm๑P,๔„$xุ‡…Š๓บผ<ฌฑฐฏ็n๐ฐฦB๑ฐฦBO0‚…` X(N‹…[ ว๗&rY<์มB๑pฆ1€งมยๅโa+ฮท'b!ฑํฦื[nๆ|็`ํปโสysรอ*าk=I ปฌฅปก@ตฯผv!ฯžg3ๆ“SŽสQju6}lFw๔+ะ}.หyVค@Wx#ฬ็๖—๋Lฏ œ๗ี๎—,^ส ิฬ’ฏ๗ๅ฿ร“)]ต@mะฦๅ๎ปl์๓šๅ๙yi๚๙g็|่ชพน7Ž๘ซโ|็ ฌฬIFํ3ฆdฏ๏LูOO๎ฒAใX2-๙$,!ฬž?J9ษ A"!ข–`J$%™PลyfŒrฦyŽ"Cdฆษเz=JอJ…นฃ{ ค9(g๕Rv™ยœx๘ษวตค2š%œ–ป็พ-A< ั\Pœ[Rฌ8ฏอzฤ๙ฌซ%ฃว<ข&ฮผ๏˜“rฦ๖ท?r(k฿(xhถ\Lฌฑฐ)… โfีลยฤรฤBpJ<ฌฑP<œ ugฏฑPท๖๔เ 5z›„…fัมB„7WcกโผฦBึVยรNtWx8&ฮ น,๖—\I<œ g็ใท'b!ฑํฦฟผ\q>”ตฏโRฐ›IOq>Ilš…ฆL|WdwUยฺูป/้}โผ่ตXO‘n้{—…ฆ๔:๚•-ำถทุ“fะซ†ฐฮ บใแ็ t๚ฯs\๗Kธ4มc96mฯL๗6ผืlํ‡{็c%•3mซgึื6M?,โ๒ฝa๔ฃฟฅ7Ž|ฬCq>ˆ๓ โ %SD?ฑ•brTŸฟ๖ใmfˆ yŠt2้’ิW#iEœCFษA4!‹๕843?M แ&Q‰*dแง|“ K$%[Dนจๅํuฏ9ู!๛)s^ฏ=—dŠ $๔ก8~LœwพUลy’ัฮpหLนž”€ๆํfˆฒว’ํ “ัiืLโจtYญ:ถ—ห็ฏจ ^>ˆ๓UภB๚Œ มร ลรฤยฤรฤB๑ชฦBCผK,9 ม;๐ฐฦB๑p*ฬณทผ—–Y๓ ็ฤV็cXH$"ยลรฤB๖'Š…@œHOFdu€โ’-’-็uV<{ษ{D๙ค‘x}7Š8฿๛‰wŒ~™w๕ฦ‘ณฉฤyณ๎^*‡ฮ๑eu๛ฟiโีๅ๖Lบ(๛ฃ‰O5๑ี&ฮnโoใ>/hโ‚&พTโžƒ8_OD‚2vฒE ๙์ฬqลมป8ฑC%ฆ:„&;๔UBJ้ท4ฃฎis„นแฌ_๛ม)มดoœ’ฮฅŒำใ2c$1•จBjษA 5Jwaˆ(AV=3Fๆุ ๛+-ๅิ)ห8ํฏ4S”†p™-Zห%Y_๓๏DTำ7ศจJ”qฒ?ๆKท‘•$คซ˜1šIœƒว๚C3vผ๛1Seฮ7{ฌw<์ฐ,yมรyXXแ!ว่ณั‡…โab!ย<๑01N,Dœs{๖šgY{…fฮk,L<ฐฐ &&ึXxธ!ฤ๙d"~gqq>`แฺฏ==sฑ์fมsnน™โœ[žBžํ๙Uฝ็f๋ำฐญพฝ.uฯู{ฃฝฮ฿mๆœ๋1ฏ›m[ๆ^z}Oบฯ“ฏgž#ส[ฮ{๖}ๆษŠ4ฝ[Y๒๕ิห>ๆD_๗—‡@3๔ปrพฏภj ๔™ฤ๙ง!~๊๑ฝqไฑจ8oึฯ6q.ฺุฤฯ7ฑฝ‰›Vวณ‰“‹Hฟm_อต(ิC฿๐พEœ?}ศœo€ŒQ7็JY'1š›ู+ ญลนไ“€”๊^l฿9 %;Dไ"3Q$›cŸฅ}—f„$คi‚$Q…ภB0อškๆฆำ0D’dˆxวชA`ํซิ๐H๊ŒsMแ ฅพADไ}๏ํJ8%ฆข}กลํYโ้สYภ›‰ŽSศจำ$ี”ห}9๋WBwฺก๋_œฟ๒!cฎŒว=v็จ’จ5ศวฐpิแaN?HqNˆ…šb: @œ๗แ!‚]’๙โผW_฿ฎํD>w>ฟ'๖Tฝ่—Tฝ๊Š๓ึ๙^sฤ8[ณๆอu„yWๆฎ@ฟb๎dย๙—ŒgฯฅF?9ข<ห๓ณo๗ๅ๛๑},(VKYwฝฺา๛ิ๋อhNa>oT[fอณส+็[M>‹8฿๗™ใF?๙‰ฝ๑า'=rq~ป&>ื%ชcะฤรF=ีq๏oโฎƒ8฿(โœ๙$;dฟeEF  ฅtSwโœฅโœ}šูwI๙ฆข\Tˆจฝ‘๖ERšiน{–นง0gSB‰ทLS๗a๖“M'Cฮ~n—Zถioฅๅ›\†Œ,ށ8*ฬ-แ„ฆKฑได/s4 !5รต™V7*ศ~ห:3”ู๓์นไ2% โU:gnWลŽ๗>~็ํdฅx8†…ใโ\,4S^c!ืŸX8-Š…๖Œƒ‡5Ї5‚}}X่ว น=๛ฬณตG-ูTฌน ดบตkl$ …JR“.TฺษmGใธ็ำRค{CR2ใfว m๗K< ช#„$sEœฟ๚aใfN;N|ย–็+…‡ซŠ…dЇฑภB๑pŒ`๐7Cๆ|F<\U,ค็<๑p ,k,L<ฌฑp!<ฌฑP<์รB{ี ลรฤB็šƒ!5j„ู‡…fอk, ษ ƒ‡}X(๖™ฤ๕aกxXcกญH5Ї๋ งมรEฑฐยรYึRfœฯ$ฮ๏vณ‰XHlปูึ็+ษ WSœkhfษ๖Bห๒ +w๗์SฏMๆ๊ษFิ"ฤrzพีศญ \”Jง่๋ฬศ๏–ผWcึ๖EฏปใtO@๔eพ5)หฟI'จห,mgซŸโผŠ๓13ป&๒ธซ/๘ฺ๘๎WFื|๋K Oqnx์Zฌ์๏•S๔gถ|_.t‚cฑตT็๗Yฤ๙g|xtอฮO๔ฦหž๚˜^ึฌห›x๊hสษƒ8_O๋๊์Љ) ฉ%œ๕ส๙็ธใHLLZVH)™ „7b‚•T*พ5xณ$“rd”ผ๒Fึศ‘A๖VึใาฬุิY๓ลค3K9!ฆืr*vš‚ฟ๎ตไตX^๏‰^›ฆMผ๏™Xณlะ"โ|-ืช‰๓ื=|ผD?bวIOฤ๙FฤC„๙ฐP<์[‰…`bx˜XH๏นxXc!Xื‡…^ฎฑ}ŠูฤB[|๚ฐpนxXcแBxุ‡…‰‡‰…โabก'%ฤร๕€…๋ WMœ฿w'b!ฑํwm็ อT/”ๅฎลนไป/๋ตถป2K[ศ0อR๕Z`w†bŠํ,UqiงํiDๆ๕RZž™๚์…gŸ"|)หฌผ‚›็PX๒œ๏wัฎ๖๘ฎ  ‰ฑO฿๛F+ฬ[qŽ oโง็~ฑ‹kฮ;ซํWŸๅ6ึj-&ฮืjญช8?๋ฃฃkฮToผ์i=8น&v5qฃ0„ปYuฬฝ*CธำFs.๎๏hโ๔<๎๕ใ๒Sšx๗ ฮื+…ˆโND๋TผสKฃ$L| คัzฏDLฑอ6K8๋qA์3ฃฤฑ9\’ง@ืฎ/Sด”E๖B:อกพหzฅSrา,็'ึrตN์ป฿ฐuล๙นฟงด'vœ”Aœo`<\k,T +ฮม=ฒฦ g…ibก55ฒ<ฌฑP#อ•ฦB๚า๓DๅBx8-Ї‰…พ^๑pภย5็๗๘๏ฑฤ๙ฦ็้œžฅํซตๆ sEy๖WžฝๅDทญX๗~ฅŸ;วx!œ“9šห–^ฯบZ๓นxฮ…}๔E•Œ๘Dq„~ญึzๆซ-ฮฏ๙ษัOฟน8๊O˜v”ฺ=‹ำ:ฎํ‡•}#B„Cน}งๆ”บ71*}ๅc#ำš๕ฮr,ท} ฯ@n็๋โ?7ข9aฎ ๑j/2K”{โNLฉฆฝ’QKะ!c\—Œ2๎‰…”šQ‚ŒB^!n9ฯW๓ค:Vb9Vhฅ–}๓”z*ิ!ฃผฏต,ใLk&qฆฟœ›g\ลŽ=}็ืRถ.ึXhนw…\k,Tœ๗aaฎG, ,ภC๏ฐึ[q~ฯ›OฤBb๕AœoPq^๗ฏช ช„y{๑า[>ฯdฌŒRkูํแฎ เฦ2้•!Y[ชใผVb-&ธ—•€(ยผ้ˆ๒"ึๅ?น๐œ6†ตf๐ิ+ฯ์่ง็|ก7Žzึ“ฆ็›=ž†“ร#Gญ๖ข๔“ฒO2FK%„‹ฬfA0K;๏์‡dZยฮํ๖g*ฮนMBjถhฃ,KN!คQ> ๖Yฒ:ฌ5็o~ิQq=ฑใใฯฤ๙ฦรตฦBส฿qc๛ฐะ๒ฤB{ฒk,Dฐsln4<ด/“–์ฃ:€ึZŠ๓[LฤBb็ )๓ถ{-VO^2โcณฑ›หfhํnหุ‹ูX\ดซ- Wœ™ญฅc๘ dษWํT#๒ฺพ๓Rสn๖|ญณๆ7,โ+2Vีqิณž<ˆ๓Aœ/แLต๋2Fซเชทท–”q’!ำcู"dาžJ Ž –fQ,kิช0ทW“}Rb#-I7๏กฮ๛pฐึXœฟๅ1๛g๗ฤŽSฤ๙็k…s๑ฐฦBM/ ู้ถGปฦB๑pฃc!+ฑะq™ฎq~ฏ[NฤBbอc็”:ห{ˆ๓bธ6Oœ‡†j]ฝ่ํ~'fkOSnพ.9;™๒๓ฟ<—9ง฿| เ,Œž๓ฏ}กk3จใeฯปAœโ|ik-้๎+o[ส‰3{ฟKy%„ชAœๅ™P…๓vศฆ}้6๖™1ส2Nฝัf๊๒†ฌ๙z็]๛ฝ่Ÿv 7 ฮ&ฮo= ‰Aœo|,Tœ_ธJ็– “๙F3ฺํา}WŒ‹๓(O๏ขpMใบy้9Bญ็ลDAŽ€jว‘™A/1ฌ~q™s๗ด—้ฤ7v๏ฒXจ8฿๗ญณป‘vuผ๔yฯฤ๙โฺ2“%>๑ษญ.ฮ|อ‡8Uoi ฉ™ข์ทSม9e?ทC>!ข๔efY{ฮ๕5snฆ(ล9!%sNp}X๓"ฒๆบH]๋รใถด8฿1O.๛งุy๊Qƒ8๐pI+ฑ0+€j,tE…ˆs๑0ฑPa? 
็dฬลย'cแe?~W‡‡› g็๗ปอD,$ถ๒Fƒ8฿Xˆ0'ดA\WrŽ8oฤ5็fฮ๗qž๓ษํIฏท]ฦR๗b7&ฮๅ๑F8้v ๔ฒo˜ฟๆˆ๒Sพ๙6ๆƒ8oฤ๙wพ6ื"QลK๖ ฮง็OiโsM|จ‰G4๑๏ฟ ขฦ็œฦŒŽ ”f~ศฆฃิศ q<๙วi ็}|,อๅฒค!%œ;ฌ~BJ๖าS๋"M@P[2J@พถช8งงF—ฟป7v๋+ึ\œ7๋gมธฎhq.&†ีXจ๙[…sq0ฑะžฎ ‚เ!› g็ฟ7 ‰mทอ5็k‡› wqฮˆต.ฮ/ mสำษš๓œ™=หŒ็xด’uWˆ{y,JŸบข)Wk5Œ%›Jœ๚œ‹ํืv_>ฺu๑ัท๖์kท[ZœŸน‰*^z๘ก^œฏ–N๛D7jโ9M|ก‰ใšธๅV`œŠด@‡ˆ~๏Šl$S๓"ˆ$ืyษุ๙ooณCd•fs,‘d4{,}ฌ4=ส2Mˆจ&Hฤฐ&/้%?z็่ย+8บ๘ชทํ'ค?ฤ\fdซŠ๓w=s4ฺw|o์ย+ืEๆผYŸฤ๙สโแZ‰…‰a5‚yเa…ด๗ˆ‡‰…|[ƒ&aaโแฐฦB๐Oฌษ๋_ฟuI[าN ิฟ๙ฝm ฬทบ8฿๛ฝ]๓#_๐M‘9ŸK—๒D7kโลM|ซ‰?ู๊\า1๗๗ผฝoh๛+!˜J]ึ)ม$3!…dr›ย๓ป฿า’Q2E๖Wf'™ข4‚#Sค๑ัB™ขPส๙เปคฬŸ'.fYdŒศูg AmŠอmUq~์กฃั•๏๋_|ีz็G4๑ฺ&๎4ซŸฦVวร‰…ฌ ลร น ๖ีXˆ`O38ฑว ฆลB.ฏw< ์ค๗L‡:ฯЇ`=็โ!=่-š%ชโทŸˆ…ฤถƒkฝˆ๓5รรอ„…dƒL๖Šc-ฃฯŠใบยœ“–Œ=ฯi(ะ๑]‰{UฦF้)ืเอ๋uู:โœŒ91eŽภZท<๔“Fื|๕3ฃŸ~๓_X๔๘๏~eฮ๐ฎ:1ฑ”EถNœ๕KG_ๅmI๛ ฮq~ัwบ)u๙ยร7‹8Ÿ Kง๓ใ›xpnเ๙KBบ’ค”ฬ๙๖†2ำ—ฌขœ์แx ศ&๑$ุGpูห$ค๖››5wvo’Qึใ?{Lf‹6š8‡d;›˜>RI7[ฎk%๕s$fษ!ฮฏน๖csฤk๋๐แ๏;๎9ฃัU่งฟfฝˆ๓O๕ฤ'Gƒ8_,\I,k,tz_9{nDq.Zถ/&ฆ8Ÿ 7ฮ,ฮ'`!ฑํบ^ฤ๙šแแfฤยศ+9›Œv+ฎธท}\3๕ป{ฤ9๔1qn™ปn๎–ฐ+ฦ)c/[ดวฤ๙ฎ3:aพลy_(3ะ๗ภ(ฏฒMqžจจfฝOณ(g'cฎc;Nํ็6ข‹‡ž๓ฝปฟ฿Jัฤ‘/zf็3a้4†p_jโ…M<ญ‰งf ๐ำฬ#ฅ}ฤT๗Z็Kd๖ไธ6ra~D๖œัAd€ ’๖K@z9 ชไt’8—ฌMZ’R3G๋u๗]๏=๔ว๘žF?๙ธ๖5O#ฮ๙๘,ฉ,Hq๎>3qd๋D฿ฌ_n:ทsส87Aึ|fq~๓ๆFหUฑ๓ฬืM%ฮ›u๗&พฤ9M<{ฃ๖Vภรๅbกxุ‡…ฌฤBMภรใ qณไ‰…3Iœo,“๗ฝทลB.‹‡‹‰sฑฐฦC…บx2cž˜„…Nฐ 6ฮ$ฮxว‰XHL#ฮ,xหาrŠไ+ๆฦฑฏษ–—=ยฐ›[ˆk๛อ-k'.)ฅํ๛สsŽภลุดN˜—๒๕ฮฝงฯ๏u๔๛;qNฆˆ๋๗?๖ฤ6s„ณ2ฤ”ฬ‘Žฮ|n|rศ)D•ฯ‚œ—L@H๒๒๙BF๛)3ฎm๙ๆ0็|Nœฟ๗๙R๛Ho์<๋็ลTใRม๓๓Mloโฆ+tV๓แe๛ิพ้สแa…โแbโ<ฑPq.’Ek,Dœ[ส^c!Qc!ฟ}๑p#/pPq‚‰โabก‚œ๗XHU€xhพ(ึXˆ@…ํฦ๐pญe‹๓q"๙ํัZaแzมรอŽ…fฌปพ๏jถ๘RฤyWš^;w๕ๆ1]>Ÿ๏H4ŽU•า๖yY๓Fœ๗eฬ7า๚ังŽ๘ิใ[qNd๖~๕™'ทพhE;œ/ฬ๕ื_๐ตน,z๓Y)ะ ๛๛knึ@ 7lฤ๙žsSช8๒ล/ZTœOƒ‡อบg'‘~[*ภปoณ^ฎXgฤQk…ฅณ€๙ญ^˜ๆˆ!ลนค“q3๗”F๗-ศ„ำฝ™Eg?ค)KูอmvqN@H ˆ(มef“9‚จR‚ชัD’สbH+.อ”ษฆห3Ÿ•.ฯ vฒE5อlแฺ่Oโ<ล๙ /Ÿ9๋‰฿48ฟ]๋‡+DFบl{O>โ|ณЇ‰…F๖แก๓ฮมฝ 5วœ ๙ํoq‡…Dbกใเj,ไ>โab!=๙โa…}xศษ–1<ฤy#ฮ๏< ‰mhญฐpฝเแVภยฑฬu%ฮsฦ๘˜A(,m๓3„wfิ๓qวnซ2•ูoดฺเ#า็?ฑฃŸœ๖NŒท‚[q^2๊ญ(Gธ›MWœ›Y,zํ๊Nš๘w์็ tKฺฯพ๐๒6n๘3ง^๖ƒKบ“Tu๑’—L#ฮลรfฝก‰‡ลu2ๅื_่พS.s์ืื K—๚ค7mโE%ๅ๚ภ“ไฤ™ฏNถ8yำGny&๓ข+฿ิ’Ong‹‘[๚๕๚๗—šA‡8ีฅํั…ฤ๙beํ๋}ใแ›>ิ†"N๐‘6ธ ย ฅฤSฒzทท}ฐ=žนa… BHษ ๑™r9[4Žโ3ฤ-šw[eอ$ฮO|๑~‚;w_:•วxl๕x\ผ9ฎ3ฮ๑ตฃก”sCcกx˜X˜xXcแbxXcกี.ำbก™๓Œ…‰‡b!b]<ฌฑP<ผห>aกe๐เ!X่xนฤร <œRœ?๘ฮฑ@œƒ“๐pภยอณู๊ใ๓ฒ‡)ฆ{ฤ9ท๗-ณgผ์Uไ ๓็ํ}่]œŸ๒๖ั>–Vคทๅ์ qสO?ฉg™{'ฬ‹ ‡˜งว^z๋ฉ(่2่ฮ…/;ฎN!ฮ/ฝt๎wPลGมAoœ•6๋ค&๎ืOiโV ทY—Uq้h=–ต—wƒ’'๕F{šธแภ‹/2DRๆ๓B` žPKO๛~7aๆมr ๎ถะŒzq๗ƒๅœ^ณFู[IdๆHB ้ยฉฝดั–ย‚ ฅ”"zป๓ฑฎาษmw}หIํmฟ๗สถฝ๔ฃ[ฟโํ}!คVJ:๙,(๋$๘\ษ้ŒYๅ3ใ๏8ั้ฤ๙‰'พจ-mํ‹;่ ƒF[iQโž™q2=RfภrYB 9e_fŠ$ง.ฤ=ูwM‘$ฅ'J !Ÿš!)ิ9Fb ฑ‚’๙ุ่%ํIF!ข”m"ฐอef่๗_๒่ฏh+ฬ!ข-!=be*ฤิว ฌBชด&I์ใ6๛^^\œŸpโ G?ฝ๖ใฝฑ}วืดฌ=๓=eD$=Hฤวšxี=#บE๑,คตGใ\๏๕ดศ˜ทฅํMดs2็”ณk๎hตž์xwaโrญซ๛_›+o๗ไHฉB่sโฐp\œ๏พไา1#ฝŒM'ฮลรfซ2„;mฑ๛6๋•!หื KงyขิฤฃŠญyค๙›ธอภหแkNj‰้KถP‚๒๖ฯ๏~Kปตำฌม~Gุp;—ไ๖Rๆุ4ฉฎล„4ณE›"ฆ!๖PZชIf‚ ๑ผแi‰'[ฎC@น~—gœิFKH’ส– ›ฤใะ‡I "JIƒนํ%ซ7ัลล๙๑';R‹8s๛๋งฅ†ใๆ7สูวรVŒžYถ๖gš๘&หฌ' <œn%‚m‰‡œฐ ๊โab!'(ลร ม>หืท Ї`VbกxXcกx˜Xุ ๔‚‡b!"] ‰ลฤ๙ฤย๕‚‡[ 'ตuฝแu?z)Woล๙‘=&ิ53C gฉ|๕]ฉ๖ฯ˜wŸ็ฉวw}็˜ยูg sณๆ!ฬmqgงวผ็^/ฦp:ู;fอv‚ถดฝ˜š Xธฐ8ฟhฯฺ๑}๑ยฟdฺQj๓๐ฐY#Fsฃิกพ“~๓ลฐดYื-ๅ๏฿,๋ฌ–.๕Iน‰'—r๗๓^ฺฒ„๒N๚/!žJˆ(คาI‰ ™ ˆฉ๓ต%ฎˆunc๗a?ว@<-Ywฌ„Tวb3็”$’1฿,„”Lฤ“,"š^ษถ๒จS:โ๙๛ฯ๚ะ่ž๖ม–|š!bห~"I+๛!ฅm้{๓Sหญฤ0็|•ฑฒuO‰…”น‹‡5ึx(‚w‰‡b!โ๊ข 7 &rY<ำภ@ฐฐฦC๑/๑ฐ ‰ฤBวซ%nf,œEœ?เAทŸˆ…ฤ:็kއŽฦ๚ฯปy่Y๒ž#ุBะwฅํแถŠmDwcฅ๎๋๚๘ั๒ธ6ธ๛8Ž๛@p!บdฅ์E็ณทฬvเ๙โุ๗>ป5๋‹ำฮ๚?k-ฮ/ไ5N˜Ey๘ ฮืn%&ึX˜x˜Xˆk,ลB7ฒ8ทŒ]&สๆอึ๒ํ )ศษ7"ผใ๔œ—’vz;๋ผ๔œ[ฎbฝunื$.Ez็|6mUƒ๓ให ’V ื=ฅาa฿ ฮ'Š๓oํพdtแe?์็m|qพ"Xบุ“\LŠพ‰g”ิ๏g ผrห3่น.๑ฑ)Y$ส7ํ7'ศฌ[๒ !5cDีpผฦ=๙@J!จfณ‘–3z็l1;"ผmเ^๚‰–H*ภ!ž–ฎCH-๏„ˆšQ‚„BJูบŸ๛q<ฅžRKG1Hโdˆ"aณŽšEœฟ๋๘gฮZฦฮ|ๅบ(k_๋๐p:,d%๒›K<ฌฑจฑP'๗>,hx˜Xจ8ฝl฿y๋ยXถ‰‡‰…ฦ6ฑltb!-EโแVภยYฤ๙x‰XH|ศoฎต8?sภยu*ะK†|LˆF?zg๐ใ /e๋xดธฝ+yWคŸๅฎ„qบก„9ข<ฤ9๓อž๓fŠ๓ˆV 
gd?:cิขฝshGœวXบœ฿U+hธWฒ๊ํ‰”M‹8?็{{F็_ฒฏ7{ม‹7บ8_,]สBQฒ้3๐JฌOŽฎฝ๖”6[dŒ,แิฝ8CBšŽฤRศ's|! (๛F#ฃ”iBFฦdลu ฆ„aŽXๆ:คยช8—ˆถ๓|‹น‘๓ฮ!ฆ๖[f฿ฅฝๆบปS"๏ึัmœ๐เ3v๔1๐ธ8?ๆ=O๋J’๋8๕ฬ—ฏต8ฟฮh็มยบค=รั”5ฺ๖Scก'/7 &‚IdฑลรฤB„:xˆ@WœwFoeถนfo}X˜b<ง]ˆ…บนs2`+`แ,โ~ฝ‰XH๒ญต8ฟฮ€…๋gฅขย4็”ำk^ฒเ๖๗Šs„8ๅฺaทQลy'ผ‹ุF`+ศษœทBฝlอžง`๏ขน>–YทR๗>Sธ’1;้แ๓žŠ‡๎็_ฟ`ฯ่[{๖๕ฦกฯ฿๐โ|EฐtZQภ2P‹M<ฏ‰_xuล9 Bš"J:BF1Dฺ^.Sึ aขŒา !ฅด3K‰$ฮษ9›าI@F!ฆRŽ#“ำฮ.v2<๔งJถKn3“๎!gK^ืฆ ‡ใTLi,ๆR|๖ย\‹๓งŽ.๑?๕ฦฟœyิšŠ๓๕.ษž‹‡5า ึX˜โผ 7 ึXž‰‡5Їbก—โabก'!แ‰…โ!XศถลืWดรรญ€…ณ‰๓LฤBbญล๙€…Xœ๋ะฎ@D@1ญ›ggน;ื)m/fhMœ“GH#ช๊W†๑กืํ่๖ค—2xวญตQn๏๚ำ๋น่ลฝ5‡ใ๓ต_฿๖€๘9Yฒูg็_๎ลฃ]๏ํgopqพb"‘‘r๖3J9๛๏ผถโŒ™s„ธฃศ9‚Dp;„ *!ตฏ2Š~๛ฦ คูSfy^rgะ๖ุฯ3z๚ฟำ’Rฒ8”xถ#„ นไXu4†Tฺ“ฉ๐f๖gฦ|`=—ูวc๓|T#uฃวu(k๏็๏|ฯ฿ต"ช/>wฦ‘ƒ8๐pE๐ฐฦBZ|ภร ํ=ฏฑpฃTK,lวžฌ 5†ณฤBŽ๗ภB']ˆ…bœีE‰…‰‡ง'ฯYc!›็ฤ๙}x๋‰XHb็ึโ| ™ื4๋Fงๅœ๓"าว๚ั#๛›ขพ3B ฦp ๑V 7ยผ๋#oถ oฤ๙U}๓\_zˆq๖ตท!เ›หY_ษต่i—Ž๎แ์nๆ\q~ๅUW XX‰๓/Ÿ๑่›฿฿ฯ<|็ำˆ๓k›ุWboD{}เืตŸS.KแPz&!›”lš)‚ˆBL Hช#ˆฉฝๆrฒDl ส: \w‘ณ–ฦGB36Mศ!$”เต“ม๑XD8„“-๛ํl!ซfนMb ฑMาชWิ”‡r<๗?๔ วดdžฯ–์fU–)3‡ฟใธฟํ2iu|๖๔#q>เแฒ๑฿˜x˜Xศ๏‘๋VีX่Œ๓ ลร ื#*ฬลBg™‹‡ผvณๅ๏šร‰…์k,7k,ลAC<์รB๑p็8ภญ'b!q‹Cn8ˆ๓ ปต”ฌ+eํcโฒk๛ 5…ซหอข›-wlXfด‚ด8›ฏท•ใm9{ษŒw"n๘•|ํ~๗"ะๆ์๏zษนฟ็œtE:Ÿ‰ูtลzนฦ \ๆฮํฮ๑ธa#ฮฟs๑่ป๗๖ฦ ฮgœs>๐๊‹sL ?ˆsz&!˜ฮ๐ตฟ๊<`2์3kDv๕’3้z,ืป8‡8ฆเnว5"R „„ผnฤ9วอ!#D†‰๒NœีŸr๊1ํ๖™Ÿ฿ŸYrฐๆI–„rฐ$ิ,™ฮ๐<ง#ี๘ฬ้ณLqŽ๏V็o?๎oบ๑Ju๓้/ฤ๙€‡3‰s๑pZ,ไ:๛ลB9Xศ๏X<\๏โผ ฒ๕ภCฑk, ำLฎ -ง ๔/ฑฟIโแfมยYฤ๙}pซ‰XHโ โ|ภยe‹๓,mwdZgTfi{]า^๚ฮ•ญภD\*ฮK๕z็ผ เฺฬy้1o…:โผ”ฆ›ฟ๒ฏฺ/ฤเdำ›m'ฬrฌY๔ึ๕]กN6ฝœ้บบ+ฮKฟ;zญŒฟKq~ล•WmY,Tœo๖๗G_ฝ่๒x๚๓^4ˆ๓AœoฬEถHWโS/zkGDนl฿๙๖า{ฉ8?aื;;s8ˆ"]"สb?Du=’Qลถ™ส5!†E3็‡Ÿ~L[โŽ๘&ซ ฉ„Œ๒่+ๅ“ูIc(3fžฌ`ฌษทไ฿ฯ…วไyx~ +ค”ืย~๎0@0ิcrถช8ฑO๊ส\๋๘๔_8ˆ๓WdีX(ึXศunซฑ฿นxXcแzฤร สโกXF‰‡‰…เ›x(โก๖ฑๅ3กิ_,Lศ'%›R๛(-ฑddBMˆ๗$sฤ– ม’”rฒ‚T‚ห์Gด็ >Oˆ+๗…ะjฌ—œf๕%ฆb๓ย+ุf$ค}3›ท‚8๋ฑOh?‹พ๘ิiฯฤ๙€‡ เ~k,d ึXจ8฿(x("Œ-]มBpœBd‹‡5Ї‰…เŸx(๒yฑ 9qAu๎da7‚M1ฏ`W”งx/o&ฝทVJฺ่ฮD๏ๆขpo[โn’[QœŸ~๎ัŽ๏]ึOy๎ ฮqพ ุ)#D\BJท"ชS1$‹ฒO๚1!fdC6’;ฑY ˆ,ฏ„แœ}็,H(คะ2KขoA4ํA%  |Vˆw>/g(๓™r,๛ ญ]฿,:ฏอRS2Iœ$€Cf ’ัJHg็o9๖qญaW_|๒ด็ โ|ภร‚…J๐ฐฦB„;ฟ๑ ษo<ด งฦB๖‰‡‰…ผ?๑p!,ภ@บx˜XศI]๑ ษฎ‹‡5Ї‰…ฬ๓Šxจ8Ÿ„…ฤอ>h็ฎ่rฬW+ฮ'ฬA๏ถอพๆ๋]œทู๊F;๎ฬหŠ๋ึฮ๒๓&4Œko+ย่J‰<๓8หแ-{/ู๗V Sโฎ@ืฑฝdฮ zะ™{~้พqqพ๗Š+ท *ฮฟธk๗h๛—๕ฦ ฮqพi!V™-bk™ง`ศYH„”X๏ ๒ ษƒ์a@„qd”JM‰(ฃ$$คพ?ณ< -H<Ÿd”-คB ™„์ใŒู็6‚ฯ‘วถผ“ =ฤุŒฏ‹ืยษž›19™ฮ"ฮ฿๔ฎวท'9๚โc_8|็,๛ภร ๙‚‡5’1฿xh_x…c๑0ฑ“โแ4X˜x˜X‰‡~ŽวЇ‰…žPตฌ^,คr!๑p+‰๓{‰XH๗AœXธาโœLy้=๏ฤน=่1๓ผ3“3›พ‘ฤน๓ศSœ[vŽ8o๖–ภ+ๆ‰…Všอubฝ๖ึdN9z๓|@?๏ฌ}็œษ๚๗ถ็ฤˆห~xๅ–ภBล๙ฉ฿ผht๚w.ํ'?็…3‰sๆŒ7๑๑&พYถฟ8แธป7๑๕&ฮiโูฑM|ญ‰Mœุฤ.๛oุฤUM|ฉฤ?โ|X‹.ฒB'‰!Tš AT!Zd†$jถ๕ผ์ฃคT๒ ๑Cค๋ ๑ฃฏˆฑ‘1ห"cแ$3ฤg!uฎ<มeŽแณt<™rจ—lyญ”|r;b!3F[Iœฟก็|f}๑‘ฯโ|ภรณ๘ญฆSปXhuLฎg<๓า5!žXจ;xธาXH๖ฒw๐0ฑLบxศ ฑ0๛ั yผฤรญ$ฮ๏ัˆ๓IXH โ|ภยฑ,iฯ2wE{›1ฯYaทฎ…yqHoMุ่7ฯ 9ๆp%า:ถฯ|ขฃ่1™๗ๆ๑5•ำแ},ƒพ๋Œฆp๙๙S^fืทมจผฝ—ถย|ซ‰๓ฯ}ใยัi฿Aoอก/˜Uœฟ\ฑอถ‰ฃzŽ๙ู&ฮ››๘๙&ถ7qำr7๑sๅ๒Qฟˆ๓/†ฬ๙&^?:ฉ๙ตxฌะขlส–p™๕%!…D!,ื+ล-ว๚ุWฎม›aฯฅ#V‚ฒrnื.}S;K฿*$สyสŽจ#;ล็j๏%[ฒH~๎‹k1ูขไฝml%qบcžะV๔ลIง=็,ไw*&"ุํ9ฏฑp=โabaŠ๓ ํ;Wจฏ4๚9ฺท/๒; ษ”‹‡‰…˜e&n%q~๗๛2 ‰฿ฝๅ q>pรถ์™X1qฮ˜53ธล Nั^‹๓ฎไ}Šsัวฤ9pz1g๋Fจ‘E/cะธ}%Vื“^Bƒ8Kํqo:๐บถ[นPขk7h.ำ‹ฎIV็Ÿ๚ฺฃ9oOo<แูฯŸUœ“ ฟ~น|}ฎ๗sป&>ื%zŽ{@ว โ| คห^–oน ค'ฉ$tกรต\”m๖R๖9jc๖8:R ฃข•<แฑฝ˜H๑9BJ1L’จ๊MFˆฌ‘F{|ถVๆื^ำŒ~tอIฃ_๓ก6ถ’8อัO์>ง:น โ|ภร‚…dvลรz๑;๎รย๕ˆ‡5Rถ.&*ส-y?X˜'€ >_p/๑P,ดข๖jz[F฿ผ>zŽณ3๘{9ทด}+‰๓ังฯนธ7๗ฌVœฟ‘วx์ฒ๊๚ฅ=ว<ธ‰7ว๕G4๑ฺžใ>ุฤรCœ_ัฤYMswฤ๙ฆCเ์'ฅฤ^(J9!คd0ศnP†8Mแj sขฮ9งืHสํฮวุYล+น ๘ˆrฒ uฒF๙ะ‡ษ1Ž&โณๆŽวฅ๘šk?ถกฟชณˆ๓Wฝ๓‰]ฆญŽ>7ŒR๐puฑP< ื&๊ซ!&Š…ˆs๑ฐฦย•ฦรฤB„9ญโกXHU‘xXcแVฤCฤ๙]q> ‰›b็7\yqพ ภtนโ<ฤใzYmฉ}:ส3๖ aฮ5ทaืŠ๒2‹๋Šy๎ปข' ส {ำ[q๛ฺe<]gดืผ—ึษ=ชถ 7<๕ไ/Ÿ?:ๅ›฿๏ว>๓๐E3็อ๚Y์žธ฿”โ!=โ5ี1‡•ž๓SฎBื-—ท5q~q็รZึโฌ<™sษิ˜๘@JืตŒฝ็n!ฉTJ9้gคt’2Oฬ‘ุทาdtา‚l”h^ใcGW^พถ’ฌ™!3E[Uœฟ๒O๊ช9๊xฯgq>เแฺเกXศV,\/x(}Xh$"โลCถ`!ข@ˆ๓…๐P,คd=๑pณ`แ,โq> ‰฿ฤ๙€…ซ-|™qŽyY๔œฏ'qNฦฟ+ท/๐ญH/sูs^gฯ๗0skK฿ษ+,ฮ๚\ป 
:เ—ืฯ{iวช5ฑฑPq฿}ไkป{ใัฯ8|อหฺ›๕็Mk๏ฯ๓้&nตกล9% –'tะA*ฎโ"kA฿ ู ทŽศษe†ุฒExษธb™๑Q˜K4 ๗Yาyืทœิ‘T๎ใ(!2HS03‹ษ!ฺ้}$†ต6โ5โ<{3Žฬึ็ฎ-Š…d~ลยฤCฑP39g|Hฟฆ(W˜'r]<ฌฑP<ฐpmล๙gD,$ถช8ฐpลนณนeฤ‘ซ็y1]#ฺl๕ปm†น”ใ[n๖นหžaž.์ภuู์ๆฒฏต ‹ธึฺˆ๓๗o๖่ร_ฝจ7๕๔็อ*ฮ_Qยฝผ็˜ŸkbW7 Cธ›ๆ\ฟาฤ/U๗๙%Œไฤ๔&.ภ~4dฮ‡ตœ…‚9ฟRb)โœม•ž,!•„Zฎ™โษL_ศจ%X”Rสษผ] ˆjqฮ~‚๑g๔Bฒ…ด2hฝปึodq๒ทM๗}ชใ˜Oฟh&q^ส’ฮnโฺ๚์e9 zN9ƒzทั-V…‡5&.&ฮลv ฐPl kaNfผฦB…zbaโabกณฯูŠ…dุืปkF็pŸ[MฤBโ&3Š๓อ€‡ฎฒ8Gˆ+ส่%ณKL+ฮอ ๗oVqŽปy•@ทฤฝํ9 z'ฮKๆผuq/ๆ]i{%ฮ๙ํ~๛ีร\nXFœึทF๏?๛ย๘๓งอ,ฮฏฤ)e”ฺ) ่fjŽใ๎ูฤ7Šk๛aฑœRฒ>62ญY*‹?ณ‰๛Œ†ž๓Mถฎ8a.๐ฒ?`9rฬQ.๚•ใฑ]x”8วi8Mเาฅ=ฺๆ™)Jaก%K %๒1ูk๐bโผฯ Š~TNN/Oœฟฌ็|ื๚โŸz๑ฌโwšธq]Zฤ8ŒžฟPฮŠžู๋ฮnm,džดxXcaโกXˆ ฮqžธUOฌศญxX—น‹‡‰…`Ÿ›Xธ˜8ฏ๑,๐p6q~—{฿j"7น๙ g็nธz=็๔;w3ฯ‹จžิs๎๘0ฦ…u3ฝ-?@โœืF้7†vŒ๖๒๚ศ๖ท็้Ž๐.cี็ํI2๋ล8ฎ๐e.zปoqฮพ\–ฯ+๖,\ž8?๖Œ๓F'์^o์ ผ$ey๕!šจ์‚2์‹€ศข e_".จ1โฎhว๑sIิDLŒ~ธ+ฦธ|๎Q‰‚$ฦDฃ ธ$n DAู๊ซS๗ืœz๎w๎พณtฯSฟ฿™๎ฎฎฎฎ๊๎{ๆ9užๅqฯ๙Xโ|ZผˆsAฆ;C‚ฯ๐V3Ÿ_0ำ…—ฎผ8KJ๙Tร$uแณŽ‘Qh 6ๅGa^K๋ิz‰rีU*่ิ|_ }i"(ามXเฌkwkVฐ-มฎsฬ`t|qฺ๗>ง>}แk%ญฝŒฦฺก๓T_”โ<นq.D.ิบศ…โGฤ9i๐โBšศญ).๔zsoYKsง{;|นkฮ…ชMฏqกDบsกฮ .L>Oœ? ็ฃธPุkLq> |˜ฑแ:็ZHi๏ฤงฅŒซ#y72Lโ๗v๗ป†g๊ž^าโ็‹ูไฌsอห(8ึu.ฉ•๏Eyน฿‹๋า ฎ?นวE/ฉ๐บฅ‰\—! :๕๙ิ`vz/่้ฤžโ|lqมปด๙่ล?ฏโ1หRœง8_Wห๒Oฌต€T&๓บYฎXqN„บ8ืูซn@—ฮ‰›$qฎ”Nฅ ฮดญ‰.ภำ>.Hˆฉ๎wI&ม'Ppซ็ๅ)เT๐ษธ5ฦ )ํS๋uซ๗e๔Y็คzTAŸC๐ยล๙Ÿถโ\ฟŸ๛ๅNœmuวeฬŒพ…ัๅ๑9›‘โ<นPลแC็BปsกFIค‹Ppกบ๘pMpกx*vj๗4๗๊6๒ก }๑กsกžƒ ๅฌร‡.ฮล‰ฮ‡ฎ†8?้#นPุณ็^ฝ!๒aฦ†kOœ๗cำLœ#bg‰sอ๕ฦ9—8oo%`Im๏็y—ไี‹"ฮๅ–ทวI3ตถˆs๊ศIow1ฃคฐ3{ผk€ืพง]่ๆ—+;เŸ>ฺG๋:‚ฤ9โฟ|6เบ๕qฤ๙๛ฟqI๓แ‹~Vล=๏ฬ็)ฮง' ฅ{๘จ€T๐€Ti›8DzฌNป‚‚T9็๑ส>Ž‘ ๔qœm…๓สฝ[ฑ ,4‚รYrOํนP‚Oฅบe= “p๕/WFsˆ็ซ'ฮฯ๙๚›๗๋ฒ*๕็)ฮ7qฎ`“™พญO‰sนDฺF#pฎ๙ํ‡{g‰fHชหคๆNzป‚59H ]^ H,ป7Jย)"]ทšฆคlโข“ส)(Uใ$aฎ๕ธ๎rŠT๋Q“€็/ฮ_ูŠsoธๅxว<ฤy“iํ)ฮื๚vrษล‡‘ Im\(*>\ .%ฮแCว(.dz ไpกRฺ=๕Q ‘ ล๊ีฆ8_ธ8ฟ฿‰๗ษ…ย|ฤy“iํ„8_l๗|”8Gจ vญํฤiq‘‰Sฤ๙็ž6.๓๏ฯิ‡—T๔ีYnธบ8ว๏ฮAโ\๋JwyŽ›ร‘z๎ฮ'ฤๅฒหm็5่ล=๗ œ3]฿นHแฮ:i๓zj๛8โ_๛Q๓พoO|ฮหRœง8_‡หต› Lืล[฿๔ฑ.๘$}S๗—฿๔‰n>ญ ๔v9๋š ฌ€Tฉ฿)ต่Jy๗T๗ี H 8*U๐H]$)๎ŽปF8ๅฌ๗mู†€”Zt—z?]คHชฏิญ‚N9_4‡ำ๙จ๑“ฮ‘KŽ$เ๙‹๓3฿๕๎ณฌแ-็ฝnM‰๓}CคKณ!„pแ:เCqกธOน๐๚›?ีF.ิ}ส"ฎNฺwไยศ‡sq!b>t.ค†๖นPC๎\่|นฐฦ‡ฮ_œัŠ๓Q\(์พๆฤ๙ฤ๐arแš็๓]$8{q^๘พNu่็ฟท[GWt้พ–}5ฤ]{Šๆ๊ๅื7ื\7๓xต\๐.อ๚๓]Š;ณฯiGv‰uPsŸใ>่^œxRๅqฺฉ[tw๗.๏eัลO.\ต8Wะผ๛_Rลฉฯ:#ลyŠ๓๕$ ]รA้Š›?xหgPะ)‡ˆเ”[mซ๕rŒ$ฦฉ;ง&“ภ”ฑk Fดอw‘8–;ฃ`‘ภQ;F™ิQส"๕ฺKถี>pIแคฮWจ&ฮๅ‚j-ฝ[ฑ‚Q yPชsNžŸ8ู;ŸืฯŽ๘ฟ_|ธฺฺโg-~โ 9B๖™ฅ+ฑFฐ>p๒a… ื0F.”wไV๋ล‡‘ ี$N|Xใยีแร:ย…๐aไBG.t>t.”:::๊œเร ™ ว็‡ŸpŸ‘\(์vฯ]ฦ็ำภ‡ษ…Cqพ6DzWGญTําญwสA้ƒํฺวžๆฮsrซ]ฮwY^„๙Uืฮ ๛,ฎฟฎร@œ—L€ฎฝŒ€๓:tๆบwKใป‰G Kไททฝ วmวq็13ำy\„rแล๙ู๘ƒๆ๒“*–โ<ล๙:_ึB0ชเณ&ฮoธๅ9า8 Jš!)ฅ“q/ F”z“9ฏ››o0ชฦCคr’nN}คืJาธK$บ‰็>AฎR8๕^r‹โ85R‰r5?Bœ๋(]Uทค•™ดIภ๓็/~วฒn๔T oŸ-Šs>้H>\{|(ฮƒ๗j|(.ŽโB2‡$Lu Šปถ.„#"ฬ#::โช{ธgัก]\่โ\p.t>t.„uป!sแ8โพ'2’ …]๗Oœ'Nืฒฆล9้5qืa›๐F|S‹ีkSM|Hy๏fจ—9๊๓็rฬ9ื:ฅนใขซž]ฝ๊-˜ำ…s/ฬMD#ด๊Œ‚Cwท์Cฯ1พฌGธ๗sำ๕š"๖“ ็/ฮ฿๘ีœตKซ8ๅ™/Mqžโ|=Fฉต\ฤ€”@Q[>7€ึt๋บํ<งHทzฌ T’ไ˜“ึจ€L๎‘f ๋ฑค๓ณ#งFม!ติ„{J;"[าฉžฬCW}้†ุnq‚ท/๋ำ}#ฮ๚\Š๓ไร9๘p p!โtD:ฉ๎E RMœ๗ฮธ;ไ‚ปํธ๋8๓๚ห~{'พRืŸ\X็ฏฟเ{อ_ใซ8๙_’โ<ล๙:^”ฮ‰0_จ8ฟํยฌ" Uะ Œ"พตRqŠดž€U๗ฉรTืbฌrŠB˜ส=QฆเTต˜ค€+Eo>n‘R,นฺrtHํ๔ิvšปQฉ…๛ิa ธไ‚P-บƒไโS9!sฅr1n‘ฮUมจœ2! 
xnqพ์mหบฯฎ†?๛lŠ๓ไร9๘p-rก‹p็Bถ‹\(ท\|่\(.„ ตD.D ป{ย‡ฮ…ฮ‡5.ิน0ฆต Zbๆ:|น0๙p~โVœโB!ลyrแ€ทJ:7XศโใัF‰sf{ว†g=)—่.]ส{q^บกำn€2;\vJ๗|f Kœ#ฦg ๓ๅืฬ ่}ziงฅฏAG qฮาญ—[^D๗ฌบ๓veL\Ÿ:ฏ}ู6คหw็WRํx’ G‹๓ื}้ปอY๐ฃ*N|๚‹Sœง8_ฯ\ฃ๙Š๔›ฟ8g@ŠศVpyหm_๊[Nฃ#\$w“ด›ร้พฤั‰Žล‚ื!JธS?ง7ชU|fjเ)ะร•Q ˆP่ŽCภ‰X๗วžบ๔:‚QAฎ‚]๖ฅ}ห-RZ{\B=u“c&pR0ช๓T@*w,ƒั๙‰๓gฟuY?C>โOฯMqž|ธ\ธ q๎ฮธsก‹s๘ะห}เExะนะฤ‰%ิล…โ=๑DไB๑ฟ๛ศ…Z"R๖SใB๘ะนะ…zไB๘0rแช๘0r!|่\จ๓O>œŸ8ฟ๗q‡ŒไBa็}Sœ'ึ—N˜ถ˜ฯโ้UqNw๕VXGืวจแ‚๗ฉ๎qŽƒŽ‹;ิE ใฌkขUโš4u]tW ป€8็|;!v5็&ฮ๛วrัฏพb:๏โฮำฝs์U›Ž{ฏs๏ึน]ยปv!ฃ๒ๅ๕”™๋}s9‰sฅง8Ÿ—8ี็ฟำผๆ‚TqS_”โ<ล๙‹saAa~๓ญ_lnฝํˆsU‚U=็ižช?งึRu—r‹4RHฉา:IํT0vq้Xฌ`TPภF๗ูนฤน๎ืœt๕›"ym9้šžถฉ “ Tม(nผ^ซํจ9—SƒQŽ/บEX็ข`[NM j๏WwแsŸFqฬท,๋๛ˆW&ลy๒แ"‰๓yr!|WใB!rกž‡ 5v >า›ƒ‘ ็็\t.”ƒ.>Œ\Hช๛(.ค๑›s!|่\จว5>ค1fไยศ‡pกฮี๙p\.ไ๛šFq~p+ฮGqกฐSŠ๓ไยE็ยจฅs|%œหŒ๏>ฝˆn0˜nฉ๋ุ.ยผฺอ—8WZปะŸw็๑มcsะ{qNฃธโrwsื%–‹p๏ลyiFyŸบnฝๆ1ต๊ฯ5ƒ.๐ๅ๋ใ,๑๓˜&q>๛ํๆี_๚~ว<%ลyŠ๓๕mQZ'X„Eมๆmท}น L›ๆย๎>bEปž'@% ”‘j ใ=ึ,`ฅ ฤLi’$'I๗ฉปTชง( #0ล‘ดจ)ณฦ=ญ“เำ๙:\"ฃ€บJAฯศส‘b\PMœำ™Xว)‡Hท‚ŽŸnล^sR}ชัW๐ฮˆ:!.” 3฿ัt‰๓งŸฝl&๋x๙งSœ'ฎ].„ล…‘k\่๎นs!ฃ(#^\8!rก„:sั =kวนP\ต.๔ •‘ แCธPฏ“๐ฏ๑ก‹s็B็รย‡pก?€็ห…ฮ‡๋+Ž#ฮ:๖‘\(,Mqž\ธ* าEแย2* ๗| ด s\๐u/ฏeŽx๗˜็๕WšT๑โNwi฿าืj&-]โบMเฃ}*{%} ิ้ํ&ะ้฿ฏ/ฎ๖ M฿ๆ็ฝ(wฐฮEนีิป8๏›ืูบQOศ ŸฯCงMœฟไ35g~?ซxะ“^โ<ล๙z.BPŠ๏า>๕s&0Uไ้๏ขJ่ BIWŠƒค ”ฎํ๊ZฌGขิ!*8U๐ๆ)ž4ฟมQ'”ƒค`1 tœt8๗q”ดN&๋ฑw$ึ}$œ"j+Ypฏ€<๋XI฿ิ9ฦ๏๗ตx'๛Qโวี1;ูล9ฎฮž ๏h%cถiV|zชวๅˆ๓ำ๏ฒAร=วห>•โ<๙p\8&Šแรศ…ˆ๒…pกป้pแลE F.ิcu็BqŽn#J0G๎|"ย แF๑กsกOฏp>t7.:: ๐œ/Œ็บ F.„#๘.„นq~`+ฮGqกฐใ>)ฮ“ ็็.T[Œ%ฮๅfkŽ7ฉ๊8ๅEdDบ o\q\r:—ปSNฃ4Dน5h#อผkโึŠ๔NTทB]Nบบwf๏๋ฬ|q=)๎ฺฏ2J“8ฏ๏›ศI(KธkNบu“วQ๗ ƒ๎น"ภc*๓า1๏:.๚q'i๙๚<‚8ง๎ณ tƒๅส2rŽ๎๖หKCฝI็/ิ4/์๗ชxเว็ํฒU‹๓[จn9bปใหxษทxฉญU‹ŸทธจเD{๎Œฒฝ^w\Š๓้ซํšSGN`D@ไฎ‘ป๊~+่yงr‰่b,(ภRภEชปบ3\ไ‹ห่!‚Sf+ะSPŠcญ๛ Lๅฺจ[ฐœ#4-rP+ฉเR๐‘@ž JJ๓#า7ฝ๑‹Ž‹ FYL้R[G)ฝ_็จภ[๋™ฌภิล95ซzL +Ÿ%คgR:/œŸ8_K"}qค7/๋ป@Gผ๘“)ฮ“ืŽ‹๎ผฆฟ1ธพ”ิธะกํ๔ท*Aฎฟํศ…คบG.ฤU\Hำธศ…z,>\:ย…~qำนP๛ญ๑!วฒ่แC็B็ฮ…tฐ:บ8\:’ๅฐJqพ/Xฎฎ8ฟื1‡ŒไBแ๎)ฮ“ ็#า‹8—จVg้fy—บ๓>-w<6wsุ์pjห๓tA๏ภบRณM๓4Dq'P้ดnขดwมหsฝcRฺ๛ู็/)๒คธ๗ฏ)ท๘Eฐใf๋พD7.zq๗๛ฯศ.&เฤk้ลนี™Rูญc{tฯน@0x์<ซภ๊๋นx!>qพถ๚8โูVณ์ำ฿ฉโศว?\q~b[ท-^_ูfใ—ˆ›[พลล-๖1qยสk๖)กล.ๅ๕ง8O็hมฉ €Gมคฤต">r(ไ)%m“ S”ถรษ@ +ธ๚๕4Gข]ม—‚0gˆuค4 ๒มชมTเG]#้ไคผ3ถ‡ั=ชมt(ฐ๔ UM”tŸ@ืq๎ม(ะcฝ ฝท‚bๆอjQZ็ :ฃ8W๐M0ŽxW .'MŽš>37๊;๑ฦ{๚โ\จฃิเr-ฅใˆ๓'ผiY]Fผเ)ฮ“ืฮKธPโZg‘ ;7ฝ›‹\จฟQ๑!ม…ˆ๑ศ…๚›F.D G.ค.ฝฦ…BไB๑V ฦ $๑กsก^๋|่#ิœแยš8ืฑ“บย…d‰ ๙?‘า ฿ณ‹f‰s็B็รตศ…ใˆ๓[q>Š …็ษ…๓่ลq๎ะ บ‹s‰ๅาดmเ|[cทAบzแฑ3๛@ #์qๅ/:of{=.๎y฿ั…)"ด‚ด;/5sazฅฃป๎#lMไ๘ผ\œำ8Oฤนwcg†9๎๘ภ9๗&p์k„8Ÿ%ย็‚ฤนฅํ๋<|ป๐ห฿ Hงnm ๔qฤ๙3>๚อๆ9๛ํ*Žxฒqลน\ํํสํ๔ธฒอa-ฮ Ž๘ซ็6ๅ๑yฺOŠ๓ J›ๆ†s๔RœsrOฃFธำˆ RA—HDทœ ค >‚(j๖ฏ‚.mงเSA˜ึ๙L`Uœ#FQwI-clG-žGbH™Yx* %…Sทzฬธ :! 
"ฮฝ–’‘Hบๅ8pิ Liคsาน้iวs๚ดŽฯPA)พ{7D;ขฉ}ํRn]œปs&@œŸ๖ฦe๗๑œyŠ๓ไร๑ธp|่ฮy …ศ…8โ2็BอŠ๗"j;ญ\ศs‘ ้๒น๗ศ…โ+็C–ฤฮ‡ฮ…๐กs!‹๕๚(ฮ๕โ<ธP)๎‹sกD9ูQฮ…œgไB}๎ขร…\Aฌร…”๔|฿้{w>œqพ฿ั‡ŒไBa‡{ค8O.\ุ‚8Gุอ[œ—t๓่‚ป;๎ฉ์จ1ีf—^ื„<๗ร˜ตnไXฦฝ ๕ud\ไ˜สQvท˜pเŽบ7Žsฬjลy{ฮคฐำ- ๔ฺ>ซ]M€s~w‚8ืyบ8—ข|ล๙S฿7šงข*๎{ฺ๓ดัปดร้ ุ5แ๑ี•mNm๑{ุo1q“฿n๑^าโ๕|‹ำ์5็h?)ฮ7ไ€วTA ษƒRา5ฉฏT€*IขR-˜*HาญIWิRk[๊*\1๏Wโ\%โตฮEบwwW@J๐ว|`=ึญเตู้˜ภ ธ๘๖1AZง็ dๆŒ๊=pˆxoาKE’รฅTMบ#ฮ๕™˜จ๊๕™่๓ภ1ยฑ๓Vจฑi_sKœ๔๙•"เดๆญงโ1ญ8™เูKqž|8&.”oปฐwc ์“โBq—>ลqโรศ…๚{…๗œ นXนm#"ะ #ฦ>๐แ|ธTwมนะ๙0r!|่\่=Bœ u๑B็@)OไB๘.t>t.๔š~็Bพฏ๎;‹\๙p=็๛ถโ| ง8O.Sœ/DคwbT ัlf๗ …วQMvฉฝึ๋{a/Aปnฉ๎cžซ‰s„ซRัอ94+ข}0๋๓ๅฅiš S‰v่พ่žๆฎ๎๎:'๊ฯC7v-.ศมฌšrwะฃ8๗Zsq.<„,ˆYโผ…ฮ‹๔u:‚ำฺ9ตีๅ}qคk๓ิG‡<ๆyซtฮๅ‚฿ญเ”yŠ๓GTฤ๙ูๅ]Kฺ๛ํZผVฝฌkEœ?<ล๙†”zภ8ตUŒUknฝ`F่ บ_G  i?บีzQ฿)bš€ŠเSBSท8!บO *มฏ็ฐ lGPJอฅ&)ภฃ๎ฒ&ฮyŒ‹ค’ฺ๔˜ˆ›Nฃ#ๆใ’Sง๛>ๆ€sิ๛Œส)"ญb๋ิฎ`TAง฿โฐ๋s๐nฯคยบk๎ช$บ€T฿—พ[ฆธEx‘f]ˆ๓Gลฒ๛ˆx๚GRœ'Žม…1อyพ\๊๏O|นP‹บนPBัํ\Jะ;๊ถฦ…\\HYMไ"wฑย…~ก’Ž์๐aไB-5.ไœ u๐aไB๎;๊ฑ๓กืœร‡‘ แร๊ป๎๙Zเยqฤ๙=Ž:d$ ฅ8O.\^็ต๚็ธ๔.8(}ึ๘0 QK๎คษm–0๗tx9๎Zg]๛ิ๖2Nญog‚vVฝ5ˆ]ูƒ€งybaŽ8๏หYท๗ๆ8จ=G€ก}Nˆufš{#ธZ&โฃsn5็ƒn๕ๅ˜ยwฒGœ ˆp\๔xŸ๓Ÿ„š๓ำ๗๕ๆ๑๚V?๚น๋4ญ=lทณD“iํนฌr!0โxxr‰๔K+Ÿื๚าฅญ[นsmภฃเIb›๎ริญS7จืโސฮ‰ ฟฦธ!g4Šc ™ฯF ใ"q_+้ๆ8้4L"5ฺLR2ƒตดŒ#ฮ๑†eีfVย้Nqž|ธH|นะธs!|ธPโดศ…๚›ไ‚คs!)ํ‘ iน”ํศ…>~อนP™E๐กs!|น1ๅ=rก๓aไBฤนsกwmw.t>ไฃ8w.Œ|ฦZ๓ž \(๎|ธ—ี็:d$ ํโ<นpLกŽุ4q>k˜ ๔โ’w"ZBดˆฯพN›škอท4๏>œ”p5›ฃ‹ปมE'R%Dห˜3o7่ž๊;ถ›@—wQ>Hig๔š‹swฯน˜a‚šฯซ็ฅึผๆ†วฑi ็ต1s:Gาิ7ท๚แ\xฟ81)ฺnN๛ภฟUqเ>g\q†ะ๎ฌส6›ดธด4vฃ!พznY‹”๛๛††p—fCธ\f/rค”s–8'๘4aN*งถWฺ5 C๘ัฏ4DŒ *4)€’„ุ& ีcญ๓พฉ1ิkl้5ธ๎ธF๓wŠ{D๗bBF)d]L๏ต!ฎ็c]ป@สฆเย\ฑŽƒFp –ult'ฆฮ’TNฮ…”M่บ%ๅBˆื™ |}*;้์||—Jœ0qะณ– บI;ž๘ก็ษ‡kˆ ฃ8ทัi8โฮ…Lคˆ\(Wใ‘ ๕wฎฟใศ…4|‹\จฟ^๏.;ฮrไBo็\ศ…ยศ…ิƒโBŸ_๎\ศ๋ ตภ‡pก๓กsก๓aไB็CึyVsก๓แ,.ิ=v>œqพW+ฮGqกpื็ษ…‹%า‹@๏ำฑƒ8๏„rห}5โY5ึ}ผฮ~_^w-๗ผผพ๚ๅ๕ƒ}’:OMw™+>่d9ฺล7bป๏๊๎.z+ภ]๗ขjุซ๎9โaอX4›[๎Ÿ฿@˜ณmฃ๗1K ็žึ>่ํ๙qแA‹‹rึวฺ๛I็ืW›G๕ฟVฑ#Ÿ5ฎ8฿บล—ห(5nUึo฿โ๓ถ‰-~XบฎŸi๋?ะโ;ฅๆ ึฯ,ห?aM๒แฺฆพdษ้๖/]บ4ูsฑƒR1%๘์;šธำ:5?’XVิฅคJห…ฟm&ฝSjฉภ“ฦeิฃ+˜ขvšว ส|Tมš‚Oถร1ข๖’๚KoฬJื{๊ตคx* pะIนฌ nบ=GS#'\qฟฏฅ„"ๆ๙ wศ]#ึ)๕.ล๘ศG9u฿›งๅ’@งC๑"ฬ|^[โมพฌkFUรc?ฐแŠ๓ไร5ฬ……๛z.tq>7‹ =]ฺธPซโ5ฦฆม…๐aไBzoD.”่:ฒŸR๛นnŠ\H]xไB‰๑F>Œ\ฎŠ 9?kไยศ‡4ว„ ]œWนฐ‹’ ้5N€8฿ใ‡ŒไBa Tœ'ฎ™ลลf/ชqส]4M๙@„z๚v๛ํNl{|q—ป ฬ9/‚ฝoบVะ]ภ'•[๐†pล)Gp๗nxLuู#‚88ีึ$. 
์žzผฮ…ทฯ9wท\Ÿy|7†3q_(๐qม=wqฎๅZk€7ฉโไท}ฅ9ๅ_ฏbŸSŸ9–8ŸnฬซฃSœ zš;A'^ทzฌภFทมc†บF<ํs Dฉ%Wฐ(ื†ZKQVtูฅžšvm ุธi๖งLม,5‡u$\'ฆถŒๅQ ส(wuไ๒่=%ภG|พ‹C๐๗8๐โœฐฃ8ื๙* ๅsา็ ฯNยผฏงิ๗$ฐะc€ฮต4วw1ล๙Iญ8ืผๅ๓tฮ“ื0ฦ’๘0r!B/raแฮศ…โ s็B๎G.„#Š|โEl นนโp!<':๘p.dlZไB็D็ย๊พg9๒๙‰ซ\จe=เยqฤ๙๎ญ8ล…ยถ{ฅsž\ธ†„บ‰G๊}สถื—GวW5โ%ๆ5/ยžt๗พ&ฝ4Ž๋๋ธโ€A๛"อg๗BฝVำญvป&ng5\3พฅ?ฏO๗วžกเŸฏww/#Fึุ1๓H„๛Bm9“ฤ…ˆ๓๒อษ๏Z๗xXŠ๓็ำดŒjŠƒศ๓€TkthดคfjาICTpฅ`—Cมc„p@h)๐R`Iฺท‚KDฝ\1mหุ"œ๔8 ๗‰ ะษq‹›CI <*sส…ี็~ก8JอS:=ลภ›`”บVAŸ]?ืœ *7ฯ\ค๔hยฤ๙๑ฏ[ึ๑W?Tล#&ลy๒แ\F‰8ีธะ๙.,ฮlไB๑ƒ#ย‡‘ แรศ…pgไBjาk\E.„ ๋\ด\].tq^ๆโCๆิŸG.ิyย‡ฮ…ิžw|ธsแ8โ|ื฿?d$ ค8O.\Sโ|Dsธ^\าฉšž๙2ๆๆ.wbปŒd#ตฝwฤi วพๆV›N“8๊ฑ๛็&Lcื๖พSปงฏ๛,s่ฑKปีตƒ‹๓Bูภ ๓ปuy๏๓สqxƒ8ฟ8A=}์พ๎๕๕“(ฮ~ำ…อฑoง*๖xศ3Rœง8Ÿ๒…ฑC+>ปB]PZ็Aจ3๗—yฝป i ง[Ouง#ฑ1“ค<๊1มณพ็้—ล<9D@ˆๅŽบ฿' kฦ…ี็.ฦมลกฦQ‚ัุต9บๆ๚T2ะ}ศŠA๋ั2Ž8?๖ตหšำ๑CUœ๚พ็ษ‡๋เโฅPใย9๘0rกxE|นPข›ฦoฮ…ธ฿โ†ศ…BไBฤzไB็Cธ0f๘8JTร‡ใp!ฉ์‘/.œฝงŸ\Hc=>3ธj>\นpqพK+ฮGqกp—็ษ…ki๑‘`ค›wbwืRบg }kzฦ85—†เฬgฑฝwาฝฌEผฮJO7Q๎u่ธ็Uw]๕Šcฝ ฯอxD%+ ่ˆskฒ78_่too›s][ฃัึฆ8เฟu๖Wซุ”?Nqžโ|็.ะ?3Ÿดภ๖uชปTฉเK๓s ’‚ฉ๕ˆsค็œ ๊พึ3.GมŽIqdž0็…G7ƒSTws๔พธH8Jซณฤๆoว…9จปๆ๎œ+ ๏าูIซล-๒€!A]ๅ5o$ฮ๚“4O๚๒‡ซx่{ฮJqž|ธ๖ล9.์\จo๑aไB๑›_ฌ„ รน4๘ศ…๔ศ…๐aไBjิA๏๐เ๊rก๓กsแw*|งW8ึฒˆฤ…J๕๏๙ฐฦ…|n‚๘Pโ|็๛:’ …ญ๗Lqž\ธvล๙,ก^ึญา‰'ตฝŸ8ืธ4fฃถพ) แŠ`ิฝท๗๛.ๆ8ส.ฮƒ ๗iƒŽ์ลuฏฅˆวfpพ~์‹sšเ‘`$iํ*(5๎yƒ^จท็ƒ3kษ™w~ฅอ9&Iœ฿ ็7ฟW_ฉbื?xzŠ๓็นฬนดมš๓(ีฅGโ‚toื}\zŒcค€Qฎ‹EญSฆšBฑ8FžฺจเT๛ก๎าkทIy๗.ศจ๎f๓‘Vg‰ฏ๕@ิ!Djชค๎้œ๋3่\;ๆ๕ˆฦ`tยล๙ƒ^ยๆ ็คŠS๕†็ษ‡ษ…๊g!>Œ\จฟk ลิ;J ำทรนP๛`ยEไBšฦE.Nอpaไรี]๕pก?๒กMฃฮ.tืœt]:;|Xใย)็;y่H.ถcื็ษ…ณH w๎xIk๏œ๓"ถ{ฮ๔าlฎw•ฉU/้๐ฝ@ #ฮF9ๅฃฤyต>=ฬFWKœ‡‘t9qNg๚(ฮgWs‘ฎ‹rอํBDL]Ÿq~ไY_j๐ฦจbื“Sœง8ฯe•หmWซƒ๏…sขTq‹8฿ ฒ่6ฌเ‹tNf ปฃวฬฐeถ7ฮAฏ๕ๆHิ_"ะืz{ va1‚Q–ฺ่4ไั%bdPฌญ์ว6y ]Vลษo‹ฑฤy™e๙2๎โS-ถฐ็ฮh๑ใ2๎โธ HsYL.ิ฿ตx-r!ต้\”ƒ ลโร๙rก7•s.$ปจฦ…‘Iƒฮ"pa}9ศ‡>ต"raŸฮ>Š %ะuกd‚๘Pโ|้๎;’ …ญฦ็ำภ‡ษ…&ะญ1\'ภc๚บw‚๗๎๐&่๕ใล]ห%RKอuี1ŸkผZM ำHฮาไWwฉฮ‹ข|ฺŽ8๗๎๎a>:]วฎ๓‰ุkโ—ฟY)ะ…I็‡ฟ๎‹อฮบฐŠO<=ลyŠ๓\ๆ”^๕7] Eฺถงj{เHJท๛!ฝNฏWjจ‚1ฦแแฆ”า\`.ฐฯ๗YโตดONว Fib“#ฮ™”T‡Žงœฮฮ} งoชคล9๏มo>2‘โ/q๓GŸx'พๅ/ว็วถุคฝP๎๏ำโโwhฑK™Iนqคน,๊๏Tน๎๋jธP|$.‰\(๗\|(.„#ธ๕ –p!/#๚ุฒXrใ|ธโถ8Ÿx>L.œ0q๎ใีJอ:uู7ูำอM๏บฤ9ใลฌ><6Dคys?ึnkโ๚j‹sfผ3*ฮ๋๋ฃsร ฦฐUฦฌqa>โ]๓Isฮ{อ›#ย*v:!ลyŠ๓\•^ุ‘ ฒ=(UะHก+xcฎญn้ฦ{ใ-Ÿํป+H‹Bว‘๎็ใจ”zZฅปJย8‹wf'ณึ}Wศม:f๘rซ€ผ›ใKญySบF*๘๔ ”@ด]ืญŸ q~ฤ/i๙้OVq›jัาฺๅก->d.ั๖y-KqžหขQกDaห‡‘ ๑v.dฺฉ๊p!|XใBt็BบภG.t>Œ%7‹ล…๐aไBฮฟฦ…ˆrึัีž™ๆ.„#ยบN Jœ฿๐รFrกฐๅnป5‹ล)“ส‡ษ…(ะmฆท‹๓Aƒ4ญฅึผwำ[tŽฑ j sๆ›[ท๒kฎ›ฯ๙ฎฮ>๗‘kQฌ/‚8๏บ_€pA>๎#ุ|–|'ฮห๊อฏ„ธ\rนๅW\sภ5ŸDq~ศซ?฿๗ตTฑ๔๘งฆ8Oqžหู่‹]ฐง L#Ž.๎1"›ิEš)U๚ข#ๆ๋พ ็ุ่‹ณ๎อ•<ุ๓€ฯำ>ง‚q–‹Cgvา6}vนฯคmฒž๑s:/g7?Y‚ผs†.œฉติRC9B๙๎ฐnยฤ๙แ/}Is๊฿~ฒŠcช็_ำ งฏๆ{]‹ำสทpฟ<>งลฉ)ฮsYl>Œ\่ื แ…ย‡ฮ…ญ๐aL{‡ ู/YFฮ…‘ว]\œณ_๏ญมh98ะ็|ย‡ฦ…q^แยI็;vุH.$ฮล2&N(Z๓8Fmเž{]ถur๏RุK*บงtำฅ\๗ฅฃ่็ƒš๓ฒ^uฮƒ8_่(ตY็ํ๏ไ^w^Isฏvs/"T~ฟ่ฐผs็—ทŸรๅE _5กiํ๗yๅ็šC๔*v<.ลyŠ๓\ๆฟpnTันXAA.นJฟyฅŸบU:ฃ‚3ฒ่๕ฅต@ทž€ฤฺo่ใ:Gz=ต๕ฬ้%už๏ย\.‘>9eSคN5‚ฃ;ฑาา๔ญ DuฮJเญƒฮqล๙ก/|i๓~ชŠฝแMซtฮๅ‚฿ญเๆฬRcนQyึJ0๚๐็น,6ึธบs็Bาึ#๊9ํ#r!ŽzไB\๔ศ…5>„๗j.๛ธN:\่*G๑กs!ํ๕y ธqŽk"ฬ+\8I|(qพก‡ไBa‹]WํœO;&Nจ8o*อฯ$ฮี่ŒNๆ. 
S^ำwj'ตฝคณK„“พ8ฟrUโfก๗ˆi๎6O&lcฟืœGฝ2}ะอฝŒZ๋>ท๖xt>žพฯg@8}?๛๕ ฎ\‡ฎ๙ธโ —]s๏WWลyJŠ๓็น,h)n†ๆใqK@Jณ#ฆ Dˆ)เ”Sค”€Tเœดธj_๎9Hq<0ี}๊ TŒzm%)c“ศ pP;๏u๔.ฬ{งศปด#าI๋lƒRw…้/฿=iŠ๛ผเŒๆมtฟึ›วNko—วท๘z‹;6รๆG™ึžหš_"@แBD(ฝ6จ5‡ ฿‘ #F^ใB็รxัฒฦ…ใ๐!\่ยนt|xฐฦ‡.T ๘ะนะ]๒ภ…“ฤ‡็z๘H.6฿eด๖I็รไยษ่คjWSKทv#L;Qลy่ฉˆS\cbญ๏fยีDy็Aคฏฎ8๓เkย3f9่ชSฎนาฺ}|šง๒Gq.ืฏžqฮ'Uœ๘ฒs›ƒ_๑ล*v8๚ษ)ฮSœ็ฒๅถ+ูืA+๒Q@ˆdj ภL๖crš ;—ฤ]rœฆ VS฿.ึขใLว ิ…zญ‰’#w’jช)y0ชวZดฝ;ไ„RGษนs_จ_t่"น\s'0UWvขฅˆ๊ณŸ4q~ะs_ึœ๐sซธ฿๋ฮท!๑-ณล6aพกาฅู.—5ย‡ ล โ‚ศ…โ1.Z:"ภ#๊y็Cธะบsก๓a =๛wภ‡^ท๙PงํโลJmO9ฮ๘ชธะ3ˆ\(ง๙.tqธp’๘Pโn๗9|$ ›ํผ๛Xโ|๘0นpr—ฎฉ้์tKwˆR ‘Kงrูๆก)"Uโ๔๒kV:ฦ}นS•ึGช•ฺ๖มทr]u ๐N”[ใปพ๋บFส…๚๓AวzOawP tvTk฿?vi็ผcZป qพ.;ต+ฮ๗๑gš^๖๙*ถะ“ฦ็ํฒU‹๓[จn9g Lฒxฉญh‹‹ ~ขฒ~็7ุs๏Hqžหz#ฮป:@GWu_‰รkCงaR8;‡ไถ ๛ ำำุ|ฒŽ๛ฎคwาัุ&E7= uj Yท*qฮ:*Xฅ๋1‹งhโ ŒE๒บy„9N™ฮงsŒ่HŒkค๑@zฌŽฤคo๊VŸณ‚ัห฿aาฤ๙ฝž๒ๆ˜๗~ถŠรไ-ใŠs๊e5ข,ฉ—โ=a}พ:š|8ูฮนsก„/"ีนP Š\฿E.DดืธPˆ\Hส|ไB็ร่ฎฏJœG.t>dั๓1eนไ–2ื€ •:"สล‡๋Ž#ฮทฝ๗#นPุtงฑล๙ฤ๓arแ‹sญ::B๎8ศฅฝs็E zguฃ&‘*QชtnwฯซแFอ6#ีส\๔A!ฮ{8GX—1h}ๆ€ ๏(ฦฝ)๗่ฮปผฟฮa๙๕+แY1Nอน0I\ˆ8฿๏Ÿn๖ษ็ชธ๏-ฮฯBl๋–ษa› '๎ฺโ๖ๅๆ>•ํRฑฌ‰๓๏ฆsžห๚)ฮ•nX\ Z ฬpEhzฤH!AA[็จž๐ึ z—‡HN’๖ฃฎ›w[œฅ่.นฎ@็ศำc=f V ˜iŸ^—้ย=ึjโ‘ž9วtฬ9* oฯ‹เปOkท๚สพ!๎ะˆ๓Ÿ๙๒ๆจwฎŠC_๕ึE๋ึ>ษH>œเลธPb“™‘ uฟฦ…\ฌŒ\ทD.„#ฦ9้pกs_Mธ;ยƒึ๘/>ฆ\ํโdไCสzเรŠแC็Boธp’๘ฐ็1’ …qลyra.๋r‘ศ์›ม!@ฃƒŽฃl5่ิZ๗sพKS8ŸmŽ8ฟขคs_qอl.งนwะ]Œ>˜}nโผฏ=WZ=ย<6ตซ5s+ตโˆl„ผg ฬ้ๆ–wย\ูKgzม›เyฝ=Žนื˜Sƒฯg1iโ|ŸeŸj๖{ัgซธ๋ž8ฎ8ืEศํสํ๔ธฒอa*๑iF””u• ž{ค8ฯerข6(Upลธ จmิ\_๖ ฃrฺภิก€ญw•X?ยiWฌ`ฯkป=0ซ)QญV๋4ฉอ$Xu!๏A)”c๐Iะ์5๕น0ฟ๗ๆRg้ใ‚ผถ’`ิะ๎ณžPqพ๏ำ_ั<เํ_จโ/{Š๓ไรฉโBq‹๘0rก๓ว,. |(ฮp‡q!|น0Žขค์fziPญีห#^ฐิ{ึ.L๚q’ี๓a [ ธT๖ Nš8ฟหA๗ษ…ย—๎‘โ<นpฒsDm+D๛kQฌบเ-๎๙`ถyก†เW•z๋หฎZ tฤy?๓sๅ๐^  šย™0๗ูํƒn๓ึuพ๎€ใบ๛hนฺน+[ภ„9ฉ์d,ฏsใไฎฒ๗I็{?๛“อ>หฮญb#Ÿ0ฎ8ฟ&<พบฒอฉ-cซษa›๛๛9q~}‹h๑•Gฆ8ฯeqฯ –ิ็†s๛ Ž1>syฬU€ึcQœkƒ`ดม[L้$องฦSฏMwgดKŸ<b:จฏืนjz๏€o๎ิฒZส~ฝา7Ii๗.ํ @๗ญ+o “&ฮ๗9•อ‘ob๙Ž็ษ‡ำม…ํ฿ญพลKคŒ;๚ผ๓:F.Œ|X:ฮy ฉI๗&lฃธ็ย…๑",ูโร๙p!™Qˆ๓ž %ฬแC็B๓ภƒ“ฦ‡8?๐~#นPHqž\8๑โผ8ศ๎*๗"ตฏAWท๖Ro ่า Nrญฺ(q>˜k๎โ0ํิšห5—๓๏i๙ฃฤx™วมFม ทjวKz~๏คk_๖0ำผo~ง –สฮL๓่˜๛\s„;5๘`าฤ๙žฯ๚Dณ๗๓>Sล6๗{ผ6zื\c%็š\1OqˆŠ8?;l๓๖/ฐว๊ฑuนpqี7Kqžห๚์อ+คซ?ะ7BrG…ZDง Hตงฌw๗ๅšxw^คf&ผ{ฎ Zใ˜5O้t(vKั]๒ ต†ุ๘-vึ๛zฃ7Ÿ฿N *)ซศด[fึwฉJๅT@*ศsaฎฯXAฟ๎ฯ_อ<๙ู'ฮ๗z๒ซšร๔ฅ*๎๕’wฆ8O>œ.์De๛w-~ˆ\จวโ’ศ…๎*  ษ0Š\่Y<ฑœ็\่˜ฦ๑5>Œ\ปฯS?฿๓กsกD9|่\่|ธp’๘Pโ|ซ{9’ …;ํ˜โ<นpฒ—ฮ9n…)ยถฃTRฉปถt๑พ๎\ขติปP๕”nฏทึkอa๏SฺƒcŽ๐ใำ๚Ftิžฮฮฑนk>J ๛8ด’ฎืฌt๕ๅJทTvฮ“๓๐๓ugNZ๛ค‹๓๘cอžฯ๚Tw9q๋<ญฝ]6iqE‹ปฯ๑>ะโ)ฮsY‚Rง qNจงœE๎w้œm Iรฃ.H# 5˜}๊ใm:นSŽ#g“ๆYƒืBFทษ๋ว}๋ฝ†’FF๎Q3J่คnRSY:ฒ๗c้pฮๅ)่T๐ูb}Yฦ็{<้ีอกo<ฟŠ{พ่])ฮ“งŠ ๕ทLใ3็B]ค๔&™p!|XใB๚s8zw็B็C็ยฺ…ฬึธฏฦ…t[‡ yŸQ\8'v3หแร N:"ฮGqกpงปง8O.œ๗\หแt5/nt๏J[ต๎๗ยฦฉษQ๖ฺkO็ฆึฑŠฐฬ=ฏิœ„:โaŽ๋_fถ{w๙1ข<ŒL#ลฝ;WDธ€๘งžžดร๖ซBf|แ]^OAl๘ตž๖ัf๗gm[6ถ8ChwVe›MสไŠ]ฌ!พอฐ“๛WยkถaาEi$๗su†Oqžห๚–ด๖ฮฝhƒ-Ÿ)P#u\ŽŠึำญธo„Tา>ฝ‘ZlฏCืvคu–fjตภ‘์nM๎Žš๓๎ฎ๗cS#Oี๔qoค}z[็ยyRSY:<ฤ9ฮPม4ˆ๓๗'อมฏr๛,{OŠ๓ไรฉโย๎ข[ัฮ…4M›ล……gqก๓aไยŠHw>Œ\8 ฃธpr?ฆญGw| ศ#:::Vธpา๙P๗ผH.๎ธรž)ฮ“ งBœวtnšรUลy๛x0ฮฌ8็ฐัˆV:: ไcีF5‡“X/Mเjฮ9ข|0-ฮ1โœFo.ฮ-…๔ŒKC˜_i๕๔ยๅv~ๆฌ็ขฤ4ˆ๓]ž๒แfืง}ขŠญ}์ธโ|๋_.ฃิพŒ€n—ํ[|ถ;ฑลKื๖3ร>ืโ้aร[|ฏ๙o๑Mึœ็ฒฃ?ห•AS{_A– fๆ›ป๋ข ฎ ึ<๘๔4Nล:K—ฆH5ก๎)“ฌฦว1Xuw)œ๎FนSw6วซF=๘ไ˜u๋iชŒJS๐ฉ€^3’]œg;โฑฏnณ ชุ็น๏Nqž|8U\ุน็-F.๔ู฿.ts.d}ไย า =mญึa~๚ฺsšร•ู็คฐ๗ตๅํqH//)์ฑฎr็ธ็œ็Oฎ\ดฌฆAœ๏๔คื์ิWฑๅ!ง%ฮงIภน,< Uะ$๘ฯปtmรr๎:๋~ื๘‡FH>JLท7†&I8GฑAœu/ž•.ic‡ธ ฑ.2ฆ…ผFwผO?m.œคjฦsะญ๊):ณK”+˜ง‘”‚Q9H H๙Lืฃeฌด๖Gฟช9ไOพTล~ฯฬš๓ไร้โย.“จๅ3ŸMzบ๛€ i้\่|8.ผญฮ…1ล”8฿r฿#Grกpวํ3ญ=นp ฤ9BืF‘ลy็Qฬ๖อเสจ3„,ฮ9โ็ุๅุ,-ฆธ๗ท.า‹P๏ำKCธYzpใœ๒Y#ใข8'  tdฟ๖๚aง๕+ย9ดˆsฅฐ“ส.Hœƒ)‰ ฟถใใื,}โ‡ชุโNqพถฤน:ํัuo้าฅษb“า‡ภIึ-_๊S9=๐รaม้‚R„น‹V@ีป{'ใ่ฎ‡qlƒ@•ฑโ.ีœค่ถ๗]ึฃo*iš~|œ›_|๐นฝx๊3,้œ]ฅ‚SญkฆFœ๏๙‡ฏl๎๛ช๓ชุ7\qž|8\ุ5†“#ธ‹}^+.๒€ ล๐ImไZ-ำh.. 
N{ไรUqa5]}Tฺบ_tเBƒ๕จrก๓aเยiเรฎๆ|Ÿ#Grกpงํ6Lqž\8EโœT๎2๓| ฮKรด‹Ž8วa6็ๆ+ญฮ…นืeำฝ๚t\๔I๗z๔8็๊ฯ{็฿:ฎ{c;\Ei็3ฬc|์<—8G _fBา_];uโ๎งำ์๘๘๗Wฑ๙มJqžฮy. HๅtXท.๑†s{ืฅodขS฿ุปษt*wวˆ๛๎ฆ{:คง{FwfTj|˜<สAƒFFQ„ว:Po›ี๋ั`fฏ‚N}~คk’Nzงึ็Ÿ6ท}๗ีS!ฮ๗~ฤ+š#^…*8=็œ'N๊~ห‡ฮ…ฝฐ5‚ ปLqG ต‘ )•q.tw=ra๐#D๚*นฐฦ‡5—๚kD.์r๘ะน๐าืฏไรศ…Sภ‡็[฿ใ~#นPุPลyrแ๔9็ผo๊ห‹@'u<บิฝหlsฮฝašD8ฉบPงYšwuฟ๖๚•#สzq]tา%ะฉ?๗tKuงู[ซ“y+ส{ทู๊ณ็ถ8็4G๛ฉฅ~ณNฝ]ท}š{มฌ๔P๎"…บg ึ:ฏ5'=฿k็ๆQ”Kxsœ?Iม๚R>Ž8฿๎oiถรwVฑูฝ–โ<ลy. H จH;T`ฺ D%พๅ๕Aค เจนฬ๖%`T ๊มn (A'*๎ .<ท๎ุธ๓ไขถz๚ 5ž ŠrP: าุIั$เฤ%bn/ฉ@@๚อ3›พสฉ็๛=ไๅอ_๔ู*๎๓„ทค8O>œ*.ฤMŸล…โAšภช{{มานaนNซq!|่\่|น๗šึjoฉิอƒโ’ฯโB.R’ฮjฝ๓ก_จ^๔Љ็์yฤH.6ฝkŠ๓ไย ็Vปญ…บ๓>%ผ\ฏษ–ˆํ็€Kไ"ะWฌLง ยDgูti๎สsutิ ;฿e๛`ฝฃผ2ฺใ˜วฑh๎’๋6^ppWฝ&โงAœ฿๕กol๎vู๊UlzฯSRœง8ฯe์U”zฦFj} ๆ-3ฮด‚Uอ๚ๅ ๏ํœฃ.ฤ]๗`T]t^4cเวsฏzŽื๛mtใGีjฦ๚่ฤป(๗”MPœ!’พฉ€Sงฮพn&๘ิ6Zง “ _;ˆ๛หW ๔uœา9Ž8฿ม/kŽ^vn๗}์ู)ฮ“ง% ๚m.ิ:q ๘p"ฎส…‚sก๎ร‡ฮ…พอ|ธ0๒Ÿg%น[2A"ฮฉำw.„ ลฮ‡-&Uœoปวแ#นPุt็ษ…S&ึ%ฤฉ3/โ|ะษผA๚ธDzป^ฎณw5GเzƒดKฬMvฑ‹˜๕™แŒ\ฃi\๋ณfค#ฺ}šw{฿ญหผ๖ซ๗ ‘ฯ*๗‹ ?5งa๎—<ึ {ญ๚คŠ๓ปม๋›m๒—Ui฿“Sœง8ฯeQR5†kƒ19FJีT`ฺีSโ0“พ)—Hs~5๛W3€ปดN%Fƒ=โ๑y6ษJŸT`(”๑<ƒํjRmฤ›ป๓๎Zน{U‚ั.๘TC#ŸบU@๊uๆ :Kƒฃพ’ึแใ)ล=Ÿะ`q~ภI/kŽ}ฮgช8์17ลy๒แt.-ื8๊~ว‡ 5bM|8‹ ธ๓ไยžใ\œปH†};๘0raไCorษฑEtqฮ{ย‡ฮ…^น>t.D˜>\–ี็w๐‘\(lถํn)ฮ“ งnกs{‡˜N}zจแฆ™š‹๓+‚๋์5ุ—”๛<‡@g,™wug|u้@‚บ๋ฅ.ฝ๋e>y๏ฒำPฮึ1ปผึฤฮ…y์ฤฮใ(ธqฮ9—Qโ|‚cรฏm}า๋šป<๘ฌ*๎ธฯ‰)ฮSœ็ฒ(โผ ฐ็JีTภ9H},Uกš๗{ๅ ำ\~ปบตฮR&ฃศž%บ HอU๊ฐแุด๗)iข1ีำิ่ไ(G็‰mt\คkR7)hา6lโ)๘”cคu LuŸ๕ไ๒’๕F˜+ฮ:แฅอ ฯ๘T๗{ิ›Rœ'N็ธฐKฏp!u็U.คZเ@๎ธะ๙0ฆร;2ย๙0ra๋ž.ฯ>แB๙ฮ…\”„แยุ[รน>t.”(Ÿ>”8ฟn‡ไBaณmRœ'Nน8W๓7O๗šs็ลYง^๛ส ฮ]˜่—3@ศ๒\œ ๎ไผ.ฑ๎Ž๚ฌ.๏ฑกœน์J…๗ฺxOa๗Ž์๎šปภพ<4ถป,คณGฮ…†  ฟถๅฑฏjถ:แ5U^วฅ8Oqžหุยœเช ไศ’ิงiโช”FHฬBี ๏k~xอปš_ฌxw—โ9K์zP๊ ศ.า‹ฃิงSzJe้๎0ีบ$—เืSB๛Nร:6@บฆD8ฃ•J“จ>MำS4›ฃธHล!บ๕Ÿ_8”ถ˜tq~๐q/mNz๚฿VqGUŠ๓ไร้ไรภ…ฟ5ฮโBF–อโB&Zฬ— แ!ฝotืู/Zฦ‹๐กsก•ํ ธ๗u.”S.ยƒ>t.ฤ-'.๚‹ง‚;qพ๋a#นPุ<ลyrแ”-}ส:5็๏26ญซ๑˜ฮŒ๑าdิvœseOgGœใž;่—ฑŒHG๘S›Nฺ๛rsิ—W๕kKxR๖^ำั่๑Bˆ๓zz฿็Aำปi็[๓š-{uฟท็1)ฮSœ็ฒ(มจะb]เึs 8ๅu ‘h w๓สฮลz^A+Ž‘‚ำพ^œ`”€ิ…8nธฉl฿คzLฅF‚ำ่ข{‡x์๛นgŸ7ศƒPงI”  Sจงh๊>ม(ฉœ฿8c&๊๓งFœ฿็่7~๒'ช๘‡พ1ลy๒แt๒กFซชŒG|นฦ˜‘ ป.๏โร๙rก=pก ๔ศ……{.๔ฺv็Dq!ย;๒!ฉ๊‘   =“ศน๐+หฆ‚%ฮท๙พ#นPุ.ปฆ8O.œ:q oV๓บ๓าั<Ž1๋๋ฯตอีW๔๎นฤ+ฎนฤ๘ฏธถ๙ฟ้n‹๖KL “ฺg‹ป0ๆ๑•ๆจปปNc9ึwเ‹{ฎ๛z๎rซ‰๗ดzๆžb๏bZป;ํ8็——ใ&]œo๖ภ—4›ufฟป๛ƒRœง8ฯeQ‚Q\‘2งVฅšพ)่์:ต็ฅ†Q๕—J๗TzปR9ฏXqฮŒ[ไiœƒ.†=ฐคžัR5๛€”มtจ}d๛่ธปXW0K8n^หบš(ง้A(ู DปEิ]–tvฃ]g‹๕eGœ๚ 5yโวชxะC"ลy๒แ๔๒กqกธN|8‹ KฺxไBืsณธะ๙ะนะ๙0ธ๋=โh“bงลฒ!xันฐึOรน๒ฟ@้|’ฎ^ใB๘ะธ๐ึฟ๎T๐กฤ๙๖ญ8ล…ย[ง8O.œ2q๎uๅๆŠๆ›#ฬKช๛@œทฯKบ๐•เF˜ื/†pมŽ›.a๋už:๎bุmดฟฮSโ๑t”ืบ่r4ิˆป 'MŸ๛์“ฑp—hท> ๓qล๙ฆxQณูฯจโป=0ลyŠ๓\ฦFL)ร=QึW€ฉ ณKำผอ)๐ป๕‚.ๅ“šห_฿๘มฎึrŒpz(ธSรc‡ษ๗แ=ฆฦ{ง;Bผ‡๏ƒบ๒่ œส '@ี-Ž9ใ€€๚’๎9Em0z๋—Ÿaฤ๙แxas๊iฉโ˜“฿โ<๙p:๙0pก๘O|8‹ K๓ดศ…]Y๘0raไCๆฌ‹\Xใำฺหภ‡.ิถ๎ผณ๏ณ/LFฑฎ[o๒6Š ็"N:Jœ๏ฐ๔ะ‘\(lนUŠ๓ไย้tฮ฿ฝ@งพฮ๘1ฅบ!฿ฯ /5ฏ—็<Š๓๏]>้Qœวิ๒X฿ํ5ฑ้œเ๓ิ%ศ]จ_บศ#๊Yลน7ฅปส๚(q๎ตฺ้nฤ๙Žxns็#_PลํwนŠ๓็น,J@ช JA›‚น’ชญ Tมจu้œฬย•#Sา:Œโuฉœt๙uว›NพLบpv๎้—>าLทฺŽH๏ZโทaฎฯHฉ›zž&pคปใMI0ฺ‹๓๛ฟ yฤ}ธŠcOt.๔‹”ฮ…p € ™FAY\ˆ@wืธpฤ๙wL.œrf› ์p›%=_vŸ)N๙eW ;ฒป˜F ปs~‰m็๎xญ>[น๐฿฿ูBŸนใ?5ก_qFส:NนŽzušฬ๙ุตซLœ{ํนฯiGุ_5%iํwธ๗ำš฿=ไUlฒใaใŠ๓ณZผดi‹ืุ๎-ชˆ๓๊๋ื6—&็ฒxA้7ฮ˜ ฮ็ฏบ`ซ ยิฉ4F๊Dน‚ำา†HJ๓์žW%]ˆ =ําk.ืใpํSlVไƒ_ž ง‰ภทวบ๙˜4 o‡s๎i์ยฆKT *|แ้อญ๗ิๆึO=iŸy๒ฤ‹๓๛ถฌyฬร?XลIGูขฅต—ซ™oท๛gุs็ต8,ลy.๋Š ลึธฐฬw.„gqa์ธ๎‚;๒aผเ่\่|๚๚ศ…\ภ1‡ #๚H4ธzs็CW8็CธฐๅภiเC‰๓ฅ;2’ …ญถOœO&Nฑ@G”+…ฝ๎p๊ฝx็๙๖vyU&ฑziึตqQxป˜็5ฃบ„น@ช<๛๕&sž ํ+ฏ6”“0ฃฺ่ˆvฤน ๔หCใ8jฮฏš‚†pท?่)z ฿ะqลน\ํํสํ๔xŽmwฎˆ๓๊๋ื6—&็ฒ๘ฎ‘‚5ง ผTsy๋3ฮx{ป>ชk”HgŒYqำgu๖L ฃำํม'โœ๕pRษ๖lG$๕’๑u๎น@ผn’@ดt๎>Rk 7p‰ไ•@๔–=~ๆไ‹๓g{ฟ?jwส๛ซxเกหดัฺฟแ๔พวk[\&rmฑMY๗–งู6็ด85ลy.๋Œ ล โศ…œานP๕่ทUธะSฃ˜vท›พEŽt8ืyxไBบs!ฮ7|่\(t๑ ๊๓€Je‡ oไฆ‚ลMwูrทๆฑ›*>ๆไ๗4[lบƒ6<}Cๆรไย)่e„ššผuฃศฺ[wอ{.Qฎ็ ิ]ั็.ฐ]”{zปป็บET#ภ๐Zvฏcฟ๔Wณ]tาุฃป-Q๎ยœm:ฤ9๋้ๆย<6sa>โs›์ะๆwxbทzom๔ฆีๅยvน&<พzโผ๚๚ตอฅIภน,~@ช` ฌ ˆvn‘jส 
2ฝ๙‚QีaJœ+s๙'†๓รcpHะˆุฆฎ‘m=…=6qฃ>œ็kc|ผฦRาgฅn††Fนu๖๛ๅq—ฮฎ@ดธDท|ไqอ-z์L@ฺ Nภ›mนูŽmเyฮฌ`TA๊ถ[ํฉv]ล>.(fฤ)ง่ีๅ[+๚๐็นฌ3._่๏฿นP“)JG๗Y\(.>Œ\E.Œ|นะ3‰ผG์ฎฮ:8ฯ{i๘zธtu็C.Pย‡‘ แCฟ๏|ธpZ๘pทlŽ>์EUq~ศ=xG4๓เิฉๆรไย @œทโ[b[ตEVRู{a^ฤx„ถ—๓,ม๊"9ึžใtrั]˜๓<๗ฃ8wฑ๏pะqฮ//๕ไrtœ“พผฬGGœณ^๎WXใ7๚ŒR;zฃ-wซ ๓M๖}Tณั๏Enuนp Š๓ตสฅIภน,n@ช`Kมž‚6_—ฟ}&ศT=9#วต9dวอ F:์…อ๎K๏฿4‹—ฦน›iํนฌw\(๎oD.ฟภ‡ฮ…rะล‡‘ แซยW๓ๅB็C฿–ใr.ฤg}ฌ%w>$›].๐กqaท>4.ผ๙งuข|๘ฐ]v฿v๋ฝfน็rอuณ}~ำ “ 7 ็ผ็rฯ%ฮqิ‹cŽ€ีm'ฮWฬ8ฮดฝrภฃํ]คใšว4xDzอA๗๕^ท๎u้๎Jๆธเ:Nwหปน่v>.ะ=ต‘Ž?j.๚„sแF้ฎอ&{?lถk~—}šwfLหด๖$เ\โา`4Bpช–\ฆ7Mฃa’fซcฑFฌ)0U@z…3้œ๎’GญืGaxwืE8๎้๊:6Oืtธ3ไ๓ษc0*แ'E˜ซฆRณฬตาูKนR7ˆ 7ฝ๛››๑จ“.ฮk๎๙|]๓y์{ป์Ÿ(๗๗ M;.อ†pนฌS.ิธFq‘ หˆตY\฿ีธ>œzFsก ๓๙pก๓aไยย‡‘ {>„ ษ$*|8‹ งˆk๎๙|]๓ “ ง{มGlK ๖ฉํิžใœ6sJศ๔ชแ˜3wtปๅพ$ธํ๑~ํyฏW็=ฝ ฎน sOmwวผ็ส(็ง๓๒๔๖ซฬ๗N๏Œ}‹แ& 7ํธ&บ็๓uอ็ม…o ฮZ 8ฏพ~msip.‹ส Q *น ๒บ ณ 8;wH›ึ๋VAžus๏๊.q‹ฅ}ส!RMฆืNบภF\ฯๅ„ใ&ฑฮkว=n @l๘ฦz r€kฎเณค”ฒ)0ปW๋tk)ํธCฟ=๛+๑ๆ‡O47๗|ฑ\๓v๙dIcา๘ ฟkฑƒ=wf้คฉ+ 'ฌฏย<๙pแยย‡‘ ;ม*ฮ\ุ๓aไBjิ#:ือๅ„ร….ฬG]˜ฌอ!\๙ะนฐ๐แฌ •๐กqa๏œท|่\๙p .V๓ลtอง“ ง{qื'yPwพš•vE=%\BึวŒy๗uฏ5wื{”8ฟ44Œ๓m\œ๛์s__็^k‹๒โ˜w็สˆ๒9เœ๋5ธโฑcปืถฏOMแฦไยY๎๙bธๆe฿[ท๘r…ฆญส๚ํ[|ถ๛p‹ห[โg-ž<ื๋ื6—&็ฒ๘ฉ3F1๑az^n‰‚น’ขฉเณƒา9ฉต”PW0ซTwงrดŸ่แฤบศZ:&อ‹จ›$๘5k(q…h|ไHัด4ฮมฬ^ีT Z''ทศR7oz#;Qn‹‡v˜Tq๎๎๙bนๆำ„ไร € ฮโB๑€ถ‰\(๎ำmไB๑ฃา#:ือ%ฮแB 9Š ฃๅ<ต้…\่|h\ุ7„k๙pภ…rอื5.บ{พXฎyra.“ฐเ{สz'Xญ+{฿nลส‘c Yo˜ฆ๛ŒW๓ฮ๊ัืใ่ฌG~i๓พqฬŸgวฃั่ ็pžๅฝc๛Uกปต3ง•ฦszฏIๆBwฯห5Ÿ&$็ฒf"}r$ฌ`ฐ ฬ่Lbƒ”ฮ}๋Œฃ$WHM‘ไ)8% U#9ฃ 6ฉo$่๕DW<:qxLวŒŽ oวlE9ฎนฯ0ทฦoฺu}๙gžพว๔ฉ›]0Š[๔ฦ‡Mผ8w๗|ฑkอ3 อe"ธP\ั๒แ,.'ด0‹ qุ#*ๅ๏\ˆ0ๆขจ u๘ะนttŸ7๎cฯ"F>Œ\(ฤf˜ฑืูC๐aเB๘pภ…ฮ‡๋.าลสฮ=_ตๆษ…นฌฯK฿เ วฑบณ#ฮฉีหญyšฯGค#b4mป4ิœGg_[›]~y่žNธk‹ฃศ ๐4๖Hm'U็๋สฏฐด๖8๓œัlฮ…ฝ{พXฎyŠ๓$เ\jม่฿?wๅ5xrhจ;TP๖นงอคqRIฝข%)U๐Yๆ๖u–4?ฒ:๖~†บ twvข›;`Fืวa้จ]ุ ๊ษงl*oฯy0&Mˆ๕œถ-ฎ9Q๏ˆพ๎”ๆ†WŸa’ล9๎๙6[ํ‘ฎy๒แว…_x vแยN˜ร‡ฮ…ิG.๔ๆrฮ…pn #๚˜วZŒzง๕ุ๘2r!}6|ddแC็ย9o๙pภ…ฅดงใร๕€ ‹ๅž/๎เtอ“ 7˜…tn๏ฦ๎B]uD๋ภe_1์t๎N5MุผรyMเF‘ล9๓ห] _aำ]๛Xดพั[น๐ะรบอGa๎M๎ผ)u๋ป;้>f “ฮ…{พ้้šฏ+q๎๓;—.]šL5อi า:็\ต•ฅco?6G๋ฤ!‘ฺ~รน3Aจ\คฒฎ%ค G(คฃAˆl๏ฬ๑ธwŒฺGโˆq‡ปๅๆค๑@-บ๙ฝ Tต]{ฟOen๙๋าค+^yR‡I&เ.๐ฺ๗ัอป“มh๒แว…x๏E.,Ž๚,.็—น็fqกฤธ_… G๑XแภA™‘ถayไCใิ:ฮ‡ นPY๘ฐ็B^ื>paธPนฎนp/V๎^ๆšoš\˜\ธมˆsบฑWฦคน(w๎ยAg:อี\เFq^›oNWv„9ธย๋‘โ<\@ˆ๋โ๓ตqj๓็๎ Onดัf;6๏ภŒ ืตs.ฟj๑“0`~}ฦO๒8๓X7เcUๅๅรๅ๙ทวšว:1ว™|˜\˜ณyฌyฌษ…ำ%ฮ s‚ร๘fgkk"_yฌyฌ๙a"ฟณ<ึ<ึ<ึDŠ๓Q็gšวšœikkkrarakkk๒aŠ๓Q็qๆฑๆฑ&๒๗•วšวšว™ศฟƒ<ึ<ึ<ึฤด‰๓ำ'ฉaIgkk"_yฌyฌ๙a"ฟณ<ึ<ึ<ึฤิ‰๓D"‘H$‰D"‘H$)ฮ‰D"‘H$‰D"‘HqžH$‰D"‘H$‰D"ลy"‘H$‰D"‘H$)ฮ‰D"‘H$‰D"‘Hค8O$‰D"‘H$‰D"ลy"‘H$‰D"‘H$)ฮ๓CH$‰D"‘H$‰D"ลy"‘H$‰D"‘H$)ฮ‰D"‘H$‰D"‘Hค8O$‰D"‘H$‰D"ลy"ฑพH—,yU‹ฺใŸด8บฒ๎’ŸY"‘H>L>L$ษ…ษ…‰็‰ฤZ&เvูจล%-ฎj๑ึฬ‰D๒a๒a"‘H.L.Lค8Oฌ BzI‹ŸทXโ-Ž*๋oืโฅFFkฑ•ฝ๎ใ-~ัโ7-ฑลพ๖‰-ณ์S๛~ก=๗ิ?n๑๋็ถุžำ?Oo๑ฃW‹Eˆk™€๏฿โ†ง•๓พ}N‰ไรไรไรD"น0น0น0‘โ<ฑ&ษwฏ—A‚ํฒs‹ส็ต๘—woq‡๏l๑a{ํ“ZlZž{S‹‹์นห[Y๎oูโ rA-ฎิใ๒บณE€?b‹K[ชล๑#ŽZ\3–ฎ&ŸSณ๙Bภห฿J"‘|˜|˜|˜H$&&&Rœ'ึ$๏โ—" Nx๎ฟธRZoืโๆ›T๖ณE!ฯอหใi๑ด›…ํDngูใ;—}๎l|?{^D๘าตuuด]๎ุโฺ)๕Ÿฮg๒ท’H$&&&ษ…ษ…ษ…‰็‰5Mยบส๘O%U่#vฅtE!#ฟโxc‹ZlโฯKZ4ve๕>"ฎฒฯฏด8ฌฌB‹g†๗W๚ำFภปs๏k๑šตHภ))Uทท4ฆ›Zl“ฟ•D"๙0๙0๙0‘H.L.L.Lค8Oฌ "LฉI->Pbฌl๛ุr๕t—า$c‹Hže;ฅ,Szิˆซฃwช\ฒผn,] R!_\QŽ้9๙I$’““‰ไยไยไยDŠ๓ฤšฌ+zPฉ๑น}‹๗Š๔สs"ฮhฑSyผM‹SสgจŽจถH๔mgูฯc,้ษ"นrจR+t@yฯ7๋สlจ+Z'WGหU฿[[โn]V^‰ไรไรไรD"น0น0น0‘โ<ฑฆx฿(3]nบคŽœฯ/WI——4ฅืY=ะgส๚Ÿถx\ เ/–ด%ฅ5[จzzู๏w๗u@ภ|T้>๚ญส๖—+ธ๛ๅo&‘H>L>L>L$’ “ “ )ฮ‰ล!เ7ชƒจ=ึไg“H$’“‰Drara"ลy"ฑvศW๕O฿ำœส๒๘˜2‹๓N๙๙$‰ไรไรD"‘\˜\˜HqžHฌy๒=นฬฆ|oIฏR๗ัn๑ศ|‰D๒a๒a"‘H.L.Lค8O$‰D"‘H$‰D"‘โ<‘HL\ ฺถ๙Y$‰ œ 5ฎj,‰D๒แ’[^~๋Xœoฝ๕ึอมผFฑ๛๏lึ์q๛อš{q๓fŸ;อ`ฏ฿ผ[ง็๖lowคลฦๅv“™uฺFทlว~xอ^wุฌืฝถฌร=7฿ผูwำ‚;os‹™็vำ6ํบทูด_ืฃ]ท–3๛9pปอ›ƒvฺช9xื๖ณูsๆเ=ถiญฝฟหVอAo>ƒปoัดใ–อwด9เ.3๛o๓™๗๛๗6o๖l๓xtŒ:wญ็vฝฆอNKf 
วzA;ฬ์›sน็›๕็ขื๋๓่>“;ฬœ‡Ž็เฝ๏ึิ๊5ฺV๑^€ฯ]๛ั6:6‡flฃ}๎ผ๒๛โj๛๖๏•วlงc๔]qBš;๛V“ใัgqะา-›ƒ๗ูฎ=็ปvŸฝพ/ก๖›ำ๖๗ฺjณ๎ปแ๛ัใทšy>?m#่{ใปุฏถี๛๏Q~ƒ{”฿$๏มฑํs็•็ฃ;‡๒๙๎Y~ปปnผiณหํf ฿ฟmภ฿ย.mฺA๓ปัจ”ล๘ป?|ษš=–lฎm”ฤป๖๘P฿!<นn‹\F.d?ฮ…p|ะ]o:ไCธฐ{ผอl.ไ๏ ใ%็ยฝV๒แ€ #ย‡‘ ล 5.ิkป๗ \ŸF. >\๒wนฐ็C็B๘๐Nฃyฎฦ…‘39=::ฦ๗ี}Žฏใรถœล…็ฐ >„ ป๏ธ็ร•\จ[พ{็B::๖|h˜‹ uพ๚F.ไ๏#raท]เยลโร,9 ูธฅม8G:นpอrแA๛๏ืtฯ}šƒ[ดพํJฃ9x?ฐws๐พ{Uฐ็ส๛ฺFh_wPทฟVโ^๗œม๛ทธWsะ o๛๛๛ฏ|ฬk|?ํ1vเ˜ํx*็0ƒ๊๏wฐc1์?sLพฌืฯU}ผืฝ๖>ืฟv_;G>๎ฑ๒๓ฎก|'ํณGฏํYพง=‡฿ำœ฿้ž3ฏณ^™ใ‰฿Cฐ฿Q๕wทo๘^†฿›Ÿgท๏9ฯž๓๗‹ฟฺ้฿€ถ๑฿฿v8๐ ƒf๎sZw`ป๎รฝภ3X .ิพ;/ูธนฯ’-šไภu,ฮ๕#Yๅใ›ำ|vปใšฯ๏p|w3w=ฎC\ด๎ซ๛œิ|๋?hํเ่๑…{œุ|q้๑ดŸOnul๓ัMi>u—cป็|ฝ‡๖ฃํxฯ/ํ|B๓ฏž๔!'5—ž|R๓_|p๓ํรg๐Ÿ8ฅ๙ู#Nh~๕„c›+N;ฎ๙๑๑'7฿=๒”ๆ๛G=ธ๙ๅใŽkฎใฃ›๋žwtณ™G5ื<ํ่ๆช'ฯCO์žป๙ว4ืฟไ๘n=ทโ•'57ผ๚ไ๎พถนไฤ“›๛{๓~'๕๘๚'w๏๛฿>ฉน์แ'vะ๛jปžะ{7zะzอีงำ|ฮฃ›[>๒ธๆฦฟxh๓ƒฃ \๔Y่ณีkuซ๗นัวw็tใ๋Ni~๛ๆ‡7?ิ gช}้Vว%่๓.บ๏ƒปsืqh฿:~ฃ ๓ะ:}vz>K๖ใ๛Zว{พcฯW๖:ฑ‡พOญโ๋พ;ณNวขฯQ฿‹>ฃ[ฟ์ๆึฯ=ญY๑๒ป๏^จ-Z‹ว฿}ว๚.๕[ะoBะgจ็t_Ÿƒ>O~”cบ๛|ฯ๚ฝ่x๕๑=๛ย็งฯVจ-Zฏ่|๕่{ีoYฟw–๙๋=๔›ๆที|๐w๎พ{6Z๒ๆ"๐{ถไ{X+ะ_ะ5RMโ] >t.ิ๗UใC=ร‡ฮ…๚๎แ:็B็Cม๐กsกNเC็B=† ต>t.ิ฿Fฯ‡ฦ…โL๘ะนP๏แ|่\จใฉqแ๛6>ชส…ฟ=๛F.ิน๊ณˆ\จcี฿ฌธPแ)็,็B}&:ศ…๚,t.๚\j|น0๒แชธP฿๓\\่วษw็๏ :ึธp.>t.„็Bพwœ k\จeพ\ศ๏%r!๏G.ิ๗->t.\ >ิลIqแiK๖lŽh๙09pqธ๐ฦ๋ฎmn\~อJ\{u_nบวอ-๓ํๆ–Ÿ\4sหๆ–KฟีแึKญน๕วฺ๚ƒ๊pหใ พ๗๗3h๏wฯ่๋ถzํอ—}ท๗MWwsำ/ฺ๖W—อเ๊+šsีสk~5s\ํฑฐbEwซuฎ๚y๗šnฟธด฿อ๛รๆๆŸบฯใŸgธVฏํ๛ƒ8์xบchฅ;๛œบuๅX;๚๒™ใฉํGะ๑m[ŽฟN๋8_ฝO๙~บs-Ÿ‡ž๋sœณฮฏLk฿O๑=้{๔]๙๗รwคmยvƒ๏•๏ึ^๔]—฿Mw|ํ๗ะŸŽM๋มฅ฿ช>๙่s้พ3A฿S{ํณ}พ๏จsืญฃ~ฟ๏๒ฝ๋u#Nฺฯ†๋ฏ[‰๖wธโ†บ๏ƒใั๗ง๏k๙๕+škฎ[ั\^ฝ๚ๆสkgpล5ื5ฟอ๕‹oษVอAK6oถZ๒;ฺ฿fษƒS.ฮ . `tซว R๘ฯ™  F `H๕<-๏)่y‚C bผ$ZX่=๕Ÿฝž'XT๐ก@P ำcฝNทฺ‡"ํCAŠึ้ต เดมฆ๛ฺทŽAŒQ๖ฉํตฮMม…R}V:'ง‚ษ๋_t\๗žz^ฏํ๕นฤ๓ึ็ฆํ+˜BTz ็‚Qฎใ&๘ ๒\œ+pต€ดˆ์๒9๘ถ๚žค;ะr>์_วฏc็ณบ๑๕inภiอ-z์œมจ}g๚Žฏ{๖Q๔ฝZง๏TŸน^ฏ`•็๔ฝqฑG็ฅcัyLว`Tเ3”๚พ่@‹๖๋]ฟq ๓฿้่>ีล เร‹(ณ%๗m˜ฆ{พ๖ฤนึร‡ฮ…‘แB}็๐aไBxมนP๛ไoนPฟg๘ะนP๋๕๛ยอนP๛… แXqWไBํ[๛ญqกนPtฌ‘ ็B]คิ๑F.„‹"j;OไB=ึ{F.ไ๏{”8\ ฮ…:พวศ…พ/8}qˆs |่\(ฑพ*>t.ิ๖pžs!q" ๚ธุน>œ๊9}>5.ินPS\จไ๏b1๘P<(Q~ฮ’ถ\ธyบ็kQœw‚ …ศ)b/ sDฺ@˜ทpั‡h๊„W[ฑ* ˆ‹@•เ‘ุ้Žืซ แ&ไB.๑o่ืน8็Aาๅ}โ่XสŽนปตs่…;ˆ๏e็ฝGู/Bฐ{.”๏ง็E.JD‘๊๐๏+ˆ๓มฤyE Wม6บD๑@HทˆF-:'^บs-^๚๏ฒฌ๋ny~ท~Aฉ"ฮตŸnฟํ~๔šฺา‰๏๖3ื๗!Q.t฿M๛ ๖ำnฃ็$ฮ‰๓ซŠ8—0_ q.ื\ข‰Kvl$าำ=Ÿ`qฎ Ky*xPเก@A็๚– D๊ฑ•ˆ๊?`๖ซmถ!ฐีึgฎ็ุ—Cวฑ‘่Tภฅ@่y4 4L r˜œนD€๋" ‡ˆภOA‡^‡ ฤฑWPฅ๕บฏ[•z^ ว:>ณ@เฆuˆk?:Vณ๖ƒฃใ๐เะ8 sฦัื๛i?8"ˆsฮOะฑz@:฿`ิƒIฤ๙\)๛d_ัq"ฐึq๊xu์๚\Wผ์„ฮ]ำcaิขื่\ๅ)— (่๕Zง ^ฯ๋{เb )็ฏฯWว.Œ 6 FGคZ8?ํOŸ‡ปOžMข฿3Y๚อ๋๏‰ฟE เฮ5W0*ท๒๐tฯ ๙"๒ท ย…ˆsq•sกDฌ๓!\ศํ แŠƒ:Jtม‡ฮ…๚ป‚ ]ฌ:๊o>Œ\่Žนs![‘ น่นPŸ!|่\ˆ8\จฟaํ'r!œนsv.tฏq!ว^sอใEJ‡sกg-„ แC.zุ่เC็BeฌŠ oxอƒ{>t.ิ-฿™nแยศ‡ใp!็XใB-‘ ๙ วๅC\s]ค>ษฝา=_,q^Dๆ@ลy/๎wcMฌ VD_ูืt ฮ%ฬwฉMคโF๖วษE„1Bต๔Eธอrส[Tlo„ด‹๒โ’ฤ9เ9އืน ็yO{฿|Wฌ˜‹sฟx‚Sฮy!0—@".ฆฬrพ๑q [ฐu}#šuฌƒ฿S‹‘โ\ฟFส…œI'๛ฯ‚ฬ‰๒y๔Ÿลwทyํ~๔}๖B{t.$ฃB๛v.tแ๎\H– ้๔dมp–sกง ร…ฮ‡‘ แรศ…ˆเศ…rผ;> \ศ็น4็ศ…Qฬz๖๖นPk\่YDฮ…\ฤจqก‹sธ‹ต๎œ{jปKzFqก๓!๏ำ๑แB‰t๘PŸๅชธPฉ๐ฮ‡ฮ…๐กs!๐[]S\ˆ8\(8Žห‡ธๆpaบ็‹ึs Lาฺg9ฒOow!D›ž๏šnๅtJ$ใ2 €Iพ~ล ฝs‰UŽ‘ฎc-้ไฝ`+ŽณxH)็=๛mฝML๗"š ๆ~๛ลมEKeว•ี๙ >ห๚,’)0+_ฎฏงถWส๚ว๑ป๛ฮออื๏_่n๕่๑ฌ๏๏ยฟฝ?K˜ใœK˜ใ˜wปwปcํE]็ู_”เBNฬr(ฯi[J๘L๔ป]ๅE€rŒซป่\pฮqส•ส~ๅ"Šswอ%ฮ…tฯ'ุ9gัีm8๚\yHฒ( "(Œฉ;Mบ๋ฤ•r๊1๎9uh ๕คp+H`ฝมAš€๓”nS็^O*)N‚ง”r zญ^ง})ˆRPคฯŠเJ๋ตoqxปj> (@X๊ฝpด(ณŽฮŠSญGœsผผg‹ภ@”tF‚Eฏkญฅต`นSSถฏ vW‚eŽ“U็D}\ A,5ต :ฉฑี}}>คป๓นR'๎˜kY•[4฿€”฿™‹ŒFืฬื=o—฿m๑ท๘^‹W—๕[ต8ฟลสํ–ข[ไ\จฟS็C็B๘ะนฟึ ๕8บk:’…ค[็B็C็Bbuธฟวศ…๎{ <ว@†Œs!iไ‘ %ศล‡‘ แฐศ…8ฯ‘ uฬ๐›sกืี;’Mน๗‹\จื๊~ไB๘ฐฦ…๐กsกืวZs็รศ…ต2ŸZ6€\tสฏF‰๓ศ…๐กsก๖:ฦ kƒ ๚œ วแร่šƒ๙บ็ษ…๓็ๆ˜ปŒโŒฌAว•ล5ฅ๎ื๋Ž=œ”๒่RทB–๔a„น ัJ*y‡"bแ:W7ต–*๎ขŸบb. 
ฤท‹nž๗zไ"ฤ}zOวศbX1ผ1k{;ืย฿.ฑ}'Qm5่๗"ื^ฤ๙@ค+๓rฤ=๏a๏ี‹s๛๊{๗2oณŽื๊แ็Zf• ุgโn6)๊})Cฉs_ี{๐๛gม9ฟชˆ๓ฝzฆฮ|ฑฤนปๆ`ก๎yป฿โ-~โฅ•็๗n๑๕ฟm๑ย๙ผv}โำ‰$`‚ DtE^D็8ค€“ฮFงtP’ฦ‰[„3„kN=บืkฟZฏuฺฟz i2Cz#‚œ@เ”ดMฏ‹ร AœS็ว@ฝ%–^OS&o๎ลE ‚s=›กใโxH!T0ฆภPA’ นM]รบโ6\Qฏฮนแ QCฏmจ%ลีว๒ ฏ๓Qlืาฺ ภฝQ็,ส…ท;G๎ น#ล็ค๏๏pฎซชติ‚‹‡ใ„{่Yิๅ๊9้๑็rพ…qOŸ'ญำล๙ธ ขkพP๗ผŒบsน;-ตล}[œก๊ถล๋7ิ€.ค^>dกฑuึpกFเC็Bwฯ ใ…Jœkฤน~7ฮ…:๘ะนฟศ…๑ท บ3น>Œ\F.ิ๖ๆ )-‰\ˆhญqกนะพ iจศ…ฮ‡p!ต๕‘ ฝูœ s.(ธ๋ํโœ pฮ…ัEูEqฝžๅเ|ธ*.ินQg๎Ÿ•7๑s.ไHฮgmsกเ\8Fื|ก๎yrแ<ลนฅe{ v/ฮqปฉ.ขฌ๋ˆ๓Z3.RšKฺpŸbŽxฎ5[+โื\้มJk๏ๅP“;ส–๚<ง0oQsษkยœcเbฏc@จ‡ืFq>ธ‡า‚Y฿‹งธ{J?.1QB*w}Tฤนฒบบs ๑r;็คปว์‡~vqฦั๗Qœ—c๎Žำ‘๎น๊พญไ`ก`4hHVA_–ฑ ๑=WCธy‹๓๖ปะqDqฎว`Œ •ณ\๓…บ็ํฒq‹KZ์ฺโ๖ๅขๅ>a›m[งลk]œฯ๕ฺ๕‰O'’€ฉฅิีwV4Cยi&อ’`x๚'‚gH๛ˆŠi˜ฤ}š&!I'ฤััzVš#ฤเใŒ“่มึ)ุขถ’ใA˜+x๐tJฤ5slงฯAxj"อๆpoผปk๓m~๛ฦ‡u‚œZvA‚[uƒ8It•Xบ8งIโผึษƒQฏวฏีœ{Vืฅ๒ผปNžYแต—ฑ†ิEตณ๓ Hk 5๗œฟ;…Nน( hแทณUญ Jฉน๕nใิXŽrอ=oท9z๛ปc‹oqhนโน]Yฟoศฉพ3ํ8zc8๘ะนZiม๋ี๕s๋\HŠ{ไB‡ ฉgื๋œ กภ๋ศแC็B\mาฯฉƒw>Œ\F.ิญG.ค!gไB„pไBRา#๊ึ๋จแB฿อ%ฮ9็5.๔tพO:พืjฮ๙๛v.t>t.ไ|#๚็๊\๙puนะ๙ะ๋๏๙แCธ0fVญ .ไBsแ๊๒แ(ื|ก๎yrแฤ9"ู๋ฅ%6hvๅฉษฝXNz„D—7+อทfu&ท๔p ZD.ฮนฤนnG:ีV๋Mƒ8ๅc็ณบขG\@€ใฺ๓ุAบo{mx]/+๏แuๅณj๊C๓9R฿ใE‰Ac8๊งนh‚ƒ^k๒&7Yโ๘YZป7‰๋ลถwฝ/๕๛๑๒ต๋ŸNฌ8ืข€JมŠ้ธKช%ฉ“^K(เŒs…\ม)€4C๒ด6R=I๕เ๊ตU๋้ข[ฤ6ˆFhqผŽ Tš7๛ัcํWž๖MZ%..i็ =ีHg^m๋5˜๚œตต“๎ ั์Ž`T๗ตŽ.ฬคa1ำลนืqบ‹ํ)ซo๑5คแท6๒อr>ปุฑุว€ุ/0Pc;_มN ฟ;ฎ;b,5q>Ÿบ๔๙,ีYฟ3„š~฿rว เ3O_ฒO5ฮ\rฐ6๚g๊ Nq…๔ขืqณ]ฎ \ฝ!คคทร‡ตฎํ^‚รE<~๛‘ แC็B๑›๓!5แ(ฤม‡qย๗ฮ…=zŠ;๋๐aไB๘0rกyt.ฤี\ŸีธPˆ\ศ#zI็BาHฮนะkำci\นะ;ม;ย‡ฃบsกg8:ึธp>|Hส๛|๙ ">.ค4ม๙ฐ&ฮืานนpu๙ฐ]vบ็’ญGrก.`๎ฐไNื_ื๘0นpžฎŸื5—๔`ฤylšล๙ภ=๗1Ygˆข26ญwญ-…ัymi†G _k"!ึ฿ง6n๕็#ล9ตํเอ:^๎‚ผถŽ๓\ppDง}–๛.@๐@์[ฝฝ;ล}Š{ญ&<คฯ•†@ใัฏ]ฮjย]๔"Š ๚็ž๎MIทvs-ฺY๚๏ต”:tโ~ถฏŠ๓Ut_ˆ{NC so7ฆ8๊ใ—ฝ*ฮ…ƒg2‰น .<ตล{์๑c[ผežโ|ไkื'>hqฎ…ั>1ต€UAžcง!ต„4€Q`ŠD-%Wหค~าๅ”O๊๚tหี‚†ุi<ฆ-’IJ(้ฌL}ฅถSงืsa€ดQŸซ-'M]rๅ๚๔ŽGอฬ์ไบ9ตผฺฟnw_๓zU‹)wว๋"้จ,ว—›ำแ|œโคใHQฃํ๕๗”ฒoๅ๎ฑืจs‹๋โ%๎yJผขฑŽq!ยg ๛๛GqฎฯiUKLใ็"Dญv’เ™ๆW\ไเ8ว]Hีo-Žีƒ€_๑œํ฿ฏŠxอํีFณ€mัโ๏[์—้|XใB๊jแBฮเ@nq้๒zํyไB๘ะน1ญ๛ฮ…๎พฦnใ.*แBR›้ค :F.คv=rกšม‰oz๗น๐ซฯ๏็w;ยa‘ i นะo#"เ kนฑฦ…~มย๋ฟ} †_๔‹ๅA\ฐp>ฌ ]ฯJ๒Q|/TœG>ฌ]u>๔sาovMq!ฟ7ฤ๙85็JŸผ๗’mGrกฐ๓’M›ไยลๅยXฯ\kšป‚ปะ้ำุmฆ8ขธรEL!6qวaD{ฝ๙ตๆL3Kz๙๕•mฏ?๗ฎ่ฑ#ผwWg–ต eŽ/ยttถฃ๋^sูkBพ–2๏้๔ณŸฌ:ธใขวšnฏAUาฃำpุqร‹˜ํD7ฌฆผปฯEu0ฏ•p1!Štฅนวfsๅฯ|.. š– L›‹็˜]ัอMŸฃ›๛๊dขp)~Ÿcฤ†_{ฺ’ฅอ36ฺฉŠรfJp๛xDE`Ÿ=Oq>๒ต)ฮy5๏œ`ฮำ๘hzEำ3‚< ภ•๒zำ$ึ้ตฺn83wuK๚จงgใzH8‘ฦIJ(ށc|pœธ๏:ƒ—IีE*จิใ[>๖๘nŽๆิ๚ฯ/lnป่3๘——ฬงŸzR7ฟVณl%่{๖#:QฏวคpRc้N˜w&=“@”:Co„›ำEWศวzC%ฏ ๗ๆn‚งฺึRDcช{ๆqT‘7?ข6R๏๏n|ฤ9วN ์ว,โ๔‘z+Œณx๓#jฮ=Gœ?w“๛ฟฃˆืmฒ0q^๖๙J‘lฆrฎškKไB๘ะนPฯ๋ฑg ๙๗น0๒!\ˆฐึ{8๒>)ย…'sญแBm:๒z&C8"ศ#Jœ‹gqแ๙ฯZษ‡ฦ…๐aไBํCƒ5.ฮ…ิŸG.๔ฆqฃธฐ&ศ#๚c็B.xD.t>Œ\ู่W5ว๗ก๗€ u<ฮ‡๓Y8฿wG.t>\“\ศ฿๗^X]q~Ÿถษ…ยฮอ_œ'.@ A;ซ.—Z๓JZป฿jŒๅ–ืšฏตภก–8ฅNืลน rOw7๒ฺ๋Wฬ๎l๎‚วค๙ˆ4kถๆ <ตผV{~mฌ5ฉ่uฺŠ8GŒป›Šเ•:ฯkgน๎ตัr><ึข๛wฤƒožูvNาyฯhnยำป(จฅฉ้Ÿœงƒ๎rืŸนxAฉื_ฦn๎^ส6>™ใ๐ดNเข=6<ชอOฏ 8W\๙์ฝ ดmgUญDAส)(ก&` U$$@ DJEฏxEAคข+ฑลหล‹ข/Šั`( <ZDฑธ*—‡/จz1 ียvzัQhฟฝqqพ˜ภํ$โ.‹Ÿ{สใƒT-uษ๒rS฿zศํ˜x๙ต๏ณ%9N7U•(._[rจ!ฮโdฺ๑๒uBบ๕ษฑะ๑ะฑPšq‘sฐะ๑ะฑRลoำฑ฿ฏc!Š…nต/T9สU]X—ฑPธ'สXvŽฐฎ’swaฯา๕ ฺฃ^^[žG๛ฉ๎"ฮ!?Wlต˜Tชีฆt(ไ<pฃO>]ใ}๏ฤฉผ< AWฬ!็ฯธฦญ?uอcบqส7/Eฮฏ9ฤ? 
qŒ™บeIr>๙ุซžิL๕gbฬ„”คR-าํ.ใ’ฒป„ำRk;J””,|yฅ'm๔5๊๕)้ภh™)3fIดจพp]*IT%G3gUี)ีข!,ีoI4ฯฺณฤศ.%ฏฬึึํช ํ๐๓W~เนฅ‚ด๗๏^ฒุ๛ฉs6n{๗3KRชŠ‘ถีพ˜C๋•k'้ผ?O‰ื“9‚ฯŒฤ4Wˆฒณ/ฮฟ^=สtd™|ๆyŽ/Iฉ'Yัเ=ถqS+OHฉnmEฮ]ž๏ฆGz‘“Q_zn%๕z^v"!ๅ๗9gn๕\r์oป[3๙ภใ๋œด 9?nˆฟโา!>ฎ}ฦํ7โโwก๓รึ ้๖ฐะ๑ผัฑฆzx่U๓),ิํ`!ธ^๑๕ฺู ~ฺW%๑!I฿Jnแื๗่Ÿ_้9ท'็'ๅ5ะ#ฟๆ…œ๋=gณC๓ ˜CฮŸuอ[/ž๛-วtใK๓ุฯ้C|*œืฯŽžขˆหG๑๏C์‘\=._๊ฑW7<=่˜•pLจย่O˜J’ ๒X†œ๋vฦy5 ดคเศูIX ”8Šำ ชคM๗ฑh€DP‰—๖%y:2JคƒŒ,ำ}:g–o‘rIฉฮuBฮYmฏ๋’l–สz/฿ฤชัวnฑ๗า–ส‘ชIW้“ŠT ๎—^pzyใƒ<ฬ}‘Tฬ ุศ้k'Ef7w'ๆHBIf™1์=›91๖๐๊W…5ป,ีฏ๛lv7ทส‰+฿‹bชฏ’„ิ•$กznD…ผ”j#fR$ช;qข‚ qSี”~ในไฌ๋ฝ$ถฝ๘ี๋ŸดmY๛มป‰‡Ž…އޅจˆถยรญฐะIฉc!tŒฺ๘ํ๚tŠŒ…เกc!n่เc!†l มรŒ…เgฦBษ :^๐ฃE๊.<์aaฎข็ บc!๏%c! ฃŒ…<6c!.๑|fŽ…=<œยB'เYฆ๏๏#c!฿_VQ๑ž—มBžรฑะ๑ะฑลl}V๛ ณ๗ย\r~าท1‰…Š^ใ๚‹5๎nn(ฉ๓ˆ”Cฒ"ปค} YœIฟ๗ั\๓ฯ†ร๕”‘$ิ ํHึไจ›fฆo๗๎.้nบษƒ„์หฌ๒ํd}U๘]Šฯc}_{Lฎ๎ฯU๖\Yฯฝ่Nๆฝ'ฝ!๊ฉz=’˜[๕ปJ;#ะK 1xƒ่ปS~o†=฿“W๗yœฟ&Œๆ6‘ตื๏wxŸWคbวย„/่uลk*ฝๆR~์ภŒ๓JฮฅHํตeb&9ตŽYผเ[oำSฏqุRไ`ƒ€x(ษsืaษ‘๑zไ๑)žœf)งB๊9จŠxีHI“.#+๔dฦtu]#sW"‚ฑ๖ก$rฆs]W•H‰(RIฆ๊p%|แร‹tณTพŸsZ}L‘ย๔ร6ค๐Aผ‹Aาžป!็ป—lTb^ไ๏ร}ช,i฿๔\:9ฯŽฬ#้าt๚AI ธ!;AwYค'ฑ>G]ฃbไฃ™rb™“eฏ๚๘โB6ž๓^z’ศอศนWทzIจ›&qฎ๛จ\ัŸ๊ ฉปห“ด๗z1็Fz.‘+~sษ๙๓ฎw๗šๆ๘ตฎษ๙Uัๆ2^‹พZพ๓Œ‡nˆๅx8……ธผ ฟภCวBvมCวB&S่59‚ฺŸcกใaฦB๐0caฉz ‚'=,,x˜ฑPฤ}ธฟ‡…އŽ?ฝ6p-cก๗ฃgBg,๔–#๏_๏แแfX่x˜oหญ[Y”ษyoก2“r'็>๙๓เกcก/เ์/,dหฑp9ฟึ“Xจธํ5ืไ|ืษ9ฮ6็ผ—•ๅอ"DณdœๆFpTฮ๔ž; ญcฤBพ}…UIซ1›T rฮ๓Žฦฏู"A•ล‡ิ>ธ๕คํ๓หโตgฒ9๗yฏ๖๏IฃืxฯNเr๎ฝ็5ๅ็๐}<ฆqถ(Q๙]ไ๎uฑลไ๐•ดตฆB๎ฝถ0าHผอศฮฟงฌvpIปหฺ›ใjxLฯ Aณ๏N—๖&็ยe?Žฏ9ไ์„ฟ๐n]sMฮ rŽœg_]ฦษš'ข๒‡ข๛q่%1R"ฅช nยTj๓Z๚'๕n#ก๕ส Œ’6UŸ"ๅ|ม้ฅทฒบฟํฉ‹ฏฝ๊{kลH๗+1ๅ1JPURาฉํิSY*Eล!ใE YผศXH?zฦB๐0cก_มCวBฐล1n ณs|ฦยž๗ำซาƒ‡`aOแไ‹เXX”‡Ž…އ๛ ตHฉ็s,œcw฿o;b ท๛–59ฟ*rร:๎ชใะžGfฒe_$ไ=38sfC็๊9DฺGž๕ฺq//ษFkeษบLkุ{๓วƒผ๛cผ‚ฎืฎืญ๗ฑ'ั‚ธ_ถง%๐NธูŽm{•๚Lผsๅทq๙|6’kz๓ƒคำ{]ฅ๋!๗๖~pฏฆื–s_‡”ื น๑ฝธY^๗ต˜ำ~ญœ[หD–อk฿yถผซ x U-เNั“พ_ศ9 ี}9ั@ย_rํuใดkxMฮr๎+โ๚ำur๎NฌžŒ๖๚ยp.&!อาNค่>‡dห็x#q'ัp“/๚ ]ยI%…^KคŒTฟๅDฌช"๕Fs#%•Q๕ึํEโ=่J|สxกแถR R๒*็ฅ/0„SBซฏDU_‰’’Qฝ&7#๒„n*ฑใ}บcฑ\หฝ—Nฬ๓์`ฝ~k%จ>๒ศIพWณr๐๒บผชๅ฿Iฎ|๗*็.ๅฑ$ฆ™ฤ๛^SHŒ^“พO‘ œ‹q‚GฎKีq'RLภJLwŠœฬaวืฺ9^u๘}ืไ*ยC๏ฉ๕ู๖Sxธ:‚…ฺงใ!XdŽ7X่}ีŽ…Œ๕bL$XHOต—ฑP•๏.ชo\x–ฐP8(<aaด๗d,ฤตฝ‡…,ศm†‡ผO0އ<บญื‹๎dพš฿E< Ž…เแ2Xศvผ'ยnP็ํ-Vf< ผอฑI}ฦBฝG๐ะฑP๏<_Xศม"็'ไ| kr~ี`au๑fไUŒุช2่Dšไ€#ง™ศBภ ็"Y>๓ปV…!Eqฝgส•Mฒู„‘J<ไ’}๐>r/9o๙vฏฌ_fฃโˆห๛gภc3กฯFqฒๅ้ณ๓พ๔‘ส ฦŠๅูๅขเฦmพ]S‰๎HืsoQื,PีวE=^S5 r๎Uvศy6อหว@•ูป)ฃ๚อmlNz <;Q9๑ตoป๘…๋ฎ๛–59?คศ9†/ศ9™๎rNE๎ใvๆฉ๋œค5๗_2๏W๛URยpH6คN †*H>‚†drNตฉ'g!ีใU) จไ–ฬ-'bvฏ’I*=…ภซ’#ึHEบูF‰iq(ภsK•HRฝ ู๓ว)งaฟอ฿I&#xผ๒ํUฦ้ฝ* ำgฉZ๏™ืคไ”Dd-'ทn ว>ฝืK„sฯ๏ญท‘ฅ๑YB๊u$“>บˆนฬz-" zฏ8๒๋5า?ซือ~๗ว‰ ฉ+Jๆ๓~BํgฮqอN^“๓ซมมŒ…އฝุŒ…N์{๓›…‡Ž…ด๊p,ƒ…ฬ gา+๊เaฦ~ว#,T•๑9ๆAฎ3Šค 3–สปˆz`a!๔ร,0d,๔jทcกใกฟห`!๓Œ…ล่.๐0caoัา๛ึ }ๅVx่๐=,œยCUฒ1'x่Xจ๗f,t<_ Ž๋ ็๗ปฮ‘“Xจธตึไชยยฏไยลื?zQ2Eฟ9flFฬ|^ท5๏พยชฯ—ูˆตBถl$Z&ะตB้ํ_jๅ้ynxณdษ™œ๓A<ฐ.›นyย[ŒภฅsŸต.Kฝํ๔่;กwล“zŸ?Zฬ๐๏๐จUk›?>RB๘จ:ชใ=ร=๓ศ‹3ูศŽ๏ :ต'ƒน๚|>“e๒i|O{\.ส฿YkฦNŸjี|4ค#-ล2'฿Nะu>‡œ'Tฦใkr~uมCวB;:a u<ฬcึภCวB*ฬ,Z‚…Œˆฬ•Uผ:ภC0|ะ>3‚‡#, IzฦBแm>Ž…ฺ'UsวBฬืz•ํf๔ค้އ›aก/ศfฃ;ฐP$๖*‰๏ํหฏ[uj~žkQฯ†qฺnŠ\;1—9yŸ3“<—ั๓xฮs฿ปWำ}Qขพ~Uฯอ˜ญ~ถ"วใ์พž{ท๘ฌS)ฯnๅNฮs”ืฦ๋๒ู่,ฤ˜ณพ/จิลf4๗8fจ#i'b|\i`œ\จBŠฯืทp็Tฅ ๏k9ฅ๋n๑kืปC7๑ญkr~ศ’sN๚ฃึŸถI:'aๅvŒb9•GฏQ)RยสJ?ิ๎ŽyถกG/f–?R5(ฤ|H‹๑<สเจTP†DญTŽิc)Ga%ขCBZฎKชFFฅ>$ฅช•ไnHDบญศ:‡DดT†คช’z1s๒*ฃŒœ’ผeiบWiจๅไหrˆน4fำ?ฏุ†„”Jบ'šTœุ}ฉธป+Aฤศyฎ.๕คฅผท,qw ฟ‹+๔ป—ัkวwฆืใ‰&}ต\•๗ชœ\ฏห˜dy;†[Tฮ๑›‘sฺu,หุk9ูญ๎Q็^ผ๎kr~ โกcกHปl๖ฦฏ9๊๗เ‹t`!ำ |กUQ ๕3–ชrlิA… ห]x˜ฐ 0,ิ9=Ž…เRฦBื”'caฎˆ/ƒ…ŠŒ…:฿ 3fLซZNRชJ’’–,[$ํฃัใฑ… œRBช$Lก„…ชrI"_๒ˆ*๑,ไ\ี%ข๏{๖FฅHRฮ่ฝิีีXE2ŠูN™<$ต˜ !—ิ}T0rต‡ฤtชJžC*A๙œ$”„Œล*de าp;ฏ#'ฃžln–๚๋๗Eฏ‚e…ภT?iฏ?“๛๔8ฺH0ต0ข•rfœใJ ™ื็ฏืฆcƒ“ฯ…F๊ห1ฤ )ฝ„”ในงปw32p9ฅณ.ไxฑ๗_“๓˜œƒ…ยฝ›N/:x่XˆŠ„฿VXHKฟ#ฐช9x˜ฑc!x่X(\qดcb•%HV{๘ฬwFก€K?฿ง›.๛Œv=ค@฿ซ๓‚ณ 
™QNŠ|r’“Rฬถk.;::๖๐vn๋-hf,ฬxุsvฯฃแzส#คล'ภฐP็เกc!๒v=&c!x่Xศ‚ภ2xbศูรB…cแrเ5‰…Šcฟ}Mฮw"7™ุอำ7>๑ฎBXุ๊ฎธTำ,œนs%ซธ‰ว๘ฏฉ๎ฬ—v39ำ…tb™๛G๒v#พMv€ฟ"อ฿c•aH:ไ๒4หผ็ถ๎ฆqนjž%฿Sไพฮ>“,ํ๗EŠƒžๅ๊Sฃื|V{UุkฆeขVีm–;fpฅฦneยทO๓๒;แqa"‡Q\#๊Œฌช|ศ็๓7~งลql7žp59?`ษนป ๏ฦ‰~5f๏fวWG%I๗ำ7ฉD…ค™#ณมฉ’Ruี9+^1ง๗XII้T๒n๋$^JถŠ!œ’GชDชŒซ:๔ฯZ์เY‹ฝ9{ฑ๗รฯ฿HJu_D‘u+2wๅ ฃ!%ƒ%ูU'ฦCJรภ''“$‘๙zM*ฃวฝทมnถ‹็)Rวœ^{H๕พ๕>หkฤ™9’Sฏ ๅื+Qฝj}Vเ๚žT”ผ"ด™„Ÿ^O}Œm"!eกFI$ณ้‘ิฑฃ๛qeึ๋tูบWร๑.pู๛f'ศนŽW…ฅ฿’.หa{งศ๙ฏ{ฏฺCšใอ'|ืšœ๏๎6r …>ยสฑGw็Xiี๏ยฑะ๑ะฑ|มBมรŒ…Œaกไ์ยCวB;X่}ๆ >๕๐ฆ‡…ๅqn ,t2^ท๓…สŒ…އ…๚ !ํ }แ2ห฿ณ g—๎2rป>วB$:  uฬx๛xˆ/มฒx่ไzG@qท*F$>‹ืlNJ($็้๑™ผJ*]โศ|\—Ra !u๙&‰Q้?.ชTo]*a)•"%ซแ2Œlณ$กxn‘ถ+)-w๕]I*๒NํWcพš(>ิq๔n'ฉŒ๋žpVboQIu˜๙6^ ยจJUCฎZชF๊Cค2’SOHGฏํฬพt4Wณ๔tส“Qw<ฮาึR9’}=ฟ’Qd™T ‘๓บ{ฟŽƒา^๏ yฏ'–ฺ–…ฃ|Lnvขส)ฅหŒRbบ“ไ•wฝg}_9.ธ็ก[9฿i<อ๊น0*cก{`!UuHX่dฯฑะ๑ะฑะ‰นc!ฦmธฉ;ข$*X(้:๖Œ<ฬX(’.ลฑลห"็ง๔จI,Tๅz‡&9฿i,œช๎ำ—฿๖๊}ไ\„จ#1๖*บฯำfไ$ฃญZ ‡๙ฎ2xชๅC@–่ฆ๚Tƒ๓X.#ฃ^‘๏†#œ๊rฎ{Ÿ8UํLุu?ฝ็sLโ\ฮž{ธ{&pฯฏ7ฮ็‘dพ๘PIqG๒ศส'œึณแ[V$ŒๆงษฯM…<;้b๗ค๓gr^ซโ~๓- บˆ9ใุตc A;Eฮ฿|๓ป,.ผลwtใGnpไRไ|8=tˆฟโำC<ฏs7 ๑๋qฅCœท฿qˆY์โฬธ๏EC|ฦ๎;}]9ฟ าžฮ“QŒdXลWP๊คŒทธ9ีT‹HL๗๙mำทงฤำฃ*แŒชฃฮTR‚ฆ$‡m””้vช@J.หu% ัgYS๒ท=uใv%คบฌ>หa฿ฺพฬ๘dJtญไ(ฉณ„sTๅ‰คฏV|จ๚XbูŒฆ"f๚ึŠV๔ื{o)IฉWŒu๛ศ!iฏ&eนi–ๆ{kB–ybjิ3zาๅขl>Wฝ๏-ฯs€ILuฎ1’ำ๛ฬ†o,i?yึV'%ž"็Œ "™ีmo>—œๆq๗ฌฝฤ9ไkY๛Nท๙จRฝ*!_ฺŒœป <  !๚}8::f8ฐฑ‘ยรŒ…ฺFx0ยBต๗„FมB๐ำฬ„…uๆy 3๙ฎื{X่U๒eฐp ƒœNa!ฝ่• [•ฟมBฏ',์แaฏ%จ็ู:๖e฿๐ะฑะ๑0c!x่Xจ็p< !ํมC-0แ•เXจว wŒœ~ิ$*UrพำXXฉr1›|ฟๅU%6#็_่Eu~ดBื5#]Qศ9sต}žน™ภ5ีo*ž„U*้-ฏf`F”*กgฬไ<!ฑ๖ฑju#ŽนWซำnŒึ›yžgs?ไ<›ฤ๕ศนฯ5‡๔;๑wู๛๔J'ฦลี๊ถUื+Žชต/ฆxี;ปญ7Ÿแ„ง@ณ`’• Vกo๖แsฯำb-ต:d:ซGpi_–œS1' มweF,qฬฮ!็}ืลŸ๊ธn<๑†[“๓แt!Qุ:ฤต†ธdˆcำ6งqQ๔๛ ๑ก‰?Cสศ๙ณืฒ๖™'%ฃ๚฿ไœ6ืœŒ2ณœŠ9}t Qีa6.‰(๗cŽ„9}๊ŒOcv/ศ9=†ฺVืkตG’ว=ึฟชE%ฝๆ%๖ฉDท$wัซ‰ำ1•่œะefฝอ’ะœ`ึ„ัช%๔tyfYC/gbฦ ษa{งศ๙ซ๎~ฯั๛#.ค~$ฅTa‘ถ๋q$œ>3œวŒฯQRR’QUu”˜ IXฉ่{•๕’L›4•ฯบgฮDRฺวึไ i‘dFK๗ใ^์๓า9๓ผ`Gz~79"๙คช่UฆeRUT)*ฝเ7ู9Cธ฿:5แฮ๑g๗]ฯ9฿ir>œo†‡ู ๑ะฑfFzo๔ฃฯจฒgRNฯนฯYฯ†tHเต›U?"็>bmrCฉ}ด‹H๙6ว"๊๕ุDแLr~ั]N\\๗์ฦ“ผล2•๓วuศ๙oคmา!็'ฺuษแ/โปํˆบ๓/A_“๓]&็YVๆ’แeL‘U–ไ–ห……Š๏X“๓ซEๅผŒF๛่E•œ+พ๖ม7/-ƒฦ฿๙ฦ8+#ใีด น0+เA‚ VพภPH>คžWrxว๑ฝGฮฝ*พ&็+œdไฒ๊ ฃ7ฬgผ๒ใ'EE~ฌร<\-าe$รZว4 2ฮ>ฃฦศ Ÿ 9WRค$†ŠI‘4ชjฃ~Hฐ”45‰bธฎ7&Dฉ'ป&dVEžLุR[ฅ–Iž้rJ’ฬš`ฆ๓zŸ'Žq‰'}๓5)1o’ฯแ\ท5ทs์๛:}์'คษ%นV‘ž~๊h [&๋9 ๕๓Tdธร๋@ขษ๑ภ" กคŠกŽŽOFจ1ำwช2ฮ1ํวงoง…ล\a%ปuฺސ๓W฿๛ฤๆxผใิ๛ญษ๙โกฐpU<์aแVx˜ษ9> ๙Xใ2Xˆซ6&b๛Žณ=,„œƒ…e~นWŽศ3iกr_ฌ๋9ซ',,๛OXุรรfมฑƒ…™Œ๛ขไ 'ฑ0,แคa!x˜ฑฐ๙o่):Xธ:rูฑP2}๐pY, ตMฉฎณ8ด,* ็๓ำovไ$*พใ†ื_“๓ฎœฏzข_ผTัbฎIฯง/ž๎โ‹o8งDœ‰qTฮฝ’YIy‡ Cไห{ะรp^ษ<ฤ^ไ<๚˜]า^ษนI๏s๖yเฅฑัZQVวซฅ1lฃqdFฮฝ Žฌ๊9d›พrzห™N@ศ=๖|q<‹z}๐นืพ1\› ๅT=ป๎g ฬตตภษน?–…ฏะ๛๓@ะ๙ำตฆr๎วUpGMฟy2„แ†˜{drฮo {ผฦ๓ฮ!็๏ธวฝ๏พืIxส-^†œ_sˆโ3„ปKฺๆแษ๎ร้7 ๑#้6'๏ฯิ6krพ'zวtา*ธฯเอซ้žfI'R3 ฿HนN"สุžืWu๑jzgศIUR.7Œ+Iฬณ6ฦศ(-ไ3 ๆFrXๆวBš%ีŒ^Lๆ„{ฅผ‘iN๙นPฏ:žช;อจ“r6ีกDยYf„k‘ฏสฐ‰„TAŸ(ษจฎ[RšรeM/ๆDๅฟ้35งfw5๖>ฬ<šm*IEr‹?€ปVปs5m ๔~r|Bx๋อ‘œŒS•๑yภY๎ฉวัoฎ}๋x๕พ๓Yไค›E(wœn9ฤป†๘!>1ฤ3โ๖ร†xว็7:ิาอฐะ๑ะฑr๖๐,ิษ1 อQฦB?ฆ uxˆ”ผ๓วBHบca1„ <ฬX่DฑOŠ:ฉฯ†–Yาี๔f\ซXhD{,ฌn์™T ๋œa!๘n……ฉ (ฃ-8˜iงcavy๗Š๚awขbl‡I\ฦB VมBmใ=็/ซพธ๖กุศN๐›aกb'<8Dฮ~๓#'ฑPฑ9_cแLย>Fส+=D1p‹pbRˆq*่๊/GโฎหNฮฟ|ัy%ตืัŒE3“.vญฆ›ิm*9bdาEๅ]฿ฃๆีฺ\ฮฮํษฎq‹๗็;ป๑G฿jูQjrcTธถŸท=Eฑุ7Jํ7ใฟ๑~๓แt!.โiŸฟช็Oฒšœฏpยi•ฤะษ9ท๙J9RLP_Y็฿ [ํไeศ๙Q6›๒zฤว๑rๆZ๊|ˆ—ญา>f<๔1gNฮ9^่๏อ$๑ะ•>ฬ–fชX่xจหยA…n  ‘ท๓๛ q_O ๕[ง/ฝมย ธ 3๎๕ไๅ™`wฑ0ฐฃ‹…ZถยB'ู ร f,t์๋aaฦCHบ-Vึž๗š์>Oๆ˜ยBo%p,ฌx่XhํŽ…ๅณŒฯ ฃ>ฐะ๑,ไิ์/ณ,๒{p,\ 9ฟล‘“Xจ8๎F[’๓5ฎ*sBRฮ1lƒœหY=‘๓rฟหหใ1:๗ช:็+๏z]!ๆ_ศ…๛ศŒกขgbำ“ทึC\ไ๙Aฎผส^I„<aำ็œHf#แถjw3RอซไSฎ๎'ํnๆ7๖ฅึm=ผ]!๔ค{ๅ<ปด็่]›9>u_’ถ7ฤ\‘ฅ้๖็จทป#?แ.๚ึ‡>’ร๛B€๗šปC{Tห๋bOšq๎3ษ}aง†zศAศ3qŸT‘hT ~7ั^แี๘9ไ]'฿w๑พS๎ืงsฬRไ`ƒŠœธฎBฬฝชR%€˜ฑขމ๖n”ฤe%’Q]ึk€œ+ด_%๔Y"ห#น๕Yฏ8ฝำ{.๒†‰QN๊HUEAฒ9J๎ฌrL‚นi๕ฆSัฦ€.›นณI>'ฬ‰zฝ‘ผพ:&Ž 
Oผ๖2๚่อO7๊Hrึจ˜!๑%จ๕~ศy๎ฟ์HK=อออ(6™”fพg‚^{]™‰๔SซDิว์้r™ว<ผ฿๒ž‡$\ฏYว’ฯ:žv’tr์p\ๆ๑k[จฝ๕่‡.xฝ—ููsศ๙:ๅ๘fผวป~฿mหฺ‡ำC<ุ{‡"i๛uBฺวย์B+ƒ๚ฮ !˜m๙‚ฅบ๒ษ\ yn๐ะฑPท;r<0Vอง0–Xุเใ_ฦBรรQ;๏;๕ฦƒ‡ F @ฦยžฉฆ“๓j์วด รCวBa/x่X่:žมBŽEŸ˜n๑Hp,\Eฮฯ8๚ˆI,Twุ๖dํk,œOฮy&R]"ˆ:r,1/ฤDค^D*ฺ…ธp›dภo๚uP+ุซL˜๓จฆ๛uศy้9็ึฬOw"้ไ.{ฯr•m{๕|‚เบœฝ[17 ธถ๑๖+พ8&๋๎ฬ๎„๒NP%๏J๓™ๆฃH‹ “ฤšอๆWGSSTrnฎ๋๓FาพึRA7bOฬ!็๏yเ)‹>่xฺํnณ&็›ฌ‚Œ๔Wฐ$ˆH9œ“tzB่=—Tฬ}[R๚tไโ”ษน^†G>ำ]ฏœปดนฐ๎^NbJ%Yg้แsฉc$d#bš๚›ฤ”Šฒ๒ อ5)ํm๋ข๗=ัIขH2ZฮT ืmW^๐ฃ3ูCสYzMƒฬ{RZ๗รํ์S=๚.ตืZ“R7LขำŸรๆ†q)mˆบ%ฅ>SGๆtTๅฦฌ๗Žง-ฎ‹g•๗ฎืษhŽ/+JT!L’*ถ{"ูีฑซคT1‹œ฿๘ัLxโ>ใ;ตั๛7—ั1็๘ื!ฎ?ฤา}Ÿ_'ค},„ˆำS๋X๖๐ ไa๐ะทEN Ž๕F u‚lคl่Tส}‘ƒื3ช๊[๕ผสฬ9.์นGไ‚r๖m{0|ไย๒[ฉr๘ ็๑เ๛/>tฺwuใiwธํšœlไ+€ศ3้ƒ๔สนห\‡คยฎ?uC’F๏‡ิ}บgvฏyBŠฃ,ฝฤโะgฉ}xŸ;๙โn+—โ่tนURŠฉN๔ ึ„4ช0NNb๎Žม‘˜zB้U›r#บ”ภ5ี ่‹l็อ,ทดDาซๅ5†ฤ๓สทxIฬŠ;3‰& i œsr๊ฏง'ท๗j—_ใไL๏i2EUั=QmผZไ2ฯฉVRŒqIWพ๋‹ฝ5ญ1ดใr๎ฉ๏ฝFlช๚ปษZ•จ{@|seœศ#๑:า๗‘Tพใ๎ฎ์—x*๛˜Z๕œืเ2vŸ`เฦp"็Zฐ‚œCะ‡๓9ไ{เโ#g<จ?y็ญษ๙มJฮ•"‰ค’?[?นD“D“Qอณซ]~ Aงr„คSษ‚ชV๚CW"Š”Sฏ๑i๔NzBส~|ฬœ“ชEI#9D๖M/ฅšš R9rcJBณœป๖ใฬ,]KRŒ{2Zซ@ศ(;•กœ€ึส• ธ มV•่สท=ต$คžd*Aซ‰g$ก%๐„๊f<’z:9ทˆฦ ‰Cู/นปซs“Œ&งw๏ฝ๔๊Qญš›{ดฯึkะ๛PBบ๗รฯ/ฏ_ทุ๋ำqฌc—ใqปsฉ7๛ {ึฑฌcw9ฝS๏>9^้ฝZŽœงoัศŒ!žี‰ฑ–rn~ขzYq,tiฃสข่‘,*9›JบšŠOr๎V‰ฌR”]G•"ˆฝWŠzv’Nศ๕|Q๒j•=ฅ$ฅ%ั์$žฃp9^%คู‰ไ}ิwiŸ‘;2W‰ป[ฒQK>ขอ%œ๎๊ฮ~=‰ญ๛Ž๔šŠค_’ฮ๗=ป\Vโ‹Kq–CฎๆV‹<)Cฮ!w๋บ5+๗ุ“–1„“ๆ๏๑k้๖s“ าหื ้ึx˜ฑ0ใกOฌNe,ซ !๊TิมBUไ{ไ\็เกcกŽวCฐP๛ ๕๐0caQ™วX่๘ึฬื๒z๕ฐฐQeB๋๋aOoก2caเจ๒ญชน๐0cแxX•Iั*ิ5‰ณ๓)%Aƒ…އฃ็šq•ึc๎XHๅผเกaa‘ฐวพ,ภs+:‚‡๓9X:ฎЇ"็<ๆ๐I,T&ื[ฌฑpc!ฝแ๎ฦ๎ๆpu;๏ง2Nooช„7UL#I•จๅส+ฒd#๐UBt๖W/S‰ฯi“d7๊ไฺžๅิ…์9q๎๏|_Ct3Q๛Dบ›พ๎ฮศ2=?น"Uษ‘ฤ7ฯใ็Tษ;•e7บษ฿sฏนWฬ{Rv_Lษ ˆธkO(นMแฆๆ˜็๙็|76Bm0ญหnํ•ฌ๛(>weŸ3^c8ไ๑3+็~๔C{Cปqๆqw\“๓ƒ•œ“๊Oƒ77Aส'ฬฝ๚ใ๒u๋Ÿั/๔Hj•žJ‘‚?๑\=W2ช๛ด๖ErซPโ‰l“>๓2fๆ้งึฑ=$5J˜œ2fฌศ‡$L}สJฤJB๒ฮLžIซ Ÿ;9๗HฤถŽsณพอQ&ห8sลใฦC\ใƒt~ุ:!๚;u,\ฉ`ƒTามB@ยCวBžณLฐะ๑ะฑPว;x่Xศฤ ฦDVCE)ˆภร„…เแ ้vข๖ฆe,tiธca&็#{ œ7ุ—C˜ง๖ž3f<ฌ ”އ†… 97,œ<‘Ÿง9๎ ๖น_ว„ฺจ้A?s:Bึ้IฏX8เ x่XH๋˜Žฉยย<`er~›ร'ฑPฑ9_cแNWะ้=Ÿจž—m}ึฉ–ใz]ศvGึ>ูณฝลCMWA ต ::๎b’9Eฮฟ็ถ‡Obกโ๘›^oฑฦย]"็๊5RŽ๛:ี๓m๏K๗Tฎไ,ปxyฎ=็nG•gWณMฎภวy#_๗™ฺน/ํ‚ุ5„0รmฝJนZอฮ๏Qnช๛APทฌr;QอUo'ทi~{๎Rxค์8้'Izv\oHนs'็ฑภR*่>๖ฬฦโ๕i;๐๗i‘+์,vT๗/ัSžGž๙”‚U แrnช“9ไฏเa‹Oะ๘ฉ๎ผ&็39งMฬฌz๓Gญ2ŸลM๔g_คๅ‘ ŠT#oงาฮนห}ๆ/+z]%มคy7ฦศx๒Sg๘&Iv•:ชบา๐z=*Gฝ>ศฆ;; ›œำอ†šค1วšุF?gM๖r•ˆช8‰dŒ +ษคฮI&uY=ๆJฎuฎ๗BาษถXU“m๛๒๊y5O2gใ๊~lฏท๛9˜a.๎ฃŠRG๙ฉTŸbRญ$%'wHzMHq‰๊S6Œ+฿ํปŸนุ๛ษ/๖~็6คŠ!Yี๓aŠดชCq>้ธCฮ฿๐๐ใFN๕ฤ‡~`Mฮw 1ฬ\, ƒ0a–cก0<ฬXRํŠd9‚…ด๗๐แwมœ๓d,ฌm?= w0๗ชz มณ„…พŸ<ส,s'ใ#,ร–ลB.;‚…†ญO‡‘๔๘ต.ฺโ†ปธ็qll;ยB#๊=,,พ*†…ี๛#าาxุ`ก&\ฤyฦBฉ4ๆœ0z]™œ฿๎๐I,T๘šœ๏&9ง๚WLเY๏๕๖ช๊uกฎคmŠ ซbjวต๏ƒน)‚žฬ็zeฃžๆ็์๒v๑fฤฟไ์66-O$@–พช1\sฬฯœsฑ:}๑ษ๘ฉ{ฌษ๙AMฮuย‰ฅห.W!็ลe;— UuผGา]ฝj๏3u?ฏ 3wปลi •ZๅฑŠŽ’ญ’ธ)bธ'dติ‘gๆ๊q“Œฆ๑C$s๑P$ค5™ ษdM๘ธn•กšt*‘$ู$แTR๙ฎglHู•hEb]“อ^dr๎‰jฎygž๙๊eŸง์sC1 ่t{S=Oฃ…œล˜FLNฺ‹)”›}NzzฌRไํ$ฃ^อ\ฅs9ใ#๎6ฏ๑แ'œด&็ปŒ‡Ž…˜Oฎ‚‡Ž…އޅŒbห &Ÿ๎X(,ญxแnเ ๋ s[dฌXจ฿ฦpžฑฐ`ี„Tฝ‡… ค…ผ2o ซ๒๖%ฐะษถcกใ c!.๑Y”Iคฝมยw?ณโaฦB๐pฎЇ"็บแ“Xจ8ˆ59฿M,ค๚Wษy8ทฏJฮGu๏7๗YืQIvSธ:ถอMแผ—ุ]โ๔>GdS2wศyฎะNŒS๋นฏgr^MเจJKเsธฝ?j-ฒฉอhŸช"wๅ๎^ลฯ„฿>›Jฬ๔ฯ\ฯณ๊๊y๙พ^ˆธิ ๖ฝ๒•๊ฉcฆ๔ok๎! 
๏ฤ ๙์ปhL|ๆ9ฮ๒ถ0Tชๅึ^‰z,Hนk{ญๆฏh7‡œ_๚ฤG,๎I๊ฦs๎u—59?˜ษนช61๋2=ŽSษ่2าv%i%’๎"ฬฬsž‹ :แRa%ญJŽ•@็a—7›ฺe:วเฬซฅZค„T๒n…คœVแ๎ฮีu# d 4ฺฮหไฎNrZ9฿ฮค›M2Jย่‰ค^ทชCJDe๖9gั!‡ถ๗หŠœดฆJzื`ษว ฅฝ1ป๓>จ!}ญษง‘…&)ถ๑$ำ+FDํ1Yปoหํ.- 5พp ัoฯ9ญ:ทฃQล(“๓ํ:ปฯ"็฿s|;Zฮโร;ืไ|—๑ะฑOU๐ะฑqWއnT่jข‚…xmP=๕cฝโกa!p^๑ฎX(ฒึมB฿v,‚ดสr ฃ:=ยยv™˜;‚‡หbก—ฏ๗2]Uไ=`๋ต$Cฝfzเ!'*๏`j,4<ฌ˜“G*ฒH ‰๏นฬg,็`แ,r~‡#&ฑPqื_“๓ชœX(wก†ฤฌา‡ฬ0‡€Rี4นt•uGถ‘ต{๏น™{99ฏ#ี ๆAlGาy'บyดUZ—z9ฯ๎๋ฝภฐm$GศใHnmฏ}4วอฅf้อjŸฌ”'๗Jฬํณpนzc &RM^Tฑ™ๅ• นt‘GŸทWหฟ๖ม7—c์ซ๏}c‰Q{Ex”ิืฯขHำcoี๓lXื๘๔คœsช็™œ๋"็า๗,>๕”วtใน๗น๋šœ์ไ\ จ$“=rŽrปIฉค์’ZJ†IฏฅnSRŠฯ็œTกj‘’=ทZj)4ี‚pฒ๕‘<^ญ.#%yTฯ‘Y‰Vฎ„w{ย-)อ‰ซฯื๕HF%ฆ‘m๖z)อ˜จI@!ูJ>‡Dt๏ฅ/˜eซไZUฝ๔ ฎะ๖\G๒™ษ:ื#้uwม–gืฯ˜D>๗ ฺ‚EX“๖ืฯะfUฺŽฦ5๕ช3…%๛ŒสR=>๔Zฅ ‡i •pฮฦ5#/ฦbAิงR\บจูไฑ'Œf"๙c'ฏษ๙.ใกca&็ซ`กBวx่X(rไฦpSx2บR๛r,ฤ€,c!ฃำ\นC5ปรe,๔‰Kbแf*ขฦKปฉดปดฺอๅ\ๆn๘2๏กฯป K‘๓แ๔ะ!~ˆO๑ผฮ฿4ฤฏวAy‚๗/CMŒคˆ~ุ๏ˆั”:ฟัšœ๏‡d‚Žา*=fS )๎ฒJ ”2X๛ฯษoV๋ต=U๗Rํธิ"{+ีขจTTS0%X$^H8!ฦบ>$Zตบaฒอฆ œ%.วคJคั™]‚™ก›ชD{0าJ-$Dยุn% ชš‡๓n Sล@ๆKDขZ"*Ke;Dฝ<^ -‰(ผc‚คŒใฌยึ›Kuถ๗>ูฦฮ‚๙ภฅ—6ชํฦ )(‚๔hFEฑธ)Gน’QIuN๋DH๗)Yีq๊ /๚„็๓๋ฟซญ„YีO~ืšœ_‹•`!ฆms๑g๕Œ…"ฺเแ2Xจmดฝ฿`กใa #,๔๖รยชp้aก‘๙md,„`g,Œ๋]ปถษXี๒j†๗‚‡Ž…บ &,GXธ๖ฺ˜P T,4<ฬไพ.œ:.……ึC`!‹š\,,‹‡ i๙ษXศ1˜ฑ9Eฮ› บ™•UBHฟณUษ )>'‘a‘โฏผใ5•s]—E กUU{ุW๑n๔ฌU๏‡ฯฑ|&งฏคS๋X(1ผฎฒm8๏ฺ9w๒ŽIœ^+ฤœจไ็}๛ฎหkŒ…—าฆ ๗’œฺ+9งwž่ธ๒W“;ฏœcmฎพ`:ไœลฆย_w๒B˜Cฮแฌ^ู๋O์ฦO?๐ž[’๓แt!Qุ:ฤต†ธdˆcำ6งqQ๔๛ ๑กDฮoาู๏ห!๚:โe59NOึฉ8๚่ฃwญ฿\ธ๚๓uI๛*fp๙ฤ,‹จซฺฃฤRฯ‡|N็"บq2†5sอc< ‰จWW™๑ŠŽ’–’ *ม‹™ฏl2=$„ู™ธVŠL–ุ˜ฦAะฉœฐ?7rYฆ๗OFฯe3ƒ)%ฒMI6!ใžT~๎ผลห‰)กJบฮ้eก๋$ญ๖๏ฏX์—s7nc\ฆบD=>“ฦฝxยมธIR}PŒT๊š่e‡ๆแ~oOจีpจญzT‰GHKrชำ*L๔]†—ฮ๙ต๚ฯuฌุ้,Nึร๛Q’JๅR๗;‰W่7โํชvฮ"็Oธw;rฮโฏž๙ภC–œ๏6‚ยB}ง.iŸ‹‡ีi๛ฬ5XจcฯqO8วuฮมB*ํเกca>+ŽฐPฟ๗2าwลB๐0hฦ๔ ม6wV‡ฐ๗ฐ๕Pฦยจf\s,~ 3๋ฏ๎รCวB]*„กŽ…์/cกใกžLำ-šฅผ7c(“z<ฌ๊#‘s#ๆพ๘์xX{ฑcัฑŒ5,ฌธ้=่…าฎืๅXจ฿ƒ๐0c!c WลรBฮฟใf“Xจ8แๆ7\ฌฑ๐่]!็•,BะฃŠซ˜sน,฿ ;ี๙;อัnศน‡Uำ+!M๊Fโฬเ#ธธฝ–foFฮ{Žํ™Œ ห|Œฺฐ฿fL&i!‘ึgฌฯ้หo{๕โหoyUฉ.๑s_z๓ฏ”๓r๙‚W”m่ื.ƒœSฉŽ^mิ ำ*37ว{\ฬ!็•d๊>#็8จo6J-tˆyYHะพีวฎใ ฟชัNpmมฅQSธcป-’4ณูฝr>1:ยๆุฑ๗Lpำ>S)4ฆr๖บ›)3ษ๙ง๚G๖ณ?ึณtฏeศ๙ICผอฎ?_‘ถ9oˆวuUูฺ‚œ๛6G้๚บrพN.%งข 9Y๕Tฦ ฉV๕ฎ?rO_ygv/—"เz ‰ซ‹ดบถ๑ชศAฮฉQM*•sfเ†ม“j$”ฅrž{หm4ŽหIฆFฝๅH2ณ#ฐ’a%zTƒbq xณ IจชCJU๙!ฑิน%—‹zC”ไดุ{ู๏,_๘ƒ๘โ5฿รฯฦ1ข^WbชP๒ช}๋น””RMsc,‘๖™ลuD’Uวฝฯežม”%คศ9ฝZ„Œ1Kื=I…„ิไtxผ`SสนŽ/฿ŒtL๊๘ิqคืค@Œส‚ชL๊ื๖+bD%ษ๛าg‘๓'งkŽง๘่sN]Wฮp<ไXึ๑Ce,ฦ‡ ม<วBzึQ%๙˜ฉЇ†…. 
9OXX๛พVฌ๋a!ฃยœX&ใหฆotวย.šWว œ๓yOmฑ–ฎF๒"e?s1ฯxจ๛3*2ขpcŸ:๎t๊ธอXธ*r~๗›Obกโ„[ฎษ๙nbกHT!RHฺี{RไUษy1 )v%็™บL็Cย]>mีฯJฝjžzอGsอ]ฮ›nํ#‚žฅีึำMล{า๋ r‘s‘>8fววgRไ์๊ษ9ฟเ…„‹Œ;9/„}ธฝTะ‚ฎ๏ƒv๕U‰ฅUฤ™ีฏ‹ัœฦyบYม฿ฅศตHใvฟwฏš›Zยe๓(*aw๚fNปw๎O๕ัดัW๛™ณŽaœ-ิ–ป–{ิg’๓๙_Ÿsžฺ<๔คe*็๋๓฿HผฅCฮOŒห7‹๓รCสšœ๏"ใ L๕Fฮ$$’=A ๔วฏ?u—‡าซฆ?vfฌป‹1•$๏kฃฺไ™(s\ๅถ1BbtPฟ•ฤi&ฝ”Yฒž ปป‹œ[ตคJึ]–้ภ๊—Œว’xโ์ี$ŽษKgd้ U‰>๓‰งข๙ภxƒ„ษgMH-):•}ชล๋#!ๅ5† i~๓พฃ‡ด?๕ถM}šน”}]c8—?uศ{ฬํอ iํญ4rŽ฿ษจŽS)า๑ฦฬiŽ?zจ˜ำงIลSวฆภqg,wื‡ฯ"็็๘}๛ใช†๘่ N[“๓]ฦCวBท…ถ คcัฑ๙าย8วBoเกc!: 9ฎซ“ท-x9V,ิBY๊‰nล;X˜อเชSนIบ,คาœฐฐเœฬ.3‚7ย วB22ช๊ Ž>๔?6<4,,~Nะม7ค๚†‡U๖ž๑ะZš2ข\ุ‚‡ฃหLะฝฯ<ศนt๐,ิ๑๊ผbแp๊~g=,๔ฑจ`แชxXศ๙๑ท˜ฤBล G฿hMฮw›œGฟ9๒๖j่ฅ>่!ถีoฎisแ.•H'i๎ภ๎=ไษเ-“๓†4nBสปnํFะG๓มœg){๑Lสงfกณ„ฑ8ตkL]˜ผแ€/’]$ํWษนWฮKี<ศน*๋ m[๚ถฃก๙ฬ‡ฯลI$d|Kี„พ_๐ศ2v7๏ใX‰Eูหq‹ีภฮำำ˜ผ๊Gภ8<พS=WlวDm”\๕ศฤ qw็XF‰vŒๅๆ๓…ง.>w๎Ovใg~฿.kOฝhˆgฏeํปภ$ข’ฏ้OXnุถjBช$>6R๕ฆ๙Hแ‘พ{EษMดฦYง$œŒ“‰Dด1jณQ7U2=˜Mบฏะงๆ5ตUCท”Œึ^หf–๊’K*=รๅ:๖g39;ฤ<y้ฃ”<Ÿ%๚ฎSo*RฮRZ–œŸ฿xฎท7cูจpQ๑ง*’๗ฒฺูZ๓„ิ๚ึน๎ฮ๎n7๚^"„๊มMใYg๎7ทฤิŠIF}š—ห แHl1Vข=ƒE#ร(Md„๔๎;ž>œ?ๅไfnดวGfMฮw qฐžตPาd๐ะฑq•เ-X(ฌ  kหOHฺมB$ำnึึ`กX˜|6ฒ\ฦยZ9ฯฤผศXˆG†.ำใฒ๒ข ขฅวฑP—Sๅป9m ซ’ˆVฬ23ข`1,D9ฅ3๒นุeŒณ‘ฆSซ—7มย‘qX˜๐ะ+่އŽއ ตOฦฒฉm(ฐล$ฆ 8ฎЇ…œŸp‹I,Tฌษ๙.“sมqgไีŠไผŠู๎ฺร 1๗1hy์•ป‚ัnไ๐ฝสธu™oำซ ;IOใศr=;นื*;ๆpiฦ{BๅฅZีๅb๘.ๆฅฟ<ศนHy%ๆัwN?บzำตE๗sืB&oV฿์คํƒ9—ฑ'b^fถหม=< Š"B฿uŒ_srŒ~Šœใถo฿Ÿnฝฟแ1้฿gžyžŒG$๎ตw<ิัW^]๕น‰ฏ„=คํŠ9ไ฿^ดลส™๘ู3N^†œ_sˆโ3„ปKฺๆแษ๎รq๛ทq=ป~9ฟว๕s“!หืไ|?U‰” ๊›1T๚๓ีŸ๎ชc„่‹S‚ซ๊ๆTŒP ๙จ^$] 2แฦqT:ห๓!QจRฟ่ห+ฎฤHmฎธต a iำำ–Œ฿F๒M9ิH7อธ$ฉJศ”ŒชWRษฅHถค˜:Wb๙ษot%ค$ญธ ‹˜+9 {ู^ฆG"ัŸญล๘›่gdCส)™gฏบ>•๊นผ๗ z๊ญ (Iจ%จต‡ฯฝG‡ๆ0Fโ๓.ษฆNล็฿;แ.L5)ป๕w๛1CบI๘iˆM]฿๘‰5&KTใ1Aโ˜Uตˆqj:~^ฯ๙>ํ~-แฑ๘๋Ÿ{ุšœ๏2:โc€าge,ŒใTx˜ฑะ็™ƒ…T1qs'่QgœXจ๊u๚ุ)V{ G=พ=,DŸฐฐŽ@s,Œ˜ ำ„‡…sจฌk‘2c!dพƒ…›โแ`aƒ‡™œ# ]=f<๔ฯ'ช็ล€ฯฐะGR.…เa }๓Yn*่=, šง‡ย แ็ฃ%……ซโกศ๙cNผๅ$*NธีakrพKXX*Ÿช‚9วNแฃฐถปฯjF_ฎK…!Aู}ฉsvv79uCบ”7UWw2c0—J;ฉ”œg){vr๗Qjiถ8’์ๆ39งงI;ไœ๓W แ†ํDฬ กb๒s—#ปˆr,ถ,}9้R!ม.๎Nฮh็๑v#ฏdๅ>าอzฟ+‰ทŠ๙H•แ†‚ึ้ปEฦ๎ 7™œง™ํ.mฏŸ…ƒ^gไš“๖ฺ‡>ำ๎3 ฟ์7ž>๒”eGฉษSแฺ~v๖ลb฿(ต฿Œ๛ฦ๚อod^๑ ๗8ไ๏็‡ญษ๙ฬ=a.gWBXdl1UสรPญY%!-ีขgmHโ0?Bž้9'&HฌเcPa‡œcˆฤ๋ฅฯฎ™ฏญŠD๊!oชณ!$m’Sฏน”Rž’ะf\šช้1Wฝ$wศ11wSR*๙x$zM_ฅถAยฉ*Iจ๚$•ˆชj~๙&†_๋€PถX|ๅย~_๚TBช ”`=?ฃ…˜๕KRŠ”3UšYฤ$ชฝห@ั๋๊=ฌ6+yŽวA•x&%Š‘vR5"AmŠm? 
tๆ^:^ณ™เWฆ9Šบ8VkeUm”ญ#ส•)ฟ่ฆ;ำ–`Ÿ;[‘.กN้๛๊œEWผ;™ซoMาTŠฯe4๐eฝ๎(ฮK‹ฌ|ษxMฅ๗3ฑT0–ๆŒG๎ู๒UVแใ๓ข@_= ,z_โววt๎?„b์๗ž7M*ฮซ๕ปUฌ[ซ๘*.ฏโ‰แ˜mช8=‹๔gVqAgฌGBซธF๗อโ|—6s>฿ฯ๖sโiˆด_†5ซฟ6๊h‹›-&Hฬ‚ญH(fHi<p7•Hงรqรคz|-ส฿ษฉŸR™9CUš้•q…”B !‘rNืศ ˆขฬศฉaM‘๖สษเ`๑<สษ\Ieœ\ๆu‰0ว,๚T)„œXPY$อ๎Uึ1;dง+ฑบฯ๗…’Q`Ig_โ๓ฏํฮY,;a็Vœฆ฿ฆ๐ะฐะ๑ฐ q:ฒ/ใกc!e์“แกcกpOไฅ^๓ˆ…šWฑœใ"ช=bก^:ช๗๕4{= t?9 ฬ=ฯ+s8–Fniไฺ†<^Mก Dฃษ&็๎ฟPVœฃ๎ๆo็+ โ|ฅต3Hศฯwqพแg'tธ๐”b|๚}o๎Eœo]ล™v}W"spฏ๏9ยqงT๑ขE)ฮ}Dศ–[nูืโšฟ"ล@ˆƒ๚,#eŸf[{็฿Mฤ“1BOJ4)้d—! dฯษ\PบIh ภื•1)u3#M)eK&‰หnNคาLํ‰ไv'๛ ฒ<6„SY)ฤ๑n๎ๆ„RY*ศฉz฿EŽKโ\„ถ){ฤฑสŒฉr@ๆO“ญณoNK7คT}—ึฟ›2•RTอ–ฮขผ.wq~ะkวN4„X๖ฝw-Zq>Sx8H,LฟKรรq'13vญŒ‡]X˜ล9ื#N†‡Ž…^ฦ๎XจŠ แขcกฒโ uR2bกOณp,ไq#rYญE Uj_ยย‰๐0bแTฤนฐp๐ฐ>Yษึฐฐ =cฎ–๐pXฤ๙‹žุˆ…ฤb็3ษ “iู†ปŸ=ฯยผพอขKะ+ำžณโต8'‹ฎ ลนฮ๊q>แ"cด๙DDี;IyฆLŽ ฃำšŒ’Y*,z1ษAฤdงNeŽD”ua?„Peๆ*SG„ซ,“}"ฅšัKI#†|nน.ป™Q\"มK%ฆb^‹ฦน)ฺHpc†‰๗ํe|๊ลว$๊ฤ๋Hแ„”Yษื5๊Lฬ[B*ยู๐Jค”ีc)3ค*ๆ3ญล๙—yฬ/ฤฒSfฮ๛ฤรŠsฤแaฯซยCaกzส…‡E,œ#zIปc!8ศm์ใิ.,a!ฑ0๚i8ฦ•,o7r,”#| …‡ฅ>๔ˆ…z๏%,d ฃ8็3F<œ ปFชe<ไšถ8๑“ฑX๚คGถ™๓>นแภฤน›ยๅ่eu•ยซlฝ”Yฏข”5ืB|ญธ},หMธW๙:ท“%g฿…+๏H๛ศ {ฏน๎ฃ2w๎ณ"‹r‰ฤmฐฒvฑDy้u*ปํsฒืg3ปีV๎.ัลy™:บ2๗ฤ~?e“YผN..ฮUe ์|\qŽ}iญ*๒A ๓พล๙…งv6^vf1๖ภ{ษœฟบ ฮฟŽ๙AAœ/ต๋ซŠ‹ซุม๖I.™U์‹@oล๙<\:;ฯๆ7 ฦ!ฮ<;‘MH)DิM฿ส7q1žˆŒโZ|ๅ‡tฮZuX"]๊ท$”E— 0O‚}d€TŠ๙ฒรฟ_—dBJ%ึๅ"์3ะ%ึEJ•1‚ฐ–ศhi‰๒XzlˆฉH๏DโษจŸtะ๛žLœณ…ˆฮqžzษณ'A?ฟ?w)žฉ•ฦจU1๋โซo-y.ฤฒำ฿Š๓a-iูสžฐž๓Œ‡Ž…އˆsN^&,tึ„xH๖ฑPnํDฤBจŒX๖‡ e์ฑะK#๖‚‡.ฮyผ๖"ฮy<Ÿgั#: Oฝ๑๐็ Ng ™q>x˜ฤ๙Kถ ‰ฅ๛gญ8–ฒvฤนgฯ'็uฯyศถงาvnหb_ท7•ด{&ั‰ธFT#ธไ"็7ฌKก๋ฟธq} Dธ๗›หม]ขMโ”}Wถ[™j7/c;ั๋œ(+ผJ๑u็sGก)c4ฝ&๕ิป ืeฌ8WyพNฬต8_?2Nuy?๚Lž่eพ๙Lˆ๓{.=ณณ้ื็cฌ—ตW๋๗ธ๊๔8ฃ็๓‰fงุ^3็ฉ฿Bชสqœ;JDูV>Rขaั“‰ศฏี฿ND‹1BRHšฒE๊“T‰งLฺD*%ฬำ—|็ิฮ ๙A"ง๊I'kD–›วR‰ง;C*yฮฉ,อWื๋้…ŒNDlEXEVEDตTฺ e‹{3ค๔”Ž่œฐโศ๔Ywหw:ฟ\shŠ9หอ€8Ÿญ50qต7–<bู?ะŠ๓!ฦร^ลy‡ลUภย ๐ะฑ1“ซฑPฅ๎ %ามB„9X๘ขCOซM0#๒˜๗ ง‹‡ 'ยร::  9™แx8ฐpพแแเฤ๙฿5b!ัŠ๓แยยษf›7‰๓X๎ณ๑งKลK™Sฤฉ2ใQœ๘บ;?]1ถ%่.a.qฎวYœฒ๋*sW ปปยหั]fuSY#–… ฺ่M๏™๕š _zอ^I  d˜ชฏโ|ถ2๖็—ŸูtีO‹ฑ.๏๊Eœ?คŠ๋ซxŒย=)ณm0„ปฐ3ๆโ~x_(<๎v๙ƒU|ท็๓™Œส€ๆ’‡฿Gค :‹™ฟาXฮ)G`9งC( ษINรl!ฆRอ๊ๅq”•—QคVฦJS%ฃq๑8Mฝ๋ฅŒำDฮฦ,Hy็|>ไยต‡ฆ˜ห•ˆซ˜o BJฬบ8ฦ›G ฤ ฑ์ฬทโ|˜๑pŽฐู็`!ใยCa!‚ZXยBแa a!ใUJณ……ๆแaฏX(“‹l0"T‚Z"ฎ@”ปX—H็x Y‰s.๋qถ2‰“ ;' TVNL%k>ัRฟผgมOะๆืดT%เโ\]cโๆ‹8Ÿokโ+~าy๐ฺ_ใ€พงืQjdงu\wห๛v&L„%฿พ\ๆ”บWัษ}ๅ]#ำชuD>–พ_2kล๙ฌx8ฐŒฐ<9" ฯ""ส5กยฑQ๎}่Ž…ฤ0`ก๐ะฑPb]xุฎนมรZœ7`!ัŠ๓แยB๏7ŸJ฿๙L/ฤ,โZฦn.ฎ็ˆ๒sฎ]›D๙WฏIก๋2‡C„ซ฿œหสœK “qV?]rฯoงœ1ŸLฮไ๒ฯA๒vN.ฬWqผˆธแ๙๗๚ฟ:^wA1๘ุ{{็ =Z๎…”สM6ว ื๗J:ษQฮ ำ\^นป0W_ฃF๙pœ—kสญ๛สcษศห\h–๗แsB“Sศ;ัฎ9็฿z{1‹ฑ์์ตโ|X๑p`!}ิ˜!>#*cฑPY๓ˆ…ั๘อฑp๑PX(กโแ<็>ต ‰ฅO‹Vœ!–นศ`{™ทD5โฃkึ&1Ž(?ๅืซ;ง^y[WO:กQkš‹ฎู่”žฯง™เฝ,ฝ7ฝฏ๋ณ@W๙|ปๆVœ฿sี/:›ฎฟธ๛฿[qŠ๓)’า์ฌ:ศ…ร;NลญแขฬQ๎ยlE>!™Pm!ฉš‹N&HคT%Ÿ2*R/ฅ้Qใ0อิ…จฯuฏy ภ&ฮ}วXฟiˆe็์ฺŠ๓!ฦรนฦย3n:ฌฦรˆ…*CXฦฉ/]Xจ‘iˆ๖…†…”ดทx8ฤyญ8nnˆ๓๚ Y`๕`KPjJปโ'.ฟ5‰rถu2้V๕—{นฤ8‡•ศๆiˆ๔™*iฤาv!kื<็W_ะูtรฅลุใhลy+ฮงHH•1บfฟา›๛๕”)‚lQ^ น”ภ๖ฬJ5ฏ—ใD@5sW‚œ๛ฐO&Hk!e„1฿ฝ–”pฮฤข|–hธqํ;›Wญหฮฝ็ร,ฮ็ ็dอ…‡gึ #RFฮๅˆ…ฃ+ฮw<ฮ๒™ทxุง8๙’F,$–>yหVœน8ด@/‰sฤตFง‘1G#ฮ ฎ{ฦแ-7wฎs๗ืŒs.#ะ'็r\Ÿฯ๋๚เ๊ื๗Lฅฤ|ื Nœ_{QgำMหŠฑ|ฐ็ญ8Ÿ"!%[”GŸ ’า/ธผ"คŒวกdัˆXjKจฯ\=็๊?Gฬ๛ุ ‰zŽ้๔rNzบ‡Eœฯไš2}เ‡ฃั๐˜8ฮป:›ื\Œe?ฃ็รž9ื(จ๋๖8าs.!ญš๓ “DHEV9าษBชŒ๛ํu=ธ๙G‰Œณ๑คฮฆอgตโผKœฟฏžucูํŠ๓a็ -ฮมBสต…‡%,T8r;ธ& eŽ),ฤTMNํŽ…๗›ฑpถโฑ;ฝ๒iXH,}สฃ[qŠ๓)-D1=ิ็ˆi•งซ„0ืจ5น˜#ภu›๏Wžเฑษค#.qh'[ฮH5ฤ9ยฝ5Yk^^Yp็†{Zq๎โ†+:oฝฆ๛๏ัVœทโ|šคBzใyฎ{7žœส q…,"ด! 
๊ิuˆฃ๖หก]Bž œแ๎์๎„”๛@r1œƒ๘BJ5'x&ๆ๛.dBส fœคEN{ำiญ8?๊อwQŒe?฿ฏ็ จ’hx่XHฟ9โุ๑ะฑPx่X~ฑX(<ŒX(ˆ…Š๓ษฤ9{‚๘ฤข็Uฬๆฺ๘เ‰ฎพ็ฉŒ2ช,!น๙d”๋๊”!œ2ๅš๋ม~n‡ธ๒š๏หษเyUŽศAFWn88d4าปฟปจล๙ษG๏2๚b๙/œsq^ญ฿ญโƒ-!7มœ ๅยรˆ…-6cแ]๗SใแBยยฤ๙3ฑX๚ิวฮน8Ÿk<\XX ๔Yฮ “‰E่ญฎ„™m๕+#.ำ7•ฑKค#ศqlš\อ จ<ๆoMม!ะu[›5//}N|n๚ฎึไX๔โ|ี5ึฎ,ฦง๗ุu่ล๙L`้dO๐แB|ขŠ•Uvฑp!]๙Ÿอ7}~Vล9ู"fœCE0Uฮษaฅท\2สu7K’{ฑF นฃฑ2EสALy ศmKF'Ž Ÿw๗‘ฉไ๖ส;้lxเ„štm^ุโ็ว|คำ9ฎห๙ูy‘9ฏึ[q>ƒx8KX(q.,ฤ9]x(_ ]ฆ๒GแŽ…๊=X่ฃืZ,์ ม?แแBยยพฤ๙vฯlฤBb้Vs/ฮ็ ึ%๎•H—yฺl‰๓U๋7ค์6‚กญฬนgสuัฎŒ9b\โ\}ๆdศ5n !IVžmtk็ฝแุŽ่$ฺี,ฮuRƒ๏ส?…0าญq>r๓uฃๆz…ุ๏“ป-ˆฬyฟX:•'๚ร*vฏโ†*จโ‹€!อย<ลอฅ˜ษE๖‚รœZๆึ:… r]dBชเ:ู#ˆจ ’ิฮu฿ง1kัXœn~4฿{,y?zฝฝฌ#ฏ92}ข_R ฝพ๏ค๏Œ์%žปŽล*ฮ๘X~ˆๅ~~พˆ๓}ซ๘rฯญb‰ข%คำภรYฤB–cกNF โๅ&,ิฤ วBแaฏXศIล|ฦBWฏฏ,<๕ฦร๛ฦCฐฒvแแBยยพฤ๙๖[7b!ฑtซฟœ/โ|ฮ๐p!aก„นป›ฯไ’S;ฐ๖9ๅสŒซ\3้\ๆxอ2ฟมศใ*NPฎŽ\<๙@Y{zoŒUป็ท)ๆ๋โ†ฟฏษซงp|ำR>fzœLY“E9ฏ‡Xิโ|๕cB์ท็'Š8๏ K{y‚‡UฑOๅ{V๑ะ€ ฤ4าอท~eฦ“๒ภซ๏<$๕X2ทึ g ฒ็ ๕^ชค]๎ล"ค"ชl!›*c๗!…ไJ”›8'๋ฅ๗E–M%ชlq_†เป8?ถow.\{hŠฉ.ศ'„t}‡%qภƒงw:๗}4/Wโ|ืN็“‹ฑWอq~^!ฮmล๙ฬ`แLโกฐษ฿wฤC ๔&,t'wถย@แaฏX8Œโ\๏ ์ร…^x่X่โผ,${.]งuUŒ˜8งa!๓พล๙m7u™(z์ทื Eœ๗…ฅ“=๘UฌจโcUฏ€'!ค™Œn^ต.‰oป๗TสIŸฅgว”ชด]D4Ž)ีeศ(ทAะˆฆฅ‡ลBส{ใ3้Eœ3'๙Œ›KŸญ‹sf(ณ"VกษฺMู")YขQ๓ฃsว๚ ณ8?~๗ŠœVŒๅน5„[ศโ–/uแแL,วBฎKX่xฑะบcกใแBยBอo็3™Lœƒ…8เ  ฉV๖‚…އ ๛็;<ป ‰ฅKชำbแ็•xM‘ลyส8Wั๏B(ช<š’vฤ6แ‚KึฝŸANึŠPD0*KŽEPNๆ<ฏ,็Lžp˜ญ%6ถ2f›Hœs›\ึ7ำ=MYpลD3ฬ•5Gœ๛๗ล็ฝุล๙koํช,๑ุ๏S{ถ†p=ˆ๓อUWล†*F,า๕€)…fažbํ!ใaฎ)ใS่ฟฃฬ/šรุbอ}›พŸ๚๖VDจศ๎@F=Kไ็.ิe็%2Oโ6D+1ูš๏„*C(Fโฝ๒9A@™‡ฌพQ.c&E@Rูฒ๏„G& Hœcr4„7žœ2D›7Ÿ3:—lH+ฮ๗8๙„OŒอ;ฑ’ฏ๖$ฮซ๕า*~SลuU||ฯjพ1o?TŠ–ฮ:๖‚…รr,ไ๏RxฑPื#‚%,t<\HXศVx่XH–<โกfบ  ›ฤyrRนฦร„…‰๓็4b!ั‹8Ÿ-,œ/xธPฑ0 sข”i–ณwS๘B@ซŸ‘ฝ"›ย)ƒ๎ไr]็ธ•YŒFQŽะDHช๛57ค่ๅ}อ็ฅ2r‰s‰lUจ=@%ฺ๏Ÿ;Ÿ ฿——จGqฮๅqฏ๒๗ฃืๅ็k แ*q~๛mc'ฎB์ท๗^=‰๓ษ๐ฐZฟSล๓ํหผคผ้พนRGU\›ท+,mฯŽฮ4)…ˆฎ98‘ัOH่Gt:N˜\œ๛\ุอ็ฆ2AR๚ƒ•‡ืฮมcฉžs๖*้ิ_ tw5ๆถR ็0-อ'!%‡ดร๑'&็eeฮ๘| žR nˆธHปฒ์d้~ึชร๊Œไ”๛ธณศ›~0:ำ๗มณT g฿โฤ=;Mgc๙eO*ฮณใ%•;ญโ๗ซธผŠ'ฮทผd)Z<œA<ŒX˜๑pRqfd;Rโ๒w\ยBแกcก*‹"ส}˜ฑPx่XH…€๐ะฑPีC*erยBx่XHF]xฑา๕ˆ…๕ŒsแแZำ็;>ท ‰ฅKบ3WX8_๐pกc!BV%เšฎฬ*โm*โœ๛)ู+s‰ปฦชI”k9ูr‰r2ธซ๏๊ฮ–da>q>Ÿ—ซ๗ั{ึ|]>!แB]ื๕]Œุg์YpUl0แ-กuข2;ตA฿rรJœฏฟฝ๋d•ว~๛์=ฉ8๏ซตMงg‘ฬ*.˜์พี๚Œฤ:[ีๆ K{}ฒ็W๑*Sล?ถ#!…xqžสุ6žQ'-ƒ˜†^ๅFN$‰์†๗\สธ”1๒žK5ไฅŽร.ฮ =1 Iโ\#ไ\จ๋sœณŸวPฏ)ณ“!ฃ|พS๚[ษ!่!ฒ\Ždด^||ท๚[ฎฤ๙^ฃฝห/?คqพugฺ๕]‰N[ส9\xฑPx่Xศ๏ข xx๗ว&&a~๏˜vq.ก่bQยูE`\rnW?3B]ไv๕•s;!ฑชว—๘$4K3ง‡]œ๓~๕y๊3Zหื๕™ฦ“#VAเข\Ÿ?๗MN์ีwฉMๅํ๑ปkล๙จ8ฟ๋Ž๕]'ช<๖gŸ^ฤ๙คxXญƒซxฝ]'SพลD๗ี1๙2วฆ3OหฺŒณ UคŠฯU๑๙|๙BnkxBบๆเ11นฌH(Y•‘aKฉfrณญM*ไŸ5ว7d˜๓ซŒt‘QJฝKฝRd”c{-kŸฏ B ๑$DB_๒S;/;๛)c„๑“H'๛ž๐;[แฌฮำ<ป๓ฏœัyักงuถ;๖คDf5vIๆO|.PฒGT>k+ู"„มbY}‰๓“๗%๊…Xพ[t>o๑ฮ๐;U๑Mป&/;3[ฮ๙็“Ž_ญโPEKHg 3:b &<‡…“เกฐะิ์ฝbกฒํรŒ…އยBฐMx่X่x่X๘‚C~Pใกcก>๐0ba‹‡=Š๓žืˆ…โkยรA`แ\ใแbแ†/‡–(Woดยำ'สะjqโ“lฑLแKท]ˆ๒บR๖rร]ท^“bX—Ÿ๐พo‰t}ž)ืg h~"eƒU5่;‹฿ISy{‹…ใล๙wUŸˆฑ๏พ๛rะ7๚ๅ†ี:ญŠ็ุ๕sชxฺD๗ญึ]แ1๎œ+,์มOชโ-…Rล)-OBJื};‘Jฤ7&9”i,ฑๆounฝ็๋ษ•8&•ช4บ1บ๓ฃS %†'่สล2ฯ่`ฬํWศ1ฌ "I@6!คd} ฃNฒDdธาน๔€s:ฯ๔ฮ?}๘ิฮ ?๘ฮ?|์‰˜รืNOD•9ว2Š"ณFฯ“ฯ˜L>c>;ˆhKF{็'๔ฉ$ฒJฑlู7{ษœฟบข_ša2z|{็2ง7WqVตโ|vฐม-๛‹gึxช๔ŸฯVx่Xˆ‰f‹‡ฝ‹๓&,$zศœฯ:ฮ5.&nจŒซ2ๅไซ-\จXIตDu\์ื}%าีs-qพ>s•n#B%ฬ1ztใe)†uQ1 ๓5eฤ๕9ซ,+๊r}ู@‚ัซไ—'ซlhฑpผ8ฟใฮป๊๏%ฦ>ฃโuโaต~P็K'บ๏ ‹๓พฐtฒอtn+[Ÿrห-;‹iฅฌ‘สื*ขIv2CึˆlฤRFpฑ/RJ•1ย0f๓น)๑%…ธ๊ชไะ3F"›wษVู#ฎs-1ฬk#OฉณBdมxoRศ)b2JV(Oๆ/~)i๛ฌ=ฮ่งฮ๊<}฿%ซrxH)d]†Id ซ<.‚X,„ดq~โI{ฆ฿s).[v๐œ–ตW๋!y{i.ห฿›ํัA‹๋ zฦCวB฿ SU‘ใaฦB ว„‡%,T›OฤB"b!ใ  ู  ม;แกc!๛…‡ …‡Ž…|n-๖&ฮw้นXH๔ะs>ซeํs…‡‹ %=k$ฬU‚ฎg‰j4#&•WปJูc_ต2ฦสœใฮN)๛ฦ›ฏL1ฬK%.ฦฝd]—•=๗๒~y๛Jฝ๏\'W๔ฝ่ฤG้ฤI‹…โ๖;๎ฌณ1๖g฿ก.kŸ),์Iฎkุ?šnkฯŽNtบ๔ธQ‚Y‘Mศ'™#B$“,ˆ๛1ึIsb3!eŒ}d,0ๅก฿’๒B ำจ/ํิœ[อ่eŸส‡}A ษ๔@ษ ‘5โ:B[™ก็ไดD@ท๙ฤฮ+๑ฝด๎ญวu^๛บฃSผ๔ฝ''rช-$๕Ÿ๛Q J=w:๑{)t#ชล”-๊Gœ๏ฤOึฃ•b\z๙ื{็ฉโ๚*cฦOš!RxI^˜ท?ญโoซ๘#žณำfฮg’9w<~฿wjยร ว๐ะฑ:‚uยCวยWพํ„ ir<ิ‡๗{็;>ง ‰%“‹๓Yรย๙‚‡‹ 
%%ช%าืt›”นPtqฎrํuก—ZR฿G‚)#,Qt๚[R,„ๅ.ํ^)เ&n๎Kำcฅ‚๑]ฌ๋๓SV๏ืbแฤโ|อ๚;?ี›8Ÿซตm0„ปpฒ๛ๆ๑แn๗™นยาษžไ URลุพ?ศ_lx๊™ฃ”x>๐รD>ืwh"ฆš‘-rส~ฒHlษ(AL!@!ฤนƒ2%ง2E๊t"ช-วรพศ ABษ‘ ƒ˜zษฆศ($tง7~ทณร›Md”-d๔ี|L็e๏>)ว]˜ฒJlŸน็™ฅŸํหไ๙!ิ’ัฤ๙๑'๎žJ”Kqษe_้u”Ž›ืไา fŒพqU์yต‚>\ฌ{O9Ÿต‹u๏“ึmสขทX8ฑ8ฟ๕๖;๊๏%ฦž{๏ำ๋(ตqxXญ‰ฮุ(ตฏไ—ำo>–V๋แนฺผ}ุ\a้dO๒{๙Lยบ*.ฮๅGทW๑Yฮ8ด1ฝsดtS%MฬุRสษ>H)ž2PJ>!ค—ฎ๛VสQาIฏ$dิ{.น,qฎ’D๕V.„N%—๔VBH้ไz๊/‡@f ษ„xพaว# !eB Y%ธ,‘ฎ}\†œBr1’Bpา„@(-—ล๙q'G๚M—โขหพิ“8๏ฬ^9ๅอ…”ฮัฮ90Rฺ+vH>Ÿ9>็d๚Z/ฮฎตเŠ๑ซKškqพšืุ0‹rVœ น%<4,Lฎํ ๅยรˆ…t—9ชดฝ„…d:า3.<ŒX(็xุbaง่G{y9{21รe๎๕๛๏บ="]YxeŠKโำ(๓ภ๊๋R {ฟนฤนZึ™๗“ซฬษ^ ๋Gฦ;ๆวฯัลปW.ธซ{+ฮ'็7ญY฿๕9{,q>#X:iz^"<งๆoญbว์@wB ภ3ด คฬ๛•+1bภ ŽU@D„ฬ๖ฝ|7Uฏ%3hีo)S87@’!"]#q†™Œj ฅ›d‡่ฑไ:[‡ษั/ !…tŠ`’b„TDิ‰gS@R้มฤL ย แงค๓ส;Idด็eq>Vg8c\pษ็็Zœ_2€ผลรยr,Dด  eค F,D˜ƒ‡ช$rwแaฤยaลCpOฃแิo.,คฌ]x(,tqฎฌธฐะo๋ 1ใ.,์GœoทรึXHฬq~I‹…๓oy๓†ญ ก7:๖@+‹.7๘(ฬ5F์W ต\2rหๆp‰n9ุ+ฃฎy๐ใฎ๋ži_gŸ๗ัt•ฬฏห]โ๛๘ํ-–ล๙ ทญ๏2,๔ุ}ฏก็3‚ฅ“=ษๅv™ฺ=ํ๚e-ฯไ)ำฃGร—ฺซ€๔ะ฿็ยœ‘jๅœ”fRฮฉ๒M๕U:1e?ว0'ิODv˜–ฯ'&(฿$seูศ(ฤ‘^qz&)ล๙„ ๊น์Uœ+หDiจŒ‘x>>G ซ๚c[๎็วœ๐ัฺไ+ฦ—|nฎล๙ฅญ8",dR.<ŒXˆ0ืxตˆ…;ฑpุ๐ะฑP—มCวBฤณ๐ะฑฮถ_,ฤี]xธฐฐ?qฬF,$ถZ๒ุน็—ถX8?Wษdl}(ปvกญtฯภฏท’v)ฆฌ9Brใ-Wื#ิ6ดlธ„๙๋‹โ฿งฤzœฯ~๕‹ว™๓๏ฏ๛1ฆอ็จ๋•ป๛ุ5ชZ,/ฮWฌ^_๚‹ฑžC/ฮgK'{’+ฬjฒ็~[ ภƒ[”xฎ0š5'.\{h๊๗ƒไึฑŒ„“ฌฎ\‹!ฅ”’EโvŽƒภฒVq%KฤIˆจˆ)%าไึ้1BJ†ˆrL%ฦHrg'kคlPSึˆฌ;!7cสๅ้นคท“ฯ๏Dซเnq~๔๑ปิfQ1~qษs-ฮึiล๙ะb!}็ยรˆ…gญลทˆ…้ ๓ร†‡Ž…T  Ÿ&”โažห๛ื็ฒuผะจ:๕˜ฏดู๏rธwทv•ฟซฅ€ญจำ uะm>C=™๖ตโผ(ฮฏนu]ื ]๗{ุล๙Œ`้dOฒ[?ฏโฮเ~—๗๛[๎om^{Hg๓ํ฿J1ูย‰Je‹4BR !%sqยŠ#๋ฌ‘\‹ษ๙<_๖AZ‡อ‰าIˆ'ข๘%฿95‰rŸแN6Œ)ณd/ู"ˆงˆๅผฮ_‚ฒwn—’ฒGTศฉŒีr‹่‡๔๒ˆ€…n ื8?๊๘ืccœษgๆTœฯ—h๑p๊xาs.<ŒXHyปgะ…{ยรˆ…\&<ŒX่x่Xˆp:r‚า๑PXจ๗  น๐p1`aฟโผ ‰ง.yLงลย ต”๕&&[C๎"{ิฝ Eบฤ$ูfๆœ#n“Hฏbจ„9ขŽี้= ฮ7ไ2๔u#๗Œsg_“ล9ยฺวฌ๙็ใ•สŠฏถู๑Qœว๔ลเ8อ-๋RB)vไp‹๓๙=|ฬ‡>ŒS๛›*–ด<8qฮขœs๕=฿Hnํd‹ฮปe4KDจ4‘ฮuถ>๗W#ี4Vˆ^K; ๋ฃฟ59‚RชษL_ˆงLž0wโฝ‘5ขค“RK:ม่,๛0H‚ฌr™วœสูฒษGbฌ2ํNค๑lฐ๓บ๏ŸH0Ÿ-‚@ซเnq~ไ๑LฮR์โOทโผลรมรˆ…๔—ฐ!ฮ฿lฤBฐCx8ŒXž  ม(แกc!(•มLฮีษwtoฑฐ[œ_y๓ํทใcญ8๏Mœท<‹d4ฯ=๏u1rEโ\s}ษ–ณ๕>rฎ#บูB8)ํ„ฐ‘5’{;™#Ž!ศ4๑8๓5{คั@*ำ„PNF&ผ„บL‘ ชึN^บLo:$’เบf๔Mา?I@8!ฉ\†จึ๛๒~Ž็qy=<Ÿ1<ศ่ฦฯ่t<{4Zใˆใ?2œฅ๘ูล๛ตโผลร้ใแๆsk<ŒX(aฑl#r]xฑp>โaฤB.; $ฮฑใ„‡Ž…mวCa[แกc!ว #vแa+ฮ+q๔F,$žาŠ๓ โผ—ๅฆo็.ฮำถเ.Dh™D<ู๓J˜งธ๕šฺม}>ล%‡๚œ-—0็ค 9$–G$DัฤMŸ‰Dถ"–๑ูฎส™๗ี&ฬ]œ|ช^ะ‰ขๅ†ื๙Wฌบฝsํฺ‘b|tVœทโ|ศฤ9 JWšDชobคพIศ)๛ุjด™ ˆ$ŠฒG;Aฦiฤ9e›J)dBH_%$›!๑T_:Uฎ๕d’ด•aฃพLe’ฒK„+™ฅญฟpVบ,"LK#๚,[q-ฮ?๎ตITŒŸ^ดO+ฮ[<์Kœ #"ฬ…‡Ž…>ียฑญ๐pฤyฤBpะ๑PX(œใXวB™„rปc!"]x่XH–]x่XศVx่Xศจฯ.ีŠ๓Vœืขื๓ฦitz,EH•Q9'DqNI'"RH'ยœ`A\ี‹9ŸคO™s‰t„7Y ๖๑žศ7„ส~"3ฦ๛‡”๓Ypื!ฅ๔lj60Uืีฏษ>ร%•BF) …ภr?^ฯห •si^"็‡๗พไช]ŠŸ\๔ฉVœทx8#xฑP=b!D,Tฯy ็#F,T•๐PXศuแกc!'„‡ …‡Ž…rXจ—เกc!ŸตใแBมย~ลyOY๒จVœทX8ํๅฆe็žนuqฎc5nM‚ำลซ‹๑4bm๖ก?p๕ึฎฌ{ๅUP—๑็ˆๅ๛u๖\๎๎l๓u•ฏ?[ผ๒ภKไ%๎}ฆบN hฬB)s๏Gœ_พrm็ช๎.ฦ.Ÿhลy+ฮ‡tAHฝ๗\ล"ขส†ปQœ (ๅ„”Bเ4~ˆลT๖9฿ศ(ฒ(มอ>.ใX,ฃ'๏ r AT–ขอ็„“0„Qsโ9^๎๖๊=•ป3Ÿื5Yฝ›Pe“”ฝ‡๓ฺxN>oJ!ฃฃ„๔‹Sœ็ุ๗คฬf)ฮ๛ีžญ8o๑pFV Ž…:‰ฑฟ{๐€c"ฮG<ŒXจาuแกฐ:๒๗'< ๋|&ว ฒeŸช„‡Ž…œ :Jค๓y.$,์Gœฟ|๛ฅXHOwะWz่ร,า๛็ฐฆณ์ึปŠ๑มVœทโ|!,ศ&<ส!2 ‘Qeฯ)cิh!Bทอ๗ม##๓"•Y2ป}C๕‹ฒ4รlไ’(-z"๊dน s<ไ”RCตx>ู#eซx~๚4ีฏIo'Y'ž›๛๑5RBฺ8ึwwNฟอRœ}ม'Zqโแฌ`กNVF,ูF,/†ม>๐0b!sแกc!X&<์ ๙……เ ๐ะฑ์ธ๐ะฑะ๑ะฑ0๕ /BKฒl˜.้ค' ฆdŒ่วไ5๑๚ศ,A`๏x๒xฦข็๓ฎD่Kqฦ/๗hลy‹‡ณ‚… ƒ‡ ็ G,Dศฯg<"ฦมรˆ…เ๐ะฑ‘อgฑผzฝ`กใกc!ยZx่XˆP:Rโ.b ‹Mœไ7ทv~qใ๚bผงqN6|‹|y ฎŽูบŠ3ํ๚ฎDแธํซ8ช็-!๖b‹fฆ๚bฌอ๒œ9"ƒฎา๗ฆวนๆ็ไN‚—๋RD:้Sึˆห2†›I’ภƒงง๒N7“ใฤ† ๆ•~าใ.s)2W|ฦˆ„ฮฆณFใ“Gc‰๓ฏ๕ž๔Y•โ๛?o แZ<œ,DP ใKX8๑ะหึๅณม6b!—…‡ณ……ยCวBeะ…‡ยB0Px่Xธy๓9xธˆฤ๙K^ตด ‰'ตโผๅ†ณ%ฮ๑*บฤf็IิR๊ฝๆ†ž2สsฑ6^vๆ˜8G”K ณญ‚LสŠใN=—ตs™Œ9วฬไ’๗ฯฯณ่—๎ฝ+[ž…b็g_yK็ง+ึใ]๛dฟโฎpฮย1;U๑Mปฆ*พ\8๎ิ*hโž*.ญโ'U<ท็ ๔๙QRZลl/ฒ'•ฝSบ‰X‡\องฉิ˜ ฯสค+3ไ๊\วคˆ,ุldภืD‰rOˆฝส<้ฯ”€ฌช็ŸฒO>_2t”‚v๎๛PV๛็_:๒=ต๐‰qสฯZqโแ`ฑPxXยย๙ˆ‡%,$"โค.<œM,Ld฿ฐฌ:*“ŽรยEˆ‡ˆ๓ฟri#ญ8oฑp6ฤ๙„Qฉศ"vำ๕ง˜Wย’ำ“8ื6 
๔k1*ฬูdะs9~m๎ฦ{ZuE-เgๅ๓ฃ์+ฬ0ฮ๗<\n‘rร๓ฯ๚๕อ_w{1v็฿เ๑-ใlฒุ…xUโีqฅpฬnน็w๒๕Yลร๓ๅฅUฌชโทโ|กา*EFๅbฌKศ‚}>Šsนส +ฃฎžKฒERศ)™อx'kCฬึ"sDVข A…Œ’Yง?“ฯYn๐Vฯ๗pยข็_8โ=้๗VŠ~ถW+ฮ[<( KX8Ÿ๐l็JX(ŸสฺIœ_tZๆ\xJฺ&Ažฃฮช็c๎?„Zจ#เy/ธฒ๒}ี%๎.สs ผ;ฟ/F,”8ฒ›:g\ฝฆo๛H฿โภ`๗™ย1ฉโ๚*c†pO๊Œนธ_Yล‡๛1Frย๔*nม~จล9% *Oุrห-[Tเ’9’ |ธ,ณ$_2?Q…ธR๖9›fIIˆ%DTY!PไS}—ฺOz™SLๆยสe^ณ\˜JN่๊Gœxุ๛j์วไS}‰๓\–๔๋*6วณ—๙,่u๙ ๊Kๆ m๑pn๑ฐ„…އยB•ป …s …‡  …gึbแ‰๓zลำฑx|Ÿโ|X๑ฐลยน[I0R–8ฏl็ง๖z|˜๚ธนO๕ณตRVqŽ(ๅI)”=ฏลy%ภ%ฺSpœ ๘๋/ฎGญaงP&hืˆ๓/[ู9๕สŠ๑ึข_q๐*ฮษฃิฮ‘€ฎึ#ซ๘กทMืdื๖luนdฝkdZตvฬ‹ฟคŠWtฺž๓vMwQf ๑TOเ๊{พ‘‚ าTฤนษ‰™&ค"šn'2สm2Bโ6•oŠจRฺ‰๑Y#อ1Žโ\c•ศPPฦชrVˆy/ใ…Zžž8ฬwื๕x๕ใพล๙ชx\,-bFฯ™ฯŠฎะูฮN›-j๑ฐยร:N&ฮ…3ณ……๒ู๘๛ธKวzํฺฺใ”ำ„q(ใ|(ฤH„ˆ/‡ถ$)์AlษPIH’NŠ‘sปฃvต&vŸฤlณีžo๔ฮฆ™0ฯw_๗บฏ{ฎ็ฟ๎็=ฬZ๏aญ๙฿ฟ฿ทž๕ฌg=๏ณึ;๏5ื๕\ƒลBŠ๒ ๕oŸ‡xhฑะ๑p๔ฤy"บ ฮ{ วvล&pอi" ถ)ญ}0qE=ฦ”uนห;ฤ9๑,ฬแŒ'w<:ใ฿แ‘ฏ้qั5G :> n6คฯฤฮ๎*ฮ๑<ืซใิฏ#e>…ฏั็ืว#ี๗sn1ํPœ๗K8รZ๔ศ๙ีข9ŸŽ1ฺ ตจ๛C`q;6่‘rŠf>ผ“R‡9ถจaไศกn‹sŒแ Olณฦ’์ฺฮ”NŠxฮ;‡cD2ІH จ3๏mu ฦ๕%ฮK)ญhxdIป๐ศฤ๙9_9:6*ลื~xVWาฺ dิึ†๚"็Ž…h๎H<ดXจxˆฟyๅ GCœ+ช(W,dรLbกโ  qƒ’xจX8”8ทxˆ›އ‰๓๗ุฌ ผqญช๊Žณx่ฐสฝG]˜ณึ<ˆkg‡[G™sะ๓ฌ๔ิ-vG-qŽZq8ๅIk๚บบๆHcฯฎ9ฤ9kาัมbฉํ8>‰๒๘ySW๚x3bqnษๅืGฉษา"ฮฏ๕์๊บ฿ฉO?อลน‹๓ฅCœ#ž}ึลwBŸ๛~ ฬ›}ู๊bpdH' ศยuƒ 9ลn5^C:&#8ไt๐"สๆHZc‰ื‘พ ท ไstๅจฟล#Ž๙ส-7 ฏใ‘ฮ:> 3@JAPgฅ๎อJFใH4เ‹๓O|๙˜๘๏ฉ_น๛์ั็s๔Ez~ฦfธ8w,ค8GX,Dc3‹…ภŠp‹…ฤรัภBึ”[,2ํ]ฑ๎9๑Pฑ็#Z,$*โ๓‘๚O,tqน8฿!ˆ๓&,DŒข8๏แiํއc.ฮKX7xhฑ™4ภC‹…ฤรn`aIœใaฑBป„…์W,ฤ9xผลBลCb!oBเ๓)bฟโก‹๓‘‹๓ํฑy#"ึ{ƒงต;7ฌฒ03qžๆœcA|ืRุ!ะEœว}ฉ๋8ลฎUฆŠ#:ๆ‘–‚ก้๎น&.{rส๓ฬsŠhˆ๊t3!>‡@O)๎Q|ง ‚™ า“xฏ^็yบœสฟ4‰๓+๎yจ๚๚ฟ?ZŒ๗ธ8wq>ฎ„4‘าฎ-ŒซA ฌŸป1’Q4Šƒ@แ‚[„็pGPฒษฺE3’RR ผŽa[qฮHJ:;บX;N‚IŠ็HTˆอ‘๐œ๎I*&‚”้ํxิฯ„ฯ‰ฯBŽฯrŽ๏ แโฎ…I„็v8่Iœ/ s;ฯ:ฤmญYธฆ”๑%็l๘&ตๅ.m—ปนCฐqSู!ฮ้ไรNŽyญF>Dˆu่โขืn8Pะำ-a๏฿;็_๘ูƒีฟšSŒŽ™แโล๙๘“า๑XHๅ„KฤB=ๅHLันฯYo‰GQ’Rฝำื8ฃuค ๕• ’QบFJ,IZู0‰๎œ:Mใx๑WRึ’ ‘„ฒf๎›%1ฃŽ›Fฑีx๘โ”ห9g\ุ๘ญ็tฺญ}๏ ๑Lˆyp„ไตSSWbŒš6‘ุ๑P๚8เ!ฐอเˆ‡  ฑญxจXˆZํ‘.‹…‰…ฬR,ไ๑ฤCลB์'*โs( -*.ญXุ‰8฿zฺๆXˆ˜๒†ต;็€‡Ž…ฃ#ฮ‡ป พ๑s้C #_๘dอั็kฑ9\tึฏณษœˆ‹s8฿iNyฌ+/9ๆษ%ฯ‘บตวZttj—z๑X."ป–ท? ˆuไ฿FaŸๆข็Z๚”ฒŸลน8็8ฏcแศฤ๙E?}๕…_โโลy?:Fฅ๕ฑฮD๔๙Ewฤ C'„$„dŒ„$ ฤŽ Gพ€ˆาYพ/2 Geธ "˜อŠH,A้‚ซ8็k<iœl„คโi™ฌงคปD‰้›๊q,>o<ฐ3]"ธiฌหว๘%าฅ”v"ฮOโ\hi\๐ƒ๎8็ฝއuq>xhฑภC‹…ฤC‹…ฏ%,\<ดXจฝ1 !ศ๑บลB ๐ช,Dบ:~\ปb!ถ‰…๘ˆ‡K3v"ฮท โผ S6๎Lœ;๖Ÿ8 ‘ŽTu\qธๆใˆ๙ žŒ1๙Žร๑Qœง๎ํL‘๕ูLŸ๗_#ๆฺ.wk็จ4Mgง0Oฮ9šยก= m4m{่฿Z'้๖ีษัB==‡8ฯ‚_ฮE>หฅ๓้yjŽ:D>งsรย=P}ž*ฦ>ูลน‹๓q&ฅ}มจ“Q4@*‰st,F‡bˆO/ŽัAญ%)H&"บ+$ฃลB "Š(#k"ฃHซd๓6HFธBt†ุe˜Ž]6cใ#vkgอ$ศ)'œ๛qpƒ8"ืฬัjธnO\1%5„s ์.ฮG&ฮ?z้๔Z๗hO๒I็އc‹‡ฯšย)ฒ‹ปลBโกลBึe[,NG"ฮ-b[ฑ™B็ฤB๛ำ‡‹ฑ็Q'น8wq>ฮdD„4E๗๗† ž ข„xๆน›#…[!ŽฺJQฆqbH)'8Fธ.Z+Gaฮj์แˆsNJqอ.ฮ›๑p4ฐโb!ลนลBfY,ฤผRไ ‰‡ ycาb!ฤ8๑Pฑ7%!๖p. 
uŠub!pxHฌ‰ฤBโ#ำ๗ ๑9-ฮทโผ kป8w,4โ\kภปตขœโœ]ุcธTKŽ}โฅ%สฆTwsŠsํžำvNuOฮ)d‡็&]ฺkใิL๗šHO้๏tฮ๓X4Dz_žyŽm“Rฺ‹ยฬW?KFฐ้็bฐ๋ฝcแะโ|ๆY}๚วcน8wq>ศ่/ฌ ๔แำE]^- ไ 1!…0โLsิ "5มฦG ŸH็DฐQœ:H m ข:FˆคDี t`:1 ตx๗‰๏ก๓BHSˆำAg็avF€`2•]›(a?o0pnv%ฦ6 \‹ฆโ#@ฦATIF™ๆI๒JG ฿พณญw|v.‹๓c/™ž3lœs“‹sวรก๑pX๏&"ƒxhฑ"%,DX,„[ฒXQ lS,$ๆ ‰?ฤB,ลBf๛ ้”[,T<ฤ~บฑภAœ ‰‡Š…XЇЅฤC‹…އร็›๏:ต kmไโฑฐ9ืmร฿)็8Ÿึ#(ิ$็ยขœ]šš๓(ๆฅซ{ lsF:๕4ขŒณ6Vห#ุ’`ฮ๎9lŠbค˜#ต<อWท;;า8.ง '—<7™g>ฆซใ8vƒ7็รข+Ÿฯ A๑l’"ฯิ}f ฤŽ…ƒ‹๓ณnฝฟ๚ไcฺ'บ8wq>ˆ้Hล๙cƒ4Zx]kŒะsทืฤ9I(H)G„iํ9็๛ฒ k pณ’sD๗Ž #lƒธฑร๑`โบJ ดp|ุŠt:็t:ว๏g*;GA๔c?วnหธ]l€wŸ|>lฯ2Bฤ฿า`IF9?^็&๘๗>k1๚Qœฤ9;ๅ8๋F็އc!๑p8Xศ,"‹…x,aกโกb!๐l–8้ฤBโกbแPโบ์ผqษ1h yณRฑฮ8๑Pฑ๛‡Š…ผ‰`๑™ธลBลCลBฟRยรŽ~๗ ;็›q„…็Ž…ถโผ๑๕ไŒkฺB•†เ(5yNŽ*ิkฮ{zOๆฌEGŠ;ปค"ฮYใอ•›ด%ทœฏ็Tw๏ฺH.ว†p%G็ฤฯF8qเน(โy#€7๒์tŽZƒ0ืz๛$ฮyƒข“ล๏ตล๙วnนฏ:๛ฮ฿cื|ิลน‹๓ JJปUs™œ"ฆp‚”ข31ˆ)H้_žน6RtแEณmวO’1ึ_’ 2ๅ‘$1+OV:ัจำไsZu•HJA๚8kœ.บvfc#O:E8  ้์nŒ}8ˆ)H*๖แ‰ฆ็๘Œผ€mkk๐๑=แ๛Cด~YuqŽดX์C๊l๕๗[ช๊o฿o…]นฆช|ซZ๔๘•U๕ฤ7ณ8Ÿจคดqก‹ฆ็น๒6Nž‹sวรฑลB-๑ฑXศ–Š…:bMฑ˜€ว%ลB;rtฅลBลCลB–๑ะA'ฒ{;Eมij†ิ5B„"ศ(ˆBI)"Qš!QœS ƒ„‚4โQ$S6B" 'ถAJ๙œค•3sqor4ŠsธvHฉลฬe<qŽ็Y #=w๎ฅ}#ฮ โœ้ด6Nปมลนใแุb!มY,ค{nฑPoV*-ฒฉd Š…ภ%,ฤ{‡%,$*ช๐ถXHบว:R-ีgGืœŽ9kย9ช,‰๕(–อ(ด์ผฎโ[Rึณ0O้๏นฦ6‡ใ9ฬฯชwa๙plLgง0ณ a—ะ(ฟ[อVฐโ|ก”t๓๗?ฤ๙I฿ปท:๕–฿ใm‡฿‘8kRˆ;B!=.฿pniผไC!N– ๑ง๗ฆุ]^›‘Žว๛vuqพ4’าn4E „ˆฃ‚@Fูฤ‡$ŠbŠGuHสHNIxN"ษNลx9้ฤถ’RMŽ*aๅ>ึ/‚เBLฃ’Xgยฉฎ8k)Ij๔นพG%œ—ืb;)ณ^žŸWืโšสุฯ฿šฃต๎Ž฿'‰,ถA๘๑ž˜๙ํํโœโ|-‰uŠ๓Eบh๑ฟ‡‡ฮญ=xNต่wgU‹๎?3G/ˆ๓ร/œž'6N๚Ž‹sวรแcaวxq‡รลBฅŠ…ภ+โกb!ล8ฑตฺtฯ-โ9ร4xb!ŽปˆkЁ ตฟ†b!Ž#*โ%<คkN<ดK๑Pฑฐ…‡‹ฑ็!ึฐะŠsลBโ!ฑPฤ9๑0cแgื๐ฐฤ๙ฆปLmฤBฤšบ8w,9WฑึษRaศฺsŸFื–ย<oeMธ‡เ๋๊œ็:์ปx.ธธ็y4YJu็๐,ึeฮxvำ5•]B๋าณ0—๚๒|>ˆr๎ใ{B`$[พู(.<ห้b}ฤEฆดใs‡Jœ—พK›…ะ$ฮต9`้๕ฑ๐ˆ๓้ืฆ๚่๗c‡๗ืฉ8?b!fŽY&ฤรภๆ/1+ฤ†"ฮO(ผgรtKBฌฟŒ‹๓ฅ‰”2ฅอ‘B,š_ )ศH]!’P8ยL๋ฤ6ำีMRG ฤค‹้žxฤ>ญId“ก&qฮ€c‚ศ๔OAพŸMŒX‹วtw69ย๛<วƒxฒ{1็›S ใ=จล๕”ฦ้|^.~_๘>ฺลy•ปใป ๓Ž๗เ{ฤq‘˜ยA9]”'=็bฒJยฤ{$ฅˆGœ/บ๗๔YœฤนŽsา8๑z็އ#วรNฐ‹ภC‹…ฤภ",R[,„^ยBโ!;ž+",ฒ้œb!o6-****โg•๐ะ˜ๅโุWยBโกN!*โฦHฦCลBลCลยy—ี๐ฐIœv"ฮ7 โผ kธ8w,ƒ‰cทvผŸ‚vbืPGWƒ"> tm วฎํฌรNโ\็ŒวšotญแF|J‡ฯ‚œ‚[ถsJป8็Yl๓u:ไtิQ{NวN8๑4+]ลyญพใำาจด๘Aanฤy่Lmื – ฑฎ฿sI—:๗7‰๓ฑชS๏Dœํฏฆ฿p_1ถ;ดcqW{ีดฝ*žŽู*ฤmฦŸ1„8ฯวค็ทแ<.ฮ—FBšฤ๙‰tาgod„‰ขD K›ฤแu:F&I…ห„m.R’4 v6ฃcŽ็l”ฤŽฟxYEJ'…9ˆ"D7ศ"$ผ—ฏฃFวณ๛:ำ0๑š rญงด โ@Hแแ<ณd&;~o4ะ-#W2jล9้›qv๏ฒ‘ฝ9R#ฑd%&ษ(] Ž1dใผ†c๑>Šs8ฆฟณV’N›ชั="1UB ขIR‰ภsR;ฎ‘๑บu‰๐>MฆrrN0ล9ป3“ฎ้Tœำ=C0“5ฆn๐๙๑k.9+฿/ฉึฉโ8zR!‰$45งชน๊O฿X-๘Jซnล99ฟ$คtศโ ฯLoซ—e๓ํs]œ;v„…#ยCคM?w{[ƒLโF-—าิ๑.0ฅ;lฑื-9ขLล9๐อb!^รq q“๘ฉXศ^ ้šc›โ8HŽ๋P,ดxH,dืz|6ลBเ›โ!ฑ๏'*๊aลBเ`ฦCลBށ‡๘}+ขิx8†Xุ‰8฿8ˆ๓&,Dฌz็Ž…#คwฦษ‰sNืœโœ"›"p~šsŽ€0ง(ื}*า้ภs๔šึฉCคฦnํ)eต็ฌแฮ๛คบŽ]ƒHฮu็ZƒฎฯSฝz่๐"สูyฝ–โžfœS˜ร9Jœูๆ9u_…:ปท''=;่Iคา€\Dzฉi_“8WQž›๙Mpqoฒ:๒๗cหƒyH็<ฌ;C_ˆฝ†)ฮ๗+ˆ๓‹า๖*)ํB|=ํฟค ฮ๗uqพ4RˆตนคEV๐ˆyA๒Pcี’3Ab‚ ’าB ‚"ฒ R๒…ฺG์)ร~v,ๆ# I~6~^vlgš&on€”2V;฿#ฐMW็Vื„4ปEUj"‡TNิW๒wŠ฿นŠRT8Dt‹~}jŒ‰*ฮ๔๔Zงiฃฎuq๎xุ!* ั๗!eฐ(ฒ”วb!v ๑๗NŒT,$VX,คภถXH7b!{w”ฦX2ญ]ฑย›xศ1jZsŽP,ฤ5- qร‚xจXศ โ!ฑP3  kxจXˆl ^ Sˆ‡cˆ…ˆ๓ wšฺˆ…ˆี\œ;.กƒ^J9งุLุSœkZ;ลบบๅsำถŠ๔*สูLฎึ้=u.ฯฮ9sŠ๓$ส)ฤkโ<ํทใ๒4ฆจCDkฃ9ญOOฮzvาuž9yšcฮŽ๏์าN—žย›sฺ9ืผQ งq57 tmขgลน๔ฐโ~ื๚Zอ5O็™ศโะฏข:์๊฿c๓s์ธฆต›ใึ‚่ฏ<ญืDdลˆ๓ธ]"จ่ˆ›ๆฒฮคˆ้‰ J‘$แ˜…ืE2wฤ‹ติห กฑฦG:็ ฅt’ดห1ฮรnฦJJฺ้ ย าล#ˆ*^ืบs’RCฃขวเXผ$˜ฤธV~>ฆญ’คใ9S2๙Yœ} ยjลท6•"!ต๕ฌŒZํ%k.ัฌ*|๗Qxภ‚่ภ๏ ฉ๘ร5#‡จโ|ฟ๓งWง๒ชbqต‹sวร.แกลB6W,‰uเกมBuฆทวฟษ„…Hฏf:ทลBŠrลBv^ทXศ‘lภBŽW‡‹…t‰…์ิฎโbกf -FโžnV**๊xโ!ฑAS,d)ลย***B  ŽๅZRq๚ทMmฤBฤชฏsq๎Xุ='bOT๋จช#KQnลธnSฌ๓ุ,ฦฅม™ึNืfฅงz๔X{nr:ๆyZํูฉ–:tv€ฯMไไv~zvะ™ฯ๎๎์ษY‡cNัnภeมอkัPQฎย<ฏฝ6^อิ็ืfห›qZkn;๊xถ&}"‹๓ฟณ๊ +UŒ7pLงโ|ำ๎ผย1/ 1;5vcCธ(่ๅธ้!ฎI™†pณฝ!œฏvR ขโษq\Fœวั4 ขิ๐ฅ๔Aบต‘ aผGx…@ง\/ธ#t8@ZูฑX:žณฮ„“ฎ7๑~ V}Lo')eณ8%ฅ ขLื$)ฅ8g 'I)gชHgำ$Qฆยƒิโ็rlš†NqNฮฮฬ–`3Xgช]žUœซcฤิv Rฆpf็ฟt)ฆ โ™~ฟ™”"}ย|Œขnˆ๓}ฯ›^อ๘ทซŠq๘7]œ;Žqžฑ‚ 
s?ƒ…ุฏMอ๐7ŒฟผดXH‡b!Sภ--2ปจ„…b!pNgœ+rloR*Zท)›šึฮืAFตฎ•ต™™Œrฉv&†cคฅ LๅŒฝ ฮ฿5szuย=Wใะo,ฝโ๑ptฑุ๑ะb!ฤyภ:‹…Qะง~Š…๘;fX,„@ฅซฌX!ฮิxลB์ใqŠ…lgฑะฐd@\ž่๑œ7*YฯN,ดxHฬBp)๊h9;๗\ฑNณ‰๕f&๑Pฑฐ&ฮ XX๛๒ๆฅเa/ˆ๓๕฿:ต ซ,ฅโฑptVษ…-5ณฮนฆทค๔๖น’าฮภฑLaทcรด~บ-u๛ฉลcฺb-:kห™พฎ๓ฤQ็M/้ไ*์ณpgMป:่ilปถ็”๖Tgฎcิขรžjึ™rฯeพต…m งฯี9WQNฑฮLc8uหนดๆ|กๆฝ"ฮ๗๚ยซ}ฎ๘E16๏#‰๓พมFฟ;ฺ'ไฤคu•Sอฟ฿’็๛ฦฮบpฬA~ะP้้VZ;#Žฉ)9ฉมฉย6› 1@๐@JY{ฮIึhโ} rLo!ี†q$งZNชโœข›ค”ฤ”กยœd– ™์bƒ"„]๘Ž@0Ul+ษTqnบ%ซ:ž‰ ฅน;1œ"คqยต“yฮัข@G}ๅฯ๓ํ†8฿๓้ี๔Ÿ_UŒƒฏt็๑pt`›ลBญ<ฤ฿›’ล็„‡Š…Loวฑ แ’,2ีb!pxhฑฏใ5‹…,)aกฌ$ฒวb!็ ใธNฐo(ZณXจiํVธkZ;ฑ฿mฦรึnV&,์%<„8_/ˆ๓&,Dฌ์ฮนcแ(น่Miา*ฮู™]z“0Wqฮด๖ุ๘ ะf{1lQ #]<นเLGgZ{็ผpก)|“+]็โขืt๔4nM›ฟลๆpA—ฯg—v[WกอฯCEป์+‰๓์จง&qv|Me็R็|ฌyทฤ๙ŸQตืๅ๗cรwุลน‹๓>sŽ่.h:!q’bส'๖ฑใ1"ธJ‰ฤF’๚ํต๎ๅ Ž ž q$Y$]š๒b W†$ไ“๏ร#วq„ฯ}ฆaœ1ฤ( t%ฆ ๆLe๛ฐฟวไเฐ&_Sีญค๛ฌSฤ:N_%ค๙& \;N$3^๊)1ฯdB=Dฏ‰๓iŸœ^}๘งWใ€ฏป8w<'}8X1<,`!ยb!›ฦY,ิ๔pลBดXศื-R +ฒ๎œ)๏ - ‰‡`!k๒‰Š{Z“ฏ๛ˆ‡Š…tฮ๕†%3Šˆ‡%,Œื2ฐฐq>eวฉXˆXi็Ž…ฃ'ะ‡rฬUtS ๋~M{็๋๊ฦืfw๋|๏$Fู!บ๗8>6‹รx5ŽV“™็Y”S๘Zqžฦฏๅ4wึขณv=Gซฑc{LiGyุ?’Eดฆž[ก.7(jฉ์M)ํ๊šn^จ8ืeำฺ{ )ฮ฿นVำ.Y16ุ๛(็.ฮ๛ˆŒš4ภIE=ฆฆผƒจšccช Gิ‚DวˆskQgIiฐFฉ›L ้Iฅ๋‚sฐ‰]%บFZฮNฦLuW‘n#tฬ๑>œ‡iœ#%ค์สฬ™พ–lช8ทN’ฦiS8œ;w&†S‘œ"|m}Œ›ภuKœ๏vฮ๔๊C?นช๛อลนใแ๘เa k)ิŠ…!,r6ธลBโกลBโกลBvAทXจ#ื,ชXW,,แกฆรw‚…Їฅ”u็š๚nฑPhZ๗œx8‘ฑฐqพN็MXˆpq๎X8Z‹Bุ.M_ทQ:NS฿u&บvืjึ9/ีบใB6ืž'ว\มE1ฎโม๊LoOฉ๏Yุ3ีข_šรๅ๎์;bq^jบ?ฃˆhŒฆcLอ9ล9…9ฟw+ะ{Yœ๏ูปซท_๒ำbฌ๗.็.ฮ๛ ย J๋-ฃLNQใ๛@Ёๅ8!)F:$g์ฬหDRv4ฆ#BJาJBสFD๗ฅ๔๗๛d0๊"G"าญซฤ9มx?ฮษ”๛แฏVgFฆขวE/‰vuŠp6†‹]ก‘ย~t๓T Lคี‰8฿ๅ์ใชร๕›ลุ็ห3]œ;Ž-&๙Hœ@6แฤฐ‹;ป๓2“ไŒ ใเ‘d‚|า5A:'๋1ูHŽฉ–ณ Št†๋$ tหู ~คiœ๕/เ๎pu่•YI!iWqQกฮnดๆ<9D์HOŽ‹ัKโ|ง_vืีลุ๛K็น8w<s,Œb!0 xhฑ฿l&ฉXศ๔u‹…็l"GฌขP–ฐฅ? ‰‡Š…x$v„…‚‡Š…Šqฅn4ป@๑X]sโa kx,ผ็ฤžยCˆ๓ตถ฿ข +ธ8w,cqฎฎ๘H ]ญ{ึtuบษ๚a—๑:ขเๆจฒ$ศต3ปŠsฆˆgnปจ›ฎ้tไs๗v8่!˜๚I&ฃๆžำWมฮืJ!ฎนฆ๛ฒ่œ๓ใXw‰8฿;ช/๘Q1ึy็‘.ฮ]œ๛”อž]#)คG‚4ฒฦAD”ขค“Ž๊'๑ฒFฯ†I ญœ ฬวณL%ษ)IฉบG:+˜c‰p ฤ-tQG9‘dš;ƒŸ[!%ฃ*ฬตฦ2ปv์ โก_ฤ๙ŽgœPrตลx็e็w$ฮำ,หาธ‹๏†XN^›โก4๎bW'คพ:ฦย‡ฮอXŒjYSต‰Š…ภ;๖ูP,„ปฮ้Š…ษfฑxH$*&*๊ศสŽฐะโaยBบ่ ›„y ั{C๑ะ`a?ˆ๓ืnทE#"VXoŽฤy?เกca๏ฌ’ศฆXงฬื่0gŒ่œCHf™ซsฮๆiน ๋ฯm7uํšห&qน{{xlj๚6,=ฅ๊sีๆนซ8—”า~ญ5ทโœ฿ำB™A?ˆ๓mฯฝญฺS?,ฦฺ๏๘ ‹s็พ†EL8;(’0ŠM6Ib$Žแœ•ยpย!โ|[M}9ีฺKK7ตš%‚J’ชฎถ9’๏i}eJ]œฃฃ#ฮZt%ฆ>ดN กฑ4็หฃQsคr†ศ฿yšg"Šzห็vBŒ^็;œั๊ |ซ{\๚ฉNล๙Cผ(mฯDคํ Cฬ ๑’kง™”ห8!๕ี-,Œ๕้  ™าnฑุ<ดXจณb!ลบลย๛ค?ว`xH,ดxุั*เ!วห)ๆ๗๘(โy# XH<$๖BœOvหF,DL๊\œ๗<:๖ึาzh[‹Žวำ\Ž)ฺฆั\ว–DwฏAˆณž:็lWšฎv๒ษA)๎I๐#–tีFฦI:z[ฃ8ฆฐ9ๆš๎/๏/5‚ณฺฑ4ญพWล๙ึ็Zm{ลXk๗#\œป8๗5lRŠNฦHม้ 3ข๕‡ ฅ — คุๆ,]<'!ฅcฤ๏ฺๅ๕%G]Gฏiร$ุฯฮศ;EXF˜วปคฉ๎’Bœมฺt>'!ๅ<฿ฺศ ฟ}?w’ฮฉRSชEžณXœโค!ลcŠ๓mO9ฑ:เฦ๋ŠฑEŸ้ZZ{X{‡ธJ\ข๒ฺm!ถrq๎ซซ=เ!ž ‰‡Š…œunฑบb!v‹…ฌ_W,lยC์ณxุ๑2Xศw‹‡l๖ฆ7-์|๓6,$ฐ0~็‚…˜qkโ|อญทlฤBฤ๒๋v&ฮ๛ {oฉร;ฟะํv|Ž๏ž์๖กšวณ…ํ๐Mแ4ตึด?ešำฉP† O.{๎ข“U๛MคญN๏C‰s[Ÿฯฅ)๕ฅQkฝ ฮท<๋ีึŸผซฏๆโลนฏแRงEwE‚ล™็xdz#› ฑc/Hฆ:Hฌ1ด๕‰?„ใl”Drชต้Vจ“€ยR"ส๋#k~4ˆcDBˆ$>/‰งvๆ>KHu–/ฦแ;Œ„ตฎœฝ—kZ5–Œฦ€;qHh$ฃฟ9ญ'ล๙63Nช๖ฟแ๚b์zแ8่็8ฟฤK๘ณn qPฺพ˜้๙!ํโWท๑ะb!…g yฃRฑฎฒลB:๎ mฺ;ฑP๑PฑpViI<์ส,„ธ&**ZqN<ฌaarฬ#Z,ฤLsเก`!๖๕š8_c๋ญฑฑ”)8๐ˆฅ {si๚5„๗€ฬJท๛ๆ›ุ้GAอดn>ืš๒,ะ134Qณ#ฺTิFแŸœ๘\งžาเ;]Eัms็…ใU˜k—v:ใ๊Ž—บ๗ชs>๕ฬ[ช-?qg1&๏๖็.ฮ}„Œ’`ก{/H๋๑ศtuM๏dบ&‚ว*aำ๚DŠyบ๊tฌsd›&ฉƒ4+Qว‘mธ†Žษh  v-ถก‚’v$F gtŠะX)œ3บDO฿ุ๊PŒ”Nt')%ล#:ศ่g๗ค8฿๒ฤ“ซ}พb์๔้ †tฮรบ3ฤ…ุKŽ95ีXพ0=ฟค@F๗uq๎k4ฤนb!ฐ%,ค`ทXˆฐXจ77 ›ši–ฒ‹T˜;ฦB,มB์ก๐ะbam”$ฑำ*ึฐPลน`!ๆœ๗š8_}หญฑฑ:Sชaœงฏ๑ะฑฐ7ำี!ย็=๑ืšวถ๎c,ว]EตŠ๓˜’AอFotฮ“kฎNณ VMืฎ๊Yคsy8Oวโ\…ท฿Zg^z s๛=่gZX็ฺ˜ฏืฤ๙f๒j๊™ทcอ]wq๎โืฐษhชณŒ‚2u.Vื˜๎‰ชv3ว#%jš ชว“ศยARืˆa#>—๊๑:ษ(;จทˆt‡ค4ฅ๕3๐™๘yี5ฒู๘พขS„ฆJแ๛‹eแ16Bzส–{„fH Ÿ ค qำวcu"ฮ7?~Fต็ี7cว๓.์8ญ=ฌCCโๅUฝ๙‘งต๛}<4XHกjฑS,2ฝb!ลบลBบ่ Kต้ฤEโกลยŽ๐0a!n8–ฐP๑ะސไq5,๖%<ฌa!บโกf๕BœฏบลึXˆx๕ฺC‹๓~วCวย็ฌ1งSNื|n็่*šต9„&๋ืuD[ธlFAอฆpฉผMทณมYO็ูŽ=cนNEzMˆRืถฤนŠpอ เs๑ใ้šw*ฮ฿tสี[Nฟตซ๏~็.ฮ}ˆฮžู"L_‰ฎH)›ฑS/ˆ๖€qฎ-ป๒‚œ•ๆฺ๙นt8nH›$1HJt‘ฐ าŠใแ8-ฑ8‡ซƒ›าฐŸ ŸกคTCคถ”vคษฃฮ/ืดฤ9ถYwŽาt*ๆœ_|฿"จฟ>ต็ฤ๙[ฆŸRฝใ฿+ฦ๖Ÿ\ง แv 
๑ป+™™Hณฝ!œฏQมCƒ…๘>X,$Z,dร4‹…๋ )ะ-ชHW,T<ดXุ&,„๛M<๒ณ็•ศJ ุซแ!ฑu็ฤCƒ…ฝ„‡็ฏ™บu#":็€‡Ž…ฝ-ะ5ฝย˜โ|ฎธ็ถF}กj๋zณŽ;บฆ๙›ฆซƒlgทซ@ฯ็a>Rqn฿ืๆŽ7ิ˜—|“kn๗[ก๋โ|ำ“ฟWฝ๙ดc๕\œป8๗52~๖ปณ วDJAฮ,ั$ ร6AโHิM!)ฅ๛ข้iฅ6Jฒ๎‘+Žม๑lฬ„๓ฉCลm m]9ๅ’dT:H%Hธ่ถ๑๗“ดใœ๑ผilPŒไœGB ื้œM๏dNาฺ๏9ฑ็ฤ๙›Ž9ตฺ๕ซ7c๋ณ/๎Tœc4ะฃ!๎M๑…ชžฺ๙p4m"ฐใaใa Š…Š - ‰ U +*Z,$Z,T<ิG‹‡ธ๘ผtณ2l+……๓5คด eฌšลย^ยCˆ๓U6ฆ ฏZkNลyฯใกca๏.MณVQlslkP4—uuัsช{rผฑญUลนบ๗ฅN๒ูA—n๏ป็ญฃพsำCdQฮ๔z็p๓‹Nนm]ฺi—S๛๐ ้ๆ โ ฝกฺdฦอลX๕ญ‡u$ฮรšโŽHหrC๓๗ ;O–ื Ž>‚วดญO—0ึลนฏ๑็ Jp/0=ำ!แd%ล:ศ^$aUeQBJยึ$ะี5Rกฮ๚Ku๔9^'!ล{X/฿$ฮ5ล”Ž")%!E &\๏็nฯ„T]#žGE;‰(?vฯั๘$”O|ณี )pใX{ฮTฮดkโ|“ฃOซv๙๒๗‹ฑีว/๎Zท๖^วรฦCƒ…ภฆz+๐b!1bธXศฆq ตฝ„…*ฮ๑คษ+–ฤนb!žื๐Xˆด„‡รมBลCลยEพb1*bฌšโก`แxิ›w"ฮWโผ หพถ3q๎X่k<—ฆZซkM—œ๕่๊๓xk[tnซƒฎ5ู๊AOกbuม“๕n ๊โœ็ืfkY์!ฮsc:Nย<ฆฺsฌZƒเฎuqทต่าิNq%aฎโŸซืฤ๙ฦว฿Pฝ๑ค›‹๑š;็็Qlใ‘c%อ1หค–๋„xqส.ฺฐpงมeEœ฿๏ฮนฏ‰MH:7บZKHัMQ Bˆ4J1:-$งซนxฟผL๑dอฆึคkww’S์v "หfsL๓d*็๔๊ฌ^\Cฎ$!Rสkf0ีŸŸ-~f6•ร#ฮƒGQ8E ขh‚jNkืNลธ!’ๆ๚๖š8฿๘CRฝ๕ฒ[Šฑ๙้Ÿwq๎xุWXˆฟw๖™P,$F,„ HXจ7๎ˆ%ฤS‹…ฤC‹…ฺ๊8Ž7'KXศฑm YP,T<,bแำ7ฑมฯ–rv|W,LxXรBˆsโกมย^็+ฝyF,D,;ูลนca๏ป็ ŸฌwnWqฌu่็฿Vบปบๆ6=ฯ)ฮตœใ6`๊ํH7ŠsภQœฃŽ'Mใ ภS:>ฏ')๖ึๅึz๑โ(5y3Tะ7นๆ~ฝ&ฮ_์wชŽฟฉซlOŠsธแซฆํU๑ผpฬV่ฟQ5๔ๆH๛^˜ฒ‘ึsq๎kโR%$ค>vyญ~’5ๆ wx$ Œ„$Nz$ฉ‹ZไฏSฐkhส<›$ฑ“ูอ˜ไำGฑcฒR<'m่$tุaืn‰uM‰)ฮณcฤ”v6„C')Reูฉใƒฐนพ?9ฎ็ฤ๙FGž^ํp้Šฑูi—บ8w<์+,ฤ฿7]sลBโaBเ –๐N;๐ะb!ปบ[,ดธH,$๖)b[ลนb!ทตf>b!ฤ5๑ะ`!๑Pฑ0~vโกb!oTZ,Dอ9๑ะ`a/แ!ฤ๙ŠAœ7a!โ•“ืsq๎Xุ๓โœอ(”5ฌ8gXMวฝ4RL]cM(t‹gุFt l-{rฯs$žqoDน—f]๐ถ4u9NลzฉKปDฐYฺ๔ัkโuG__m8ฦbฌผ๛pะ—tฌdXO˜็Žywˆ/ษ๓ƒ1vาณฝ~ฦ$ฮŸ ๑!~b;็พ&!%IB๗นึ’3ฬIๆ@PA๒@๐๚โzล)’ฯžI*‰)ั9็น้”ฺEš๎I"สz<ƒnQ)eใ::Y™‚\V๕ JJ•TวฯŠฯ–ˆxIœวHiึo&ฃ๔หญูพ!žั๔ž็ฏ;Œj›ฯVŒMOพฬลนใaa!–nY,$ถaaไ1ˆƒฤยTฯญXศฎ๐znb!๑Mฑ ‰}MXH<ด7+๑ cfเแPXXรCลB็5,Tqnฐฐ—๐โ|…MทkฤBฤ+ึtq๎Xุ‹"’ยQบึ›sผลนŠk=Fsแถก|้ฯŸcมi่x5M5ฯ"]บนืDธl7ueWวฮ[oJe/9ํึuoๆNzMœฏwิทซ Žนก+ms่ฮ๙`c%‡)ฮ๗+ˆ๓‹ฬ1—†8^žฃฉๆ i๛-ษU•‹s_‡”‚,!๓‘๓#9c๊ฅv%ๆ|[=\›ยYญษI'm5$j‘=Mmื1Cำ–d๊ฌtuˆH`K$”DT"ํ*ฏ„4฿XHืฌ.]/‘„+๙N]Šs38RQ4˜วQA็ผcีVŸฝฝ›œไโ๑ฐฟฐ0Ž{๎๖6,ค8ทXH์hรย„- ูฏฃ ๖คอช0'ญ8ฯxธจ=สโ`อ=ทXH{kิD2งB<“7)ฤ8šแ{SทcƒคK{?ˆ๓)‡|ผzหฬปŠฑแ๔/น8w<์/,„ห‹ฟkƒ…ฉฦ–:๙!งถ๓ๆคด—2txฮ๊หมฐะโ`“(g๖“ŽพT,ไคŽa!๑PฑP๑Pฑ฿gเ!ˆroุพ /_ลนca๏/บวถY›็Lkง8็>พ>ะเšDyIœqnวฒ๑m๊ธŠq NEพ:๑*ุ‡sย‚ฐWW]ลฟญฏ๏qพ๖แWW๋|๐บbLฺขcqพBˆปา(ตป( รZ-ฤ-r๎!L]O5็๘jˆ#อพ}Cg๒ฟ ๑ฮสkฮ}M”›๑sbซ1๏ฒMŽQSืH>Q[Hg‰d”$Uj-s=ฆฬัeC$uัIKbฝ๊’–ฦNว\g๓’Œ’|ึj(…Dที’j]=>/ศ'บc62ฦฉ8Ÿ`)œ;็๏=ณฺwcฃcพ่โ๑ฐฟฐ"xhฐุั†…Ї ‰+ีโ– ‡ƒ…%<,e ‰‡Š…šึ†…ฆ้g -**ฮžูx๓ทkฤBฤหW๓ดvวยๆL/uSืšrˆpฉFaฎโ|~ม5ืzr†ํoำฺU`[^rฮmmท9๓ ๎|ษฅ็{์ uาญX_๘dนk@Ÿ‰๓ษ๏ปฒzํ๛ฏ)ฦr›ฟท#q/แ์kไ„T"v)ษDŠ"วใฆโ5#!ี€๋ฮฮล*% žฮLiฬš†์ึUฒ)๑%aฮsัg(ƒiœต4MK<ตณ~.|G Ÿ่D BŠUคs*!Y๊๗‰8_๏ภUS?~{16ฐืœ;๖ขA๐ะ`ก:ห5,ฤMอ jบb!ณŠFŠ…ฌ‡ทXศฑi<ณŸ,jZ{)๓ฉvsuๆฤร4ำW„9kาY—^ŠRjปMื†rMฮyษฝfทy+ฦ็/(wžŸ?HšA‚'หก7xx7~ qพๆ!_ญ&ำUลXnณ]œ•8G|ถฤŸดS{๚n{]œฟ๎=gT[Ÿ~k169๒ Kญ8w<์S,ฤ„4ษดXศ๚j‹…xMkฎ‰…ผูฉX(sั-ZKยรb–9๑Pฑ0นๆฝއฑ[๛๋ทmฤBฤ+V]:ลนcaฌาŒn้…๋๓`WฌโฆฝkPศ๒=s๛>TJนญํž_hP7Wบห—n(”ฎKEภ‚v็฿ฆภ—{S`‚pฮW?๐‹ี}ฅฏ~ำ.ฮ9๗5bB๚รcc*'"R'ˆsI%`:ˆ)GˆeqŽ4Fิฒf]ษ)‰,’ผEwที8j=ฆ๋๊%—]C]"ํFฬิอbMฅ’Q:B >วgJ<’O sศ)๖ม9ข[”ศ~tใ๐]‡่UqพแพRm7ใ–bผ๙๐ฯปs๎xุWXsค`—ฐPnาe,v-‡‰…Ÿ,j|“ห^ยC+ฮ-ึฤนๆЇฺ “xจXH*ย9'N,์Dœฏ๘บmฑ๑สืฌ๋ฮนca฿‰sM/ี’Fp๓Œ/‰s$Žbท$่ >Nญิpอ^ื\้,ฏaEบ๓sDปฆฤซณn…|้sZ7พWล๙ช๛]\ญvภeลxี&๛ธ8wq๎kฤ„๔Ž,Nๅผํจy๚ำE-bI2F'\ )๖ั1าืx,/ฬณ“ฎ๕้l˜ค’ฬO็86u“X#IbZ"งV˜ซsฎ๎ymtญ1วตiZ*‰9า4)ฬแฉ(วsRŽQร฿Ÿะโ|ฃ}Nซv8้ๆblvุ%.ฮ๛ ใอJธผ ‰g !z๓œoลBŽXดXจBWฑp‘มCูWยยม„z ี9W,l็ฤBึ”[,d เกbกธไ|sŠsโaฏ‹๓ ถiฤB„‹sวย~็๚œุŠห๙ ส฿JNtI๐ฺu{œซfปงk#7{=LซงุtยjNŠGS”บ|+ํฌ๗&>PAว๗๗‹8อพVซ๎๙b,๛ฦฝ]œป8๗ตDค๔ฎฃ#†Hh„ศU&‘ ™$dšฆ๙lkดPlŠGwHๆ}ื:ษ*ษhฉ“Ž’ค{ฺ๚L6eฒ5š6”ˆZqFF›œsxHŸ=Od 0…ฤุง.:R;AH‘ B 2Š๏9DฏŠ๓7์uj๕ถใo*ฦิC/vq๎xุWXŸKXH7\ฐฃึŠXhฦ-ึkฑN5ทซ๖zo๋จ๋Mหแ`กŠsbaฃsฎxจYTฬ "๖%,Œ‚]๑XqN<œXุ‰8_imฑฑ์*.ฮ {ฑvŠ๕RืqญอถตใถkปŠ^Šb+Œ็!kSษK๕บฯบ๘*ฬhล#าq-๊ึ7ีาฆw๖ฤภ‚'ำโK๛&ยผ๓Nฤ๙ส{}ชZeŸฯใ•๏้โลนฏnธGู=ท‚šn 
๐@โ8Ÿ76ขcž•Zะ9")eํฆึณซ›DN‚ชไ49Hถ&Stuฬ๕8uŠ่œืๆ๕.’ั@$ใฌฑื๚J8ๅุrŠŽฤุ‡ัAษhtŠ@๘o`Œ^็o๓”j็้7cหƒ/rq๎xุŸxhฑPฦ(*O€‡mX˜šDถaกลWb โกฝYฉxht^ยC-ํั†˜ึ9oรย&]ŒWlด‡‹s็พบBH4ฝี ค’คŒd‘nPxวN๋q๖yชนฬ#tR-zฎม,น่ZรษŸฃT]v#ต™œบG6”ฺบ๖ฦ๑i๚Yyฝ็h๚Jโ‰ิM> Eh'าeฟ๗ฝHF)ฮ฿ด๛ŒjทPŒmผะลนใa_ฎ"j_„…*Œ ‰mXH<ดXh๙Œ…ผ) ต฿"า9–m8Xh๑ฐฑ๎œ75+€xจXศาโ!ฑ๏'ยZRqพส”ญฑ๑ช•ฆธ8w,์ปฅต็ $ต]๓…ฆ‹zฉ9šŠ๓G%ต\๊๙…T๑๙ †๎ฎ>฿คำ—ฤ9„๙k tฤ์$ิ5ต]าYq>XX>˜xŸ?A\๓Nล๙คigU+์๑ษbผ๕ำ\œป8๗ีqŽฎล$คtPH"“@‹๎ฮn #Œ๏‰ฎว‰%2Eบ6#1ีZL›ๆฉs‚IXUผRชnธ:ๅv~oญžณชwซ5@๒™ฏ1D๎> โ‰ฤSฆo>xN‹”f‹ˆ~๗ฐฝJF)ฮ฿<ํไjฺQ฿-ฦถ๏๙ฌ‹sวร็ฤBเ1ห`!sลยว (ี›ฯ+์+=/o๋อ'Š0๏Tœ/๖U“ฆ]Œ—mฐซ‹s็พ:ๆจ„sR5๐•ค8็˜ JHฦ@ๆ0ฒgเ้ฏล๗8 Xy|ไˆ1#ะ๓ุ!u‘ิIื๑e*ะœฃ9$™ฆวณถSป&3}_ˆhTณูˆ(Rุ$ฃxd)œ„>wอ!1z]œoพหIี;?p}1vุ๗็އ}‰‡mXˆว„ 5,Lขึba-‹HฑPGPฺlฤC‹…ฤC‹…*เ -faฎxH,ิ}Š…šยฎXศ "–๓0pƒ’xจXˆพ}€‡็ซฎฝU#"^ฝข‹sวยZ:?\รฉkมl›ณ-ัk%ั…`ย›วPศkอpา฿'R:{7ฤ๙ซw:ฅZn—ำ‹๑า๕vvq๎โWWฤy Qฑq(ฦชˆ‚ะธQดฒ1Rrท‘ส R๚—gฎs]ถฮ6คT#ปHš6‰ !ีNluืฅqœ%ง๑ฺ™o›ฯูฤ๊1=ื`Gv|/L_ง3D2Šืฐ/5>z๎[‡Vฯ^yPŒž็;ŸXํ๙๋ŠฑใŸqq๎xุŸโ`ažRaฑ0์ณXำy ๅ†e ตF]Sษฟly๑P๋ั๐ฐึ}}8Xจย\ฑ๘<„W,ฤwฅฮ9›ภมพภร(ฮืฺฒ ฏ^q็Ž…})ฮKณฤuพทฆฝ/w๋hฯ5โ\k„Ž\ณ‚ฆว—าส)ส™:oล9\s<๒gำI็5จศึิvžWS๑Uœๆv๖†]๓~็ฏz๋Iีซw:ต/]๗m.ฮ]œ๛๊˜ŒขQ:ฃFsi8{๑จ8’ถ$\AY๏w&R98*งรบl† w’ฝDVk.บญqิิO6–S๗ˆ‚]ป๋่=/ฯ-M›” ็‘@ผ6\3I(‚.|ํทgดๆ%฿๑‘่๒&ฬ๏ทqพๅ[Oจ๖9๔ฺb์ด็๙.ฮ๛-Fฬ#ฮๆy ๑๑„หKXˆ ั%b!1KDsvตัf ‰…าy"`ํXณ9CŒ8Sแ=ืu6qร1V@๋y้’ณ {ŠvŠsญ…็๛Jฏ5อLoj‡ะๆy}ภ สํŽซ–แฤbผd\œป8๗ี5BŠๆ= XFฑัL‡Fล,ˆ*]๊๎xค0w๋ๅฃŽใL\๋ฤค9นตštญs$Aฅcฅ š4””ฺ๓ะbC#ใ\ีjๅ•‘\๓Nล๙+ถ9ถzๅvวใลko๏โลนฏฎ tิƒtbS;AบดnRI# ๓wถ๊ปำlLเ@ุ,H#Rบำ’™ทK„”.ํ๘Nrช๓ึ•Œ๊œ^M1ีZJ^#g๘โ๚๑€tj9>oฯhฅo๚ิึจ ”มพ็n{\๕žพYŒ]w›ู‘8๋ฌฟ qoˆCฌ&ฏอ๑Pˆ฿‡ุี ฉฏqลย_œิ๚›ทXจด`a|Z,ิฬ"‹…ฬ,2X˜๑PฑP1ฮ๖์ะIโ๐ทแกfY,ควu*โ๓ํส„‡Š…ฯ฿๔พ็kฌ9ต ห/ฟvGโผ๐ะฑฐ?—ึ›S ซ‹ฮ4wญ;วใำO57‘ฃ๛=OดŠ`๋P[GZฌiฝนญQgฺผฝ 1[ฤน๙Vkฬ1ืฅยผ42mžนy@๗ผฤ๙KงUฝlหฃ‹๑ขษt$ฮรšโŽHห7๗ๅ…ธธ๏K,u๖ีBŠั_ ฅAlฦzAlC|B”‚ ม9ฒ]ึูแฯั9ควk'_Mทย\ ฉ“œํl\KนิN๒*ิm‡a;Dิฆ•bBJBฯ‚ๆF ค\ฌทd]%R8ั๘จœ"Š๓ํถ™^ธUล˜ถ๋นŠ๓Wษ๖1!พถ7 1+ฤKBฌโแห8!๕5nXˆŽใxnฑ5โ SOŒ6,ถ”ฐ8จxhjีm—๗Œ‡ ๕Q›z*jฃ7\^E9๑Pฑ8H9ฤฬ†ใถ๑ๆ‚8/พฌฑิุW๗H)FไธVj"ถ)ŽH lLƒฌอ2งSรFnpb่ฒะ)RฒiE8Eฒึฃ+I•n๏ฺ58 tฆfj๚giพ:Gข)ี๔un65"e๚&ศ(ศ'›q?sQ๗Y=๛ชg.ฺ/ฦ฿ฮูซ็ล๙[[ด๗•ลxวฮ้ZZ{บ›yฉlฯืn ฑ•‹s_ใ†…iX ‰QŠ…ฉ›{Rœฐxh<๖้ไ มร"j๊ฝNฦH8™ฑ?วb!qŠ…x$*โ{! >๛ี๗๖BœO^m๓F,DLZnญช[˜าซx่Xุ฿]Eธฆตkณ8<าUืน็Zฎ๎tษ5ŸcฏอmqVš}ฮZ๔Rs8[Oฎโ\๋หี)ท๕๋ฅM]ฺีํืน์ะ๎7}_๕โ7^ŒeVv(ฮแjฏšถWล๓AŽ]ซ ฮ‹๏k,u๖ี]RzำZ‚ฮวmGตˆxยq‘Iตตแฑ–ฎ5–Lๅค0&้ิtr[‹ฎ’J%ฅ$Ÿ…†FตืH~ูœIื1e“NE9H'‚„mฮ๐Em%R8แ2๚UWฯ|fŸHD๛@œ๏บ๎ไํซC๖๚z16}ปqะU8ฟฤ#Ÿ๑(ภ5ฤJi฿ล!’cฎ๑n็พฦ !8ถaก4zซa!ั4sธXจฮนbกฝYฉX(ำ0ฺ๐N๊ศาฺ ‰ฝ|$jณ7ลB|~โกj๔‰‡‚…ฯ^q`_เ!œ–ๅ_ตfuเ๎—ฑ๐];_ญผย8๐ˆฅ ๛{i-นฆถ๋Œsuฮ<นธu็%ืฆณkS8ฺ*Šฑmgžsด›ซMโ5้๎Rื5๕}จfuM3ฮK๕๐}ึญ‚eึฺฑ๚วMฉ-^ดษ๛ชพbtฺ’baXO˜็Pœ฿?ึX๊์ซป„4ฅtFbŠFq cHซ„ใB’ษZrK@ t‡<†ฎQ‰ 2}R;ซ›^"ฉŒsŸoบ7Bq^|Xcฉฐฏั!คh’–ศhN[D%แš€ภิ‘tา5B:'คYาๅaƒ! 
:6tpT˜+Yฅ๋dŸ๓•B›ฑnฯ5…]…9ˆ'‚ฤŸ—Ÿ‚ฏฉStอ!ฑ๙H)j+)ฬฑKŒ^เ’{-ื<ฌ๋SฦbuyํิิIw@งMTa๎xธ”`!๑0Vฌ•ด,ฆ Ÿ6,ไ๐&,$ ต๋ปโŸn   qS’xจXˆใ*๓๛ื๐pผฑฐ „ดๆžwำ5๏jfS\บn›r>ฯ\uษญ‹ฎAมไไฯkHcWqnท)๘!ฝฝSn๘+พ>ป็rอญโฎ4 “าีB"ว]bnˆgC1ฤ๛{Xcฉฐฏ๎R8E่T ๒2‡kฑ๏‘๘:F มAGช#ŽำTqˆq vŽ)ใX2Iฯ|Sใ5๎G8๊1$” =๗ฑ6’้— ’ ™$ |f:gจมG6บ8c?ฮ‘š‘ˆฦฎฤ3฿U=}๖žีSงLซ~xงž็๊žwห5๏งp<\ ฐ็ภรF<ดXจฉโŠ…tีKXH< ใˆmŠ…oฑP๗ f'œข[ƒฮธbกโ!ฑฯ)ร๋Š…ฯ\ธoฦร‰€…ภCuฯปๅš;๚๊%็โปๆ˜#ฐอ๗Tฮ4sMึƒ7Mcจ3] วMอไฌWqจ้ไฎูKsืญK>’ŸAW}"ธ็]เ†ซั=๏–kOแ์ซป„d„ ฺAเ่ „}pD"a%KŒ#Iคหรบpu}ฉอ๎ ษ2\'[%ฉ)X˜]mฯฺG oพ‡๏/‘O›šNฒI๗Kษ'CHh$โื#พ†s!ฅ=ฑ0เ`์าฉฝ3N,์!๎๙;žี๕ZsวB_yฑ3{vฬ)ฮE˜๋h5ŠsึYำMWกฎbฆจ๓8mฮZoฆฬ—œ์Gšห•:ฏซธnjFg๋฿Kฝษ…/ฅใใxฆน๗2Vtฯ_ป#]๓๕]œ๛…ย '8’0ฎDDIฦPG (E;H\#คIฒ้›ˆยฉIณq# ฤนRG฿LHS]wญžฑD™JžR๊ณจ/‘V๓<‡:A๊!ฌ ว๕‘ˆb,„9F]yP ผ kž…9ศ่9{ตฤ9j-ฅzโƒ;ว่e]iาzีrหฎแฎนใแR‡…Yค,Œว แœณฑ›ลB8ุฐPgลB`—โกb!๑ะ`แxจXจxXยB„b!œ๒„‡5,d‰Oxญ†…A˜'v แžฟไลหบk๎Xธ๔sveOบฆณKCธ)โ}*ศูv~นึf—7`RๆuคYIhu›>?๗‰ฒ#_็fCล“8Wฎ๎yฏcaL5ว—ปk>^โฬ๏\โ3รn"ว#~~ญK๑ตนK๐–;;่ถแแ฿oมฏีฏตgฎ๓ฯ]๘๛iˆc5w,๔k๕kun๘‚ั`อนแ8;็้—๑๋๚ฯใื~~ญ~ญ๏หฏีฏี?๔๐฿™_ซ_ซ_ซ‡‹sGํ฿ฉ_ซฐRฟVฟVฟVวBวBฟVฟVฟVวC็ฺฏำฏีฏีร}๙ต๚ต๚uz๘฿_ซ_ซ_ซGฟ‰๓#zฉ&สฏำฏีฏีร}๙ต๚ต๚‡;๓k๕k๕k๕่;q๎แแแแแแแแแแแแแแแโรรรรรรรรรรรรรรลน‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‹s็.ฮ=<<<<<<<<<<<<<ฦ=?ซงาBGIENDฎB`‚xarray-2025.12.0/doc/_static/thumbnails/multidimensional-coords.png000066400000000000000000001204321511464676000252600ustar00rootroot00000000000000‰PNG  IHDRˆ|/9tEXtSoftwareMatplotlib version3.3.3, https://matplotlib.org/ศ—ทœ pHYs  šœ ‡IDATxฺํฝ ธ-WYๆo!!๓Hศอ<„$$@ƒ Ž€กตilตQT4๒G™DาQ†ฅ Bƒ ยฟdTBˆ†ศ(d #™nfย…ฤ$๗’Zฝuฟ฿บ๏^ง๖pฮู็ž}ฮ]๋yงjWีฎ]{๏ช๏]฿]ืDCCCCCC๖#444444‚hhhhhhัะะะะะขกกกกกDCCCCC#ˆ†††††F ‡ฤOฌI๘~ยถํ๗hhhhฑuยu ?ฟยฎ๙9 ฿H๘aย_๗์„ซƒ่>•p€ํ>แm ๋-แใ ด'|.แ๎„o๛mFŸฦ6 ง'ฐ!แC ปŽ8ื K๘v‚6sฬฯ'œŸpWย ฟ2ไ\๗K๘pฟฺ๐ำี?Jธ$acยตzฝะ๏๛=แ๚ธฎLุsฤน๔ผ+~“๏$ผ ฺbยy๑YZžุžีF  &ฝๆ''ื„ฟช "ŸJ๘nยฑ!$uฬl‹พ™ฐoย ๏K๘ˆํJย๋๎Ÿ๐”„;๖q-COใ!LLุ9แฬ„๗Œ8—ฎู้ ์#ˆ4ึฦw;%แพ {%6‚ ž—๐่„[zBฟรIqžฃBธ๊ฟ็ฑA4๏๙7"ร็zuยฟ$์‘pLฤ/ุu๋ZžD๒x}ฟ๖ผ6‚hุฒ‚Vย๑?๎‰ู๖‹cฆจ๗c>Ÿ๐ช„/ว1ม๔˜~YG'|:f็W ›แN้๚_ีC‘๐{}@|Ÿรโตใ Ÿuฑ~dh%ปุ~ ฒg ๙‘วว l฿O& aว1฿๋พCB‚๗• ๘nช ข็˜ฟLxำฟ็Ÿฺ๋l฿a ?๒ใซ๓œ๐x{JEc6ถคกDร2jCโ๊x่wKธ,แส0uHฝ7แq์Na๖๘ํุง๊ญša๙์ทฦLด- ^ง๓ฺ๋ฦ๗yRผ~hย—‚8v ก๛†ุ๗ห —W็{๓ม9๒๘4Aคk๛ืrย b]ำ‹C+x(Sฮคฆฐ œำ๘ง„—L๘=ฅqต_“‰“c%:_ฌ๏฿m_;๖ฉ๚Nฑ.อแฌ๊\บ–ถ็ตDรlฤ้•>ห^Rย…ฑ฿4ณฌฮ๖„—mA โ็‚”ŽsศCK๚ตุฟkยใ;8ใžฑ๏7พZ๏ฯ๚“พ+ใ7- ฺ๑ศฤโ:2L9" L ^fทํ๘=ฯฉตฌะ~บ็\ฦwมถ=N฿+ึด6O…ถ๚๒๖ผ6‚h˜M‚๘aB94‰ซอฎฃJะL๒ฏถAฤ๖?Hธ*์๕ง%™๐6)„[้k6Sพฌ:ื›lฆ|i|แ1Ÿพื…~~ถฦ๛9ืฅฤNถšก'พ‚‡ฟœิ๑q฿๓Lื”bF4ˆj;ฤlS* โ“ี{>4ˆF หCืN‘ ~M‡y|๖LHึธtกัc?Wdอ๑๚ฬM๑z๗๘พ{วฑ?จlํ็Ž๑Aฬ็๘ว‡ฐพฯ5ู_:-‚Hใwb฿ก†Cฟg๘ >`๛ใƒP”ึใ์๕ช|7U>ˆ๋›ขDร๒ฤWNA์ณLฆH•)_๓}#้ีแh฿มฎW๋ว…]}M\Ÿ{฿ฆ™โ๚งฬ!ี๏๑qž_ž Ši่๑กฅืฒ6ศ้ิ1฿m‡๐ๅt]ดC%ะฏ ,ษ฿้๛ '!๎ใc}›ุ๗฿#z่˜y'รพ็ฑฐ๐˜ธ๖๗‰b๚฿Š, mโ่๐งิQLืœลิขa๙โI%ขE‹!ˆx-ก๖‰„๏%–๐ูiวฑหื่xนi…ึ๐ ‘mํฝD`}7พ๓^ไ็#ฒ๋Š ๓ z™๗ฯ}๏?ไ|๕๗๊z฿ ผอh„๙ฐ>฿มฆ9[imoณ๗ž%๒œ๔w‰<ˆโw?ำ็Aยg ษƒX฿“๑ศธ'r>าžีF  A444444‚hhhhhhัะะะะะb๖ฺkฏ๎ไ“Obุ›ํป๏ณ}ทfํปร๎ปCw๘๎ŸqhZ?xอะพC‡ฤRŽุ;์ธi๏ี2o นใN1ป๎าทฯ๑์ำ๐ tว=`๎ุ=w๋ึ๎นkทv]๓๚qุณ;๛t'ฎูฏ;๑ ๓ฑy›ถ=ไเ๓v-Y?แภ}ห“_ำtฬษวีเตI 'w๔&ฌ=rำ#Ž>,ใ!‡ฯฃฯฎ ญ_๛โ฿ฬ็ั๒๒g>)฿ ž๘|žoโฯw_๙Ÿ๎ฎษou๗^xv๗ฃ[ฎ๎๎ฦ?u฿{ใ ๒g๊XฝGฟ›^ Z๗ื‚ฎQŸ-๔๒S๓RŸง฿Žcu}?-/๛ญ_ส๐฿Ÿ๏ญk็?้q๕6]ทฎ_ืฎ๏ลG=บร๖้๗_x๘# xถํ ?ำ๑Žำ๓๙ร/8&๚<3l่š๔๔ฝ๛ ๚พ๚ฎ=๙๔š฿EตM฿S๛ฯy๐รบw์qT~F๛>้A@฿๘ศพkห3๑๑๋ฝ็ั›v=ฒ;cง#บW๏xx๗Š๔ ฏบแ๙ตCดOฯ้i๗;4CxฏJิ/VๆH๐O4ฆ๑yซ– ขœ๊ื_%ะ’—>ฅ>ํqไQ–แ๊ˆ๓~ย– D`Z“ํอ7˜ะGฏ๙ˆ,%์t“kฉ›Z$เK‚ึEบษตM7gO|x~ดQh]ฤ!่แืCงUยP a้ไ ม๖ื?/ l 5ํำq"m“ภP—เีบHAวK(Š(๎่๋ปป?ๆ๎๛๓ส|]~mึw}่ฯหg๋:ิ@ฏ๕9"(ะ5ฺ้$ฐ๔=๕๘M๔{่{Jฐ่3E:h~ลปž๛ม๒>ค๚-D,‚๚L >}]ใฟ๘ท๙{"ธ%ะt>Aื $๕uzฏ ื|WใdI|Ž ฮš(ดฎA€B"JA฿ฐํ>@‡ฃฅฮ-bอแ9๏้๎9๛3l่๗ึwำ{!xv|WศQ฿G฿C฿‡฿YKฃํ\ป+„บศก& ใฃBCฯ”Žำข๛F็ัคLฯฯชึEBZAเYa@ำุ๛$‚x๖6M„Fฃ BอWNฒ ฿+#›๕ ซRฉ*“ฏ‰๕ต,K8$แšq]ีถ4Aผ"4ฐฬuฃ‹0$๕ph]@‹€ ๔ZBS‘–z\ƒihŸ ž™š '=ฌ‡„œ„›Žำชฅ  1@๖ึ?ฮaฮ:ณ~พށH` 0|๚lด๚ 
i๘zŸฮงใ$”ะ”œ๔$ฐณ๖’4“ป๎5๙ฺt}ฎพณT .}'1]“ˆAืญkึน๘} ฎO๛๕กh]ไ%ํI๏ืน>-ยษฤฯ Q8Iธ&ม6ก&4‰>’ะwƒ๛๘๕>ื8ุฎs่s๕=๔ฝ๔๊;ฃyŒบF&๚O๕้๗๚|}gXฟ‰>SŸฯฝซ%5จ‡๎}‚ŽE@ฯŽž#iC๔LฒI่9ๅ9†0ดm๛‰ žทํมกฤใฬจ)ํ`#‘+L{8อŽ?{\ีอi„n$\๎บ นน€‘ˆŽีŒแว Œ6ก‡‚ู“ฮ'•[ะ๙j‚ะƒร์PK|ฬz๕`"œ๕ ๋!• ๐ัCฎZ๛ตDS1 iz@€"คh.ตDh่}zฟฮ-rะฬUšŠ„ญฮ…ฐ๑๋ะ’ฆ"่}:ฮ5LDZ"Pฮฃ๗่<บF„ณไฦ๗ืตAบดHฒไw๑฿ญƒ๏ษoฉ๓C5I@ 3n7Iีšค @ˆ์ใ5ว๙>ใZ†๎๎ ฎCืฉ๏ซ๏‡ูkิะ9๔^ฎ๚}f#๚ {๑{ฃi่hz˜…†‘ƒ๗;ยิ@ืน4ษz๋nGๅ็O`ฐMKญณ’›žฆ!ฐ๗M๑ย๛2ALNG•zพฃฺw{,U฿้ถชS฿sฎSฃๅ7ึฌY35‚>ฆ`„n.†ถKุ๋ฦ–ฐwSf"ญฃsำ๊=N*ผว‰EŸ๋ๆ7#่บ$Œ๔`J˜้ึ๖ภ์A'่แ– PีCฯ,Yว 0žผกŠPDะ!ˆ97 Wš€>Gd ๓ษฤ!฿ฦ๕ฬlวtu๗™oฬว๋ณ0ใ0ใ็ฺ<€–บ>}†›Žฐ—CzฏŽใ;นร&B@ƒะRว๑นh%ˆ–ุๅ๙-๚เ๛YwS“๛#Xjป›ค๗š`ะ"ธ/ 3—HB–7j่<š`~ำoฆuf๚˜D่ํJ฿S๏A่YเžEhh‹†LP‚ˆDฯ›–" bZ&ฆ}ทน_๗โํ™ &#‡ฃฎห“ใ๕0‚xKA{ณ„€Fผ„–Pฬzh™5#์–Šzะ๕ภ#H๕ะkป€ฐD@>๎ eๆˆMB“p๔™ฐz-อBŽ๎|๎ู ฅฯะฌ“Žฟฯu‡1ฮb-๕>Kื+‚‘‚O…๗ิแ$กฯ@kprะ๕;ฉ:1 iืโฟGMภอ‚ฎE ๔ั ๚ศืN,๎ใะ๛Lฅuวฯ-pอh&ใ†๎%]?ฺ 9p>& #่บtฯใWงA0t฿ ำ"=gzฦ๐่๙ำvwfOC`+2 ๘84‚O…ฉ่ถmฆLL Hšั่!ะC)ีVjชP„ดf์๘"ด.แฎ๓0“afฃึ}8ฒัFˆLมฎŒ €>Dz0๕Pk้ั9nZaๆ,a(มว Yวi&'fŒ๎ ลฤ$Hh`ใG˜๊s$|%Puญ˜Oดฮgb็๖(*9รฺ๎เผบ&™ฆค…ศฉญ๗ศฑ.“–+โั15ู8BL2 |I Gญ›จ0“ีk‡“ฟŽ๏‹~๒h'''†uTฆว>ธ) อ‚ฯ›„$t๏B^N6Dgฑไš๑i"รs {{œ๐Ÿ&9๐\๚D  AzŽง!ฐ๗Oแ&่Qh1šถ‰v˜oจถฟถrRŸัm.Y์N๊u[ยI]๛4๛ืCฎ^D hปซดบ๙๔ 0ใวŸ€Z๛๐C:žฐWย๘ะ>œ๔ yd vf ิz !BHตOHตvฦึฺกsธ ลO '“!„ี็aพ‘pลเัE๘%ิ2IภKภQอ๕8A่X…ฒสT%R@KมA.ข๔=๐/เ„ๆ๓<ิ—๗เัน๘}ู ’จแQO˜ป˜qืฮmื ๔Ÿบ–เf"'Ÿๅ?G+EZrฮIตˆaƒ๛œฯ๖H34^๗hาŸ(=๘์0EMk@3nš–ก ‚Xฦกฤh‚xt”7V้็ OŒrฯ็D˜๋9UYโำ#zIZฦ)ห‘กMคlุฬ@| EHศฃ๒ใ“๐0>=h จฺฆ‡ ๑ํฺๆ๑ํ€‡›ดฎำM$nj0Iฑ$S 3l+N[ศกล์เPๅsuNˆ€H'œิiขŸดิ็rMe]ซฎ-ณฆ.ย|ั> ]‡“ ฺŠˆษMLต฿Iา} ๎$žWก฿ฒึ&๔ฟ๕๙ะp€;A8 ฐฝvr;œ%่^Z่ะษ$†(<+๐‹x>;ฅk‚ ๒oš™ ่%C$1-„๏0=C#ˆUš(‡CYยฝซƒDค่ฦืโก™$„กไy๏!‘Œศ&OŠr‚Y!ุ๋~1/I1›ี:ณbH฿๖z„37ปxศง“ยะcๆตฤA๘,K๒=(ฟ!'% 'qฅืŠำ—ษHฏœฺฬ์1ฃญฉ!ผ1oน ƒ;ฃั(jญ‚\ ๗Qิฆ&' >ำIาฃ ณ5*TึCdค) เE}7;น9ศเnย4|ย!ฬG“ L’๐I 7'=G>q"นgašƒpW‚ะWดˆiledื zระbgRcื์# ๙Dz@ฤ†Cุhyฐaำ๋:kข@ฅืkย]%hศb&^œถนภ๒ฬแฺ™G6็@#a< ว1˜ฉ4[ฯพDJยA(ใ™dธฏ;ฏป๗ๆou๗~๛ส๎G๋ฏํ~xว๗บvs†ฒข•ฝ-๒Pค&!LP„ำj …pYw–CŒ|775A \jญธภฏ/๕๏๊!ฤN.žฅํฺภไT;ฅk-ณQ฿q5๊๐Z7Q-„ ๎Oฟwตช@บ&='“„น.$ขษ}D1-‚ภ79 A4‚hฑโ b>‰ญ ”ยณทGกฤ*ฏลิ็๐ฐXสGxLIN”ภ‹“šผœuอท๛bn’`๑’˜”ด$Q ฆ%œu๘'‘>ž€Ÿ๗B8z‰’เ•ณY„ ๓‘แว—~n.?ท˜Dr6็zOื|ฝ๛๑uv?๚๎๕๎ผญป็ฎ๏w๗}w๗ƒ ท็m:Fศ„"็L็๙่3si๔น๘Lั์ม๎g๊\H๐๛p>พ“Ÿ็aพ\Ÿ้ xnข‚$€๛)jม฿W็ฉ.็Q๏  7=iน๓u`S๛‰ 9”Š้ห/โ^tz=ํH&ฬKƒLภzF! -ง!ฐU8฿ฦ84‚ุสŠ๕ี~i„ทz๙ ๘ฺ†P—' ะ๘& ี๎—:FBขžลโS@ยึํฅ&\ˆy~ย ฟ;น ฉ…D4sงœ3s…ยNUIณ\๐๏ฆห DาศžVข\Nœ“B๋_h. (ญBฺ„ˆโ฿฿‰โ๏ศ๋h:Fว๙$ยั5‘์ืGNฎ-ี~—>ญยฃผ๊฿ฌฎๅฤ๙8ฦ“k?Pํ“ ย ’ภั็ผ๖ฌmฯ๎ ๕ใ๋< ดถฯ‡ ธ7๑น๙ไ‚จ‚Zข๚ศdฑA892!ชLC`ซ’ณ;รGกฤVLD3AnB๒ฤ7JP˜} น8A8)kกืzจ‰ถ!แ™?หKN๘Lแ่นฺฉb ำ)R`Ax^LA9CZฆคคd3’ˆ"อ๖ตฎสฐน๊j"‚\@N9 ้=นJlา*คuHะห‰-Bฤ ข๑ร๏ุu๗=ฦป๎ฮZ†^‹(๔y:^็ำ{‰`ชMEE=หw‚€<ฬ“”ฃ6Gี9ผŸใุF~ื‚น mย“ฉ+ๅ™ูต#ปNฤ๓jณžฝ=Šd 7CMโ‡ ขŽpW‚lํนฯ5ต๔zฺฺ6*А‚ศ‹˜AN; ถr‚ะองH'ดสPJ3ฺƒƒ๖ #ˆมกํdศบ†เน ๆF€๓’ฺ/‚ ฦBU็ส3๕๐'Hฐ‹ t\ฮaH%จI\ห๋I่K›Yศค?คึษ!๚M‚ฮŸ‰dตพณ.k ู”ด™žฒf!๒HฺD.Gžฮฉใฉ:๋นต™ศแฺ~4 }'Bb)kN˜ฎ‡วึ„B™Bi=ด–œ’š$ฏ๊ผ‰Z๐“Q๎กตv—ผว๛(]'๊ีูืร†WŸญ๋C‘ด‡I ํ€ kศฺA#ฑฝˆ0sศ‚ชสำุ๊w๑ŒC#ˆฆA”mภอL๊Š๏ม๎คv‡๙h$ๆfฆd3 #?๕า8W!j1ฮพH |…\>C3xีU"ฃYB>ด‰ฌ=$!.@&ง์G0Wศk"\—IŽhiแปะพžtl๖[xษ&๖ํ๋7๙(IHƒ๘๑ตdณUF:žไ9ชฯบ_มณรkิ$A2~ }o2่^ฐ 2@รโ; D](ฑฅuำ“›› ดบty‚ 9xโž็`ิj-ข.ปD„ึz.yิำwา็่5๗ฒ๎Yoฺฎ๋›V ฆพ!ำฅk(แ}Yฆ!ฐีภศณตGกฤVNฎEx3 ’โดb|}~‡: คNšs‚pS“T ฏF*‚ศ‘DIHQ. 
ข6นเxฆ๔„›btNmGเa๋'r(›…ข™ๆข์ VดRา$ะ๘oทdณPvJ'โศ็ค]ˆ$ฒๆ‰)ฝ_ว(บ)7+JDB็ะน๐iไๆFI#q ำยMC}p_ฤ‚rp’@›DXกm่HAค‰L๏')ะMPต?รkAีZฆย:ŠสE˜ฌjbจ ย‹bn๒ ู˜๊ฬl'๚’D๓ิgแsำ=์Nu}งฅศ๐gQD€_ะ๛‘LKƒAxsฐQhัข„ผ๖e‰RoI๛j ธyษฃ—ไA่=4ะ!qY)ๆ2“๋‚z˜T˜k[9ffฯ6ำY &A‚>k !ิ5ร—ึ ก./ำดœฬฺ/’Oข˜œ‚d๔Z๛ณ™)!;ซดž5i$z๘4ฒŽZLฬๆ‡™…†๙œXx??kแg[ะ8ฒ™- JS"\อB็๖„=Hสษ5;–ชตŽบาlศ็YDmjช{Uิ7AyYึูฦยฝจ๏ฅ๋ำ1$uzy‘ีRKzy๖ฆMj!์etFกD#ˆ™ ฤ@อฅ>‚pDAธ™ษ5‰ฺคY8ษŠฦŒ„ $ไ1!ิ$Lฝ๖ฟื7ชอ+ะ~eI'(b๐ฯ„ Gr"ึณ๏@ฆกnฮšA†B\๑KDฌชf๚, ูBาBค=ศiญ‰D@zฮnš ๙u๙l฿อA}๐๏ฉ@Š}„a๘๏t˜r{ึˆิr\]fำSํ/ฉ}'ร2พ‡‡;ม1Eน61ชuชทK๕B}^๓ Ÿตย0wzz}>]่ใE›ี๋q ŒฆA8ฌฉFใฎil๕—๗†HฃะขDfัŽBิภwr@ wr๐ฎaŽkื&€Žืร‰อU')๕Œ๒Œ6ฬ&h.ค<A;0ฃ–@ ๓’fษู ‚9I o" B˜˜ฒf‘4€์;rศf&E2ฅkยพ/›ต‘่#!ณR>O:ฟ>KBp!^ฎ-8วธiจ&79jแ_Bžu๗2+%โข…ชฎ=_ฺ^LO2ง™_รซสึัOฎิฅุkฉ๛ฏแ}3๚ฬMu‰บ”kฎEดIš5พ๔ู˜ฐ ฎIŸฃ{yK๘i$ไๅฟง!ฐKมไoA4‚Zห‰›ำฺƒ“>…:kšก„ฎExRB"ณcฯo@("ุNž ็aœป ‡pBg3ŠG*]™"๐I~#K:›•”์†yHARzฏ(3;ชๅเFซPฟi™ฏ”W‘ฮ——้=h5„ฐอุฦ ๖๔i~พBฏ‡Iฉ๘`tฝโ›M`A~Oˆ/eสฝWธGD9yฑoXB_ญ}๔‘…gqืฆ'v๊# ฬPh:ฮKŽ{5WLL|6Bฉ!วtใ๙ะ ๊ˆ‹๕L้u”–BŽi{๎ชณ$„ ”Fณฉ%ไ•ื Lh%ม…ู แŸฯ3๐\sIค"sTzoฮ™ˆ,ํb\๒*ข|xฺ&ˆ$LOตภ4„b2ช…zญ11fD้r]_้มฎฏ\ำˆk+Daื็f(wบื-S๛ยw๋0Zoว๊๙}จ}z-าก_n๔! ๅสฤ„‚ฉ งปฮวคs’HภK฿๋}บตOZ…‚‰’ž'—I ๓ฎ๗‡Ÿ†ภ>โ~๗ŸำiA4‚˜ฺ .N๊:G-ƒcค๎“IMI ”ฐ0!AฬP!”โจƒ ใ"$CฃpำP! DhD:Qˆี์Vพ๐eเวะบฮ‹ฯƒjฐ$น6“g์๊9B8๏ำ์=^5ี{LG…X๏\~“J๓!ฯc@รโZ€k:ต_ฃGณ€$๊ž}ๅ@X‡$\‹จ bX.…gtkbŒ:?™๖dำ•DจซC@‡cฺฮ„ šสฎZโืจ่hฦ% Yไ2สฬD jไ&rฎmrXO… ถฟนึqhัbjƒ?9ตi ?…wœำร์W]ะบฯ@ฝšG ฒฃVฺCฤlโ‘๐3ฺDัฌ‚+๋9œUZยM—mชญdeฝsd“9ฏs”‘NสZฬ:Iจ๊3๒๙"’ฉ\W้ํ&8ณZ(;ๆ๘* a๘ฬ ฬLŽA†ูม~”bjŠc5Q h.CฬO^ฦร‰ยฯฺ T็Xx(opไ‘O๎ิ–ภืk‚t.ฬRA่3i1K้y’แDLRtL]ฮCฤภไ†ฤ:กฒ8ฤ‡ๅOกไA^’HB๋"‡i•฿>b‡:็B#ˆั-G฿•๐„Kl฿Z๛ั๋ดŒํ'c๛ถต`=ƒฺซ_:IP(Mณ4=ด9Ÿ rะbร๖ฯ‰`–๊คB๒Z& B ?มfศGั€4ๅ-AdbP^$‘ึKTˆXo@จG„PึHT๙U> ˆ!ฎกฺe{˜ขะ \P›Yษ5ฃ89pnดดฅะpะ"ส๏dDQkaฎY๐ฺCc๛"žœ(๚J€@ดTugต'ูy/ Oฬรฉ-hยyRวฎชsB$บ๏ศฆ๖2โˆ๎+ญ>[ ~แรำ๋( ย๓่s-ะžž.ำ๊๐&‚VนถF#ˆั๑ุ„“œ ชฏKxฉฤ%หฉAะjน ญ ฏ๙ 3/้กำฆVณFf—z๖‰`มŒไูย%ฬ3|Y† €PT„]ูี[1กไฐWi‘๔FeึrL๘H„+ๆะYศš9วMY…์˜์HวT“Hฅm 8œkbps’Fœ฿D13ล6Hbฮ๕ิืแ„แšอ’p‘“KดœีN}ฅะฝู’›ฃ2Aฤ็ำoม/ฒ ืกฮภ–๖กs2ญ๗rœบ ซEK๎ไyXlR9 eะ^ฺํFงี#๚ศ๛๏8ฐ:  ฦ“DฏเOc›„ŽhัขฤึA”ฤ_,Aเ€–ƒ ๚ช-๗กฤย โฑใลqw%\๐…„วli‚ Mเr‘MU๙ม ำZ๛๕ภษKูn*ฐึYป^ฏว4UGฅ i›๓่œ,ยูœบสw[$Qi)ค@C๖7จต่m7gb๐&@น๔น‘7Qœึแห(‡ ่5$QŽsG9๛jฒจฬRฝ๋ๆg)&ฅ๊s0/ab*~Y˜๓ฺCaŸ3@ๆŸpฒ๗ะืต™ษK๗ีvrbจ.yu`L”:y๔ฎ๐๒แƒ ฏsPถE๏ีqฯƒ๎cฒถ็@ศ ยs(<Uf,๙ ‡—ํpุT๖Q;๎4'ฒpA,œ *แ…๖z๛„ฝbไะ.vrฮS๕ร kึฌ™ช^.-‚Rแ„ž>‚ะƒEูฒW๕pkถ–5๛?„'cQšบDอฤ1sศ! ไ3 ์sข[”สเธ‚"ษ™ิ‰ค-”:M*฿ญ ญึ(k๔คMmขD-ไMXปฝฟท-ฺฦ0ย รแ3!Ÿ็ŸAฤV "* ’๐๕B}„U๛',gยรa=จภห‰x)œุuธkMt ฌื ีk"˜tŸyึ๕ฐvงด9•oGตฎE„โZ‚น๎qG๗2N…$กRฯŒฺdไTh›4 =ฯฏHไ05‚ุiงP๓Q๗yi˜๐น„ห.M๘รุพgยงฎŠๅ[ Aคq฿„๕ ๑พฯ'๒ !dƒYษ›ีu›ผO6U\ฉ,ฌ๗–อ.mCDค}hห๓! 
i๔ปึ๓ู•ษ“ฎK๛”4'‚x๑v‡L‡ vน|—q˜๏็ฅqfยใฎy‰\ฑฃ˜>˜pKยฝ 7%<3ถuยณชcŸ*ึ7ฮO๘ฅๅ sลฑ‚ /คเ๕˜F9ฉฑŸบI‰Mด|ิL*™z5ร๓ฌ้bข sืดˆ\OYภัูm .?€โ|Mผ˜˜ไ/ˆNq™$BxBeF/กสนศ•Phฌฤ’ๅ\ˆืYูฝษx>‹๏{]ม…๐zถh+v€c - Œ๘Ž$แf('ณZณ0ฤQ M Iช๓>ตก&‹>๘1„G{k[/๔็=ด=iŽฒาpuz/พ5še้บ6ง{{!Zฉ( mDฤ@โžHG!ํแ…๗Aฝหฮ%ฤ|"”†SวLฆoi=แŽj฿ํ-QnFๅD hำ ˆ:~>Rเแาภฦ+ ‚ค%๏‡9‰์_LI๘ˆุ)y$‰QRƒ๚Bu๙dbR&4…๘4๓.qฅ~’ไjgํ(ณ อ†ฬ1] Žˆช‰ ฯฃ=,5y9’e็ŽwกA… ,Gค˜œ€iNuฬ‰=ว๔džTWท:๕B~}QL}f&/O^…ถK่;Q YPฆรk=]ง๓โ CZ~JvŒสƒp-ปฯOกgLš„ด| •ฬNส…AˆcvฅŠŒร$Ÿ—ฦv g'ผภถญ~ำJ&ˆi๘ˆ฿ฆั7x_8Ÿ—๐v‚ะ O•L:~กฮฃย๋กงœฏ3A˜3บทงี ชณK๖ฒ„Ÿ|r*$’3ั;Eˆ ๊’!$2G`"ซจŸน#ด‘“O5ฏฃ™&ี,๚>ฏƒA)0ูเsฬKž1๎Eๆค๐O๔ฤ@ด“E]เฏฎ๊ฅยฝ ฌ“…๗)งีฌHวz)qiYึvTˆฅb+ณ|MzคU๔ๅA่9a}ฒ๕| #mฃ6็๔ษฬ4ญL๊cve ใ(LเคV>ุ{Pmmๅค>ฃฤ ฎลD ˜š ค5x}ฅบY ƒ๚B6Lr3ฤ่ญ)Ÿ ‚ะlณ‘;ฃ™u–สฆU‘:’ฦ๐Ad็ณ๒”๐กซล!-‚{ZA”~บส๑E8Vกกu7฿xDฟึN๊ใz งสE˜cN๊s€ใฉ5ŸสวP4*รHa*ย๐ผ^ณ5ž*ี{=‘ฮk5Qฏ๎+ัืญSS ฃŽฒ๑š”@u@ชน๒Yu•˜ๅ“lืgb‚ ซี๓ fชี๓ฆ็‡(?r5t^ ๖M‹ ผ๗(L@NะสEVf่‰ ๙O8'ย\ฯ™$`งฤ2„f ‚G:)|Np‚PbŽฯjdฅฉ7xAธcห๋,‘Mืmพzํ5vtŒR=ุD4A ^vzNwดชXข˜Oๆ*rˆ†?ลฬ‚‘ศ&J{ณฯwฒ(QNภ49ฺฦˆ( :฿ 5A u\๗๘jำU๏ต้บ!หZุ๛wtญช}วy๎*3{NR_8ฒ=าษklี๙žPW๗วฎeเ‹ B๋˜5u๏i‰†Bา&uก๔~'ื๘D่ฎAPลXฯ…„ฌ„<พ‹q~<=o>S็ื3†yw*ฑ๛ฎsr?†ก%สm!2€ „ถัโะ ยgฬ(แฬฌ‰ŒRิ๋Wว{๓7uAตืร‰วตฬัT9ฃJMืf&หฦแJษŒ,๐]ƒˆ– 2?!ฬ๒~GdJณZDhZE5๛.db๋ตSทO{˜ฃAิ๎ฮ์กไฺรSY๘Z‡’ร0ยเท Iฐ"ฮโำฉ|*žำQ4Eขึฌ๑P_๋ี:oข๎X‡฿ม{eำqH๐“€'ฒศฮ‚I่พๅ'T›ค; Q'ด ยฝตอ WNZ‘€–„บ๊9ŒจขE ์ต{์:'cAl%แd ‚ะ’ื‚TX‚บ0„np=๘ผ๙;ูคž€ค}Zา้‹hฺ1่-]องf‰X๒T ญ{-Kอ(ฆาsZD€&‚ŸmนฯƒชณJฐษE‰ :ศYV๕€&A‰๏a3oœsศmcˆ12ยiLิำ(b่%iM๚=\ฐ›ภฏ‰ †ƒ&๏๕๏๋คf$Qฤ@ูp๏ฆg๙uCขฺไไํM4ๅฮjฬQlฯU€!ญt=บdL39b‚ฃ็‚Iƒ๖i‰9ถ๎1ษมดงuฬUS!ˆ=wศ…ๅ$ˆศฦ‡AL‰$T6X ฐ9๕็!mง๔0!ต–žฝh จี8ฒP‡)Lฆ#BX)ณ์^h๖โ Peป๘ฅฏ3ZYืกEิอq(าWฬIU E6ๅฬ่D˜ุAะInภDฅี" ขgF]„'>Œ๘’Xแ‰‰กO+จMPN6๘Aœjวณ๔iฬ!Žญำ DŸใšy๘๋@„SEฃป.ˆ3 ย{`s_ี๏e=฿s้ณIvิ„Cว๊}š์่~ืคH๗ธ&<pša1‘ยฤบะ!ํCฯ š๚4๖ฑ{๎61 หL?HX—pํะbŠƒF$R[5+Qั0AU%! ‰Q_ษ šif xW/ฬJdปŠhƒ&A…N' ฝวํมD9MgOผuฅB˜št|‰า แๅMนtฦ†7•าPlB !ฒ˜œขํจ 9&+็KฬMลzข–๚ขกz5ŽฺdeŽ่H%ˆขาrt๚nม๏eัKฺณห๛ยึ›#"›z;ึU%:\ฐำ๛ฃNฐIpฏฺ์T“Œ๗ำึ๛ต-“}2ากื:ฟ๎q"๓4‘AzH๋ฐชฎ๓˜vEzฮ๔์-$ณน— ๖ฺญ˜ศฦa™ โ‚ฉำb๒!ณ’ˆมO๋D9มh’^฿จ#‰วฮdC“๐ ฐ7ƒใฺ6a2ซQ๛,%K“Dt5+š5–จRj‚,็Dศtu”TU$มถผ๖ขแŸ`฿€™)|>€i-˜ฅJ๙ะ=น“Dฏs;’๚2กKษsโ(Sัภ6พWญ5๔ร3•๛#"ฉฐ—u"|Nก)๖lM’]Ih) ƒ„:ฬI#(Yฟัฯกnƒ้=ฆ˜ฬšณ๖`ล๘่wฯwo*แU[! 
HcN…;ฎฉ_T ีจ ;ฏไDQ๛#๚๊1ีฆง>๒่3)๕˜“\h็P^‘ 4ˆaDะG hDhEnNsb้3มajชซยR–รKs4ภaอlX้๐๚>๓ผ iฒzอ9่LH†>ู๚๔ฦึg”’-‘uฏ๋ำ$ImYฯลR„๒"˜ศ้3ฆUt์ป“๐8,3A<,แ”žํEณ›“z™ข๎zีขD#ˆษย+พฮ*Aทฯ๎ฅฌศ8,3Aจ"๖ม=O๘l#ˆ%ฮ‹ šค Rะ๋amฑฏฅA๙d’฿(ก!ฏ7WcMถ?ฤ๎<๔บ:1 ๒ัรฌ‡4;๎š„F๎ย4›'zใะ จาำบj"“Œ ๎พ็ž็แŽ๏o" “›%ย ช‰่'_ฯŽm๗5ธiล…h "Ÿฬ>๏5Œ๊>c3ฏญlฦP‚ภ๗aD0๒ฟะHฉ2g<Wฤ@nˆwโซฮ5@็}2๊ๆCuุŠ ŠฏภยPฝื NขรFiyš โI dV็„8t;่๚5I"hC๗?‘zz ALR›iา!’๙L ฐว@$แ(,3A\”Yซ•’`ถ์!BAพ๑ฎLw}Owฯ]฿ฯหLrPW1 MิBิgQwTื๙U2™WGญรW็h^eตฎฤZGUัIZ’บไ‰#ฬตึ Jว๘I๚"จŠ?ย;ัี™ไา&"s™ˆ%†g>๗๙ ผŸ5ๆสบขซkฉgnย๒"~2‹ึKm1๒wtฏk‰ฮญื”ง' ้๊แ”‡†„&rS!ˆ๖ู•ฯฑฬ๑๏ F`ฃd๑rฤcN๊!ˆ๕ปVฑน 'Ž•mg‘ ”!…๚TI”Xˆ๊7x ท%{[I‹,ู_ ดั(*ธๆฒJŠSDSฅbbชฬ๕—"ดU‚_šBึ๎พปD2แคqศฬคข}ะJm&ืะ,\ห๐ใ$Švaค1วaํ5š๚Hม“฿MR`‰m –‘:wH้ŒบWu)ฃW๓ษš&•k1ลœฒ#Z•b^๒{` †R8ฐน'4yเžBฐป?หอL^ๅ า๑ ็๑๗IS ส‡zHp_ำฑN…ขštŒด_hแซ ‚ฆAt ๓6ฅS!ˆ๎3ะXiZน๏๑$q๐„!ํแ4{ญญœe‚๐์iˆ"ˆล„น๖E4‰ จฉ„i ฟ๖้+๖yถRโ9ย๚?œŽ’Wขง3ฺ@<h"๙npWwห›‰"รท•<-‹ย>LฬฦฬJ1”ใ^็)ฟS•๕› สษm๔ฝŽ็a}<[ฺ’ิ๓ศkpอฅ/:ษ}%~ UงBฦ•ษˆ\”พๅZk’$Bƒ(u˜ะ$ DiU5า=G(uW›4™ภpBkยไ๏เ)ญภ๓+ dษฌ$mยiะ๓Yฺ…ขž”|Š๙I)H฿๚KL‹ ๊เahฑ0‚ธ.Z์ษตGlsยำํธw&#'›ขI„V10๎้ึ—ว0b๊พห ่kๆbวkฬ1ีYซญ\ขตœ$†‘งšยY๓"ขฬ†แอs@buฅำฝ‚ม#ๅœ ๚‚#๊’žiMˆซHมหย`jขc"Dคmด %ขID!Bัwัy๔Z&$ํำ๛1_ั'[š‡‚~๎ยดj#pเพฝฟAAฬ“ †ํ[I&&ฯฮ9Huฅ๏-AิฤB‡อฎ<๙ ญk‰pQpฯ3jตt๓ๆ„Zฤ@ฏfยC#kธC…ปF‰oภฦศ–.ฮ๊่”฿ศ~๋1`frภ…~CŽ„ู„?ฃDF9iˆ|์œ”๕ศืืŽฬU๐0Zฯ€vrL๋ลฌeพ}?.ิ3!˜ๆ2‡ ฏนษ๒Qํ0บ2#–Y-0คYz๊^ึv”้}ชั<`ย}uzย๐ˆ%/Q๏e๊%ผฝŽณอษจˆ8h>$€^LคHฅŒธ4๚ทk9ญ>"ฏY5 ณBi<:แทc}๙ygUƒุ฿ึŸฏฌพX?ถrRฏ›U'5Cฝh$‚ภไไau‹!w๎‘)ํv`๗Edgฃท ีl0i uณ˜l~ŠŽc …๚r=&ี1"มฬฒš= ไ$ ณI)ำ%yN๛$ฤE9ฬ๑5๘๙ะ.œD ‚4“ศญจ#ผh๎ิฦไ]้zŠ: h 9dแc ๔HD^ #'‰rL8ภ็8๋‚๎๗h”dฝชs‰–่ จAฯ๒JE_๋$ศฌŸ:^8ฌ๑ฬฝXwžซ7‰€0)y‚ึบดอ•@๗lj‘€–๒๔JมคฅI”H๓้b7ญZL'ฎูo 7๗(ฬHขห>žpeผ> แKหN‘ๆ}Kยฝ 7%<3แ}ช2>ˆU„qzD/]ัWว| ‚พำ4&ัl‡<"'2!ฤœ„ŠNฅV”ผ}15YทธB 8ฌร?Q9ะ$จ„fฅNแ .dย\=Rฉสe˜“ W‡ฒบIษษ‚จ‰ึฐi=“–4—ะ(Bc{4ืŠน‡พฺžดจวœ๏~หีวY‹๐ฯ๒0Z3้•สทไy„ฦPšUฆฃ๒_ชเฃ%Fึตทj‚ะ}แนรr&1uVuลใe`FพญSแ˜ฎ‹g5ๅ8œฆ)|z้Ž–?-‚๐r$ฃ0#กt‚mผ{œไoหค~๙43ัl…ูœี27-† ศNฅ๎>!‡zศx0™แzใ˜๔–’๎‰`Z(ณฬƒOˆktห<ฬ:nๆaถ>'#บฏR5ฮ็u“’“šC0gแ —‰Kพ!T•อTa‚ ฏ๛dๅวJ๔hฎฬ!‰สผ51A`Un‡Vฝ ํ‚F@น†xํเฟ๔ึชฅ7CD:นs"๐PX&(c๕ภ7แ„๚วๆnbr‚– rะ๓„s™r๘tขsณš พ4 ‘QM๔™ะ39‚8hฟโ‡!ˆ๕คธจัิbฉ‡n8ดบฉ๑hใๅ7ฃA ์y@15yŸjlฤ˜›x!ŽY‹ฐ"[2ADยœE๎Cํe&ไwู๚9„YiŽ€ญษมซธฺ~ฬQ™Œ$s]ง่5ม:ๆ,Bฤ _ˆHA‘T๋๏ุ]ฅm42๒Š9ต †ฝฎยlGโ‰๏<ŸA)H ทw†'ๆ…#›พู'โ‘h}5ฅ๊| sfc’๒R๘ท๊\›QๆะZ๘ใS€ ๏cขc๖ฺf&†ถ้xž7‘็ึ๙คaธc[e:”1‚ุNฃฅa˜‚xQยรl{ _I๘ –ุผิขฤrAl9‚xศ! ๔๒…ๅ&ˆ0-˜๐ธ„ื&…ึ[ฑพ)Ž7์rd†ืa‚ t๊f%๑ว“xCzrY๐่#Rฃฎป„S‘บ๛ฤธ$E… ๐C$Aแ=๒Rq›๗ฯw”ฆL•o’pT๊JRฃGgQ ฐง"๏@s"๋›Q๛!ฆA๎‡่ซpŠฌ{ผO‹F. ฺง๗๑~r๔~ศBไ9เ žA๚ภอAc0#กณ‡5‚X“TWvtƒŠ$ดฎ}‹!ดฉ๘Y +ร5ฬMฤX{[H›e๕$จลขn^"็ยฌT*‚F<}~} สL:f8~dTL"TใQM… B€‚ท ’ฃ์9P pHน แ/ิZUาR0}%Œ"9ล9฿BGั",<ท6yฉฤฝ*2หณฎKณ'›ศ )Yืแด๖พtwK˜jG5ZDM๔ยจ! 
C„ž-u<‰q"_็าs)rะ๓GsฏฉAc0#qYย#B๔"ขHA,1A(A&$n>œบแuฃjถขํ‹้ แพˆถš„ฟ;มะ"๚ฺ‹ึ!že[ฒชี+˜โqึLG3ิ2๓ถฤ4o4@}!ฃ.ไ,ดu !nA8Iธ"›ยŒ4เŸะuA:ทW•ญณฒํsณษสยl๛D† "งT“jSิH‚ ๏ก&ˆช๚ํ@ŸGb๙๏ศDญaHI’ฉ ก’๕า)r!ึกmDญEิaิ 5ฏ๎yฝญ‚ะyะไETxVหั‡๚ ๚V}๗yC*^๏™๐้„ซbนว" โ >4‚ุ„ ›s“4iา"จ์*'ูBฅ6p,gขP}%หo ๆบด$a.JiไxxUo Mา kบ๔sฆŠจศมบภa๗Gจฯษ/px 8ฯคf6ฏYบ™—d&ฺhๆ%LLtช“ฐ๖๒„ทf๓ู๘'<ัމ:;›ฐู ฯธกฯDน’m:๗b†bNxฐีคp๖›้ฉ.ง^|E”๑พตศ‚2Iฅ๛‚`†…ƒWy๕*รž@Gษ ื๔˜i'”ืะ{)๊WืaAPแMb*qุƒ6}ŒมัW๑๚Œ„—ฤ๚K^ณH‚Xำ‡FK<่Ay‘‚nL บQe9ฺOํza>ƒฦ๎”ศ(U7ำLูกญEร|เกYธ… (๋€ร34 d)ำมjอI@ซm่–๓0`ฎข,‡"r6FW::ำA@ฏฉDY'็4KE+pM#PLSC2ณ1WM2ธF]‡bœฆ11ATๅฯŠz9’0“ ไŽ๐Y~J)(…K5\ฒเ!ี8o.ญข2แาTรD2Ÿฆเ'Bั๐GPƒI๑L2๔|‘ฉํ5ะ$H–รA1อฉฤแ|Œย$ŸืSmโ ˆติ๋EฤลfZบ*ฬM—6‚ุBษr" BคI•!gYM๓iฦฎY–€np๘rS๚๔ื_ดM๛๋ฆ44*Q0f{ฮแ’๊งูen๎ม3ะอ ขฮNึนjCd`ิmJŸใฮi ^ดMAd ๊ฑๆ๎ฺ์‹p2q๓๛ Y@ ้ตฦคกpZ]หทm์ฎปuc!‹iŒŒu ๓(iR“šฎณŽ๒ส!ฝ”Vง•๛ค!*dY]ใ|YŒN๗G!‡0ฬgเƒผ์<ษš๘ฯย lแฺฦ$C~ฝŸ:QzFdš" JขฅžI๏1 ‚8้๐5›ƒ=ฦ *Wรp๊qGต๖)๛$คฑผฝฤบ ‰ฟึ์ลKะเ„š0‚ˆCว๗=‰‘พ'’ภT$า บ ขะƒ(‚(โไWฐPสR ;‘ัMz]’ศภmึชฑฆฅบIŽ“รญยฃ˜x6หลฌƒ`ฏฃ—03‘ื ํแฆD๋$๐W๔E@๙96Vaณ€cFฮŽƒœD"†+ืoศฏ็ฃyL’A๒›IS0sXษกuฏD๒-X_๎’Umบ ๋rU_ษว-ฦ๏ ๛ฯซ GmD๓@TŠ˜‰=‚ะ๓B.แญ"ž9ฬNz๎ไฌIL ŽXS=ฦaฤ’D7aซัFK4t3สฌD1o๔ีฬ†1“๖]=ht +E๙ข$ฮkmฃ๘Žh4*ด2k0-g. f”‚9`ฒฐ‚w%>฿ˆcภ๊ฅ+ะ ๊jญ•“บ๖G็ !-‚@PSZcƒi˜œGะ8ะR6)Œ"š"Aื|oC!ดœiށฒไฅไพ๏ศ— ZƒงRี5ช๒–D>ซช?S! ^ศ(ญpฑฝั”B‘ฑค๋œ„kย| s2/jNนž3=ƒzฦH˜›ATสฉรŒ˜˜^`PV๕฿จZv#ˆe& ”ฤgk–ฃ›X7ฏnZ’~pฮัk”ฉฉD5YƒI โ็Pื(ภFb\๎ Mยข]03eฒPฤ ฝ0W˜ƒณุณญ*๊€มฃk<’ฉฮEpgr8ฉ}ถ_ฤ๓MHs9V™˜๚ศ@ว‰P\๋€Xœ šQมyD ทFRดษก/?ขtน๓fDด2•Y0B“๕?N2JnLDฐyๅื… j5y?’โร qUtฉรGๆ•a3๔\ี‰v˜–จ็ค็ŒgMด‚฿mHฏญœิg,’ ^fPAิžฐC#ˆบ1!ชMR|Lะ>/46Š ๊ะWฏิ™g"‰P๓i(Dม?ฺŽ๊=%rI‚&ส6d "ฅ๐แฉแ˜๖™m!fชr <nIx’ƒๆ ‡‚j—ะR`—ฏ@B]€(jฟDำ7t๕qŽQวm ขศ๋”ิฐฮ`9AฅIUdekข2ะๅ0ขํผงฤb B-Mฐะฬ้!1, ป$-OGา฿€ฉD1๕Uผ+แœp(kน็" โi“lkฑLCvPฏHIA„@ฦ)คฃฒH๑G่aำƒG"ถf=ค^ตตดd&ฝ %“6ke! $Wี-6ฝ‚„gUื™ิCๅœ 6Taฎn:‚ไฤkMYฟยืopื€yi’qKœฏns%เะN้k 9`๊ค.dแeฤ™ไ,– œ$ˆ\7ฆBG2ดSI”;ฟ๙ V่๐2^f€*ร’†ไุฃV“ืSาาปฦ ๔ จU`uaค; Y‹ภ| d๑D.yโV]bรย*zQ›ฉ วซ3|ODNjพŽsSศ@เ5p-ภ?๛่6ทซ–ฅฃ"‡TGT๎%ดW๏S‚oŒ๕im2q่ๆ2๙cฐœกž: oJXŸ๐—†ฟฆx#ˆ8ฐ=.œD;'ˆaษCrZgS’ฺชPTe-ษqส–ฆ“ืQฅืจ๏„ ็J„Pp@WอtŠYฉฯœTๅ@H๘K่K˜็๖ค–P2–ef '5‘Ln>BSpณ‘“„ถmˆฯ!\3Y€(d"Zo๏k1ยGa‘๏ํO๗ Qt๙๓‚a^‚ X'ziฉb>c*q๔กฝ7๚ฐฬqBย3ฎ%x๒|ฒณA,#Aศ@Swฺ‰าขฟ…@2‘PkฅฤFTq-…๙ไ4TXl˜Ÿะ&๔ใ›@ไ‡>€›˜<+บ”ำŒjpCHซHA!ก—rgึไ0เ(ษjQW‰๒ท!8A`:๒๓ฌค9ญ๋ณ"ดผ>ยIe}#…ัQ :๙K๗ %Y4ก๏yึJ‡รlŠJ๛ Ž•O‡อi฿: 3bbฺnQ๏oฑ|!แ/!Oxhก(ะbด \›ศv_ู„-ฬŽ่’Rผ/lฃ$CA"ูฬคxx%RQฯส4ฬ‰VชปศY‘ƒพ„๏%฿พณปเฆปop{wัท๏่.ฮ™0พตฮ,จ=กยw"…๕A๋+มŽณs!ญ"€u‰8ฟ>W$ฑฎา"@“H‚โŽ^‘~„ฒ‰๛ฟB[ฅ็ฤŠ'ˆ่ว13BG$|8Š๖ญหNC Q)„๋[‘๚ั„ปอก^๗DTแm ๚ ยK|ใ์kฑฒ bกฅยW"A,ง1‚8ๆˆน=พ‡`Fโ‹ ?2W…๚^ž๐ŠY ˆพBTOธoฌฟ†BTu,๐ึโคV†ฬIิS’?B!ฏ‚ธDจ่}ัWnร› ฤฌูœPw…%;ท๔ขฦ์d•B๋,i/'‚‘ภ– ึ…ษGคpM๑ฝ฿2aศ$!๏>g3B–;รU=V็ั๙พrm"}Ÿ)`‚7ึ8ฒถ‚(Dก๙ดยวE@ัL˜,KQIHยข้t์Kkุ\ fฉaj1ูถ™ ำ(มŸฦ/'|`k&ˆl š4tุ‚œ จsC‚ฮiกv’Mk2YRTƒ ๓ฐฬูœ…ซš=V3‰ฌ็’#A…ึ‚ะฑ๒H@ ฒฬ=ยH$๑ฅkoอš…2พ„A๎Spญวด็2PIŸ๑ˆ %-ฮ็d%,” <:jk15‘ W๒=P:š[qo9Ax[๎ฯ•J'ฏ=r 4(ฬA|)แ> IxNศ+VA|<แ้v] $|!แ1#ฮy*ลฏึฌY3•็ญป•ฑƒ๎X}aญาdr>ศ˜๖W%`1/EหQf{ฃส8g-by›k2y๙lซ’J‹อวtา4$ฌฏ๚๎†,ค%ˆ%<%ะ I…,$`ฅ]|๓ๆ;ฒP—pฟ1ด Š๒!จ=lี‘NQ#IŸ-เœ^$ m"%-k‚Fš๚M๏ืq๋ย๙ํ1‰fโ5 V,IPgจŠ„c”าdQฃ]ดฤr™™ฆBว9 8 3BKุ9แA ๏N๘‡„Gฬ4ADสท|ฤ๋ํ•A๋''˜ฐ๋–า –“ ไg aŠk“Œฌ)IขHnƒ“ƒฯ๔ฦeS†ฎ„้ไ€9‡่"™„ฒ)*:V๛0)Ih’…|๋†อๅน‰Hาvํืฑ˜„ะUuaฌใGUO๕„8L@D5้˜žคตH{A‰0j‚€d๚Bp฿šDๆY\๏@Eู๘]‰n9}ใ๎ฃม๗…–ำQ=‚8jS๛Xn‚Hc[๙}ปYb๊#ˆˆลJยŽ#๗๙„‡n &&สฬ‡ ฐใๆAาjB๕พ8ฌƒ$<ฃzุศต}ไ‹ฐJข”ฆ๔ฆฃ,'สehฟ„8“ผยLoํษzฦ}c๏๋3ๅŒ#ˆพแ๙INs‘ฏkB่#˜ฌ ฎuAbB_Rž_/โศh–‚RDเ,v`]L๋qฯ M‚ถ(Aw๔ๆ:Yc0#ฤg™ˆฯbปX?4แๆIjฌ‚  ฅ*S ๓!| รšปิ%7Š9๊ฌทgMใ)๊r”๐Mี I๘ฅญลIM8+1฿ฒวK5”5ฉ AยNย_ๆอฤ5 ไKะ,ศ%์๕˜˜<๊rภ'qKีมz’ศดัT}Zร0’F ื~œ \cขU้ฦป๎žฉ{ฑ.ข Žูxk f„ ƒwu-Qne oฟ่ถ–{”กEเ_`9|๊[๋ปsฎ๚n1ืhพฯGธ&ข”ะ๊'$‘{LGO๏1฿"{“jย’)ดฦดZ•ถฑ >ˆDฅ๔ฬA,š`AฬฦPตf‚ hC๚๋ณกูปfล"‘‚BfAภ #โ๓W/q wฝUXuฒ@ซ)ฌแ‰IKฟ‰~ฃ๋-ซH>fSUใ 0#ฤ‘Q6’x}|ยŸ4‚XcXH๋ฒฤU_ษ!ฌ“๐–ภ#3ํ€PR|ฺ๗ษหฟำy้-™D 
iฒ๙“<‡ยk.iนาCA}ธoญjฺญJุาฑvsO”1˜‚Pฺภร•B`.iฑB‡r„e๗Aค|2๙ฌฟcณรค„}<ฬOฺฏ0RAฤ ํ$พy L.8ฆ)QฤF+พ’ Œš๖ฐฒ โคDszก มŒฤืc้qa#ˆNสš^ฮ‘3ชรI-แ-O9 LK๒8ฆ๕š0R ~rผ+Mุไื Dฃยซฑฎดก๋_ วดœR%ml‚8ุน}P†`Fโฌ„รh”ฦSตญฤ *ซA฿ๅ$‰œ!-รทๅ/%2dK"?„ˆCฏE๋ญ\†ท%็A$rใm›๗Žl„h zKUฎปลBฌฤขฏ'Jf„ ”6๐™„ป#}@ล๛jฑ‚วL„eHK(_cE๗๐;PsI„ศแำW~ทแ#ืงฌfะ„ถBทTUZั4j‚๐0QŽiฃะฒๆฃ™(2 ‚ โฆฤคqœึบร0KQLi์”ฐKืข˜V /=|—… ขดœฉๆ๘ฯฃLZƒฬN yD†ดL+$ศykะ[๎ฌา (ใMm&ฝ:JญG๔ยGn๕ีze:ิb1ะ kfDƒุ+Z*ฟ์ผ„7Rึจฤ หขQ7ธŠ๕ฝ„_มzRย‚ฒ๔๙I๏ผฟ๔๚ชm่๚Š$(ฉ้ษ “T33-Ž ไใูZ•{Aฬƒ ขOส8ฬA|:แO ‰LN V A,WๅหF[AHฃh1Aœ๐เอe๏ว`–๚ATพัb J#o้ ˜9I๎Wfว'q‚„พ—ฆ๐ ง8ฐe^’ '๒3ห[ 8ฎฝ๎5‹03QAv]๔ตn-CA๊˜ˆ€ๆLD•Qา]ู๓ ฦD&ฺ 0#๑ ฟ=!„_™‰Žr ฆ3hGบ1G„|šaRีKEh๋ใ๗!x2ฅ๛H-ยk6๙ะqhm,’ ิ๊๛ฒfG>ŠLฅ†ะU_i1’ Žฯฟใ$˜‚ุ˜๐ ?Gl64‚Xแƒrษ‚šฎl‰‘ห}ง\‚ูฃ—DTTe๖้Yฝ (ว:‚\ƒX?คZซ›กšsz $ตต(฿~kdฏหคH™๊F#โฤใ7w\ƒV‹ฉฤฅ#—0ขผ๗ด†ฒ@%D$@๊๒TUลg@!พ UQ='‰š,๚N๊ซK`ำ@อe) ์-ๅ ฺKKaาQwฐ›†!Bุfผ QAW6sดˆFฃโ„าวcf… ขา™ฉr฿ –ˆ,‚ ่ิตdฤ฿หยร;รQu•ฆ?ฎ ธฐG๐lฐ,่ฌD}ฌHaX ฆ[Œ 0‰ฌ‚P]* )‘.&ŒŸงฏล้b๕2๊ฟลQ-ˆ4จิ|ใ "›้&ภŒ˜˜-š฿ำส}o A“xZˆV}งB๋ฮหNj  ˜#๘>๓๗D62ข๑C๔iล๋ฐ6j@NใzGฯ๒9จˆก "†“„"รจ{Mิฟ‚ ผEซŽำ{ฝดzฏ!ชlไัo<'อฅš&7 ฦฤCNง} f„ .k&ฆญ˜$ ˆamDi;ชvค?8็=›zW~x๎3ๆฤๅ็fa!ณƒ„; p(ฯ๖€G+๐ูญIxT๋น๗Cฬถ&+‘ ”].2P๑BUธ}็ืฏ๏๏ืฎ๋>pว/T๕VDAŸ 'A/(๑๐ร}ไโo็c๕š์t7๑อ‡ ๘ฝEŠี—9Q…WปyiชAุ1๗yัa๓Šhค๖’%"ˆw&ฌmฑŽบฯ4งีZT„ แ? A๐‹;H—~.CCณM x4 2 qL; ่5m7้ษ\๛&ภฦhฟฉ™ฌฐฺ†๚aH๘C๒บ7|๑š๎Œฯ_•ก๕ื{u๗ถฏ^—‰ภ›0 ฺ๖ชฯ\ั=/ฮK‘‹ฮC ด BŒ)s"ƒs49ฉAศิคzA[ƒiiบ๑าฏdF}žZ,'\ต’๎4ื.A<6แฮ ข‹.ึrj‘ฦs๖X ํ๋ปUO๊=#ณ๏ชX๎a๛N &ีyB#ˆ I""จ B…้7ใ‹?ำ{แูฅตค๖‰„}๕ฃ}ๆ&อ4 IiLI„›nนc๎พ1P`N็\ํ!Mแoฟysึ^ู+ณ ้ง.๏^๒O—f<๋๏/์~๏o/่Nไey฿+๙[yฟ^‹žพฏwงี—๒1"”^xS>ท’eฎา๙ฅuf@‹>wTฟฌ9จ^ด‡F๓&eใ0† ™pv%N[‚ธ:ิสข>L“ ^๒wกm3ๆ:ฉ"ˆ3PฅดLxMฌฏ >พˆ˜uF“ B๒j8‚^ E๕–ฮ…ูTaRB!f9Nd„ม๛๚HBC็`วY|ฝ™œn07Kวต4Œ1@zˆV+Aศ$$อ@ค -AB>vI๘ฟ๒๎ฏๅๅ|๘›y๙‹or๗ค๛•๎็๔/ฃฯ๘l๗ะ—Ÿ๔'gu้•Ÿ๎๛Ÿห–/ๆใE " ŽEšˆHd!โ†กฯรก=ฮฟณ5ฉD4|‡๔yื…ƒœjฒPeท฿aฏ#แอK@Ÿ]r„HAณ๚„YนjŒO๐พƒ+‚vฐฌ๏ฏื}์)fร6‚˜|dแหฮ_ย?;ฏ๙z&OฯฮI…5ฎฟv“9+s’7Š$tฌ'มOO:รI‘ำเาn‚rญNาีX ่ฝ็˜ษMA€๔๙B๗k๏๙ื๎w>x~ึ๔๚Q๛ณษ/Twิsฑ;่™๊|ฦ{ปCร ง}"oiˆ(D$zŸ4 ๏ญ_น6›ฎD ZŠ,DF๒ULะb~xศCNจ4< c4ˆง๕ฤ›–€ š๐7 ฟถคaฎiœ๐†„o%•:I#˜'AQํฟ=–oNxzๅXy๊sž #ฏYณฆ๕=šฆค,๘ี*iู™HCš$!Ge61ศ็ ๗˜&กuึ23ๅค‰`$ไ้๋เ!˜ž% 9 ™ ะ,กh5i"อ๖฿๔ๅuYKp‹D า"คไซฯษย_D rX๓่๘๋๏ศxะำ฿๙œvkŸฑผ<๚ygvล?gBม์ไ ™›คY<ใ็ๅ๕พฤรF‹%ˆ‡xfฤฤ๔๎L/ฬ5็F™ุณƒ๕ถ‹ํช๋qอ”โ-=๑”ฆA,Nฃศ~•sŽ&๊"ˆ์|–ฐWฟ‡$œณณ’H–ด}™d‚ D$นห\zn~8ค gลq›™—Šึ€Y)‰V๓!“’–"๙ ด ๛ใ๘™๖G29์+oํ๖{๊›บ}Ÿ†n๏rF^‡0คU๖CwS&C… mB$!RาviิะฺZชตn)‚ุะtั‡1q฿„uaRวI}lทา2ฉำ๘_รœiำLLณ;p>gAรE™ ดฬยžนIปศ๛ถ›7๙(ิDF~‹๐Od2QeWอ๖ำ1t™“ M„gHใ—ุZC6%%m…์œ๛0ะึ•:d๎‘ะึŒ^ยZณzi ๘$เ!™”ค-@ ยžงผช๕gN๋v๛นำปฝ~๑ีyŸŽ(๔>‘‹4AŠŸ}ใน™๑gŸษคคhฆญฉœ๗– ˆA”1„น>1แส๐นž-M˜๋‘ ็ ‡#ซ๚OบYsํ!ˆืVN๊3bุสIฝฎ9ฉง`rRD“p๖;6๙ค5„yG๋g %Q%ยศZ…ดr"ะ8ไว๘%Wฤ R`ถ*ำ“Hรากฐ$ญๆ/@ฆ%ir(Kƒ๘ฉื>“รฑ/x6{฿๐ซoฯคฐ๛ใ^š a'ผ"c—Ÿzq& ํ“fกใด๐Q"-eข’v!Ÿ…Jy#[ƒถถฅ ข.!3 3’(๗…„‡ห%`.Yv‚Hใƒ ท$›pSย3ฃปั9ๆชๅžv้มคา2N้Z˜๋tMNŸ{&Sh๘D๘ ศ}้);ผ9*‘ˆ4 x>qฝ%ิกAx๛PขšVkไ’ๅ4H{ะL^รZ>อ๒%ภ๑+H+ˆv~ฬ 3)@ฺ&MbŸ๚บฌAศ$@‚ถฝQˆ€+G5ฺึ@ส[‚ NHแตQ˜‚๘z, .์Zข\5Aศ”๓"9NคICšBZWT1I…*๛Hœ‹่จฌi$/เ—:-Rธ1ข›pZ{ึ4‘M[K3Bๆ%๙DสyA AH{๐—–ฐรรŸmะ฿ฯ๋๖ฺ.rdb$ัฑŠ B‘ ๊ฤ๙ษ์ะ็*๔5‡ป6‚˜Aœ๘r/รŒฤYŠ8UหัnsxํY ฺ˜y‚9ซฤ–%ˆญู4-‚X_uD†!ej&แ๎„›พ8ีDนFซsˆ ๅ”“๋!ฎi{Nด sS๑Q(ูNyrZG๋J ”ู€ ศถฦiMฑฟ\^#œโซ ”'฿e3<ฤUัE m•ƒZ&&๙ ไ;Hธ฿# {™‰อ$S’H2ำฤpฟ“~ท๎ฤ฿ฮK‘‹ถ้="r&D๒Gจุ฿ึ๎˜†ภ>>ฤ-ึŸdf„ ‰ๅN ป๘ถFmŒร’โ2A$dถแ˜1d„šฒG4แซฎ%K๙–:F5jฒร›ึŒซภ!2  ž„4j/)ฬU[ q oUไ‘r<‹ ๐5Hธkป|ฺฎmd!Bู๑Qฯหคคˆ tœHFN""}ž๋t=ุฦWปgฉ ‚r3ใ0#q~7AŸ๊Fmฬ{dำาufbศ™ุ"ˆะ"ฒ&กiF*ก๏u– 4iZฯไ ๓MตJœีฅ5ด”iGdก{า"คAจฌ†B]‰dRจชข‘คI"E")!N๛๔ZD!’i ำlย`ั.0C‰ ๔~™ฑ๒ช[ีgQo KM›c–“ า8Zนd๘๓dรo%\ฺขกพˆaeพ๋AไR&!Bึ Šะ&07‘„'ฮัช4ม•ฑ๑ŽL&ซฅeผŸ๛‘‹J๎ƒชธBf&E2ฉ†ทศ@$@ถดฬN๒จ“ะ—]ฦ!ข๐ˆ&2ฏE8:^็ิ9T฿I๕›T%Vลึ์ฌž†ภ~๐ ษัz“`™ โI‘5}[•E— ?ูขED& 9ฅC[ศฆ&ๆ๔Z„ฬDฺ&๛ถ–ัญLพ‘ษs" ็๖jV )•ญ_3uๅ;HƒP๎ƒ4 -ฅ]ˆ@djR•Vi๒ˆ4ห)ศ์คd7•”PG}&r&ะ*ะ6”Uญm€r‚LL:๚\]KฮชŠำ"r~ฦaFLL\ิ๛Al]ฃ๔€˜ 
[binary data omitted: tail end of the preceding PNG image entry in the archive]
xarray-2025.12.0/doc/_static/thumbnails/visualization_gallery.png
[binary PNG image data omitted: documentation gallery thumbnail; embedded metadata "Software: Matplotlib version 3.3.3, https://matplotlib.org/"]
xarray-2025.12.0/doc/_static/view-docs.png
[binary PNG image data omitted: screenshot image; embedded metadata "Software: gnome-screenshot", "Creation Time: Thu 09 Mar 2023 11:58:50 AM EST"]
tjถ”ม}ฯœn!A๎๕6ฉŽjz/ฌ้๎$&RSไ8าญgooีw๗ฐ๖{7k+พัb้WIๆnุ๊๓๎๊๏-\๙ฯ+หฟ‰ฬฬe_๛<ฑp“ฯฏณส็>บฑ—2!%CKำใ฿๒ัา$N-5™ชฎท!_Kฆfdzท๗k_Xjq๋[_๘Lyw•ล˜ื–๛่tl๙t1P‰qoฌฐ˜ต๊;Ÿล_mณŸูˆg๛่ใ#Sบ525[~N๚๓”วเ‘น,R๎gWซ้ฝฺพW จ-ีโŸ๚bYสƒŠ 5š8KM’šญ๎ๅคขKMqลH ค&๖RSฃ–๗F็>ฤJj่vqqf‚t๎(คRgฉi฿ญ–๗ไชพN0๙^ฤrHMฬฅฆSอZฌ๎๛ฉกใฤGf&AโๆฏH ค&=R“ER3cี~N 5H ค&ชิผkฟ@โ$5ห‰JฅนฃHMœฅฆ]ทฺ#+tฉิ@j 5‘ๆผชU{oฟ‰“ิ3‘eฉิ@jา/5mปึ๑๎ขŸH คRฉ‰$5ต๋x/8d?ฝHผ@ผcๆ€ ^็ฟ๗ฮญั!ร๋์ุ|ไkœ“N‚ŽO฿b‘๕๑>m›h๓่คิyhฒอ}S|ฺuš…จ.ฃ’๔ธฬF6ธ๛ ณ‘E%ต ษB•RNtJฑ3๕›i์Zj๖?cš•า}^:[[ถ๏ฒ๘๎ว$฿ni๖ุื‚๕[v๘,๛๚G‹ูkถ๘ฬ˜ฟัB6ฒมี ฟL]—"d…*ต Iแ K–bกฅๆ๔G?๕‘)W>ฟฤ‰|/๙<น=ƒ|-นบpง<R" 6l๚๗N‹ืเ#๗Q/•โขงr"_kฮ;v%ูฎHYj๊ิ๑><๔@โ 5‡…ฉิ@jา#5ญปึ๕๎X~ธH คRฉ‰TGฎn๏“ฃ๚งหO“ฃ,ƒิ@j 5%#5™$5yŸpฉิ@j 5Q่Rฏฎ๗ู#‰“ิฬXถRฉิคKj๊yS?่Rฉิ@j"IMบcŽ $—ŸD,!v‰ฌ‹2“0คRฉ)ฉiER3aู'H คRIjิ๓œ408HM}ข๑ัV๖ฒโHMœฅฆen}o’c@j 5HMrึ๓~H ฑธDทฝ‰ฅฅฝฃีฒZ๚Aปว'๘ไ“‹‡R26ลF4DZ:ฒnOข“ฯk{o’6Lถh๛ศ$นฟ{๖Y”lp๕ke฿’็#‹2dJด.ๆ(‹z๖;aช.ุ่LWล.:อBช”)ห=/ต้}a๙ƒ_+ \mCฆยk™ศ›Dง์หsBŠD›๛งXศcฌS๏ป]•คใฤ$mhัใล}๚ผrƒEท™c|ดtง@2HjnZrœHM4z๗๎ํ7?x$’ab๑ีV›/ุแ#"-’ๅ฿ุศ็Iq1l!‰”นฟฝ๒ตdjน.ส(Sขu1G)2]฿ ฯ -๒s’ฉ฿๒ต ๗~ฒ็รu[,คg๋ทIพ฿hณํ{-)KMฃz’3Ž$Ncjfm 5HM้HM‹ ‹NtฉิTฉ‘Bฉ)~ฉฺ้ธพ๗๙ูว'ฉy›ุมEใJežH ค&ฮRำ<ทกwํยกN 5H ค&’ิ4iเญธเฤ@โ$5ฅ>O คRwฉนzแฉN 5H ค&าฌฺMx+/:9ธU้nNร4รๅ'H ค&}RำฌKCoฤ฿;ิ@j 5šHRำฌกทzฤiฤฉงๆTโKโQโ1N้ ฉิ@jา#5Mป4๒4๏N 5H ค&’ิ4oไญฝzX q’šEฒw†nMอฒt๎h๕-๔์์gn๖้คRทปทฯ๙ๅ๋ี๙aณn e#(Xv.ŸฃA๙^ดฐIษ‘E1๏x…Lฅึย#๙า2ฝž”2.%Iฺtฅf๋to)8บ~”Lฅ~e๙7>‹6oณRฃSใฅTJ5์ฺ“O˜t„ษง<ไ9๗หw_ZไK๑ค,5ญ›{[๎ธ&TทEท™ฤ€ย|K[j>,B@zŸศแ๛cyะ๑TeuSJZj\BS–คFฟHMู”yคKj๊“ิ๗%NŠะml.ํ|B,ไK4๛‰็Œโ4&` *eฑ)–8wฉัvY”šฐ๙f 5E—š^mZxพ๛๚@ฬ‡ฤฅZfxศwฒฑวผฅkZฺRcบ{Ÿ"N็KQ{Hแzธ98‹‰็อต6ข1ื‘Zล7‚ิ@j 5n๊ๅ4๓Ž~๏2'E่6~ƒยห&ๅ๛นZN{๋j-5Rxฟ7ยB>ถ๏ซฃ|๔3Y3ฦ๛ศFี ็F‘‚ฃ็8้๙า>๙ๆฐนgชOฮ˜<‹#’„Uัถๆ‘๙“”-+๛ž)ฒฃฅIฮ?ฃ_Oฮซ#็บั๋u•${ผU•๛พ)Zr|ูQ๓ ษฯIฯ?#็’2ขฯ%yN่ผ๒9โํ+-R ตณ›็{]IบM6Tพค้ลแ๛]ี@แตฅ9PธุคฆWOw%dฟ|ปฮยœอ+’hฉ‘๒ณ~ก…|L7`ฒ”๓Ÿ่FU>ฆลศœํฃ+<ฏ>ษณKพฒ˜๒๎*- R,ไr=Œ”-+็>1฿GสŽ|ŽAฮ=ฃฅFnOล”๛5cFŸwWo!ซr๋Š้z‰œOH~๚‡U฿–ข+๙ทu l็Ž!eฉ้้ํ~./HM{j 5šxKM ๏€ืฏwR„nใ~|9สฬb๑œัœ๕๔E"C Rฉิ”sฉฺุ้โ@j’ม๏H คRSrRS‹คFžošŠ^ะRฉิิ๔ษnใ๔๊}ฤฉJ๗ฮ”จสƒ€ถศA„้Rฉ‰ติtlQlใs 5HMœฅฆญ๗ำ๋'ฉYศŸH<สลซAj 5š๔HMM’=@Yฉิ@j 5‘พฺy?ฯz48Iอ2~b0฿‡ิ@j 5i“š k4H คR้;“ๅg๖ฤIj&+ฬ€`พีิ *L็Žš๎๗„ฌH™โb8่๋|dสk˜ธhไLญบ‘’๕vไrะษิ`ูpฌF๖ฉ>บาด|Žฎ๔-f]ม;๗๚$ab!ค๏9ำ-ฌ4๎3m๖๛c’พg'ัฯฝ.IืkmคlIมัr%Ÿำ้f›๖ำฆ๛ดฝsš+ฝฎพ-ำปe ทAŠŒ<ฏดœศย’g|rพลูŸžํsๆœs-R fJฝHMฤ€ณ‡ŸBkษ„—_พY๋“/ฅ%.ฒQา่Fjํผ$๊1ูะษิ`ูpdึ‡ษl˜u๏Gๆn๐ัฐฅ0Hนไ™E:["ฅๆข.๔ัโ"๗ใมฯพด๛กฅIส•|ฮk+พตXฐiซฯสoท[„ ŽLฏทาปuJทCb ๒๙๏šฯ|~[๖Žลฏ _๗๙mษ[)KM็๖>z&X ๆZ+•๙~mS9RฉิคOj๔<:H คRฉ‰&5ผ_>y.8๕ิิ"n$ฦgว@j 5š๔IM็gว:ิ@j 5šH฿.ฝ_็พHœคฦT่พŽXสืL †ิ@j 5%/5ี;ดฬ7ฎJฉิ@j 5‘พนูึ๖$q’šนz’ฝt†ิ@jb-5ํ[zูฯ์Rฉิ@jขฐOnง|(์ถสณิ|ฤฝ3๓๙๏ฤงH ค&}Rฃฯ คRฉิD’šฎ9oหg'ฉ@ผG|Oฬ ึ'๊ลDxฎYw Oหž่๑1๓ผษ=อ ;ฃช–)a#SmตธธRrรึ•"d๛คUหย”ํ:อ'+oบ…|ฌอS,ฌš›bัaสtŸ.ฃ๓|บ_a# Z๊ข•RVz_”g!Sฦฅ4…ฃ”๛dฉูน#“tปสF๎ฟ>>–>6ั‰Lงื2 S่๕/Rdไ9๐๛/ฐธเณ3}ฎXpšลต ‡๚\ฝ๐T‹TH5’š๖O๊คขKMqล‘>=ป็Kก’งฤ–ฌศTm-.Ž”ื—๛๔ำ๎ฒ๐โ๒o~ด—}ฃฯ—?์ฐล3๕cRpžZดูโถึ๘ศ‚–Z\คฌŒymน…L—ยtœ๕ฒฅ/™š-ำถ ฒ ฅw]ะRŠŠ,`ฉั)๕ฮzYคาศณLึ็ 9ถEg%ฮ฿}Šš†ฝOทฮW}HฒŸLi๑฿™ยD“ƒQ“€ŠG๒‘ฤdH คR"5Y$5OŒwฉ)rิ@j 5$5ซ็งžSแw1†nC์W„`d ไe๐ ๓7คRฉ —}SSฉI9Ž@j 5š.๖ๅOAœคๆโ.bน˜ณๆณˆฯ]gฦโp%เแผl›ZgซในรอA6TkVRฉ‰ฑิด๒ฺ=>มI คฆXโH›ฬVHMผฅฆ{n`โ$5๓ ›Dท–3๓โะจม=5H KMปVก๏1RS,q=5HMฎs@{œคfŽ™MXศMS)8)ฆฑฤ5ธฉิBjTๆ–$NูOE‰#HMฅฆOฎ๙๋”1q’š?/›ˆ๑LN‰๐อยJีVRะ๎Ži>Zjdjsว y>oด้zMKT —%‘E% r›:\bI™.,)%L.ํ…ิ@jโ.5zrF &฿K= Cj 5ฑ”s VO6ษฤIjฦ'›ิnH คRSJ=5๚s@j 5HMTฉ‘ณKโ$5;ˆ#~!ถ๓฿!5HMšคฆM+ซ๔ƒRฉิ@jR-ข‰ีŒยธฉิ”ฆิdzm๏™๊Rฉิ@j"}z๕ฬท โิSำ'SิฒJบvดnงfุ้ZVdกAนคd!‹๛ไผศHม‘โขท/ืำล4eใ+mK 0t˜”็“s“,์˜ฏฑ”Yา}kžO—ld!ษœฑ6Rผฌ4s•‘#Sฉณ1ยJI๛ซลNพ/๙^๖ศœx-] ฉ๓ณc}คจ๔|ษFฆpK‰1ศฯ๓ยนร|ฎ_tฒลธ%ว๚บ๔w๒ฑŸ`‘ฒิ5ี ค&b@๏ž›,@)Skี|N‰!ได๒บ๐ …ฃฒม%8{ ŸXOห•l|uAK)ฎโ3sูื2]z๑W,ึ|ฟGสa๎†ญ>ณV}็๓ฤยMฒไ‹Ÿc!ลk#IIYdำ 
ำจท๏ฺm!Sาๅไ๖ๅ๛า๏Eพึฺฆไ็|๒IภฟฟN"z;ด|สฯSฆmdชถ%/ฏg#ฅๆอ‡, #5บ่f‚(โฬC“ฝ:‘}XŒI๛C–G์SœR๓ _zšว˜๛Ÿk‰H คฆ„ฅฆufพ๗/ิ@j 5šH฿ฝฌ๗%)h[<_ฮHฌฦ‰นล(5๏ํ–w$.Nฉy’่*ฮ%ๆ7ถRฉิคAjิ๙!)-ฉก[fศcวBj 5šฒ&5ฝํKg‚Rs ๑บ๘{”กใษ’วงิ,t-ƒิ@j 5้‘š|วFPŠR๓…ใ—ีนๆคRฉ){Rฃว%เยฑsรี๗z(๑€๘๛ฤลOVๆฑยHอS\ิ๒0ๆnโiขzิย–H คฆR“™™๏s–”ขิอ“฿eซ_oKยzq 5HM้HM๏}๒ ˆNกงๆ”ฉนฃใษฝ\ต Rภด2+NฉฉI\M)ๅd๒ฒมท-?า'๏๓rฝ ห†Xค*5ZJ%ฅ=P˜nˆ-\#ฎFY(ผOื_BSฎ…ธไKีr"SruB๙ุ/Ÿฮด [˜บ๐๕$,c ถํํ#ำฏ ฒA—ขฅFฆ=K๙1ฌ๚nปN๗–ษ๔h2.Sฌๅ>djถ|/2zO*๕ฎ>๚ฑฐ็ษ4kWถม’5ณศฉAL`‰ฏ8ฏ ฒ๘คNฯโIžหK2๓vฑO/฿m‘๊wฟI2%Zง้Jฑป๔ ๅgำ“X)ัJNฌขˆMถล"u:ถ,)ลE# Gj!‘iึR:dบพ!ฌ๐จ”H}Œ]้๕:Eผฯฮ๒นvแP ) R\๎]qจลƒ_์ฃ“ฯ›๚๙@‹TฅFฆ๑kR Ft›I เ๙ฆŽ xJี4iœ&ณ”ล9Ž๔้’ํ:ี!ำkฅ<Nฑ ข!ดRoUB™ส๛ŸžŠDพฦr๗>Z:mๆ#ลBฆ”#ๅว ทกำฝฅœศ4jห_Žืะุ้ฎโRb ฎย‘บxdขPi+e?ฌ๐จIuŒ-นUโ+ำ๗sАณGd„€XโB์zzฒฯฮโณ๋ษ 6ฯL๕ัย“๊wฟ{ฯ๙ฮ—^j่๖"งh’B0บŠ$‚ั”D!,๓?1Rฉิ 5ใ๒œ4จ๚>ถ#6๕ฬŒเ<ฑีโ=SPŽืน“ฆ.A-eฉ)rิ@jโ.5HjึาgDค&1ƒ๐ํ<ซp"o– Qkรณˆ#D02ื่3Dท๘H ค&Dj22๓M‚(‰บ-3ฦ;‰^J•็ํgฆmเ๛wHอษฅ(4ลG 5HM๏|&ˆอๅ'บอŽฒฬ๑gLษpขฟFิ:[ฯž๘ๅYงEmH ค&R3&ฯIฤ๙%ช๒eคซฤฒืฬwSmฦฮ4-k—ŸŠ+Žดiั Rฉ‰ฝิ่1X โ$5หๅฌ~—$‚ิ˜2Oหฐ9šง:;_†šo.๏ˆ็Œๆž๓}RŠฝ4ลG 5šธKM.IฬX“ฤ*๛‰ฑ๕dช"0ษ_XSีฟ)=ฟynCK"hY‘2!ๅDงำสดปV๔ทkˆ$r{๒ตด ้}– ณ%8"Uฺ`oผS!…Dsw0R  ํŸเ#‹lคธไ(A‘๛,ๅคฬ1r=™bmฒ"ลB ก” -˜–/ำ๐รา๗ฅค๊Tm)'๗ัฯ็‘•FF >ฯR–šQyNโ2๙^QใHŸœฌ|ŽมJฟ6ู้B&tAW#%(ChŠฎ@ง่Zb$DH๏ณl˜ฅเคXศโ๚rƒkl…Aฆm๋วdJ9อพฎ๚์ƒตฯฒPคz/–ธˆkƒ”){ไB~†B"๓ ฆ<ฦ:-_ฆโ๋sDฌ'ทoฅi.q1์๘๛8Ÿํไ#—,แQ็YaคFKj‚8 ๎!ศ๔)D0jฬƒV๑ 5HMธิไ^Ÿ็$ฆR“rิ@jโ.5]HjlฺHคฦ\Soศ๓AธXŽ…ิ@jb-5อ3ฝฎืๆ9A™„h@j 5q—šฮ{ๅ›$1Aคf=\ยงH ค& RsMžH คRฉ‰&5=ญqVฬ(œF 5šธKMทซ๒œ@j 5HMrบ๕ฬ7`<คRฉิคEjj6ห๔บ_™็Rฉิ@jขะ‰คfึช๏ิค‘Œ๚๙ฤม ำฑฅtศFI763Vํ็๓ไชพญ฿G6Jูธฑp-MaR#m™–,‹+dF-2ๅบํ#“,dHY๔QฆNd‘อCบฦยUา ืำ"%฿ฝG•า!U˜|jยค๒ฦล'๘ธR๒ ๒ัาแ:_ž^วBž;r=>S–š+๒œ@j"JMvคœจlซะ ˆศฦๆว‡์D6Rบ1sฅ๋คเ„I•๎ญ aŠย—ฒ@ฃ ™rํถฒ8คU๔‘ฉำVกMU๔ัU2_HQR#‹E๊ดj)ZPยRฉ%.กL%-งW๏๓Iๅ|ูz฿(ŸmŒ๖ ;ฏด๐ค๚ฯ&ฉysๅw@j 5HMฺคฆวˆ<'H คR…Ž]{xฏ,&8MพgfFŒแฟ˜:1H ค&MRำ4ำ๋yižH คRฉ‰B’š™หพ$NRsธ[ฮ›4๏ฯ 5HMฅๆ’<'H คRIjr{xฯ.๙*8Iอ|œรRฉิคOjz)ฯ คRฉิDก=IอS‹6'ฉ™cŠู นišฎI๗ 5H IM“Lฏ๗…yN 5H ค& Y]บ{3ๆo $NR๓โ.*7ž ษ’ฮmตฎ/ฎb‚)$ฒแyaMw‹็ื๔๔ “Iฉ‘ฉ฿aRฃำ”ƒRำ ฒก7ศ‚œGฟw™…L–้ื†^ฟGvLM ำฅuชณ.)‘!ๅr)h๙ด|๊ใๅBJช<)•)'a๋ษsGŸ๒‘็Kุนค…'Œ”ฅfxžHM4zทฯL DHชญู๐ถqฯฟ๏พG6Jน |ญฐFึ’šฐ‚—:E]4๖Vกฮu ,~๑]H๒—oึ๚X… gG•ญ DZธ I*AณŠC yุƒN‡w`ฅั๋ข’J*-ค „ฌ'%CŸ๒‘็‹มu.้sฮต=Cช฿v$5อH Zv&.!.%บค{G!5š8KM-’š>็็9ิ@j 5š(ด%ฉy๐ณ/‰C™„ฐšO 5HMšคฆqฆทฯนำ@j 5HMฺt๎๎๛ษ๚@โ 5๋Dํง[ˆ๘:H คR“>ฉ้{ฮt'H คRUj๎x] E‰#t›Jฌ ฯ ฤcฃˆีoฌํl!ื +Bจ‘BๅJ/6ศ<ฌฃL=ึ้ฦ-Fวฝ‰Nี–3ฅธ่ดg—<n]๚ปHศืโ"ลBส N‡—ว1LNไฑืE&๕็$?O—่jู•็‹>gfญ๋ไฃืs NAค@j“ิ์‡้N 5ั่ีฆEพF!ูภ|—w…ลทSG๘ศ๕ยฤ%,ฅ;ดภฅhภร 1๊ข›VG–กฎ๙ฬGงc[3•4Yฉฯ.yy/ZH๒ ทฤBฆยซt๘PY ูFุ็ไ*8ฉeWสIุ๙ข‘๋นง R๎ท์ิีป๕ญ/กmญ7 /คิผhช๐;๗๙๏‰กฅ-5f`๐ํfnๆ๖(…้Vƒ๘”ปž–ใฤ๖$V๑ !5HMRsฦ4'YjŠ3Ž@jสพิ่นb 5ล,5ู]ฝqoฌค mัํ-biว‹uF๓˜šJ๗]Rsrน,hษ5ฃ๊๐ช<‰฿ฤb$/™ธ๖ฉิ@jRำ0ำ;เ๗ำœTpฉ)ถ8ฉิฤ]jZt์๊~ๅ๓@ŠG่v๑1QK .s—Ÿ!ึค๘fk™‰‰yt/ฯ0Cj 5špฉ9๐”iNโr๙ฉจqRฉ‰ปิ4๏˜๋]๛ยา@Š8Px0๑นฉ8 –wU…ื–…ย๛&๒ฬฏคˆฯ5ๅ;ลh่mjญŽ็O\kาฒคRkฉ9h่4']jŠ+Žd6ชฉิฤZjšuศ๕ฎ|~I E”“ฒฝ‘ฟง†{ี%ฉ5CdH™˜Q8เ ผ—โ๚ ธวง[ิ`„žH ค†ฅฆAฆw๐ISฤจงฆHq=5šธKMำ๖น%ฯ, $NตŸไLยMฬไ9QบzถsqMaบ;vซ้7/ฏํ๊ฃู๘ผป.GK!]xP6‚บ1–ชซa6HAžษห; k์ฅฌœ๐มล2ๅZ>G7ชa้ฦa ต\ฯ•๎n+V่ัUHR~.Žํ’ู๋:X|๚e[ŸOึทณ๘p}V ๒ผา็–<7 aว1•R‡คฆ฿ Sฤ)๛ฉ(qคg๋ๆ~ร๑ํW๙„5D฿LพิBฎ'ถ” Z†ค [ณK”$์~๑N7!้หRTคเ~[๖Ž~ž+•=j๚ปN[ึEๅฑ’วTงK‡z”ว฿๕นh๔๖รไ๖ซ ๛l;ษๆ[.r"ฯ+yฮษsำvS๎7i฿ลปเฉฤIjไฬย&ำเ ข_„็5Mฬ*Hทšฤ๛ฤ1<๓ เ7Rฉิ„HMV^ฟใง8ฉเ…‹-Ž@jสพิ่cฉ)^ฉiœีล;๗‰๙ฤIjj,ซแy=8|1ง}แๅ‰Y,Hณขค‡Cj 5q—šCŽโค‚KMฑลH ค&๎RำˆคfุใŸ'ฉ™eYIฉิฤZj๊ต๒=zฒLพ H ค&๎Rำฐ]g๏ิ‡็‡‚–-8ใi9ั›่ร๔7…ซ 5HMฅfศd'H คR…m;{'=๘I qšณ8ำ`Ÿเโ$H คR“>ฉ9lะ$'H คR…๚m:{ว๗Q qบtri๏hง๎ีFD7LWใฃ‹บXx†ฅtปวVxQ>ฆeศ%Fz๒ตeZตแผฯฮ๒‘้ืR๘ ฒAื uaะโจ…ำ•.ํ’Mิ”kƒ<'คจ,2ำbู†–>๚1y|ดุศํ‡ƒ0แIIj๊ถ๒๚˜ไRญš๚ˆl”4ฎ†ว W[ ิ่ิdGšr:…ู%Ck;โuuZธ,Zฉำฑe,p}ฌ ‹+ํ9Œ0่๕ยาฏรฤeร่s}ึ]wฆ\nน}—8๔>S๎ืk“ใ 
ผ๋ƒ@โะS“(Hu5q•Rฉ‰ฃิ่s.RS—คๆ๐ฃ&:ิ@jขHMพํ”Aฉ ›หRStฉฉ:ว;๒Ž๗‰ƒิ\(ๆ…ะŒิ@j 5้“š#Ÿเ$B!บึ|้x9…ผ\=nๆ}1wšจš-ซy>˜AH คฆKMฬฏ_ $N—ŸŽฒ Rฉิ”ิิi้qุx'คฦLNื‡๏ื%VนBxL‘น/RcS๕Zึ”dฝH คR“.ฉ้ไ:ํ@า ฉิ@jา&5Grซ“Tƒf๘3DObฝšดVึ…ิ@j 5้‘šฺญ:yOz;8\~:วำlTใiฦš_qH ค&MRSปฅwิมท:a!™+๒ฝnGl ๊วท๓r)5w&ฦิ๑฿C!5HM๙—šƒ&ฮ $RsŸ๙Zง1b“ฮอํ^ีolๆ}ฺูย•zkpฅq๋เจฉรRpยาฝe!ฦ bŒฎu]uัFเห†UJ“. )฿‹nœ๕q•H1ฝ^ฯ%ZœยdTขท๖ฺฎXผก•…™2,ไzZr\วCฟ-Xy>ฆ,5โ$๊ถ่V‡˜gฆd jsˆ๚RsW€ิœ\^๗ฦfใ˜๓-ยRo]E,uJsิิแศ’#Sฟ5a๋F,ฺจ|ูฐjQ’4ๅ{ัBจซ๋‡ญ็’-Na2*ั{mื~พyถ\OoCn_๏—|/a…0%)KMหlo[ $N—Ÿฺ–๖Žบคฆ0Bฃฅ&ฌง&ชิD•Tึ ซBํš0ฉั๏ฅ0B&5a=zฃMIH-5Z~ข–šจBSฉฐ8'QถEทช|้*ป;๑หŒแ7๎มiQQ/?นค&ชะhฉ‰ฺS&5กฝ6Qฅ&ไฑฐ*ิ.กัRฃซ‚ปค&ชะ„IMXO‡ว(Bฃฅ&ชะ่}‘ข&5z=—ะhฉ)ŒะFjjed{}วพHœคฆ){…x;คRฉI“ิิ"ฉูwœ“…+ท…ฌ#{jบชยk+ย@aH คR“ํํ3ๆต@โ$5ฆ*๗yœj.I=DL†ิ@j 5้‘šzต2ผ๛ไ$‚ิ๔ใ”mSr!sดKj๘๏ัœ๕dRบ‡T„”nH ค&๎RSณEGฏื ฏ'ฉ™ว/หƒิ@j 5i’šš$5ฝฦ8มไ{H ค&ชิ๔ธๅ@โ$5Ÿˆ๋๊ฟใโ–k 5HM๚คfPฯ?;ิ@j 5š(ิhั๋vํKฤIjŽ1D7ž•ิdO ฉิ@jา%5-ผAG;ิ@j 5šHRำฌฃ—{ๅ ฤFjขsE„uงfง[#โMb฿ฐ mu๎^-ฐšrXฃ6Oฌ–&<บบด|,L~ข"+R๋วไ>†ฅpฏุยBน=-MRไtcฟaS Ÿจ)ั+7ฺศวยdBหDง\ฝ๒ธEM๏ึๅšณฦPlRSƒค&๗'YjŠ3Žtฯhุศ†UY›งFงwป„'ฌบt˜D%ฌBตงฐ๎ี#Nณว@ฟž(๙^tƒพ๊’S|๔c.AX{๕0'Z ไcZ†\๓ศ„๖ฺš5Wœ๎#›fุ~IมqอYSPฯTฟ๛ี›u๐rF<HฅfC„uงf'ฆ#y๙ศ(ƒŽ!5šธKอเ.ฃœTpฉ)ถ8ฉิฤ^jšv๐:]๚\ q—š…xฮžฉู9›"Cฌ/ 5HMˆิT'ฉ้tฝ“8]~*Jิ@j 5ผ๚W ่ฉIm}95๛6๕ุVวs†'ฆ}oัช2คRcฉi๎ ๎xญ“ธHMQใHซ๚u 5šXKMต&ํฝv38”IุAlภ,-…@ไOอฮG F่ฉิ@j„ิtธฦIคฆ8โzj 5š๖^๓ž $ึ=5)"kjv^†หOHM*RSญ™7ธ•N*บิWิ@jโ.5Uท๓ZŸ๕X ลG่v O๔)'๒4eWV๓wvPน•ืิ์\rA๐›RะถrHjiพRjtใชฌฌำkร*H‡UŠ–„ศ”้๊ฬQ_W>ฆ‹&สF๖๋M[6ท๒ูL'ะ้าrŸย„D7ฦ๒ฏ!H ๗CฒY!“ะb!ฅcb]r=\Rฃท๏œฐ”๗ฐ๔t}ฆ$5UIjZ_๎ค‚.ถ8าฃeำภ”฿ฐTฐ๏ยT‰ึlฝo”…”)$z๛ฎJำน}น\€” ์ส‹NถX~๑>RN ๒xศ“Š,ฑ„œ่D๏‡|Lnร _[J‡^Oห\ุบ.ยž–๎๕œ ซŸฒิ4j๋ต:ใ@ŠG8Sั๘๘R”\ษU%Wึ”dษ•’FSณ‰YœŠioฉิ@j šฬN*ธิ[ิ@jb/5 x-_ ล 5ฯ=Uนดว-7 Rฉ‰ทิ4๕ทผิ &฿‹คRwฉฉาฐตืb่ฐŒฬ OAhŽ#n(Ž{'1Lฌ๗ 1RฉิฤYjชิ4“H คRฉ‰$5 2ฝฆ'L$BaทˆฅOฬ1•คๆฎฉ9RฉิฤZjšxƒ›\่Rฉิ@jขPน~+ฏ๑1)lก[wโ;–รo<๕B \~‚ิ@j 5๙ฅฆroPใ œ@j 5HM$ฉฉืาk4ไึ@Š+Žจžšฎj ๐ฺr;Pธ81)A"–์’-S–š็9ิD”šŒ&… ฃฆ†5>Ztใู๐ปŠ7๊ํ๋4๔จล"ฅ€hX๖วc|œ4ะbแ)ƒ}ค,9ใh ูุ‡5ิ-Wr?ดิ่u%ฎ๕ดธศ๕ย {ญจ"–N๎’‚HYj๊fx Œ ค$ค†อYO&ฅ{HนžงRฉิƒิ์ุT๗l'H คR…ฝ๋ถ๐๊>*Lพฉิ@jา&5kŸ้Rฉิ@j"IMๆ^รฎ Rฉิ@jา#5•yk sฉิ@j 5‘คฆv3ฏึมWฉิ@j 5i‘šบ$5ชž๎Rฉิ@jขPฉVSฏฦ~ฉิ@j 5้‘šฝzชœๆRฉิ@jขIMฏz฿ ิคwช๔๏y†C=ใaiภ~u_พOแญ€mฝiAม~ Ž1–ผฉIPš‹ภ~”๕(k๛pพb?Gb๑F0ย~`?Œ 5ุ์โค'=๖๛`ฉม~`?G 5…€‡c?ฐe}?สฺพœฏุฤH คRP~ฅ†nญ‰wˆๅฤ2โr^ˆx“Xล7ฯEฌๆ๊ŸƒJx?ฆ+ˆลฤsD^Ž๘‰Xศ[ย๛1–ุ,^๏่R:O‰}0s),,แใQƒ๘”หุ›W็G๛’ึs Ž Ž”฿X‚8R๑ฅ&ƒ่ร๗๋+‰\b 1’—$&๓\>ชY\ฺผr ๎ว@ข /Ÿ,๖รœhKำx/ํห r_Jใˆ#ˆ##– ŽTผยๆš๊cฤmj๙T5xk ฿๏ชoญ-ฦม[A๛1˜๘œhช–7Mผ.ฺsFAฃ qJโษา8โ˜ผ—ฆใัTdิ$'ŽI๗๙Qภพค๕ˆ#ˆ#ๅ7– ŽT|ฉ้Gx|ืO34mฤ,Nณ›%?D๎ž[ร†=ค„๗c5w๗Y้tฆ’ำ๑๑@ฏcKx?'–๐๒TpJ๑เว!.R๋—ิ๑่มื’อ~,Yi=? 
ุ—ดž#qqคฦฤLพฉคRฉ€ิRฉฉfฏฝv–๐๖_แiฑ โ๙‰—R\G๓บŽว)๎๚.e๘ณ5วโ ๑๗•\‡ๆNœ๛ฉิu UuตR๓Rศใล"5fบ๓r๐ูๆซNLทณ!5ฉ‰ิะญ๑ ฯ๘œจjk๊oL&>%V&*งr)๙งyงˆ9‰:\ท‰™šœ๘‰gœชๅƒซรžอ๗อิู+ˆˆฟ&ึฃ[mฎฒ๛ฯNy|ARรS–฿ษSqฟฬ=GC๙ฑ}ธ์<ฎq’จ…ฒ/ฟ—y_— !๘'W“}ต?\œn_™ืJ์ zjR{ฉก[}3๎Bม๔ฉ9˜๏77u;๘๓ฤแโ9๓‹ 5ฆ—hถX~œšน,‰:!f|H—คๆ6โ\๑๗ณ,5Fพถ‹m™ฺ0oฐœ|)ึ๏กคๆa๑Xเฯฐเ$–ฏใž—Cนึ‰‘^!Ÿ‰)๔ถE<฿๔ภ<ศฬ=aKธ&Q”๎5~aขWRHMˆิ๔ฝ ๋๙ฬBHM?9˜—nฉyฯ!5ๆ2QN*cjXjฮ š๎ๆ๒Nภ๓ 5wŠว๗‡n"9๖ฯ๔]ภRrฆcc‰'–›^ฌo‰ึBZฦŠžŸร‰ฟp!บ*€ิเ๒ำ*ฃ"ฮฟ 5ื๗๐\โืฉiฌdก5?VEjธdz<:๐zOจหOwŠq$ฝ#HอI<^ฆ2iูสRS{Mไ๕ฬๅจฎโ๒ำโ5]Rธ?|๙ษ๔^Uๅฟ;๑eฉถ‰ฦtป"qY/เ=4ๅcะQŒY๊ฤฝHFjjšO๓๙์mคQผoy]sนkค 5q‘šใq" ฎR…ŸW…ƒคฆ6_๚0๋?ส=ูRj๘?ธ!žสOแ^…—ธ%h ๐$!5ฆ1ฟ{9–e90P๘yfจ=›%ฮ”นฟ€—๏/ O$>tHMเฐdLห฿aq;‹6ƒŠ฿7ใlB>—#ฤ@cรqผV–ฑทฬฅ0–šช|ฌฏ7Rศิb H ˆ.Fฆค†ธ\eDฆZ)ฮอ๒RทQGI^>+H @j@HCY—อ.โž!ฅธ/ฑTฝR„mœ&าฎ_NdHU€ฯ้J๎›€๓ 5ทhฬYN บใุ€ิRฉ€ิ@jฉ€ิ@j 5€ิ@jโvะW๐ƒˆ๋š*ืq)ฤ˜v„นSฅ€๕@ผcVบq@jสสIk <%=ฑ•8,๎'{ิ ZLฏีŒx‚๘Š๘‘๘ุ_<~ฑS๐๑D~| ฑ‘ุN|IŒyญ โ~-ณ zโ๕z•,@!ฯํ3ธจญ9พ&^%๚•ฅ๏_)๓]ป5Mฏ5–๘U}ฏฏƒิ@j*ฌิะํ,โSอ_๔ šส๓่ึžธŠ…ฃ21œุBิ Po‹ฟsˆฺ|ฟ๗Žไxnsโbโภฉนมรwศœำ฿™sัœŸDUโXb*คฆ่฿ตcŒ‰GO ค&R#ัพโฑ๚ฤƒ๋jณ๙๒%~ฑห“n๗ำิvgš€ Nๆ‹๕4๑ฑƒ_๙šื๓k™วพ ŽŒ๘>jำน—ย๔t|`–๑cˆE๒R%nแž๓šoˆ Tฟjไๅ็หนG๋uขญุž๙็bฑฎˆŸ้uู'`y%b‘Pว๓Œิ,‰๐+ฌ ค”`lฉฯ฿›SBึู›ษ็๓I วœต=]g.;ฤข|"ฤ฿๑๓๙~Gโ=Ž&=•ย๛้'โˆ้=›—W7๑ใลทฑง?ฑ‰ธšๅฮฤาs๘ฑแs’่}‘—ท$E|ฯ๏s„“gŒœp|8ฟจR#Žฟ9ฎŸ'I วฟ๐๛0วo1ัญ c 5ฅ!5โฑงz์yโ>…e.|J\pฒส_๒JwCพ<าา!5?GsฤDโัฐQ<ฯจ" l ywq๐jล=ˆฟhญ8XอtT<๓…๎ฤbdž O V]X n4NIอ›&0‹ภถ˜aw;O/>N๕;”ƒ`€เด“๗มฬ"Hอฟ™yฤษ ฑe0๑[Xoฎ0฿sฎ๒๗ีฤ›'๔๗ciฤsฤ%ิฎ…s‰w4ว‚๒2^2฿!ว~ถแFt๎mjlพฃุm|9ื|็๋91qMH97๓๓Lฺmbdะ/๓CTใ\๓]ค.!ภ๋ึไห{BhS€ิœย"eถwฑห฿€c;ˆ๗ญ Nฑž๓HMiHอv๎Yู[]ข๘ดmBฟใ0xc่‡๒฿จK#Zjๅฟขพใžฃช)ผ‡ฝYขz๗ดิ˜๚dขิ˜โฟ$๛9Šxฮ๑}•๘๑ลหL๔าฒิค๖วฤธRณฟ‰ฅฏฐˆŸณ‹0ฆๆ%<-ึ[Hpl Vrฯ๗Q€ิ”†ิœมมๅ!ัฒD•_#?ห‚ฎต๒@ี{๘;ชแืR๓wืus—๘าฮ“A_:ว [/h‰้ แ๙>v%~‘ษ€1@~ฮฝ!r{?%ฦ!๑๚ูE๘†9[์๗ ˆckๆ๒e฿o๘๛w8wฏโ›5š+šI:(ฤcโ9r{ง๕พr,๊ท้์ำฐMฃฌ.>K€Fล€^“๖nไz๚}ึmใc„ึ็NคG ๛ื๒๖้Q=ŠGสอŽšF~Ÿ1ฏํa[๔9xา‡N{'~SvrะฤYr๛๛{ร"ฑ†žฃ^๖้vํKนWพเำ๙Š™๒1๙œ>7พj!๗ฉ_ณ8ๆพ|N}xŽลฐว?๓9๗‰๙…โฌ๓œศํ๙Z<ตภ‰\Oฟฯ#๏x฿G~fyผ3‡=์ำpะ8‹š\ๆSตื9Nช๕9฿G>วPศั>อOบอขอ93|ไ็n0ฉพQฯฆ$.Wj๋$•mล#G๔Ÿฐ‡&๙6xฒลกGGdH’รMฒ?ˆ‰‰}0yศญGx‹ฯ€}ว๙ ์;ึB>&Ÿc8โฐ๑>N๒9ไธ) ๆsภ้6๛ ›žไลภ07๛!๙ž+ถฟ๏Ynไz๚}๖;aชฯแGMดว{p๖u6อ.๒X๋>Gํ}ชMฅกIิc๒yƒ_เ38s„M็‘>๒s7ค๚๏C๒๒๋ืฉqRฉิ”ฉiFRsEๅvN 5H ค& ฝ{T๓vี.H คRฉI‹ิ4ฏTอปฎj–H คRฉ‰BฏUฝ7ทRฉิ@jา"5-HjFUk๏Rฉิ@jขJอึอ™@j 5HMZค&ฃRu๏ฆœ@j 5HMz’ิ|ทนe š4bฒ”rXป>บฑ—"#eG7z}วพ๎#Eฅ คt๔บแŸืฟl!ฏN—>gั๑โgษ๑ผ…๙Z)ZZสไ๛–๒ฆEOทw}`!ฅ้๘๛?ถ8้มO|ไzzG>'L2ๅr๙ูคธศืา2งฅI>&๗IKฐ<ฆ๚sj}ึc>-†แำ๘˜‰NยคF‹o*ค%Iอญ5;:ิDŒ#๕Z๙rศฑSœzL-7๙ 8เfŸ#o!EKส!T๒ฤ๑‘’t๐‰S-ค4xjง$9๘คฉŽŸโ#_w!ย)?O).๒ต RpดD๐๛i>๚ฝษใ#?ฟฝฦX ๎t}’ึ—ดธ8˜0ฉŸง!ี๏~’šอ›2ิ@j 5šดHMซฝซ{kutฉิ@j 5Q่ฃŠทaS‹@ 5H ค&-R“IR3ฝNถH คRฉ‰Bท๎Uผ5[ฉิ@j 5i‘šึ$5wิ๋ไRฉิ@jขะต{Uoล†Œ@ 5H ค&-Rำฆru๏๎๚9N 5H ค&ชิ,ะ2H คRฉI‹ิด%ฉy aŽH คRฉ‰B.Iอย/3ิด๑ฝ๖ส! 
ถWˆ7‰Uรง‰oŸ๋]๔ฯ…๙ะ้ฦฒม•™n,])แ’\Oง…K1’iแZŒ$r=ƒ–๒ตคศไ>๊ใ3ไž}t*ตLณ–Ÿล%ฯ,rขSฦฅœHมั๛฿๕๊}๔12คSมงฯ^ํsœ๕>rนA>Gง๚หด๐Vg<เำ„‚ทDJ”C๛ Ÿ๑ัว?•าฎr ๏ฑฦTdฉ)ฮ8b๊๕={z>t๚ฑี˜ฉFึG ๗ž4n‘ฌณาฟ•4Yˆ๕๒ฅ… 1:๊เ[m„Yจ๕,กRย#ษ'cBๆไ>j9<่ไฉ>๙Rฦล๑–Ÿล>็ฺ๔='‰œคœศ๔zƒ๛๓ัว@АN๏1"ฯง๋56=.O"Ÿฃ๗รJ ๏xญMซห’HฉQ๒3จ >–Hฉ~๗;wฏๆ}๚e@ 5ฉฆสฤ7D[b 1’—$&Cj 57YUjx3štq—žšขฦH คRSอ๛d}ป@ 5ฉฃฤ‡| "ƒ๏g˜ฟ!5H›๖$5l–๋$FRSค8ฉิฤ]jrบW๗fฏ๋ค&ต`๔q)฿฿ฆ๊xฮps ušd@j 5ฑ•š$53[tu#ฉ)RฉVป!คRkฉ้DR3k]ง@*ผิะํค!U#ถอS F่ฉิ@jGวช5ผ3บ9‰ƒิGAO ค&๎R“ญ†๗ฦฺฮด-บต&!–หˆหyyสใJKj~เ_F‡ฐ&B0:žxCหOHM*RSญฆ๗JNb"5EŽ#HMฅฆ#IอหkปAjฬ๗ฌ฿ฏKฌ$r 3พญดคๆ๏M”už$ฮOU`Jณ ๖์ํญ๚n๛ึ~๏f`ลท?๚,สf๎†ญ>๏ฎโอ•฿๙ผฒ›Hฬ\๖ตล 7๙๑:‹ัฏ|๎#z))Zš––&ูp๊"™™ชฎท!_Kฆfdzทk_Xjq๋[_๘Lyw•ล˜ื–๛ศTl๙พ บจD ศธ7VXฬZ๕ฯโฏถ๙่ฯmฤณ‹}๔๑‘)š-?'-o๒<2wƒE*ม(ปzM๏ต์Nb"5EŽ#ีฺdzm๏š๚?๎ แ๎$ํ๎˜f‘utŸ๖S“tœgั้ๆฦนษน)I—’tปาฆ๗EItc/e"4=]คp๋bŽVใฉำะยฃลKพ–Lอ6H!‘๛็<‹ž—$‘’aว@งcห๗ฆ‹Jค€๔บ8ฯ"๛ึ$Yyำ-ไg&%L ก•าญฉูR๗=sบ…<นืค๚๏ะญฆ๗šžคบ-บอ$ๆFนSCทZใS_,kLฬโฎ*๓#H คR"55jyoๆ๖uRัฅฆธโคRwฉi฿ญ–๗๔๊>ะถึ'ฦŸ1รCพ“ํˆ Dฝย\ .uฉกAฤฤ™ านฃHMœฅฆSอZฌ๎๛:มไ{ั€ิ@j 5ตผ'W๕ $๊ถ่V‡˜gฦีv|[ฉJ '>"๎&๎` ฉิ@jา$5ตj{๏์s€H คRฉ‰4‘gทฺc+๗$สถ่V•xธช(ใJ[jฬH็JฅŒ 5š8KMNํฺ์rฉิ@j 5‘Jฎtญํ=๘ลมD(\‰xŒธM-Oy|[iKอ?ฉิ@jJCj๊xฯ คRฉิD“š:_๔ $‚ิ๔#ฬลขlษั…฿VZูO//p^๚V๎rz!A:wดO๏^ฯ;ถๅg็v'?ํฺ้ณ๛งŸ,ถ๏ฺํณe๛.‹๏~L๒ํถฎวพVฌ฿ฒรgูื?Zฬ^ณลgฦ>บแ— ฎ.ุ(Sืต ษB•RNtJฑ+๕ ลBKอ้~๊#Sบฏ|~‰๙^ ๒yr{๚ตไ~่ย๒xH‰4$ุฐ้฿;}>^ƒ…G]ผTŠ‹œ@ห‰|ญm;w[์ุ•dป"•`ินn๏ฃรuฉ‰F^็gวๆ#็_ใœtRt|๚ŸฌŒ๗i๛ุD›G'އ&'นoŠOปฟNณีe”Mห’ศwฟa6ฒจค!ƒ,TฉeH OX๊ทLcืRณำ|ฌ”๎๓Bะ้โyr{๙ZVJฝ*)EHG_€‰6Nถ่09ฯGJ˜.l*ลEลฬฝ.ฯGพVึŒ๑ํŸผ5ษใ-R๎ท!ฉนkE@โ0๙aa@j 5š4IMฝบœA‡;ิ@j 5š(ด๎Zืปm๙‘ฤฆLBะD:้ž\Rฉ‰ณิt!ฉ๙lศN 5H ค& ™$5yŸ$NR3?`ูbH คR“&ฉฉ_ื›'H คR…V]๋y“— $—ŸD,!v๓ภ ๋ขฬ$ ฉิ@jŠGjrึ๓ž2ุ คRฉิDกen}๏ึฅฟ $RSŸg |‚h+H๛ศfH ค&๎Rณ๘๗CœDศZจA|J,โBtใ *DGทQฤjžƒbคRฉฉR3nษฑฤโ๒๖&––๖Ž๖๎o~ ๑H %ร ลโซญIพa‡…lˆดtH–c#Ÿ'ลeใ6Rxไjไ>๊ื’ฉๅฒ(ฃAฆD๋bŽRxŽผใ}]ฐั•ฎ‹]สโ“YจRฆ,O|{ฅ…L]ื.ๅJฑ3๑ัZ-/~Nู—็„ 9ษิo๙Z†{?Y๏๓แบ->Z‚ณ๕$฿oดู๖ฝ–๎THืF๕ฝe<ฦIฤ๙%๊ˆษณๆธ ัq‘:#@ี‰,Sธ–จ\๎SบณZ๚Aปว'๘ไ‹‡R2 ›’D4DZ:ฒnOข“ฯk{ฏM›&๛ด}d’฿=๛,J7ธ๒ตฒoษ๓ัEeJด.ๆ(‹z๖;aช…Lƒvฆซb— f! Uส”ๅž—ฺ๔พ0‰|ŽA๎oพTpฑ ™ /Eย36‰Lื7ศsBหD›๛ง๘ศcฌS๏ป]•คใD›ถN๔้๑โ>}^นมขฬ1>ZบS‘Œ‹O$Ncjfm 5HM)IM“๚๒๓Žw’สถธŽา|bืL K3J<วL็p คRฉ)฿Rำ‚คๆ๚E''ฉy›ุม“๊”ส<5HMผฅฆทโ‚D)DgzZxฒฌขG&ฐf ๎$†‰ๅC!5HM๙–šๆน ฝk $NRS๊๓ิ@j 5q–šnMzซ.9ลIŠ=5 xBอn!RsW€ิœ ฉิ@jสทิ4๋าะปbมiฤFj8จ5'Žašแ๒คR“Fฉiึศ[sล้NR Ftป‰ธ—Ÿ 5šxIMำ.ผK็HœzjN%พ$ๅbV๋า ฉิฤZjš7๒ึ^=ฬI„ยMM ฿ฏIผฯ?P ััญซ(ผ…!5šŠ!5š๗‡@โ$5‹d๏ ศE)tu?Cฌเj฿†ฅ‘บSบ{[)ฺ.~พีG6(VรCศด๐ฐtoอึป|ยRบeฃงท๏ฺ/^ค๐|๚ๅฟ-คเ่B’gอ˜็#ำฅO}xŽ…,Lฉำย%Zjd*ตLำึE%ฅเ่วค”MŸฝฺ็ฉE›-คLhแ”ลKeฺถAŠฃ>™~ญำ๗%๒ศ7}ภ?$็฿\๋ฉ๎-{_ŽVjHบพ#)'นฯ฿ไ#ฯ?ƒk=Cช"าคKc๏‚ฯฮ $NRณ$ อ{Iฤ็š๓๙~5NiคH คฦ!5MผcฮwRั'฿+ฎ8ฉิฤ]jwn์ู้ฤIjฆ๒5๕ณ™WฃบีใKU•ิ๒ภ๋๘H ค&˜-›z›oนศIE–šโŒ#HMฅฆQ็&ฐOฮ $n…O&๒ˆฟ'F|N/žล๔๎๚~€จํสธx๐DŠj๋ึญ!5š๘JMซฆW.vRมฅฆุโH•&๕!5šุKอŸœHฌคฆมจ/๑›™ไ‹พธ%j0BO คR๓?zf6๓พ:ยI—šb‹#่ฉิฤ]j’ิœ๚ั…ฤ้๒ำI<๏Gb;Oฤท=ย๓Z˜Iมฤ฿‡/ใ๒คR“ขิดn๎}๛UN*ธิ[ิ@jโ.5 ršz'}๘ง@โ$5ฆจ]—B$“:šร๗ว๒๘œภ4า Z& ั0X‚`้ดฟ~ต2ษฦฅฟmX์๓๋ฆฯmฤ๓~๙zตล~ุ์#GสVงห็9%Œp ŽAลผ๛ใu2•ZสŽ\n๙า2-FRVtสธ%)8บˆeXบทนžLฃ6ผฒŸE›ทYHฉัวGJฅั]ปฒ“ง‹s`โœ๛ๅป/-๒ฅx R•š-w\ใ$…‹%Ž˜‚–‰†BฆษjY‘้ด‡ผuลQ๏\HYWYศ็ๆตฝ_ํ#GƒLใถาป…เhษั |/.ม1ศ๔bŠ,Sฉ๛\`#๋=<‰^Oฃิ)ใ–(IมQE,eบท,ิiฐไ็›nW'้4.I๛้ำ-,ฉQฉ๑๒xK5tx๊Ÿ0้;ฯไyะ๗•Q…ข0Rsย'ฉ๙ฐˆืร็r*้๓&ํ2,RฉิไงW›ึ๛F9‰ิKิ@jโ.5๕IjŽ™}i q’s ๛)โtพต‡t๎(คRkฉi›แ๘ะŸTtฉ). 
5šธKMฝœfเ๗F'ฉy8€‡ 5HMzคฆwป o๛ฃ79ิ@j 5šHRำฉ™7๐หA๖SRzFAj 5š”šฌ–ฮโRฉิ@jขP—คๆˆทฏ R“”š๙H คฆฅฆ}+oืำ“@j 5HM๊tj๎๖ึี@j’Rณ ฤฅฆWOwั@ฟ|ปฮวรๆIดิH๙YฟะB>&/ๆ+S…eฃjนgไ๛kฝต฿'yvษWS]ๅ&๒1r-ๅGหสนOฬ๗‘ฒฃฅIฆj๋ื–“ฅ Zฮ˜ฟั็ี฿[ศ–ฒธจAงx'ะฉ๗๒ณะวุUจR‹ฎไ฿ึ-ฐqœ;†ิค&ำ๕ฬT'šhิ์˜แ,่bืFZH9‘ ‚–)็ฤ)ฅ{ Wสญส“\m!†ฅ5ฅRฉ‰ติด๕~~๓!'H คRIj:ถpž๋q’š…‰ฤฃfๆNbคRฉI“ิtส๒~~็๏N 5H ค&๊w@ž’8Iอ2~b0฿Oฏิ๔์แงะZiณB\๖๐อZKbยฤE6JHญ—D,ื L – งA6ฒa ฑK~ ฒaึล.™ปมGŠ…) —<ณศBงqKคิ\๔ฯ…>Z\ไ~<๘ู—r?คเhน’ฯymลท 6m๕Y๙ํv ืqี้๕VzทN้vˆŒ–“ฎ๙ฬ็ทe๏Xบ๐uŸ฿–ผe‘’ิได๗๓มSN 5ฉJ•"กลEžิ".qัศIอt#u๛—๘่วdC'Sƒeรiฐุ{งZศTp๙-?ฒaึล.sฏOข‹EJaา๗œ้V๗™6๛1I฿ณ“hqษฝ.Iืkmคh้‚œRฎไs:lำ~ฺtŸถwNณ‘‚ฃŠ]สB•2ฝ[ฆpคฤ่นRNd ฆ3>9฿โ์Oฯ๖9sฮน…‘Wึ8Iอ$b…ฬ— šs 5HMšคฆ3IอGฯ8ิ@j 5šจs5้}Lซย\@ฎ2฿ฏMด€ิ@j 5้’š/Ÿฮtฉิ@j 5ฉN@ฉ‰ฒ-sต†๘‚XMŒ,ฏ=5ตˆ‰ฟ๑฿ูฤ1H ค&MRำ%๛uซN 5H ค& ี;ดฬ7๎'AA2ฤข=Qอ C!rหฃิ˜ ืK๙๏š‰มรH ค& R“›mmKฉิ@j 5QฅFOAj$^—uำQ๛ฑ$คfฎžd…!5š๔Iอ>];ๅถRฉิ@j"IM๛–๙๊% mญ7 W.0”x@Gโฮ๒(5q๏ฬ|ป๑izฅฆ{พbARใ™|ฒ"Sตตธ8RrCืU—๛๔ำ๎ฒ๐โ๒o~๔ั—}ฃฯ—?์ฐล3๕cRpžZดู็ถึXศ‚–Z\คฌŒymน…L—าtœ๕ฒฅ/™š-ำถuAKน๚๘HY‘,52^หก•Bฏ UZi๒X>โืน/๙่ผ2ํ๚?ณŸฐHMjrผ~๑HMั& KŽšชญๅฤ•’[ะบR†ไ>e?sณ…,Lู๎ฏำ,ฒ๒ฆ๛ศๅm๎Ÿb!‹gถ‹Sฆ๛tgัŠ$ฒ ฅ.Z)eฅ๗Ey2e\K“ซฅ'ƒLอฮiำํช$r฿ๅฑ1Xr๘ุD':ฅ•B/‹Tคฤ่๓เ๗_เsมgg๚\ฑเ4‹k๕นzแฉ…‘)a’=5งHอๅQj๏฿3ุๆ๚G|ฎYw‰น\%z|ฬ<7oซ๘†H ค&Djบu๖ป๊c'‚Qkโbน™ขธœ—๗">I|?‰Tื๒j8จTU1ลHMู—)4šโ—šj$5ํŸผ5ุ\~โoLฮ &šคŒšฬP<’๏$&Cj 5š0ฉ้b]ๆาDFDพ_—Xi๘oCx๙ัฤป|?—V'ฒxp`ๅR–š"วH ค&๖R“ีาšธQ!ŽT!ึrLH ๎Z{j*™ฒฤปEWˆ`d~๙eˆ`๛คRฉ ‘š๎]BฯำTfr์๋ฤiผ์tโAฟภxฝห˜ิคG 5HMซ|๏%Aฤ”๎ฃ๙G‘๙ก3บผฆt฿Ceบฎลœ5ŸE|๎:3‡˜—tDทmjญŽ็O Xj“ู Rฉ‰ฑิไๆ?ฉl‹nํˆ \ฯญ ฿฿Hl&ฺ๒:wส๚nt{ะ ,Eฉ)–8RญY=H คR๓๘„@โ4ฃ๐ยf?ัญ%฿ŒปชŒะSฉิ๐๙ORฃณฎTฉะฌ๑]ซรbpWโdพ*๑฿ฟ+@jN.Eฉ)–8‚žHM์ฅฆ]+ฏํฃ‰“ิฬแIwrำT N i,qMก.?๕่–Lฉ•ล(5QฅFคf๋A—ฒมส—i"ำwๅ6Bฦ;่bšRpdร,%ฦ๐๑๚|ดฌ๘๖G-52ตy๖š->ฯ,l!DŠŠFJ‡AnS๎ฃF ›9ƒ_๙)1YจSฐ4ธRณ R\ไ๑ึE+-ฉ‘, ๙™ษ๓ใ—Ožณ๘yึฃ>?ฝ€นž!%ฉ้ั5๔ผุm\•/#]%–h./‹หฬหโๅงโŠ#ตณ›ฃิธRธ R:Ž™}ฉฯะ/ฒ ึฉ]h!ำwๅ6 ๒1™๚-Eห G[ฬบ}š•ชญค ำ|ดิศิๆŽ๒,:฿˜ค๋5I,Q1\–D•4ศํ้tr‰%eบจค”0๕<)2ฒPgึ_ฆ[ศF]gIqัR)'ญ“R# Xไนคฯ‘K็แsรข}nZrœนžกPR#eV'ฉ๙๑ฑ‰ฯมไ”ฯ3ๅ๊Š๛๑หSีฟ)H ค&ฯWi^a€Ÿ–วˆิ๒ๅ‰LFบizq๘~W5Pxmi .ฮ8ฉิ@jZY๓Ib!5t›8ˆ่L\B\jฎรG Fํ90.โ4าั"“jงbšAj 5šฮ1ฑค&‚ิ๔#ฬลœฝ๕ใหQ‹ธGv๑œั<๐‹D†T)IMฑลH ค&๖RำถUพใ˜ N=5—๖ŽBj 5ฑ–ื˜2“๏ERฉิดส7ถ*Aœคfœ$˜ธ๖ฉิ@jา/5ฒlƒRฉิ@jขJMพฯš‰“ิ์ ๘ล $ไฟทCj 5š4IMฏตH คRฉ‰$5m2๓ี Kฉ) ศ”V+ฝVษŠKdปzŽEXq@]xPโœ|Sื‹๕t1Mู๘สย‹)ฎโ3sูื2]z๑W,ึ|ฟGสฯ [-fญ๚ฮ็‰…›,d!ษ?ฦBŠืF’’ฒศฆAฆRo฿ตBฆคหีb'฿—|/๙Z;h›’Ÿwl๓ฑdE Hแึ๒)?O™ถmฉฺ–ผผzŸ”š7ฒHMjzzู๖ฝHM4๊vj˜Ž-Eล ๊วN๚๐O>ฒ่เฐOฮ‹Œ).น}นž–+ู๘สย‹)&ๅYไ”Dvฬืุ฿3ีGสAฬพ5ฯงห 6ฒdฮX)^๙RอลเU™Fญgภ•E%ๅไ๖ๅ๛า๏Eพ–.ะ๙ูฑ>ZVzพ”Dฆp๋จ๒๓ผp๎0‹๋์3nษฑ>ท.…|์ฦล'XJj๎žHœzj๚`ŠZVิŸิHกิ”]ฉ‘B“6ฉ้ห๊Uา@j 5QคF MY•‘ฉ)fฉi™ฟ9Sฺq„n๛%%ะํ8™ฤPR๓ _zšว˜๛Ÿqš็@H คR“ฉ๏Iฉิ@j 5QฅFƒe@j53ž,๏Hผ]œR๓ค,Zลล๎ๆTห…H คฆคฅฆท๗ำฎNJ+ั-3ไฑc!5HM”5u‚2 5KB[TœRณะต Rฉิ”ผิ๔๎ำ'_‰I)JอŽ_V็š9n 5HMู“šฌฆRคfua+Œิ<ลE-c๎&žๆูF?ƒิ@j 5%,5ฝ๛ไห๐’”ขิอ“฿e‹eฆฤย’ฐ^H คRSJR“™™๏ณNPคๆ^ฎZP)`Z™ฟงิิ$ฎ&ž#ž็บ+ตxถแ:้xณ๛tอ๑%!4ๅZส‹NีrขำreB๋ฑOgZศฦ-L~~]๘zU|pฮ>2ฺ t-!Rjdฺณ”รช๏ถ๛่to๙˜Lึ)ใ2อZ‹†Lอ–๏ล`ฅR‹ห#บgม๕]d๒็pbษ‰šทลYไิ ฆฐคXช”ล'uz๖๎๏L๒\žอฬ“ˆ๕~z๙n‹ิคฆw>1””f0โ๒ ซ‰nฆ ๑!ัฐ,Žฉiุน‰/ RH ฎ”kู(ฮœsฎLษ5Œ˜{ุŸๆมG/ิย#ท๖งg[Hูฒw5เS6่ํงNทRฃ Uvœ˜คํ]S“่to๑˜ตoฅ;RณuzvึŒ๑2•บำฟฦYศวไ1ห นฯ฿ )'†พฏŒ๒q9ีSศ"งy^ษโ“RN “— ๖นm๙‘yŸ๐‘๋&,โSฉi?}z e@jL”'x&๓1ซyLb“!69ฅ๕f‹*5Z:\BSšRฃๅม%4…•นผ$คFหIaค&_fOqKšืจ0Rc –)4ล(5ฝHjdฯ—ฆ #Sna ืˆซQV ปค&l™จR#…FKšยJšยJ๎ฉq MaฅFฯƒSฉัrโš’)4…•-ห…‘)4ZjคะZj”่&(+ <^๗Xฆ}Iคtวืฮื๑฿ฝL๐‚ิ@j 5้‘šž$5?l฿ๅค/?ํršฌศ]ฅ5A'คRฉ)˜๊ญ2๓NLYพ Tปค็ฉ1i๕‰bูbH คR“&ฉ้ีšAYƒ”nH คRYjิ„Œ ส@๏พ๔โŠ’”š9?คRฉ)ฉ้ARฃbK 5H ค&ชิ่ฒ ส€ิ˜”J<^w~IJอƒฤFdL–q‡ฅ ฉิ@jา'5บ~–Rฉิ@j"IMหL/{|^ e@jNใ„$ร™%)5ต8อสฬ"<—๏ื€ิ@j 5้‘š๎={[้๓H 
คRฉ‰,5ทไ‚‚–ัํซฒนtEผฤ7"ไ๙-Œ’ูงKถ๗๋W‡,.(ๅp‰E*••ญ๙DTee9?ษ>x*๙KQˆPJวขอ,คXศสุ)1:๕[nCฮ๓ขๅDฮฃ๙oูพหGงปช^๋Yn]ีฐuE์D๕uƒ5‘!ฌšบIuŒ-นาซ็$๒ฯ)BJฬ‘"ลeืำ“-vใŸ]ONฐyfชžTฅF?•ฤAjŠ#Ž4้าุ— 9_ˆN–’กsUVึ VXee9?‰l๔ W,8-Y๕ะ…?๛่‚Yyำ}คXhq‘ไห†ฉพz>)'ึ0Mถฐ๕3ฒโตN–ศjุZHไผ1Z:ไ;ห็ฺ…C-ค€่ดํ{W๊๓เ๛ศๅ๙œฉŸด(Œิศ๙‰$^j่๖"งh’B0บŠ$‚ัb$฿IL†ิ@j 5nบ‘ิฌฅฯษELคฆศqRฉิิŒห $R“˜A๘vžU8‘7nห„จตaˆYฤ"™๔๐ พŸa†ิ@j 5แRฃ/J*บิWิ@jb/5™๙.C&ˆอๅ'บอŽฒฬ๑gLษpขฟFิ:[ฯฮcxๆถiั Rฉ‰ญิtํู+_™I คฆXโHต!5šXKM ’šฮฮ $NRณ\ฮ๊Gท,ณ,ย๓Ž1uข๘~สม=5H KM^๙>?IE–šโŒ#่ฉิฤ^jZdๆ+‘ NR3˜ุ@ผหฌ'FxDbฏ ฑ›๘;.?Aj 5ฉIM.IฬVำTpฉ)ถ8ฉิ@j2๓Mซ์'ฎศ“ฉ^ˆภ$aMUฆ(59Y๙Cพย”B&คœ่tฺฐF*4EW`mS‹‘!ฝฯฒa–‚ฃลB^^ะ…/ฅhไX น\งหi๖uีg—ธไ>[ฐๅr๕˜Lฑ6HYฑฤB กU[ ฆ<ฦ:-_ฆโ‡ค๏หํ๋Tm—ธ์๘๛8‹ํไฃณ„GgฉJ–^I\Rบ‹Gš็6ฬ']|Rส„”ƒlคdฺํ]+๚[„5Dข๋’!ฝฯฒa–‚ณGrDบดU‘Zฅ~๋สึwป‘ี๎๑ >ฒrธAŠKŽนฟRNบอc!ื“)ึ)+R, ๒3”’กSขำ๒e~X๑R)ฉบ๘ค”“๛ฟ่g๑ศส#!…GŸg…‘š‘yฤa pŸAฆO!‚Qc๔ทŠoฉิ@jt้+_œ$ฆR“rิ@jb/5อIjฎห $Rณศฬภ๓AธXŽ…ิ@jโ,5Ij๔‰Lพ H คR“้uฝ6/8Hน†ฝึT็แSH คRSฒR“ำฝgพ™ฃ%m‹nญ‰wxะ2โr๑ุe<>e™ผ„CทQฤj~lคRฉฉRำํ๊ผ@0ฃpิ@jโ,5บ๕๔]ฝ“R“‘ธTLทบฤJ"—8œx+1NŽnอ๘\๎ฉญฮูŽkฬŒพH คฆœKM3’š+๓ิ@j 5šดIอฌU฿9I5ัm&1€xš8*เqำK3J:q คRฉ)฿RS“คฆ; LE‘ธฟ‚ _›‚” JฃืทHMvค (ฐŠ ้ ˆnl~|่ฯNd#%ฅ|)ป"]WKS˜ิX้2]พ”ตXศ”๋oทํด"eัG™:mฐ^O}$\ล! *้*ฉ J้ฐา๋CไS*•ฎด|)IฤOฏ็ฃฅรuพlฝo”ลถF๛„W๚L%€dwํ้ฝถโ['ฉl‹nํxІzฤBb1‡xุ—ืน“&ž๓ 1ดผKMFn|’bะุ้R:tzญllfฌฺฯ็ษU}-[นฟl” ฒaปc๙แRpยคF6ฺ2-ู ,สZ,dสuG&Yศโฒ่ฃAฆNห"›‡ผu…ซ8คAฎ'‹Cjพ{นNซ–ากล%Ÿ—Pn\|‚OXZพค „ฌ'%CŸ๒‘็Kุน&<๚ฑTH;’šGๆnpฉ‰F๋ฎu}yp4H!‘ แ…5}ž_ำำ'LjtC$ฅF9tI.p”šž@6๖ฒ็ั๏]f!Sงe๚ตแ€ืฏ๗‘… ‰ข ™.ญSขuH‰,)—kA“๏KหงN‡w!%Uo-•)'a๋ษsGŸ๒‘็‹มu.้s.ŒTฟ๛ตHj๚\Hฅf5_พN๔฿[ฝพฉ์pgโโRขKบƒคRgฉiนปwœ๕N 5H ค&ชิ์s๔@โP&!ฌๆS#H คR“ฉiCRs๏'๋@j 5HM$ฉiLRs๎๔@โ 5๋Dํง[ˆ๘:H คR“ฉiน›wวGk@j 5HMTฉ้{๖๔@bS&ม\3™โ๏!ฤtH คR“ฉษฬ้ๆMŸฝฺ คRฉิD•š}ฯœHœคf^ภฒนH ค&=RำชS7oโ+@j 5HM$ฉi”้ํ7lz q’“W~#ืŒiห#™_๐ผฤงœฮตฬิ˜cu$V๑ ฺVฏ6-๒5 Aศๆปผ+|พ:ยBฎ&.a)ฮโ–ั€‡bดRUบฑDหะื|ๆฃำฑญ‚™R,t ผK๔๛ ซhจฺพ%2^ฅร‡ŠJศ6ย>'WมI}พH9‘็Kะ9“@ฏ็œ‚H%€ด์ิีป๕ญ/œTdฉ)ฮ8าพ[-ฟ1๘ื๊^Nd๓ฺฺ.oฌํ์#ื +Bจ‘B–b,๐ฐBŒ2๕ุ ำ%ZŒŽ{™ฆm3ฅธ\>uฑฮ[—.r฿ตธHฑะ!ฅX่ใ่’ข/ทง?'๙yJIีฒ+ๅ$์|™ตฎ“…\ฯ%8‘๊wฟ6Iอ˜Hœคฦอ4ฬํQ ๓ฬคuฤํfพ›ˆ)ฤH^>2Q๘ Rฉิ8ๆWษ๎๊ymน“ .5ลG 5HMฆwภ้ำ‰ิS`ชล3–๎ฯน๊ผ<ร ฉิ@jดุ่ี๙า2'qนTิ8ฉิฤ^jfzž:-8๕ิผCผญ‰๘ส<ฯNQŠ|›Zgซในร…ต2ีƒิ@jb+5อ;ๆzืพฐิIE—šโŠ#MZVƒิ@j 5งL $NRณเ`"ฯtฆ”ฐu‹ŒะSฉิfrฝฯ.vฃžš"ล๔ิ@jb/5 2ฝƒNžHฌ/?ัํฝB<็&โ\~‚ิ@jR“šฆํsฝ‹นะIœฒŸŠG 5HMฆw๐‰S‰@แMˆAQพlภ๗k๏วSีฟ{}zถn๎7฿฿~•OXC๔อไK}๔zฒaKฉ ฅ#U8_มลˆยณ๛ล;„4๖RVคเ~[๖Ž|ŽnTรารjนž”}ฌไ1ี้าฎBZ0]Ÿ‹Fo฿%ท_MธุbำุแN6฿rQ ๒ผ2ศsNž›†ฐใ˜Jiาพ‹wมS œT๐ยลG:vซ้7/ฏํ๊ฃ"ู๐ผป.B6RR„tแAู๊ฦX6ช:=X6ฬRtšฒl๘'/์ฤีะคจœ๐มล2ๅZ?/j๚qXC-ืsฅป\ a…]…$ตœHt:v˜ฮ^ืม็ำ/๚|ฒพล‡๋ณœธฮ+ynยŽcช฿:$5N˜HœคFฮ,lา'฿ ๚Ex^ฮ–ZL,%ฦ๐๒ฦฤ,ึฌ(™THMœฅฆqV๏ฌ๓œTpฉ)ถ8ฉิฤ^j๊ท๒9nJ q’šหชงsG!5š8KMฃvฝำิ &฿‹คRฉiๅzฬ”@โ$5๓ฃ,ƒิ@j 5%#5 IjN}xŽH คRฉ‰$5๕HjŽžH ZถเŒงๅDoขำŸXฉิ@jา#5๕v๖Žฟc'H คRUj<98HอYœ>นƒO๐qคRฉIิิk“ใ น็C'H คRUj๚œHœ.?\ฺ;ฺฃUSฟั “ฤี๘่"„ฎถ@ฉัฉษ)ส‘O€DH“o;โตญดpBญ”ฅS“eฎ๊ยPP๊ณ —hด0„ฅ_ปฤeร่s-ึ]wฆ~ฬ%8๚œ “gื๛4ค*5๏๚ภ ค&บW๗ู(i\ A60ฎV7ža))a…ๅc:ู%Fz๒uuZ๘yŸๅฃSฐฅ๔ษ]ซย wƒ– Wบt˜6ๅZžZV~™้ณlCKน –นฐc๖>S–šบ$5&‡žšaีฤUH คR“ฉฉ:ว;โ๖ูN 5H ค&R,!ฉ9ˆ‰ฤAj.“]iฦ@j 5š๔HMฬNay๏:ิ@j 5šHRSง•wD ฤ้๒ำมQ–Aj 5š’“šCงฝใRฉิ@jขIMK๏ˆรฦ‚”nH คR“ฉฉช“w๐คท@j 5HMTฉ9๒[‰รๅงy<อF5žf,ฑRฉิคIjZv๒–H คRฉ‰$5ต[zGtK qšรxฬืj<›์t๎h๗ฦfใ˜๓}\iทWKฮ&5aล.‚cฉ฿ืz)l”ฝ– )Jฒxฆ~/ฒa–วTฃซkฝ0)ะ๛่QFุ>†ํห—#ฯ๖‘ฃ‘๋ไ6ไถu๊ท|/ฎ"˜Aค@jตฬ๖๖๗†“‚ถEทึ<ƒ™sjqนzˆ4wšˆeฃˆี\8rPEš๎Uฦf—ญ}ยาnuฃ"ำธe๚o˜ิhคเ„ฅ{‡btญงื +ุ({-RštM๙^dร,ฉFKl์ๅzaR ๗ั%ขน Mุ>หXผก•…™2|๔zraวDพ-W}>Fjps qบิถดwRฉ‰ติdd{}วพ๎$‚ิ˜*ึ}๘~]b%‘+„็uโห„ิ˜วLoฌ)‡BdkˆสH คฆHอใ‰“ิ4ๅŠธฏo'€ิ@j 5้“š>7พ๊$ี`Dท™ฤพ ั“X/คฦ๔าŒ๋้9Rฉิ”oฉฉW+รุwl q’S•๛<๎บ6—ค"&Cj 5š๔HMอฝžฃ^v’สถ่ึŽุ@ิ#Ž#n็ๅRj๎LฬSล?H …ิ@j 5@j๚Œ $NR3_,–ฝฉิ@jา'5=ฎู ษ\มpวwนŽ๙>›2'D-bQ?@j๎ š“!5HM9—šš$5ฝฦ'ฉ๙DtAŽ‹[ฎิ@j 
5้‘šอ;z]ฏ~ัI”mัญ*‡ฏโฟป฿ฑฬ~ใœธฉิT\ฉิใฦ@โ$5ว˜_sD7ฮ 0ฟ๔ŽMซิd4 ldร †ฅtkษq OX!ฦ0๙‰JX1GนOzๅ{[=โ4 y ไ๖ด\ษ๗ข๔U—œโฃsIยฺซ‡9ั!ำ2ไJน#์ต%kฎ8B7ฝMื>irฅw๋4wโ’ิ4๋เuพbฆ“…+ท…ฌ#{jบชยk+ย@แฮซ k๔ยRบeaA);Zxt!F๙X˜DEp”ษ}ิ้อ๒}ญุยBzRšคศษ†ฐaS ŸจRฐrฃ|Lo_>ฆeH"ื+.ไ>สใฆืs Ž–WzทกXฅฆF oPท‰ิ8เึ L#ฅ[#โMb฿RฉิธฉNR“3โy'คฆงl/&2Gปค†อYO&ฅ{HฉฉbŒ#H ค†ค&๗†@โ.5"ฌ˜FJL!F๒๒‘QCj 5ฑ–šฆผŽ?๋ค"OพWœqRฉิด๐wHฅfc!žณ'”๙eˆ€๕คRฉqSญI{ฏ…ฯ8‰ำŒยE‰#HM์ฅฆzsop๖uG)ํ‰PธุโคR{ฉฉJRำ๚๒@ šฤ“noK8พฌLไYาม(0”hLฬโTL๓#H คR"5 xงํค‚KMฑลH คRCRำ๊ฒ@ GสาDžๅ&ฐAj 5q–š* Z{อOบอIœฒŸŠคR{ฉฉาิโโ@Š+Ž”ๆDžH คฆ N่ณ๐”มR~–œqด…l์รj‰–+นZj๔บ ยึำโ"ื {ฬ๕Zz›a๒ตf*E7S’šz-ฝ†ƒฦ9ิDฏ$2a้ภQ…G ƒNหuฅ่†แ*จทตHฃnTeรๆ–?nnํ๓฿ฏณ-~บƒ”Ÿ_ตณ)a ตFŠหฮฏฺ๘„Iอๆˆhq‘๛๖X˜lษ๕ึltฃำฝuŠz‚ฐ"›๚ณNYj*7๑5GโโH๙Œ%ˆ#\j๘ร@ิ#ถฉวถ๒wรฤ๒‰ก%ตj๙‹‰ืๆuv ˆ๗ˆCJ๘xŒๅnZำ5๙Pข‹ดดŽ•s)”ไ๑0ฟŽธuง๘๕R*็Gะพ”ๆ9GGส_,A‰ิ˜๎8bqR'ฺ]'ฺษ%ตb๙hพฮY‰6]’๙>kฃ^ ๆEุ›oR){ˆซล฿%zqLบฯ๖%ญ็@A)ฟฑqคโKM?ยใkผ~šก้n#fqš,๙!r๗6์!%ผซนปฯJง3œŽทˆz[ย๛๑8ฑ„—ฟ ‚SฺŽ?๖q‘ZฟคŽGพ–l๖cฉศ’H๋๙Qภพค๕ˆ#ˆ#ๅ7– Ž`๒=H 5H คHM80{ํตณ„ท O‹mธธฯ๏Oผ”โ๚?šืuพo*ภŒฌ_Xฎ`Dิเiณณ9<-‚ัQšพฤฺฃ“ˆ79@ด4ีiM0โ‚s%jŽะํ4Qฝwiขป•n“T0ฺ”˜Rต?ฤpโF^njจฬๅ:*W›)ศEภช๋๘Lšpะชอ_/ฆ—ำ™?ž˜>œn_™ืJ์ ~aฤฤฤHM์ƒ๊›๋ฅโ๏ฦ๒E0:˜๏77u;๘๓ฤแโ9๓‹Œฬฏปูb๙q"อๅ@‘จbฎ๋v) Fœ+~–ƒ‘ šลถLm˜78จ|)ึ๏ก‚ัรโฑภ!žแภ”XพŽ1สตNL่๒™˜Bo[ฤ๓อ/ง๙ฑ“๙์๕”(J๗ฟ๎0๑k ม Ž Ž Ž@jŒB‚Q_๑+`=฿ŸYˆ`ิOยฃ"ฝ็Fฆ{7'•kแŒฮ FMทlภ๓Œ๎๎E r์Ÿ๙•w“3๋K<ฐ๚–h-‚อX๑‹ํpโ/\ˆฎ ‚@AAิ ๘•Q'_ Fื๗๐\โื€`ิX}ษ[๓cี9ฎฦๆ—J^๏ ีm|งธ;B0:‰ฏsWๆkั[9Uใ_;๒zฆนซ่6>@ผฆ+๎w›_U๙๏N610nW$บใCS>ลXƒN๋ฯฃšๆW๏ง๙|๖6ม^ผoy]ำM=ม Ž Ž Ž@jโŒฏ๏&ธJ ๐{^ ๐ Fตนหาฌ(๒ศ–มˆ๏ƒฟ@S๙๏)kเ%ๅ4ภo’FๆKx:Y”Pภฟ็™กb ใlพฆฬผ|1ภo"๑ก#๎‡ b๙;pฯโฟอ`ภ๗อ๕๑ฯๅ1@ะp/ฟ•ƒ่[ฆ ›ƒQU>V‰ื)‚เb ๐ˆ#ˆ#ˆ#= ™_/5D7ณ @ีJqN…—Šธ:โHโ๖ ๔Y!ฤฤฤH 9ม๋๒`ทEl๔CJq_โ`๘JถqšH—|9‘ูP>ง+๙ํœทqqqR@qˆ9";!Awโ€ิRฉ€ิ@j#๓dสE]ฒ"ผIENDฎB`‚xarray-2025.12.0/doc/_static/view-docs.png000066400000000000000000020261231511464676000201520ustar00rootroot00000000000000‰PNG  IHDR–฿šล'sBITแOเtEXtSoftwaregnome-screenshot๏ฟ>-tEXtCreation TimeThu 09 Mar 2023 11:58:50 AM ESTฯ็Xz+ญIDATมAฎEY%E๗u^dT Œ˜ส๘w;f?๏๗žv฿ž5cA A€WQ#แ๐a[FE $\๛็๚หoฑฟ<๚ฮฟ—„ 6ํ/ ๕}๗มฉ๔๒"Rs]šœ '_'ภL!ฉกz๘G์žจ„qโฆR๗•„dึ๋už`๓่ฮฎ7|*ใN?v๕ฎGม!ชZyึ็}/ฯœw|ิnิ-Dฦ 8F๛ย๏W๏๎}o_?“ย4g๓/ฌ™ต6vIใ’ยปE๓็฿;ธ){ฅ๗3๏ผ ฐพฯ ๑9๗ลว็แว'~จkc๏_j5ห๏๛็๏Ig'ฅ‹‡gi๑]“7๛พฟœ>ARฒ๏ใbศE๔Hภน”0RฯwZ:฿ฟ็‘#;~w-`F_ผ ซ๙ฎปnJ๓ปœนยาƒ‚HT'I$๒้แ฿๎ู$๑Xœ‚:…ฎปšœ/m7ๆษล]ย{:?ๅฟ\๗J๙N”a`์ึื๛:žŽVฆq”L 7๘๙มฝ“ฟ๑G๗>ษ๗,<ษู š-Œวห"ฯ›ษ๒w ํนใ๗Oศ๕H>b*ฃฮ๏,žnเฟkษGQxฉส–ฃธฯx๚นdฑ๏ต n]RฏY์๛ฮฑมu๑ฑ#)ปเ๘iษภŠ#แŽ3ฒขŽฺ;ญƒ์}ํ๕ฬN/นึryp€ฬี๗Ÿ ƒๅEฟ฿”&''~L ๊‰@q ั/?c๗ณœeแ๑8ฎpั/w–'ฌ๑ม}›๛๐aศๅEฏีธฦƒ\sุ๗฿_มx๓w^หmQด@๑มึพฏ๛wŸ๏ฯไžมMš7๏ะFฦKy|"ง~฿๓}ด๗฿w^๊˜qทฃฑง์ซsผ?‹/ƒ9oฯ ฯkค—ฟๅีปYทN ๏๛1! 
‹ฟ๏๙gtw”“'Xwƒฑ‚WPA!ฑ๒ŠH๊ํ?_๐ ๖ๅ_~ท Ž&˜ภมษบ!ี7ญ_\(}๔$ •*H,ย|๓๏๖Q๛&“twฤAŠ:๚พ‚๘eท ก'_t “R๐น/~w3"LัUไ?็๋ผoˆ๓‡;3@็*Iธภž๙฿ปื๗๛“ทw=R€“œอช'vk"R๘ytYม๕ ๙๓o7eไป฿(ฟOะ7จ>/‚ฯw๕ฒุวŸ๐eจฎ๏ฃึอƒ}พ๓~ŸถVูงต๚ต%ๅ]ส{๖๗}ฉ่%๗1A+"้มˆK1๎n0B๑ปซผ/ฟ๒5ฮหwแC๑่Bีmฟซ:@ญแ_MำาNƒฦ่J ' œC๔฿ธ3Eฅvœภtบ๎jd/=/š}y—ฐฑ'ม็ใ‚ำ/ฉ฿ฦwฆL‰โๅ?วq๏ยวฯํ๘๊ณํ๘ AU„#?ธw๒็รฯฯ๗™}ฯภเ6ฺ.F3ฏว/ 8’ไ๏V๒>ฟใหว๕H๘ฮ˜สV~8ไ๗Xร}~งS\ฤ๗ใ”๓๗ู๊N๚ฦ%ตz(dอ๏/dบ๎ป>๖ฬšeE๑ \ล@8xฌ™Tะ Gm็ซoฑ/ึ3๏%a‚M๛ tZC}๗ฏbฃฌ|ปค4990‘๋™ฒบใ!๎เฺGN๙?œชiตAx\Pย]๏[ D v2ฎK?์ย<ฒkถณŸpRี)›๛a$ไเฒ€โ4ํ;ชผ๎8DศŠ2T1๎vใ แNศ\ฒ„ "ฬ3–Šฤbย Šเฮ= "Ma zœJ#๘^#)J9AVI8ขป๚xฅL๏๐LIฃP๑๐Cภพ•ฤ‰ํ„ž>๏!฿้tเยl;AO ฯbุ™cT฿AšขD{งืมอ฿!e8‚ร“แมษภe%]^dr์๋Y$™p๘I@%@iแ•ตแ[*'|ช`žš…ๆZEีกz~๏ฦ&Ž่>๏งมQ tcƒฦqLุhR‹Cฌn๐‰`’0ฆ็ŽNTV๐„อ” @ุ™8เปฟ8 นีXธuF„€@ƒำยัUWŸฯฆb…์`‚ค|Xสy‡†๖Vวู…ถโ_ฟSEX๙ฃด&9-hT๓ำSศk>U b›*;Jทฌบ†…ฅ)ข@ะ๕>x ”%<๙’/wท"€ a฿;วN{PPˆ—‡v ี “ˆฮๅ๐จ้ˆฬแฅ็‚๎=ุLŠรdP!ำฮC`Aผฐ„ฦะIม& ฒๆฤINฌโณJJ"ะ(Dƒ ?tšp๒”“^‚ศ!K=;…ม‹์ธฎ;฿1yฮ€ Dอkœ่‡ฏ ๊หo๙Gณ‘ ไ฿!b๘A0บ†{KN3OBHผ<†๖ซ’ON1ป‰@๔•คK9$Vvq’žV†๐ฃ€Vฯษ.พ}ฃ ng,““€Š…๐๒j;ฺ4จ่‡ภ ว๑๐Žฒ+า ๒„ง๚๛ “(`ฉdg0qœHr ถu @I„๑Q2eฤ้ €’œEH3ๅ>„AQ2๔Hก๕ลนภ{h"“„ˆ๑ู '@w็‹ษSมปฤจ@Ix็‚ฤ]๛งเŽ“/v๐์tv8ผ8.Pีต"&ฅˆ@-D“๎/ศRIMƒG฿$า ?”ไเdxz~ แ‘๖5๏’ฮ๕H$`๑X3 "€$>Šฺฑท~มQdsF€4NสวT๏ืw์‰๊๚>OTโย‚L:?6ุ@’}…bˆล ฎโ †š]*คซกBายŠŠๆi 1zส@rร|h%*[@hเู`u๓}€QuWœO)H%…‚ะ•ฅwZฑpํE|๋> ญp้ฐƒƒBธsIาJ๓4โๅ5uา]• ฐ!ž xWรcฆ|p"R`เht‹๙hีบ๏%Iœ9|€h@qBํฺณ๗0ฉฃ้"F ~ฎ\ึ฿฿๎?่ฤ.›~t!`ขxŸŸLฅp๒์ึ~p๘Ww฿ˆูmทห๔๋๏:Tวธƒฮฏ,ุDฎ๚ฒถ“(ไ๎ษxุ7พ(ƒขๅฑค๔ฦ8๚๚ธๆกY/bฤุฟ^ƒ๓X๗Ÿ`?ne–‚q์“l๙4ฝ๎<–เEฒ|ี. ฬธิษทฎc๑๘ฐO‰FrมM๙j๋คผŸ๛์“ ๋/มต๛๚b|ะ\OxGt๔ลถืึใจŽJหภ๎)"wqวฤK`cๅqังk4๘๐ƒ/'@ˆณz ฦฒัฐCœพนE‘ฆมR์ขœ๚๛ิ๎๋{Mzn์c๎๊(๑๛ฝ ๆEตMXม|ว๎ƒ'{^wƒA๓›ธ๑์ัWJ฿ภ{พb&qxัœ#ฏ}ิn+ฯฟŸ)a—๔GูhŠG๘acธิ฿Uฦkยรฑ๚~Gะโั๗ใณ๖๙่ฌ๑!Ÿ;Yฅฉmะำ๚ะฯ๎๓J ็_๛v|‡k๋Ž/9zฯไ ฮพ๎ฃั{๚่>๘ำฝ/@ฺ๔บ่วย h/Mซ“2Gยๅ`ๆฯ‹ ศษท;]฿๗;ี˜เC:ผ,>ยรT ?*ภ_๗;v๛Z1พŸ—bฃกฟ}๖ฅ*ฮ›Eธเ่:๖6พ่๏ค[สž่ี๋SVB๒๗ฯ๛›ท›4Nฐร"าฝ๚๗gศญ?โ”`_œุฏํ๘ฎH"ฟธcฑแึ็ทพว๙๙~ถ๎‹oaPุล๓ฌ‹;žpธFาฝ ว๊มๆ‹g{\ศ9ˆป! 
u—๙๛๗ ช๊ห2M v้M๎ขยGฏ{ถ๛~—ใŸใ{ทzpxqw#&ง๗™aš฿U:ื(ธ๎Xาฆrีล๑๖ฏ%|ฯ›อ฿c?‹ %ํl+ธ๊z^ฺขlำJเ‘px…`?nd๋ŒŸ$ƒ5๔ปzวR(็ผ๐B€๛U๒=ŽUv๚Gฟฟ ไFd฿ศ฿m%๖๏ไ์ไ iท๒๏นnw]ธ๔ฎแ๑ฤัผ๋ธ_์w๏๑Žปฒ^3เ2€ฆ€‰วD๐˜Yqฅk๐๐ฃ?๘kฟš๔< JUพu๐๘๛ซ๐ิ–โœ*แ}สKฮ‹ริ4ญŽR๛ง~วโ{^฿ฌแ|_รฟ๘๎ำ;ฃฎ;ั9/่เ€n .jcaศข‰’รถึ๗m‡๛์_ ๒/nฺ?อ๘๏w %ธ;๗wo6๊์๛็ฎแฝ๛๙yแทสA)ŠD~ภwl,<@า0’4ถ{๔/|์ีฬqyธ•?๘์๘Œ๛:^ช L๒ฉ*่ว˜QคขhีEฤ~}ฟcGปK๙๛Qำ1œ๛๋ใmrณ †WL]Qrฝัท„ ๑ฃฟฏqฺŠ€พ฿>“Ÿ๘;ะ"l็i๓F|t–๑Nพฅ_ฃq่้u๙ อnเ๗๓ร“ึป=vภ๛?ฏO€๐ด!7ฦq”ทokสสoใฃo€ืŽ‹ี๖i/ฟซืๆํ> hˆฯ๛ีDฟ๊๛$^@๛z{{บฅ\๑๓ร `n{ขŽG่นD3๎+›,ฌโvp๏4qoƒ\Nfฯ['ฝ9bTป^ง|์x@ฦฮ‚ธvญไmcศฝ{๏aึW†ฐRรฮ๏ก1›๋\x์ๆศ›8ez*พD.๖ลษํฺŽ…Euูv3Dz๐๋ยฝf;ธษ๒'„ฑ๎y์hไฬ๕{ข}ัื“฿ฯน0Hฤ“D[ศ๚บq†นถฟ๕/ฝ้ล>v^œฐ„ ธ๘zธ๋ั๙‚ฟพ}ส‚:rฬอg๋ผdn>G๓๐ƒ๋ผ‹(0|sฯ๖า>VฯฟP็๖ะใŽล“?:“:ฎฉผแG๙ํุ'lมlผ฿G7™h=๘ เเ?๖ocงโ2ธe›h4ไฝoFใๆูฏ฿งˆึบื=’š#/ นเ๑์7Uุtฌํฅ$Aพ๓(kžหvuœ€€m‡aฌGภชธ|๏?i๚รมฏ็::lฯ๗ิqv_^ฟ็pA’„MhbwJด๑?๒๊ฅ฿ผ_Ÿ‹Œ4(ฎฝ{จ\W]๏{ภyฤุOฦy€s๓ฑ฿‰้_Qz๒Ÿ]˜s{cžCเฺ๕เAท7฿qŸ†ž?ฝ GMU\๛เ<๋žs*ฆ =๛yฦ฿฿ๆำฟ~ป/•ผ?ฆ‰zต(œoyMn๛ฤko€๐;Rข{wฏฦ๐็cdPุ™ในืO๖ฆj๖#S`วป่ชฺ7y๙]ผฆ๚dด๛จ•อว~งž~๘]\rโCฯน=ูr๙๓/ภนm๗๏๙ฯ๏ฑ*TdTŒ(TH'ฒ“โHค์oธ^๑ B*+โ xฅ„|žwEกL6๓ฺ?ธ๋nปเ๘บ“๑ย๐tฒL0’K.I€U_คIPึvบฯ”(D๐‰w‹ภpบ |vคฒFโYXป„๏โพ9˜฿ป†้2`[ƒเD[rC๒พ2dŠˆยฮ>‘ค๙b๚m^@๊๒q|#}ะฅA@x๗nน›ึศณAxโด#๊"ข5รจ' b0š–Eจˆ00 EbSYิ๚ไ™๗มฦ0,+:Yiหƒ๊๓ุ˜๖ุvxUศกดฮแq™ ํX†Š(cFึ,Bม’ ยธำฐ$า>œ(qTํ+ ฆแบฃ8 @^.๋เ8ค’ s 7yy‚†ฐŠะ88Q{œฌฏ๋&|R „!,๘ -(‚`โผฐ.p‚@ิ)ญ ๊x่ฝƒ b42หz@|@  ถฃ‚Š๔๖ฎซธ๔ฤง๑ƒ'ฤe*k!่เqแ1Q“" %๘ณQMPUŒ*‚ธ[ฏ 0J?ž๋ฅหัuX -ไร—{๊ผ$ฟงจ pLŽ )ก‘๏บ2 @ฮฏY˜@ษแฦYU~1.Tu~|G‘k@I.udeP&@๐?แก0”]ฺอd’ุ๙ลBED]๘ษ!Lม$<‚}c1 „๑พwv‡—’oสYpฏwxUŒ[ะ`  œฮษ๙i•h๎เบ7‚b1™ุฎT€„…D๘๑@L‚"ํ“?4Tห๎3b2iวAp๊\ฏ-™ŸRt‘–†p‘฿|}ืŒBr|a'4€Gฐฦ]eนS?UขPโ#๎แ‚ภอฐw\ม$Irฤ@ภซ๗ฉฟผ7 ".ดCบำ;,ƒ‚่ณH๒ 8๕•Bˆเ3อ ๎Y‡ฉจ8๔์‘$ŠŸร๐Š็Z‹™ ็รCU‘จXQPpํ”ฒภค๗ม"ส@ Oง#ฎบŒศoq]Œqา๔)|0 jS ษ UIˆ๔$ ๖เ†๖u๕ฆXR‘šุY=ข •ฅเL€ฎ‹Hc=>xu4ฅ‚š‚`ชวIXPใๆ๏่HJเาS€ ;Pผ*vwn†๙ขD,v7Hฎ๓ ([ญ.O;‡cW$_$’โฤ{`0ลผู!้dcgฤA.ธA€ฒแ3/:L)‚2€;ำค#ษ$้ฤ#0ฆๅ{—ยูฌี฿๑[r‚_อˆ‹@–ฐ€(>ฒ4‚Bาžlqภู‡ำ2ชŒ๚`V\Gแ1ู็y~[๚€ข๋@RDา‡๘๏HZTฌ†lศภ „0ฒK.‰ะU-ˆ_-C•$Y …€!p+DPยร@NKPT(W %คp};”#H๎๔„ ขgว˜x#$9Q ธLAŠSๆ“ภ_๕D‘‚p]w.Gศ;ืธ๐hฆ)จ„ล‰ข|DDลก‰๊ฯุ|"hฐึŒม๐โ /ม€X๕พzpฑœ๚ มƒK22ƒในฃ#"ซ"—qก€4„* @ฎ,ห(ˆคMๅ๊v‚ลˆ`ยวŒ‘fyT}Šษบ$Qi ,=cŠ„’†ก‡ง9ƒ‚‘$ z?ฟaซะƒ;ห•”u@ะ฿N?%้-wฒRฑ“ถ TฮvPๅ&\‡€%1บยe…แ™ห —ศƒ˜|cuัhr€.ุa1ไบำLXF!A1$*๘3ห!W|0‚DN๔ˆzgจาa฿<ิ’ึ(D˜†ื ปRคCpPวฅฟ #ไน!านโEะI‰,มQI )>มภ‡ัธ0 ั๓“ ไ€"D๛ฆ๊์ะเE‡1€dฏ ่<๐ƒไ@E"ค‚&q@“ร /q“ข3\(:ีa<ศไhKI$ ƒ‘wX Hฺ=€รKL&‡7$กี๗-L4Z}์B:ธ๘r‰ ๕่pm e‹โค๎rJภ!`)*(A˜^†ดใ Qˆ๋๛\Iป;๓IkH๋$‚wE a๛T"iข" `„ขยS ”4 เ,๘R0;_E(A`ฅ>t๊€๎ไ๘zV^ยๆ}trz8ฒhD‡iph'/‰ DกD'ธ@๎ฤMเ!„ง๎๔l ‡มษ๔*จ$ธGyœ๊I*˜ถื DแŽEฦฒฮ .พ\‚เQ๑๐ƒUทณHŠ หค 4?/ด“&L ฏQPAชั‰ลก‰…1/`ท\Dฏq:'Mค’† ˜J€@H4ฝมB…รCหBมCN1า$ฎ>KK,ืทoส'%wVฦหH้นท7ฯR:ยICNๅ )ผ.ภ฿ษb๘ต-๎ฤโ•6๖=iำฑk๎ฦ†^yฒ ัuึจ๛0HฆS๋๑w๎ "<ˆึ๐‹H*Luoฃร0ึz4#€ %ฏ“ƒ๒Ž›zูปj”(Œ=ๅ๔ฑฃ๊—4โ<ฏe๔‘‚U ๗Y„ลkP x๊็Aค%ืฦƒV…รจ?yx€ูlิ‹Ÿšฃ@5น๊:œsึ๕EŸfะR}?zD'’ฃรXb`}ท Aะ 2ผ^uqgPญผ ท}&e๗‚ฉ์Cุ 4"e3“ฃฝ{Œ—>hˆรšฦ๗๔กํ;๒ ๘_ขƒ;; V”ุ่บ("<8ู f›ๅฺา™`˜dp?น#HC+. 
ฌYo™อึท#๓8ป-้าS 7็v“p่๗wgoฮ๕œ™สอ๙๚e‰ห’เ๋‘7Yท-xs๛-ๆ5™ฅ‘ิu%_า,s#ฯV฿8nั|KRI"c็์๊์–$[%=ใท/l’ ๆrฃ`ทm’Iบ-น็{–]dฑฬฒ_vต)Ÿd๏๙ะ๙ชoฮm2‘ห:้ณm_f‹ศqI“}vฆ5vำญk’ค^ถ%X\rg6O๎&๋"ฺญ’^H์2ั™H."M6YICnม,‹^ณ™'ฏ้ท„‹?ฺv็%Uzฮฅ™๒%wก™๕–&ฉ;ืฦ’&๒ฝึt@L๚}บ\<;9ูพ9ปอถ๏n9dป-ซ|šo๗]ฅOาถฉ4l‘นอ˜ๅุ1™m_ๅลษ2ท,IXศn๛Y$’Yึ(ทŠศF$’ไหF$Y6|Ngหพ5หยร,ษฬai๖ฯโฮึV฿ำ์™า$OพM/|nN‰$žLบฌ๖ำ•๓Yท฿uฑI,ฒๅ.’ึ๑BLLโ๕๛wI/โๆŠ‘d|หษ5rKXšฝufืฤแkย›ลวฌฺrทุฬL’ญŠ—8›eฝฅ7W‰[cฑ฿Šุ’นนfห’‰0“ษr’]๗์_C–,๙จ˜Tถซฃๆ `ื?Y๖Ÿภ›loŽุ๔พหƒM*ภจm๓q`หFoปอc„@ˆY[‰ล&ห-o๑eท…D"ฑdn{•๘๋4 X"‰IgŸษลH$!YFr"0’ฝฑmfฒœฯyฎ_ฤโ:d"sษn&\ยšูnูn’ฉ2้~ฒแ “ฌฑnMถฐ,—ผnๅ‰,rdซu๒ŽYŸ 8๛๕<ฯบฎ๛๖๖”xˆํุฑใธIDฺฆQคRQ(จ $ฮA—p ”C$จจMiำ6m’6๓เ!ž๛{๎‹ต์.{kG7Š$1aค)‰ะแ@"‘`™h+Ijvถ—น7iš4*!†VKหุ‹4t’vC"bำถฅ‘$“KfปldhญHšฆ…ดฉ Mh‚ดำ\iSMฺ ดfณะFM“N*DOทด"ค ™ๆชn–aFBฝ–คvOฏM’B#šlํj˜คA"ํH“D5‘AZMปฑ‘1Woฝw’f$štCB%1hึ$b-i/ ศบูไBeอ6๗YhˆtGgwบFCงฝ c'ฝGBB“dMซชปญIGšฦฺถCYŽV‘’@–S kWW0จ*หdศŽ^6งHgชd2กาjƒฝlฌ‘ค+#TKโm2“1ฃVฃฉฌJAำยี„ดฑjชYาN]DWณc„Jึ$Tฆาiฃฆึnฃ…ฆ ฌK4[†‰‰&’ššอัณำ+#BT2ดฒ›ญ4หƒINˆˆN“‰จลdvาTjiฝvณŒiBำฅ คL’ชhœWgeŠTฺLน•ส-'&mิLi์ ฆ"๊Vซคฒนฦ“6"ึvภvNฏณWณู:I%&;ถ›4$™\\5khบ1=ค‚ูฮูฑB ,=zO&ื”ฆMฮฬญอH)P–b›ๅš6iัฺ ตR!J\ฒบjาœ๖ŒkŒ๔H]9Mซ -‰ I;5ฑZ+“ค ษีmฅฆ้ถš+{อถSc)MD$ฉฮ•๓คm› `ŒๆH[I:ฑqŒอ:vฺูLLNฺดฑ‰ษญMฉศีกdRนd’ถieซ5ปnrq“ฺฤœ8ข’&•๎$ณvEย$“ณˆmRณBi)Tั ’œฎvฬบ’mปƒศ๎ดำ™žํีสดชDต’ษ„\ š่ดจำูํ4„คาN&™jM]อdOŠฑจคอ ฆฆš6Aฺั(V*‰ฬฎk"ฒQต‰˜’žng:ํ>tฏ›ก›ฌฉkršJKI[bM tญ$™D›‰Iโ:บําmห0ืนnฎ4ช Iา ›ึ2IฅํีจJ›’\ัXณอn\9ูฮษฌฒฃฺ8™ฮ\iSˆฬ&rฅfdŒฆ+ูVฃcN<ฮLํึ6I“;ัคี•™R%Qษฤค“„ฺRM[ฃA3อjW&ีMง™•jำ^ ;iTข’6W…"ีจ–I’@6ttดอŠlา4วfš„ยะ$™555QฅcImVส–e4ชฺ่6+‰ Q3STe M]uœšฆ›ฃอว‘•’7ปฑ“lฆ™tต*’”ศศ$หฺv3อ$g›่่3Nn6”Dคs5ฑ๗lMBaฬiฺ4˜t๔>ำ-nฃ‘ๆlฌ+๗-ใฤฆMh ’ะ@ดDตฮlฎ๖:ปw[gํvm€ถ€ถP9อ\g3าฌš†ษํl์<ฺj -I€ถ@Lาั:$ivNAS9ฺm“@$@[$&ฝฦDาฑฬtถuwตi็๎lm$m$ะH@ศ่d36"gcsู™ใ€NลQ๕h)ซqฺJrฅ=ิ์&„”ฝๅ2dRัvทฝฦžฆp฿H&™nโ>9๗ั\;ืถ{vkN3*[๖žœs%Iซ€บ_g๗่ีm๖ถ\zอilฒปฑษ-ฎi“&ngrูNO…6k–vg{ี,ฆดg’l›๎จ‘H29,ษeๆšๆ\s๎+›I’mํvฝฯญอUฑฑIFUฺM"MmJญ–ฬูฆวjณ›nmงฉ&งF‡๋ฎั%ูt›-mr27k˜ฎ\หถ=2ษH้ิถjvฏ^41bา๊ํšKZี’ŽA›ZR้,FMW…d˜ู•J7ฺtฏTไzุ<ฒ“ำฐฺํๆjฆIkฒงปฬฃh๖htš้I\;›Hr‰$ษีำ๖่ค•ณย6‡ศ\อดgฒอ้m2smœnปอส๖บบ+iฏฑด;ANฮ^ง;๖jคšถฉ6Gbอบllณ›๖J&าl˜“drmงงณ่ิlาnฺฆั–N{&+ ‰ษรvrนฎdoอœอ๎™[“imฮr—œฬฝuฦR‘dZ๘ีฏใ_พ๓Oh๓kฟ๕{_y้ี๙เg?๙้_่ร+ TฬLฎค้<้5้!iฆ๔™—z๋๚๖  {๒๐๑Gพ๛๎ป?๚้ฏ~Iทดํใ/|ํซoฮW>๛่~๗ป๑n+ะถ_๙฿~๓๋ฏ?w๑ร๏}๏ฝ' `ธฆทธŽปmQ‘™ไ์9›ณiดอณ/พฅ/พ๕๊๓_x๖ัใ๔ูง๏ๆŸ๘็๐ร'@น^ฦฟ๚^y๚ใว๓_ชI บ๗๔p„hK3‰4#zฝา(เ™/๛_ั‹?xต์>y๘๘ƒ๗~๑“๏ํŸูwพ๓ณ‡น=~๋ฟ๛ฏ=๕๋ฟ๙ำ๓ฟ๛ฟ๙ฟ๗oรๆณ~?ำ๛ฟ๙“฿|ภ3_g๚อ?สำฟแwไ?ืฟSvฺฑูvdข]Nช‡ฅ€ฬํ™7๗›฿~ๅ๑3็ฝ|G๛ใ๗;I?ส[oฝ๕ญ7Ÿพฮว?๘ณฟ๘ซ๗๎Ÿœดs=~๎อo“oพ์ษ/๑ฏ๔ฮO><Šn๔š[rฺ‰P]I'1ุVปตญŽi#iบWšYifฺซ[z]•ี+ฎ6M+ํIGUyะ‹1f6›œฑmถท“^7ƒ$Rถ“ฌ6›ส&Gฃ‰4h" MลดnIŒ6šI\.ีฆล‘KZํj#ฅmlข[ฃ‘ึHGำญไ๖H9ฃSZJฯษ[ภ&‰‰Œ<šž๔$]NNํ5&V[ศNฎฬD[mฺt๎่DDe3ฝ2๋˜์ฆฝ"ษสน'า๊1ง!s™{ฮต;Iาa:jH=อู่ืvัi[i“๖vฮ™์ฆGศํ$ih•H"ณ—NทUนค•ำ$‘&ฉ†dฬฤvgา%ชยค'š:Nคํnd:ำ^ำฝVVf:๗ำฉt4โ Fค&‰ฑฺสฝM:อ•™KหัœฮJfฏ!‰ะ•ว&M๋DดWจ&I‚ิMไฺถฝwsฅM+Ibา๋๔N‹ 3ษูีM#XZดM’ํฆ ˆ\™ฬูXน2ท๔ฒWท*Cบฑsํ๖ ฑ"&นฆ8ัk๏้ค‰Kฺ--อ5WฎาอVๆ$[ iงืŒฮ•ู>š9หFำึ้ญ’‘ฮีIฆ3kS้Lปู“ปD/าซWšLท’5ปโ\้=ศUsB[…$ฤฅำถๆ"ฦฝ‘DiŸ$OฯLะฉ‰'"UCฆZkง๗๛vำi#ณSทk›5jบWOn#iฒƒ6ฝ *Lไาดt๕ณ๖ึ\’Im๒dj;๗F:WG$“,Rj›M“ช!€ˆฐืHฆบ]’ทฆซ คU’$iSฺูถy๚…Wzใ๕ฏฝ๑…/>ฬ็Ÿ~๘v]iฯแณฯ|ํ•7๕หใพ๛ำOž$8๛๘ ฏฝ๖—_าใ๗ž>โ'๏~tฟ&ๆบๆJขlg๗สžคษ„‘Imt๗ษ^ทgžใK_|ฺ๛รรร'ฏฝ๒ึ๋/๐๛๐ทฟไƒ‡]ะ๖แ>žk_zๅ้w?}g?๓ŒŒ@w?{r=๓ ฏ้…๔_๗ใgf7šc’า”ขIูัดฺH๚ฬ+฿฿๚ฦ—^๚โ็ž~๖๑uำs๎Ÿ~๚า/?w{ฟ่{h๖!ฯฝ๚สห_yn>iฏ'ฝPY[ู^งseฃร^)I›F’™$`๗z๊๙ืพo[hฯy๘์“๓ฏใkoผ๘_ไO๖ไ“{ไz๊ตoซ?๚ƒ฿๊็>z้ใฯๆู฿๚฿๖ žหิค•Nž{๑๕ท๗ฟ๗น๗๑„!ภLIJU‘ltš& ก-ํ>๙์ณฝณ—^{๊s๓ัป๏๚ื๏~xภ๖ัณฯฟ๔ึW๘๊ห๗}็ง฿yร๛รms%๋บ=~๑๋_าื>วำผ๎o~๕›‡O3"ำฬ–ฒด6’Lาfi‡F’išlา๖ SหŠD6YกษŠ”fฉH“ถ๖$ํจฝdาา4ืฆšฌLDีีZgฃ“$mำ๊ีH:š$M”ฃšค.i* i %MY14ฆFาด:m ฉฦฑะาฺ6ดูŽ3]t๗บeซำ๖ฒูํา์ŽIbดีฆ{%sMฺถญš๎่Fc’ฐI9ีถ[จซTลด)า^@l5)Pj™4กšF&ฆํๆ>›˜คุดอ้iีิœ)Iฃ4ฌfคต$ขี;ปr(JumJHh*Gา‘6$ำ$9‰Zi™โNฅ’MGemำจ” Sํ.จ‰,ˆLS82 
Sฺ6gฏ$Sด$ูัดI+†กูŠ&ภmึ$ฅ)E“๊Nำdร0ค2iถ&Mฬ ;m[ฆIv+ญiง]ฉแฆอ˜$›FฏดปRmjฤค่ฆ;˜™\=งซf;m"ฆjทํšญซTฅฉeถ"าชŠLŠี’JWBdZต^4IŠฒ•:ถtvบ‘& ์œˆอUห๊mP)ี\™. ศVB›J ีัษHคษfzv™AWทmงiาX’ฦSm ‰ ัMd7™ๆ&e›mMงญ”’$้lตญm๔JdTLGtฌข&&ฤV[CnuL**MŠš.ฬsฏผ๖฿๚๊ซo~๎๊'๏์ฟ๘ๅo>lฏวฯพ๐๒ซฏ~๑ฅWฟs/>3y๒w๕ฮก@s๒มวŸ=ูใณ๛ง<<<ศ%€hHd05ถำสP€ถm๏๗๓๐ไTฯ“O๕ฟุO๎ๆ๖่ฉ็^x๙ีืฟฦ+_}๑๙Ÿž๑ƒฟๅG๏?@๛“๓ไ์ั๎<|}ส$ €ถOžvWg๏็แ“๎SM’”’Jำดj[ูFZษใ฿ญoพ๚›?y็g฿๛๋Ÿฝ๓ม๖์ ฏพๅ7^!O๗3๗'ํฃ$€Oœ5z่Aพ๎็]฿๚พ=ฯฺš%ห–%ห๎๖ุN›jCš IฅrDN(๘/Ž8Hq” กŠc25i’๔เกถdหึdiKฒทผฅ=}รZ๏ss]ฺ9็๑ู:guื๕t๎๖IฺฉTt˜ฑดj QR•ูmนฮฎ“๑ูืฟ๗/_ไl]—ํลฯพ๔๒ฟ๔าซ_ฟ~ๅ่ทoผฯ฿น๓pท€ึœ๓๔dw6;ืณuwบ฿ํๆf๎ga๎Ug$ฺฮ9ืูฉ:็บoัคCU่L‹†$i ํบ?น๓หw฿๛าอKทฯ]ฟzแๆ•อ[๗ืuY€ถ๋ๆยฅ+—o_8์[?y๏ำณ“ํ…lะvฮ๕๔๔๔ณใ<7ๆใ๛gงึuvI€ชtd๋TcT”&ฅต„ด4ีŒ2š™!ํ †9ญ2K2ฆQM:ฬ  M;“ ‰Yฆ™"Mf!ฃC „$“YEš‘ัŽdFาh[ 1ว`อTาvิขmฅ’TšคUš–€HCdึค˜5าJฉDบฒhงh‚จ ัˆj[“ฐtšฃF;ˆฤ`*ยˆYณ RI ำhฃFuฤhฆ6mšฮ ชีาJ›ิะดฅ23k$ะšอ,‘คiHHบV™kG’DฦLuถ3ŒfUf4ขณtŒJ'Mด1cึะ9FฬtชbŒ3ฉ”กาถ @…ค2$มšYI#] Œ&U™าQ€ ฉะš ี™ชฅCฒv0“&e!LBDD%mkTาศi’ชVŠ:ชอ JอY™ีˆคš‘0j’vฤ,ีษ(ซฑˆRค*‘2ฮ‘DZีFF:SC’0c™‘&RKjhา9L‰ ชา(‚d5KšcQUJ#†AอาŽYMƒ`&Ib๊ฌึ์LmfBปฮ ีdฆฮกBี”ัDตt”tRดรฆkfIขIi…AำฉZ2"Qษh”ฌ™ฃIฃอ4f›&™R‰FฃกƒูTtา0%fšฎํ อXg—Mฃจั$Aข0š1าhh้ ีNHั9Lฉ™N#mูิd!บฆํX’J$3[ฯ>๊็n?{iž}๏ญŸฟ๑ใ7฿wฯƒร‹7Ÿ|๎ล/ฝ๒าk/บ๕์‹฿Z๏฿๛ม‡w'€ถHฺถํ<~๗อ7๊๘ใ;ใทŸ|ัว'›ž ดU#cฦhณ–T!H-MPชUฌปวw~๔/พF๎elฯ]บq๛ูฯ}้ตW๐+/x๚…ฏœ=|tzv|๗๔ฬmถfดดmAถ h[•Žvฅm4V๋่LึคIฺ!็Ÿ}๑๖๓๋G?y‡?ณื?n{๑ฦณ/ผนฯoฮ๗w[ ด-˜mก:ซ3Iš1“ัšัuv,IuขQUP ๛Gg๛?“_<ฺmฮ]y๚ต่ฟ๘o67ฮื/]๒7_}๊_ฟ๗มูqบ๎Nมฟ•น๘พ๙ซ฿ฎ}J Zญ€R-€าคƒ9ฆ™ฮA†Žฅฉ…T€$#–ฝัฃgฏ=uๅ๊k—ฏ|p๗“Ž$h{p้ส๕ซ—o์N๏๎—ฟธ๓(ทe๓์ไG๙็ใึ๔ๆ;๗๖'ฝ0hฬdีJาT Y 1J4fซ‰ฉษŒY h›6ต4Rั”BตJส˜’ิ i ™MŽj32“YฉLEBˆฆLKฦšJ+™ลH:กษJ:—่:า$ฉQ:ซ•YKH4ญeL3C%ฦ"ฑดmป0‚ฮ)FฐถM'*2HL)•Tfu˜]‘D#3)ฉNตXCiา1าvขRaจคF’!ขหพญ&ฺค•Z’J‡ฆ–คฉกsา‘‘NfšsY›4&+Sำu“TL!#3%ึVmiฅ#ช )™0—๊ิY Mื4Iฉฦh•ดˆŽฬ6fLฺ ‘jขSซI$i02ตk#กŠdถH MปHฺŒ6-ี6ฺูคค"’4U$1Sลิถ–t‘@JKb45ภŠศชญE ,™Q#cF'%E%5šจhิf$14ีดƒดmƒEตj4K24ฉฬJk3m#"อ(ช“ชfุC"˜)SR2คาฆM†tuถญ€&ฃF#้อ ม”Y•4!ร`MI fฌm:SAE’ฬ‘ฑ.mDIjVš0T;H‡IU:งŽŽ่:jฬJ[าfšัดHะ9“6‘E)’น7ข ‰ฆs&ำ *“IAฬfZ:ชmhด•D‡’$#ณสJMZ]IJ" ณCฆ4ค•ฮ’‘˜ภŒYI—a&™I%cjฃซdยfjคPI €Vฯ=๑ส 7n_:pํท๚฿}๏฿ƒ\>:w>ษ๑ฝ๐7๏}tผฮ฿~๑่๊‹/้๏œ“ƒฃs‡›ฑ๎NON๎ ฐlถ็6Ÿ<~pF–ํ๖เpป9Xฦะฮuท;;>๏งฒmฯyvr|์๐แf3ๆ~_ฦv“นž=xpฒ“‚,›ํแแฅํ่z๚้ง๓๔ม๓W?{รn{t~s๎H`lŽ6#tฎ๛ณำำใณนZŽฮžnฦŸ<Œํนฃ๓อ2wงง'ฯ ฦ๖ันํf3wงง'ฯ ะถณm 3iR!ฦ€ถ’ฑ\ผ~๓™KG๋ถggงผ๓‹_xow้‰?yฺ้่3Oฟ๘๑ฝ฿}๚๘ฮ.Idlถsอม2ฺน฿๏ฮNฯŽ๗ญ;ธ9่์์๔แษ:มrxยแfkzv๚่d`9<๑ps้ษูใ]6็ฮ]fw๘ิfspฐ]๋๔ไไ๑YK”ข ฒูlท›รƒe“Dืuฟ;;;ูญป หม๖hปูnฦ’่๏w''ง'+€Œeณm—M2ืมษๆp ิุm7หฺ9๗ปณ“ำูTาฆSฺHซB*ฬJฃษ@e๖่๚ๅฑ,>๛่;ฟฝ๗`\:ผฐy๔ูGฏ~Wcsึ็/$$หๆ่๐เ๐`ู$:ืuvถ;ูญkŒอๆ๐เเ๐`YFขsŸœœœ์;ซYถK็6ห<ปhn–1w'งgฮฆ,ํนํๆ`Iดs๎vg''งV ศฒูปr้าvณฤŸŸ๎ฯึ‘Œ k,ๆhuฬฬHKH–อ…ซืฎอ3ู๚/xใ‡ํฏฟ66W._Yl/]ป|๙2ไg{pp๑๚ตK—ฮ/๓๘ั{๗๎ฌ๓ฯ์Ÿฝ—cทžœฏหc98›OCูอ๕k—Ÿธzแh“Oฦว—ทห๖่ั๖่`Yๆบ฿Ÿžœ๏[šอแแ๖โัฐ฿=x<ท็ถ‡#๛ณวงgว๛,›ƒ๓Ge s๎wg'g๛ใูแ๔ฏ~๔๔แ๙รอๆ•qธษุlฯl—1B็บ๎ฯฮฮŽฯๆ6‡Žถ็ฦบํŽ็ๆัมfะน๎wงงป“œภุl6#ัน๎wปณใำu_DำIMf3JAJ“ก3Z%mš9L•"BะŒ‘*อl2TฃQ`F#ำŒ&ก#‚)SฝŒ!#ฃฒช1ำฑ$F ณVFEดQTำศhาฮ$PŠb4“&้ิ1gZB*5ชีJฃ‰ŽQ)•ŠถRKƒ6ตTŠtŒl$3Kตsถ3#บ๊0d$"MbดชYล~H“UfŽ)S*A“tŽI"mfZDJC*IƒH ๆฐถT›ฬZLm*‘c1›9%5&5Gƒ"ัši#ร&รHg:ซฉTb.ฃ1‚iฆcึ ŠT ด†ชŠHวBJซl B3Ijj)iG‘™BD #หฺiJ“4ฃ15MPM$#:าIฆ&ณร0–Œฒํฤ ณ’2Aiค”$ณ•#:˜กา„ฉ!า2ฅ“hชm4™Iei่˜Md$mk4คีะJ'K3Œ$ฉfšiด™:YฦHรIDCอฆŽZ3ŠvLc&!“v”ค)k"TFฉ1B4™ฉ viU+’ึ่ช‘$F้œIศ”ถi…hฺฃาššaHฒHๆ^ฉDjK"ฆ๋าŽŽU›’ข€ถฃŠDTต R"ั„HCZQJ5‘Œ*‘‘ฬ0ื!’iาTฅ#MšŒ ขฦL›5ฑdศ’uฬฌt`$รœMฆถขI˜"ฤl‡ "bฆา`๊F&6๛นฬŒ่ขฉ,cŽŒ1ฺ‰ฬ9:€ถฒฝฬณท—ำ;oโฟ|•็o_ผ8–sฮK''๗?ป๗W?|๋ต็๏™ๅฦ๓O_๘๋๛Gฯฟ๚สื_พyแมGoไG฿๋Dาถห…gฟ๐ลoพ๖ิงŸ๊Gฝณ็ฏพ๘๙็^}๖ึSWŽŽฒ๎๐รw๐๚ํO็ธ๘ไ๓_ส+_=๐ƒŸ่‡_ๆK7o=อ'๋tลy|๗๏}'๏ญc/ญํ•_|๕•?~้0Ÿ๊๘/oผ๖?z๕ษ—ถw๕‹7ๅ>”สัตงพ๔๙งฟ๘ไี[ถKw๏๒oเWŸ|ฒป๔นื^๛ฦo]y๐ั›?๙ัึ 
š๓/พ๚•o|แ‰›วพ๙๚฿|๏—วฅ9z๖ตฏมK7o>|๏๕Ÿ๔_ผsšŒddฬdืUืhFฦš,รPตฬฑู@ฒ9<lXฎพ๔๛๊ญg|๔๚ฯ~๙.ฯ|๓o—ฯ/wๆ‡o'žxโๆsWท=์๎ฟ๘๙ฟ{๋ณ‡ศ8ผz๛้—?๗๔หทฏ8ฺ,sเ'๏ฟ๛๎O๛ไฮƒณ}1ฦๆ่ฦ๓/ O<{ย•ร8;พ๛›ฦ/๔๑้*ƒ‹ืžx้ฅ~๏นk7ฮe|ื๏s๙`กJ+.>๗…ฯ๙๙[O_9:ส<;พ;w~๒๚[ฟz8w @"ƒดSฺ$šFGฅcŸ$m;ปvU๋องo?u๖ิ˜gc3็ผv๓ูŒ1ˆe{ฺ_~ๅ…Wn_น|46๛ำ๛๗~๓ซw~ณw๎=#Iฎ<๑ฬห/<๕๙WฎŸ฿lป{p“7๖ฦ๏<๔dv{ๅษ?Ÿ}๋ึ•‡๏~๏{๔ฤซฯ}แฉหG้o้['หนซ/พาWžป๑ไๅรํXฯํ‡ผ๑หw~๔ั @ถ—nฝ๐๊ ฯผ๒ฬ“—ฮmึ‡w?x๛‡ฏ๐ึ'วง$€Œ‘XH+3้6หYืมฺŒŒ@6›อๆ@eYฦ๖๐ ฅs>๚์แn?Ÿ๎฿oไ๏}๛สฟท๔๚G๔ใy๋๗'๕ทn>๘›?g่๙'ฟ><๗อพ๙๊อ฿ๅ?_ท?ผ™2Žฎ<๙ฅoง๙฿ร/>w}3๏๔หŸูธtใย“ฏ}๗๏|๗๔ๅ็žบด้ษฝ{ฯพ๓}๔๑ƒ}*ซฬุŒคฃtŒ5Mปำลบ˜I๎ฝ๗ม/z๎ึญK—ฎ=y๋๐ožs\yโ๚ฅ›—7๋ƒฯ>{๏ฝ;ห•ํๆฺ่S/ผ๖าำ/=q๙ฺั’๑gฟ๛๘o๊วw?:›๓๐๚ ฏ~?~ําๆw๏ฯฝ๘๕—พp๓ย๒ป_ๅ๋๏่๎r๓™เ‹ฯ<{ๅแุ๏Ž|แฏฟ}็Nถืž๙มwฟ}#แฟ๙้๛?อฎห๖g^๘ฦ็Ÿ|๎ฺ… ™ปใO๗w}๏ฏ฿พ๛ูฮdน๙นoๅ…ฏ]~t็ฮ‡?z|๓/?qใ’ใO๛ม๋o๚gฟ์ณ•Œq๎๚+_z๑•gฎ?qแ`3w|๚มฏ฿๑}|fŒŒ%ูฬนฌบf$:$sK็:ฺฬีœdcฮnฑfM–ัฅ-2F2์็H›ฎ2#›ŽณอYYuคฬ)ณc!m;๗ั1ว์Xฃหุ$cLบN๛5ฃคfZ“}ฯ’Œ$Iš93ำฎฦ2dLƒtjญfฺe†›‘.ณ๛‘UCยbXอมHšVg[sฃƒ$คmงhฅห`5ข2ฑLฃิฺ mปฬu4YาhG’ฑdฌฃ;™mึN5ฒPsN๛dูw‘‘”vี1dŽฒ๏ด๎ำ&IGฦฐ„ส:ศ5+id5ๆบnา5Sฆ`ณtdk2d™ƒMป๖##s˜j•ตฦ4ศฒ้œษœษ.6ฦ’&ีถสฒnฒvฎฉƒŒiิŒฮ™6†hiณŒfฆsํ*Mfๆ˜MbŒŒอ˜องํณา.I3ื‘ลbME’d™ํ:s0kฬd Z+ccmVI:ดํพK$ff'๋ฌc—ฑ.]–e™ึuฆำœlF๗ณฃMต๋]2’D:RsVš‘ฬqะŠV๗ช๛4Cฺ1วŒŒf[Sfณ$ัaจสkZ-{๋F’6sZJฬคษ˜slฬ&ฅcRd4๋:ปด9R–ฮŽtษb4๖๋lห4G–‘vฎm3หฺeŒณ•Ž s์ญs๎‡’‹%าšษฬLฆถ•0*$kำ6sfฬ›}วRหh†ูu—,ูLƒฺ๊๎F ฌรบปŽLฃษH–tฌs]2;ฒ"IฦศฌZ5ๆƒMฦฺฎ6QฉQbส~cdคSXdŒิ์\ฉ$†ฆๆ2ฦฒsฆฦ\วœkฒฯXื๕@ค3"›.ี&ษH้ s?ณ9่า˜E†t์ช:RํฺQ ฃ๋`Yณ7ๆ0F–,หœi็>s&#-5—Yซuj$’ฤHuถญ!อRŒ&งk’Hฅtุt้fŠMW›์;าŽ!ษฐt]ฯฦ2Y™`Vปธv๙hณg๎ณGฟ;พxโลƒƒญห่87ๆq{|—๗ฝ}ใฦํK7ฏlท๛‡gหrถฝr๙าน‹GหกํซG—ื๛Ÿอn\พzi{แ่๑ฝ๗๎ผ‡sฝ2–% sฬfํ˜#›t˜๋~nฌ#MR]ืนซIF$ษฒูdค™g๗๎๚๑ผu~{๙โ…KG1็๖๒อ—ฟ๖๛฿}๎ยัมุ?zฐหrp‰็^ผzฺ๋ํ ๕ฯ๖้๏ๆ๙›—ž}๊เโฅƒ๓๔Qืy๊ี๓Ž6›ฃK—nๆ์ม<\็แ•ซŽถ›‡ฟเฃ๗฿พ{๚์ล“ฒ<๙สWoัn๎๗๛อแๅทฟ๔ตƒ๙๘฿‡ื‡gจ*GOฟฅoฝไ็ฎnญ๛“G๗O6ฎ^{โ‹Wฎธ๒๓Ÿfฟนp๙น/ํ?้โมฐ฿ฏkฒฝ๔ไณ—.nๆŸีฯ.ว๋ธ๔ิณ_~๕s฿x๖โ6'๗ฯ ฯฝ|๙เ`ูŠf,Gฯ|ๅ๔๙หืw|z๗ll/\y๖ลรห๛?๋GŸํ*J‡.ฉŒV;C" YF—t ๛u๏;ป/nน๚WึๅgŸ๐ใท๏|๒วŸ–$่ๆงŸ๖ืฟ๐ลKร้งฟ๛อูzt๑า๕gา…›—~๑/๐๑ฝน<๙ฺWฟ๓๒อgฏl—9ืužmŽฎ\๊k฿Zฦ๘๑?ธ็t๗๘tฟหr๙ฦ7พ๕ต+รํุ}zณว 7_๙ึืพ๛์๙ํ2vวํฒู^y๎s—.ฌŸ๖Ÿ)<ž|v]ษูzx๎โ/ผ๒ํ๕ฬฯ?๘ูต5vๆ้>g–ญŽF–น์ฦ*ฦ™fญ™Fภ๖ๆฟ๐สWฟ๓าุ<ๅฟ๙ณ7vธ๎็œfป›ปณฮ-vปu7;อฮอuณ๎็lYป๎ปฎZจqํ _Oซฟ๗ฟ๓ฬ…อX}๋/}ใ๖ััf๓>ฅํ•ฏ๘๐o}สr๐“;ท—Ÿz๕;่มวwพ๗g๎€ŽถํŒฤจลlG7ํ†9ฃ)@’อfsq~๒ซ฿<~๖าๅ ทn_ฟ๕๖GฟตํœหอO\9ใเ์ณ๛๗z็ฎsฯ_{ๅๅO_ฺ์ฟ๛๑ู^zโษ็เฦีซ๑ื฿?Ÿ๎๗หมล๋ทพํnุn—qฒ>\ณx๚ฅ๏~ใลง}๖›ฮ]น๘ไ‹ฯ›g>y๋ญำณG;ณr๖pžฯul.฿x๚ ฏษ—ฏ_ุ งวkO=wๅส•gฎ๔{๙๑'ป๕แ๑๎t7— 7ž~ฺ้ฬ“GวsG็ฎ=๕โ๏ง๋ใ๗ฝีม฿ƒฏ~๓๖น+=~๐้ƒว=8๙ึ็ฟxเG๚กG{ ณณๆ’นŒ๕tnฦพ#:H–a้\ฯฦˆ9ูหC7#ณ™ณฦnQI†ฌY๗s฿lาน.šŽuฬฑ™c™ตฎ ]’aฬu,“9ว f-sดษาั=smfcำeิj‘คิXฑบfญัฑt$๋:ืษh,ฑ${Fวf˜IcฮX‰hŒ)SอŠฐhค–)tŒŽูeฆษ\F:้จดt]–ษH‡9Zณ๛์w]Œจั K›แ ƒดฯ5c—‘„.Kณmวยข:ืedfูMC#หมฺtฎ{sŒ&ถฺฌ;ร:23าfํlศฐŽฌอfsฮฌS$้บ.tY`vฬ1ฦาฺันhX“}lึน้มAปูฌsํพ$’‘v๋š™‘M4*ฦฒf]ๆฎีcd์7Fอ™Šeฦm;ืั%›aNk้’ดงgปอ’Mdฬฬฺe์ืŒ414ณcฤบgoะอˆdNอ:vKeคึ}ึีะ$๓liืfŽลHฒฆ๋N3บฤาhฺ1ฒฌ3‘aถc&าศฒŽ์gๆ‰uฬ,ฃฃบฺ๏ว2ฆtฮL้"ึ์*ตiF:ญ๋\วœIlb•ศ")ๆ3ฑ'52m6ญ&BR$หhf:าม2ฃ™6ฒ;่(4ฆลœct้4ป๊ZณK†ก2HD–ŽNต_ง๎ว’ ‘aัa?ณ!:t&ญ์ณ้:m:X*sถอšูส~•ฮu 2ึฉFฬa&™#sฌปยIiŒน9ศ:—id!ซนJ๗›šฑO–ตCm6:*ูํ๖$Œ$’i๎:—Žฬt"™!งห:ืฤ้˜‘ฅcอl:ห’•9g:’Q๋ิน$•uํ์~Y–mึำ˜V#ํF–1ณ๊:†ฌีฝฑ๊’iŒ9G33vKšŒ1tท๎ึ,jiวt0G’ฦ ยำ&Mำฤ<ฌ;ืผo.•Y๛ึีี๋Lฯ " ˆ2I‰คf„‚vุaK@ฟวภvHโำ’IุAHฤh f0k/ีี[UeญY[f>๗ฅsj=Kฦ่ฆI3ed;อ 2m็˜#cฬแŒy&ฏ;7sณtCซMd:ณา‘Œ$้iฮšฅY4Yื9'ฃฑฤHฮ:ถ#า)•ตรซ9Gุdˆ,•TšLซžmlNgื•)€Lฝํ๑๒ล้Yฯv6›mฦ$ƒํšœ>|ถบ6r๎ฒ}q๎ญฟy๓`๊ๅw๗๎อ้X็ธp๙ยๅ๓็6ฯพxr๗“;๓เๆทพ๕ํk็Ÿโ?ป๏‡Ÿ~q4o|๐Oู๙;ื๛๖{žฝxธ๖l›ํลKว่ื_>zv๔ูใ“๓๏๗ฝฏม•s๏ฝyแ_ฝ\ป๎^ธpx๋ษษ“ฏ~๙ณ{ง๋L-m็:gฦๆ๒oๆ{o]>wz็ฯ๘฿๘=}น{ๅ๊๛฿๛ฟณ฿ป;_ปs๔GฟzxฃWท๛—oไซปำ๎ฅ๓ื๖wฮGv๖๖.]xcs๗๑š^ธpmo็๐๕{Žพzฦีš:I“f&ํฺ9›dfฬ.SI0ฦฒd{๚๘eOW‡;cwcปปึ;ฟ๗๖มนอ๑/อฟ๚ใฟบ๛ษ“qญo๏๗๎?๙๖ลทฟอot“›Oo_;ธpํเ์“็;๋มีท็7’ƒƒ 
7ฮŸ๚ัฮูๆยอ๓›s›'ŽŽŽŽ^ฬCgFŸ?ป๗ฃ>|ฒ๗ฦ7~็ฟ๘/;‡—พv๋๎Ÿ8)ด–ko๓ํซท/ฬวฏ?๘ณ๘o?~y:.|๐ŸW๔w฿ผq๛ญฏ={๐ัO_พ๘๔ห'ฯฏ>ีพ็๓ูg'฿๎๏ใฟ๏^๖ปฟ๚๙้๑ธ๒ๆ›7ใx๙ฏ๒O๙Ÿ~๒ฬ๎ฅเฝ฿๘ญทฯก–ๆ๒;ทฯ์ฏ_|๘‡ฟ๚ษัุ=ธpใึๅ ว_=ษๅน์Œ€ส์˜1อภˆPญึJฅ’$ณ๋๑์ญ๏:wp๑๚ป็.x๋๕o??พี็?๛๐ห/žŸฝZวฅ+oฟ{๛๋็N็็?๏ลŸ๘d}นน๖ํ฿;ู฿ฦoฮ•ฯไซณฯ๏?}t๕๔ี/~๙ฃโ'๗ํ฿๚ร้}๓เฺปo|๒่๘๎ในฮ9-c\ผ~้๕ƒ~ษฝ'พxpบ=ึป฿ป}no9๙แ๘๎งฯบwๅ7o]พฏ>~žณ+…œ๙ณฏ๑ov๔l\๎ŸW฿ผp้๚ฅkŸ๚ฬ’ฑnบŒ1FSMFถbะึถm @V ถ‡ป๏ƒ๗ฏฆน9wx้สอซ็Ž~๔o๘๚ๅฏถ๋Uญาฮvm‹9g[0gืูvNะvถณPํ<๗ตฏฦ๗ำ๏ฺ?}p็O_๗๑ฏ>พ\๚อ๔O้?๘G฿ปีy๎๏|ใ๖ห>๙ำ?W๊๘๗_œ๎\z๗๏Ÿ๕O๏>{ถถcmfญUZดm[ษ˜#•ฮ1ืั dŒฑปปมท/ใ ็.x๛ย{O7s.ืฏ_พrธฟ๓๚ำ‡Ÿ๚๘ยธ๙ต฿๛ึ›็็ฟ๘ื๚ƒŸ๘ษrฦ;ฟ๓ฏเ๖๛_ฟ๙๑ั๋{ืuvfปูœฟq้ีฝŸ๘ใฃ็|๚`sใฺ—om^ž}๚ƒว็wOาƒซo\ฺ์ณฃน?ฯ&ฺ่นฎใ๐สี7~็ƒห็7g~๔‡๏๗แว^/ื>๘ํ฿๘?น}ํํo'Ÿ฿7wฯžญs5ึำ'Ÿ}๘ƒ?๛~ำ‡๋…ฏ๏ษ๏พw๑ยต+‡๎<~|x๕ฝk{‡›{?๚๑ฟ๑ป๋น๓็฿พน๗๊แ๑๓ํ…ŽMฦ@fฌi‘!ษจhฦLX๕lฑœญ&‘M3d้ฦLGhณ–ต]g›udjี˜‰tTชซ™t3ึกš1[ฃอŒY๋\2ฆTj3Y™ิ˜ำh†d-3Ykอ ํT”31’SifJ+ณ)๋่0—ฉ5™4m›คmJK2dฬคษ”1št4sR]š•i“hฆา Sfว:“ูจฮLตPHซZๆ4าฦh2dSัj•Œ)3อ`vถ’&k†tดตvtXๆ I•†h$ ฉภ6ึฅkฺต)cฆ:ชต๊4d„ า1็ฐฮ,mช†ฑImๆยhf˜-5ฬูJi%aถ]#‘iv†dฌ fฆŠ KฒดS’ฆkH—ณ3d‘4ฃLMบคY%svญ™i$fืŒ™4ขฃ23W]2ณ้04ฺฆ๋จˆณ†,i0f–uŽ0ณQ้Hโดษ:ฒŠfY:‹ดf5FขฃM3“fลฬ(lึqถL™•้ฬ ŒˆะšE"Kc6:ฒFš1ๆHๆิ)้2ฉœu‰ึL›Š,ีฬfŽซ1ห4)B4ฺjื,ร ษาŒ6fตขL™kชฌgอิ˜Yf:ฺ˜ RcŽ˜ะ":ยขdaŒž-sฮšอ˜iZฬฌJŒ$‰˜:‡YฺŠ,!3บ1ฮ†0ีิ่h;™ี!‚ูv&ฃฮ:U1‡*CX$H3fฺฎgkปŠlšE†ั”ค#ีdmœญfงชฌci‚Qฆ95ๆฒ9ค%]ร˜้:3›ฑ˜d4™MgFm4fS!ณฮึ1ึคอhmฆ”6ษH™3•ฎกƒ•usšu€อ‘4šฆmG cm+อR”9 aNYlถHฒlฦXฒฎ`ษf๐ไม๛>y๕sืฝพ๙ู็]sๆ•ร+็=๛๊่ฮฃWป฿x๏ห๖<่๎ƒ'/^๏๎d็์๔๘'Ÿ>๛ญ๏พqy๏p7Gmั“ณ็๙G๙๐์%๋บ™}ถ{๗ั฿ฝ~ย[7ฏ“ใ“5็ฯ_ฝt๒๚์้O~๑tg๎n$…2›ร+_ปนny๒๑g๗Ÿ9w~ปŒ—ฯ๊aฎ_ปฦๅํ๎ัฃฃวฯไฦมมๅ›๋ใ+W.๎<>:9[วแๅ›—๔ž‹W.œ฿z๐๑ใ'm3Fฦฌฉ“ฉ:Fdฆ@ cปอœZ ตปปๆต+๖O๚W_~๔์ลพธ๗เ'?๛๔[_ํฏ๚๚ฝฟพไใ—๗^x{๗เฺล1Ÿอ+Wฎ๎.ฯ๏?xตู์๎Ÿป~ๅœฃ๖ส•›๛หหปŸ<;zตq>-8๚ลฯูG'วg๙๒ัฃ_;๙ญ๓ห๙ ;็ฦ\Z่\.฿ธ~ห;ฟพ๓แO๏>{ั๕์๕O๊Wผs๙๐Kื/_ธuม‡_<{๖๓๏๓ž>}|๔์x=พ{๏แ/๏ฏ๏ฝตฝ~qปํซๅล๋ฏn_<์ฮO~ษรณeM?๛โั—ฯ^}ฝ็ะ™ณณฑŽd์žฟtแโ…ฝg/_ฝ๚ซฃ'cปœ#IฉNi•ฤจฬaฆาา$ู,ึ‡แษว_{๋7ฟ๎K/๎_8<ผx๐ส๙๔๎‡Gใแล[ื6g๎๚—็6็๖ทž๖่์๋‡o]ูŒ/^=น๓“?๚ๅ๒๊ษ“ใ—ว'›G๗โำ“๗พฑฝxธsฐ1ึู*]ฯž์๕๗๏}บฎ6o~๐อืฎ,g๓‹ฟซ_|ล๑๚ช›ืOŸผ>yyด9;Y+p๖๘;ฟ๘ีWGฏ—ืห‹~๔๐=ธtฐฐป์ฯณgหP2”jตŠฑŽfFัŒชถ-`ูY.ผ๛ฟ๛V16ํf;ฮ^=||้๒ล‹‡{ห) ……ขะ* -Z€ฒ๛ฦ[oฟ๛ฮื_?ฟ๛7๙“Ÿ|๔้i>]มw๋oฮต=ฅณูู฿.หvเเpg=๔๎ร/=b>=^ป}เ๒ม๒๊มั/พzดนpp๓ึ{็w๗_~๙GO–ƒรf<๘์“ท฿ฟr๙ฺง๛Y[jž<๔ืหฟ๓ฯ๏ฟNuนu๋สXถc้…ื/ป๒ษซ—_eoอBถsfลซW฿9˜^๙ซฟ๘๐‹''ฯ็ฒโใปo~ํ๖฿ฝ|แฝ็๐ลัƒ–Z_<;บ๛“ฟ๚๔I^ฯW๓ูใ฿x๏๗ฺ;ท{ฐžkv6ฒ์^บpแโม๒x}๚ลว_œl7{›ํ H-cฎฃ•#3กดsิbY)ฒ&MbhอฬhPฬร‘HฅhDieฦ่H[”–3Ckศ0“fhา˜งCอtส\RX+ฉtPm+ฑ$‰ฉีJPฅ!)ฅี*Y•†4&$sFฅM;šiˆถัา0d”$‚9ฬัN&HŠ ฺREt RI#ชM“,Jอ #3 รœT[ณ’d fLdถJฺZbŽF 0*ฺ9i*Hev@g& ิ#ค“ดซjR4)ฆกอ์ฌ16JZๆฌDƒ ฃc†–ฆณํX4‚4š•I@“คIPคkGšกMhฃT”j5ฺdอˆtึ:ƒฬš3iคU•ฬ…`dŒQ:ต า‚L$K 4TKMยฬ˜ํPคฺYตฉ ƒ93ล@5ณ#cฆ"ฉ…Dฃ”‚ู%%…ถคi:DชU -จฆำิ ฆ“Aขด`Vf“1–ijŠ6ŠjTC"™’Ufซm“$R$HซฬL-‚1ะฆฺฉ*34ฃฆJIฉฦX6อ„I›vณšฒi ICf::ำVรุˆŠ„ลฺV“ฆ4ดค!h’‘@”ูDffRmค:uฦ$ Œ$3รXb5'ค”™1ญ"*ะj’ŒMK“Zm“I‘าอ`ฆณƒัฮ˜Mว"ษศH ํ<}๕zฮษฮ๎vgggผx ูl7ป‡{ื/O็๓๕รG?น๒ม‡ทn_>ษษแๅ[ฯ]ษ“y˜\นt๓โf3บฝ๔ฮทฟwอน“d5rqูŽegoูŒjแ์ีz๔ัO๎>~8ํ๎mv6''ฏ|~๏ซ฿พ|๛า[o]ไ๓W›รหฎ]ฺ๏๓ฃ{w๎<‡ฦ&ฺŽัƒ๓ืwฒ› ทฟ๕ฝ _๛๖ุi5›Kป–q๎`w์ฏO?y๒ู๑›฿ูน๘ฦ•ั็็o]=w0๎yq์ฺ๏~ใ๐๋็๒ๅrใสแฮ|๚ูรGŸŸnฮo€ŠJf†$2Hณ6ี@’ฑู;ท›e˜งง'งฒฟ|qo™งฏ๎฿9Zwท—.^l_ŒWวO>;ฮ๛W๗ฎ^;ฟ}vัำ{Nพ~q๏๒ๅรํ๕ฺK›็_๚“ื—฿๙เ๐โ• =~}ส๙อz๏ซGว/ณ;2ฏžผ˜;;ฎ_]6c๛ๅหฒM @ญs๏าลƒฝe=z๘์๘ล‹ฝkWฎt๖๑๑ใฏŽOฯถื๖v/ž,}=Nž๖ไลv๗p๏ส;๛็/ฯยฮv,๋บ{n`og๗์แหg๎ฟปx๓ŽือฌณZะณณ“ว~vฏŸฟ๘ฮw~w7<ฝ่้ƒ‡๎ฏ–M€@Z3 hฬ˜ฃK›RHฒูl.์ฝ~๘ีฟ|zt๗ใ__ฟ|๑ๆอ[๏ผ7o^พฝ;ž?8z๐ขป{ป—ฮฌ{ทพ๓๗.๏wlฺ9w/^;\ฦXฮ-#6gวGว/วุ๎œ?mณตณ๔๙้์vปษ&Mซ่œฯฟ๘๕/?{8Ÿtoู9ธฐw๑ส…ฝe}uีวๆฮฅหWท;s]ื“ฑปูl0_ฟ>YOsxฦ…หรืฏื9ปct™ซQ ส”bฮ&Z&2hำIฺถ ฮ^ž๛๑ฏ~|๔z=หv๗๐๊ญฏ๓[฿ธต฿‡วฎฏล> 
อแี+Wฏ^|>wฮํํŒำณณื'''ตm_}๚ั๛฿ฝึตพ๛๗๋ๆ๊o}๔้gŸ่WฎOZ@iZลŒ™Ž4ฆ]iอ*d,ห๎๚ไซฃวŸ>บ๑ฦฅƒซo\ฟ๖๓;๗ฏx๋โ…ๅลัััg_ฏ{W/^8ทdฬƒ›_๎๏ฟฑฮe;็˜๖ฯ/หv๎dฅฌง'O?๛ๅ/๎>9Y๖ทํ|~zt๔ไมืn_น฿๛็~๘์ใ'๗>y๔๕‹นAjปูู=8{๘๒๖•ƒoพฑ๓คืฏ\;>๛๔แƒ{_ž๋fปปHบ์?<ฟ]ฮฺ“/?๚u็ฝ๛O_พ๎ถฐฎ}y|vx`๗\6ๅ๕ษหวๆม7nฟy๕ํ๛?~ถwๅาแตƒณใ/~|๗ูฝ9F€ษ:6ปคห๖ล๓}ฝ'จz๐๋_=˜O<;9{๔ูณฯœ๖ํฝ๋ืw]ปuqg๛์gŸ฿฿|๐ญ7/]ฟva3o]ู_Žฟ|t๐ูหญ6iBาFซ1"TฺคฒŒƒ‹7ฮฅฏŽŸฟ>]ถ;ำWฏวนํf/cdุnฝ8ํ6;›๙์ัใŸ]฿9ธz๕๒ๆๅ;ื๗wNๅgwŽว{Wo_พ๚ๆ๎๑๋v7g๎?|๒ไlsฐcณปn๗lœe%:'Teํfg;–‘yzVูœ;ปปmง“ี้jYฒYฒ,หแๅซท~็โมม๎ฮfณlฏผ4Xi็,e,ฮN็บฎ{vvw[;sป,I@ึy๖๐ง?๋k'ทฟ๑ๆ•kท?xใญำO}๑ี—ฟ๕ง=YO šVFHH‰hH$c์;ธบ,/Ÿ฿๙๘ซ_ฺล7?8๊๑{o^ฺ์ฮŒํ.ƒซ—ท™ ๊d>พ๛หฏžพธtZ๖/^นy๋โแแ๎๎ฮvู๎ํ__vTซณ}์๕ฒwแโ๙์O๛ฝั๖๕ซำ์vol6˜sjgE@–ํฮฮนƒi์mฦHh;kะQ™ˆคบ.ฦ่L1ำฮ^œ|๙ƒ๑็ฟz~๖: 7฿[๐ื์ฟ฿๘;—วฏ~๘ณ/ฺฑฟปฟทป๋์้๚โ๘Y๗ฒู–e €ไ๕'๑งtๅt_ฟ๕›๏๏มณ/?๚๕ฯ~๐gๆO~t๗๓''g€B’EฅF$Bค“ดJ…ษ๎๖์่่ั/Ÿฮ๕๓๛Woฝ{x7o^ปฒฟณ}y็ัƒฯŸ-หๅํฒู์์\ธ8N+ดฮž}ซง}๒ๅ“ฏึํa1gOŸฝ\บถท์ฏ?๛่Wบw๖ทฎx๋ƒ[oŸ?{๚ีW_|๘้—ฟโ๙ฑh%cY6ๆ‰Wฏๆ๙M๖วฒcต]N_Ÿฑ;หŽuQ`,›ƒรฝv๎mวf ิ<™Oพ๘๓l๙เๆ๛7.ฟ๙๕oฏฏž>:๚์๓/~๒แ_ฝด"!JG;):TำiT;kš’6ฃ’‘กfืถ‚hmฺHBS’†ัาูคiดF‘ฉ!ณณฅข1ตZฃม0ักc๖,fฆf4‘ดด2ป$ญศLd†จ˜ชEIPBี@"mๆXฦ)…Ž™fJGฆŠFฺ6ํFฅ)า&‹.ฆt2ฃฉ$32ด42ะ62–H2e6L’*…ช3ƒtFซ’ŒHอฦ•N:ฤHวคi23)5ำ’Uฌต4#ํะด:3า0’*Ma)š6sdคR™ีbi†ดด1!šึิึŒฒh"mCQ“DFš!M ณk5ซD“ส2คsRM0•ฆ# าa‘TtVa&“1ศh'3Mš5ำ๊คF’ฎCบดั๊šฎ]GGšค1kิf6•Ž5ร 35ฺ"ณ- 5คR‚†4ฺฆ2ut˜*Hฉ™1–Œต:$ีF›˜#“†42bฆ้L‡V$ษ’Lฉฅคณ‰€ji3ญ ‰ช3”ŠFuษs†สŒvฦ์ชfGGฅi†9ฬ)Œ0ฺVIซ‘4!iDำศ˜sฌ%IชTZฬ4˜ฉ1kmืhŒ41ขZŠˆ„FC ฃ" ํิ$iิX†จตk”Hส์L“D[t‘ดhCG&‰fฆฉQmซkบถM’‘F3าeถ:uši"š‘‰ำ Eš3fๆhK”–Ha3E+อ˜Cํ˜3‰Q iฒ~q๗‹‡฿:ธvp๙สีซทn}๙UOPc{๎ตW฿>฿๖๕ฝป_>y=็fท/Žฟษษo]xใํหwvผxธŸg๊ฃใอ๕ำณ—ำ์๚๐ร}_9‰V็ิuYŸฟศมๆb ษุ์์ํmถฦขูœžœ>›ž7ฏ]นqํ๒“ƒ7.œปุG_}q็ษ๎ๆvฮhuงฏ_Ÿฒืงw๚?๘มร9ะ๊œซำqv๚:ป/_ฟ๘๔๓ฏ_ป๖๔๚ฝผผ๛๐ัฏ๎_บ้๋ทฟ{ํฦํหงonv^{๘ไีฃ“œihFว0›ฎฃCŒ˜บ2;ป—฿น๙ๆฒ็Ÿ?xx๔๘๘ไ๒๚๚ฌXv๖ทหู˜ฃI–eปูภูษI9}v่๑ณ‡นy๒7/ฟqไหฯ=๐ษz๊ฃๅฝK7฿พ๘๕ตํึพx๒J–%d$I’$@ @;ืำืงsฒููlw7หาหfww3–Xืณณณณlฮ]~ใอoฟ๗ๆ;W๖–˜ณหฮฮ†U9ืuฮYcูnถdะ$ #ห˜ฯ>๚แ>ธ๛ัืึ;7n฿ผz๒อo^บxky๐/Ÿฝ๎L€JCบHฃM-SG(ษุ๎nw–ฑปปsxแโ้ษษ๓็ฏ~๕“Ÿ๔๗~๓๚N–ฑ:;=99{žฝ|r๗/๘Oพง3hนfžŽ๙๚t๏๖…k7?x็อ๗oœฟฒทŒv69ทณ]BตHฒl๗6ลฆgึ9Oฮ*ู์nึฑŽ$cดอœ’‘Œ‘Q##UˆF4„1ค้+ี6ติ$csp๑า…y"s๚๑ฏใ_๚ƒ๋;๏ฝwsspR0ืู9I2ฦฒŒDJH@$ษ8๛๔ฯง๑ำ๖฿[ฟ๙ญฏฟ๛๎ญ฿;๏|ึ|r~v|ั‰‘,cdฐLีjKu„F›6ษfป9~๚ไฃฏNฏฺฟ๑ํซ7ฯŸอ๓;Ÿ๐๘qฮE^ฟ:ซ]ฯ?๛›์o>{v’ ํœ'ฒฎงใx็า5dูู฿ฎ;ฒป๋๋'_~๘ฟ|๖้ง฿z7฿ป๕ึต+o\ฝ๒7๖6๋‹‡O~๖ชู่บžžN‰ํ๎ฮvปฬั1ฦ’eณYhฯNNืuฮ‚$#Iฺ$ƒ€&.ฏ?xr๗ึทฟ๖๖ืn_๓ฦๅkื฿ฮ• ็^๛ืŸžŸ’ะ˜3cf‘hฅ#3*ฆั5IjTs–Œ1GfฦX$3๋่ฌชคu6ํhFๆ2ƒ‘DušlึLeถ’TfuาŽอฬจjgY,ฒขiG7‚เฌูา๛<๏๓๏~๏ปึฺc๏ฝปwฯ#บมฦ N†@ัฒ,*ลคd—ซโ๒™ ง*9อA>‰ุช’]’:")R0!‚I ั่yฺC๏yXใ๛๎\W–ดฅ€ยŠI(3ˆ@H#c $‘B ฒ@ฆค-'ฒ@ฒฅ*+I…ฤFTQQq"ษ†โt" `ไฤ@€ „!6–ภ[ฒŒ%…#ภrฺDrE ‚eKธ#’4ี"(AX8,#cS-Q#e '8]T‘3D",XdbIXƒ…%0E*81aŠ„ŽLป‰(D–lŒ…‰,ค…‘Sฒ$ห$ถ-“Vฅ1"D๋RƒjF2R’ูe€D€,Š"›ก‚[6–e)ฎย(2Jb์ด!‰ฐ:H‹,NCJp)ยDบ$™ฦD ƒd#!2–aaY%mู(#I`œVค์@ขฅ16A !76`D![8l คL0ฦ ˆ™ŒYถฐ„A(‘!ฌ „ซฐ@QๅD"ฑX$i,$„ยค‚Hl*H)Jœฮ@‘„า‘I‘-ู "p‘e–Pฑ,B"Bv#lง%–$[Iฮœ…ฌPีˆดAฦฦR Šœ’-’ƒmlฌŠJ1"ไโ"ไฐ3‘@6Nฃ$H!ฌ BH'6E‘`ฅm#กTิFคŠ%cา‰ƒ%ุr-iL'uธ lค$าฒmIBBr"Œ จA–Xถ‘k8a–ตค$‘D๗๒ัK็–ฯ^ธx๕ฮdบ?|dจj(ข4 ฏ\z๗๖๙5ฯบำง๗๎LฯD[๚u<<ูฟ๗*฿zใ์ตห7ๆ.ฮ-ฤฮ‹อ}ทE9:>ษgz =O&'ป™ IŠ่•hล(ƒž !„d(Mq7~๒|๋ใs—ึ/ฮ]์ว“อญfกa„kื์Uฯ๗s1 _žŠ(ั+j๛‹ฅiNv๗^ณzๅ์ลwฦk+อ้ใฝ“ัx8žŸo๛ทฯ฿ธ3<฿|พต{:>Ž^#I(ญ\จR ((RU้p RH’ฅ?ฟrแ{๏^\mƒใงฯ6๖ทฆ“๙ัฮมŒต่ฏ^X๊o๏wL‘ข?,ฏ/€๋มแคซmwzrr๒๒๔๊๛ goผ9ฉฯฮึฮi7ŽทvF:ฟz๕ฺๅษ’šใ—ฏบ“Y?z€ุภสใื“๑4›๙ฅ๙ลๅ๙8=P๔Vฮ.6๓mކรรaื_น๚ัวพทไ“๗<{ฒy8žฟrํ๏่:`ศใแp4นด๓‹หe๗€Rข Imฏw๎ย๚๐h๏๓ธหŸล…;๓?๛็qง=ณพผขฝC3!$H(ˆe pZ™%sฆฉ๔V/ฌอ ๗†ำแฬ]ฏ฿„๚k๓=‚<Ž''G'‹รรฑ.EปิŸ๎์Oบ0RD‰่5ฅiืšมล๗ฟม‡็zวO=พ๗๔๕ดท๐Ÿหปs P„OGใแ๎qง•ฒxย™๋“3ฃP‰๋ค€A@†ฒT‡ญภ%-ฮNเP„ี$ซๅ@ฅišึV ฮ,ฏœ™o1žอฦณ:ญ™ถQšVPำ6ฅ„ิฃำใฃแะํ 9ณ~i.žNUMi›ฆ) ฉ?ฟฤl๋/ๆO๓—฿แฟ๚w๛๒แา•ปทฯ๔๗sฏ I`แ"ตฒ!3า:INชP€ย’ "yrxฐปปvi๎สญปฝsํ Œ^l๎l๏ฒทชฺํ๎๎อ๊`ะkgฃแมVIฅ)ฝฅ?็าJ…$!I๊๕ซQฦงG฿|๚‹O2\ผ๑๙/๘wฯ-.ฬฏฯฯพ๐h<>8>ฉฑค3๋็ๆ6wGt5ssƒณg๚ฮูdo`Zง= Eฤ 
7w้rsด๛์g๛ทฑฦ‹เป+๓WฮตฝS;I€qŠล*R'IXถPึภ0qmฒดัH˜ฌฎ(–ปD5ิ8p$วย ูศ8•GเฐeหXชŠ"ใš5ISฃŸ`ชJJ.FgI;K ยแฬbG%zQ W;q'B8JTษ5้*‰KิP:ษช,RศRศำeศฎี„“F2‘ ddƒ์PบะeF *ฮ”-PE–C)[จ:2Mฆ‹-)3]ํ,๊la)ƒดK'' "#z5ณฦDKณp”คโB’ฑ*R eึจจส \ฺpฅถฒU)ˆ‰jQ6,I( 8Jค”3{fยn•ต-]b5"E–N™ฐฐ”MเHlE HหีVFภ]ฉ–Dqื5ลQdA # c‡:งำ=zŠ&ษ$อLX`H‹ช(R„ฐแ$g-Kฦแvุฒ-2Q"3]ำ๕RB‰ชยหAถ‰;ูช"ฒDuTิJE4 ปFV[’#ยQชํฮช้PWcg’edHQไ2ฒศฆ€„ำdI`ู8‚BXธE:ศ”์p%2—๎ฐ3ฐฬd!ƒ8ายJฅ%Yv:ซคH\TรN”„iย$ฒขHM15MฆBชฒรยู‰Tˆฐ .АŒฉำ่ v%P„ู p(k45ภšŽฎEJ!…Š\" ŒฑาLจRj^ะ ‡ซา&„าุRาขต2)QV&*(„ญ.ช…IQFุMdล!ฎ8lฉ3ีY(ญหี™t"™ุRF”ˆโ€b't$ไ@ห„Qgภ–"Œำ™i#Šห\bTC.P„j3UตาุŠTฉูZต‘ €ซณFโpDA%%ืJWA5‚ฮ™*FHศR#7‚ฆเถ-ก›ฮE`c'N …๙xห_?^Ÿ๋ญ\นx๓ํ?[Y๔t๋้hœั[Xนx๙าอ +็็5ฝ๘ีO>;๎อ&ขi๓tt๚เัซ๑›ซo^ ด๙|gx;ขอูt๛ณ/๗ฟ๓ฝีต7ฟแธ7๋mฝ–าฮฏ\พ๚ม%o>|๑loT€mcšœN^~บ๙_Xฟ}ฝื4ใอฏvžoฉwษ6ณqท๛๒7›žฝด|๓๗tOj–ฏบyซฟ๓อใํใูl2ํ๗ฏŸฟ๎ถ=}ธ}0;ฎฝษdผนyจปWzsฉื็ูึ๋ฃแศอŠ V,ปQFมR*ร้™Td“ac6ุถmc0 šาดM”า๖ๆ—V.]ผr๗หทVšขัฃ฿๖ๆั^3ŸงวŸnŽ๎\ป๒๎{ืFร'Gฏ†š[[ฝv็๒›sUฦรg๛ร๑ i&วGฯถง฿ฟปz๛[‹ฝุ฿ุŽฆA๗๖Ž๚g/พ๙ฦฌ‰๑ห—ฏวi๔6ถ `+ฆฟฺน>๙โๅหืN๏๎?๘l'ซ๛๋o|๓์J<}b/็๎,ำ‡?๛๙ษๆ้A,ฎZZน› 8ู988Ž3K+Wn_~๙dฃš…;—ฎญ.žG€‰hWฎ\~s<๗rgi๗h4“_์ฯnฏดใฃัdาeK 2#‘l ƒ…MฆLHภฅ?8w็ฯp๋<ว›ฯž฿ฑปqRsn๕ส›ฏชŸ‡7๖NŽ๖_๗^?|~๚›‹Wฟ๗ฃ?}ลNwฺ5sgฮ^ฝผzฑ?|t๓ล๊…+ฝE6>๛ลO๚ซož—…าฅ;wkตฐmcภถม–๊แ๑ัฃg[ใ›—็.ฝ๛ํk8zvผ5royํา…3—๒๕'๗๖ภ`0Œม€1#PUJn6ี‘ิโJฆpฉ!@m4m ัึnผ๓GลŸž๗Œำฏ๏?lŽ๓๚๒๒ฅ๋7Wx5ข,พ๙ทฎ_XG่pใ๕๖ึฮI๛ฦโ๙๗้๗W๑““i.xญw๎\> 4๙ฝทืG/_<~น:9๊ห็๕ร๙z:Mณ3ฦi:ชศT:.Eุ’ @ถฉm๓๐่๘›ง{zํb{ๅฺ%IG_oพ>ู๖ห|™Mง‡Ož]zw๙ๆป฿9ตOพุOิ๋/ฎ~๓ส๚ษำ?ผ<~>0`lƒ 4ƒ••ตsืืถŸฯฝN†ำฝำ้ัฌ.อฦใแiๆY@ใฃญ—ฯ?8{c฿x}๏pใิฑธv๕๊ๅ๗ฯซvงพ~|<›ฏฦ"ฺลหoฌje~se๏xšไูaฮ™t3œญJฑ ฐ#kC'˜r"r็Ž"ŠำVBฅ8คฦุ),[Yม‡ฑ‡E–่ŒkvnีZฎ2ˆLehf‹ถ2B!ƒฑฉŠปŒTม Žิฌm’šY*ีูuYคข†0i่J‹”ฒqgิLŒdŒ… ถ29$œ8ƒ,JTญ0`Šœb6ถมQƒ;;WˆP„ๅtสŽHaSPฒX‰dp: ฦ)T„1–ป! bK)หีF –S’6"$2XFต aฃjAY‹,ู(ญ4ฒ",h2Bส'™d”‚Aสีค…‹L'E8Œฒ์Tvrc!)lฐมng€pล•,iJATJุฎ ี‘€ฐ$นฉYซ”ไ€โ่YuธKEˆPฉช&p’iนฃTูขfุJP*BุถHŠ6ฃ#š \ฅ.dwตZธ%MญีจQDศ–j็"K)‚ 3VJ€lษVฆภ–‹jj†!"#dห .ิˆ๊hาถ hไ@เYษฮv’›gฎrJถm+CiB”Tฦคิูชvฉุ‚ฤ‘$Hช-[ลB)c“™–E42คฒJ@ig$ฅิiื@ ืฐงฒม’,ป ‚$u*ูYฮ(.RM่)าvPขฦTขQฐฉ–ฑฒ*mEุฒ"23Tl FVาh&H\m0@ƒœI€‘6ีช4„šฦเ”%"Mfญ!ฦ TCฉ2ณ+ …ŠEส8ศฮธF$ุย‡•5”แภˆ\mชล&SI!ฃกญvDJฒfฺnไb”ฮฺั@Dƒ! ำ,Q“€*Œ]ฒdhSŠHู‘rSll…bnnnผ๙ปฟ๛ๅด›พOnฏ.ฌ]บปธ~ซฆJi›ถm=9x๖เำŸื<ž.]hšVŠˆR&“๑ๆๆืงทŸด1xน๙๚`ิ๕–JSบi๗ืื—๘GWึ๏|๐ั…ฃัิ.ƒล… ฝํGฏ  •B0โหW?\]?;ืฤษๆ๎ฦpฎ,@Pย๒๑'?๕ฟ๘เอ•›฿h๖{“ั,ส`niพืซ‹O&ณIํF“ใงฏŽธฐk5|พ7™Ž&ณำ็๕๊อAซ๚bso|<*j ™ิ” „dGล2AJ‰lPำ_บ๕'ๆ|I E)mำ๖{ณฟ๋๖๛ญญบุ๋ฆใใ‡_ํ๕ณ๑ฦ™›๑ู;ฃัLัŸ_Z์ir๐๔“ฟ>งัkฺ2wถ๗N฿Z\hํ?:e์A๑๐ไ๘๑A\:ดŒŸm์Žฆ•RjJใ—_}ziypๅ+o}ดrแtชมโ๒ย๒\|๓ลืž<<^^<8ๆฺซ่G+๛ณ้สน๕๓ืฯ๗`„"_>|yแ๒…ีwืฎผ๓วพ~0œถKgz๓sMฆ Ž^,฿ม๗๏^iปัแแ้ฬฅ๕๕fG๐๕๖dnm†ฤ‰€tษฌ(%ฌHlภvfึš3Gฬน๒ฦนk,Aฅํต}uปฟ๛ปฯ_๎ึ๐๕๖?๛โโท฿_น๙Gถ๖้dšMo0˜D=}ูธฟ{t0ห ‹๋ท>ฃ??{g/—ฯžฟuนฟะ ภ` "tzธ๛่›ฟปy๎วืVo|็ใ•ฃำZšม`ฉือ๖ฝ๑p๛0€ก*&ฒ-"Œ0`ƒ6•นป๚฿_?žKQzƒ๙ลฅEอŽ7๐‡_l Ž>|โแฦv๙mใ๑โๅkWฮ]X›๏1๐ึฝ๛_๙ไร?บp๋ฃ๕๘?t๛คY;wvemu!;(.W้ฟ๚Ÿงทื–G[›[๛นดzํฮอ๐๐๓๘ีึฤ2ฮ’ฃ‘%H!„ Jฃ“ใอฃ‹oฯIp๐ษๆษ๐ธ”(ลต>_^<๗รk—ฏผ๓ƒี›๏Žf“,๓๓ssƒV๛ำƒร๑๎ุ€ภŽฅต‹o฿ฝ๓พ>O›ๅหWฯึํg[_๓@R/๖6žไำs๖ใ๕ตปWF'Sิ,-ฮ-L๗โ็fฃ^‘`ำ๖็ฎฝ๕ง฿=ฟ”oŽ[Zนธฮlผ๙ๅฝญแ๔ฌฃHH€ม„lHJa…(A„ฑƒ œV&B"›$SยคC!ห`I K(ฑม`ฉฒq‡ Bvุi# % !ฐ3 lห6ถ…ถSDจXi ศ0ชฦค‘ ,[ฆช”tุ€ $ŒŒ“”’BI$V&ถก–ไ0Kำ™ุH– v8%›ฐ”&,ƒ%…%'AโƒaGb› CBถห&”-90J„]"Rฒvเ‚%;ภa–SXTษ„ย&USUศ)c€P$&„(6ค@ฒm ษ"HR‚P#@ถŒฐp%Rr˜,สดย)าฦD œvb [ฒซ%!A‰Š 6€ฐŒ,กD. 
‘I( 6$ d” ;ำ‘ฃaXฦ&mHุี–ˆฐA,ค4ฦ ไฬpฆAฅDBB!Q@`ฅ+FBDZ)Eํ”Nศ@ ฐKก"์t: F`c‡ฐlD Yุ’FD`aPMิDุ ฅ ค @lษˆคฐ"า,”V&J!pส–daPbDZ‰R1ฒ3Ui: :e)„%%(…ย$์,าศ*- $N!ฐq 5ชย p#ษv‚‹;S`P ู&!qฐฦถŠP`่PbƒX€…ไPmŒศถา`Œ\Jcฦ’D@`ฐ‘m0€ ํmุ@ €จศุะุ T%Na$Jš0คฐ„ €คฆํ- f‡ฏ๎๔ง[\yฮตkVW๚ชxxฐ๒ีณ‡>zฐurา[๔!ˆPณnฒ๛ว'๏ผณะL6^l๏ž*ฺFR[ิ๏ถ๖“Ÿ๏ฟs๛ร7.^]]]๊ttz๐w๗ฟผlo{Tดœ`ุกRฝ่ษห๏ฎ- ฺn{c{g๗จ Š0ŠˆVyบ}๏ฏๆ๔๏ผ๑๖๕๓+ k๎f“ำ—฿|v๏ƒื“ฃฺi:™์l๎ฒผ์ัฦซํฃqGำo๋xvฒ๗`ปผT๊๎๓ืงiถ!ุN9@a‡,*คŒc€€T๚Kk}ศn6n=๑์ฝ{OถดXๆๆKB7<ไ'?ฝ{๗ƒ;ฏญฌœi˜MN^|๓๙ฝ?|๖๕ฮas6ฺถH1ž๎ํฟ˜\ปvOŸ์N=-ฝฦpt๔xใ่ฃณ อdcใ๕l๘์๋฿}๚‡“ำมB๔มศHฒTMถLศ$aษX6ุฮ้p้๗|๔๖ทnฝy๑๒๊า™นศ้๐`๛๙๛ฟ๛ํWOทบy๗ๆ๚9ู}อ_—ใํฟ๕ีี•ตฅpžผ~์แร‡_ฟ8ชำูฯ{S฿ฝ~๛์ีปgึŽถททฯ็๘ImฬFฏoฟ๎ปwฟ}๋โ๚๊ย™œw7๎?ผw›g'ฅ[ฑ1ุ` €ฑฑม€Mทฮโ*a…BBถœ…” *1ทv๙๚ิ้๐t๋ษง๗>ไ?‡วร<๘๕/ฆีเG฿y๏สํฮํ๎m|๓ณOฟพแ›๏\๎€@E๔ฟหVๅว}ๆปƒลืฯฟีฏ~ณt็ึwฟ-Œkฮ^แณ/ฎ>๘ํKwฟ^\gฃํ๗—?ซŸนy2ฆ„ํศLe2Xแƒc€ คVใ๑้ฮ๏Ÿ฿ึ€}๖l็เ4ีดŠhศA›ฏฟ๚ไ?ทพ๛๖ญo]Z;ปึศำ้ษแฦoพ๚ม“}ฦ€mม๋ญฅ,๋าอ๕ณ7W 9์?๙฿๙่ีv]’ฺFh็มง?วท?z๋ฦตี•‹หส๎๔h๛›๛฿|๕ล—ทYR4’`w“ั๎ำGŸ-t๏^___ฟx>ศ้้แำ/>o?‰้\(ภภย–-LI:…ม8DศnlHHEฺNŠ1B@ข0 „…ฑe;mฃ+@Yช)02%RP!3$YX","’ฅtšL Eฦ)ภŠช,ถ%+$ +!lง‘…H9HกBHฐ%ย)')ไH@ ยฒญ*ฒp‚ภ"- 0gJ•b%ุ€lY€ ูย’ชH nชป€™ถญD– …์ภF2"ํจaฅeŒqคญ4"หdš ฒŒl‚”0N#ภ Œlƒญj…J–Tๅ*.i Bvฦถmd$dษ#$l[ุBuŠ e [id9 ™#ษ’AJ,`aYฒ3M@…• ใ"l!R&%K€dก@H†ดข CM„D6‰"Iac‘ Aศ8eXฒฌX’ฑd0ย B‰รฦ€edl9‰ r`! ห์„t„(Jc @‰…ศถp@N ”จ`Iฒ]$ccp•###dv([eชย8* ุ ฐqึฦ8HR8%d!;ddหุฎค•ฆ:Š –‚ฮจ1ถฌŒDยiห)ู$##I–-lIF“``ƒqJITSษ ŒB ศุNM@ฑ’ฐ9‘ภ`ศp‚2HBย2HVIgb Pš’ช&œŠPส€@ฦvš€DŠZƒY9l ะ๖ึพqF )l+-ฅ'“๑ๆ๋ืŸoพ=jื†ง'ฃ™š๖์๒๒ ดัุไl:'รfyq๙Lำ4ุฎ]7žๅดทึ๒ฌวม๎่$็<ท Efฮ&ใฝฃษยนฅr?ฺ€:M†G‡‡ง,ธไ`ianpต=-“ง'‹๎๕2ณ๋๊ม —.อญ6.ง›Gฃ๎0V^่fคYผ0๏ต8ฦฏGฎiวรแัุหKsk‹ฝลV=ช๋t2>ู?N๛gข?Šqง™››็หผ'ฃgอBVฆํ๒ามดln—q,Dฏ/ ธ}y๕ฮ•ซkgW›~A .’'ส.๊ฆณƒฯž=zu6ุvๆฐ–~ฏนธ่าu` าูอf“ัษษแษค=3ทด๖๚’2๋t2=:–๙ล๓หฝ3า‹tNFวว‡G{ua~๙Lำ๖€๑ธฆส๊นม9&พุœ-wอ|โq ฺม3ต๕ัๆpุ.eฏ๏dF0Xxc0้Oท7N{C Lฬ(ั›ฟ9?jปWฑ฿-ฮ-ฮ.z0Žvดšฮ“ใQfiqpnก/ udxrธ0‰ฺ[,m:ห‰ฺซซ3=5ิูไdwr8ํ_ธพrก|~4บ9wŽr๖ฬนน่1NNท7† ึึฯ๚๘ไ๐่ีlก‹vmกwfP๚ๅt:<:>:ซ‹sKgJJ`mq๎อ+็ฏ]ฝ:ฟ8๘ ‚“-[ื๓Lจ๓yฟตขี)$ูฒ-)ะ E›[€‘ ธ@`$— :9า28IK–Uœsv{ว๚ฟ‡9ำmฏnีฬณฟ|~๏๕Ÿๅ๚๐@ใ8ž฿๚๒๐๚๎ร๕ซซำ๕ิ~y๙๒๘๙งฟๅๅๆ|๖t}ฝŸž??oพ๚ูบ=อ8๖ๅห๓็OŸ?{0๓ไ๚๛ืื฿ญู๋๛ๅ๙ำวๅ฿ๆ็ณŸฯ>ํ?~ฝ››ป_ผsืงง?๘ำผ>ึU’}ฯฯ_>~zพ}๓๊ฏฎฮYฝ/OŸ?้ำื—›wW฿kฏOŸž>=x:}นพ9v฿๏ปฟณฮฯž>>ฏ/ืwษpš๙‡Ÿฝ๛๕ฟ๛อ›ทฏฯkMฅลฌผ์4Mดy๙zyำ๘ำ๚งง'ฺ๎}z๘๙฿ฏ๖Ww—*€//O๚๓๕_ๅ๛”๓ีU’ฏวี7฿๚W7oon๒๒๔้Oฟ๛ง?<พ๛Ÿ๕ปWฟฏ๙ทฟ}ผน|๓฿อon?ๅŸ๙/?x๛wฟ๙๏๛_dฝฯื๚ำ>ผ<๕|๕๊฿ๆo๕ํ็ง—‡๋Ÿ๗wฟ๚ๅๅ้ฟ8๏๛_๒฿๕wฏ฿ฮ._฿๔o๑sฎบฮIภฟ๛๕ฏ็๐?๚oํีZ'“ฆ;[œ๖Ž>~๚ํทฟรฟ๘ @หหห๓…›ื๐๊หสำ‡๛แS๎.ืฏfวๅ๒้ร‡gW฿ผบๆ๎|{๊8๖หืววŸ~๚๘rผ~™W7wื?{›หใำ?x๚ฆsj๛ๅ๋ๅฒฝบ=ฟฝYืงฬzy๔วO—ำพ}\]ฝ}W็/7ฯ๚ห๓๚ฐo^.—ววง—น๙ซwทoฎืีช๕๋ำง?๘ดฎ฿~“Yนn~v๓t<๑'Oท๛Cฏ~7฿>_ž>๐——?๗๖tu๚ๆ๎๊zงฝ<y๐ำO>_}w}wฟึJภฯ/ท๕ื๗7ืmqdŒฬŽV๖Nv’tณ;™า๎•ฦ4ูซาัFv{‰&ณ2:]ฆMk๏ึ๎ฺ1ูtC&‰ะE’Df๏c๗hฮฦjำhบ9บฯ[d “ดUimiบt]ฎ้ค4ขใX]›ำM๖ฬiง;[บ:ำT๓าFWยt7v&ก\˜$ญา9_gGf%Kฤqูศ`ทํ$3KY_#ฌ$J๗ฦ˜:.็ไRtถlบคอ์5LZบAŽฬ%{m'3ขmใ๊า}ฒ'๖ุโ #:/MณฏHZ%้NvgjRMฒFEร6—Tštค=ีn'š\D]vc‰I้N6iตtตญถ’#3ณ“]ฺtgZฃษnl‘ฤ๎aŽEš”—๖Hfeb${ดY๖ฑซ{ถH'=จN2ข™švwNkท๛ฒ[ไ”งJ7Nwุ9-™์คUMkห&ณO—ฆ1kOEZขฃS1ปฝdŸZี$&๎tjบ.ึ‘6๖1Kคบฅษ ว๎ฑEึš9น์หั,ณ’ฤํIคt“5L&—™ƒS› „Fk๏}JjUi2—๎UณูIrฤ‘}š=“šฝw”์ไ zฺ9Mฒำvวดำพ\#ูั•iฌๆb=้คัMฅ๛”šฺf๛ด์(กฒๅ’ฝฆJ‹L9˜์šาmdDWz$htฮฺje'bๆ(;v&[๖–šฬ๎Ž=ฤ์ษ^iํ๖ˆฮZk8&eฺtwทอ๎4t๋–!‘=%ด*ฑึ๑rqูL&ำ={v’;ำึศZ้ไPฅอ–Mาฅ๓าฬฺ+;)TTโ8ฝy}—ำŸ•$คmZŽ•~๒‡?๑ท๕ฟ๛แส>Ž—ฏ_žžŸ>z~z:./eญuuu}sw{w}sปNง 
ฅ{๒แง>พp:ญื฿|{๊ี๙t%ั๎ฝฟ~๙๒๘้รใ็O_ฟ|i๗ฌuu}s๐๚๖aญ๕๘๙ร๛_พ|นฝx๗ํ๗ง๓93กh๗>žŸž~๘๓_พ|นพฝ{๖รฬเ8Ž๏๔๎~x๕๚๕oึ้ด๗๒๔้ใ‡็วฯ//_ี้tบพฝฝ๕ๆๆ๎๎ดNธผผ|๚๘แงL฿ผ๛ๆแ๕›ำ๙บ{y~๚้/z~zบพนy๛อw7w๗kญ$๘๗ฟ|๓๏ๆ๏พๆ้ผjcf๖ฅ{-3รๅๅ๋O?๐_๛๛๗m(๛๋—็๏๚๘ำ—ห‹V‚$kฮWW7www๗ฏฎฏoึ้”h๗>พ~๚๘๑ใใ็_ฟ~ู{ฏuบนนฝ{xu๗๐p:3ฃฝ\^>๚๘‡ฟ—หร๋7ฏ฿}suuMŸŸž>๔ใ็N็๓ป๏พฟฝ{8Nmฟ}บนป{๛๎๋;๑๕๙๙รO?|๚๘แ๚ๆ๖ํ7฿ฮฌO฿?~ธฮ็w฿|w{wor\.OŸ>>}๕๋—}3๋๊ๆๆแีรซ๓๙jfฺวๅหำำงž?—‹8ฮ7ww๗ฏnn๏๑๙ใ‡O฿๒็๓๙๚ๆvญำ็ฯึ:ฝz๓๖|พzzแงศ7฿}{jNก์ใ๘๚ๅ๙ำ‡๗OŸ^^.แ|uu{๚tu๕๔๙ำ๛Ÿ~ุว~๕๚๕ซ7๏N็๓๛๑ำวŸ~๘๓๛แ๕›Woฏฎ&เดๆ๊ํฏ๎Woพ=อ)›คฆGง“™ิผ|}yง๑๛฿ใำ๓3ฺ~๒ๅห๓ใๅๅE Lfึ้|>_]ฯW™ม>Ž—ฏ_พ~๙rฌตึ้ผ๗วฑN๋๚ๆvญำหหื็ววถืืื็๋›ถ_žŸ^พ~Yง๓อiฺ^^^พ~yพ\^ฐึ้t>ตฝ\.‘›ปu>—หหื/——ฏ{o5kฮWWืWงำ9‰ๆื๗ฟ‡๑7ฟ๚อํdกำ=ล:’ฤ๎ใใใ๏๘o๔ป?แงฯส>Ž/ฯO๏แำว๗k~๓ํรซ7งซซ$ะ^.—งวOŸ?~|~zผ\^"็ซซป๛๛‡WW77วq๙๔ง๏gึป๏พฟฝ{˜ต่๑ryz้ใ‡/Oว>&su}}๗๐๊๎ี:^^พ๘็?}y~ผฝ{x๖อํ.//Ÿ?}๘แ๋—็c3๋๚๚ๆแีรร้๊ชํ๓ใใ‡Ÿ~๘๚|sw๎๏ฮWืmŸ?๘๑‡ฏ_žo๎๎^ฟ&3?<~๔๒๒ตปkญ๋๛Wo๎๎๏ื:I฿ฟฝ๗ฟล฿ี/๎nฏทcwษ i้วže'Mข้N#“ึ>„‘Edฺแฅถ˜Hฦ^๒ฒgข้ถีtQvvƒ Iัƒ$1ํ>ช9uwอNจnฺcฺ#aeฒฒง[g'•-et๖4ูv aeŸฒŽ—ถDง5ฆำ6fgšD/๛H;ฉYvาfBชซIZ=ดอtึa๏V3Iุอ0ป:fูฃ{ๆซ`5ฉฌถ=JลjN‡—ซc๖คjo“ูป;cญศุบ7์dทซˆฉTLื๎^b'MšhาHญCฺ}"นด{›ฮจูšู#ณอ •ChvSQ9h3mb2“vบ4ต"ฑว‘d๏๎9‘ตทดk&ูฅMwำZูY์tงI&ุGตI–„ฉP^ถฬahณอš=vทvšD#[w*!‘ลDำ]3ซป=vีิาห้คีMmmwฮ“YูckjvZณ ณAฒ“ฆ์t6IWบrน/ษาด‰DŽต;อฮ์Iุปป{์LX9šI’ญ[ษjdบ›1+ฦemฦLRาZ$mทฦLŒฃษลฌ!Dลฎฃ]ฆYปvฬžฺฎNขvำต’‰NMหึ =U&m;๚rJŽd'จฤุฑ.ึึีฦๆจ4ำ$ว^4้คiำ}ฺ1Šฒร4GSัdณ+Mบ“™IMwฃR้ช5๖ไญM็]วAM$คmบi3:Cุู$“}ฉย’%ฺ uิNdu๖นmณ`บS#†rd ‰œc๋&’LcทRƒษžhuซ๎jฌ‰ๅH1;)[{ŽI3MซZฆL๖๙ํ๋ำ]œŽๆBถdvœF25mfญซ›ำี๕ร๋ฝw!333ณ’$ 2su}๓๎ปŸฝ~๗]’uZ3KIfฎooฮWWo฿uo™ฬฌY ทฏฎo๏บ๗ฌ™uส $™uswณฟ›๎ฮฬZ+3ฬZฏผป{xMgญตN˜ตn๎๎ฏฎo๖>ฺB233kf’เt>ฟ~๛๎๎แึiอฌ$Y๋ๆ๎๎๛ซฟู{gฒึif$ฺ$‘F“ž4]i8ƒ2sพพy๛ํwฏ฿พk €$’™™Y+I@’YWื7ง๓ีรทปDffffญ$ฌำ๙แ๕›ป‡ถkอZงฬเ๚๖๎›ซ๋ท฿~Ÿ8ฯษH’\~๛/๖ท{fึZ™มีอํ7฿อ7฿gฒึ oฏฎ฿ผ๛–œNงฬเtพบuบฝป฿m1™™™ตf็™7งo^ฟm‹L&ƒ7{gฒึ)ษฌYงทwฏ๖.2™™ถ[ฒึ)ษร๙ํํร๋๎MษLfึฌ•lvvำh่”ฺฒKศฺl-U€dึบนฝปบบ~๕ๆ›v—™5kึJ‚$+็‡u}{{์ญE’ฬฬฌ™มร›ทwฏ๖H’™๐๐ๆmbึJๆtu}๗๐ZœึiึJ‚0k]฿ฏฎ฿์o"™Y3ณp๊๕อ=fอZ‹ฬZwฏ^]฿ัYkfกP๖8r่ึvทš8บ›=FO‰I& ะ€IฎฎฎฮงS[$ษฬ `ญ5ื7็ซ๋v#™$ฺ*™™$WWืงำY›$3X๗๗๛๖6™™I‚™9ฯํF2Ih 3“dฮ็ำ้ิถ%!3I’@ Tฃรข;ขฆZ=d=ฃ˜ตฎo๏พ=_ฝ๎๛ศ:ญ5+ Yงำร๋›๛ฝw5HึฌY“dึ้อ7็‡7๏’œNงฬ$!๋|พ{๕๚ๆ๎~๏]ฬdfe&ษฌ๕/j๏ฮฬ:ญ™…๓ฬซ๓้๎ีซ๎R2“™5k!q{wu}ณปgๆt:‘$7w๗Wืืปฬฌ•ไtพzx๛ฎ{#IfึฌฬHPขฑวึ–ๆฃ4ปณcMาีฝ+ว$‡HWŽT๖‰RํˆDฒE’/™eณUdŽภLMํ:า#™ูvฉตH๗ไาฎ:ีhR;mาtขiวฦ)ำŒdบณw๗dfKํv:m(•ตฯูกdหฑ„TทVFฏt;I2cหฎ$ข@’,Qูฤช•ฉT;ฺไศ์ัh“ฅ ฃ้ql•d†ตIด“†่XšF—N6]ฎ#œ2qฐj’ฌœzฺตฉม0ํ‘ถmำM๗ฑงU!Mšคฆ4GปK;![ฐลži๕ุู“c๏™Zป1V๖I$IฒmญŽ5kV[ต“[ฏ4Gตา‰™้ž–‰ˆ„้rไX‘Hต'งฦๅจฝs5ี#mb ้œt๋นDF&ปต5z4ขcOŽ9N;aTตณ+4ฺด“Žฬd›คณww’ู™nป’ูณTาK“˜’lูฅำtณำ-aูหnv“dาฆQีd ƒV0 leWšiด‰m๖0บ51&อะhดu”™a‘h‹ษlMW*M'M—cE๕ผํํภ$“๊ห6$’$ง#ญ่ +ญ๊@ฌF[mZ•$;+•สฮฎฝ›9v4[ฺธ$=vvt๖.•le';=#Jkwว,M’hป“TงNปyaํ4ฤš่์์ษฤฤ–=ููาsIข-Iฆ๖%“ใHkฑ์8ศ4ู!a8๊=ึไาถฺ)j์ุํ๊>๏์้jดูาัค{Rตฒš‘ฮnฺ์•uชฝตฉ4i.1mzฒฏFcซฮNš4Kฃ๖NwIึZ3’™•๓:AHBfeึ$มZkf€$€$Iฮ็+ €$ณึฌ$ฐNงๅHdf’ซต€$@ฒr ุณ“ฮ2+Žv;ึiอื๎vGืฅณcf&9;’’$Iฒึ$dญำฬ’3“ฤ้„$f&็3H’œื ’ดN@`ญ53€$@’Ykึ,dญ™YIฺฬ,@ ถG Iขี.NT๗J32 $!ณ2 €$ษJfH‚™™ภIdญ$$I’ๅIะdึB3kฎ’ปc า•ฮœ.ึrŒfjvวfฦL[’2ณhIดM‚$3ƒ$@ฒ’I ษZซ$ะ6 ’$aฺI์ฆ=กฃIXk;ำฬฌ™ษ039ŸOฮ@H233H‚D’u:I€$kญ™€$@’œฏ€$@’™5ณI3ง 033W@$Yk’Qฅ'=วŽฆ[6]หฬฑ“๊!Y‹ฐณk'ร๊Nฺ0ปฑฟš้,ยN/;&อฎ้F—$ฑwถ1I๋p่DB[ซ๛ด3;ยด™’˜lาๆฒ3*v‡iาIiตY‘ฒ+!๛xYA ด+๛ผ{œ2ำm๏รูdๆ˜4lฺ&Iฦไจ์’$‰ีฎžŠ์fทยLฒชmƒFcว6Sะ iW›๔ˆfฆยŒrT9N™Œ–Ckwe6{๏t/*mi‡I๖ ณ:ญ๖่aMŽI#คM/ณณ›Ne';บuฯ์ฌ่jณ&SSŽฝป;รศ4:๚rุ‘H„ษ>eํY3Žู;ฝฤ)ฉูI9าmOซฃ3Q0ู+ฤN+•N*ปIใhw&34mv“ฬtี™65iาฝ]’ซI7›ฃmDโดI›Lš&;ฺตSญ#mทฎ4V"าv7ywŽมž6H˜4ถๆุฒ›ู3-;šˆF›i–้’ˆๆ%IBiWzjคว$ปf๏fMŽYMบู:I“๎-m›)ฆ]ฒอญ ๋$ฺ]Hv4M’.Eˆ์cาดd&ปk‚}ุํXาSrิถ3าธดmงดEฃูk-+า6ญLf.kŸถ่์Kอัt:sIw){Vอnwf๋nwkหbvb์ฝ{๖œ’K’ Y/ฆŽีญ—้I’ดูตง;G[ “HZ๛ า์mทณkิn์ถ’‘6›&‘้HD›˜๎nGsฺŒหถw+!BN5่J์ค{v‘KnMLฺdMถVvWปŽไ˜9ตั‰dงi]vbgœจCZMŠI[25Mw3ำK๔ค™#dGา–ฝs‘Kฒ€$ ’ $ €$ 
ุฒ›ถฺjiฺฃIด;ูPI’$ €$I’ Rูํ๎‘œค;ู“ฎN2ต๋hŽ(’@€$@$IHู์D2™ต์u$ญ]ถำ@’’HH@@@ภธ่%วH2จอžI/cG31@$@€$IH€$$$Pm›6ป๛ดvL#iรษู% ซ†#ถ]‰B';YปSบ ้จ]้0‰บ4M 4’Šฃ6eh4&{zIgฆ;UํVsค'HกŒq์d›ด[›N:วnu&ะ[ฺู:+ษlูvutYจส–Ng๏ีLฦJชGฺ1กบKŒุLGณปตาh “จQ!Z-ํtw˜foBฆMบyูiฒšA[m์ฎ้ดข(ั}ด‘„ูm 2“hณ'ศฆ“=ifwgณปตถ”dCš8™รtถJFงmw™ษ1#กี’ Iอฎ้–˜™ฮฌ๎์ฬฎjvณcฯCwขs์YฒVฒญVึŽdK2วNGdjh4้fVš่ึŽฺปขฒ›v]“!qศ.!ThGm์ช„ฤKLvบ3ีฬ๔RmตZ's$v‘Iฅ[บU;ยฺปีฤ${ฉ์ฆ3ว)k&G4zŠXบwทidŽฅce1ูว˜์*๋Hำดiีฮ*›F&i“&ัถสh์ฺ์vL๗ะ8ุ๊‘œwd7ษnt4ำV ต[‘Hp์Dล$EeฏSD›4#บึพ่กปtํDฺT›ˆaํดถ™IT ‰{&Uปฅ ‚I’ไ` sN๛ะญฝๅ๏ศ.ษ’$mๆ๒พ˜ๅ†\อ*พ}[–๎Bšญ฿๎/ง฿"ห[4๒ˆ[nID:ฑl9ล"ึn์‚mYิ'7ฤK7฿ฒ-1๋ๆ"&ฎ๛ฯํ.’XlถืMโ‰t–‹vฯNฟลnฆซฝ๗ฅRpjณ๗อ๙“_๏‹ผ'฿wษต—นนํ6Yฌ๎ปJาK>wบฦf`ใb k๓qœษฒ์[n6c6ุบuำKV่วjนNฎ.w˜1Hˆซ฿–ฆ˜m้Mฦl˜ษ้/๗ฏ๛O฿ฺํธู’ฅถ›๚b๑่'Žำ$ฬฦ–,ูj๛[bA‚ซoU‚%‹tษ{หูz{YปถdI:วf7ฃ6]HNบ5๛้Q)›ๅ๚r—-ัe'ว_$ฦgฺปฬฒSxŒlฝอพL๚ปๆ/„m;๋\/ษARู๖]ฺHๆแฮfหัˆฅซc฿',wฬบฮ ณ5๗-išหŽXr‘.ฬไHฏYฎ–“nv™,_. ข%GvษRฏ™}แฅบšฑl>ษ<ษnูz^_\‚Z&๋ไz๋๗&ซผห]>i๒NFะณe๋-ฟ%ห7“]\บ.Ÿlฑ/พ๊y—nบmทN#๖ลw•ผ-;br!ษธๅ’ฤ6–ส้EฆL–ปบืMทn‰Hฟฮู6„‘\2ัหnษO3‚ศ—๑นv’๛’4cฑ8๐โ‹ฯธIูฑู$2๋–m๖{9ถ-๋_rนlศ'N^ฤlw“ฆูถลf_ย-Ky‹ณ›[,฿’›ŒัY>˜จู>}‘๗ห „Œษไ๋ล’๘rkถฟog6g"G“,=qyนพตฏ๎ว๗Iำ "n3ฉ<ณ]ๆ5D.บe‹X/ึcนTฏ๖“Dฅf2r ษำทไ6n\บ˜๕[6ฯ9๏าOบน›ฅcทI–Mˆล ึๅKRƒ%"o๑M‰œ้V{น๏ฒ‰ˆu็sนI€,ศบ๛p’H’ณหฯšU,[š—eํฑ –Žํ๖ำw๎ฒฤW_Hโ^:[6ท$Y€ไZ’ูใูdำ๏๏/ฝGะ‹{•ื5๎๎พ{IXŒ{๖ษ™†๒ไ?%ท๔ผE_4ษ–lภbYชzžŒfฑฤrฟK๗2“K0ฝ$บ.๊๋œ`=._ค‰ไฬึHค6? GวvqMždŸXu^+ึ}[ฝ#•-SฝE4“\,ปห/9V"฿-.ษ๗ํiทฐไ–cV$JšฯK{฿ฎ๑ศn๚น›'.ูอgBzMหฅIุ’ถ‹-_ฮn„p%฿์็๏O–~ ์ฺ[:๛,’)/฿ป“Lย’{ 9๛|‘xฉf๎4ํๅภe๒%ณลี*YwO๕๏[๒ษaน๏’ด๏—ฮฤ2Mt๙–[*"ฑญ[‡’tํ5๛}฿ฎฑ์todŸ›ผ‰ไDา_vัศr๋!ก™rปฑืsœ.*๓x;I๎ูMX&หๆ๒',คy5~ณลาmrอ^›๏ุ๊ๆพฝผื๋>็{I,dBาoบ8๙ฉุ&^ๅ้ทc ๓ูตู{‘ๆfึฯxXๆIn๚๕ษe—x์wœฃ๑ฤYถธณ~'หk’%“ฦษd;Nง฿m3ลผ่“_#aน+‹฿พD—lษ5{ื/!‰%‰7;ใO๔อํ;I’๔หd1Kฮ–’uษj-ฟ๗|หฅKฤ]๒คณ…†๒ษ5%ฒU๎^ณF๎vw]"หึ฿D>f@I"วูบป7$ฺmถตูง'b๋๒โUPืไ;{๓-ใ’Hฏู]&ั฿b–‰%‡]]“’๛–\์2๏ฅ๛m_+‘‘. %ฒ นl฿พeอm!‘jไท๏ถ]"5/ฝgษ_bฏหวๅ’ู์ำทX <–œ.‘ฅ–อHŸปป2„อ๗ญIษb์ฮ:น/;ปฺฒซพmf๛ผh:ฝFE:B^ำVํfq™Œ•tฒู€ฑ‹Kn}ป—s๑หd6ตย&–f’ฝn’/ปนห*—ฌผ%r้g์ƒฦ„Dn7'ไTาฆ^ึลœฝภBทlRอf๋]H:ณๆ’ฅษ2฿WuY‚Dš›v4R/ูz.โl7iB็d/z33ูห็^ [๎rkณYgqบ%s™hEย'^šqป;‹˜ำZ-ˆๅo–LIา-ร่w๋ๅฑด[๎vc>๔(‘ผ๋ภeหMถsนืฯว*ระ|ˆH&ป๎ŠDFอnsI4๚Eข๋v|+ฬถต—DJณZ—Kf7'‘w[มถิึI—ฤeŸ,3YNๅ{[ณŒนY~฿าภ$š%ทป]sษษ%}้ญ^๔ปป6–ญqก;–tฉุ\"—&Xทf z๚—#wน%oห$‘7๗}_žj;๛f™์—ผ$้f–๊ษ9ษฝmืๆโ.ง‰Fทฟ9›LูeIBl๚1šDlปŠdหynoย๚\๘’%ฒ„ฮ›E]ถ๛[Ÿ'๚๖ป฿ภเุปฌSิื๋uฎ–ฑa3w'็ฒ^|ŸU"ฑA–lษ2lฦพOO๋—$‰๚.n๙’˜qฯoฉ=๓ลzaปฬš-†ๅeฺI6–}‰}฿z ึๅ]ขƒน#„DฌM6sหdršฎiฏ]&฿๏ทmH$†i’ทฦ"๑ฌK–˜.}qgWก'K^3แOrฐ๎zอ"–f_&" t2Kถ๐uษ’ูํฮภ2%›Ÿ4fห}ลlซt‰‰ู†%–ๅKTชkVไ แ–ญฃ1GK&J€LOพฅqื๛zบ.–$ูœh]ถ/ฐณU6‰ลwำ{ึi,™r๎“ศฃn’fŠูฒุd ‰ถึปํ4ถK#„›d ว”-น•Yrำi๓๔YขำcQ2‹pCd5“ฮถ!หาฉฬๆ.dฑRบ3Yทฬไ$d*Mท0คตmุ(ษฦ%"‘ษ\ฒลถžฤ— dอ์Eคeน์ํzท!ห.฿ิ"น`้๖็ฺž,›m6ษษmฮยู์ฅdI2ูlv}!ณถ-–šๅK<{V~e’ฟF7saษ1ฟ~ํ*$ห27K'66ร˜}“้;Sั bgง%๖ะˆญ N&#K[™~}ณ*‘ุˆ„้bฑก“nๅ-แฦog$ุั฿ฅ%0[๏kอ0d2๒$[8w!bฑ’ฅ๓ษญปํm''ฃBt้Bฯfฑคสถร๙c“5]ูrฉ-ูb‹‹A,ๆ>๙K* ธw—ƒลุJYฒ=๗^,X68ถๅL˜ F—ฆณู%’X2šc,™$ง๗$ห%{S๛-๎’&้๔…ษ๋5อ˜ฤnคiwหN7;dถŒค๗Lฌึisษ[vRฉฝl‘„ฺ๘–1`!Iš-ว๗ฅ68"I5~“Hงห–`๋คใ›g]๘,t๋bYปdŒำH|OึฎI$‹evึ\|/ฤ'›ฃ€D’ๆูŒ ณ;็ตคR €^ๅYY’—mอ้“os฿]“-ค^๗ำw฿Ÿ์žๆพๅฯ฿4{ทl}‰ง๙.ท-š๎;ฝm฿พ~s๏ษำdnฒๆฟปsŒ#WE฿-ํฃถฟทห’}ฝyๆ฿nว9ง—nfT’๏F๗KฅKฟๅทฌ™ฌ‰š฿ณู„dษO๚Wknฯ$ีG๎e๏uฝuู๗ํ๛ๆา2ท\’๘๗ฌšŠy๓ฒฟ<9๙27ทK\_พ†%=$}"ณ‘๖}ฮ๛฿ษ*ฺ้ลž๏&ษwีณ้™๗’oy[t๛ถต/!–ี:š/[ซ7็bo฿ฮ—e๗ำrŸ]ฆR‡7๛๒ๅไ›นๅ฿ฒป/๑-*ฟณ”ฬE รlk“๗๏๓๔;‹๏e๒พ๙๓W3'™๚vIvส}—ศู_๐ญํ|‰๗CถW"ฯฮ%KยaIูUAั~ึ_ึๅฅyหี_๛’]–Dผn_๋^zว;—oณไ{ฟtYํ5'Ÿฝพ์x’œ็๛;•Hฮ๖ํ๛>q๓โ?K๖r๎ฮท๏ฒz™—บmป~nศ๑ฮพe๗ท๖&หฯอฎารฟปYำN'vฉุลูา“o๏sK_พ{ฉ“m‰ˆ,+พ_žฦ\’ุ็ๅ|๛ฺญอš<๘ฒฯ๐ฯฝdชฑัฎอื๗ถ/Y2ๆฟฝq“f"–Qo$ฝ+๚E%๑N2Hค66นๆ฿šท%฿N–๗ท|ฟษฒดlทื]oท5ษKb™ตํ‹\œฑดฑ_7๑.7ฯ]™Lƒั4„}ูm_m•ไ๖๗]›ปืz[’%K๛u_ฝ๔.yWtšฒ/}ฝTฎฏ7?ฎQหŠปฬูฏg๏Kz›`{็ผธๅ๚์\ฟ'ืuฤนŒdั้_.๖\n๗oฟK็๓๏ฯ 
`{Mnป๎โŽืๆถหค์or๗ฤ…ผ\&ู{KHๅทf๑ห=ฑI`ห๑K/u๏‹N6q’lทl็>o9็ฅํ:ณd?๗๏๓ี1ท™l๒ํl.}!g๓ฟ‘Kศ’%.5๓–}ษฎษฐ9k"ทe$O๛์K.น‹ฬๆืญšJ—Uโ…๊zAพํ๛5ูun•I"’œํnฟำHƒษ—wฒซร๏}|sนs6—ž๗|=ฺ4Cๆk˜$ุ’~ั๗~ ‹Š์—![ถ]–{_&^vหพ›ทmปeRห๕"ถิ๒ี"ํm๎wYXฒผฯํv๓ลว—ั'ฟ>๗9๙6‹ษทV“r—ทฟๅvๆพe‹ผ๋~๋bษeห๗7ํ—ไ5oo—ฟ|q๏]๎[๒7’ฬ๛คš’\ฮ๖mณํํnษeฌl<๙›ฏบ ™]-หญำฺ\๛๑Nฑไ.•}ฌป๙๕แ/=›ภj™wูmhnฒ&ฯ๛]ซlืuuทoy~'Ÿv IฟGzf็K’]ฑI฿ฟ%Y>JS\๎$7๙–w_”Hr็๋๘ฝxฉ๔ห๗eY"a%\’`ูš}w๙๚o 4๘ฒr–‘นK้w{zถทf3‘fฒปอb|๙/Mฟ qนซm๎๏ฎž๏L{lง??๙m_}ROฺุdNŒฟtพนไ—fพผฟ๙ีmๆย[I’ห๗้ฝ%w๒พปผห“ห“๎๕๛N.ผฺฬ&r๏l๕็V^]/ngl›^๒ฯŸZ9J๑š๘ซUบtป๔w–๘ใฟศ‰d"q๙ถฬ๋ถ^–&k5[ฒ\vŸพ4ทู—฿; ทอ2้ขฯ๏?Rbษ็๛_c~้“v+‡›mษ{wฟw/Vy€ตqน82›‰ฬ_s„Lผป฿oญ„ ธ]ถณผฒ:ๆz๖>:Bศvฅซ“ดS•ส_O~vฅmง1`0’ฮปfุUษ4ฎ3ฅ[[ศ9Gsๆ„๎•9@k๗žsDต้g(tTฒsrn๗‡็ง็OŸ>๏.nทใใcๆL›^้ฬdฑŠาIo2ด›ด•dnอi'Bทป:ดฑ‰ฒIM€ ๋41ืtำmว&) ™šX@ฒำ๎%=ขig‘NTš0ZiฮฺฺQ"Œ์๖4&ดํvป3;‰tv†‘jSญู่Fงืutข้ึT‘™ัะh[šH›+›.ฉฐPํt+3ฺาT:rตKาคฅBํถำฃู4&iouIUG-๋SAj‹ ฉtbRปํhUำM*RU‰Dc’mlขMš‘่ดr%ริVฆ‰ต0ปFš„คฦีณ†ฦF›^ ‰‰€™$rBํีn;ยฌ$ขBป_’•ืdM†\Uก อibฎ้ฆซioii!anฅษฦ*8ดณั0ฒ“lคiOWfต66ม4ป=mฃฉ$cHKํn็บ ฑู6ฉกีจjwึุimบgส.ึ2 ™ัFU5‘šขWดำ&MІˆ0–็จฆา$h-IKU]fอฮฅIDB๊ &ซllฎส0UัR] !+ีดtำผžiขJ›lg"“๔สˆtZ.9™ุอ˜tทmถM›Xjนzึnw4บ!“$ฌาI™iฤี/Wฏำajš6ฃดซซ3‘ชr%ฅIฆฌvF`ึišณใB[คU‰:hA'ซํ&Fาึl$™$ดCฃnkcƒฤL7›^T’IFuทฺ3ุiอิiีhฃs๕ถซT“EKc…cMo็๖xx8'ฝzณท๛รนOฆzeฅ˜•$‘•8็แแ~ฒฏ`า‡“๛ร-3Mถฅ‘&&ฝ’9๗๛แไ่ข7}|x89Œัฑนถ4ํฤœsฮรำใว?๐๋oฟ}๚๔ ///๏฿ฟธ?ฉาะคI; ฎิpฟ๛mNvL๖~›๛>‰†HWดˆRฃาŒdดํ๊ˆลHขอฆi$ี†fš3 k*2jo™ั6LiทIdv๔TVฉฑEnšถJ„Œ"ดRqอnjNT›•6•JคRข;ูHA[ีI์ฐ!ฒiญ”tซbฃญ0ZKTฑJณ’ฉฌJค6กšvป›`*Fj“VHdจิ-Vฺดiบ9•ฒฉ,™r%‘ๅB-•tfVe1Mซณษ‘4ฅฆำๆjๆš‰ค๎ผž366ษLณ&.ŽR• …†HTv3zฉŽž‰`ฃบถ-ง์ฮญHK“าคQQฉ1Aธบ‰$*"บ&JCeC7T:“L๒ฺlfLดฃ“IU"aฒ•.dดZœnซGaด,Hics5’ัดปู$Iฯฆk“š5‘ชhfUQP“^)ฑl jฃซ;Um:คธ˜V้ช&brUำค)Mฉm[SฆWš‘ถ.SืiQ!Mb›6MซซœRm* ู •‹e„*ฮtฎF€ดirข=่ดำmฮ ป.้dL,WF*4Z"’D+ข-ะ’6ึํีi'’ศ†twญTท4ำ ฤขMสŠ&ค™$ัV'Iด ขa"ฒกฦ์ŠuLคq™อL™v’ดญšF5“สู‘Tส‘[๛ู&ำŒ๊๊$š0ฉHฅฅ…^iC&ก-อฦ†nZอหฐ๔FฅŠTS4"fๆแ๘ี๓›—วOฟ}Z3็$€vฝฮx๗๒๘ๆๅ๙แแ6ุชi’ผฮVทํธฮหำำ7/๙฿_ฏfN&บืฅ๛๒x๚ซง็็วm"ัฐIกlM;3็้้้๋—ง๚๙—OŸึœ! ปตืร™w/o_ฬ$รฤฦีueŽญvฺ‰ๆvฯ๓ห๓Ÿํห?๚ืฟ}น-`fžŸž๔ว?๋ฟห๓ำำํœฮDKฉ่`ฌvไแแแซ—็—ง๓๓ฏŸึฬ9Iด๋}yพฝ}๓๔๔pŸมฺqšชTU4"!:ZUmะคฺถ‚#œถQฆํุ๊“่$&ูถi;ถQTถ‰ณ“NชฒU๖ไฌT‘ฦฉEฉŠ“FgปๆjฆF‹kํ\ ก0@š4ฆ[ำ†มDzตTCM“ต–‘ศ๔ตfชฒDฃัFญ9)––ฆฉ“ํt&œ\šWณ*l4’V-mšศ•{งีญmฃูŽ–1ƒดง•&ๅึ@ฉl"ุดI ›n[“TคfKdไd_†Fe3ฒฺ5Hฎiท:I+0Cำ^%ั„ฒ‘mซi hค ’šTˆ$ŠhถAาŒถvrฎชŠ0m™”LT+šดดXkv์LดQe'ขฑฆ6r…vฆฑำถถ“5Š์MอBR %$6ik•™ฑถmะœต–@FํUMฅ ฦaด*™tVwJG’“MZseฤQบขaฎฎmI์ึ6ำ™ฆmทฉฺFลศศษvZฉ4†#mา*•ชฎถ'Fซ*I€5\sDบ4“˜jN^ฉ $ูธฒ6Y’ฎv2‰๎ึ2ษ4Tj์…ค ีดอฤ่(ฮV‚f—&๊ญ4Dฎm'ำ&า๊d"Ašถ*ูาF71Mw"ฉ6m6Œ4*ฏHช(—NLํ:mFวถzMVSs อ!nฌฉค"ฉpฬใรใปฏ฿๐ํ/ฟ๓฿๙๙K{[ฺB  กก”(@%ThIhงื›ว๛๐ื_๏๗K1‰ˆคYํ4๗7ฯ๚๐ำ๏ห็ฯปฏอlQ   €P@  (@@€ Q”™์๋๋ใษทoฟปo^^n3Nœ$D,[i;ๆ้้้w฿ใื_ฟ~ูZU(  €B@ € €‚@ft๗่ห๓๓~x๖eN็ศ$›n๋5ค1๏็ํWooใ๗฿ฟ?๘ใ็/_ฺjJ(€ P ™ไ๑้้ใ๗๚๓ื๖๒ี›๓pหLผ๊๊ดtTgโš™‡วงoพy๗แท_?๖—~าถชP@(„(( J„ˆ$\ฏ๗๛|อ7฿๓๎๙้๑L([A*ํศฆัPvฃ’QขัดiWk$“kฏ–’ฦ*Hiฺ4Mีmณ‚Qญอl่ฦ šYš,‘œMำิ่H;ษnฎN9&บฅ่`’ฆ+c&“ูE'A“ซ-ง]า0qตlาvฺCXe“[ฅฝดiH‡&ำช„hึžŠAณMSa4้&VimาLงšฆฃDcซ!!™UYvฤvz!B&ำlงขI+ีKŠHr.iฤ&Fษ,mญT“ญ)S'%F4RSา$$&^ใา\›QำMขQฺJฯi“ฆi5R`2Hฅ…ิ. ฃำถจฉˆธบขTTW‡ะจjร˜ll›าf”JฅMšฉ๊F“hฒฦ&K*Saƒeฺi"หๆตI็ศIทvฅอ”LาB’™hw;1IˆไJ›f+ข:ฃjฒš๖TจJ ำ.!E*Mš`T4“0ฌTย0H'iฃ*ดฒIBRาŽ(ห†Lฒl&iณ);ู+šfd’่uฺh‚ฌlve‰)ํ˜ “VฆZU,Y่ฉ‰ฅš*5\qI™hlน68ูHชฑะค'i“ฆฉ!MาZ‚‘’v:UDK้ชL‡@ฺi4šlว4[Zจ–6]@Vw:icฃliNšn1šด๔•คlคIำ%ัิ†”p*ME^›ํ`$์ญ‘ะhmf’:Mีค†้คฝๆ’Jn๗๓๕ื/ใว๛ํฟ๘๋ฯ?๔๛งOฏ๛ชอ"-ฉhKฉH)”ฦQ€Šจ"@ก(iS‰Žฎช€P„$I่VJ@”%"ำ-LดศดdR&จTcCฉข i4eP’๎ท็7ฯ฿พ{๗รว๗?~๘๊ซท3%•Fฺnฎน™Œฮศํ๖๒ีำว>ค๙ห_๖ใO๕Ÿฟผพพถ ji•ฑ•FAˆตSกRJ–!  
"ฺดAlด)(iกฅHC •Mจดฉด•F"ZฑRiZิ@ "กT!าDชQ QE("E„‚ชณ2ฤRฤ9๓๐๐๐อทพ{๑‡฿~๗แๅ9ทฉฝr›#5›vบฏ+ฝ=>พ๎?๕๕vห_๚๗Ÿ~๙๙๚yฏถี ด•‚”DT้าPAช ฉ@6•*P‰ถE$@ @&i)กUMJ"คV KDCดฒ!TTiˆ,%‹˜s๎๗ห›7฿}๓๎‡฿แ๓็g'ำัณฝ’ฮ—+“žtd“๕pปฝ๛๊อ~๘แŸ๙๘แใว฿๙—ฯ_ฺZMDงYm€iK'Q„6D*าข‚ถJJšRXBBจฆ…iAR 5 E› e[4D„œไแแ๑ํื_7?|๗฿=ผ<ปe6“ฆ[RWๆ5“I๎๎ฝฟ}๛๎?\ๆ/๙?๒๓งOŸ^w[mฅ4ฒ’J‘ฆ›*’*จA"š2ดJ*4ZัD%กi! ชกhฉ Rาะฆ4Q*#JIฃ$ดŠhจ,*e*4J"Bฺถ‚ภM—MZ%กฃ•fB“๓๘๘๐๖ป๏x๑ร๗๏}sฟ?7G๊ฌjšpd:ฏ๓ehwตู‰“ใาสi้ฒ™=nฎาะษTš๋jŽLํซKyนฅ‰ดั$Iขญ๖ิดุLฃ3k'DธJ3†TUlgฺก๒๊Hoพ่ตํto›-Y‘9ท๘๒บฆI*uš๔šXนšส%;s|ฑisตำถรmฌlcำฆฌd“<คฒ“ฺธ์k2›ดฒ‰3ั\าiยาtฆyึšHv]9าdๅjส}ทštjํr2 mkํ=cšษ$ต๛ฅนขกFŽNkผžู!f&ทๆjฏฉํฦ+“ฝu’^4s2f7ืฒ4Mคi'ื$ซอU“ž4r™‘ด[MgDณ“}จmV„ljฆื4ำนฒkวlwmา“ษฆหQีvๅ:VำNE๋ชฆ*ฎKฯฉyuตu%$’$6M"I@อ๎\ WาLต)ฏ'G$KWdr.MEEฅ†K6๗'ฏzีาSู—6;3“ธjฏนG‡!ุdV.ูdŽษngWl;้-i5้ฦNkค™N&9ู=‡ื๔R›„ู™i.:คั4ญ์E›ˆึgูv$IbUอใศก9์…ไ้hVๅ„nJ‘]๗|ฦ…ษi็aˆป6อŒ#`ด4-+ir€]ๆKXM =๓ะฬ๘มทndู่๖๙แภDพญ๓ฬ‘โnmถGfุ้T ฌ7„‘—‰ัฑ 6f’w›:๑v‚HP0ฮž๋]ƒbทลฆ†€jฅ™{๎=5%Lรถ“ฃ๗‹mณ๑ ฃฌ!8ขeUwTื AลfYยa€ง”ลธ๘r>๎แ๋ๅr™ม\น.ใ™ะฏ{sw„™ตกน๎4ฏ๗็?๗๏ฟ~๏๗฿๚๋}ฟnปฐ๋H[RBฑฒศ๎ศะ#M$eุ๎L ศน;™eเjJ”‹tQG"O91 fฑŠFฐคŽ๎•…์ไ8ูถdนล6มง€bื™UลหFJc3ƒพMI4Kƒe-Urฝ’˜ง˜B;Ÿ็ท๏ฟ|๑ฏฟ๋ฏฟ~~๓ƒ.ฆ::๒๚า&w]๗ฟใฯ๑ืŸ๓ฯฯ๎gฏ)ฤ=,z„›Uก ล•ฐฒ=ว}%›๗Y,(ภ‰ฤA šภtต0+บžใ@‘@xZNŒ$ดK1Ÿ็|๘ํ๗฿ื๏ฟหฏฯทo€#_ƒรrw—ฮ๗Ÿ_๛฿๑ฏ๑ื_?฿๗๎l1บ.…าุจ3 /ฒ g[,*8ฮW„Yด’Fึak0ี\XU‰๒;ฐL&ฝ!xPธE‰0ผวฝNœIJฟRฏ++!j;ฐMะ€ฒฐ‡g>฿>฿~๒ห๏ฟ๖๏ฟ~๛มพ‰๔ฦืธฯ๙ฌgbCzฟ๙๙๓_๕ฏ๑ฏ?๚๛๋g{ฝหeํq‰2ฦ๋>k€ณPw>่ธฐ ๏Jปก0๐FRHc4{›ีPVbgตฒœ@…fW “”pฑฌ ล\ข&œ8‰ๅVKโภญ83?ฮ็๛oฟ๖๛ฏฟ๖๋ฟ|ž็™๐qฺูฒbฯWงฅฏฏ๛็Ÿ๛๗๙ืOชธn…ะ!t!8]rA)ทœn2Šs-™˜hh นw้จP๋จๅF!ไrP็&แภa–n๗4`+1ฝ>งฑฅ‹‘^ๆ๐JJถ่Š์ฌq$ธh4ป ! NŸบ๑j3เ‰ุP†!‹™็๓ํ๛/?~๙฿ใท๏qฆ๏ฝnƒHƒ์นณ“u—ู๋๓9ฎพY>ุu”m7ž้ฦ€ธqฉ™}๗™ฤฝ€3๐ฤb-ูth˜‰ฮฝSษžegฯฌ..๎ฝŠ3 KxbŒฯ๊๗ถไƒo-เqsเ-ทฌƒรำ์K–ทุฮO 45 Kงyšฮ—wป ๖|;๊n wฺ:—=MMปดtœqแgqฺ‰}ๆํ.๐้ณณy!ลQe}k“nฯฮ™ณฬ0มำ/๗๘"ืฯYๅgฌแ"๔๘8Š{'ฏไpฬ๐3\6mปืฆ…า2‘ฏ93ž< แ8๗.† +;œใ๙ง รZทigฮ5m‚ฤf;evพ9Žofเ๘๕:#[[Š๖ฦทธฒZธ_ŸsPG็ะเ;K;Wฆu‚มa†๛>ฒ–ฤวb7[่๚ˆ(ลCžๅษฆผทญD๓ํ:>cF{๗‘wโ8่ฒ[47vw^ๆ,a)wฐsv4้k‹C‡ๆ8g‚ฟ๑iง%;›ฑนทh†3g๕kJ?oNL14ึ๓๔n๗-†ฮฮKืAu$wฝิJ5ืฯ˜7กy๐รฅa‚๚๚6๛s&?ณ3{๑gโ๋$ฏ,ช–ป8ฏ๗€;0ฒ๒ๆgeZฺ"ู.td่ย—3๓\„eำ๑0,;รฒฒวณฬฯพ†o^kฃะ{๖,f6u.ฤง๓ŒW4Ž›ปžแ~-J$œY[ุ=อฝ;ž™#M๎ด์,nธอเเHs฿Y๏ Y]M /–Eญโ&Š๐ิฬ๒„ฺฺทฬ่’~g˜๓?~ล๓Ÿี@G’ุi๕น‹ฎl—๗ฒ๗๖zฟŠ6S*cณ€…&ห ใูๅฉก•Kูย$Rณw g $มK.‚b”ธ;ย‰q" –ถXุ9Œฎ@E*ใบ๓%‹ห>ข˜ฒฅสฐใ..3้๎‡ุ]fฯฮลXอัฏ:gd (—รqŽ}ž๓<็ฬ9็แุ 3=ฯ๎;๗ซ]›i`๎น_๗็๗v๗๛Zd‚ีธN๏ั‹Eา3sฝA๐ภa ๆŽq-โ|ชิ’]8‘ณ3’_หใTอ้ค๛le่.‚c#—$ฑh›ำไ›1#ฒGศ*$f˜ขส /†Š# ท๏vๅZฐฮฒJMw & „๑b5‹'3IาeŸŒ Iะ™๙๘๙vพ>ฯ<Œต*‡คํ|ๆf[ow๏W๏ืืm[l่ˆ,ฒ่2ไ”ํœYทbฅูƒŸw_fU7“sฦ*VEAฒชTŠ™ฮ๔zฟ๊œฆcณ๐ถใž่N‘€๎ฮY&:์zล9š์€@0@๎9M3อ<๓๙๖๙|;ฮdc!่แ|iผkxเ๎๎ผ๗็ฯฏŸ๏^‹wZ/ุ๋  0๎7Hc็eŽH#eQ8Œอถ’ฌิ$3ยฯ6ำdKมคฺุฎ†กr๐bA N^‘•0“žD ข…Še:Ÿ๓๑๓ํ๙ๆ่ตศะฉ†u6ใ}Ž7ข0>ำเžฎK…2$gหนk—ey>ณต„Xh็žูqภw5sI6ถา›v+C J3^’Œูอณl3ข[pd`ืœจ‚y8EํZ’ˆฌึ Ž8>•\ห.dฮฏS่bL‰0,nีษ “l5x๊0ศkm\งuย„Z@…ฑฉํผ'K_ฐ=ˆขฐห:6U\ว iNุ3อrmงฯmใฮั†9วtษ$p(ชัฆ5pเ<+์๋}ซ้ฬฮ}็ยmeฯภ‰ฏูมvฯน Œ{&๑fด‡Ž2\7 sๅ Œ็๕Nทญf๔xwฐ…คavๆฮ8๘.1ƒe‡V;:M{–๋qWBiนผ‡!7tGฺน‰s\ช‘2L  `9ปป–!7ั๔ดœm์สฒู…“dh{๖WaยH้ั‹ิ€ๆ 6ญ. =!' 
hia‹‹ืใใ้_Ÿ“ˆ&lด‚ŠŒฺ8ภŒŸ3๛ใ‡:Dlƒ'/- ฺิโฆMปฺ ์Œป<1าtmํg5b`g_™ลVฺ!˜„๓ย$แ-ูะ 9ๅย์โbลยUguMนHœ็ฒ๓ZB kn 26 sทYBve™ูธฬ่k๑k๖ะูŒu˜qพ๊9 คป์ฌซƒณLxhf1Sq"–HBEIb&ŸฟSะp]I์J๋ค ์™ะaG็ฬ๋01*ผ$ .ณผv[๑<.@fk•ส: ผหมค cู๕L'g[ฯฎ,ร์ ท3Tœศปlh5gคศ†XŠ˜ˆE’ตฝ็ภ!Eแ'฿vบถt…ๅนฎvmqr6Xlg„A›ล่Q„k นOLC.Rำ6เuxะธ^xVc‡ญjQg|ฦๆpพพ +‰-k ๆฐศ&‹„—๓9อV\ษ=qถห0zฐ3ฒBธฑ.แ"`ฮอ)ฝ!า]ฦŽซืjฮihšpiC#AŒk•B๒ๆไ,ccซeํpyล0F—ฤฌำ\น„žqฮDƒ์ƒtม€ภB;๘๗@ฮ2 –โ‰]GŽ 1ebމ์ยิ  ‚ษ ะฃปฐซ9ยPœภJ"ตU๒445`Ssc\ฆFb๊ั†eUมgศ0ปqฐ”4CบJ‰ฃ๒ค() A™๋"มHฬ]‘@ฺ~}ผพIp˜็ (าŽฦƒโผƒฺตฬ4‘,@ๆษKw„YV0ัต:@ฒ๓ณฏ|อผ๖`๐0คA’ร*XฺเชใCpช}Wi‚แ€ ”t.&7 ค€L ซซ1ˆH•'~tI\%C3โฃ8ญ†ๅ rR{‚K๋ M€1!)hซTลqNyƒƒ„ล'+#Cฝ6ฆฆ๖มๅืC€ “‰‚.ซ#รbcฯสN„€Yถ!2Ji@ โ’ึe';E’NC แ†งš t8 1€–ภ๛๚ ลu\?NสŽTฆซ(J v&ŠH๑>์ิ}q;ึM/8ฃีงPฮ9>๏ใŸ>ฯƒ ‰r๓ฬ…&ภJ๚„ีฦล‹๐Iั!ำม๒โŠC2ผ5Q”๖!๔h7ฺฟŽ…Dq๏๎หœฉเๆ`๗๑ฆ์จd9Ž่ไถษวีvŒƒuบMบ"!ML1€B8 ณ@Xไ qh9 เไdย!kB‚š‹ิS๖๒ัฟ@เ‡฿ฮf๘~๙ริ…ฦj ๆ5ุ๒๙wU ๎FF๑ถง์K{ค๖~พqพ‹ป;๛t฿7ฟ;Oน_’Ÿส๎z๐RัเพNhบมoๆt-:€lํพ๗;~}฿‹ ฟฎWฯฝ๏โ๛{สD’๏ว฿๑\Kื3`ห]‡ŸFฺึqq๑ึรvIrT^O{ำ…|U-f๐}xOแกpใ๚[๕-~†ฎ~๏[ถ2v"๓ึ_็ใ๐†^ฟบ๏lP›๙QƒตฺŸฮ+S'ฐไ:t฿ํCท๗qฮ4_DM็้ลqฝŽ่จฺ๙{็$@eทm๚ม=„ึ๏6ะ๎… ธ?]฿q% อŠมฟสถ†๖p฿=7K>Ÿฮnzผฑำz>? 5๋ษฏน๚ฎ>๘ฃผ‘ฯธ…˜ฒŒ}{๏ป{’žฟ๋ว๗}ปฃ}ท/;€']{ŸŸ›๚ู™โ"๘cwฬŠ+๘โ่๗/๛๚ฎ# พ&ฬํฝGใเธลยแ๓wLื]๙ว/A็Lฟ๙ธqo๊ฝพD‰<๓ก็๕?๏ีU๚+kๅ1ฌฎำp”วมแ:๏Ÿ๗ื๓น]ฟ๖น ฏ.ๆ|btq~oํ-ฉ่#?~ฐีพ๔๖ธLmŠ~ีฏยO๏S๋w9๔วl+๛RะYต}ฯT๊{<pw๔฿รž๐=ผ๖eฝโ๖owท.ๅ๙~๚w๕w๕ัgCฝอ=iทC†ˆช๏ฝ พ๏้G๎\๒๛๛Sฟo‡r:งว๗๘'oŸถฉ๗8_\œžvฝ฿U\[็ั฿๎8‚ค๘2ž๒~{9’๏ฎxl\๐Q4a?๔YWผใฟs* yไก"๋“;็๕Kฃ~ภคปฟ06ผ:๑ืXข|% ่Oญ}wj๓š๏pQ่ถ&Ÿั็๙๚๏ูฌ่€ุฝn„1๛๕ืผ)๚ม=ๅ?p.&ฝ;Eฬ?๖๒๑U!่zœY|ท!๛˜ƒตŸO๏๖พเปฯะ๕KNพSz๙A๔฿]๙“๒ี๔o| มสl`‹ั๎๏c]๋”๓{ฟฟ๋usป‹krsŽลษ"ฺ๛ๆz~MŸใ›‡z[U\ฬ~ต๛๎+ˆ;๘ฮkส๓-๕‹jกภ฿๏ใ~ ร7ฎ?๊พมo่๏๙อ๏2‚่ึ_~๐“ฟํWื@แ}wอ๕ฎ๛ิืށFpz WŸ๎๏๖“ฐ๖ํื vพnผ฿ๆิโ#๖=๐ํ€>ใX<ุ;gภค›ำn๔‹ร?@ม๒k~q…(mgํ๖‡xh้๖(๙XWืฟ_๏~ABญไ๏พ}p๐jW?๙๏ํดซฟบ?:z๊“ทษ82„นฬ0บx฿฿๛๏๏๛^งไ้๙฿๕๏!๎ศ๏v9™€ย๘เhฆ๗ใํ๗g'’ฦ;ถ 9 ซใ‹ซ฿ดฏ๏8าเเ๛:๗๖F+๙ใvฬเŽโ๔?&ฏ๑๑ณ๕6r฿€eโกJฯO.<๑คหๅ๕Ÿใ]ิ฿้xฦg,ฝ๗ob‡ท9น-ยOพฺ๗็}อ“<๔vqk๎*™:wqu~๏แdว}เฐต}Ÿ์yสzt‡ฤตsิฎM0รวฟ/`๏๙ฟ_/ฯnอถWˆ& ๊๖แ'Žทyืฃ h์็๒&{๖–+Cž?ํ้ะE๙฿ฏวไฑ}๘5ฟื์ฑ)K0rํฝฬลM&สd–Ÿวะq๐_rHฌ&Hโœ[~๘๎๛}ํƒ qงm<wkบi๒ล%ฤŽ‚sโใฮ๛ศo0eํ}ฺ๗ทCE’cฎย1ฏ๕y๒’Qฏฐา( ั=.Eนฒ3พ“ว1~oOซบ๒ฃกธƒฃซŽ๕ํ>x๑ฦธ๎ใkํ…„คผฑฺ฿}w3wAŠ7U„ฦMฟ์ฦฝคย๔๒๒{_|ฯofศr:o๗็—฿๋๗zหช;๋?‘๚๓7Sี ~a‰n:๘Mฝฃb๒“๙๛ถ/นำ‡–็นœพ}๖ฆ๙‡งฦz‚ FeNo|qั%9˜ฉำ)Šฟ๏฿๏Ÿ@“ฯห›Mิ๏ะนaG_z|์ุโพง๘”~๕Nn๗ูYl@u•€ปy๋tฒฝ ษI8 @@muป+sŒไ๖๖~ใโ*Žpฎ::’รป๕๙ฑใ™หพ๛Ž๓…‚ ‡๛7๕vWœฏ?3ศZMD9ฝฒไใ+ ƒ๓ฮ~O)€ฝ๗Z๛โ†ฏŸช๓๚ม`๎?Lุุ“ทx|:7y๚F๗9yใฑ๙z๛cwใผแsชK"๖t6˜/๎๏ึd€<ร‹vLš'฿q‡ฅ0SBoฏๅŒw๗๛๛๗ &YžดกX๏ร1€๛Rยƒcโใ๏Ÿqาh>|ฌ๛๏๐ผo'ส๎h๕ัhฟ}fC(๏~t’ คไ”ฉ\คข#พ{>š96ๅพ"=ธเโฎŽ๔เZ฿พฑzCฎ๏8–/ฒ€$ไsฏ๙พŽk—ึŒ๒x’ข‚<๔Z7๏๕ก'wž>๘nG‚C‚๖๔๙็w~ศฏ฿๎‘Uื‹lรŸŸขผู3+ู~“'oภ}q๙{<•็{฿vูฝดฑงฝ $ฝท๗ ็ย/xˆm)ภ`–G,'_AฺM[๎~฿๗๔ ™ลaa๕}ไซ €EŸƒอโ๛#พGช>fฟ๋—}ปณ็jŠTWฉmegq๐๎%ะบ‘v*Œ‚D j3ขชๅ๕1ฯฉี]pพk`.=๊บ 9v฿๎๖5|ฐW฿_ห—B–r“›7ไ฿w๎klY/œน%$)ๅื๖=ษ„ๅ๗.ๆท)#DยิอvŸ&พ~ช๎V?z@ศไงŠธส6ํง[WwN~cLื๖๙^ศไฉ“'@ฺฆoŸ<็์ิฐ'‚2ยฃŒ)ใ/พBภฉ›nX฿๏n มYฦoปCœz‡Dัแ๑~\~gะ=๏๚๎–*ปฯ™TA[๛[G ณ๊I "I้9GŠ๐๕=ฎ3ฦh|พ๗G?‡^|ิWgp๘ต>ฟ1˜ู๕}–„๎Ÿาป#>ืm็’ˆ•ภ†Žเ6ย’{|ฟสรใn๐๗8ูp๓;?sผ๕Œ๋๘๎ลใ0๖ŸG&l์ม[>2T๐ษpไknฎํุ~๏{๚pย€hžำทฬE๒sสN™IOf๙๙1Mฮพธƒš_็~'C”H‚‚๙ฒV (‚px!„8^แ๖~๘ๅ†ฤ‰bt ^ xž‹)Lxวี]Qbz*ฎl8.€รZ*มU-ฆ ผะPธ"ˆไg๔๗ษส๔๒Tq๑‘'Ht]t€โ#๘Ÿ€ทํ>vฺล h-;๔ˆƒัผDฌรXœ–นืˆป.ห†๔4ฦ A[ฉ!ภ%ใฅ฿ Ko 1 1ึ_’#ฯ. 
uิP9ฅ‚mFฒ—_เa‡/@€&๚y†๋n‚†Xฃƒ“ถ|๒ 8OU๕ช.ฏƒผI1เ็ภณ8vเ;_$pU†ลุ)0ุaOŒ*พs'_๗E=@ฐ†p–ฌโย—l}un๗>jข~ฮƒ*ป'gzr‡ฉNไซโ`€A =n๏.่Žษr7Gr\"งแษ‰ "p,ฒKานwrGw”ฤD่IŽ_tzCๅr%พo!5ฯH S!^ฤ—‰1๑?;ญƒหŽ'ฉFDb๓ฑ"J `K€ห j^!MUี>๙b๗Œ อร”Uxpุํ‰ใะœ/ŠU๒dษผ ๚:z้`e,ดอชKIฑauิ้ษ`฿เ=>ธะ๐:๘เOŽ0ธฃรf>/ช+โ'y 7าำบ‹ขmม๎ผ!Y$'P๗3*BJพoชK?๊Ž‚a 1J้๕ ๖D นญปw01๙FŽo4:Fฐ :L‰ฎฏ<”‰R$0ีUพหkqg๑„_ื]ํHิBฏ๔๔ P]vkJrGWื`จ1จโ}Ÿqƒ)\wษซ5ู๚\Dpภa’EGกŒึฟ๘ 2œฆ\แˆsจz nD ขˆืeHƒฒฟ๙šั๕a <ย๎=๓qBrฎŸ+์ผฑ ฺ้ …ฐPqwค ŸŒ€ศ๎cจFR ๊E่ศโ@S4‹Œ}5€ มกoทƒ.บ“šaย๊เรรื{vใˆC๕แ‹ฉAbt 4ศ|็rg๑กๆ>ˆีQ†ฤ๐ƒ~ชNฆะ"}์โรณฯ๘ aิเ'Ÿ๗a|“gฑŽ6€ Aศ๊พI-ธฅฬ`€Iแ๋) ด฿}๛ฝธs'๙N์บƒ|)Ee'„( !ฺœiPฑมu+ ฦPDฝu่ๅ‹}๐ธSท„Œweท2ปฎzงbร“ซHƒภะ48๒I๎+๗38 ๐$ `X]๑กLO๎›เ; ธ ค…ลZƒค๗’$I’‹œ}ูฝ.Wผˆp-ํn–)ค58ำŠ์เ, หHรE? /—‰พq}'`–๐ ๔˜Lฟ4Š‚$ทgŸ„ สA”ห๛Š.  ฐ R { ฿ไ'–ร‘ไ‡'xตUŸŒg˜ƒi๒พ_œืี?๔„8ธž`„LU:FNฤMๆ—ฤจฑพKjืย8Tx‚QขT-ีณฬ )HNhoใŽค์ฟ'EXว!็‘šWฦ”;พใศ‹)ฮ่ห 4แโ?p†`ซƒPT0dษ‰pโB@ฉ๑ปใ€dะษมsyชฌ ๊ป$@ๆ|ฃพŽนœAภ‰•o—žˆ็|W#อ๒ะฌตย4ตŽd SพL l๛พ‰‹Kไ ๋˜Y†zHชpD“›|a @“w_-*ู•ฃ u้ih๐J,L%ซโF08=]เaDฑC!j๐บ3]–†fa อ8๑„;ผu0แ“3_^ๆ4€+MA ตD๐Œ#MdFgฟ๛{O๛]|วŽฟฝk]JบŠI@๖แ_฿:*บรค๗เ๖-`ดพEL&<คnษUP'ฒ฿๗M–—ฦQ•ช‚2 Œ†งr@€i3GW†T‚l๑๏๔๏c๔ษง>ฦu ึ†฿ร็๒]ู๗ๅจค๏#|ล‚ธt[k]่ืแ›aJ „๊)z ฐ#นอzไฺ1ฌ,œ`›{Ž๘žลช]œ/yˆcƒbœž’์>1๙#J‚เ‰uใ ธ ฬใTŽŠFๆฬ=ข่ ฏ๗๚I์Oธษ๖ฒ$T rk1"ํ๛WEgU‹๏=„*๑N$bว;V†!ึ:I”Oาf(\ล`ri‰ข˜'ษยจ‰ี}AFะป)$กCฒ๗Žจจ“}ว‡ฮbŸ๋ฯ”ฺศ๗|็ฤฏ†pวy/X•ขA-๋”“ฑžX๐ KLช^4.iแ8฿๙mฎu๓`฿ๆ5hษT๏๏pp j’ฤ บ๛{1’ำ + ๋.ฮxP Xท๎ใม ๋ฝลˆไKžฃW?งฎc฿้Ÿq“วป6KT‹v= 9…[ท=Ljƒ๖3*ˆ;8‹€ธNb†‚„`O๑เุศ<5 Gžเ‡ภƒม‡pTป/GิัŽa)M&$@›ึ<๗1๚฿yQ&]#ป‹Šˆ{wใ/_xEQ‡p+ข4ไี๋qx๗[ภ๒ƒไ^ะJVR๔N๐ใฏWƒง."Hใ}ป?M%xอ๐ฎsfภ;d6Wฌโ†Hึดy|‚ซ€E้ƒ ใ่ ธื…\สQk๑S f0:๙๙๗’๗™?™อ๔9zฟ(Kภˆ\ิU Œ๐uK*บห ขํ (๊ะฯเYาpq›^๋”/ู๏ปฐฐฎโ%โeข‰จร 8c„้ '@Gฦู „fAqEFŒื๗๙ŽฎŸ|่#n&ซ ๎››๏ส๎›ฤพO๔ :ธซ๕Zึyญ+’๒P{ˆ$7กึŸ0ฏ`ใุ^ู฿บ'Wวฐฒกขm์ˆož ํyqš๎:F _^ถ๏Cฑ`มไŸ‚eย!1ชะ/หE4๘œ๒g~Ÿว{ยeo๛zืmžA้ŽW#Aพ๛๕ฮ€m*ฐฺบธ“ณ"ุ†…u@W ด“4๎B8@< โTŽ@B๒'“ะ้ญผ#†ภ”„ฬƒ๋=ๅใ๛๙วN>ฝั์Œ':Zฤ๔ษH๘ํภhฟ๎Š:มpoนนoแ›Iฐ๎ฃIB(7ู๛ำยดro็๛ผืํล=v{^aลN ุุณ{^tฌึŒ๛nrF้@ป ข๘ ฃ$‚„€ตJ"$๑/„*โ๚่fRท ฆภง๖•1”ฟฮ๋๏B/šN+สtxวฉTขˆใ^IIP|ี˜p`U9พCTิA%S‚้๐Œˆ"ฺ์.แฎx๏ฌA„ํว|๓ฃiๆ฿}L:T๗%ั A#Mช๏บ<ผฯกAฆ่.2—M;ัา<@๘พถ๋)่;ywX"R๓$ < เ[‚„พ$t('ศ0‰(Vnุ P($ |๑"ย)X*W6Apz^๛/"ƒAR`าupqฉ$@Qšcข…๏Pห1D-‡ฏ%AP'€–ิ ๑๋w Š8‹ๆ๛FbPฆ ฉบyTาq`-Q๓' `ง AP8ณูa็เL€}—๋†ˆzQIฦ%Aณ>CŽฐผะผN@Bธ^๚๎ ˜–ยษ^Š ฤรcภb…&";)C, ศงภj  Pลˆข’ไQ$VA‚Tํ \Qt—’า™uซapสทI„In˜{z$ภะ4ฉ่™แ<.ตZƒS)๋FR 0Qร’ีš๘๑๊ฃ K:(‹?=๘ชฌ|ณ“,พข0ุฦ1ืม‘y@ัQศ๙˜Xšฦ>ํู๊_ * Ž4/9 Yิ]ผ๎ปaL”8ห{็vr‚Ex0ญ €`๐ศ๓ ฐ€aqฅล I€ˆE๋l฿xW‚@IlPุม๐จ>K๑ผ#(‹ม0:ฏŒˆ๐1ฤรvL> (/]T†CๅwงRง฿B–ั‘pคQ{ษืw ลชธ๊ฆ‚ะ_Nฃฃ Bƒ\^ยฌ€ทเ0อ๘˜ญร“‹ๆqะ#@น:็ IูqํณปQxŸCฃ™ขDAWู„ษ๗Y’ฟ๋4:Y"-T‰๐ม๑ล#Tื89^"G@ฑไ’Mศไˆ` ญ:•ขยมี(๛,ม9ม[ฌ…‰๐๓๊ฃศเA‘า๑ษvMD‚ะฉฐ๚Rธใt เร‡TF€"}ขQQC๘IฒAPG0<๏cUAE\๘๙ๆQXF#/)pT)\'"‰#ศสฮwวพoพ ๐T†fs˜0๊Šใ#-Žฐ&œ pึํ/zWJ:Œส๐คธP(เฤ‘0,h^~โ๑*R(โคe%ม5K ข"A€’€—Pไทณด4I๊^Š(฿’“ฐหร…)ฐ0Vต‹{ zกญ9DŒQHXH่ค;5d๔{ฝ‡'o€ zขd+ืูจZๅผฟmCOƒฬoGฤ(J7ะใƒ{น…;เไน@<ุต๋ณซก๗ƒŠxฏ๗ฯoฬย๎ืŠv*ผ;b ๚เฃŠRล๕-„ใ.น9ฐฤฒว@ฅ1$Du๏Oฎใฑ˜สwซ›0ŽLํหฒDธ๗7@w]]ผ๑5]>ึ๑Ÿ‡~๗0ม๐๎ลbgzb๒.. ฬถ”>บข๏u_Yษ้U๋ไOวC,Bv@พ $/ฑ|7:ะ<ฌ68+ๅรคแ๚ฎbม๚jจ%p?๋kA}Gฝšx‹ภ<ลr—ฦ‹‡wœlbจq๔ฝb“TบŠƒใผฎ๏ ๏hำใฎ&๓๘ƒou…บศกฯหZม'ภXขZึทศqงZzrfฤ%่๙๊ท;นะ็ž;E' ~ฯ๛^ฐ )฿ES>ูมฦ’๎FืŒx๏๛cูy—ุw๓pŸ๘ปอfา|๐็ฅัต นษฐเ๐+2”;ฒLD‰eฃxZeHย€๒: ชย็N5("่; ไ๒`v,ืWภ‡ฦKO8O๛๛oิฝ˜Œd/ธqนๆแyQฦe~ฑ†๗y็{Y‘ˆะ์Bฃ๏A%;ู 8๖•์w*ตฺผ฿„ำทŽ๔ษŽส๖yvำ}๐G์ไเซำ—ๅยŸท;ูํ€๘ช,–2๔๕}? 
œฝ{ต๏nJpS๒๛[„;๘Ž๚F~@ ฿')i๗m๛ญc็๗w๏wwฉ Q้{ขQฬ๗}“kGtย1[ภB๘ฺ‡Mปˆ Uจ;ไ๑4‰กsฅํ"ํL|ใŠม‰pฃ่Sฅ1พJbภธ๕ัล'&zPงHถห๕ ฒี‹2(น๓ฌYNน๎ฺeช๘ั X$r฿ร-JWwlืฬKธแ jw๐ๅ๓ฺ!]`l‡zมผฮ•๛ไF™ย๘ภ}งอค#ฑุ๎ลI@\็w๛w˜>7ฆr๗W™ทเหิ”๛^คป:lƒฯAW{|๕นcy๊นnŠ๓P฿๐=๗ัyb>/‰ฎL‚ู–ฦีื่^wค๊UC _O€,ใฒ,%ฏ๑ฝ}@ฆyX1vxX!'ปฎ{]๏ม๔Ez'}์ฦ๚ŽFั‚๏ ิ“c‡ห?<๑‹น€l"@ฉภ} Mฆเมษ}}—qภํ%zx,|3๎c฿ี)ญ}uาตWt๔ฅฎฅศ•๕ํrz;-ฮพZ_h๙ท๋Do๗|#ฬปaล๗ค‡W๛Ž๏_ภฏŸM,ไอ.…F๕#ฟวfŸ'"฿=L๎๖๗f/'S๔cya์ฺล๕ษ`8ธป๋ไพขHัฃ–ม<,,›GXB^Bฃrค(:จงJb@8แ_ฟ฿„N†* ‡ฺคp๗l$ฝศ๎ถๅฉ”`ž,€๗ฉพ‚QXก๒๛๚ ฅ;เ์฿ทƒร“}ฝ๋gWqA[ๅ้aต๛฿dศ หบ–šแ๛ฎ?๖wG๑ฉ๊gู `w}_ะ$„c_}`W0ด๏›>ซR>],นOัh>…f3`ๆKแx‚ะ5Ggƒ!ฬ^}&(a0ฃพyd•๗$`๒ŽSDดuยq'ณŒ‹ม2€tคว-กOBิ] ย๊อ`ปฏ})Rช๕ำคต}Cณภฟ!ŸhsS‡!†;SืuA–)B;๙( ูF˜tPvฟซJ$‡ปe_็D*๓8/I8๛์ะ„aี๛๋ฯฟป;;<พ7ม~u็[ฃOป ^P'*Š๔ฎขค๛ขฝ๗ต“Žชถ๘eษด๎ฦก๎jฝ๔Ca™เมไแฃ9Uvo๛ุ%ิ•G6๓ข+‚Rq๔๋$จะux‚๐0)ฒ/พ\@_d:!)๊( ”}zงณ ฟ(wฮTธŒ2T>ล‡ภฦฐt็วฐ/>ัƒ-๏๎ฮˆๅฦ๕ำษ.ค“0…พ๑ผ.‘๊ิ/฿?ž:`Y€ค๙qะสร4ณGาฏป‰a ป๛:N‚ส๎++๖ฺง;‰bGgษฃnงœU๐โ‹;ธKŠvkŸE,ัNเตต8Ÿ B๗vwƒญภs๋Dผศ๔Iทลุ่Fท<’:ก฿ชฉ_ฃ„@๔S„aถร๚ž๑มp้Ÿ€$› จชๅ๋ศO พ๑ค@q‚่ํ„g œจว@†ลo‚Ÿ““s เฯvม:1:ˆยrpท€ภ*เ๘2(7๙s2t๐G AT3๎"\s6ƒพธ}‡]px‡|6 nญ-๓T9ผ|•๎‹dํษ|๏๎ฆPTฤงžžcCบ๋`ด]M8FUeท'ก๙ฟ{‰‡มช๛2๙ศF0แห๒วDฑ+V๖ป^ภๅโฑย€„!|๋›qั๓P๙~Qฮ0฿ฅไIะ ลฯเ่ƒฯ—vj=ยƒONU๛๗—$ƒอใG;&€@„42LxBG โ๏&ญŠ็5+C๒ใฐhฦธ6’๙Š/ ๆ>8‹์$|ะ^S<๕8Ž}AG'œo๋฿ผบ8ๅ“ƒ์ฑ&}โทืืŽาคํ๗uv%C๗}OฟฮญโงŒqx<F็47ศdนใ^'ฐ๚6ัภ0g‘pะqมำฃ@๚ๆอ๘จN๙yžHˆPต<ฏูข๐้๐Eฉ“๏๕5ธภ!1“๏ฑ"(ฐ8*ฒ๚ฎA๗”[‡I 2นฃ ’f๘เเ#ด:xž@ีส๔0A˜Yาณ4 ,ฦ~พลB<4 ฒฌธ.ˆ‹t$ƒ8’ึMฅ‚ข„/|&ฑ —๎เ,.Lห๕๕ล$ K R8_พ8๎ O8F0oผจฐŽh(E`๏๓็๑€†มๆค็PขแไKข™Pงซƒaฐ{๘]ภyผถมษ '€jีแ:๚‹›7  ่LXึั’๓\W]ขฌสลnปuHLฅ †ˆŸ—มุวจ@๗้^ุb๕=0์3ฎ€ r25?ƒยธ—ฏHนt,“X<ัH ˆไ2ๅถŽ$#Mo88ผๅฎม  3h`Hˆ—ธuM๐ฉ(ี*,๎}ใใท๏Fƒ๔๔ข\@ธธ บu้32Oทjโวืย€xฌL 6ฟ‡eฃะฮ๊ึุ๚O๎ป๓ฃ‘ˆ(”@Eนฦฉhห{,>2ึ๙<ๆแเ#ฐu๓l, ฎฏK#แY@จ€—cณผApHˆฦห%—A๛ๅƒœL๊wฆKฤeA'7อlล/ฅย'ู๎™ %Bไ๏;”ปr(ฅYญ€"iๅ๋‰! ฎ6Ÿ๒ํ๔๋ฎโั๔DฮlDม@๘4œๅ@9“่Pฺ&ฆAJky\ฺขp84ุ,”#๑อR^ฑฯะม€ฃ฿G"‚/€X€] ่IดwbŒ๕ƒษฺžwE–Œฤu๋p‘TN๚๚:โœ๔ฯรฑ{-‚ีๅœš$X?๎9I๊HZ7… ฿ เ—='ณฌŠŸญ{ –๛๚ าQ&, yŒใธ;ฆ9†ฅฑ‡ฆŽ๎R/l*Aฐนnพำฯ๏ˆ–ฃ_ŠM„โ\˜฿Œบ5ไเˆLๅFkอw?H988Tซ‚‰ฆ-˜๗fX”wญ(๎vว_ฬผhƒS8 (คh™ฺ์qTฮj&žHฃbแเŒn๛=ืe‘UME< วŽดฐ„9ZŽn;ฺ7(Žซ.ƒlๆ.@๋p๏XาA9ฏรbQะqŽE2Š๐™ย‡H๙(1,๑บถcX&Bคฆ๐ธ+Q4$5D xnaH0)j‡e,”*|ยฯฏ๏ ใDNื ™aƒWษ(*™d>พ‚‡ฅ/Q็์€W€,ศ –ฦฉ %๏t//‚˜K/„ ’ไ5'ฦG@{ว=‘€ˆZ”—เฯขนX[C†ฆqิค&Ob:ว]ก‹mาq A)Gฤขมฐqโฮป์htrrGcFFฉfึgฦ‹qตซWธ[Nv|ๆ$Pบ - wY@]ฐ๓โ‹pรfฬii0ั฿ Xโ๙ฅใ๏š05ฤผv&@@{๊T<ำ บ vต ฮป^nS€•ื%#Vส๏ช rฐzczฯ๛4ลqชR $”8แษฉT#งภzั๙Ÿw8ผ(€ิ;7ขm^๓ฆมีฤ]๖่ƒCฆ๊›Tmโw, E‚Gฝ<<˜NQr‚Y_ ฉZ0'^ดฬ฿๕—ห ิ 0{฿รw}‡`Šb&8๔(‰ปต3H<ฟaดณบa"K!๗œ๎:๙B๎HŒนE RšD 0มT๙ฤ_ ภ_6-พ/ชlˆ็!ฯไ}0ON๘ด9Dว'’6MIEo~X.าำฯ^.ใ2้๛๎:ข‚’ElEXผุืๅ…ฉงร™ @มปOpŒRคึถ?๏็-/"๔j3q๙ แRw>'|ชNห฿}บษŠฅž&l@ส•-Zฤช๓Jะฮj_ขœ`I—s€™ŒฯชอูรV!๐๗=๑]†!~ˆ \p%=‰ –‰h|๑ฦ๋พ2ฃ{dว™l฿‰฿>žYต“;>_r…ก|สขQFชwaฎ @๑ภ๎ฝP3เๅ:L0<ผƒฺพฃk@A4าเ็็๎g๏IขxOO€@†j$rชGPc*ฦึร๏๏Ž4ศyลˆ–”ฬ›R_P(ว$๖ใ>ฟแ๑]u+vž  0A,J0ค… ˜๖}ู‰; 8QฎEsrtซo|ผœ_ญ†J๎9ุ}๙ค‹}^?บw_tแO"‰2พ‹_‡vใ๖[฿๚๗ทmŸxƒ†๕ป>] J†ไŸ฿—ใ˜๛ณ;ั๕€.ๆ๏#๙ S๑lี_;.๏ฯฏซkๆoจนn์่vW่เsึ๓็ม>Ÿwa]๕”#{ฆ^,"zypW”ตcwWLง {:œDเ้%ๆ*:ิ์บหัWg๗•ฝจจฮ{vž๐ป=‚๙}B“Dนƒ฿๛๛Yชบับ8wวฐc=ˆŸ๚แ@จw๗๛๙ฝ๐ใฟT๗ โpิ๖›๚ cpใ:>Z๕G^ฟ#ฎม0Ddœqx\๗>๚่฿6พ๚ัง๚o9>ธ;แีํา]~฿[คu=๑๘>cซ๔0รวศdฯOฯbใตว3วแ‡๖่O~Oษe๕5็ฅไLˆ=้ำ๚tเ.€ 2ฝพอv^ม ๘t฿š%|—g๐NฐฤFž( ^eย œ๐อwฦV#้ใโc฿W6ณธ3{์‰‡ž(่็๗ลๅ}๒6๐รหo๋1ผฮส †๔;=่V[œzž”เQz็ฏ๚ๅใC๛๐Gใž ผ^}๋‹๑ญiึลํ๔ปฝฟ๊น้]‡ฯม>ษ,โศ›_ืๅ7ูท=ฟท_ห๕้O”b#Žทw๎พk7\tŸฺุPNธCI^พ๙฿wต๕zz๗๎ัๅ@7$ไv๘เไ*ฺฝ„ั฿>โœ>!GจGl'pฐc1TNโ฿๘ศบLHEใP8›๑ึˆใp'b€OซGึก|uืืŸญ(Eๅูc“O]pโกโอรSj๐ผฝ ๛dผA็”CูŽฟว=ฟ๓ฤH/†?ผลzวฏƒฏรŸ@Oฦ"Gึื๋๐e฿ฐ;ฮฏพ1อ๓ฎ‡“P2dœใใ๎|ืทn[๒๖ำปม†๑;>]eC๒๎๙‰๗๊ๅ5›ว}n{ ธx~dopwญ่5โ;ฬ{^ะ“{€]ธŒไ๙Tก‚]ฮ%ฤ9>่œ@F‰wธ‚E`ฦTbั๚Œvขฦ‚!œŒ@๑ะๅXr(เน{฿ใึ‹พะ‹ณ๛ส=จI}Gืซ?:0YwpRฑ0H;ผท7สใ`ณ:บ/แh๗โฝ•?ๅใx ;็ท3โใ—๘ม‡O8Zฟ=๘ส(†}๔๑ฺB์๛ถAˆ'uฦ/9ธ'Ÿ๚๗z{\W฿ฉƒ๕มOีญกม:ู}q๗ uy=S<ƒท๗๎>ฬโyrว ฦ้iPo{์๑{๗C{๔เ›นคฎŒ›สรเฬˆอk๒ผเxฤ0 „ฃ;ฟhŒฦ`๐ ทaIy.v˜ุ์eq ˜แญจ๙œ 
—“h,G#ฆฅลv๏U›ดฦนwQฒf ฺh›สM[ํxฌ ุv•บ ง๊ถขี9วf–โR1ApฅฮุีVฐKA#kฆอตฺ”†ํฮCbื 1*$6HงฃM ณkูั1ปธ๋ฐUุ๖\ส•บฅูN้™ถโœฬf ฮตูรsช“ุฬถัZ ญฑฃอIQปSืฒจถ˜ํ‰“1ต0[ูึ๓t&ห6ขฮถmEฑrฮ™ัๆnณc†%ฺ&ฑs›ป3K0›ป{:H—œbรมLnํ:“0ิัฝ๖ ›Vฬิ -ฤฺญCf9 ˜U+๎P4iณปMา=ณ™dดŽXญฺ}ฒ:F:v7‘ฑyจ{w”เZ๓ˆg=:ูภชปU€d2ซ3Œ-uZฬ.‡(ะ๎œ ๊U๗Žซt˜4›mจไยาุฐจสฐf8ŽŒชu๏ุŠฺ˜ตํนฮฉVf่–f‹˜ปš๓ศlFษYปv‘”lณmดฐcึ:3[ขซษqฒลุ0TGฺUC3Z’™[Ž3ถYฺ6Kjiฮ93ฺlปvฬ T;kฺ iฐปnชฎๅŒิปeูD“ฐ:vgเ.็๊ฬœ35C ต๊\ fVfปฆTั@hณ'ฅ{F (ณฃฑฤบsN-‘2šcณyจ \sฝ8\œฮbุฉปU`บ3[ฐฮb6LHหูXm;ำฺถsXi6RฦตB;3ฑaHEถfˆjSฅ{g[ญl\หvื9J ๋vฝธ๗9“s่4ŒS๗t9ๆnœfงฌ™๎ึฉฒขอ(›NFไ่ัึ๖ผ็œปู๓ttvแฒ8e๎ๆกเ?ก๗฿ฟ๘}่‹๗ฯีฟ๕w‹๗น๏z1ๅ ๛ญ๘ฟwฟ๐๑7ฏศ฿ํ๗ฝ็ปไ#์nปํำ9ณA9ูV/}๐{ฟ๘๋๏=฿๙ฃ๏๛๛๏ว฿ๅt์ย6๗พจฮC6–Tฯœญฑ$น94fŒฮ1 mูุใค†&ํฅ5˜้)jccUh๎ FใบvK›Nvo3(ๅœ“\๓VjN;gs’€ฑํฦmMฬฺฆ3๗ฒurrw_U'“agk๗Nฬpbอ๎cmญa›้ั๎5,Mmฅœ้llฺ‘๎]Zlถ˜วฮญ ดญS8อ˜Dโก]RิŒuIbา›ภถv{šœ%ฅฒ\ถี6ด๒85ŒขVฃ{วูƒฺ่nU1”yrฒฉ2rJฏดต›ƒป[{q^นW\ถหชสบ›”jณอๅ[‚ฉs[ญตน†ย<๏=k'a›' -Žs๋:ํb[บ›{ฅ*m๓3G์ุสฌญS–6ขนŒ D›ฑSe0วF{*ย1ำ`™mฌ แฬ๊œใšฑูๆtoj›อŒสฮ9Yแ;ัuฮN›Cแ๎&nlgF{Nmgถฉฮiปฯป*@ถ์๑t”ุ๊`อ๎Y5ฝำใœf†ๅฮรรš้`๗๎์Hป[Uตอs–BฬูnงฎณตœsาF*ฦ“ 2 ๎ฬ์žงI2ๅ<ฺ2›YfถvฺใœLuZ]๗‚Zำ๊lำชจ,ึำ#’^ikป๎nž๓ส•ม์vชฌป‰ฺดชmun+mxฎQe๎ึฅlVƒJ†ฮw+อvP•ถนป9าjikm2J๎€mrสะ&จQๆุh—้ yฤฌ6,ฺฝฌ แฺhV็dmป๓่Nm63ซcงษ๖ผฯ=tีœไfX;ืŽv98ณKทวฃ‘"ำนหฮต eใk&;ขkํœšูขฆcร1“ฆว}ตดูVงlถ ต–csถ[iตœsไZN]cฦํT&ม@\3mPZrN5ณmทN;=ฤ9บœmตถ๎ฃว6] ฉ์d]XŒส%๑ุ๎ฮ9ฯ๛ฬ=็Aึธ์>ฯ9•กว ูv บฮ9ญf!ถ;— ซV็%ญฉN‡ีธNูJlm๊๑/๐C_๙ฮ๘w๖?๛๙๏{g™Oา๐ฃฯ}๙=๏{฿O๔ฯภทŒkปีฌ-ณํœๆฟkฟ฿๑๙ฦทเ_kษ๕S๏ูฃq นู<ฟ๑ฅฯ๎'>๒‘ฮg>๓ฏฟ๒ๆ๏|๗ท้๗๙๏ฑฟ๔c๑‡ฟ๗;^อฒ=mงz๓Ÿศฏ/๋๛เภŒืพํ=ฟ๙/ฺwฉo}$ก็๎‹ธง๘ๆ็>๚_‡ฟ๚แ฿y[_๛žŸ๙o๋๐ฝฏพ๒JณYw9ุฆf๑|๓ซ๘๙ำๆ'>๓/พ๘ฅ?ๆห๓๊ซ฿๖g์w}่๘Ÿyต“Dkqฬ๎*z๙ี๛Ÿ่o|ไท~็ณ๘G_ๅ‹WฟๅO้๏พ๘ัŸ๘K?๘o}ฯทฟCq์vAQ๗ึq:ณฺ/ฟ๑น็๚ๅณ฿๘สห๓/อŸฉŸ๛ษ๏{ลใœปS3ถ๙๊?Wฟ๑‹๗๛ุ เ๑=๕?๚๙Ÿ๙้{oนีุ4ษ8 ›ัqœM=4lžีฎv‚ูt'›DชวุดคึmmฃRm]ฑ$r7ซ6.˜๔8‹kปEอŽMืๅ$xZ;็S ง0lฮ`.ุตVห)˜ฎํ:YU€ฎ-[ฤต]ัiœ 3uบn†Vถ‡ํ^็$›์ฎถำˆ8อฐ‚‰ŽAR\ถปZถkKง€-ปŒšูฒชvทG็ภึ*znXbปืส]Tนgldเ$ซVืR‰ฉ&kป ฤv:ฑfขฅ`s'pN๗ฉS!•eุ˜ว9ฐmBืPPv-ๅ„Œ‘ป!)ฎEหะฒญYวัT[k&โzV[ๆˆ๛,้bร eำณฺุฐ)Ž:gn;f$ฉ๎ฎ2ณั5้œล์nMอŽ1pไ้ถS๕ y8“]8ฑ{ษฎฺZชˆฎmŽฉ pฎป Žm˜N#iƒๆ๑xั˜aknูŽูิชMถตฉถS#6ปขccฮแj‚น๐(ภbl9ณ ำป›Iฑ-fkUz๎–ณ@Žke"E๋๎:k ่ฬNย:mูษz ห.หกc[Eˆeป‡–ะ6›็ศ9ํชœาถฑ%bื`WใUณMh&ตmืR‚รู ’ฎK)†“ญMGRต5ำบส˜ฌ#{k„ม๎):3ฒณvš›ศ6ิŽ:gfฌฉญNตm€mk†Nซแn๗ฉf\ฃSžึJทใแะŒ๋v/1จ•ไDณ :อGถXv์ฮQ(ณ9๓8cึFwทlปc็4ฐ‰aหŽqถjภŒdWะฤ9g&ฦ๖dง@๎mgCUqwทๅTม[ซาฦ๋ฤแn{tาm]ถ„m:ณ aJ.#@‘;–ใ:ู*BYถœ2สุ๛™O}๒ใศ๗เ_๙~๖g~๒/ผkฅshz_็Ÿ๙๔็ž/'๛๚}ygมpƒช—_๚gไW๙W~ํรฟ๕ู/|๕|วป_๛๊๗ฦRฑm o~้๓Ÿ๘>๐“O~ _๚สืพ๙ฦ›oฝผว+ฟูOฮ'?๖่ร?ืๆฯฤ{฿๕๎w,อdู๐›ภฏเ7~๛S๐ฅ?๚๊ืฟ๙ๆฯ{ฏผใ3Ÿ๔'?๚›๙ฟW๗๔ฟ๓]ฏm92fm6˜†์อ?WŸี_๘??๔Ÿ๙ส[oy๙_ฦ›+mqฏK-ผอฏ}แ3Ÿ๘ไง?๏–๏๚›oณQฤฆใrf่ดj6'‡y,aหชMฺlดšฮฦ0C6M™หฒŒฌ‘ถซ›G6+‡•ฅฮ‘ฑ qgv(2f›จฬZRšอขA-ƒ53'Sกข™5หsJ–!3<ฦ0(8P!F+๎ฎ‚‘6ี+@ลุPGfN\kšี‚ฮ$)%ธ๋l+6“Lnฑธ=j]“ฤด(ถiS6hœฐฑฑMถVฐฐvฆm`d์:•ษ˜ลV6$หศ\์6h(ึฎ)hส–ญน)ˆ6•a‚mซKคbl”ฉสถซใาะdŽSู๎3xฤštกh‹‘f6ตี63ฃ•ซฬ…†ญุ=2SŠัtฃŽูfหae[tRcป˜นv์T‘†ู.œยH ฅ1ท„ฒlZรFpญึJืฌZyจ“ Hm–VฆaP"Pฅmƒณีฆb…fITjๆคa3ฒBH@Uธ:w+p— 7‚D 9[!ึlGf[ฃม๙๓๔a่็๕|uกBโF€1`.ccผ$ฎใธ9œ&้ฑำmง?tvg๖‡eg—i'›ฆl“6vโ๕mll@ŒนBท„$tK฿๏๛ตฯชณา•iีVๆ(ฺUญ’–V"(I5ด‘!&Aiซ ™F“hT%€m(ิ,BZJŠถMฺฉ!D4Zญv%" #Iฉvฮ(Iฅf %hC‚Tตh$ฺจhีD˜”RQIuF Œ”ูสJ:dsVG%QI2ฉ&0ั&!QQดญ‘จ‚@ฉ™Hั (Z™M‚eฅี‘ชR4š(กRัItT#IคฺBJ”$ญD“L I *’*RhfSD@“$bึ(H˜(& ฤˆPีŒ**‘ฆ*I”ถฉBu’9Iฅjކ ภhUKชh…$J‚h+ E"&QญฉiCjDI`ะ†m›!@E)ŠjAปย(‰ฆ- 0‚ฮฎ$้ŒDชHีชนยศจR#”KJi‘คUMAภบ-7t ปถฌFตๆ\พr๑์้ใวŽ<ั๑ฃ‡8x์\๓๐ู›6ฏ[จ–TPE%Dอ h›คญฑ๚[๖uื๛'๖Ÿทu๏CŸฟyรบU€@ขญBDะ9ว‘hั ’ กUHˆ ฒr์ภGGŸพzอu›ท๏ถ]้•ฝpาชkึ_ณ~ใš’ขˆŠ4s๙า™ใฏ=ไ็ฟ๐่ง——kmTšŒB „ฤสฉ7_xโ?y๚ๅ฿๘ไยXํ๎ฝ๗๎บiU–/ไ๐มรว๗๖ฤ๑ฃ'ฯ^˜ซๅ?ฮีcUซ6lฺu๋|nรฅ%@ `๙ยฉำŸœ๓๊ำWฏNFH$Iะขˆ^บt้ศัO–ลฦท์ฺpอš%€ฌZร๗ผ{k$‘Jั P•PI)h‘(ญด Iค €V"ŠVi›H ZJŠจ‚šIP$DZDP อDBRhขm($ชfจP(IRณ3’‘ZTŠIคUh%ฉˆ”hขะ$ฆ"’j+„”Qค! 
ญฺJE(%A$Z%€FาRIŠ DJชญ$กŠ„R%ีV@Tตฃฉ*I#M›e&‹ ั”ูŽ4%„&ัขF”ะ’‚„าJ(-จI$Аj2+•@#Z&ฉDD"4Jข•FTADTƒ$ข™EฅM ‘’ˆด1ฺŠˆYZ‰DKIFะ„šmRQQEUPJ4E(ข’จ"I+ฅ้˜" ั$‘‚H4ี(กPด4‰จ" Š "RZี6 ัBkDAR3E’4m… ค0)’( tถ!  •(‘"ดJˆ”Bีˆ6m‘!RดE:ขM#ชญจาด1ัFJBh4ำสศSgณศL#„Bจ*E))@@‘hi ขัชˆTi(-iค•hฉjค   ˆ’T'C*T!ฃh‹$P2า@%‚hU%hIF( E2ด ญ@ M€NE*ค„€จ‘ *I[hd‚„‚จ !ัT)iC[! ํ‰%ข…จ’@‚6ีˆ$QTงŒ๊ ”tถฦXะYฉ M*1Hฺั‚ค •TB"ˆะ*(LmURI …’X€‚h #PD ะดD„VกQDFญH•ูŒt ˆDQ‰ขMmPP$Z•”$ัถŠถRFR"SŽ%)A)ญA ขEขค @'a„ฆญDะ""คด…€f (ฅE"HZีD‘ด3–ๆ$‰ี’๕ป๎๘—๘ณ›ด3 ๓๊ๅ Ÿ~r๔ะทzใื฿9๒้มืŸแๆ[๏ุต้ž=;ื 2ฺF (!ถEะ) ฦ-7…ววฺ-๛ฯuำ->r†ตC‹ BŒ$าMี ( iซ‰B0#Š*ัB„‹Ž|rแ์ๅ๕ทlนk]#N~x่๔ๅss๋๕ถ์ุพ€J u๕๒™ฃ^ลำ?๘^=|ฎC‹ฎ”ะ@ŠJสู^ษO๖โฏ?พฐุพ๗{๎{เž;oฝa๛ฆีYน๘ษ‰฿๛๕}๛xo‡๛žัš๋nฟ๙wlผv)Xตiอ|ํ›[๎นผLJZ"Rอส๙Coผ๘ซO_ต~ํฮฯ>ผwใบีCฺB่ี g๖ลŸ}{O๎{๏ฤล+],ฦส Tจ‰’ŒZt+/<ไSฟz็จอื}๖sŸ๘ปnพqวฆตซ\พp๒เG๏พพ๏ี_๚ฏ?๓kv฿บๅk๗|fืฺ…DXั3๏=ƒ'žๅk‡.-ถ฿vื๗=t๏g๖^ฟeราผp๊๐G๏ฝ๑ฺพื๚เะ}Oภฦ{ไมkฏ‰่œ)Wฯ~๒แkฟx๒ฉ็๗Ÿ[ตicฮž_n h€ธz้าูฃวฮcm>;7ํูผจ–EำฑzŽ[nณฺ‘mาF$mขดD"Q-ัBDตฤœiฒˆะ*D$% ก$ญ) ี"จfHUข ค้B[-!IAAชPMจQJดD „†6คฉ@›(H‘0$PC$5AT•hHhJR-ePBiBจ$าข* $าJซ•„F• •` ID5Uh3Y”„F"ญ (!ด‚ ชั™1ฬถˆT5!QIP$MSฉšTฃ @(MญT IัI%ฺH B4‘‚H[AP ดDชMFฃฉRHข€"C ก$tถƒ @:)‰h!ขTฃMIจ@4%"M#J[H1คH€@าจฆ-Œจa„Tก*0J ีดEกจDE5 ฺD ขH€ f ˆCj–@ Bต$T%ฉ€6‰DEBSU%PiตIBำ*ภ$’*ะH hจขm!C… ดUBTำิ$iHด€ถˆ$* P1TGกH % *ฅ$ ‘L3)AฺซŸ>x่่ั“g>=wiyลXฝ๎škท๎sำืm\ปX "ญ„คDj6าคฉV!‰VI+ Cˆั9ศˆาซ—ฎ|๙็๏\rว=ท๎ถyต DBซQฺH U สจ†"B@Z‰Dซ-ค"r๙๔ก๖ฟไ๔5ท=ะอ๋„vฎฌ,๒ๆ3o^^ฺs๗›v๏ธ‹ ค ๔๊น๓'>x๙ต็ึvƒท์ดvฉ"-„ŠฉH(ญ€‰ศฌะ“๏พึมหื๎ผๅ–ฯ^ฟCšRU0—/Ÿ=}p?}แาJึฌด}็žoุฝฺ5hกBคฉๆสลณง๔รง/\iึฎฐuว๎๋oุฝ}๓ฺE5ภีณงŽs๐ม;๑้ฅ+sฑfร–7z๋žอึ,DZ%P‰T # ญB”6ัB(ล0' I €จ– ()Y‚f’ˆP(IRBtjdiรŽ=7y๗›้Œa ]>ตญพ฿3฿๐•ห‡^z้7_ฟ๋๚;wญ’ดD R3’‰‚$ก(kv์ฝ{็ปฟR35(HคhSญ&4$ฺถŒ6QJWX iฃชTšJะ6บrฃcg/žo฿ฒeื0{๕ภ‡G/^พธ๖†m[7๏ฺˆJ0+กW.œ๚๘ฝWิ๗พƒืN๕šm7ดi๙๘ษS'ฯฬ‚ฦˆ9“hปre~๚›ง~๒า‡O_ฐ๗ฎ/ม7๘wฟ|ำ ญ>฿7๎\๛o=๙›ใ_๙มo?ด๛พ k––4Vฏ|ใgน๑3A ชฅMFGFg—?yํฉ๗_ผtayีฦ{สwk ั._๙๔๔Gฏฟฤปฟs๘๒ชืธรลK'/TuBJฃm—?y…็^อ‡ฎฌ฿qวƒ_๙g๊Ÿ?vรฦ,Z๎ฟ๏sw฿ตgส๙ั+'พิs๗f็๎ํ7nY„˜W–ฝ๒ฤ๙๎ัO—7zวรฟ‡๔ปํฝvuิจ>๔ะท_ท{฿{๒๙wOู๗“๏ฝ๐เM_ปyําช!BขAK\ฝp์ญื_๙_8u^”Hฺ‚p๑าลONœธ’ฌu๗ฃฮทm_ญชUฒhŠTJT()"J+RชMc…’DQฺDฉ‚šTDขt(-Œ$ZE”Dšฉาhำ# %ฅCšj•• ชS’ฆ4T(จDEฺู •TiŒNB"J5%) ƒj„„FH2hK›$J%ค•DŠYLBค$ฅ1ข%I@5ฅ- ั†Hะ6šh%•šด‘F@% ฺ ส"’ถ %ƒถด ีะาD„TีlGJBBาvฺคšชV"J[:"Uฤ(จDจ†hCC3(1@hฉŒPกFัึœŠ’’ะŒhA"TIะPกMigŒ‰ †าBT‰Vi5Iˆh EˆH)’F"Uš&US %423UJะ&E'’ดšะ”–4H;„FตFจŠะHคM:ซข•ˆAUI%4*ƒ""4m“,RAฆ$ชญšDข m5‚" š*m้H Z %"ZtD+A RฅMำHจัC+C€ช’AiKŠา@[JI„ถ MBดกŠj“6ƒ$i…j(mฃ %š`ค:se๙โ…ร๛฿zํ…—๖ฝ๗แOฮ^ผฒผb,ญฝfำŽw=๔ศ—๚ญ;ท^ณ:iKอ‘TกFัึฌ– $4ƒ„„ถI Zะ&QdIPฬห็/์๛ปฟ๚›๓๗/ว?ผmห*ZQUJซี$!B—?=๔แฉน~ํๆ;6.Ziด$%„V‡คjrุฯ๘ฟ?๗ๆฎ?ปก›ื1‰EWฎ\๐ษฟซงฮ๛๕_้Wทปm]*#ีคšาy๙๔ว๏>๛w๑ฏ_>ถ็ฯฏท_ปiอ"”ฆ Z4Œaดั6จ4ƒญ0ี/}๗}๒ิ|๓O๏พ~## “mตsๅโ‰c๏๎{แ้็_๐๔๙+WๆXZZปm๗๛๚า#_๘์ฎMฃษU…ถb@—ฯŸ<๒๎ซ/<๓ ๏๔ส2Kcํถ=w>๐…/?๒ลปoุผ:-WOŸxo฿ฏ^x้ฅ฿|t๔ยฒŽล๊ ;๏~์ห>t฿-;ท\ณHj๙๒๙#oผ๔ณง^x๓ภแ3ฎฬ,Vmฺดk๏}๕๑/บmําR่ฅณ'๋ๅžzอ?9waฅKื์ผ๓ž‡ส#๗ฑm‚iฆ‘”ขญ."m•ฎ\บr๚ะ}?๙๛ฟษพK๗ใ฿๙ึ=›ืดFVฮ๛่๕_ผ๘าพื฿?~ๆสฌล๊ ;๏โWฟ๐{woฺฐ*จt6‘T{๕๑#๏ผ๒โsฯฟ๘๎‰ณWt,ึ^ป็๖๛พ๘ล/๗อ›ืq๕ฬ‘ท_๚ล ฏ๚ฝฃŸ\ผบฬชฅ ›oy๘ฑ฿๛าทm฿บv)4Zj&)„ะคY2[ณ” šP@่(ะJ‚f’ˆPจ$RB”6IมRฺ&ฉ†YRm1$Cอt`Zบ๖ๆ[๏๙าืี ฿90?๐ััณgnทk ฺฆ šดญJ’1ZiZ„ดU"]$ณข€j‹Fซ$‹‚AMQญR€Jšจช€L) ดež;ฐ๘… —6์ดy๋๖ฅfš++ว๘๔๒•นu็ถอืnญ$iฅeๅสง฿~๎G฿๛ฮž๐โbๆ๋๎๙ฺŸฺw~๚๓_œ:sžT‚’ dๅสฅ‹o<๊มณง/m>๒ลG๒อ$m#mYณ๕ฮฯ?|๖ิ‘w๚๖ G^|ƒoพsหบอ!!ี$m2b0ก™2ชWOๆล7~๛ึ ๋ท^ฃร}cั–มส•sŸ|๐๒ณ฿๙›ฟ~โฃK‹ตื์พ๛ั?zdื้ท฿๘ั_"ถ|๚ๆพ฿๚๘๘ๅฅํ๗๑๙ฏูWnุ8-าาkถt็เ_์฿๗›๏~|๑ใ—{เฑฯ~ใ–kkฬ•Kg=๓—_8ฝผj๛~้+ฟ๓•ฝ›–’hUปุpห็พ๔๘ฅณง?<๐๔‡—ํ๛๑s<ผs๓บUh5ITXนp๔ํ_>ฬ“ฟ๚ุ๖]wแ๘;๛ร{NOษิQišb๙ฅ๓Ÿ?=’uปvo[ตzb‚Hช"–6‹”ดQTด‰J#U„ู&ฉขj4+m")MฦbจฆH™ํ`$+†•$้œM5I˜US#‰ดmc4-BกA[iŒd QญFRํLFK’Y ส (%ข•4 mKYaค -DาYƒ*a6eTI”ูŽŒดMK%Fฬ"ี„าdL(Hก้J!!Q-ษSZฺtdTดZ‘&5cดญ$ š hฺRีa&iอFšR#:2งhD4h+2ดm3ฦฬ”ˆฮู2F„QEZ%ZL‘ˆถIขดขmh’@[MRUษ@ 
Hต$URM’mQณFๆ6SK"Z3†DQD:™อRะ6"ะŠ6QีFuจ6 *ฉPฃ™„จTฦb Š”ู†‘TดhHF็ฌ‚@c0ซ*"Img: -ะJชญˆคณBfคT#ฃๆhg2D ัjA#M(JคmBRด…“U-DาVจŠv6eดAF็์ศ ฺฆ5‚Ci•จ2“ักฺfคณช"‚ษ˜ี4ญ.$Eภl‡4ฉญ$JS!ฆฆVขm„0™จะูลB‡6mEŠH%ีVˆ$ัฮfคีHดmง,‚ภขEGญ”TUJFฺ™$Pฅ$ฺ†& TgณHี$Aฏ^<{่อ—?งgNฎ\{อw๓๐ ;ทฌq๙ฬก฿}๓๕'๚ํc๙๗๒ฑ‡n[ฝZEŒ6ฑ2’6Si3Fด“‘D5‘–ูŒ-"ข%€j:ƒCั$LŠ"คJ”&‰„šๆ0ฬO_๓์•ปo{์OโsืT5DZ5กฦ 5"‘ŽูPั6)&*‰ฑHrๅะซฯ๚๖={o๒]U’ล<์เ;ฯ>๙๒‘eDซขค‰ˆ*iข$จ )Zˆhซ`ิ”‚RUIJZT$m3k@d๔ส้^}๑;ํฟพต๑กo้v๓บซว}๑้็_๎ทŽžYฺ๙ฏ฿1P•(-AงD/Ÿ~Wฯใ๛฿,๖ลวฟ~ืฮ+ว>๘๕ /โป‡Ž[์๘Ÿ~†ฅ•9ฏ๗ณo๋g๏\^s๛C}๕๖๋—ฮ|๕็O>๑ŸžบดGฟ๛๘]ึd๙๒™o}๛ซg/mฬ#>~๕.Ÿ๚่ตW~๙ไํมนง{7m^cž่อ็๘ท๐๒‘m๗ืพqำฆๅK๗ฝ๔าหOำ3+ื?นkƒฉจhK$‰ถิสๅ‹'๖๐หๅy๒ฃs—ฮo*%4Mฏž~๓‡฿๙ึฯ_;}ํอ๗=๖‡wํZs๑เ›ฯ๘ูฟ๛๐ศล๓อ๒๐ปื. Zข5/œxู๋'ฟ๏~๏๕w|้k_ฝkวา…๗|้…_่ศกใ๊๗1ศี“/เ๏ฟ๕ฑMปxธeGNyํฉม›s ุcทo] •ฦจ0“jฺิฌ1ข-bˆชˆ”ฮ&้mDดhhขาTh›คЁ4ณ–f„˜ฉฬฮ%$#]ิ I“”ถQึnpM7ฎsเ็ฮŸฝzๅ2ขฃm‰4!ชi’ถUDH+ฉAตHาhFjhH( ชmƒXa1ฦ SชQ! ’$„‚ัHRณm2’TR‰ฎศ(‘b`:๖ัม /-mถe๋ๆญ‹ฎฎ\>๘แวW._tฎ-ืn]GItEฺŒ“ฟ้?ใ?๔นร—๋ถn}่฿ทฏณษงAˆdิฌh:!ฃหหW๗ฟ๑ๆฉ+W–วฎ[nฟๅฆ[ถT;;+ฃฬ&รฦ;o๚=7|๛Nฟฑ‹๗\ื-kฃ„dNญะ ้4“ ธ๚มหฏผึ{งๆฆ;w๙๐๏พzdj’่สง๏โนoงฟ|๚ุ•ŒUป๚“๗๏พ~าก'๖๏›1m„”N‘ๅ{๐ฬู3+‹อ{nบ๙ฎ{ถvมœ]ษ€ŠZท๑ฺพ๘ภŽ>ธ|โใงOผ<ฎ]ลี‹?๕ฬ›g/้๊ฯ|๎ฯ๛ูํซFฺถ+ษh"ฦึoพ๋G๏|aฏฏ\ฬ+}–m›7ฌ‡”VVWŽ๊gฯ/[ทy๏ฝ฿ทx๛‰ฐ: JงBFRํิs็.žไุ๒RVํพแบฅ5ซซQRf22MmjˆP‘ถ‰ฮ*T32ฅฉDE’ึLeถ (’‘Ž™ษli02f’ดญ•1ฅs$ข่$ฃm‰4Dาดš&hซ-าJ*ญYIขIjhE™ Z4กVXŒ1hฉT“Tศ ‰@‚เดY๓0๓u?๏98ุ €X Q$EŠคF‹YrœจNRทiงMโิอไC—i:ำๅCg๚1? ำฬด3M“t2$M"GeYถ6ŠฒธJ$HŠ๛pA€ุื๓>wฏซ5F3“ดฅ‘ŒQ#•0•Œ*1UฺˆšIGFa& คM#a๊l’ C;FTSYกIR-0ำ&ฃF:E1ฅ1ขs9KPZŒŒŽ%2fตดfฬฬ$#4ะ4ษ 235Hค*sฮŽTฺ ้œ•Y$P!‰่R$BFซ‹aถS*ิlFยhuJอh’Ym#„VวญvŠd”Bk™ฑJ%e •a6-ํh“ฆ:EkDFซf ‰ะ๊l,E;Sำ Cมˆ m›vˆPญะvVF็ฬbQiZH SฅJTGF:ชPฺฆŒŒ)QAi็P•V;1็$ั–@:jญฆ"5j า&ฉฉYIB‚Li;CgฆฬvhjษHF’jฅšค‚!HศˆนœฦhŠDMS‹ฉะRYT‰ &SฃMำdŽŒB3h†ฬฆขJฺ$ ีฬ4+Q#IชดาฆหD%EEหLFดsV#J›,H"ฉVำ&1I ีXถ‰ˆjFฦiJ5‰ด•ถM›QIปๆœข*มŒฬf aJƒฉŒVฦh™33ูŒ„ดัš่"3#eถ‘ฆ9ฦh้‰่TหŒ#e@uเ๒ฏฟG๘๛ม{›๎ๅฮ#ท์ุพ2ฺฤ์๚๙๗Ÿ๚ณ—7?r๓๕V‘1จฮ2ัขmฆ0C ˆถ:‡EJ5ัๅ”ัู M*M%ฉ$Lณฅ€ฤ@ำ4™ฃŒค‘D˜s๖๘วง.ŸEซs$†นœ$E าิยh5%I‚9[DH RญB’ZTฌฌ,ฮพ๒ย‘|ื]_ฺƒูฅ•ซŸผ๖๎‹ำ7ืลH‡$$ัYศh$ฉฅษคB—ฒR…Tง6U@ๆ2›’@งดI†^yท^z๊๕ลพฏํ?นร๔s7mพ๚‡ๆW/=หฟ๖ฺt&ฉดขe&‰œ๓ต—๙๓7ืn๚ึ฿๛_~ษชฬ์ฮฟ๛7ฯฟุำึก’Kฏ๐Ožz๒ฎ/‡ํ๗พ๕๙eๆ_?๘ื็ฉŸs็ถžษ‘ง^y๐–nฺuxIVkDh[K fจi’LำœรTฅm†ฮIดลHฅฉ$ ภLev&(‰ชi$หZXI›BE’ะ& ญ™”Tggกi(ค†ฐถame1ดM.Ÿ|๛๕๏ฃ๘ุฉuพŸํ฿๚โ]ทlEจ‰j?y๊_|‡?๑›ซ;o|เw?๘อ๋ณR๔+?๚ษŸ๑wž;ทqวm_ฏ‡ฟ|cถฏhZ$$`fดื:็‰Wžี3ฯyํ๗Nžฝด\Yy๐ฆป๙อo~i๗ต J™‘ฮV#ฆ’คjิะ ็฿|๑}๗฿?๕๖…u\;wโ๔ลๅ๚ู๛ษŸณีะฮ+gNŸฝyไ;อ?๙ฮฺbu็แ›พำทFv8tรมƒ;ถ\vื๏๏G_:ด}mํdKQชณM ฃ”ฬๅ}๎ยลหซณา ัูดJT*Aสฌ(EuŽR™-`ึศ % ี™ึฦฯwร'็ฮฟสsฯผp๏๏ฟ-LRษ๒ิฑ—_|๖ฉcืv๚ูG_]hT;"อ5›ฅ&:;udaกฉิB ช"hฺ6IE ี)Ibฮฅ$„j†Y‹dถมล๓—ฮปดบqกƒซสLt็ฎmปv]ทันS็tฟฤX‚$‰hiขนผพaรCŸป้ž฿ธ{ซ‘4Œ=๛ฏ฿ณ็๚kวฯ~๑ีนo๕ย๓ฟzๅ๘้Ÿฺwณฃ#Izร_ไoไทŽพ๑๎นฯ^—W7฿๙ตo~๋swm] 2ถ๎ุzใ-7/žy๑๘Gฏ4อ๕ตญป๎zไ/์๚าWXBmฺฝw๗๕ป6ผu๙์๙‹›eคํ,‘4 ีบ๒ึkฯ?gOž๚ๅ฿ฏำŽณg฿=ดtH—W_์ศ๛6๑;w฿u็ี$ฒXํึ๛ฟๅ;^๘{oฝ๓แ›Ÿณgฏา6ญๆใืx๛ํOวพ๔•;6ฯู4อb๏Cw๕๊koy๏ฅ็_หททๆิ‰Oฏ.vํฺuฎ-‹…ˆŒ-oฝ๖โ…‹—/_YสT“F$ญHฦJcฆัข4Ziีˆ1‚†6"ัถiIEhข P$E+ZกAซ2ฦ i+‚& @IBšN‰คW.\๚ไฃจํฒ}‹@—๋Wฯ~แ‡'ฎu๑้ลหW;Qiฆ๕‹gฯ|๒ัฌ^x๚ยฒ5(ๆ•๓็?=มg7_ูqๆชY ค@"i•aฺล^ฟฃ'x๋O>=แ๒ี๕e“>>~์ท_๙žอ๏พn!š–4’$ฅ…h+ –'}๔ฦK/ผu์์ฒ,/œปzแงภๅsg.Ÿ;3ึฎ[ป๑พ=;BFFฦฎปพ๒ญ์ถ๛ฯญ์<|๓อ;7ฏ(P•V…DEซsูeั9g็D%Adจโฺœ—ฏ^[ยr9RB•M$iD]}็้'~๖gึท~่๖๛พcำ"Mาา&Yy๛_๙›Oฎ๎บ๕Ž[๗]ทie คAM !ณb–šหๅผถ^H#ขi+า9]นvญชsนœณ•^พx๙ท฿ปbYปณk็ฦ ‘™tฝ‰”dรฮปnพ้ภxๅนัปGฯœปy}฿ฺjMๆ0ิ…7~ƒวžy้s[oฟ๏ก฿๘๖—nYR@ซmฉ†Vhฏœ?w๖ไษห‹ี•๖9w๑๘ู3/^]Zlบmวฮฝื$5ฃ’ าY-$MRกf‹T$iตฒ่h+’j‚LB’ดค้2‹’(%I"Iดล McFL€HP‚ขj’@งภ@"ช‘$•าiDBซ‘d-*mขI$FB2gIฬ’ จhณชชาฆฉช3ณ  ฺะ9QหŠูจhJdฮŒQชฉถษˆ0›QI)กด‰Vำฬ)T2Iฺ" สœห1ฉ"!iuู„4‰ T’JMA0K%i4mfAด*B(*ฉFftู€ˆhiคm‹˜รัDQ#4ŠNhชZA‘ถ“D"ี&า‘6!dVHEikขBณ&’`ถำ™ั$UA ’ L2$ีR fCi ฅ2HฺชสdDัะ6m!‚hาNE#Jข6I›1@:(5‡$ญjh้L4iPอ"ิ”˜ษ mฦ@gฉh‘ู˜ Iฅ -MHดAตh™‘eSh) 
าข#mต+†ถD’N•VT*ญPาLDšh›ค‘ค-4้ฬนLFŠe‰4!คZ‰‚ํlJ"˜ะA›%#€$ีฆmคกัาHDฉŒฉ‘†ู44eชญFTUcvbไฟ๙๊›W7๐~๋มv^ท:!Eชอส†อ+Iจๆ๊้OŽฝ๒๔“ฯฝ๘๖‡gฎXz[๏}เ๛๎นm฿–Eว๒jŽ๐Ÿ~๗ุ๖ฯ~f๗สน_~้ต/ู~๘3_ซ฿บํ๊GGžzๆลW฿=s-‹mปoผ๛กฏ~๙žwnู0Dื/ž:๑๊Ÿ๘็/ผ}๊ยีฑsฯ-7o9w๒RกกฅD#กZ!›oz๘‘+วz๓ๆป~๓ฦ i]=๖าk/พ๔สี7}้K๗}๐ฯ฿ผPH\;๗ษฑื_|๎WG|ำ๓หฑeฯม{๙โCw฿y`๛ชๅๅ3๊฿ใŸo๖ท๎ผz๊_y๘๚๎ป๎๛อo๋ถ >}็ู'žx๎ตฃ|zy}ฑeื ทภพ๒เมตV้ต ว_{๎ฯ~๖ไ๓o}xvนุ~รํ|๕ั๛o9ผkS*dŒ„าZฐบaำส๚'็ฯ^`็”ฬๆ๒ๅ+—ฏ^ฐi฿๎๋ด1%Šย0K็0าฺvห๎๓๐สถฝk#sชฤ\.ื—K‹ kk‹9}ๅS—7zh฿พ›†Mฒ้–;๏zโO>๐๘‰ห๗~ืoอฟwu ื๏ธ`Vำฅ๕๕ซKซk7,Fฒบ๋–‡}๚ฺฮ‹Fd๊๒๊ีKWืญnบm“$ํlJ@&bh™’ ๛฿๗อฟพ๋Kt็มqjร ะˆฮ|“K๗ฺณ๏–E@’ฑzเŽ›๖ly๛Ž๔๑™๓=๒ฟwถ~๙๗ใGn๒๑๑ใ'ฮญlนะ-๛WG-ตMฦถ}๛๖ธ~พr๒ญืŽ็ณ‡mฒqe^บz๙๒ี๕ZAYฟpๆrร๎๋ถlฒฺญ”๊ๅฃO่้W>9ท๛o>๙‹ว_x๛ใำ—ณyวก๛x๘๓Ÿฟ็†๋VิฬXF4็{๑‰๙“O๋_/?๙ฺG?พผvญ>๔ศ็?ฟ๓ำ๖ำ'๘เ“‹cใž;๎{๘แ‡๎ฟm฿Fc4คญ€&ศd$XQDขh‚”Vฉ ขh# ีKว?<๚๋?ร๕,{๘ย=๖ํ\-Z-’ŽTKR@ฃaDQ„@J ดP]^9ัปO'โ{็/ฏ์Sำ†{๖nูฒeญ6#bฌmผc฿ฝ่โ‰g.]พ"B Sฯพ๑ำ?z์ูื^yำฟ๐ตฏ}๎ถซ]^%€$ษH•’ž?๑““g:ื—ง~๕ฝ๛ƒ syํ๊ตๅ”•ตM[ฏต๏ฆ[๎y๐swํนiรiก้LFคZh%"AซHญ‘ขาBข’fjBƒTm ัด#DiQญ m[I‚F$ฺh’‚&Zขญค”  @I@ต #$ ขJm+†h‹DBำะ€)A’ถญ$Bi… ด- „jBCeH-ลˆฮšT“˜%4ัšŠTจจ (ชค’ถ*ฆ– ฃญD ฺษH ด#’ิBEจb6 IT …ขm;’!jดั‰$"Š0„B„’F‰EDก‚ฉ-$ f4iŠTt6ฆŒJ)ƒBg IดCCฅจdะ@Uฃ ฉ( !‘9;# ฉดƒjeดด0ŒH›จ(ŠJ •ฆ"€ถh%าด@„)a F:B"0งภญ๊ˆค„่$ 5ะF…$ALขD 4"‰$@BซฦะJ)Ihf*ˆPฅชBIชีH@Jj%ฺV"mUBดTZัŽhI J AiIฺาATI‰DซกM‹ถIiT;eดญDUBfEP$Iš O:๑วWึฎฟ[ฏดฒ2ฺ ”Hcนzโ#O<๖“ง_๘deวฎ๏ผpโ_่ร๗>๚๐+_๙‹ธีฬ…x๑ษOพำm7ุ~๐ฆี“'Žฝ๔ใ๏๔๒m—N_ธบvเ๐กณ'?|๏…Ÿ|tr}๋๑;ท๏ฺดzํ๔G_~๊Ÿl๛่ร=mุถ{๗๊้๚็?ไ…หฟ๑อGoูZ-Bา˜O{๙™๘ฑทฏ\เภ ืฑ~๑๔ว/>๙ฃวฯลฟ๒ฅ๖฿q๓O^ฃ‹๖์ฟ๙3๗์บq๛ม๋W;z๊•วไฑ็ŽžXnน๐ฎลr๒ู๗ž๎žน[_gn_=๗๛/>๖?~โ•s;ทwหฺโฬั๗พฮY9 ‘Pฺj”ยุrร็๎ฺ๎๊‰_ฟิSG>w่แ]‘ธvโญผ๐ๆ…q๐กฏ}๙ถล 3น๘๑ซO=๑๘ฯ๏ึ]ื๏ู|๕วoผ๐“gฏฬ๙ตG๎ำๅต๓ฟ๖๔//m\พz๎าล๓Ÿ\^\ฟ้า•๕ซืNพ๚ฃ๛๏๑ๆ๙น๚{ltuฬป/<ต๑ะ7พ)X?๙ฺห/\ถnุถ{๏ฺูx๖ฯOœY๗ป_๘๎ร[‰ึฺพ}oบs๏[O>๗ำ฿๛Ÿ;ผsห๒ิ๛ฯฟ๐๚หญท?๐๎ู‘1(mฉู"IUฌmพw๛๖*$ ฮฝw์N^พ็๎๗ฅฝtโฤงWฦๆทm„คt๓พ]7mx๏๔นำง/ไ๖๋ท๏ฟuซ$tVไส้ำฝๆ{ใบ;>shำฦตtฌmฟ~ฯ6$Qอ• 'ฝ๐๔ ๏|0๖฿r๏=7oT Dšถ €Dถ์8p๋ึฝหฑiฑ่™T!‰eฏ๚่ฤ•๕๋v์ุผ}KRค”ี={wlZนvๆ๔ูำ'/m๐็๑ิซw}๕Ÿ=pๅฬ™๓็ึ7oบnืž5”$b์ธn๋๕;VืŸ9~bบqฑ๏?‹_}แนC๛vs`›s'฿~๒gฏฺu๏รw>ฐk!h$ตผp๒7Ÿ{๊ืŽฝqำุฐy๋๕#'Žฝ๓ซ๔ล~ใท๎฿ณA4 Zr๕ล_๛ี“ฟ~^ผ†=;ผ8๑๎ฏ=๑ƒ๗}†ซ๏žบ๗ภํ.|๐ฮ;ฯ>~ๆโฅน๖ํ/ฺ$ญVิ4c1ฒฃDdะ‚คคะๅีห—/žปฐaฬJอฎ/ฏ\<๗้Gพโsฯ<ฬ{ื6๎ป็แo๖๏ุsๆัVตั˜hC+)€ถ(”&@!Zช3ธvๆฬฑ็๔ว๒ หM๏๙ย—ฟ๐ศ๗ฒ๗ึตqํานOŽพ~ไ?์น3๋D!4IZ4„€ๆบรท๐่o~๋‘ป6s๑ฤ๙๘#‹Cwแ๋฿สgฏŽ1าJZQŒฑVZ„‰ "HชUีdฑaร;๏ฺ๙“3งฯŸ}ืxŽ{v฿ดsตŠJขWNพ๛๖หGžyใŒ k๛๖์\ธFE ŠJื็•Ž๔ษ7Oœฟฐvใ=w฿q฿]{6G[!ZJ’ลXYl˜%hQŠคiEE’ีทpำืญœ:๓๑ัw_y๑อv฿~BซBb^9}ุ๊ฏล๋Ÿฎฯeถ์ฺต}ึ5ษ๒๊๚นOฯ\C6mพemmE P‘jภุฐบqฮ-\2ฯ9w๕ฺถ a^ฝt๎อŸเฯžz๕๘r๓<ๅ/๛เ=ท๎ๆฬ‡ฯป๕ฝ—Žผp๏แร7พecคUฌl฿sฝ}๑ฝฃ๖ิwฅท^ทy~๚[otรm๛ูึšิTข(‰ญJศ๚้7_~๖ศซว;๎ธ๏‹wํะ๕K.]‹-7lุฐˆDถl\[lX^Yฟ|ๅJฅš *–—>เ๕ž๚ีGห›ฟ๘ต๛๗nธŠDS5ฏ]พ๘ึฯ๖า‡วzํญOฦถ;z๔ก๖,(•‰HII"ญ•ฑฒ2Fg+าฮ.ื/ฟ2mธถบa…&‰ชุผuำสJฯ_น|๙ฺฺถฝ7?๔ตoุ๘™ฝk›ว้+—ฏ]™ถฎmฺ4ข2ฤ๊ฦตkซY^นpJปiหํ>๚…๔ูืžใ๓ง^ูที…Oฝ๘โ…w๏-๛wmก‘ถDBช๓า๙หหหื๖}แ+๗zVื>y๕™Ÿ 8mึฤ,ฬฤ|ฯ๛ž>งปOo๊nukCป„‹  f5xผŒว•๑ฬ—Tฅ๒๒5_’ช$•ชŒ3™สT*3žqสc[ุฦ์ @H€vดwkํEฝŸs็ฮu}๓?y๖ฑผแฎ๗ฝymT[ฅ™;N_บ๊ฦป?ม›๖/O?๙๐?}๗?x๘;งn๛๐ืพ๘ท>๑ฤ?ๅ7๒๑Ÿฟ๏฿ธฆ5€P%Kmขาj‰ ส,M๔าฉWŸz๒!็๖.ฺFVซ+[ฮผs๒…็Ÿ{๙ีS}ำoึ‡?๓ตฏ|๘ฆCปข€RDK ฆ@)-Dข ( Uz๏ฤ๋ฟ๘ัwŸนฐ3wน๓SŸา๓๎c—ฎพ๖ๆnปn๗ลwฯŸ๛ีำงJต้จ™JSS4Rห,:ฎฬห'^gkgkฯ๑#‡๖ปรœซ๓'Nœฺ™ซ#ว๎฿<ดkŒะRีZะฮJ•จฆUDิX}๐ฎ฿{อS๏œ๛~๖๘๖๗‰{ฏป๊เž๕ตัี๖ๅ๓g~ๅษ๏เ‡๒ํ•ล๎ตใx฿แฝKigI(ะํ‹็_ษท;y๚ย{๛=w~m‡ลl"@!‰ฅ@’M5cŒพ๋๎ฎ}๖ๅทNฟ๛ฺ+?ม฿ฟ๏ะ๏sรี๖nฌ%s๋ส…๗Nฝ๖ิ/๙ึ?=yjgล๎๋ฏ?~่ชK์ฌV/\„ฑพฑ1ึึ*dคฺาRคษbฌํณ[p้โๅนีTญ.]|๓™‡๖oพ๋s—๗฿๛ฉ฿๚ญ๎น๑๐ZีACi“ฮ&ัลฦๆซoพinญ–{๖m๎ุตkmซํ+ฯ=๓ฮoฟ_~ตw/f๙ล฿|อw…† E!h+0งHPMIPฺT“„4TJ i !4QสHดจ "@…ŠVc@คกH •ึH5‰AาJ+%Ib6mK„คm‚@jj*$AฉŽš"ข`h;k$”"ดIh+‰D1ำH‰†ด3"HUQ‰jZED$Dะ‚6aJˆ6Cข‚* Hดช‰6ีา‚ด+†-•(H+ฉฆ‘EBSM2คLhUB!ญ()ญDณF„’BFh[E‘ข#ˆj4hšมlคfƒดกI•išJ›ด(คS้ีˆ Pˆ†BDgๆ"กJŠ$ยlZ ”T#-ฦข€$U•ชข…*” ME%DดFAaฦ JZญV$4mU#D5hˆิŒ $‘ฉBK ลฐhงถ*I€ถ@ำ 
ญ$‰ข4aฆจฆก-@‰ช4J„*HhสHค4Uƒ†PUHฺฆi!ะBฺ "AT›ค šjR 2‚PDช$i'Tขi+าZก@ซhG4:›$ ˜ฒPีดRtUอ RS2v-B)T าt4ี 'ž}๊ฉ็Nฯ๋>๓…฿๛๔Rำ ื]ป๛ูำ~๕ศsŸ่แ=Zบๅป๏ะ]7ฌ๎บmใ๏ฟีฟทเึใา=WNwใท๑ฯพtn็ถƒหืz฿gฟูึง.|๕๊ต'ฟ๙่๋/=๊ๅ๛๏0คe^xใล—~๕ไs็~โ่ฎK๏ผาป%ๆb}\บpโนง฿ษ‡nณs}}9ห๕ฝ๛๖ุCuพ๛๓โี๗–woฌ็ย;ฏฝ๐Nun๏>ธoœzี“ง฿:ณgืซ/ู‹kท~๙ฟฑ๋v/–ฝแเ<}โฅgz‰ะฆE“$@ณ๗ฺ;๏ผ็ฅgŸc฿3ษ]{฿{๑งt๗ีlฝwโW฿๛ฃฏไๅ็>๗ษ{ฎ=บ6qอ๏ฟจฒ8p๓๛๏ไW~็ฎอล่~฿ื๛๑ฏŸx๓ญw฿9็ๆ $ีtnฯฑ{ฏ:ผ๑ิ+/=๑๘k‹น}๙สjืแญญ‹ง^๛ากcปCš Tัdฌ.พ๙๋Ÿ~๋‡Oผqภ๘w^dggฮฉI’ h MTgTZt๋ฬ›ฯไแG~๚ไนk?๒ๅ/~ไ๚ฝk M4้ฮ๖ึ‰_>๓ืNพ}nต๏ฦc{voุูjึ’BuŒ1ฺU[‚†–(@ซฉN’ŒญจŒE’ถk›Go๘๐ืnธโb฿๕…7ฺj$Qข”Qc‘ ]อนSฝrel<ธoํตื_๖ษำ//ปฺพt~{๏ัหง:๓ัฃ{7w/ •ฦพCว๏๙ิWฟ๐ฑ}กnพnฯูW฿yใัg^z๎๙ณŸธ้ˆk๋ว?ลuไุพัซ๎ผๅ…ง๘•—ึ๏์ovฐt๐ž;ox๚ูŸ๊ญื_ฝ่ฆDeM’$+–Mซ$ยค’$จf@K ่{/?๖อ—๛&{พ๏ป~๓K๔™;Ž๎].ำdC% DbดคM*ั‚&$„”าi‹*$†DทฮผyโฉŸโิ–ื|่ณŸภW%1ฺา๎ฝๅฎox่—/=}j‚T!- ฅขH#‘ถ;;oพ๒๊นUฏน๎ช,hฏl]~ๅี[sต~์š#{๗๎KTHค™•คPZjจช41[SD"%‹ุg~๋ฏœฟ|๑ตืŸัื฿=๙๒sน๗ู\๔าู/=๕ุOŸx๖๙7ฮํศbืžใฟ๑ภM›{๗&-Rฺา"้ๅ๓๏๚๛฿z๒ิึละ๏พใ–๋6 ฅ ’šัj$H2+ ฬYm5#UB‚ล ๗}๔ร/œx๗ไO^~๗ๅวพ๙๏xํcฟ๑ปwd฿bl}๗ีง~๕๓'๊ไ™mdใฺ{oฝชซ7‰ูนณฺหๅbŒQ‚ฦl“‘––cฌฏ-ม๖ๅี\ตาีึ้W^~ไoŸ๏พvnนใ7ฟ๐ฉ๛oปะB[N Z:[bv–Aฒyํ๎ร7พv๚๒nพ๑๘ัƒl,\9ฮษž~โG?๑ิ…ื๔ญ:~ไภกc฿ๆข#ีQีJ%ฦะูJZล0ขํ$I€’กE’าTUEHำ-š$Iข%B่PIgBhHE“V"2@ชี43ฉ”ถ“(‘$A)i˜ณ‰ดESHhZFฉŠช™vD™Bำ4Iค” ˆDหlBำ0‚Vา6"ัาFยคฅขฺ2#1ดZš$าJาฮ€4@CขASšมˆJ†่, "˜š6mkŒ9งD ดQํ@Z3า93B’ดZI+ ดาJ) ’ZŒ62ต ฉU‘Q!กจm(D  ImuถƒคIคจV*ƒดฆชHiKล์,ฉH$i;kฬšRUiT[#Hb,ฆŽ*• cจMZQm!QM5Cซ’ถษHiPm+ข้จˆถฃšHhขbFJ*‘จ ษl#IBHฉก™1Hฅh h% กญ• ญf)FU€้dBๆ์่ัhำ คC*ญ H’ฉ:›จ ‰h%ี”Pาb0iiƒ่์ 1H`VสHา   ษะhฉจfQcจˆ& ฺ–กs,†˜-UาF™ษˆ5“ูีLRE#ญัา&J!ˆh“HQEฆ’†YeH’(‰U*5Šf„š ขญd$ัถี*ฉDŠ.w-vํ๓ ๏฿™sัEยTฅ,2[ช๏ผ๙ึปo^ฺทš›฿ฟฟฉD๕๋ฎ?zอ‘งฮพ๒า[ุMฆึb9ฦˆvvศžuูฟ฿ฦb#ศbนต{wW—/\šsv„ฑ\฿ุ8ppWำš5๖]sศU›ใฅ๓oฟ{.–QHX=}ๆญ'ฮŸ=๓ุ_' ์ฺํ]ซ*€J[:๑โ‰ —;๓๘ทO=ะ€ฺปนgฯ๚๒าป๏~๋ํีๆ‘;๎พ~cŒŒ6iฑ\‰คQfŒHํ๒๐5ท|เ7>๔ซŸ}ใ{๙แฯน๑‡~๚๒›ป๎๚่‡๎ฝ๏๚]m*’ลšี•ญ๗9{ๅ๒ึฮœ๓๔๙ํญญ+[Wุ…ฤ;>x๗๕ืูญีK—.ฝ๑ฬK—Wืs็ฑฝ‡ึฃi‘Œน ฦžอ{๗๎Rฒฐเๆฺฺผrykk+(i5Cy๎ไฏๆƒ๛ศ3{?๔ปcธz๚๊์›ฯ>๒ศร?{่มwุพ๊–โฮ ข Iกฺู๗๘ๅ?ื๚โฎ{>๖้ฯ|๎kื“f์ฺX_Œี๖ึjตCFูน ๓๒ฮึj•ๅbน\ถa’n]<๋ว๚ึCฝฐผ๖_๛ณฯ฿ถm5าฮFXฌ๏=๘ภŸห[Nฝ๕๊S๐{}o฿>ป๚W๕๏ฑ7%AH1t&‘คศะjต"‹ฑพ{‘nom๏lฯูตHSƒนuy{ตสฺrนkญYŽ๕ตƒปๆDwญํZ,[s{kkี=ฆ.”ฦ๖ฮj{5ว๕ๅศ…“๑ožz{๓ฮ๛ฟภทำ๓๏ฝ๚๘๗ฟ๑ํ๏ีฉญ๑gŸฝ๏ŽcถeQ‘,7ึ๖์?ด‡ํ๎ซฝ๚ชลgNฝv็aA“$ฃ‰H’ลฎ%ฦฎลฺ๚ฺžตลๆฃ#ชหต]‹ํ+/ฏHT3 %Tภ2U4•*Iถ(ศX,‹’ -h็•sฏ<๖ฝา3ฟิั~โ๖ƒ‡๖„6mชmK‚ดaฅšF5K Dตau๚ฬฉื_x๑ฒฑ๎๐๗฿ถ๐Q3dะdชD€มl‘$ีdvga˜;—^y้ไฮฮj๓ุฑ›๛vKmmoyํฤปs:r๘ๆฝCคsV%„BFาด$Pฺ‘Lฺši#{๎๘ฝ๕๛ฮท๖ไg~๚‡>๕ƒ‘$c,#ำ‚นฑพ๏ๆฯ~๎ฮƒ›๋LJ*f $ าv๛์ูท๑ญo?wq›c๙๐7~ํ.ƒv,ฺ‘vขฦjv9ะQ:4จ6า ตชฑ๛ฎ/~แา์ฮ฿?๘่k๏~้‰ฏฟ๘๓!Y,’ฑ`%w~โฃท?ฒฉญDกdŽŽฆS†ั%ภP้ผ๒๖ซO?๒W้ก7v–w|้ซฟ{ฯ]7๎]˜ibLะถmคcjช•ฑk๑๎9๖พ{ตฃ$ซ4u่๘พ\s๋่ํ๋๔ฟ่๕ห็žyไษ็nฟ๕๎๎฿฿ขZ‚ฉvfJค@ชFQ$ม์ mg2FฦสD%iI# e5;†ัj'ษ(ฃ "Y˜ซ™ŒR%)ีV†ฆ4BJๆœc ƒDVอSD‡AGฆ 4ษ,Dhฉ&า6iW’ašJ ด ’†ˆ’$i*:K%ŠŒค)dD†ฒชัF@hอHu U4Rfณ RIดฬŠ ฺ]ี€$Iฬู‹™T32kนh!$ด•ฑ=ปhHำjง$‹hK*Q™ํVm ฺhชL23*UE2Œชถ"Qกa†2“TLF%JD1†T5ฃ@Wi†1Sš$กๆœcคcŒฉ*ะ่”ฑH™Muิ‘hฅกsฮขจถQคi› ะPก*Uh’0;I„€T™$mjFP’YตHg'ษ(! $’‘ฉกL‰ฆัด%ค4™dvŽฤZ ูiF*JRํ‚‹jІŽd6@š2;GFฺฦlฃษ mƒhง$$ m“$I[CK! 
EFา YPfa ƒข‘ะV›คด ARณอHZZ"mีT ณ IF: ญfdึb*IŒฮ•ŒvHฅ!sฎ$ @%H[$Eด4U*™dสˆ@ซjฮ‰‘EU[’่ 3Aฆ6Y˜Œ  s2 ฉisพ#G๗๕๙w^x๎gน”RIF";sตฉห—ททถ,๗mฌฏทFๆl2ฒถฑฑพkฝ[ฯm'ห„H’$Cำ6P%P@[DBH‰]ปึ–หลฮฮฮ•+ต ฌถWWฦแซ๏๙ส?๑ญ หะ†Xอ9@5ชv.mmฯ๗๑O๘ซŸพุ๋ๆŠ$๕๖ฃ฿ปr้สry๕พM2าY)0Uฃi+ˆ* ะฒ~อญท>๐‰๘๋๕[๏|ไั=้฿wฯm๛ํœ@Iปu๎—Ÿูร?~์ฉ^zณ—.\บดตsีวnึ7ึF††™ํy๎•,๗๎฿‹5-R :›ˆ„ฆญ,ึ’@ ฅ2Bqแล'๙ำŸ*ท๎ŸwโCk‹ฉฬ~อก=žy๘'O๑]\iD#Uี&iTิ ็ฯฟ๘๕๓๎฿า|ๅ๓น้เขl:ฐwนsแ… ถ:ื ‘Œa็๔™s[—7677G"1ฺี•7๙วฟ๏พดyใง~ฯ๐รวึส่œุ™ญน‹Œ1๖นv๓ศ57๖ฃk›๕ฏ๒ว฿{ๆ“wฟ– m •YรŒ-Uก‘*•คคY;p่ภr\:w~๛โๅ1ึาถSFœz็ย•+cssฯๆฆฆšฮdมม}{vo:ณu๎ฝs3้ rย…3็.ต#‡.บ‹๏<๘่K;|๑ห_๛๊งn›jวฝพ๏่ฅ๑_๐้ง^|wq`฿ iŒดmI:vาEem}ื๚ฺาœ—wฦ iซ…jถCE€‚aTIฺฺถำL ˆฅaTซtัAง$IG๏๒๑หŸผ๏#˜mVW.Ÿy๋ี็ž|๔'?๔๘‹'๚wๆ…_พ๔/฿๛่ญ7\„(Mช‘Mํชี่H›RมชMˆŽ ัดถฮ]z์ุฦ๎%!PE5M4‘DSm•BiWmd P*MFๆฦuฃๆ๊์๑G~ใ'žyไูs[ูณ๗๐ตท}ื=๕าsO>๙สฅ‡ฏฤ?๛ต‹}ห9า–Q†(ฺดl:๙๚Oพะปูžปo๛ฤวnปแุม…ศœS F†ค$ZษlWLQ@… chW’ ๋G๏๒?;z๛๎ั๗~๔ฤ“/ผ๖๎…หsm๏กcท~๛๕๛็ป/<๚๐ณ;๋kื}ๅw?ruGึ#j1kห5ฐฝณ3็ชDซ$ Cดซnow็โ6X฿ณX[.r๙อง{่มษู๕ฑy็W์ณธ๖ะฆ šฮา !ัt6Iคาฺ˜‹ฬŽIฉŽฑนv๕G๔ ฿{?๊โฉ7Nพz๒ีW/wฯn 13’†Rณ‘ะดhาฮ4Mฅ*ษˆ$คฅ iXค ‘ E›ฬŽฦํ0ฺIชDRฃšั4่œฬYD›hCฺV‡h„N- iRซUวŒด;อXอ™$H[ํl#aขFจ$™ญ4‚fถM‡&‘2U 2ฦœm–0m m4ZmJBป’ ฅTf2าt–ข•fJZmฺV@ฃ"ัC‡ศ(T;คtEF"ดณำ IหVId” บฃ"@™ดคฆขEj1$C)FC-"ัคดU Veˆ$R+e,D’І):'†$ƒฮVชMg“DZFฺจ‘่จNช2 ฉjg2ขAY5‰ู$Bะิ,Bด4ฺดํศH$ˆFESmŒJ’ค‹jฅณf„…ฎๆศhg 4$2f›Iˆจv๊ช# 0ZำjLm›Aš2gcˆ*‹tฮฦ”ถ‰ะถm‹0U3B%1 ‰Cฃ‚HคาTWณ Efอvt.‹9+IIฬi,@[š๊€’$-muNกช’4A•ŠDk6™ณ#อ\$ช chŠ’"‘hกDา*i‡ดIš อ"‘™mิ c!QŠึิ™ฑœ้0าP)!ี1F;A’jอhIี €ิbDLฤHง‰Hฃm[%5K$ยs5'‹ Œ6Tฤฺีื^}หmื}็แพ๛ฯ|๒ฟบ{mฑ{ ิฬˆF๗์^[_wi๋๒ึๅŒ5Œดqe๋ส•ญ+‹ต=ป๗,tฅดZ-I$…ศ ‚˜Ri‚ฤˆัEต\บxy๋ส๖ฺU๋ป7ต…ลฦ๚๚žตnŸy็ไลq๓&ŠI บ*้Xทoฯ๒า้ณ็.œ๊ฑuEb,ฺูฦbืXฎ/W.]^uถM3H@ญi†ˆFสN[„Qlบ๚๖฿๙฿๙ื๓Wol;ไKw฿r็กนาv"ŠK/?ฟํง_฿๗o๎_~ํฆc‡๗ฝ๗ณ๗/แy‰4‰†.—c๗๎œ?{n{{ซ]ย ีU;ฺ€ย่ฌ˜U m1:w๎ฝณ๏ํ์s๔ฦึดณ"r่เฃื\5~uู้+บhPˆฉNIงฆฃก—฿~๋ฉo_>ฝ็w่Oฟ๒‰{ฏ?ธš‘]7~รGึษSgNฏวื่์ฟ>y๖\฿{๔๘๑อ!ํ\ํฌ๘๎_๔ะW฿๕น/๎?ถfN 2†ํำฏ=๓๋๏-7o๛เ=W/ๆ\ลbฺ๊ๆๆอ๕ล๊ย๙๗.ษฉ&Aชกฺ0˜€ c‘Œจayท๘ฮco8}๊ิ*๛ืHฃง^~ๅไนn^ไุั!a$:็ๆ‘c‡ํy๙๙wx๑ฌ/,Œ6ฎผ๛๖;oฝณณ็เuท฿gNพ}ngyตWํ?ฐทM#ปoธ้๊=ฝ}๑า• —uSฉL$‘ˆ.&Sฯ_ผrาฬฺฎ}{+ƒดD"€$f ษฬQข6FtT+I ้–mอภ ้œฃํP") ดU0ึvo๎ฟ๊ศแCm„‡_u์†nฝๅฮ[ฟรc๏ผ๛ย๗๎[ืู}เพJi*Mฉ‰ฆ"IH˜Z( Iา@*•\นผ}๑ยล)หฑ๛ะก‹ล2!h6#ŒBกดาJC2ฺๆสs=๘๏=๔์]ํl]x๗d/ญv^๕ฟ๘a=์l]:๛ึ{๎•็Ÿ—๎ณถุ}w๒+๚ัร#‘ดPด$‰‘d$IuM4Rs6cนพ็๐uw}๔ภu๏ฟ๏๓—.]ูYษbฑุ•Sฟ๘ๆƒฟ>๑ึล]G๑?าw!ญjTซ)R‚ฬำoฝ๖ฬ๔….zีoึ}ื฿pd=-0UdถA(Bฃ4# $‘ะ6BซfM’]๛ฎฝŽ]๓มฯ}๕โ•ํNห]๓๔3?ํG^ปธkืๆMŸ๚“ฯ฿uอฝ T,—cs฿ฬห/ฎถถƒฆ‘Aิ่Žํ —.‚{–‹}โ‡฿ึS๏eฯ5ƒ?GฎฺณHีˆ‘ณc ฺถ(ๆฃJข4$c์ฺsž๗๛_ฟq๚๒™ำgฮœ~odO[*ชŠ"ญถm…RาŒ11+ €hg$ข”‘ค”ƒVาHC“Q@ต!•ยTZŒŒฆeสPBV’ฤ M“H:S$$กcHhขี6!iั’hH ก3$ั6‘ Œ ZชL‰hฺ’1RTมœ‰*’A1† b‚ะ!5[’TJ%้า@ฺูฮd$$ชZ‚Nล(‘า6’V5E ีB A;Cฃฺ„ ‰!ด ‚ถชm‚จ&iKTD-%ˆRดE(MS#ฃ"ะค‚H'šQั๊l‰าˆ4Jt€ชˆ4†ูŽ$‰ชj1“!mahำXT#าสHฺR@#IZีดšลh›V5*UษHFำ(Bห$ค ฃค$กษจPh1PกZEc-ฺถ’$HK"†กmฺJˆ BK’dด%Hbัา6’Aต%ั (ัP‰า!šถฦ0ค‰…๊lG„6MฬvPีŒDดญ %Ai›’JLส‚9คZภH&i‘ฦOœ6๛}ๆaพ๎็6เ`%V @์$H‚;HJโ"KŽc7UาNœL“N^tาi๛6™~…~vฦIณิž&NmYV$Q–DJ)๎HqธA์8็๎^W%j๊”Œ4ี M'ล(R’hตขšTSงH  ข ‚fD†‘ ดmขh!HชIฺB!UI15ฺค(ดˆ™4ษ0ดjtˆHKeTด:‰ู†}ท~ฯ7๏y๙|แ'๖฿๚ฟ๗่้}+›fPึฎ_|ํ‡?๔ภ้ปwํตcฯา๏?๗ฦื=ธyช่tใฃ๗>;ูี{Nพo AAดm‹ 0bฤ@ฆฉkซmk ซŸฟ๗วŸ˜mu๋พž˜mูqหพ}›ึž?๗์ฏ>}่f‘f}}mZ_อยาฌฅ ฅB3๐ุแ-ฯฝ๙ฝ๒ไ‘c›จ–ตืฬฯฦ๒–M[ทo๋G>z๗ข][“!ึVoXฝv‰ฉH‚‘A)Iถ์ูswพ๓ฃื๊ใ๏?{ๆิ‘]หsอะ@’qใฃ7z๓O—n๘ฟ๗ํ›7nXX˜›m^œŸ'H€dใ†ๅGอไ_ั{ถํ^PขY0U‘€ถjdˆถ‹––6ไฦ—ฟไ“๋ำถลYาŠK—ฏ~๕ล•ฬoูถgAยฤดM22Qhz๕๛/ไ/Go,>๚ฝ๚'Ÿฺป}iŒ4vf๓=w๘ซท^~ํอทOผ}|๗ฉอdโส๋?{๎ท_,ํ>{ัถคฌ^นy™?ฯ|บ๋๔฿}๊[งo]™Ÿiฺiˆ่ล฿|๖Gฟ๚jœ้๎|฿๖ษะ/?๔ใ/ญ.์ถkฃฐ~ํ๊ี.,.ฬ/ŒV:ฦ˜ฺHS“ดv6[<๑่ฉ]ฏหoฟs้วถฮฐ–k๏โ™ท?]฿๗ก[์ฑี›—ฏฯm8Kfป=|oผ๙๖ฏž}๏ก?:ด4จต ฏพ๕๖นs7oูu฿™S+ฃืทฌ,Žต฿q๑ส•ซฝeใ0คฎ๖๑๏oฌ/lฒqำr”$กmซญNึืW•DGพ๘wŸ||alบu฿กIj๚ีซ[\œŸŸ•ถ ชษI@ijฺ)&@าัถืฏ\้ยโยยยŒ@;E2g‚j*ก "€ €Mดีคmฦโ†อ‹๛–O/ธ๒มฏ_๙ฯ๏ผ๘ oฝ๗เษ#ถoIจฉ6อบD m[(#ˆาPชฆตimuศยยา\FัBา†IJรDฃiคฅRส E4ึฎ~๕ษ๛๏ผสkzํย๛๏_ฌ_ไหŸ 
หW6n:y3R’ ํิ*DˆถคEกic’ั&c~y๓ถๅอ[T5๙๚๚“ทํ_u๓m'๏๊์฿8Q*@จถF+In|ม{ฏๆW]ฯ๒ญ=q๚ึ[FัขคS„T•HมบถPB‡คF2hPๆ—6฿ฒg๓-’Yาบ๒ง฿๘ํฏผ๗ี๊า๎ั฿ฟw็๒h[bฬ/ฌl฿>็ฃต^๛๚๋ฏoธู.–$04bบฑzํ๗_\ัšmูพyแ๚…sฏฟ๚๊นOฏฎZว/็๗ั,!่zพx๓รฏฎOฝ๐๖/ห๑ฏทฮฦ†ญท็|๊p–็!dd๊:!!ั6ฦ-;Wๆๆๆนq๓ๆอ7Th„ชถ’fา’D'DำI$$J‰NEขj’ชvจฬh›$ชAดIี’คะชช!‰HตัV)า D$ˆV›AาBช%:’ถจ$ZI*า’ถmค M RคMˆ„A›Vb€6iETRกดMBZB iMด"BUC…–2ช™dLข‘ˆhฅBห„$(Dˆ–V@)ัv"HK‹’V%ฉ*ฉ$B"mซ ัF› •D€v2ต$ƒT%AฉาดIฺถm„!i ษ4ษ,P าชšจ4#•กดฺ$ฺAŒ2bR•ะ$C[JCซiRF(ขšPีF‰5…Fำฉ‘คD่$U ฺ2ม”6 ีHP$ฺ$ฺ‚$ฤค*อˆHUด”j$II!„ฦิค’JJต F‚ถ•$mฅขชาTซBชMิคC#คhา4ะฆmฃ ขอ@ด ’Um1$:ต’’*ZSี$c5"!ส@0ต )%*ะH#…า*IKUKฺT’ขm:$ ดmBดTBBt"Š„ึd*d$ก’@;Uš’ชถ!Bซ2:1"TŠh“ัขฆ(MดM"1[zเฮพ๓‡_หŸ๊/รื็^9v่ภ๎ญ+ ]ป๚ีgฟ๛ํนื^๛twทถ๛๘ั“'๐หฟ๚b{<ถwe๎๚—๏พ๖‹_๚-G๏>{|…iHŠˆ( (mซฝ|แรืŸ๙มถ?t๛๎ๅ้โ๏^๛ี_z๛ฺฮwŸบg฿œHI26์9|่๔]wพ๑๔ห๙gพ๖3‡wฏŒซ_ž?w๎ฝ/ฏ,ิ๗90K+Ks๓7>{7›l]ฝ>w}'ทžึG๗๔ป?๛ษึงฏOฝeษีฯ{๓ฅื>ูzืใ฿}lื๎=‡O์yํW๘—?ุฑ›ฆ‹Ÿฝโฏž{ƒ๋ฅZI‰TTาฬoุธ็๎๏r๏ล[>r`๓ย 1K’dZ[ปy๓ฺชฮmุบฒ8ป๚ๅน_=›w?๛์๚ม=”e~yeฯ=?r่฿๒๛ณx๙ปnฐ๖ๅง๖฿ญ๚ฃ๏ู:(ญD"ˆอ๛o?|์เŽž{ๆ๓m—๎=ฒcใt้ย/=๗๓฿{Ž}๋•_…/6ํ>}ฯƒfn~๘“๓ื/]Z<๙๘ร๗y|็B"0iiํฦอkWฎ\๋t้๒ีi}ํฺ•‹_~้งYX\ฺธyใยศส‰วพq๗?|็ลUฎ}vืมอ‹7>๛ํKO๐ญตํ๗œฝ๛ไแ]ึพ๘ห๕ฏ~๚ัส™๏“ณทญlu็ฉป๏3/ํ๘ทำcw฿ถuแ๚งฟ}้็ฯพ๚๕ยฟ๙ศฑญ๓‹w{๏ท~๙ฺs?]ซgN๎฿พฉื.~๖๖๓?|ƒ้ภ™n฿ปิถ:ขJ#!ซ_|๑ ๛=ำษ}›s๙‹๗~๕ณg฿8ฟธ่w[6ญ_่'๖ื/]Z<๙ิู๛N˜+@kดZ@Z…าVชำด~ํรŸ›ฟz๙๊ฦ;Ÿ:{฿ฉใทฬ'ดD+™ƒ ’”คi1‚BRีV#ย„๓++;n?sxๅฏ๛าฯ>ฟ๐๕W—&[fะข I$ m HคUฺฆT32fƒ๊๚๚z[สHดmI[ะ ช๓ป=๐ฤŸl:๖ๅฅ/ฟ:๒ž๛x}รแ{9up็ส"ำฅฯ?ํo~๒ฦE[N>๒๐‘ฝ[7ฬg,l;xเศฆศค) ภ”(€IำFH‘ ))าi๕๒ป?๑Ÿ}้ฝ/nn=qว™Gžธถญs--„"ิ๚ฅO฿}๓ี฿๚duqถํ๘ูoœปui>TŠฉด•F Eค@Ecคั& ีRI่๔ี/>๓ณ_ผ๐ฮ๏nnพๅ๘™?๘๎รวท-ฬ†VTลลๅฝ{wไฅ ]ป๐ษง__บ2eqึฆ&‘้๊ๅซŸ}r†ฒฒg฿–ๅณี›ซ7oฌ›ธ๘้ห?๙V?{๗ๅฯ}™ูา–7๎็฿8hy CŠNSมlฬฦด ฅฬ”…ˆ„ศ”6P…„จ‘*Aขค4‚IกTขPIจh“าH!0กEข…P„I”€Dก*F[P"ชPี๊HPZIั MะB’hEึJ(U"mคช ” ช %QP" %"กPT@ A(d„ะถI B@ eHดJห$3IT˜”$”TดH1‘ฉBhคะ–„Nํ@ ะMด€ถก(J!!”ญ(Z"R่ฤะH‚าJ‰v‚„จV„j%‚D+Edj%ะJJD*4RจIข%RA+ŠBT‘PชH–„$ช I4IhL5า0‘jmABKR-J(E Ihชฺ6I(@€Vช „DA‰€V# P- ก `P€D+ก„@[ !Z%ZZ&†ะm“ hAคQRTh !L5C%๓w๎=๘ญอ-๒ฅ7>xฅฯฮฝบดฐ0—1Mซkำุtโ๔้ƒปถ,oYนใฬcืฎ๖น฿๘ป|๖๊ฦ…ฺู๕‹/ฯo9๑่ฝ{rวฬด.‘(PH€(ะD+สฐv๕สง/?๓ƒืฆ๕k—พ๚โหซ[œ9๛่C'๖oไชw฿z๚ัoZŸ}็7?ฝ๒ัKหK37o\ป9ถ็ไายบr๛๑ใ‡{๎ฃwŸม๕๗n~dำงถ๏<}๖ฉo฿ู ฏ๘พ|็ฅ๓ฃk—/_ษ-;277ฦ๒ถƒ'๏ฦ๏ืืž›๔ูห[ท./ฬน๕ฅ-๛v8OE !“‚ A;ๆๆทผฉำยยฬlดะนG๛ํปฯๆใฟk๓าlmuZฒฒuma@ช"!Qsห[๖๛๗›๏ฌ์อ7^๑๙w7oZ๋๋7;->>cDP€jZAXุq๘ไมๅŸ?๓ฮ‹?๏^ู0›u๕ฺๅKืฒ็ุฃ<๖๐กm/ฦsฯผh๏ยฎwู1'm“๙๊W_|ๆ้_ผ๒แฅ๙m[ฮฟ๒๓๓/ 0ฟcวแทyrฟู๖c~๋ฉ›ใ—oผ๓าOพnyqnฺลฏึv?p๖ฑว8ถg‚ซŸ~z๎ูฟ3oญ>๗•gฮฟ4ศผ•{ัฟx๊Ž}[๖œx๐์ฅ๋—๖๚นg๖ห77ฮg}ํ๚ืWฆ•=w฿{๖ฑ{n฿ฬzฟ่•Ÿฟฐa้Žฃw4(Qฅ:‘›_~๘ฮK?๚ญKํอ๓๏_๙๚๋x๖‡ื?|~วฎรท?๐'Ž,vl9๔ภทฟ}}/๔ฺ฿อ๛็็งฟฟxyๅ๎'ึ}w๎฿บd๕๒• ็_๛ๅ฿ฝพ}๋ใ™,๏วูo^ฝฑ๖์ฏ<ถฬฯึn\พาๅƒ<่#๗Z˜ํ<๙ฤ=ู้—>8๗ฯฟตiรBืo^๚๗Wnฝ๋}ใžCทฎŒjQBHฆีห฿y๖G๏vๆีห_\๘:๛>่ท๎=ธufZ›.}๔๒ožrใา้cงNtEBPZ@ˆ$Dาฎ_๚๐ฅ฿๚โๆMw?5QH0D""2™:กJƒ Dฅ5จข#๓หหsต7VWoN:+F@[ำค 5j6" UคƒI!  
UญJ*!1ฟ8ฟดachฏ^พxcZ›ากฆ"DTซก {>ธ๗่ำอฯ<๗“๖นOฦ๖ปพ๕๗GNํูTW?xแ…๒๖Oบ2ท๏์wม“๗ถu)ณŒ1ฅ5 !L%„€‰ดEณ‚jคQ]ฟฑz๑ณ฿/๘์‹๕m‡๎xเก‡ฯœฺžvŠ™h‰ˆJJาี/฿}็อื^z๗ซ,ํผๅิท;ถy๓าP)"dhI$จi*อ ฺ ฆฉŠ4"Iu๕ห๗ŸงŸyๅอOึ6z็]>ิๆ“"„สา๒โCว6ปxษืพษ_~=m฿>˜jLอ(‘v๚๏ฟ์ท๏_hš {o?ฐฒi็ฎรw๓ศล๏_ŸJ ด0๕โ๛ฏ๐๛+7ถ๎ปw๏ŽMณูโฦw2fB%าb0IQกำ๚ฺ๚…O/ฎฎ”ๅๅๅๅหจjB„ˆBC)AD$LSi“P!…hา6 ‰PU–jTก@’6-dิ$ถIฃ„ฆ Eu*šขmiA‚ค-D™ข1S’Iตญ(ฺFVญ ญVRR BQ !!EะHKD :T’ค DัŽจ$„ึคebDD%I)Ii h ‘ฆCMFาถ ด* ชm#ˆ่คDIšvขคขQI4Bˆะถฅ@Ijค E!$  ่คU$ดD†@55† TD‰ดTขฆvDD‘ถจ’T„ี4Q*Œhถ „J(mGB…$&ข%*MตmA•ฉZ#5B"กmFUคEคi”Pm[ดI$M‘-ชŠ$ฺ0iฺ$%D’j5 E„Hตm’TZtH#(ฅ1ยdŠ„HIA0ฃ•D D52…ŠFš$5jŠถ%1DI’‚(AšŠD กก#R%ีj›TB ี&ฺ)‚*Ašฦด5TT‚@ˆ -P”ฉ†6e@ำVัข…jฬๆ7ํ9๔่?ธๅเ‘W_}๓>ฟp้๚อฮๆ7lบkแg๎9พceanfๅภ~wฮ]ฯฟฦG_\บน6ทi‰S'O>qโะŽM™Œaห‘{พzใเ๎›"t6ทธใศรO|sใแ]›Ckny-Gzโ[ใศึ…ลYV‘ [w๎?qื‘ ผ๓ปฏึถ9s฿้{ฮœ<บB:…๙[๏;๛ุอ๛V6อW7ํ:~ๆป+›w๊…ทฯ_ธtmmlผะมc'๎8yl๏ ]:xืฃฏ.ฝ๚๚๙+๋ห[vํตฌ#+๏๊6๏๛๊›oŸโ๒อตiร๖ƒG8u๗]ว๖n]uห#๏-l๎ญ฿}yใ๊๕ฅ-ทธ๋ถป๕ันWื๖-งฃQF˜2ๆถไ์Cทฌ, ศlqi& EfK›o=๓๘7ฏฝiq~๛‘S]๏‹/ฟลว็ฏ,lsไฎณ๗Zู๓qฦm caห{žฦฅถnXHDตณล๙งž๘๓[ž{แ๕๗>๙ส๊๚ึ[~๔๔™ปถนlฟํ๔^}๛๎e•Bว๖รgบ9บm็กS„h็ถ์:๚ภ›w๒ย‹oผ้Wืnฌลmo;}ไŽ;N?บk#[n=yxื๙/–Fร€จ&iึื6ํ:|๗ฃท…HFZtถae1"๖๑ศฌ์ุ๗าหo๖ำKW,l๚ฤ=ž9บ๛–•LๆฒธใิฃOž*AA 3‹ณŒดcqวั“oฺธu฿ ฏฝ{หฏoฌ/ฎ์บํไm'๏๙ิžงw-nœ+Y}์Žonฺrหo~ส{ฟปxušํผ๕ิัSwy๒๖=[ๆŠn9๚อ'—nู๗๚koผ๗๑็—nฌY\}ุู๊;๏?s|฿๖ ‹ฃšHm“ ,lูผํ๖3wnต.ฌทu์ฤ=ง๏8rlฯข4c~ว๑Gนqy้๘ญปVf]ุผy้Gพนm็พฅฅ๙Y’Žลอ{œ~จ;wํž“(w:z๗อนqx฿†Yๆ–ถx๔‘ีซO๎ต2ช‘J…bฎRT4าQ“THTซ- TชA ฌฏฎ_ป๐ๅ•ฉ•นล…………!’ฬมอ๋ืืVื”f$:ลดišŠjMฅดS3หˆ๕š € Xฺธดyห๖E๏˜n|๖๑7Wิr HTฃำิฉฺj‚0ขข5hฦX[_๛'Ÿฏล†๛vmธyn~ึต›7ฏ_๘๔หŒฑฒoŽ6ฬอ ID่,i E!H  mud’PTฆv ฺฺ่ๅ๓๒/้๗>นdำ‘๛x่‡NnŸตU •ีIา๋_ผ๕๒ฏŸ{๏ฺย๒พ}gž<{xรbT@ซษ-@j JBำ$JP4จ†` jบ๙๕ฯเož}๕ฝ/V—ผ๋กงฟk๛€$”Ps›ถป๗ุ๒+/_ปษ๏|p๛งoบm>ยค`๒…O>|๕nd– ‡๏8ถk๋สาŽ[พ๙ิm๗=tฺ้Z5†D€ย๚อ๕ื์๘ณ็ฮ}พํศƒ฿ร?:{|iŒน vฬๆB Œ&QI˜Rดฆต›—๗๚นฯฎธiyŽํ;vฎ$iUะ†v$ชP Q…†ญTJDง‚ ‰ ึ… ่‘˜Zš”V‰ถF( ขšฉš ŠษHUงIYŸ2’ˆA E‚ดD•(@F:ตSBซD BA‚‘ถRAšฎืˆT)A„Žh!ZL@@ํ‘PสT!@I5‚ช$ํdŠ!U 1MI $ฉj%Q€DงNษJ4Z4ข"ญ(ีDD4ั*‚hZ*AD B#mKV@$ %QU@#CK”ชEf$Tดt’4 ข(ฃIฃD‘vjG‚j+2bRะ†v$š*Hh‹)ิ(‘tB+Rj’D๋…ˆ *‰าJ*A'%Š$A*Šiา$ iH„jฉˆtm2F‰QB•-mA(”€‘ถ)‰ะฆD‹Ee QhJR‘IืY„R"ญ$QกQIQƒา‚m’DE$iตI !‰€R•ะถ ‰˜ฆ$ ! 
m‰(@ข“)I’%ดฅD‘ŒV*’$P‚)"$ขคEC %I@‰ถ(Zˆะ4 Q ฉB#Ci I7ผ๗‘ƒg%•ถดEฤย–m‡x๒๐OD&IPดšŽ๙x๒Oๅำ4% K›nโ๚ืOฮFงVฐฐํเฑo‹ํ ้—‰daeฯm๗๑฿้้” ‰ึdX\ู๘เ?๛_B’๙ล-ท๑ไม;ฏ$‰ดฉ†€ูึcบ๋ฤ#Oj"#2ตA+ ›vพ็ป‡ฮกฉ@ซน๗์Yั) €jฺฑดแ๖?_็ตษH•%Šbถผu}ไ_+บt่ฬc‡ฮ<ฺŒˆLš~ขEลๆ๗“~h !™'๘ร %ฉไ๐#๐ถ‡;EBตb๙ๆŸ๙VDซCT‚VŠนๅ•}งฮ๎;๕H hดา$ลุ~ไžำw|๕๕โฝ{–˜jDฐ๓ฮว้ว0ัฮeถฉt*ร,iI—v๏ฟ{๗ปŸ,4ก mw฿ภฏˆ6Iฅ M4าถูา–[>ธ๗ศƒอŒ)IŠ6ฅcnฯู?ggˆฉiˆ$ห{O๛'ง๎“‰ัจ„ I ]ุq๘ไc‡O>ึUI*-ๆ6๎ูผoื๒ย˜ฆๆ๊ีk_|๒ู•‘ป๗๏ูฐ4?"`"S'ณšHL…ภ4m D2’9„ฉด’–๊P“’!บ๚๛ ๏๊?ูำ^ฒพtไฬ฿z๐ท,คฤlDv๊ค ั4Lื~๛สหฏฟ~๎ยดดoห๛Ÿ|p๋Bง!!‘dhฆtช6#IFำกฺEชZEJ“`ช0˜VWฏล๘๗ฟ8๗๙…ีฅ๗ภ7ฝw[Zญ$–6,ํ๐ฑป๓;ฟน๔ีฏฟ๔ฺัรGŸฟ<หV2ฆim๕ยฟ}ใๅgฯศฦๅพเโ๖ฅ2ทดqหาฦอ’ถณŒfjCด#ํ๊|บผa6†นลM[ถ๏ปw‰ด:MฉšVWoฎญญ™-,ฬฯฯ iคบz๕๚—ฏเGฏ~y๙† งwh฿Bีh่z$"hฆ‘If™‚ยPฆi}ŒูะReีv#SUาRาu?Ax๖=^ุ๗yฯ๋๓๙1ƒภ์y€mึ†0’,!ู‘์4ฑ][Yœไไ๔ฒO.’‹œำคฝh›ค'‰›ำิIฤลฺlษFFFZ€‘ุั 0 3฿ฯปฯำ ฑ™Dฮฮv‰ูpNฑIื‚m8w:ู˜eํ,KUึL๎๖pฮฝ7h6ฬ9ป4าd๊ฐด ลฉf0k๊li3ะฝ9ำฬฎqZr†“ึึ*—๎ฎ3ฒc3#7ฆd3{•a3f้h9นl;:็น0—`Yห์”ฮภฑว๛ํฯๆg๖ฮŸy๏_๙ภฒฬu9GN—ฑป๓ะ]ณ:‡ฒํ๎q7+ษZlำร–ฬrตปN+jถนปะQัŽ™aA1›…‹ ๗z8*งœป–qiุุœข@mŠšlEl๊<[ูLำู6‚M…yจ#›#f“วTกmป7.Œaฬ.๗xุXeญอ\็ดbbv๖˜m4Vำ้q˜ศถ๓๐ิiณซ่pทkงsp4ญซฮ๖hSNใ๎;Dณ%hม^ฦ็>๑›๐ฟ๛ีฏ๘๎๕ิ๗}่G๎็ฬ<๕ฬ3o{๒S=ํีฯ๊Ÿำ?๛๖˜S์xๅ ฟ๑฿ฃ๎๏ษkQU-๗f+€ฮlญŽwผ๏G์=๖บW>๙ฯ~ใ์K/sl๎uN฿๙ไ~ํำฟลW rzh๗L.w›rฐmตูK/ฟ๕/~๙๕[น฿~้;฿๒ืO็{๓žท?yxบ;๎\\b:œ;nu&€๎fฑ6๊ฤ๑๊7žิฏwํo|w๏ำ฿๛‘_๙๚‘ฟ๒ึsฆ…ู†๊œใœว˜‡xแฟ๕‰?xs/=ผ้๏๛‘_๘ฅŸxา9๕๎ฝ๗ €i1.Pู6หญ=ํฬ๋/~O็รwพ๖•๏<>y๏ฯ์G>๒K|Kgทiถ“;wถ'ฯพ๋ฏสGž๛žท=๑ยง~7?๚ั฿g_`]๋[Ÿ๛๘Gษ๛๙๘—_๊ษ3๏๙_๙ๅ๗ฟ้Moจิโ์ึ=c6ปะึ}ดป\'้ป๖๑฿๘‡ว๔?๛?7ฟ๖G฿zmwปนlzฅŸ/?Ÿ๎ทพ๋m?๙ณ?ร?๐7lถฃSฎ™ำ'0๖จถปMญ0{ผ‡6ณตm-@ญ์’ห#v:ตvQน™S,๋Ž'yt็N้qc–;V๔lงุ๊lๅtBgD–๗ชN5.†rzˆฆm๎6ๅะfฝนวี9็ฐํzฬ{rUฉ:wอYgขูฃMะถอึ๎†์ถ%N:v:ณikถณตภrN=<ผฒฑ,kฎœชฑM{๏ฝภ,ำb\]อฆ#–ีcfำm๎๎l6w๎ฝ็์6อfไqุฏซaบ‹M4pœtาIw :ซฮnณuu‡q๏E 6[GปหUขeQv6&หฆ%ีธq–ป{ํ๎่”ฑํ:QMุ๋#0จevwgศlvถ--ๆก5›ฑuฆžcGฑูฬp:ฮffปe็!!*ต]ํtBถ3m็ภ&Jใq\”*ฺmธ”ƒูlปูฑจชs ฮ)l็ฦฝหฅดำSvกึ!<บc0mฌ ู2‹‡tถำม๎ุษํฮฮึถY9ง‡‡‡{Œ8ท–5.์'ฺฆ๎vทmrIภ4†หฃฦHฑฌnฦl'dถอฝฬึใฝญญiถ“ื‡นwc2็ุณ'Tlคฮชcตถ.wwภ…ปบื๎ฒšsคnmีฆ%]็kW+เ.jWŽะดc๖hsฮขm†3vํl[ ัbŽiอฆ‡)cรฉZ๗๖ภฒคฤaฏฟŽ’ตฺ๋8g3‹–{k๋(ง๊lWซNย9ใlm'ุLงŽผฮฦJuิ๎ฎM‰ฐmป›eY$๕๐PPูvื๋‡{=j2าNgฺภYgŠkƒ˜๎F{ๆน_O๖_ะ฿ด]uG˜ูNsgYcู&็tฮY]โ„ฌfZtO=2ภ๎ฉkAภดธ<2สv‰eYุ๎Crตํ๎q\[๗Žีึ4ษ™อฝฆq๎อ†‰ฎ'UEๆn9ฅ:ดe]๎f wugื๎˜sคVƒภ์žถZ"tนว9w๎๎ุช#[d•(ฃiมชถG›rฝw8p๑Tคห6“๊0SM;9—พ๑งŸ๙ิฟxใใ›ุl๋๑๑ป/ฝ๐ฅ?๛แg๐๓๖ี—w๖†๗ย฿ลwฟฑ๐ๆ'฿ำฟ๐ๅ๋Ÿ๙ล/ๆGŸฏ|็ห?๛3?๒žฟ๔๔k฿๊็>๕;ฟ๕ฑO>—^ํู7x ถ‡ปดฆ ฦๆzR`ึฯพ๓นฬ_ซฟ฿ทฟ๘›อ?์k฿๘7ฺ‡>๐ฮฟt^๙ึWพ๐๛๕O~๗๙^y๕ฉง=2›ปํฐmaMบ๗1toฟ๒าWพยƒ‡w<๗ž๏yๆ™S๋ๅ_}แ๋_}๕้๓ฬ๗พ๏นง฿๐์qฬLณ3ญ;'ื^}ํๅW^}]ใกื_ฮหฏ=^๎ใk/ฟ๒ยื_๘‹งฎุรำฯ<๓๔“'FnงX์ฯโฟ๚๚G๚[ฏ{x๊ฝ๋๏[?๑#๏}ำรฆjใ^ง,ง‡๔ฺ๓๒cŸŸ~๓๕ง฿ฝ๏ั_™w>ต๎ฅlw“ช9I U๗W^๙ึ+ฏ!น/ผ๒+cืw_z๙/พ๕โ3O? Oฝแ้'o|๚มฉNjห_๚๒gง๒ฬืพ๓๚ใ“๗ไ—~แ็๚‡฿ิC9๋๛์ฐย9O{ำ่—๚ซ๛ ๚/็แk๔ูŸ๚ะOะ;฿๚ิ}๑ซูO~๒SŸ๙ยฟ๑๚Soy๎ฟ๘Ÿ‡?๓๖ท<๓ภT‘ํึ้๑ฬงธs7ฒฌฮญณ๕๘ตฯ~์ท๋ฃ๔wฟึ๏แ'ๅ?ภ๘ฟ๛๑=O๏ตo}ํ+ฯมg?้?ำฏฟ๔๚ž<๗K๖/ฬ‡๐O;ลฒˆœp[๎v•3ด๊ศ 9M%meQน[{คขู&อๅฐ!h]ˆณ 6ป[Yวf::m‡ธถู†Sญ 2ฺตํœ[น7jุ์@-รmJ็ช-lฃดq†kซิJน &้”mซ๊ ฦ<า4Vิ์œYFฒ๎ฬŽi›มดŠdฅ3{ ภฦั9ปl(ฑbNGีฦ–m›ณmญN#elœrธ€จ!็P€Y'๎๚ล—@Wฟ๚ล็ฟ๒yๆ๓ัห้76๐ฎ๒/฿๘ะsoจษhj๑™Oฮฏฏ๔พ๖สรร฿๛7ฝํ‡๐=o{รร Œ–ใLs๗Ÿ๚๕แŸูท^ใ๛>๐?๑แŸ|Ci“I f˜ูศz๕›Ÿใ๖?ืฟ {ๅ/พg_z‰๛ํWไWoลgž=…‡w~เร?๗oเS์QวŠฝ๖_ใฏใฏ๋ฏพ๒ฺ}รs?๏‡‡฿†ร6ซฺ.ญ€ฮY฿๓ฮฝ_๙wฟฟ๑;๐ๅ?ฃ๕โืไณฟ๓ฬฮพ๛๒‹๑อพา}๊ํ?๔c?ทมฏ์๛฿ิS›Mยฒ)Qe` ฒu๔ž๑๛ษ?๙“฿๚ฯ}๙ /~ใk_โg฿๔=ฯ<}vฟ๛สK฿~๑ลฟชg฿W~๑฿๙{็—>๔๏ž‡Blc1a.M5! 
[mไ0 ขฅ6l[ะ˜ู j›ญชณ-ถfฅ"w‚FCk[ถM™%N…kB!m…mดฆ™J0ู&<ุe—“$ฦ6’ณeK†efีญc †„ŠีๅX์ิfำ6UPm ฌณษ$A,ุ้•ุh–ตM6“˜1ŠมlIq 3%3ุŒpท4i†cูUฐํฬฆb0˜ุึๆศฆ“`‹‘@@V #kด คmK“ฑ-†ู6Js ซฤ6Z‹ญel€๎ึTณQศิGdณLูฐ 7gฬfYZุvบulภn๊viุาbV!1Ž)ณQูจ˜5สฆสย)ถ‘,ฮƒยุ–จรœณk%…ร๎า็*[ฒwS* š5IาŒ…3XƒอŠŠูhชีฦV*KณP่nY…™ใุุfจอ@4cm`˜๖ 3IๆRuทX Tต"ฎัฑšk อ`f0ๆZjs 2tFฐ’ifห4Œ”1ซb]ุ6ต8dซฤ6šล–Iิฬ@ฦถko|ป๘๖์šPรf‰2ท"1ูึpำฤ#%.Nม`7uห& ["Lfq46*…Œ[G๋„mูตN$06๊0ต0รIrฐ UซŒPŠmJ(3K“lnอžšlF[v๏V P™ปžz๊k2ZEคบฐfเธs˜ฬLjณaถe’bถ1อล)UŒชmUmส์๊˜ุ†@า ึ0dป$จ ำ& HaJฦ`ํฎภLม(!ุŒรHณm[ต็ูทผ๛ูทHฑA™ญ9G—จaZhถMZuฎววฐ‚สถ[ชถ5Šง๎Š8`v๏Œ%2^้›_y้›_งž<๓์›๚๎๚‘ฟพ+?๚c?๕3๕๑์“งฮฆ่<<–็~๖๏{฿z๚]๒“ŸŸๅ+Ÿ๛ƒ??ฯผ๙/ฝ๓ฝ?๘๓๓งฺฯ~๘m_๘๒ีทมิŒrށ`ฮ {ๆoภOญ๗๐Ž฿Wฟู็ฟ๚ฅ?๓็ฯSฯพๅm฿๛พ|๐๏|๘็~ๆง฿๕ว็k๖น?z GขŒ ŒฆL{;฿๙ึWฟ‡7พ๙>๘ƒo}๒ฦใ‘วู“'็]ฯ=๗ฬyรa0ฒ&&ร๋/}‹Ÿ๛ฝ฿ญฯx๙…ฟ๘ย๏๓/ผ๏อ?๘“?๑บžฐูึๆตฏัวู?อ—_{๊-฿๗Ÿ๛๛็#x๎อOž$ษ6ถVั@{}ฏ}๙ฟฑฯ}้…W฿๘พ๛๑์‡ท>eถJr“ย‚๐๚K/~๓ํ%฿}ล็๏cฯ๛๒[฿กืืkdzํ…?ฬ'~ื~ํ“_yฅg฿๚Cฟ๐ทๆฯ}๐๛฿๑ฆงF…8Dlซฅ7ฟGฺฟ}๘ฮ็>๑ษOั๓_๒ืฟ+ฏ<>๔กี๏หำUk3Œฦใœ)a5'ƒ„ึl็ูwภ‡?๒w฿๔–๗~๊3Ÿฃ?ยฟ๒อo~๕›/ฟ๚xฮำo|ำ[—โ'฿๗?๚มŸซเ๗ฟ๋Moxr*#3iR›MˆmnฅŒฃฑ]จถ]ท•fฌ–ีอšฦC3ถ2 ำ h6ณMดfS4หI3Daณมไ”…B:…&๗สึบ†Rวแnฉ ูฆŒ%SณP c˜ภ g &3“ 3bณขVฦ(3`1,ฎLด&สใฒ›tมYQๆ[Hc-˜(ด•™ Fย6 [›kภfv2lcQ •‚cซ€ m2a[ใุ0ฉ ZมTk1ƒqวœฃ4V5ไฮ‘ะฺˆjj:ป4–jQPฬ`)` ฃ:ณ;วŽ0ซ ํส+๕ุJยjญjฑ9ใ ถfงจูtฉดน“’ํ’Bไฐ‘ด03hง`$ตr/ฌูฮฌฒ*ฑaศ&SŠ3†)ลฐd†ึ4€mึAฬ’ํข"%L†f5Šl‹T๎ฺ–Aใจ)ณปY4ฐ!”6#ฐ;‘ปุ0นš,† ีVi–4PYsกฺด \l3Mห8“์ฬwpฌSXBˆุธ5Cฉ3‡en"LศJA%ฬ`ฉ€`…โN‘mfV—ง1ฬd”อY;2C[5‰sง”mฬึ8(›อตdฑน“Ts-ลp6…ถ-Z  hาถmฅรl 9ศฐมฆ)S6 ˜M#aฐึ4 sๆ")ภ 0k‡1PXค๊ฮ&ckIM™mŠ  dำ e–h„รฐFฎfE`[Rถข’F0†?A๐ใ๓{ึw๕ผฎฯ—h;ฺาฑถ”Ÿ…ƒmD็4บ,ณl$šhะd‰™q็2„HŸ๛ํ9*K&˜Ucๆqูf2š–ั$\ฆฦด-๎„B4[{S-VัˆูhA0“g>ฉb˜Y’˜ฐ ีฝ‰80{oฦ’‹อ$}C ถ7)0L๚า๗~๋?Wํ๎?ภฦ/}๛Wฟ๓๗~๐๋เgฟ๕;?ฃ฿ัฏ|6ถฐฯ7ฝ฿็/๚o—๛๏๏๔ท_wฟ๒฿ใ๚็๐ใ๏|๗ฟ๒ืทz๐‹oฒฯo๛ฅ๖ฯณฟ๘ห_๛›o}๗ว๒ƒ๛๖ ๘ี่—๊฿์๏>Ÿ฿๏~๗ค_๚ี๏ไฯโฟัoฮ๐?ฏ๎๘ฝฯ7ฟ๚ƒ_้๏แ?๚ง“o๛฿๚ว์ฏ๗๓๔๏{ู•ูV`ิพ๕_๛ํ?‹ฟำ๏แ๑{฿๖ท"}๗ว?๙“๚฿ํฟป฿“๋๓K`ฐYช6ร>฿๛๑ฯ๔_ๅื32K0ํ‡๒‡?70*i๛๚ล็‡เ็์_ึ7฿๚O~Ÿห?ัทพu€)4ศถ๗๕อ๔Ÿ‹฿๛ณ๏๎“?ว?๛1 dli1฿|็‡?ำฟ๘ห_?ภh– ๓๙แฯั๏่€c`๏}๓+?๘?๙'Ÿ_๙แ7๑g?๏}cฐญ ,dฐMฑo๐w์ฟ๚ัO~๏แ_ีฟ฿๕ๆoพ>ฟซ฿๛~๚ณ฿๛๙ฯ็ง?ตom`6ฉm5hนกู?—ฟณ๘฿๛ƒŸเฑฎqฟฃŸรฦoัŸู?ีฟ฿๘ํปฯทพ๛ฝ๏่๏ๆฯ~๗๐ฏ^ €ๆขฺ0iฆY@šgึaB6 ฐ43’3ย "Hฒ)6E e3ณ28l4 # `มF3„ฤตแี‡ m7-a0F$ฦCˆฑอBlF`ƒh2ddซๅfˆYECยฤ@`ฑุ0ต™บ7e' s…ถ๗๔‘ ร`รBa–m0ำา6Rš›ฆl[‘ mX<.bฺj›4ู&าlV S‹อร–Vู† ณfถ&“-ˆFไ:ฬ &ฌBเA*lซm)0 x–ฺ˜สฆถ˜จ4!ฌ`lœึbณjoeรRฺ&จ6d&]6V—y ุ ‰PุZ6‘mH3ฤจq hh1@F๋ตž1ฃถŠุŒ„ฬV•4ปฐi_ฤ6ถiงmi%)ศXขmล e+ฤ’65k0ฃlKiถMฆC˜1ณ*fŒ0ฤB ส6,†q Dผัฎ0ภฤ‚ภ (ุ”!`0ถกa ‰mส6`D Œภ4›Yภ’b#์q ฬิŒ`ึ(€ญผvw†™ฅบท5ฒ `  ›อYลmูคXญททfBTฦ]ฌe6ณTmSz#6ฬ๔ๆAko}š y{ฐUhฐ 0(ต13–[{•bช๊ญ4ุuู้๋v„ฃ ู.c!k7ow=‹H‚mP๑Z"Œฉq>๓๖&ฝSl'{ฦจjฬeต6Hณ๎ฝฅb จWฐm[-[M๗ต–*Qแ-7ˆูโฒlำŠŠฝTafTj–˜lz๖Qวึ‚gํฝปh`šูร07XรถJ๎ฬGูล{ฏ…jcถfe‹™MefEา*ฏ3Tฑต'5R3ฑ๖ๆอ!#… fฃ!TภึๆqUjฦ6าู kี6fyz†ฯะฺ[Q@ฒm 6ใ,6F๓tนธ666/Ÿ•ฑ๗ฑ็ชšท…อ]๕y_–E)2!CY๏ๆญาามfVU@XZฺๆšR€{[nq@)ต๗X2ถF๋vŒeXšีV๊*l ต๎ัถ%V[=y๎๓6จJ%๏ูะึึzยฆ*•lv65ฃจ=๕ lิณถ5ีภ‚5ฺck>,#ุTตทNHRyOS…m[3‰a[ัD’ฑDCฃฺุ˜bี`ด78[ฌฃ ุฬ[Us…2fำ‘ฬ2*#ปWทadzธIkoE&ŽGฺfตญณ5lษzsอำQ)1ูณฉณ๙ๆใ™ฅm๋kf‹่ำ&ส์uญต‘ฉy็ˆฮ{ง‚ Kสด… ไพ—ตไศ๖XJ๖ฺจk1S^fvL๕foŸึเŸ{ถ7jcแใ)โ ฏcซาj{ิ30อzชทแฎถท›ืVOึผ๙์kใ>rู{“z†ฬ3๏eฆr3ึพvgj0ฒน‹ nภ๛zŸn}ตๆVm๗ถฏํนฺ6{๛šƒุh=‰Nฮเ}ๅ„0บ=ฏณV5[๔{oถฎ๎าŒ=ณwสjd๏}M-•ฯ'#ฺ—]b>ตผดข™อศฐv yฝGFHฌืj(๕uื6[ญ[ฮx[ƒส๖บp@oช›ฉโ†๗T.#k{o‹ดa‹ํ+d„อึีlFฉณ=Wณyoทๆ™ 3ดdถฝ[nถำcบ(4๎ลณยฉ[ลผบGWถmำโ…ณBไ}xฐ‡ชฺิ3ภฝ^_c1๎ฎํiดgำ{}Cํe๎#ๅ›ส4รฝ๗xw }x[ŠขถตถฉjXY+_ฟุ…ล้ณํk;9ิbfๆตฯ“˜”}๎ม^KužืกตO‹{{๖ึฉOnอถW]7ืใ์ฝ7’ฅฯ7y 
›7ผซ๙ิฒ4ิ4ฯศ@;สืv›Rูถ^+ู—๚Jงk›ทาg9ใmถ[kO—mฅุ๔ฆC…M6ซหฺฝ=[Eioปู)3+ถ.k0lฏ]$ct/๒ž6aถsฒmDZ;5v“ช์๓Nฉ๊„น{๑ฌTํZŸ˜WMRถiฑLvฆ#๏ร๔ย๓Vีml4๎๕z%tฌผ้๎ถฑแู๔๕๚†ณฑญ}อว]ฑPฯp๏=้JkาŠ7๖ชUmkm›ชaาgๅ}ฝ(๓ฺ็uปmvๅ๊kˆอl“ฬผv“nบถืไบmn3ฑึ>y/ฺถm{p—ฦfโบu=Šmิ่>)มฺ๖ผOอีุ5švอƒY6ะ๎tfณ‘Qeณ^ึ2<.ตTt๏๋้–6m›kํ‘Œv•่ตfux้&›ๅฎAkณM"ททlณ‘ผ๙+นXf๓ฺuบูใ๎Eถg2ฑทwŽfˆถตvbึณmีตKใQu%Sฏlึบ์ฌ‹ูpทนz{›†ฑโ<YLๆ}Uu6Œž๙nญ‡‹eคbถๆ่kf๎ข๛่*|๖&๕ m๛ฺ:•yiล†—ึ5ืฆ1p‡อYj{๓u๋ํ๓๊t์m>ๅ๊ a๖ๆ$ณึ1”. ›-ืmkŒฺฌ๓ฺ'ฬ์ฝูปฯงf6๓็>Kหศ{๏อ5บห1ข๗๖eŸpYๅฆีฬ†ฑ kส‹๗ฺศ‹™๕Z7<ส5vŸ{_/:Hฦื{Ÿืฺ ฒฝ๎ฒˆ†ญjV‡ฑิฦื๓๙^XใYDฺ6ฒgƒผ9์กซ5fณV%3ืํ‹k%6O{[*6จถต–Œy๚ช.ป—Mcบ+™ข‡Yว>อ…6๊นzถ7jcแใ)ฒcผŽy๏sŸoฤ i–ํ๕>wณูF]๖5ษฆ4–mmบ4ฬสiฒฝ}นn7ฤ-oร•๗ลU[ชอถy•ปฬN[ณขฉ›5$ฉm‘21žืgŸ>๘ฌ˜คŠตm69ญ ฐtš=V,—Sช๗h๐ ุิ=‹พ6œTšดmฝ7]#…กๅถwwoถ!k'Xฌึฌ}5 jู ญfmtืถljฬžkk\!Mg˜ฯตjฯ›๎ฎumลxปpหยhหอVฦ›ธิใส’ํs‡Mฺฐ.o๎Vฆฑํ 9m4Eถฺ%†ฯ–q1oณ’Uึถถ•“ถู2จฦูิ,3ซ+ bอj๛jฬจ;๛šqฐ•ึ6mคboฏถGใุ{_ปฮmX้๓6vุณธ-W_3์ฑ>Ÿ๖นํYFuณ4ถUีถˆํXจ๗žYfdo/Iขู6”๗$ญ„M๚z๎ฤ„ญผ Vทฝ54›.๏mิสฦลRE„ต›jค“–Vแmถณฐ๔nญณ๑ฌ—zู$ฝŸที]ฒษ]cSำจLg˜Oq|}้๎Z™า์kwh‰€ปง 3ท5—ูึื–]i3ล๎ฺ–งฐ9ฑฝกRุ•l`๖>“วYm๓^YefƒOนํ^จฦูhึlS&IbXณlKญฬ{#•7ใdv4dเา{ 4ร{_’าฬ•อผ,k8ผ๗ฮษŒํอ๋๓K'o0ะv5Fณmนz‚6m}mzื‘ดjo/ %มaA!jN๙G์฿6๖HŒลไสžด‘h6a+cศSซถxb๖t1t›žํ้$ ญื)œณัขป฿›ทฮ`’`iอ4X๓ชžลX™v็mUนฑX_ฯิซช๛ฒq€tnSxฃ:EํทCึฉะ๕bผuถฑถ฿ฃ[t68๗๖*“˜nุ†%…็}}ฒIฬถ™=สื6๏u๊ญฎj‹™ฌฺ80lฺ[ภp%๑6ํฎูถฅ4{“ฺึvฺkmF—ถaKšGร{kบอ”o3‡๊“m–dณm๛๕}ูถ6กญlfRฬPxป\ึ+_‡ฟ]ฝ%ฉฬ›ŠูJฺycmFงูOฑ €#ฆFดq๋1Šmิtณ7%‘Y4หผแnv2Pฐ๙พ๏๗ณท"ห์‹ลดฒื,ฬฒต ฝปถ•–ญ๋ห์‡][หw— ๆ๏nŠyณ้๎ป้๎ฮ๖ึnืm  Yผ•m@้ํ๗จศจƒอiSูŒ้้ภ6›ชิ๖๊า ฦl๋ธูืฬึtX๕V๏1ๅJlyuฅmj[{ซ6ฎฤ i–ํ๕๎fณ:๖›4ฝ=ห0WMฑŒXEำ–šQืถmฆฤ& ™Jฆmˆt3K+_’ML5 ฌฒ๖ฏJ™ša›'๏W็ŠอVlซc4ห6U4BQh4๓&+@!>า๊โญทุJลำŠb+ฐ1T@ืXF›ี1€Z1ร6;˜m#ซฺ,Œ‘m[ฐWั‚jc๐ุบฉาc”"o๚ YŠฆฬาc๖›‘*ฆ(k0LSอ6`ส6๕&‰๋๘ฝgธm๓ฌvฬ0…่ฅi`แ2ฦt–ฑ‰4f4ษI&*XSู้ –[ลG˜ึƒŠ†5-ฆŠbฦ&—ฝย€Šข’mลญYZ๙Š{ฤ2ฬ*ปฝ๑T‚ฬิฬศfฺ๐tฺ ค1j{ค$4จp†บตํ- PˆPX–ำ^o6ก:ƒ&T{A„m† จ† ›ฅ= q้–ร@ยถ(iyึญj,,ณ-ฐ๖tkQ ใฉ็+ ยSญ#oฺฅKำ,ฌฅอ‹่2แ{ฺฦ‚žดฬ`SFบ!ป{๏Mฦฬ~๛ตHNoLI—ฑjปุ–0ร$'™8ๆ€อช๔ 3หญ‹#จีLฅ6f’s•๊ลๆMฎถR›ฬซลํ85๖ฌ ฃh{ฟฎดทซk93กmฺ๊cรosซm D…กํ)‚œ‰Rมคzำ6[$8//Ÿถ aQญศฦnT[›Eุุจd›ฤ6ณYlญชํ$F:ึทญSZ[G5ฐด'ฐfฟ๚^Cก 6Oอ[5กอ2v*Mšmkiผน^ŒmUeŠ"kofกฆตฐ‡M™ต%ซทอ,หฯฏ`เชk0+า&]+ฒeS4^}’ ่€อJšf3หV้c#Eฅ&ฺ’GชJถyด˜ฦ E0๖6ฃค†ฅWาฒ™ม(”ู{ฌ–ก[1์ๅv_hฬถŠF6”Š3ฑค(f ูๆmq*šชŒกฺถ{ภRิุZฅ-SSl†A1ฉถ,oZด฿’%แq›6ฐe้๑l“RตY2Ff๖tตู<™C&ค7"ฟ'ใฅสDผญ% –ฑm•:FลY{`nิl˜ฌzูBนšmp›€aฎkf5ดIƒ"›MIณWGEฺ,ˆflใๆชฃ‘ฦซhฺโfq%๋ …‡*…"26ชDj„ฒูf Sช‹Qต ก-dc TƒีDm๖†dป†•ฤสl03๊ฬŒa…5HFJฤTยภYŒมฺŒขŠf@ H`ีV1&•ีภlุถaLXXญl+f6c2+ถEยถา&˜h PชVฯึ`hm9Uภ‚3˜ฺถ ฑฑภโาfฌ9l3–ก&–*{#ฦ`vœฒm›1ถฬXนฌง)Uม@‹ศุถŠ’ฅ ‚m&ร4ีจธดšmk€h0ˆขฝูR[ขZB6fฬถฉ 06*ฑ›ี(ษ˜ ››5E`ฌตmˆฎา ƒิฺ+SŠ=กXV ดa€m† ยFhHmlณe LfณUi›4อ ,S•5ศ@ ˜ฝ6 %*J5,Tุ ˆอFุุ.ชถsุ6ณ Y1ํ”`, u4ถว„y lqต{š<ชข+y3ˆฑดฑDยถ!mุ˜& บŠจบุBย6ข.ƒ‰.ํm[‰ธ@ฒฒูlฦL• (l`ณY U5ŒeP)› S=–6(าŒฐLุJขฒ฿"…†Mcภ € +0Œ๗”ฺ ˜ู์๗l‘Œ”f šŒTaภVใ‹$UีฐPษถ๏mSยˆ`Vึnฑอณ ะฤŠ‹ถA‰™ f8ย›yƒY\ikBแ‘‚CDถ km‚ฬถ…™iึT%Pu/Dาฆฐ†ส@ม ฃm"e'„a2lC•™1Pฐ U˜ฦ2ชุfPa &ชฅว€ „ซ “lH[ล€ŠV๖4 ฬ6ฬbl0 &Vล=MY4lmLฒ฿ณล)Li[ฆก ฉˆ€ Lุณ1 –ๅTE ]6fc๐ถe€ุ4X•ตaZgf–1hbU—ฺถM”…13œชถอณํg™•+YO(l๓ฬ’emณ‰Rlfฉมน ฦฤQุุ`F–#bค bj x\l  UV~ aีV@bฐ…(6สh†ชํู ภ€6eณ๗ๆีฉ‰!Mอดะ&สfD๓X•mุถ„›M,hๅŒ 3€ ec›€lN d๘ศ„Œ1ถaศ”มDHCE›๖”bภ$#ฬฆำŒ+๓6d`"6!ซŒถ.ฑmภdเฒ!emฯ@)6 ญภ(6* lC3ฐF–D @€ลฌุ† ฦ(ญ]Wุ64EX €อ&‘ฆm m6ตUD00 ฦ ˆลุ๖ฦdHdำ\~•&KZŒอXย•ู ณl/ ญšฃ€&x%ฦ์„ Cี8ฦhSS€YNภfดaa[&dสKE0ค 6(˜b`!tgSผงฤPอvดŒhƒŠ4kซฤฬ่5ฬ‚Hฅ Jฑ™ตคม@%ฐฦmS‘46dkj",Xก˜A๚ฑ?๘/}๔ฃยs๗พ๕ฦืพ๘ฟ๖๗>๓ืไ๚็ฬ'?๑‰๏๛ะKc›ฃ{ึF่คํ้Iงท_๒k_า๗•g฿๚ๅื๒มฐี69วฝีฺflwปส ๅœvoo~ๆวยฯพ๚แก?๖๒ทŒอCz–5kขธcยfึฉOY6ฅ™ฃmต€aqDCอl:fษภ}ํ๓ฟ๙‹?๑?•Oาoฟ๕Wvิณmป๗ี฿๘๙Oไงๆoส๏ผ๕๒ท้r)อ7ฟ๒Ÿ้/ๅ_๛ฟ๛_๙๏{ๅo๑Ÿ~๎sŸ๙ิ๚๋ฟ๕๕โ?ม฿kฯ๖๖๏๔๔ฯ“๓ญเ๎ŸV_๙Gเ๏์Ÿง?o๐ฟ๑ฟ…6ผ=์”ตYยา–jซm[kน๎Vฎต6@5Žนr๏–ณH0ถณป!”9น“ปUฤฮุิ-ำ4ฐฦ’ูrถSOm 
ภ\l•g’]ฬถN8<›\•YอXึrํtทศึLŽXณRฦDา&-ห[Lฌอจฺ่ึ%&ณMkญ`ึFkฑะ.‰5ชๆๆvZ!›ซฺi4ฐ4้งa[ขต'J„ถY9ห. m9ณฉ”&\&˜†)ฎลฦr6lAfฎฌi†ป†MsNeถฉu&ู–อๆศษcf4ƒสขdปทฆ&ญูึŠ!iwXUaƒDณํFล)๎ˆ ฐัX1ขร–นip]G3mcทœs๎‹ฑdุส”รํ4•Tณห๎ชย›L…qหžจe6ซ,ด;งh15'ึคตYuŒ–Œ•ป2อ2—t6บBหุ40ุkm$#VปP4จ`sึ6ว QฒนTต“,1˜คู” ๊‰G!VฮฒAkษY+Œลด%MญmŒ{ดตษ)lWvl!–ๆฎiป5ขvีJธซƒ5ใBU6M-P™ญ* ฮhBg ตŽดอ&:f,ฤุฦ ง“‹ต*`ร4)6ึRถ)ืœฉaป7ฮใl6.šMhF3‡p7ลป์ฎ8ณ9l2Rmร.'๋6V,0Kณ”†d+ซQ$cตษV3ึ5:[5Vฃuงmษ ตณX‹YmŒdB…Mณว”•ccVวฆษn†จmBถU4›น”ฑMKlใhfญm7ํ์จ1c;Kุญ†]š{+lฑrยูฆษ†’2[ฎฆTต; lุฑฑ‹J@ตฑ‰‘ฺชย ษtฬฦIkญ(w๎ุฮ90c!-ปcก“ุ6ซภ4d ณU3อiหฆv—ำดqwซYภZl‹ฉ ึi๎†*)Pษ.งˆŒถLe-3–Gณkบl˜Jฬ8DQืจุฑšกถฑโv‘ณ ŒึฅkฆAkญŒฬFbแ]ฟ็ฃC๘๏{๎ำำื฿?๒m๙๘ษฯฝฯ~๎|วw}่{ฝๆt ษฦฌำน›็>ฺ'?๒]_ศ QหPq g,ศ2Žถiงvอ_็ฟ๐ลื฿๛อ7ฏญธ.•าClญฺฺึศฝi๎ญาุ.N05Žฤถฑฦ\'Uน๛ฦ~๕ณ๋ง?๕ณ๐y๎;๛?ถVฒ-›ํฺิ฿ๅ฿๚ฺณ๗}่ฏพ*;'ƒต๛ี๖…ฯอี7๗วฃ?๗๑o๖๗พในฝ๙๊็~แ™Ÿ๚ิg็ษ}๒#^~ํ—๎/โgพ๐ส‡ีOู๚๎พผทพ๏วง๔๘ฅ_๙;๗ฝ๏๛เฟ๘‡฿ฐมŒ•ิvmŽŠฬช@wœfห ;lSCsดว6ๅโ:ำฝvw4 e[@%ึifPAีiฆOFd›\<› JeไŒ…eัMd Ggฆญƒ LํR ุถmT0l93T16ลŠcfDœYก%PŒซGfำHkUะV0Z˜ :ฐมฺฑ1KสถŒl23iN ณM:Yaปฃjซqฒ–ญF6i ป“2๋‚‘Rm๗า,)#ธณ™{23”M‘–V)๗.ŠูชV9iร*mดส€ํZด ฬส˜;GX‚ณeป”ถ@Z[kbX 2‹ 1l‘๎n‰  ’ฆฤฌฬฬ$'lณษ ฐจ Yด †Bะ4‹$ดฑ]ชยฎ์B wซส0bลฒl3"อ‘Pณ(ไi ี&อชภLูdUถY1ƒ ถฬ( ณ:ถ!lXถๆ$0ฬ6ฉmปVว&”ุฤ†…ด™{ืNiจ„ถ ะlฃ’m&ƒY•ๆฎน‡ม ฑฤํˆ3b*AUkวhรTวฝ –-ZLZต]QmาLฦr6DZ‚ถํฬ@ยะE#feณํlœ™vฐPe1รึ๒(f๎dXTศฆƒฅa”ู8:ึฺX!a˜Œjดฑ-Bุ•{แ$v—:LFeV2อAก…•uถญ•ถU)ึ(2bอ•0“”-3Kฬ6;mุ&w;šr ฑ-”ํบulLrยุlก˜น[j4: mmilฃdๆ˜ธVภvuท“;TณาTlรชฐ ฆฬŒอ ƒœตmhwสฐ’˜ฺฦ A ‹@’†aŒอ8"HwsHซณ l–ๆฮฮ*ใฺฃHZHe ›m-Gลถ™X#สV5ึf9าZำึAFปTร†m#i6›Cฑก*ฦๆคๅXf b,ง•†ว๓/พ๋ๅW^yๅ๙ฺ๖สy฿๛ฟ๗ป>๐ ฟ_าWฟต๕ne์J˜-)๓ฎ๗w~ห๏™ว ฯŸฝตปmฒ`cถI(สผ๕ฅ฿า7฿ธ+œc–ญ€ษHฺ๎มฌrะึ‚๐๕yผ ๏ปฯ๗๗ทGXN{…$ODQ4&ุ˜8Iฆ้x'iz˜v:ำำฬuผ๋š๋š้ญv:ี้คiงm’ถ1žˆฦ(จQAๅฐœYv]vูe฿ฯ๋z<ฐ‘0kื ‘65/}๑ำ๒แ฿๐็ฟ๕๐_๛{?๓๎—ํŸ&)‘ดํ๖้๛+ฟ๙{O>ะ_}฿ฝ๖g>๚›oฮฎmB๎ํ+/฿๛ะ~๊ง๎‡฿๓ถWฝๆžไ๕ฝใ›Ÿ{๘W์+O}ๅฅทy๖SgŸ~ๆ๎#?ฃ๘k๏~ี๗žcoxƒฑ?๛์ื>๚ฉ?ณฟ๘า<๚ึ;,Y ฬ `kูฎ9ฑจ–ฎัT0f[กหถฆชฎ]b &IZe h1L็T2 q CฉŒœฑฐ ]G“;Œฃอ@’6ภ2ฃณm้ะ0Zฅd6ณkdำฦ#ฆนP(a3KFq ณิ‚ฌbˆตmร„˜r0C`ำู0–ิ ภtศฦฦPฤะŒ03eKSf2@LMอpjฺXฦ˜6LฦFLล33ณ65€ 2€ฬ‚Š กH€k่“i•\[-…˜WWขุfจ‹’Uภ–ฬ †…%lkืฅ# •@ะŒ43&11f SVฆ2 2#›ษt6ฃV Xห‘]Kˆล.ปๆHkฆ ƒล–qำ\ˆ4าถA Z6QhA 0ิถQ`“F[ตำฺ!6ฺ€%˜B‚m`e 0l&&–i@2›*š,ใ€‰ l€ลย&`Xภ `ˆๆย@Šaก`tl ถุจVa6iU3คม6UHX˜ ิฬP!JClcUƒ ิฑหถบb BQ† ™อด˜l‚ฉmj’†‘€“้lอj ฐIฒkSฤย์šR™af‰™!‹kr*sD ษh 5A&–%j[jื6จ)cuฌนf-W’iะ0รภ8Sุ0V m0ฤฺ”€Zฆุ&r.Š˜5ฺlšYP,lยfLภj€dŒc€ืŠ#ยf“ h ฐMTฤฐาๅZช˜TStqŽ6eฤ!ำŒ@ง]fPม๊P•iำ@3f6;‚ุ25อXGฤv้œlae์ุฬธrlดh6Kh'a3*#0ึฆ@kศฦุถค;7๗ิอ=w๎{ฃ๏<๛ํฯ์ท฿xำOํ๗พ๙ื“ตฟฬg?๖กO>–Ÿ๘๏}ใ}~๑ำ๒ำฯ๛†w่?qO ฐ๋๎ห/|๙ษ?๛ิS_๚ึ‹ฏ์W?Z฿xๅ1l& ๎พ๒g>ปฟ๛ง๛่gŸ~ฺวอฟบ๒_ผ๎5}ฯ์OซนๆSO}๖sŸ๛๒3฿xแ•›๛^๕บว๔ฤ{๕–W?pงฌrฝ๒s_ห?๔็Ÿๆ๓/uฯƒฝแอฟํm฿๗–‡8ƒ su๛?๛๐๏ๆ…ืฝ๑๏z็ฝๆNf˜m๗ฺ๏}๛ฟ๎๏แ๗ฟ๋žฮนIญ2t็7ผ๕ฝoมz์นฏ~ไIะl:มy๐‘G฿ำ๏ั฿ึผ‡P็s๏9w๎น“พ๓ลO๎ซฯ๛บwฝ๕๛฿๖š{pzโ๛฿ฝ}๒c_๘โg?Oฝ๕aƒูํKฯ=๗ตฯ|โษ/<‹ww฿ฏ{์อoพท==ฏ@ำ็ฟ้'?๛๙ฏ~ํ›/ื{z๘ฑวฟ๏ฏผ{_s฿ฉแ๎‹ฯ>๓ฅฯ|๊3_|๚[฿yi7๗?๔ุ›Ÿxว฿๒่#ฏพง๏}แซŸ๚ไ_>๕ีgŸ{้๎usCoxใใo{โo{์>X฿‹?๙ฤ—ฟy๓šG฿๘ชW>ฉฯ|ํ๙—ฏsฯCoz๛เoyไแl฿ึ็>๙‰ฯ|แ้g_ผ๋ืผป๗๋•[ธ€ป/<๓๕ฯ๚ษฯ~๙๋฿~้:ผ๊แ7ฟํ๛฿๑๘cฏ{่คw@`ูด ิ(ณ)j0ีฬhP%›สจŒ•‘:ฎ™ถ™–%Lf˜mV0เdkW,nงถ2ดm„•:v)ณคPถ1…ชMƒ‘ญePู0+ HFe`“fkฉv!ˆ5ห8ถ™ฆ,VAc‹ีfำI ฃf˜ิ1ณMIฃM(›2‰fิ รจQ˜Su.ฃ`สฤฐ3dƒ˜Mิ‰นXŠํ'a†‘lP0 Faฦศถ!ม6#ลb*l˜ึ"“kb›2™PL&ฎ2 ี66ƒ%1tŽaˆ๋บ4@Kf&mุถ 4Œ“ญ]สย,6lั1 ˜อtฺfKภสtฺpUŒสbึฌถ•!V*T3 j[‹$–mfdซึLc[ถ&Tฺ๋œN,. 
0Tึุิ๊l"ึCC132ลถฉbืV็t.3สE0ฐูย ˜YซNุฎ]'aFสZร,™Pฤ„Q ิ‘a…m–bาดa 4rัlC‚.N*ฐ2ฑmสdณvFLณM4Lฺภฉล†)ถK#Yดmƒ„ ร(ำ5ฦม,fFKดL&ถฒๆ˜้˜Ža 5ี&$vึฌfฆˆUAษ†Uรltฺ–,0$d›…aถษšEbl "๊์ฺ๊@,\’!ัุตZตฒ!Z;3จf3`ปlƒ 3V*ldPFฦ0%ฌY1ฐ้คl.Bถ)วh 6E6 „Qย6š™U2ุ†ย6WRhฐ`fbiดicC‚Fa”™†m–คm6Yed.ฺ สFจสตฑ-pอ bš2แp ถฅ fูuvl6s]ทื+/~๛O}์ำ฿xๅ5Oผใ๑ว๔฿y๖นO๊/Oพ๏แ<†๛_wo๊zแkO๔_๒?โO=๚7žxรผ๔ิŸ๎ฟ๘—Ÿz่๛ม;‰ื ฐWพ?๑{ฟ๖+ฟษo~ง››๛^šืฝ๖ž—Ÿ๙๊ <จTŒŒv}๑ูงž๓ฯ|ๅน_๐๕ฯ?๕ไ‹ฯ=๔ุรwฟ็'w๛ส7?๓๑฿๛ญ้g?์หฏ,ํฮอ}๗ฝ้Ko|เ‡฿ศ<}๑นง?๓‰๋S฿ผ{{;ๅๆื=๖ฤ_}฿~๒'่}€ฐWพตๆฏณ๕ฅืฤO?๒ึ๏{›˜โฒโ๏yโ‡๐–๗^็ม;็;_ fข2ฆn~๏ฯยs็ž๛พ๙ฑง'Pฤ฿๐่ป๐ุVfํ+/>๗๔็ž๚หฯฟp็ฑwเใ๗{็้ฏ=ญo{อฏฃV][แžG{ร๋บ๓็Ÿสื/Ÿฤผ๒ํo}๑๐ก฿๘ํOฝเฆ›s็ๆืพ๙นkพแ๕oพน๎>ฅ๎ฟศŸ>๕๕^|eืฺ๛๏ไงฟ๖“๋ฏ๐_wM/|ํ Ÿรษ็žฟ{nฒs๏ซสwo๊ืพ๓มWž๙Ÿไท~ใ๗Ÿ๚๚หฏโๆพ‡๐–๗ผ๏g~๚'฿ฦ๛ํ<๛g๐๋๚Ož๐ญ?๐ๆ›ฏ~ซฯ}็๎wฟ๓Kพ๑ฟ๙มฟ๙๗ภฃ์z๙›_๚ฃฺoมงžy้ๅs฿ฏz่u๖ภ‹Oฟย๙๎7พ๒ไ๏|ไพ๐า๎ธำ=7~๖™๎ปีฏy๘™ส,แ Tr]˜ญ! mา\ีlูฅญ’mตiอLๆtใฬt˜[ušูŠขQ…ํZกpตVฃุFึฌด™!K[9ตm ึไmFgYVZฒุœ\6(”jnฎkr.U –*mืdƒjซ6f`Cฉm›า์ข:• ะF–ี’›ceu6kๆ8ล6 ชh.ล2ฺเJ˜dใbsชชห.ื&หตฮQณ’tบ.8I3ƒฦ,“œฺๆสแศฌึลHณตฦ.ต–ฬŒิถ&uษ5š! ี5ฎ-Rฑ๋Rจ`mvIขอิ5np"Xฬt’อเB+†0œช์RcqฑซNg kฑต9งMZhฎฉ [Qำ(ิl['ึฌ0ฺม€Tฺ๋ช\ถอูV:ํฒaฒญ-Gม†–,“ชm`A›€]QIvูๆสูeB5ณT`ืlŽญsฎUKL“‚˜ๅฤ,ฉุถ้$gภึโดไƒฤUmf;ล6€!Nqf‚,ฎ1;&ูธฆช‚qm‘[ียะช+ปŒ&ƒ6Z&ํœฺ@9หฌtั‚Pญมu+;‡š)ำbูbดaLช6Hลv™อฬ.‰4ฌm]sฌRุฃสุ0[j†6Ftฒ‘ ธฦ20bฎ94RYm ืPdCงiDปฦ‘ฮถ0ซฉn 0 ีš!c›™sถฑjl.Ss5Iaธฎv[จ“Mฌ [F์B@กฺlsๅlฆถ3Nอ@ฎuฺ:mI`ญ์ฒฉโฤถไถ๊$gไj7VˆS ํ bc\สnดฐุา)K.ถมสlCUิv][– 'ฐฦRตีf—: &-`CŽbk&2ฒH˜]LgญูFmฆ!ษ . …]l;Ul”๊rฑmv iุูึ.์”ข-aTอ6ฬึŠ1d–•ฐ–ูฒEf24kค เ8ฺฺช‹J :‰m“*kึZญ“ห˜ึN3าถฑ™—ฟ๕•ฏ|๎ษO]๗]w_y้น/๎O?๔OzOฬ•ท?~๚N€qKdมะR@ วp๛๒ณŸ๛ห฿๚_ว๓ ิ฿น๗ฟ็mŸ๏|ๅฝOษทyl3ญมR๗๗๊'๗‹ู๗ผ๖ตล{฿๗wฮฟ๓y๘ž;๗ะ^าoไŸ~โ[ผ๗G>๐ใ?๒ถื=๐๒Wž๚่๏สฏ๓ท๏๓‹?ใ๏xอs_๘ฬฺ?๙'y๖‰ยฯ่ปพ็๓ญฯ|๒๗อ‡?/โ๋{๔ฟ{๏|่ฮiต๋๎ ฿๚โŸห๙Ÿ}๚ัŸ๛นฟ๑|แ{š-fตหแž๛๏ฝำถป€ˆ# xีซ๎ c  ฬfVU๖าณ_๊Wฟ™O|๘c_ธ๛?๐มŸ}ห๎ฟพ๙ท๏พ|๓เ<๐๊švญ^เฏ~เๆ๖๋/=k–ฝ๘ล/}ท_{๒ฟ๙รŸyว›๏y๎™ฯๅWo๛๎Kท๎พาืไW~๙๙ใWศไฯพ๏]o|๙๖ื๒็7?๔?ใ›G฿๒๏ศx๙/?;๒W๕/๎ผ๕'ึ/ฤ;t็;ฯ<๕๑O>า ฯ๛…o?๗ี๚/๒‡ž๙พ;|฿{๔ช{พนOั๏๎ส/}ๅzไฟ๙ลwฝ๖W๖W>…oป๏ฝํƒ๎<๖š—ฟ๐๛๗๕฿}อCozฯพ๋_สŸ_ฟ๔Kฟ๒;๐๑|#๗ผ๐๕'ํ‡ใ๋ทpฬพ๑็๚‘฿พ๔=๓๑'ํ+Oแฯ?๙tื+/฿ฺ ธSm€อ(HJc %ฬ†Uฮpvึ.fg\;งXี66!ป"˜•e—™TฐณญsYMIณภฺ ันYsYส†•6uu hฉkvญ*d4E„Uอ สฦ&tๆบํ„ฬฎ9๊0ืš:;ฅฤ1ฆMืลj vkฎBไงv{)…อ๊tถ„Yดญฮถ\gๆ๖rsถ †-l›ค:luๆถcปˆ์ฺีฮE™Plnwtขย`๊์า™ฑณ0;น( ›†ใฒƒckˆฮ6ปๆDZpœต!aๆDLฬ uUท—"ŒMญอLŒ 6 —ไf+'ูFฑค†Lถ‘ูJ๋๖ธQXศfฌ่4—a3ณสrฺ–ู›*CอlBv Rฎํ€u%eg›Cค&g.`&ณฆsณๆฒาbึฉ]ดs6ฅmGkV9นmำคAljŒ*Z'ณk่D.ถใ่ฆสE:ปึ&K›ํโ:Œ1Kภ9ใ”kณ:e:uถ1„.ื! 
ณ*็2ืฅณฒูถa•Nถ:s68\ปm]šJธฎ๋ช#ชถูT.ฒ$๎8—๋tg@L\ฮŽQKณ1ิ6„l…ใฌaฆฐkŽU1–ำTหH9ฅึ† qมฆ#`7qนข"ถํคd[LR0dHท+tIฮlฎณ#3  ušหฎ]ฆ4IํFืลอุฤฉY9›Aฬ’rmมถ™.eg[ัฎอ:“#ฌอฺ:7KƒฅlSmXGโึˆดm9)€)lCภTป†WฌZ'ณูt‹k็Š…ฮถฎษ`mูษน\Gfฃ–€kชฮvญSัถฌๅฬ1Zถ–›ูฎ+9g“ ƒ-Lฮ9ูชฤตซsรตึีๅœ„k›ฅจณอFณXqfŠ M3ฉขฎ4cj วมlH\ื*L(ฬขฺEคนhม1ึฬนธIsกRlษ™อndบร65&ืๅิตPXŽ\ปฮถ-ฉำl[Bf”ํฆฎหZ‘ํิฌุ0ลp{Iู6€อL”‘9อ\Uiฐภี5ณgfฎฃฬ`ฺtvn”|ํ'=ม\ืฝo‰๐ฟ๖ฎท?๖ ถŠT›`\ ธž๚‹Oึo}ๆน7ยใ?๚๕‘8๙ม๏๛žื๓ย๗฿กnถฺฐlาฝฝแ‡๎ป๗ฮ{^๕ฺื<๒ุร๋๚๎w_ิฏ๓?ๅ๏{ฟ๐๏{}๒Wž๗<๚ํื๏#ฟG?๒}o‡^๕ูฟ๘่ฟ๙ใฯ?๐C่ฟOึ#=p๊ผ้ตo8ห‡๚??๖sลฟ๊U\w_~ Oํ—๙็฿๘ท๋๘ณ?๔ฆ7=`†ถsด‹TV˜klฺฅธถmรVวุ(์าูๆหฟOป๑๛โหฏyโ]๏ป๗oผ๛Aฝฝ{]ป9็ๆด้Dmvs฿้t}๗๖ป/_ปรM ื๓฿z๎k฿x๖พ๏๙ฟS๏y๛ฝฏป๏ฮ๏๚!†Wพ๙๙ฏ๘Ÿ๊็๐ม็฿๙™๗ผ๙M๗ั#ฏ๕}๘โ>๔วOz๎ณั?๚‹=๒ฃ๗?๙๎‰;\ฝใ=?๒แ•ฏโท>๚ฏ~๗/nส๐Ÿ'?๘ฝฏฝs“|๗ใ>vฯ/ื้วฮ๕ฏบฯฬซฑ๗๐?เcwฎบ๓'|ๆซำGพ๔ฅฏ}๖k็๛_์7่_ๆg}๏ฟม฿~๗ปฝwณว}๛ฝ_ฝ๖ใj๖gพ๖ฬณ฿น๛ศ฿๛๗ฝ๋ญw๎น๓๘[เวาXC w,รต*งkqkบd””ๅ,\kM•ํdeX [+ื#SปุRL%qIะฺu๊0ฬิTฑ1ฅฐ๋ิu้ๆ์บดฑหŽSc *D์า%#วุ:ิflโบ๋œ๋:ฌˆ์&1ปศ\wu3H‡VZKณ\—DŽ6ฑฆถ–vําuํœซTบาฌอfอษ5ถjUฬmNg[:\v{ํฮอัLะeํ้lWRdๆฺํNgฒiVsำัvAU-Q]ืญDฆศฬๅlซ]iKซหlgษฎฮูำi'ืๅš4ฎ๋:็”cึภšชŽfะŽ fืธmึอ้ฬfS L0ฑรฎuZ,4cปฝœ\Uะ9นjฦฎUuบทฆฑEชcufš9•MNmX ›ลธ4่ๆิฦdtVGe›ชณนะldฑนtฆŠม9q]ง6!ฅnฏ;Tุ†Tˆ[ำตญnŒญ’ีถM์—cะIลhุ่n/2€บก•f;pํ\p{ƒ …pู,–นZ;ฺต‹ktตXQ9‹YๆฺY—ฒูภi1—อu”\Ngv๗ฺอฉ0J—mทงfฉBฮต]ซณ๊\N‡mC้tอกฺถฑL92RŒญk็p pี์ๆjูช q้ดรf—[ฅปืusช6KgอUซ”ถ‹‚nfC๋โbœ:ฑmU€‰Xณรฆจ1ฮŒํบcuิสj3’๋vGuฺโฒ™ถHHklW3UฤNญรRภตฆn๗'ฮ~51๛0?ฟ๗;œู๗•ไpQM-”ข(–m$ต“๔&AoดE‹^๗ฆ(ะฟขฝสMPM ˆƒ:HmหปD™ฆหPw‘Š๎๋lœ}ๆœ๏๕yRศ` Šv$šD’ถQ12Zฅม์Œ$ZSF‰ i•1B;šV+‹t.eฬ9G2RฺI ih›eะ$2ฆYB'•1—‘™1D[จ˜t9‰ถขอH›ฌhˆv,aอL5"‘9fS„hgšiฉeU™f’Hลa˜fGSJuขฦhดšN #้42ส์L2้h‡1-gg’@%F;#FUdTา–ฬ1ฦ, ชmuจ4 ˆถYึ@ช@fฒิลLขEฦhUšHui™D–ณIF’ˆvFา0!2’ด“&$M™:’ัถ3‘Eค„R4ีะBFJQฅๆ,Dว’E4i#svŒMK3iCDBˆัLฺฆ•ดIขCmf’ูL0$CŠ6Iค$•&CSชีฬฅŒ(ณ‰ฆI:•‘ะ2#2F็R202$™ƒเะ“์ว?x๚ปงึt.ทฎœ๘อฟูŸแฟ~ํฝ?๘—ฟ;฿y Q ‚Yhg;ฅ :“Ššuใยตฏ>๐๚Žร฿}ๆ๋;V’.Vฌฌ`hว”šCƒ‰Š.oฮฏ฿8{~k๓ษ3w:ฐ7ฅV;y๚ั}/ฝ๒มว฿|y๑โž _œ๛ruใ๔oเ่สฦˆฬ๊๊๑Gผ๏ะ๓ฏฟ๑๚วห'ิ€ๅฯ฿xๅ—๏๕สฦี๐ฯ;z|sกf‹ลˆtด‘JU%ฉ9€Œj[S2ชฺ&ฉ4A•š‹"avฉ GŸื~็ใณg_๛ๅ/_wหGwใ=++ซcd{.—3๋i#3&หฉcmฑถžฑhU$te๗พG๎ฝ๚ยŸ›฿ม3฿y์๔ก›+p็ึ๕๗฿|๏๖ํญ?๙_็ฟXŒะูํํs็ๅkหญ ๏๑๕๙kปN>|ๆ‘ปืc๊Hาyๅาฅ/4+๗|็ปGWwฎŒ‚ตรGœy่๘_<๗๚k็๎<๕-‰X฿นc๗พซe‘ฒc๏๋+wถฎ฿ฺบqฺปoปพ8๘c๗๏_cฌฏฌฎ’Jปvเ๐ก๋ฏฝ๘ำse๙“gพ๛ฤทO์^]M ‰ฦhฐR ŒR •ชŠHJ*1g‡h›ฤœ’&ญ‘0g#ษอ๗ž๛ฯ๏^พ~โ๐cฟ?ซงึr๕็฿๕ฏฮ[yไ‰๎๙Oุ0ฺฆPฅh[!†ฆZ3ฃฺ42’ฆsN3€†6Uฃะ D3ฦ์Tภ ช‹$˜‚Q%QiDะ’ „ฮถŒFg#h5EPฅŠ i[า‚$ญsฑHขE ณ2*JIา6ณ‘ัAD•ถ()’!กMIะช$ษ$šก4RIZฺฺY$Š‘คชd๊0T[i’0ME H3@ (‘ะTšBา…$“4‰j2ชi ฆ!M‘B ษ˜1hZ€&กmดคั‘F*(&’ษา61#ชJAuTš ฺ ญTb4Lกญ‚Hะ6‰NJi)HhลlFˆŒขา$“กU4hm ้Hˆvš้HRZbศbŒฮVขmฺvdš–hฦ˜TƒัHfDP*‰B** Q$f[†PŠดšI‚*†ดฅE$) ๆHHhZ:%5ดฺH’ถH˜0BDฦœอˆ„ช#ฃ ญ"ย์4’4!Š!IK›@็ฤLFต@ƒT‰Fิ"#–Š@$*D4ดLjC›6ฃ ‰"จJ$U&อ Zำ$I%"Š@“ะ–R4ช&jN‘ฬ ้lขฺ"ฉ™.*ดh+ QU1 B1สฺถIดB2“6‚2าดขUhZišาJ›ฅht$ ํ์LCฬV2d jฮJSG•ˆš‘ภ@*ŒFด2B1ิU@E2tv ฺฦh[-ช5ด•คZ’”–tค$ัะขชBด‘$U$ฬFt„$HาคQŠถ4Zฅ:F:ˆAชQรค$iiU’9g‘L"H”ฉ’ˆูช‘Dต)"CTกCช$B›Šด’HJšศT‰คฉI 4ัi$ญš-iQm„”6™MIjศLRˆ&jH"s6”$5eT5ษ ล0mฅPjš’HBgฃSPญ1f่ ด%‘ถQ0Tš4RVฺŒ( E[้ขชmG’‘ู)‹ŒถsN‰NŠ๕วŽ฿๛เC๗ฏ'™s{๙ะท9ฑo฿ฟ๑าkวNภƒ@dTำDะ„(ˆ& "lบs๓ฺต,๖ุทฒศHะฉS ญฆš"จ (ีฒvๅาๅญฎ๎ฺปถถ1D5บบบc฿‹ๅ•k7๏ผz{๋ฦตญฑถ{็ฎmีX[_฿นน+_}sy;mเซ7ๆง็ž฿0ฦ}๐/~p์๐ฎี‘iE‹sLะVH$ี˜FŠศj$Zm€‘ก๋ปตcฑ๗=p๒ิษฟฟ่ล?๚ห฿{๘Ÿํฺณsuc๛๚ญืฏฯน9‘แ๊ตซืnnญฌ๏ุป{ต Tุ}๚ž๏ำv{ํ/๎ืฯ‡_ฟ๐ำฝG๎}์้่™ง๎ตบvsู?—อ๏=zr฿ฎ ดๆฐถ๓่กƒ—^นq๋ึ\[฿ต{็ˆL2J๖ๆํญ๋W๏,ึ๖์ูณฐะQยฺฺ๊ฎปว๒K฿lmฯ–JbŒtฬฑH’hu{นผq๕๊VW๗ํฝบถ@I3(ษG๋w–6๖/ฟ๔'ว‹ต๓ภฉ3฿๛ั๏่‰๛X•ช&Yi›ก”&ฺ„"ลo>{๛น?๚ณ7ฎfูB’dฑถฑkัฃwไSปฑพาˆ–ถEาาDฉF#0o^๒๓sgฯ^บvไ๊v็Z—ืฟ๘ใ๗ฮž]w่กณšŒ4Išy๕‹๓oู๖๓ฯf+"2,Vืv๎>|ไ๔฿~๒แ{๗๏ฑX†Lฆ92ํD -0[H‚DDX-%Tฆhข‘i! 
ีj‘†"สLGRี€$™–—;๛แ…'ŽŸฒ๊/}๑๒sฏ}ศำGnฎคชHR3 S Kf2P4ัNZ่ึอห_ู—6๎พ#cฌญoฌญ๏X?qโิั/ฟ๓แ็ทถ>|l฿๎ท.\พt้ห›Ž์ ([_uา•ฑ๋ฤม“‡ำ6mฤbc๓๐x๓เ}ฯ|๕ๆ_Ÿ}๙นฟZ๎๚?พo์ุXu{๎8rฯ]๗;ฒBC›ฉฉๆ๚ฺ๊XษrนณญB[dฑฒX]sy๛ฮ* ฬ9ทท๏ศbme%  #sึค*ษXYLKJ#D€iฺ่ฎ๗<]G๏zไ‹ฏฟ๚์ƒณฏพ๖๋Ÿูึj๑~ํCkm‡ํูD,V(‰จjD Rภ›W?็{๖็m ซk;v๎฿ไฤ;ฟ๙๘?ัใ๗>ธ#’’ -RZดZ @5ช:๏\ปั+ฟ๘ล[หY@’ล๊๊ฦๆ}GŽ๚ํณ฿๛๑žพ๔แAbPR iJg% Jด$iEฃSˆ@b จะ€&ัูŠˆ@คh”TAฮฟ๕ทฟx๎ฝ+'๒รอใวw ัNY™M’ˆฝ๛ฦูฏ์<๚ฤ3ํ฿ส—ฟ๚ำ.๏๛๖“{ไเ๊ห_œ{๕ู—>ธB% {ฮ|็™o?pr฿:n}sๅร—ใ฿ฒ–Pฅ‹๕]ป๘ษ๏?y`cuดฅ[ื/่์ูsŸ}~๙ๆๅX฿ตศฉ๛๏ป๗ิษC;VD j๛ซ฿z๋ีW>ธ~psDกD ขDh[(  Œฆ:o^๘่ฟู๓[Oฎซ฿ปoฯส๚]ธ~๋โ…+s์ุต9ฦŽS=pโีฏ฿ไƒwป๘๐“หJ็7gฯพ้—=t๔ž๛๏ƒ€ษฦส๚ฮƒงฯผ๗ก;Wฯxโ๎c/๘๎{็ŸrpตT@X!Dฅ…B‹ข `วฑ{O;ดwc1ท๎ธz๙ฟ๓ซOฯ}r๖๚๊?แำ๗ุ ‰ @€€‚€€Ri$ฃiะ4% {O=t๒๐พ=ks๛ฮ๕๋—ฯ๖ู้7>}๏ƒฏnฏln๎๕ภ๑JdฺŠ–&#ีPPฅช‰j’ชmCUJ‰J#ช€J‘"TขะDขH2A@ fณืz77็ฃ฿ฌ„V“jUTขtd$ํึืoฟ๖wฟx๑๊ฝ?>๓ฬฃห7ซ?ใkgv:r๗ัซ7ฎ|ฮ‹ฟ๘O— ].๏\ฟx๑Z๏๛๎ฟ็๔ษ}๋ฒ}ใ๒๙ืโ๐์ฦฝซc!ซปต๓‡ฟ๛X7Vfอ›>>๗ฦ‹/ผ๘สฏ?นrJ็dlบ็วพ๛๏=ุ]{Vฆjศ๕O฿ีฯ๘^f๗=kƒฌz้]w๏ฉ0cด-4QัHช”jชPI$… Šj2-šฦ$$„ฆะขF$ฺIฺจดขITƒก-ฅ,‚JІดค$ DUƒค(:hB"@S:g2d@ •RA R€$-ŠขhจD"€"ษฌ6’(… T2kHUZ#…‚ข’mฅmKำ’ ATšDSจˆ(‚"Qด( ณDD)5QA(ฦ L$@HJาVดZฬ’$$ B(ก-J’ค@A้@6i+“Eจ"bˆา’ะ*šด)š’†R…’‚$FดญPJ5 ฅ„HRฆ-QSำii“ H4Uดั"”่$E@ ‚j€€vะ„)ณ3’„A…IQRกB…J’ZBD4BหHfต‰Qะฆ „ูถ‘ษก€ะ‰BIฮ Uญ&!ษขJ%ih’’Bค&AQ)dhf#†4ข• f Š&ภขE‚ Uาสะ6H ฉ&ีIPIHDB J"า––ถ%I)iะ @iต’ถJ"ดD AU)m#DUMฉศ 0MะJ’€ ŠVจ‰ู&มิDšคT[I)*Hซ`ดZ‘hชฅ• hข€* ””@QQ4h5ฅme$  ช@ HU’T„ถ(J$IšFT#ชBช!€15ค‘ถn\พ|๓/vญ้์r๋ฮ๕‹ฝ๘าู‹๖>p๒๎“๛kทVŸ8ตใ… ็์[;ฌlŒ›—พอูW฿ฟธ„าAฉdR)eu฿žCw?xไ๙ทฮ>๒ป‡>ธ{พ~wฯ~๐๙m T$*4mke}c5ื.~๕ี'|rkฑ•w<๓[Oœ<๛๚›oพv๔เๆ๒ž๋+W/~๖ู?ู:๘3งป{วฑปฯyl๗J๎\ฝ๘ห/ฟt๎๒ฎปพ๛ฤฉอ๕ีฝ๗=๕ศ™๗?~ๅ_=๛‹ใ+฿ตw}นูŸปž฿ไั๛ŽฌF‘ฆ4,ฏ_ธ๘ีg~ณqไไัฝ;ฦสŽ]›;6v์ศ๚Ž๕๕{?๙‡๖?x๑๙—v-ว™ใ๛ึmธ๘ีŸ|q๏ใ?๘ึั'N฿w๗ฉ฿ผ~๎อฟ๎มอ'๏:ดนธ}๓า็Ÿ_ฬๆ‘cz๘แcฯีgŸ{eใ‰ปOํYท๕อ—oฟ๒ซื?ฟu๘ฬ๏>yl}}ั(HbดJAึ7wŸ๓เ‰=ใๆลOy๋อ7^๘ห/?ฟฑz๘ฟ๘‰ต๘ ‚ำ`฿๏ย0๏ฯ๓ฯ9wฟบบาฝฺ.hCป!$@ฤnl4v0๑L์4]าI:“Nง๔e_ค้LำL2ํ‹ŽวNฺq'qมมcณB’Y…@ ฺ๗ซซป๓>|˜ฏผtโๅ็ผ๑๚nป๐Š „อรoผ๔M…Аษ… F2›ข1$H ฤD)‰9Sa  ก(1ะฬ„$คB(H!DTU*˜2B DˆฤH(Qฤ@”ม€ ะ$Pˆ@‡"ฅ€ศˆIh€U*ˆ&จd„ !5p"€RHEˆD@9  ศ!TรPk"ๅมP1P Pš(‘’Z( $ะlLะPB& € @@ )d€ $"@ !Œ .$Dฦ€  ˆ#D,คI" („*(!%ะL„)TZ(PJbHFฅ()""€จPMค  3ฆ  J1€ %4 Dฉ”`0$ส@ ฌR•ภP#0b*H!I@J8 D2ฬษคT”0‘€€˜ฅก@` ก B( 2ิ"h 4#ฤ"@กก€T%ฌˆ„"$„ „@คY2@H)Œ !@EH ' Hed"$LА‘C&”B‘ @Vอ3%€Pƒ T B %" @" @ค! €TจAฤ€Pฌ H(` …J%, B€fร   T€  Cข1@Œ Šม€O๐๛๗~‘ิ๖ษS/=๑ศฃ/œ>xmทpรฅตน็ุ wpฯ_๘๎{ฯyตK๎:๕“O<๒ƒ็3ŠIˆ`๙๘ฃฯloญD€Bจ$ฬญ#—\x๎ฟ๛๘๗๘สK/=ด:ถ฿๚เฅoภ๛~๒ส฿r๏}cืฎ=_qํฅว.๕๓G^{ก๕หไม๏?๘ฬฎ‹ฏxร‘}์<๗รผด๗๋ฎพ๒’๓๗ Hจ ฐ"l"๋˜Œ• %l๎=xไ‚ Ž]|@ฝโŠKฏบยื~๘ฟ|๙ล—Ž?๚ณ_<๘ 9uโฤ™5ckฯ9{6Y†ศz็๔้ืOœุqต์ทks‰` `(EBฅฐฺมนผแ’ ฑหฏธ๔Šทyํg+?๕๘ณO?๓โ๓ว9vf๓์™Sgฮn๏์ฌ›่ฒฑนนตตkkc™:ˆนs๖์™ำงฯn๏Lrูฝต9็ฮูณgrck฿=รณ'ฟvz{ฦฎ]ป๗์Zb šงOผvj๛ln๎ฺฺณgฯ†่,ๆ™SgNŸูู™ำหๆๆ๎=ปทV†,0ื๋ํณงO<ณณณ^ฑŒีฦๆึฎญอeฎwNฝ|๒ิๆ๔๋งู:sโ๕Wž๑…Mวๆžs๖๏`ฬ9Mvž๖ๅWฮn์?|ไข]mํัŸฝธs่๖ ์?g%l๎=|ู[?r๙-ฬ†$าฮ‰g~tฯ~้KปŽฝๅื;zpฃhž9}๒ษgOp๘]๏ุฏฟ๙โC{Acธไ฿~๏|แผซ>๐ษ๊w฿v๎0jพ๒๓ฏ้Ÿๆ๎ฟ๕7_ฝํึ฿นrIฌ“/ฟ๔๊๑๕๎coน๓๗ฎรa(ก X8 UญZ็ขFฤœŽ!จฃๆฌPงแ@\@H€Iމ*ุlฝณvs…Z@ฬ&$ลข ดYNPLT… G1ณHQB€ฆขฐŽt‚™Pลฒก3šัtˆ&B„ฬ")Aช‚ˆ’ˆ#ก*PAŠ(YtภบLek#B+L ,œ(Y ฉ1 วศQ „M† H"กH”ุ,„ ad€‘ฑจ„(3S‰fPสฐP $Š2g๊@tณ๕P@ฃ’iFD*สศ! 
Žี€ึ,[užล m3Ž˜ิต๗D6„zNถ‘‰๑6XEฯnฝ-m์๕ษ` ทงซั&ุ#ฑ$ฯฌk˜‘‰ฑI}ถWมxดํf l๎`C1สพ๋F4pู†ู ,72%๛ฎ7—(ุ[ืฆfถกปYšต0{v3ฎ“=ถขfี Q6ฏn{๛™๒ร2๓mฉ6บcžE!ภรฉํUอ 1รถceโa>Yถj๏™>ู{{ ซ\z]l{F4kฆbูœฦ6\ษถYค—qs,๏ปชfญg๗์–๑™Aฺๆึ Cิูถa…f‡Ll}ฺwšxiCจ๑๛y5(xฐŠฦ๓n็vฺถ:6หฝฉยฌq๖%!Sณท> 32™zOtฆˆวใฆู @b›MฉmQŒฒทLธivฑณ)Kฒฉ:๏ซ๏sฉP‹)“ฝกn-มFณg ฎิfoลfMgณ2‘ผ๗3rส ๋ํ็ฆฎา6๊4ฌฤV ้ี†!I๓ฐ-ตMo๛Dถพฏ?d{๛*Lบด7Ufำอšฉุg“ฐ ี๗6‹4ฝึ๒จ*ณึzด้?็ฯo&๘ปw?๔~๚ฏี6-ม&Q™mƒœžyD™fฌ๛ดa F‹ !ำ{๏๎z[ษ<ถ‘้ฦ๓R ณ)cฒ…@๖าทฒจ˜5ฮ„Œด๏บlุต็zำt`•๑ฅนัึ,-‡mFj“ฃxถuห=ฦม€ysฒI๗๔Fศ6ปผ7T ลถำณa ชไ}wQLhf๎lVhU๏}g9S๋ํkซS5๓UูฬŽUภf&จ๎meระršb]x๓ก๓d๊ํ๓น๋วlP™ฑญฐยผŸ๘/๘๋๘็๘ง็O๊—๕ห๏/฿ธ๎o๘›๔็๔_์~ใืูo๑๖?ธ๋ํIึๆ)†D†y[งฑฌJk3จไf  Šyฐm8Tฐšก20 diฦRลb Xฆ5[bo0•ไhฦ$ณจUnCj๐`F•˜0ด™.sกXjC™rj[าŒ'๙ฬl€ต6๖’Y`ดํ}ปO16)k™]ต๓๒^์จqฺVIฏ๎)ุd4]๏a‚„ฎ-lXwร@ŽX[๖@• ฌ!ณงฃ4ดไ™๗R‹สตฬห A‘ึ6Eธ2 lb64^SโฌJlาชPcVi“XฌvŠ(lQedžbิXฃ™วM;tŒ ชi+ Slำlฒ•Tฺj6ฎเญูKŠฌัXฆaผ9ฆˆฑIอขF{CŒจ(›ตh3]้fญ V6"[—[,Ve{z๙ “™ึฺ0รธy๛ๆ ฺf]kmดc[}—ษฎ-ฯ>n[V๖่  sylซQKญซRืj ฅตอ% ึœฬ)*{กบ–Y„าHฉภฺฆWภLณำศ^งฅ็(‰อ4KำFjƒศ6E-ET`ฎ2`CX›ตžซeว\มŒ๘8mช3ฦฬ”Tูj6ช๐ึถˆa,ฉlึ€…Fฐ๒ๆ˜ขุ^Zk9XL3TmO6Yชp(†ุศbU๐ฯ๙~็w๚O ~ื฿Oึฟ๗๛หดกฅ cd›Œฐ‡:fc.ฑ—Vuk๐ุdYทG’”Pอ6g&ฬ6›ชTƒบˆษN] ƒดถyณ„L™ํิฺ า๑๒๖ฒdัE3+8 l 1ฦ %UˆE†xํqZ5lอRม๔&$ƒQ@QฐญK›)ึFƒูl๋ร‘ฐศี -1ๆูฬฦฎphฬJ๊อLฅ *3ถ5ย 3kซ?P1J3„ภFฟ|ฟ๕'ฟ๗Gฟ๗_๔ฟๅ๛ห๏พ?Apณ‚ {žetื-จเT‡‚n‡Bgํf่ศบ•!BG้LZ0ซต…|$&ๆ}๎ำตภ๛ํ~๛?๖๏~๛wใ7๓ฟ~?๘Ÿ๑?๙Gิศˆ-HfจbF9ถMI(ฦbSPh‡m ิ@@กcฃ"H’อศ†‚ƒสถ)#ซญPฑฐฺ–บ๐6\ถmสeƒhMkf›‚Pฃู ;†Al›า ˆอHa‚)คอฌdŠ$ุh=ุ€า†Œb tmณสLh Š˜สยF" ด„ฦถฬ,ถMษ0ก้จฒทฅ Š6 Fภ6@š™ฝษ@0 dlH’0ƒ‚ั0+งm ๆX12ฤภMUaFดอ&š@จมF2คยจb`Sy*ฌก bˆMูhTจm’ฐ m["6ฌฺZcฝŽฉ„ู& จM ฤ– ˆฉถ7• P`8ูvzMPถู[  6ใŒ fฌก„adf…@mรT@1`[QZlC0ภฬbH†AฌMaQZ1„mแš%ฐแY›ู‘ถe รš!Hณ%ล6ชARถ‰นLhb`H @€ะ†B#$deFธรHc4ลุ$€ฒง˜%สf$Q‹-aฒฺ6JผaํฯฬOํฟ{ๆ_๓๊„dDV !6ีำL…y)ภฬษ&Sฑูเb-*›362b*dณญq #hู Pก™ญฐ‘šMY ‚™ฑญ3„j[ฉ˜„dฦ#lฬ3+i`›€)L€FfBกตaVลถA c…Œๆca‰ 4P†ก†4จฅB`H3]ภฯtฑq" cฬ๛้๗?W?๓๚็?๐ใ๘๋ืฟ๑๛๐๗แŸ๓?๛ŸŸัlหt%ฬXšฤ2Y^.dX(&ดKKภฬ`คfหา@Lˆ2ฐq"อ,ฃ ฉ0€˜ml*วl$ab&6ฐ‘L"ๅูAƒPฬ›*ฐm^u3๑06ก6@ฅฌ`ึhฒฆำต APaaภึซนถืEมถ › A 0Kฌฐ…€๐H‰™มI72` ฑฤถญสผญุจŒ-ุย‚@๖6–โภ”ฝMฬ–qlmUฺ&‘ฑํŽ™ูฦช*ฬ[หวTKะBฝ})ษdPmŒ“F(03ษl™ŠัLกff'@๐LE“d ้˜ นุ@43XสSศฤรย$Rfc 1ฯLbณน ฬ์ฉ[› ›MbŠุTณFุFS(สCละ๓บkŒ&ˆฎmกะloฒ7ฆคุ†a€Y@ Tmรล#0ถœtฐ m A@ถํ]cVฌlห ุ˜E“ˆธ ƒ‘ุภ–ว‡1 Eฺ&ณM‰aต1L%ฤ6้@(X‘gHf!S†A%0 ภฐaiมณEk€$’ša6Dซ`11 ๆ.ถA4kฐ”M‚Yš…FkGส6-M3fM0"์_หล/}ฏ™›ฉžท˜ …6›"ณhึ†์ๅFIˆฤfตึฐ ฌฤะ…ุdoŒr"†FˆF้m)ˆyoคŠF†ญ€„m)lด V56`aiฦˆ& b๙`036y9eษถ4ย`S›f–Tๆอ*2"ฑšฬ6ช‘aลุ@Hฬฤ† c๖"mFPฑq" cขฉ€หd˜6c๙ฟ฿฿—Ÿ~๕‹ฟ๚ล๗}€Ÿ~๗ำฯ๕๓ฟ๙?วf{๓X™50I@kkฆฒ ฐV ด0`ถอl{P{๖…„™$0YจMcXf”Bฐอ…อ†:ฺVjฦ J#cัฬD)C Xcฉ-ุ6ึUมcLฌศ@ฬึl{c6˜ล„ูุถvะาlM 0ๆ!m#Žฦค`fณ*[Dฒ ฌmฦ(Žถกณฑšอ ฆุถmˆLRตอfŒM`lY5 Œ$›Aฺ<ณ@บ’Ujุšถˆภข5šฎช˜ุZfmž&h@ฦ€ฑfSFA Z Šmo›ูdํู$ฬภดAm@cšMฉุŒŒ7จุ†ณาฐ1ิ†a"•!ฦฐ …ฬf๋๊ cะ&‚mkฑmฬถŒฑY`mะ‚ Nc&0™อf6Nฬ`)B3-@• $2ฐ6›&tด 26อfm@ษถm,Kถืe`าุf`Hฤƒๆู6ตNeก&[+ฺ"H”ฤๆE›&ช้t…1†ดm{,adl0ึ ภ ฺbhภl6›gƒ๕ž=W hต1€4ศ c€ฑื›ชfณจ™-Hึ`ฆชj3`- !cถu\ถ†€ษŒkHตmถอ€ษl†ุ jZกf‚†a{34ณ1ขฬ˜*™ฦLh+"œ6ฦ„"Y6ญmณ16oฆ1&ู3@ŒญทXmlฦ4ฐอะlfมLชVˆถตข จe%ฑy์ถšR&บJeู ci3ถต”1€คฑfƒุ š€ี‚ ณอ6ๆmjฯช†lฆMcภ3+มฯถทg3ฺKZะ฿ๆo๙฿~๙›๗฿}ป_๛ฟ๚ลŸ‹?ว๔Gว@3๋ฝE๓Imkซัฐ ึ็“l#หฐ]f†บ™ตs’,Qพ฿ทsืนญฝญon †อใ๖*-N‹ฏm๛ds›ญW็mถ’็>Y"ฆฐ-–R๖eๆฅภ0x-™€หีVCห๖zนU}˜m+ณVคmฎท'5m๋ญฮ™m3หur‹a'รถ& ษผค`G}ฺfฺฦฺฑฉฝY]d‹šฺfฃjูฆNi&eถGหliYkม1ฒป6”อฒํส[9m R˜wไฦฬTต‘ส '๋zŽxoŸป็›ฮ™๊ป]ูฌ[Uึณsุฦึhs tw7o{wLา0 Nฺj˜ศxvŸหูหฐ]f*ึฮ•ฅIดต๗ฦชป์wู๋ywอฐw1ๅด๔l›ปn3Mผ6ถ•š+ชท)ฬยHูƒ๕šส`f(ณปฌ*šf็Jlี†็๖žOณYขิณพซาฒอซ:นeรZU a”3ห๊žmGอLฯ์ตƒ ๖wm!ร6Ow๔ศีi&e6d๖ถc…m-ŸG€]าTู”ฆูถ–ซษ|„]{ฯ]roช@ฝeีJYษ{c[Zc็ใ้ำklํฎ2shูถอฃM บJถ1๛œ, {h[๊ณฑFd<ปซ2˜eุB˜วณชd&ธ๕*๐๖ๆ}ฌ๗ฬk›4cv‡มฺ’^N]Mณ}ฮฬXถ™ฏ]}๊ฐฮ็>ฑmอ{6ฝฏ๘ัf-b˜นปฒmYOS• โฎํว๖6ฤฺถญ๗zkJืถ‚!w๏ezูญ[7;6ชช\oผ๙bถmSอคณํ๛๐ฉLบศzƒ*๏ใ๑ถว‹u๎ชn;+“\mฆหEVMoVปถพ฿6ฉ.๔พf7Ww™1฿N!ปv…ง+›อฑsต๖เL฿ททoื•5fŒFrœŠ{]w๘ธ“ํ}ญทถšš=ข 
์๎ฑขฑ๖ํ๛}ž๚ชปู่พ„lQY6r฿ต๗ถgฯฆ&ฆญ-Rนึถ}Ÿ7RีGฝ๗X—kfVOซยถwU'ธhถููl์c[๓ถQื}๊f๋vw' o7o๏QE€ั{๖ใk3ดณa่ฎุ๖@ึำฤี%พ‹’|{฿ อฺถญญ๗šฌO3M”๊ฌธ{;ำำk>๋f”๎บฎ‡7aถงยข๑พ฿ทๅฃCชํmDํผ|ทg/ึ)ฒRทW $L—‹ŒM๏mฺต๕พฝot]\oถลญ๋๎š1ฏ[ฒฯeณ๗L)– หs}฿พชซ_‡AฉำษฝNWsถwฏ`ำิ์Eฮฎญู5cู5i฿ช๚@๎ขวถ;dข2fภถs๏ฑทฝmj‚`‘ไ’๑พรบ๎๊ฃๆm๋๊nžกน• ^ชS8lใ8ป๓<๏c[๓XS๎gืjWืแ•ท›ํ=–ฎF{๖}ฝwxํc3ŠฝฝAึ˜จ๎oMI^๏๛†f6c[๏๕dH@uV\[{๕knศฐั]ืงวฦ›aŸ x็@Mห2บฎ็”S@๑@D”Œ!Ziก‰…๑่T ;56j 2@9:ร ฐ็๔ฝทkู6ž&ฺ๗ฟนวี!ชŽPศห็ณืLJ™\ทตWร๋mฆr)หชฯทฑL{ฝฯฦuฺ้lk>ึu1fY)dwaะแ}nJ %ึ{Žป๑๖ึบปฒ>=€ไtา:Uลษ&๓nm๕\ณ"โlอ2เื$ฏ๗นฝต๎> ]ถqGูถˆblฃ้๗ถท7šภดe!Yf{๛4๋บซ^uŒMซีฮก‡€แร๎<ฯร›)"“ฎ๗ชy๐ๅwฟใ?cภ็>ฟ๖ญฏ}฿ฃT‡*ฎ˜ฝQwHx‘ุาซฤ^Mmปดfq7ย6๑t—ถlฺš๙โชถmฃ’2^๕–f3‰}~.ซ*ฺ์ึ^keks•ด‚ฮญทa“ะ-ณ จYว๐ฒสfถฮyoRloึึณ„ซM๏ู„นkkSู[jVU3ถฐ=Fุ๋]ัฦ…ทูบ้’*ykฑฺูฌ๎ฬ๊ํฎ ึ๔dk^ห%ูร๐ฺผึC้-ำถ๎ผ๓&ฺZ2ผ ]ญgงญ™T๎02yO*”ู^ืfk$u“ึฆ7ํ™๙ึ+{Ÿ™mญyปฎู{Lๆ.3ำดปรฬศ˜กซฎฆB…๗นT!ิ๔ฺeาฒŠๅUo+ ๖ๆ8 ู>?Oฅศ›j๓E ูฦ๊ฤฒ2mฺlนLol%บ{ฯrๆQ์>็ใณ"ล„ูนตm srห๘C“ปy๖ฒŠyชf›NํM5หŽtู*2Ÿ[ฒ๎ฺฒึRa{ธญe&๗ัŒอฐ๑hป*†๒ั3oE ง๗ัฝฝW‡ู2ฏ๎šี›ิ;๖)[ฬฒค™1ƒ=ญต=จปkˆ๓&5ถj๔6ๆœ๛4ฃอf็:ทสุ@fS)k๏uแอฤUGต6ถฒ››ฌZhฺvู{Lฤฺ,*๒fOๆFจ˜ทฬป/ณ d +สf[]๏อชiš![ญ‹ๅA{s“Šft์๓๛ŽงRไํžSฺถ:ฑ jฏzrะห.Gืึถ่ึ๎sn]คฯx0ทำ๋ใ๎fูx3จฦs]{„lปต•…ŸŸณZา`[Uุ8ผ5ร}4cู>i+ลd๚ู่žšจำฎฺ<ฏ4›ญ็บฤVVืฑ่ฑ9› –ค™ม{ชตQทปJฃyค๓ู์n‹ถูๆ\mlฝท*ุ@†mฃVถ–mาUGะksmฝั“Uห m—mŒQูk1ฆฎm๏ษ\ o™ืวณaไชธlT๕๏อRาบ™–6ฺ•ไf—ึ ฉฤ—}~jฐบK{ุš–ฎชmฆˆLบซๆAb็็๋.๙‚š"ยl>๗o~ฐํห๏น LKc›6RฐMฤ$ฝํ][bฯ,Mึfฮฅ—m`&ซdH†iP&kƒnfm๕1ึKV&4+’gˆน"&dย,ืb 1mซfˆL›ŒุZCLลฐอ,DG{ƒ&ูฦฬ ฑ-˜ฯาฦ รHl“:อฆkคlอ.ผญ๗Œิvตูฦœ=Iหถ 1%‘TCŒข`™@กฬ&ฯ€h›’FชฑณยEฐ˜Œm3žฅ2ยณW7ฐ โฐˆIผ-MX ‹ญia[ช {o–`a0ษa‰G{Y›št%Bมชฤ$ุl,Vชffซฆf6VษชฬปHe ‡ ฒ Uน] มโd3r˜™7Bzฬ> ˜yjkSฒอถตf f@11<ฉl$[.*ท †ญH‚1X.o|„A%f๘"iุภ–Œ €พ๘ห?๖ฃฟ๘๋?๙“ุ๛|฿๎๗โ[_ั๏ู๏ฟ๏|๛;๘โงๆOย_๛แ|฿ฝ฿ำ฿๙ฝoา}ๅ—มOO~ล|ใซฟ๕?ึ@?๑ท~ๆ๏ˆ/๛wฟ›ฟํะฯŸ๛ลฟ๚ร_|็[_ฏ๕k฿@_๙ส_๙ฟ๒๗ึ~๗/พ๕฿๋ืฟ๙ๅ็€-มŒฉ+šˆt›€2 ˜!&ร6ภ˜Xˆฆ o+!0ฬjฦlี6V ถ1ฉุฦ&O'6’}ฮฒ +# &SfBiFYูP(fcJ ! 
ฤ”,A Kภฆhm ด&,ัl@6ซƒณอ&%ƒึ`S[ฬ,  dอ‚dดmำ…หม`hIู1`ฐ"€bHR6ฤ ถ[ ฤณ ›gSภุ4ก%6ฃฒši ธมฆสฬf™mฃb`#ุjB``ฤl,ฑYฺฦ ฆฤ(สF0Œ˜1+ณH@ฆ•ข23B53Œ€J@!›%ฃ‚ฤ6ะฦญษmŒฑ9ฐm 0J‘อู`[า’`ฐ(a&ฐcl0‹2AiŒ,‡‘อจ‚- €‰†ค €*ม$˜J3ฤVลฌ‚˜ ž!m$ 3”อŠf@lซย`#ถjcƒ4k˜ฑ‰i$@ฒฑX ชa†-”ŒอZS€4ƒm.…!๒0A-วF +{ณ$3‹‚F •ึfS†คฦ^>d‰ภฐฺ(€ ฬฺ”lฆˆyุถ &ึR02* 6lฑC H#‚V€I€CสฮˆฬP- Sส624$€-ี1F™%`3Vณ†˜c$1ถตด A†ญ4Kฒ1` –4l`K›G›ฎุ €/์-“ ›แศ่๚ง~๖Ÿ›_๙ีฐ๏}~๏ํ ฒm฿๙ฮwฟ๑฿๕ํัo|๕ฯ๔[Ÿภว๊?ล๖~๒Gฟ๗๕Ÿ~๓_๛ฏฏป๕๚ห๘๏๐/๛฿}๓O๐[๗ณฟ๒ ๒Ÿ_๓‡?ฯ๓฿ศยฏํ๕ๆ/}ใwํ๘๒ซ_๛ะWฦฯ}ๅ‡๎ป๒ว๖'๚ุ/ฺ/๋๑ใ฿฿๚ฺo|๓ฯฟ|€4ย6Ž‚Yฬ\dภบ<๓ธ#l6 ŽกถK[0/d่ถืnจ‡!{ฃ"€ญญ๒VฃYlpg&Dฤล6“h๖์ๆ๒l+u€ญถฅaบm‚กrฃท2ถ™ู๖ๆƒ˜œ&ร‚ljSN F˜ภU๕ถ3, ™•6h4PL@ู€e€jc›)aฆฑ1{“ดEkyํใ@†ญจ^Kํอภศบฬ j{2ภd;ถ1ณๆสaflฌ",fปkecุภV ฐรˆ)%ญ`FอlLฬ\f–6*‚™ูิ'z๐๏๚._ฏฯk)Pฺ2R ฦ[\4cLL๔T<0&‹ฯอแ™1‹;[ขฦq1 X4l2„AฅJ๛๛๖บp *fq0ซๆ“ ฐ่ถื2jgำ4“m ั๖Œส˜ฌR๑ณ›จjK )˜มฺ๖ฌ9ฤXE3#cุ&ำูศV”—0ตmFFT˜ษ1 ‹๊6€šD5mZqVฎฦฐhC“–@ecภ•fฅุtู๖hษฒอณ VRบื>naซฤBฆbํีฑ)lC5+Sฤ63seHภ6Kสถ 5$•ดaล2Xท-6"^WZ`รt‘m`.Xมุ,ฅท‡ Šmถi„pyฐ๐ถ 3ู tkัLถgซถ’,dXธใๆ•่ฬส†BูฦjŒฺ๖ศ6ปยd„mฑ)#M”วสฦ\ถmรb>j™i+ๆีŸt<ฅšถ1ฅ•—บณู d Lล&ลbB1`B ถ ถ™ฒ…mํM(ัR/YQ์mWdภจZฦDƒbL Šฃ‰™Y$‹AฑอTl1†*ฐi“1ถขMlลTผ*a0๓ึว%0ะtถIl^ฎ๕ถ%กุfร$ฑ›ก€5%aVดMb6ถL‚n†รŒ2๒ระฆ`{_ษ๗{฿๙ม๗๗q_ฺWฟ๑อฏฬ฿๘๙_๕Ÿฟ๖ฏ‡ฟปห?๙๓/?๘สO๊/ฤ7ฟ๕ีฏฤฯโื๚‡๏่็แฟ{ฟ๖‹?๖'เ๏๖๔ฟ}ฆ)‚j[ ๔ŒI mjษ#v™ผ^Jี61อบทZzDT+ฦlต’‘‚อถ๎ถŽอฺ˜CฺlŽฎ ฑLhm$Wั๓^3ฏ.'Qญ"ี6˜@—m q5ผ†VยlCฮ–-หถถA(Rฦ„–๔,ส4Lยถ‹dI)๑ถŠoฑฮึฆญ๐๊lš"šฑ šm๛ดชlžš2ฬญฐญfm`#Mม$๑6Sุ%คทlLณe๓ึM2ศTšฤlปฅู,ะ•†=(ำ|T` ูผejตฦl[†|ดmorญzฬฺีฦถฉvy˜d+ik๑ๅP7<ย4Vฤcษ๖๖ฑQaุชL7สljถฒูด็ e$:ฦ6KF์%3๔ถ1ฉ—•˜Wๅ DๆVฤฆi™Šmรยฎ†ูHIfŒkฒeูฦ’R6W“ฦผ2ญๅfAตณถ]€6ๅถบ๖vผม๙x^46๗ถีinฬ~๏Ÿๆ๗อ๏แบฏฬฯํ฿/ƒo๒ฟ๛KษฟŸwู ภ๗์๓๏ยฯ|ิ์w~๓O๕|๑ืพ๖อฏ๑ต€”ุ{ุe“ฑ7ณC ถU›ิmlคj{ส]•„“งฆฐูปทO[ƒฑmป๔๖–ร›7K=V1KPๆ\g-f๑“c3VU›&†”6|nฏชาxv]๒&๊ƒn^ผ๎‹7ฬhถฑ>พจkYะปหถPฅM>=uy†mž๛˜8ฦ๖ฺ‘ฐตtุร;…GผVŽวํ™)CW๗ถฅปฤ63๊>ฒ%3”œๅj6T[๋มุŒ๋.mž5f๖ึ>นOลeึูป๊ลญ•ฝœlฐmUžูบ;ฝัŒN#Z@6ก…บKlฎjcนzณอm[+{ฏฒีVฑ๗ฐb{ฯณC„mชMช!Wi๏ า]ic๒Dgูณwwgณ™XถŠฒ๐ผ๘ัœyVยฬฝ:ดตmาีkอ[ว{d›Uี ›ฎ6ฬ>กซอk็ฺ–ฑ็:๎f6•bo˜ ๖ผฯพ๘jญต™ C๏สlSชถ2Sg๒t๋ัŠ8m-m&ถkhญใ=ฒ/๒ษmถ 5]๗ถYW]ผฝyTวถIฎ%llาn=lตw๙ุŒmอ์ญป=ํsปšu'—–KูKŠm#ถW1๏mW6อ>4ข5ถ%›ะ อG„บดaั5yนlฦ6Zึส6[dซุ{€หฌ๖žg‘ ๑ซFKุ*ํm{๎ิ๕กวีิ\{ฌป“ฝg ู;ญ.}๚ผ๊sšถฉbฆIูXฺถฅ+Yฏื๖ึฅฦ&ูžฉสด!pWc<ฆปฺฬRฑ-{ปโ บใYl-ฌฉู๛์#u›gm‡‡ึ๑ฐมuX“„g๒ˆฝิงm kว[ฃณ™ัZวfฃถ>๙JฬM๕ษGu•fฯใ๊ฒะbOาญ51oNKถ‰ฑgw_๐a"cึร*ฺ›>๕aWณฒ<:็;e าถGl+ฤfฤผท]๊ฌ™f'ฌmษ&„e๒1"Eฑฑ๕q“Eฑm3k›5dชถUlvืžผอ†…าx๏U0ิํTษ{Oนป>˜ะ˜|บๆฺ์ฅ๎z{รฒEตขKฯ+=6๗ถีอบUnฆเ‹ฅฑฑUูšืU™€ฝฯ๏ูO๚๘wฟ ?๗Sฟ๘7ฟ๖ฯ่K๊wแo๋|ํ}ห ์7~์ว์‹พ`ๆ-|ฤณ!J<ึ์${ดโ2{sMฌb†6ฎl‹ ํฝVwดMN๓™˜kูB†ี{ผUูš•Šใอ,Tณท๑อRQฑ๎6ฐฝW.ƒ˜MฬจZ cm๊ฎฃผIšตภn[wก8๕๖.หฐJ9ฦฆุfหํูวำŠต๗ึาณD,ชEณฺ0pzนMเlห.็ํแอl6ฝ>ด5 †-ญXZcฺ|หK\๕ฦซ €ฤŠ™อ–ึถG ดอ๖iw!’2{`bwป๕>฿HcTงป'†ถgฆืฮ๎š๊žmkŠคXคต6ตYำXฎzxฺ•Mš1[k๖ถ=๎าฬณ Qจแุ@+owkูถMๆัGSรbgoos ห0wMี๔6๋Zํนm—ุZkoต๔จ๑๔ัุ$ฆงๆุ3บœฏmUŒMํ>šผ1ขฌ๋ฝว™)ฒญ๙จทญIjcŸ็xzี>{๗AMV0zๅsJหf^ๅxฒ1ฺี็็๔”ฺVู]Y& ถE(7†Jsl› D5ืู4•๑zฝ[›7”ะYฺฦ.ห[M)ร lo›ei ปzฏY#dป‚cณz“mษH™ ˜ลœm6สญํู›ึlFSV œ[kkถ{,Uร‹Uธ‘Š™ฒ™ญืฮkฌ^šู#ซ‹bณ=jๅญ–lF๋MณMำถณํ+d˜ปฆjl6งnำฆ๋q{ซ&b/tฑูXฑžฬa6ส‡‹ผmโฎๅ=ื๎Z๖ LY53fRg[SฑทIwท7{M%ฦg๗ฆ š"รุรชlอ๋ชšูฦหํ _lP6ฆย{’เ}~~๏ฟ๓ฟ?๘;ฟv?๙ตฏิ7พ๚แหO๛ม—๕ƒ/๔ืพ๑ีฏ~ํฃ/ศ”ยฤฒa†tN’Mž•ฑส”h&ฎHk†PW0fๆๅn๔ŠiM5of‡Šdc’l6นุž†-บ>Œy}ฺy$ฒฉ (Sอ`46ถฎmภศฦ‰f2‚ ฦ@ฆTZ@ 6žณญตอ:“fKกอ๖x๋Sb&๎ฃšฉ^F"lถ้์ีNdถึย†ชl–sreฃŒ•*l‡„-ZูX๊s>0’a:Pิ’;|๐šมF้ฃ@Œ=Šต‘&“ลญŒ(ึฃ hจำ3ฃI็ษG๕์€mชq2 ’y[jุŒฝืธ>ยl{f™ป QRฃค™AฝA”อjฏ…1ƒถส–Fึฺ&& œอๆJŒตสถดวฆีGz›*Cยšนฅถืถูฝv$ภ>๗>xEซlฦ&QณUต %ฌFึ†ัฑmK{srถ4งQฃlo›Nฆ”mGก+4L๕ึz‰ฐูฆL-ฉlZยfSW๔*pฎli 6Oซ @HฒVฒ๋F๒PEถ`XQJ,–อlฃำ dดG™6ถฤŠ6B,k,้m*0รkwฆญ!5oCQlL…๗คฤflซ๎ 
๏iJa*bถ€}๐G{oŸo?๚ั๊‹ฟ๙+๏?๚ๆืฟ๘‹๚๑o๓๏%พ๖K฿ฯฝŸ๙•ฟ๙/ีฏ๐O๚ฅWใฟ๓ญฏ่฿ึoแ_>ธŸ๚๙oฦ฿๚้_๙ลŸ๘ๆืฟ่ฝ๏}๏{ฟ/อ็O|>พ๕s฿ี_๛ึ฿๘ฅŸึ7พ๘๊‡|ฏพ๓'฿—็Ÿ฿๐—฿r@๚ฺฯ~๋oญo_๘ษoิWพ๒…ฯ๐ป—๐ป๚/~๗/{ŸŸ€ฏ๕งณๆฟ}฿G่;฿ซฏยo๔/๛วโว๐{_๏ั?ฟโป?๘สทฟอ_Ÿ๖ฟ๓ตŸ๘Zพƒ?ร?๛ญ฿๚ฮั๗ฟ ๚๊ท้ฟ๚O๚'๐๙'ŸG?:??ฏฯ๗ไœœ๏! ไPA@๑‚nulปu;;ก3ํLl‚Yฉ+ต]W]ช2ะE!@€`ฎไไBNN8฿ฯปฯ๓ํoผ}ว3Ÿx๘™'ฏ฿๗ลw๒ƒืฟ๗รท^}๓ภqืO?ภณฯ๗๘รื๎ผึ./฿yเ๕~แo]Nw]ฟ๖ฬ็?๚น_เง|่‹ใฮวพ๔Ÿ฿๑ฬgn~๙ห7ฟ๒ง/๘ต[ทฦล•๛๎ฟ็#่#บ็พป/ฎ{ฝ[ฏฝๆžใ…ผ \\ป๖่ใ~นŸz์ฺืบ๛ฏฟ๒ๆ~qํ๚E0i f6 ย6Pvโจ้hf0ภนe,ฦ,งUa”Dlเ3จ8g;C•6‚`Šล@'ฑ๓ฬ‘4ุ-ูฬ46% 1 kHdf#'Jถ–€,mม@Jkj['GัŠก' Jฑ#ฦi ™ขm ห8ถมl S1l€ดdถ ะจุT‚XูFา6ฦฑhH*†ส Mฅั6Z่ะ2ห`h6ะT+ 9ง@#Xม6R%ิถฐ!”‚ึเฤ‚pZflƒ6แT#bฃMด3'hซลlำ9ˆ…%# ถfฉช9อ˜ต้HpLL3งŠ …l+ฤ` iœ4กŒ:ุ ”0ํฌ‹Eฦถ–62ขJศ”ด†P6'Rˆ๋`ะร‚Tิฐ ฦาๆˆœ[ลยฒ9la6K;Uภ `Fตญ*ถBm3ฒlreE ว cJ!˜FศนEEถด ญ1„„)(งp™m…4"ƒฬH(”- ƒชภLฬ,'จŒFfฆ2lะ™ึ1›hฆ˜sหส–*0ซฬ™’ -ั@็นจ2ถโ4[ES‡!ฒq*lF‡jฤฐฮVœฺ45หTl*3ก„ู”KŽjถห9:€ซ(KLKสๆuด- ˜ƒคL3ƒ2Gbำ0VmŽ`ะF64;ํ(ง$ุฐqิ@อข™Q,…ู8ถฅฺVจ9[Eศi$mร*ฒ%๊` am 1(ฬBF(ฑ… % `ƒJ็bŽสNaห23DvVศ0ดFฮsอYŒ™Qdก[pžšRุฆงู**œ#ฎ”ี€$cSก1Ž๏}๔;ฎq๛7_้ฝW>๐ุ๏\}ํญ7๙ม?ผ.๎x๘มฯ~๎้/<{๋ซ_}๗›๚;p<๔ฬใฟปO>๒ห—{๛็฿ว›‡‹Ÿภ๋ษ฿๘๔ƒO>z๕Šห[๏]บ}๙‰g๎๙w฿{Wภq๕ฉ=๙฿๘ภฏ>๗ภใ^ฝใ8/oŸ—็แ๗^่+_๊‹้7xฯลqๅก<›ฟ๙ฤฏ?๗ภผzๅุ9‡ธ๚Oz๚[?๛ฺท^้Kทw^๛ฟ๗ฬง๏}๛๚=บy๑ภ‡๎บ็๚•ป๎บzฯ}ฝ๙‘๋๛—]๔cฟ๒แ;ฏ]นธz็•๛๏ปใึ‡?๘ศ๊o_nพบ๒ะรฟ๓ป~ฌ{z่wn^๘งx๔ž‹๋ืฏ\ฝ๋สnพ๗//๗่ื~๚๕ฟํล'ฎ=๒ะg?๓ฤg?๓ศGžบ๓๚๏r;บถ๓๕O<๐ไ7^๚ท~ำWo๗>๒ภG?r็#^ปv5ป๖ศc๗_ฝgvบu๛›Wpํฮ~๘แฯ|๎‰_{๎ม'ผธ}๛<็โข_ผ๙ฑผ๒ฟ๛ู฿๛ญwOภ๗๓์วอ฿z๒Sฟั๛๒ึํ[๏]z๗กผณ{ฎ^;@Š!-—sไ€ร™B1ซ5S˜U2œง"ไd:ภจ‚ษฦIiฐุต`SGN˜Q3& mš(fฤ†5 €Kjb-a4Z› 01€$ ็j rฺศูะฆศ@’˜™ลXศ@tž+G";WFG-œ;OcT€ุ&46ŽŒ@ม‚‰šู’ฬ™ัPZยvžƒ$ุ%Tm›c0vZ:@HณUฺfFl"ร, C(#ึ*†M8"'ํ4eึr ใ„.`ฤ0LŽVฅ™l mN่@เไ˜ค™-Sหdณ5 [PฆsŽBณbถลDก‘สฐ„@ูฌย2ะN †ึ€…U`cฒsั‘!#ณณ%5˜ถ0šJ,ถลiฺ9)lGฐญ0Gฉ98wเ ค™E›ฺ ’–`@ฦH˜-ษœ5ยฤvnFK“$ุŽมด“„ภา€26ส–l Eฬ f ‰LX€Bุ&„v*ห0„ตฑŒญ‹0+fุD1หe‚ `˜T ํ\ Šeฒู!jF’lเ8L›*(b5‘ฮPฐ† )@lดดšฺูQภ,ุร –q˜B‚  MTb*ณ™p ศ&ภฬคXlbฐฅ˜s6ว![ิค‹LุfณขqE4ฺ…U€มTึฆ6b„ถ1Jร0kฆbsI8วhiุIQฮ5166ว*ฑ44•ถMกดM‡m‰  0ห ลPยฬ จภ#HXBฦฅ.`†˜์(าXk3sT›MI‹ašŽ,lฃฺ0$06F”ี€คmเ8L9d"\QbVฬfth:Žซื๎ธ๗๋O|ๆษ}๐๊~ใป฿{G?~€:๎ผ๏ก฿๚ฯ>๖ฏฟx]ป๙ำ^yแงฟx๋ๆy๕๚๕วž~๐ฃO^ฟ็žรป€t๏}้~ๅ>s๏•หw๒ฃ—๒โป๏๘แO}๐žปบ|๗ๆฯๆํ›@Wฏโ๓ฯ›?~โ๑{oฟย฿๙แ[ฏธt๕๊“O?๚้O>๙‡๗฿ม๙๎W^{๋๗ๅkw>๗Gฟ๒นำฯo|็[?๛๖nณหห›/พ๙หปx๐‰Gพ๘๛๙ƒ฿~ฺ่อทพ๗—~๔า{ฟธ}๓ภฝฯ>๛ะว~ใ™‡:~๑๚?ใฅ.นธ๚ฤณ๘า=๛Ÿน๋๒็o~๗›oไี[ทฯ+๗=|ฯ“บ๏ƒธ๓0V@ฃc0t” *2ฃ‰ำ2`jsิถ ถ1สะŒ8–’`pš@)lหŽŽ9mฆ:4m ah0็จQUbิ˜ˆใhmฺฦ˜mlŽbฬš3‚มŽฃอQ †ํ€†bถ™ช†‚jวFk– kฒaFะT"‰aS)Lต)[ฒ4c–F0 BBญm@QsาbLSmv.i‰ฉc;7KDํœ‹‚iัึšั$jbาาฤ 1l– Yงe0สฮS ถูยZิ95”ลจa†Žฒaะa6rฬl˜ฺVู‚`:ถm;+D6+ษฆQ™ขvž8f• f€ฐNXR›3ƒๅ8Žf#PณƒษfJูf•1k2ฬ@&qqnbำ&ญ‰ฤฮํ$E33$ŠcƒTV6 ฬ2 หb(ค™bTcSถขภŽฺZ3ฑ‰˜YP…:[`ค:d˜ถ›mV G1iๆ‚จSmHƒ,คYA3•f6+อ6aชศฬฤ˜Q8์‰™ ญกฺุ†œิ •“# ษfFึQ6 :l[rฬ'œ–f•-(™ฒ™ ™ีวฬฆ ูfCชดฃ]žฺ!ภVbตตM(ๆฺ6ˆ ˜mฬ8*ั6%#ƒeขllรจ`f2,ŒL;wkF` K[่ศyฮFR3Zวัถษ0ˆ‘kh+š2ีัhฦ`FM!Sฦˆ˜MFฒุ‚’ิlร4(›aภุŽšœณ)[Žbˆmถฃ สถqศดฑด5p@†at€!€1iถ’-ฬ™)”a$f†ล ™MถZCG†4Dง™‘uฤbcบณ4'โ4ส ŽJˆaำฒaXlซ ™ูLTZlชƒฑM;ดŽอ,FเŠMฬv˜5ธv฿=O>๕ห‹ซฃ+Wฎ๗๐๙๘~๋‹<ะอ๚๊ฟ๚w/ฟ๐6ฎ\นใ๑_{๖_แ‡๎z๏?๕ฯ๋ว฿ัญ“ธ๓้๗'็ำw่Žใฺs_๚่ฟ๚ต๎uใo๖๙?๒‹฿{ๅt๗ใ|๊ฝzใ—฿ฺ้}ืŸ๙ยวไKs๋ฦ_๕?๙ฏ_๚๑Kฟ<้ธธ๗‘7รฏมฏ?๖{ฟ๓+o|๕ฏธ9Wบ๑ฺWใ๓๓woพu้ธrๅม๕ไ๔‘g?๓ไqใี๛/พง๋ฯ^บ%.พ๙ใ'?้?๑ัŸโๅŸ= ภ๕kwญoŸ๐//ผq ื๎โ๑ฟร'ž}๚‘_ย[๔ทฟ วO}่Oี“O?ถ๏ํ…?ณ}๓๛๏]๗?๚oO๑็}๕‡7พ๒/ๅŸ๒๒?฿uๅ๚ิ‡๎๖ึ?หฟ{@ื๎ฬgžยน๋_๛๐ฟ๏ฏธอ8๎|๖ืŸ/๋~ใ๚ฃ฿{้ท๏฿<ฏะ็ใษฯ๚=็Ÿ฿รฟ๛๒koฺฦ]๗|๊๓ฯWๆู‡>|ฐA™ฑ9ฺถฎ8aU#ฆ(›m9คŒc9ชmศ@ึšฃํจ์คีhชแœ€(l„mด :ฬfm›ะvโPb™`Dย2R˜mจ’mlsH 3$3K€ถูฅญšfํ`› [lp08ถ‡8 ŒHŒY™€ฦถนธˆM6ูถ%ี‘KLฑMXŠตaํไXรhbŽ0e†-l’f!ฺุาYฮษg‡‹ใ87ภฌ K†-*lีqTฮำ43iู†Y2[ย‹-€ฉm(lุ ถูPe 2ก h[eษN%saูVต4S8ŒซาดmวVูŒh›#6ญ‹ถ ;วๅดภTฒภ”ต‘ึฐ 6ณ:าเa›9„eดฺb33Uู6ค‚ฑฐv์BlCe้s๏œ?ส‹๛๖ฝW.๖‹—_ฺหภ•G๏ฝ๏ณฟุWzํ๏๘็_ๅว/๒;/฿~ๅล๗็}๔ฑงๅ™‡?๑ฑŸเ๏฿๘๎;nเ;ฏ๐{oฟu ็ํ๓฿}๕๙ื?๘ฤcW฿้็ฟ๓—nมธ๒K๔ำg>๑ฬฝ฿wกวฎz&็;฿๘๚ 
๙ๅŸพ๐.ภญทฟ๚•Ÿ}์ฉปร๛}๒ม=s๑๏^ฺ่•;žา‡>๙่ี+/๘oพ๑๒?<™€ท^‹oผ๚Sw๒๗?ษ๛๖๙ืo€+O?๖วเฝ๏}๗[๒W_~ๅอB์ฝ~ตoํ็ž{™/<๕๔—฿มอ=๖‰G>๑ั๛ปล7ฟ๗ำ?๛?^ฝqเwพ๛ื>๔=๚๙฿`ฮmหd ‰ฃbถุDJ“รฦลคณ”c›ร9Q™ฬt:งChyฎcวฦ9ซŽใโ๒ผmำhด:-!ภิกK*ถuŽvvXงŠiŒDถsG+ึ–Y—c]asIT`vฬFีฮ์จP8ฺๆไX„ะ1 9‘J,3 ฐฒuมfฆe็eฉ ลฦ‰ณฃjPจauN™ lต„F*ึ.็ะyสjฒ8ฉUtHN็):"ุigวลนสฐตด2mXฦ2ซ#ฌu lํ\ฆ9ช —t*ZฮŽqฑต9Gห9‡.ไ€Y‡๓T[…ย*M6;ี–&[อXย•m็fŽใ8XGฺ›8Z‡ฮฮmศlงฦ1j’ฃbถุ””crุ8Jวฮ9l€าf:Žmmฦ‘2็ๅูกœj]8ืัq็นmฃŒชYf๊ะ‰็i:ฺลaK r๎(ฆ1กŽใๅŽฺZpfฮ.G] ฮRฬšb6T;7jqฅ;Ž6ฮ-3Bว€แหัก,งl4ถ[Ucุ๋yYซCภNsVMจชaตฒๆl%QริaํX›ฃmKอ dว’ำN:h;ํฒใุ@ฮํ8ำสดAฦ2ซ$c๋<ๅ,ŽฅvžœำTญi9ำฑ›๓ดCs่จ\ž‡ร&ƒชฬŽชณ์Rmij›g…ฉ2ป<ท9:Žฌาp2ไย:4;ณuฮแd:ภJชƒูmŽส1ษLi8::.ถaQต™jeฺ”#ณsะฺ%ึ็:ชฦvnjsคFF8˜ฉ˜F5œ:8‚9—ฒกฑๅ8Žํ\ยVœ‡ต.g:8็’Ÿ ธ้๙=ฬผz~๗ฉ๎ช๎ถ;'ฦ /‰`KฤnFbลเ[ฒEโmล $vˆI41™ ‰ํุํ~ซชฎช็พ8็!0ุal(6ตG-”1ทj‚ฑ;–SbZฎ5,ฆา ๎ฦด-ทาaธถฉฆช†ีฑอ&”sฐ!ฺX{vฮถE5Fuฮย์ฅsฌํnทZmjw๗4 W›c,6U#W›ํ4๑(ต;ฎš3ฅ5iำh;ธืŽEงŠู"ูU[2;ุถ์ชญ%รVsฐU”ปปำ %iหฝไp7ฺw2†M#e4[lNษqD†วค[`ว&ชfSuฺึŽถmููๅฎ'“œา๓v;›“ะ,แ0SH—Šm๔d“ตTํ๎๙ูyฌFฐ ภ๘๎พป/฿ฝ}8็๑้O?๛ำ?ษ฿—๙ฯ๛ฟฏ๛ฟ๘รภyz๕้Oำ๐ล๙่ฟ?~๙ฏ฿ะำ๙๘/๖ฯ~๘่๙ป๛๏ฟ฿่GŸล/ฯ=oอ_๓฿_Œ7๖‹๗‹_้Ÿ๖‹Ÿ}๚‹?{ี_ุ‡wo๛ao~x๎ํ{<๗๚๙ป่ฃง?๙่แอ๐แร๎๏€?~๓ฟ{๓ีท?๛ๅซŸโ#๕ภyy>็ษใc›oฟๆ‡วซฏpฟz๛อw๏๖ซ>๙'?๖ล—p~๙|๖'?ูท฿|๑›o~ื‹Wฏะ‡ท฿ฟ๙รวŸ์Gฟค๗ํวฟ๚ลg?ูG฿๕฿๚‹฿|œสึฌ`a[ช˜ —Gvท”# lญfถษ}Nw:ชก“ว0์>?[–:•ํ>ซu š)ืdv,Zh๎๋A@•l‡]™ Xz8›u[Dšv้:1ุVษ,ฦ!ร๊็4บศสฅ1อต ๗n ฆšYf–โดฒ็vถi;๗๎คณmI–ฤ7ณ)๑Xต@mŽฮแฦfสฝฮ‘KCๅŒs๏าฉ›จUZf •šฆฺDใั‹XXฮ้™คh;ฉlwL‹eืฆclถ{ฺ<6RˆภHีฒ;ีษnœ๎๓UubLšuNkŒ,็ZซFญญmmซ‚mkgžฒ$ e3&vv[;๎๓PุWuTG‡ณ–็็M%t2ซcปj…ฺฝN5ื&X;6ณภดyพคสvmg zศZํีูลคS‹ูVฉmะถฉ‡ฑšฉKTš[wŽ•Kำl ็lฌ‘ฉถ์™าieืF$์ป“ฺะา่ไฎŒัุUส4%ึฆ:1ถนฯN+wขp้ึqฆvgœSŽ]QซๅธVb€yดำ’ˆ้๔ภศฺา]œบ9GูN*ณmึM‡T6f‘]=™ๆh)๗^‚8%็ซhยะlS3.Yฮฬbสดณญm เ๎vŽ{uา\SฉA ]ฑ๖|vvll’็g%”ึQ9ทew3T์œcฆŒนWg5Žิ์บIF–`mkถ:s€จcป†™รูึฺ1ชฌmZงณfถdฃmS#ซ™(ีฆ๓ฐrฎา™ฑอถ)SM!rทไ”š›ญูู†จmNFฅ5ธiูUโศิใˆต้‹๛์คŒ\ZัโณํtสaŒSSŽK์ศi€›๕บ‹ฃ็ ํejจถอํF Šอp์ฺิฬ<ฎ#!<บ๗ขsิ‰-UŒ9˜4ร๊๑h.Y0สดcฆb๎žุ๋%Šป‡ฌอฺู๊m๊ฬv‡ำ.ป‚ชLzสkmซรฮษZชv๗|ณ๋”็=งxขY2โœPฬ~o~?๋/๗๛ีใ้๑๊—ฟ๘oี_‹_ูฟ—ว๓๘ฟ|๕็้Gฏ~๒ฒฮป฿๙อทภ9}๒๙ซฯžฮใอฏฟ๗๚=เฃว‹ฯ_่#ทท_|๕ ุฏฟ๎ร๏Ÿผ|๑ษgO\ภ`ฐmv7ฃ `w฿๐‡ผxq^่ วำหŸ|~ฮำ๙ลŸ๗ฟ๚ี›ž>๚“Ÿฝ|ีทO/O€~๚ูGฟ|z๑“ŸWอ๑็ู‡€๓๊ำ๚‰wž^พโฃ?๔ลห—ฝ๖๗_} 3pL6–`ฤŠป)งิvซำ1ุf๗hห %ฦ@šj`ฐถปs’ปfวฆjปCฑ-ํ•FRs‹›‘ต{Jgฦ9หu[uekืษŠŒ#m‰ฦšP๎mvN93ป8™dร*๊Œd›`-…laฃชbูฆb ฌ\ง,cPฐ{'jVอ6@›{=ส.k‰ˆปu(l-K3Jฬv็Œmฐมาั„ถ {ฮิjbulC•4์rฐ)า{ฯ9ตฑฑศ$๎ฦZ‰ึlD–ฉ็๋ธUฺ˜Uง€’t๏=+1@gนรlF‰–ป…บcฺฦ*9ฬVูlSย๎f6)œณ{ซcฐ๛|4จ ฌ4IอmปNิro™ีษ.VฬŽv/šฐN1อ6[evoTำง‡kV›ถรuฒขหอ)ถd วฺvปgอ‘์^S3ิฆชm]kQฑ{‹!ง™9ล `ึฑฅ02gwุมยัlราv—†;ds[5%ฎm่Pฦต,ัจ˜9meฃนwฑThญฉ๎2์ๆ ็žl ณฅ ี€Aงํ’ำvlนŽ๋`›E75c[ฃs[[ษปฉ :Bgฯฯtฤ0œว๎"flช\+„ฺlฺ†T0ถXFถ)aืฬSช ฦ์>'คฮ6Xฤ*ƒfm[tZž๏9fu`้๎นNŒ%ฆ1๖\˜aงVeิฃkMฆษฝ]กๆ mนฉ2ศู8fสฌmkวQuWฑiฐฺtภงsw—ภษฃุ–r:(3(ฒญ –ูdฮ๎Œc6jญmdปwGุ&„๋:'U6๎'—;ฺm๋dl5ตฺ&vราัดfbr5ฮN›RQู#lTm—Tฺฮู]nว4ุ๎–ตฐpงญ:&ซVY๎* ์ไ8จ™ ถMg›93ๆŽ:fิุี6„bƒัVฤXฒ]เmฬ9’ๅ8ฦฬ r”4าิ™ d๋y^H์ฮั,™mqŠqษฌ3ณ๕Tฦ6€4ข ฝ~๛๎~sเŸผ๚'๊—๙ไŸ‹Ÿ฿๚ซฟy €๔tžย๓๛๎0ฤ๙๘<โ๙~ธปœO็ฃp฿ุ`๗ร๓vำyv็N้้€NOzใ?~๋๗_๐p฿ผ›ฟ๊€ˆwฏ฿๖๏๐ื๗ปฏ฿๗W_>ธ็้ั9žŸ๏‡wฯf3เŽ BZKฌˆiื‰ถE,wm4Y@บ#YKSJš6H# ฐษ˜่T[c[ •ฦฬ˜j ฦv+FZ% ธcคQ6ตญa3‚-ฬ„ำฝcR6’4SvฬfŒtlŒจ5ข6F’-vššู‚!aปใิŒ ึ\r`v-ฬ`ฃŽAl6Aฐlกญ&1etbุ`Xำ1ถAw;Q# ถีฃb6V$› ‰ยUfZE1’m#fs†! 
-ŠŒv‰fนธ4ล6Y@‰jจตšฮbโฐฤf6ๅาฆ TuฺŒ!ฑษูศš$mซฉูV3“0MEs;วชY"\+‰)ltฐQLˆ˜๎fC3$›T&]‹ิฐe$ฐฺhemHตAมbk,ฉ%›€าุ&"pฬลๆาŒShถI™์€2ฑˆขmm ลฬิตจ0ฺ.—ึ 6›”l›FJbš5Mฅ˜„„รl `จึ2บc ูษ2 Sl[ƒฐฑUจถAkฮชษE…aƒภˆ2€1จฮูยุV(›vด…ฒUCุึ*™…††EฌYŒ5ฯชj คbl2๓L ฬภiw5pภ#Hf&1H’.šล™4ดrึ ขูŽอ"ุ”Xm7@0%ุ5"ำr"6hH"“™ฑคmู…ยปซIZJ!nLJ‚ดY1L˜ฉkM%ฃ!›]ต ณFชมุ$cดกี‰5ึF X`จ€$b๋^'šถX€60Œ0ีโฌ…ฅ)งblH#ส` ษSฦ›ปษมฦ`ธฏ_๚ื฿‹ŸวัวŸ์“?๙ฬ฿ผ…หpํํ๓๛็qžžNภ๗o>ผ^|๔๔๒ลใE >็7~ธ:OŸผ:G œฏ>:งฝ๛p฿ฟ{€๓๔๑‹ว‹'wฏ฿`๗>ฟ{z{๎฿แ๚?๛7๐ ภžŸ_ฟy๗ฐ๏฿<ฟw๏y๛๗๛้๚`ฯฯ๘อฝž?|ธ๗ูำ้ผx๐ 1ƒ™ไh„Q36&d6ิfwม,t็ดŠcb FFbฑป[†ฑœ]ืี*ก€+… `‘ฒ Kฬ†๋P่ฐจcdccษfจ”อ=ˆัยe†1ถMม ฦ*2U`a˜Eคv/2Šรˆ"[ 0ฃซh  ุFาอๆ)2ฑำHLpGL ‚j@`˜‘cดb@rbฦf‰B33XX€อrฐ whม”ู ] VถŠ f+@‚`˜ญ(ณ![ถ `ำ&j ฬ น†) ฐปQ6–ฬt g2ปำญ06ฉ!ศ˜Uณiบ่HŒ!)ฦFR๎ุlไ(c‡M\ึ€M’ูฬ †i‰ŒชFดมf‘*ฬุถ-อฑ9%dceˆฤฦด9ึ3af#CรH‡Lจล@pGL`ฺจุšeื™š"ฉุfkฦฦฤ`††คู6fณตกยV™aPh›ƒฎ…4Xก` [ฆhJf4ู2ร„๎0ฐlEa `†ยE‘-ภถM9ุธikZ-gฒ™นfZUQ#h††ฃ‘ญfฆAR“Vกูek3—Z'มeฤยŒูภเ'ื †ฑŠ Lฬfถป›ิ@1[ &6ฦึฑฆ™Y1สฬ KƒEรH้l‚ZlF$]lตQYถf132YGAŽpgS `lะฬ`hH˜ŒตYNcาขƒjwขf&(cฐ6โhดF›&dถฌŒป`–ญฆ dืLา`„"c‚;ไ`cฐถญฑ:๔ดYณbF0๐8Uย><ุvฮy๔ˆ เ>๘๎อ_ซ>ษ‹?9พy€S•ภ๎๖๛/฿—?๚ไŸ๔ใธ?|=ผ๐ๆ๋๏อ~๙“—ฟลห?ถ๏Ÿ€งŸ~๖O๒โๅำ๛/พ๛แ›ฏžy๔้ง?้ว?z๕๚๋พ๚;ุปฏฟ฿ผป๖๊วŸ>ž฿ฟ๛โทo๖ๅoพ๖๙๑โีใ‡/พ๛โ=}๛ทo๖ใ—/>้ซ‡wฯ(=ดด6@mvƒ‘ภVhอDl–hŒ,๎ฌ‘ข@m€ฆY# ศ0,baธ+Lญ U3Sฌฃฺ๎H9(w‹mลขXื †Mh&ƒ6 :hปคูฆH` [1ˆ%iV†U†``afP&0ฃ„Uฦ6$ lฌš`ึRณsI›ฺุ่„ Laf#1ปsฆร… `Rณ ร„ั ำกด๙ ‚“็q๐ฤฎหฯ๋๓u๗t๗tฯา={O&ษL $!!,Š,โRjY^ฌ๒โษ*O1–=Y`yะ…byPˆ&$2 Yfห๔์=ฝMo฿ฯ็Y˜UpW-L™MAดนCํNf‘.  ถm•ภL)ุ ส†‚ ฺ่ b›ศฦF„ะ#bลฤŒฬh…`มฦอฤย †-cฒŽล%™™ดqTถYส™ib†…f`ƒธ(4mยฆภf:ิถ•{๋hm0…‘,blฤ๋nฅ ”eC`idfAฃlŒชณ lMปุ๋ษd k๎&ศavฏ‡cุ–š•ภ,Rษbฒ3˜สlคถZเณวป'งัดดฅ !e :ฬ!ธ๋!ฺFHe4@ล6Yฐปส’lŒHห0ˆ @#f€‘`‰ f€aึ1lPงอ‘ภ,8 ุd7šษ&จMฤLศุ œุ\ŽŠน$ษBL1ฺvr YŒ Aกกu7ณฉfi Mš23คQšMZK‹‰•ญqศฎ(„Yชm”)fภ ำุใฤภฤอšAe”ภศถT X "ภ,›ŒQฌฒญ’mใถ9‰1ขฅ-&ฃ$ ปฦ9ฬu"อ”ถ14ฑ9ฅ™Y“UfUู˜%0+fึŠa p,k6+็(6ฅ–๓ก?3_๚ศง^~ๆษ{๏ฝ๑ฺ฿}ุ๏พ๕}|ๆ้^x๚๙็p>๔ฬK๚Cฯ๗๑w^ฃ?~๑ฝ'Ÿ{ๅร/๒™ง้™Ÿไวžz๚™€=wฟ๙ฝ฿๙๛๏์ู/~๑ฃ?๙ล็ž}*€ž|่™—_~๎c}๚i๏ฝ๑ฦ๐฿ผปวง๊Oพนฯ<๗์ำิSO?๓๙_ฤOพฬำ๏พ๙อW฿x๕;pž{๎้็ž8=y๊“?าOฝ๒‹็๏}๏๕?๚๚#w฿๙ฝรฝy?๖/}้‹ิ‹Oะร3O๔ใฯ}cฯ<๛ภ๎ๆœsNช๓๔Si?๘ๆพ๙ญท฿z๚ูฯ~แๅ_๘ย‡ž}B€ฮ๙ะsz้ฯ์รGx๏›฿y๛ตพฬ ๚ิ็?๖‰็เ<๕ิว?๖์ว_~:4Acฺ์nWSU๋Lป6ƒ fซีึ3คค‡้u‡ษฒฺšฦถห”‡Œอตมฌ๖pœQซญฮ๑ 0ฎmw.็tr่8งชำ)ู2ทู๊r็ถFๆ่ค8–;6c[ำl3Yอ๎vYง:L้X5 ˜ออluาrmcรvึ™(jก9ี!@<์บทหRŠ#Klhจ*ฅC`6›m išŠV๋!‡T”๊ค0ญ$‡ŽsศฦR{ฐhัฦ–™m.˜-+s7†ฑml&QQชาฉR4ฦw๏๎6ซา–kZ[)ƒสดภfCษ,ฮtk…n-K9ำภบLd6ซRj5‰0ูฎ&ีY้lํ2šจmซีึn[Cส9ชi3HฒCฒถฦถ1ฐNม˜1ถตใT˜•ZvV:ืถห•œN;งช๊„อ]ฝณๅ๊ฮํถ@s8้กwl6ตูilปป3ง J‡ๆnถภf™ญJซห`ุZgดLœt*„8ปํณZqdMฐ%ชN:๋ำlm๎fาœญญช”GGE)‡ขi%9œฃฃปฉ๎!™ษfรถม–6หf`ถD•*jB‰ฦw๏๎6+ี–‹ฦš$T0fฬ†“ำฮtต’&Y“3M˜ฑฐ‚ๆฌ&bLw๎ถปMิรJMถ1ถฒญV›ญ=FR๋จ8wmˆ*ูฑdmอ์Ju8—ฑฦ6[ว9JgYซŽฃ‡e\.WtN[ํไTีQถmmํmห๊ฮmท-’yPํOWฤฟ๛Ky้c๙ุใ{o~๛gฏว>๙น?๑ู_y๓ื_๊๏ผ๚ฝฮy๒๒—^๙ซๆนผ๚/ฃ?ึo|๙พ๚Wพ๔๙O‰Ÿ๘๋๗|เ฿{œ๓ไฉ~๚3ู฿๘๔'_zxํwฟ๗ฏฟ๒ƒฏ˜ภS™Ÿ}๙kๆ|๙๕wW=๒'ึ_์ฯ~๑นพ๛ํ?๘๏ผ฿๕ฏ}๙ฯพ๘ซ๊ๅฟ๘ซ๏ฟ๗ฮ๛่Ÿพ๖ฃ๗6๓๐‘Ÿ๘ไŸ๛ล๐๘oฯ฿/?๙าหฟ๔ซโง~๎•Wฟ๙ฦ๐ว?๒Sฏ<๛“wลฏฝ๕ีฮ7^}็›_ส๘สsี฿๘/๕?ล_๚ฉoฟ๚ึko๙ะ‡Ÿ์Oฟ๐๑gฯ฿ฺ}ใท๓ญG๚๘—>๗Ÿ~๚ฮ๐ฦซ฿~แูg_๙ย‹{๑ฉ‡ท๘k_ฦ๓m€๗฿ึ๚๗>S?K?๗ส๒_ผWฦ›ฏพ๚๎[ฮ‹แ•O}่๙gฟ๑_๛|๛wพ๓ฟ๕7฿ัG?๓ำŸ[้G~ๅ/ฝ{^x…^ป฿๒ผ[ฟ๑๕?๔‡“Ÿ๘า/๛'๒๋๔ฦo<๖์3/๊๙—^zฦ;o}ฅoฟ๘/~๚{ฏ~๛ืใ…Oพ๒แ_๙™OG๕‹๊+฿ๆ๎3ฯ๘ Ÿv๗๑ฏพ๖๚ฯพเฺฌŠฺ\ฮ}ผD›แ‰8่l ๑u๎™)isฑด,ตฦบvฮษ`จน= ุR9่^mL๊มpฎ•fา9]*mํLfSฉš]s—ฃ“5์ษaฉลๆ %f๎สดด๕xo‡Xต๋ฑำ–9•]Wt๗ŠsLq'#๊ฐํๆ4ebIc;Žต์p๏Tฺาศx\ฮ=Chแƒvxภ๎–ถxุ.ซ=ฺƒNfhิ˜ฒq7œu:ฮ:๎=ฮ•™ษvฮ6m=ฬL5๗v[วQ{œฆ&ิูšตฺV3mะ์ถร‰ฮๆ^ณ#gH{Xป้บ,gีษ๎Š ฐSซ๋๎^๗ฌ=nG%Jซ#ณhSณงงe{:l=r๖xฅ™8ฺH”emอ9gืฐดาดฬ=—“2ฉ3ี!llVว-ดk:O fะถn็ป้ฑ:ฺ์ŽaฦTีlถญ•s.vฃt๓ค%ถูl๊ฆญป1Yต๋V;ฌSiฏ่ {วงฃm7uร9ฆ]`ึX9ํn*q6ฺ–ปด6Pซึย#ฺน๗ฆ-ฺfส8ํัR]:บL;ณๆกg1 ็“ํด อsOgjเธns<ศ๎Œรyxฮ9ึตฐš์!ฮูตk&GZkซccใzฬYงรlรQ์dŽ”ด๖วณvaง:šrฏR3ปGฺ๊QšกฺrZถอhง‡lฎ์ถ"ณqtเ ‡เ0ญ{[sฮู5@ฺฺู1k—Rฬ:\I›ๅ8ฤŒ6ปbcvฒํึ‘™วฺ้เ^ยaปทN5›mš:9็BgRs4bถ๋๑๊ดu7ถH™Xc+ง๊h๎์’Nw=^(f,mงฃm 
ซ9วดห6[{ุ™๎D9\ะน ตŠหc{B๎ฝ œmNg๎i–Zn๋hLณf<ฺ™S•`๒ฐ™ษ&ง ื์ิ๖ำ๑่69ี–;gž๘_๛สส_ิฯ|แล?^x็อ๗~๔ƒ7๙๑๛ฯWŸ•ฟ๐ส_๘ๅ'v฿y๓{๏๏๖›๙๓๖?๖™Oฟ๔๒'?x๓๕wพ๙ว฿๚;kฺgหฟ๙สฯ~€๛๎kฟ๖๗~๛๕o|๖ฯนOฬžษ/>๗๗พ๗ใwพ๓๛฿๘๒o|๋_ึ๗ฟ๙ฝky๗ญ฿๚__ฏนO์?ษŸ~๙ณว}เํ๏W฿๚G๔ๆ|ƒ๐ื๐ƒ?๖ๆๅ/ย3ฯ=ใ฿ฃ๙๕๏ืฟ๙ฟ๙ƒoฝฑฐํต฿๊๔w_ฬŸ๙ล—~โS/ฬ'?โƒ๛ใ7฿๎ฝ๚๕ทใทพ๛๛_˜}็ท๐|ฬป}๚‹Ÿะ'>๛๐ฺ๗฿๘อ๑ํฏเƒ๗7_๒ฏ๎ฟ๕ฺ/๊g๑็_๔Oผ๔๙‡ฝม๛oฝ๖ึ๏ฟ฿๘ํ๙ญ๖?|๓วa๏๕ทพ๖ฟฝ๗W~๙็_ยŸฬg฿~ผ๑__Wo?๛โ'/ผ๐qจa6œำ้aณQู2˜5์ตf๋L'ูfณล๎jƒยํ6]ก -%ฅ๚`‰ดVทLี๎cตBv‡ฮ6S™†›ุ้ย„sฌฺฬ ญs6ถ0HบvdNฦถคG7M0’:3กฤfS‘ดบปวฎ lืN็ mv—Vต, งฬถsฺlƒ4ว‘ปaฆฬๆt‡ูอฝŽปำแšHป›๎]wฉตร›Nภ†ปP ็ถV5ปiด“-ใึ‘ั๎6šlเึCอ6Tฺจ์RN้ดHยt์ขณa7)ุ†ฤฝทช Žุถ“ุึv•คlศl ถ™p&ถYฌษถตeุคฺ˜ูTะ๎–ˆ:ŽY๏ฯแฅšE@ญNcะ’a9ี@ณี1ูุึ”i`ญP@ุ’ๆณ7Uณ)jtท#]Cรd$›คœš1ศq๎n2์ntSU6ฬทช๕`3kNวถญยฬศถTmุ;›ใ.*ฺฒ‹ซโย2%m6,แdw]Y™Œjถ้0vท3Šถv4ล์vฯ\ตาฺŒ*ฃอ xฐูดจ6ณ]Um$อฦIว)C บปสb7ป๗ˆ`swรMชฺถRวfำ๎U‚`ยถˆูdฮ–ถ‘ ’#ดm #lb€ู6ุL์:็ฆฆ‰GฑCณฮอakS๋dw็ณfซฐลb6่ฎฌ6ธญP lw•Vlซู!0b็ด-1๎quFVm““ลL9ฮถh[vท-]Šคํฺใqศh๗”ู”ฒน๗vlKด˜แ์.p ษรไr๎Œ[กุลศtr„ฦูร\ิถรฆM”ŽF ตto๗ฬU+eทMe„;@็‰;ฆKฑ{W‡aชูๆ$’hฅตฉa่<ุอvEฑว{รvกีtฺตก’wฮ๎Dฒ›ฬฬถp6bXGถษl“vWlbh6ถซ‚ฬ'ญN‹;wR`tฉY้๑ƒ†jy05‹M~ํk่ฟ๕ฟ ่แรฯ=๗สฯฟ๘b์>w~›ฏ๘๛?x๗ญท/8ฯ๔น—_~๖ฃ/>๕๔๗ƒ๛๎฿}ํ{o}๗ญ‡—?๑'>~zํอฏแ๏A๙ฬว>‰=}฿๖w^ใ๏ผ?๊™ฯฬG_๚ศร;?|ใี?~๓‡o๐ไ้—_~๎ฅ?๓ณOฮ๏๑อ7W฿~๓<๗“Ÿ}๎ลg>๘๎w๘ฦซ๏ เ'~v01๒Žž฿๛ู39Žƒ”&Hญฤ!ฎ‚wยถํuT•บ*lXฐฉzจ‹.*Bว๖ุžxfพ๗้9@Ÿ|๒มgŸ๘้'๏ฟ|dฯท๏พ๎_๕ทฏฝy7€^~แฯ?เง?}๑มห๓8v๏›ืoฟรฟ?x/เผx|๘๒“O^๎๋_๓oฟ{ภ๑โณ?๙้ใอ7ฏ~›๏พz5€>ลgฟ๚โƒ๗Ÿฏ๛ปWฟ[็|๔๕ท๓gฟ๘โป๗ำWo>๛เ'?ห๓ํ๓ปo_๙ๅ฿พz๗๖่ณ_~หŸฟ|ผแ7ฟ}๕o_พะ๛๏๚้‡Ÿ}๖มO>zผxดฝ}๓๎๕oพ๙ร๋?|๛ๆo@O>๘Oฟ๘๐“ง๘๚อ7_}›๑อ —/_|๚ลGŸ์ล‡/ณ๛ผo~|๗ซ?~๕๋ฏพy€๓ม‡/๖'g/>xq๛ๆoพ๊‡/ฟ~พ๗โร๐Ÿ>~๙๎๛๓/ฏ~๘๑^ภ฿๕฿๊งฟ (ถ-ปP‰9vIฤm- wael˜ธงlฒMตl"&iฃธ†jy˜†žซฃiดฉซฐŠ4kณ‘r6™MึศjSlฐตdqฒ2˜lฉ๎(ุึylKŒ&ž{& :‰I‰ าl–0Lgv-ถ fซ“Mƒ›ๅจซCsYCJปwฉฬ5:อ EB!bv1yิuOUต=+ เชQัฐ;K†˜˜mอ:ใค˜ฃสฝUI๛LuXs็Z’bCm๊ฐนๆtฒ+ุfD(รฒ9lI'ป6uOl—ฅูึฉlh‰0Šm›Jฬaค๋Rไ3ฌŒa๗tˆ™!Tห&สFตุH๒œc!#ถ‚%ล•ญd“ล`‰1›ัรน“ูdฌ6ลึฅณฆ0ู่ดปAมถ:ฉ0จอt๊awCก์ช4›Q`ใฌ]Kัvaถœฺ˜ธYญข]:ฮ6&ดํ๊ัศดูqLQ€ป๗๎“S›ัะฮcwNaภด9๗: r˜ฎฺN=G•ฬL‚ฆYฦีฬmปieใฺbŒ$]lุ๊ฬX  l<ฒ cnไm`คbwJbฦึ๊h›ๆFRณg6‹„{oE™`‰อ“nvR64ณหA(ุ†ฑ ;’˜ƒฺ6ชv'Aฃc๗6+ ุะ`!Akfœ5ะ&+ฎ…ฒ1eŒฉ$dๆFฃ{' …l†น9u03-ฌ!aA ร4ฃ(342ฎfาคฮl{Vdฉธwหะถ็ใ๑ุ ‚aฺtW[‘ร4LฌmทฮัุItษeƒŠปํ&’ฺฆkภี@l—u6ืXCC` cw๎qbะ˜eTัl[โB%w“#ฎenC๊0้n0P5 ะ˜Eย๎U”A† ”0”ณถญœlใฐmTฐ bd7hช ฃŒญะฌฺfซwทฮ{Œฺ&วถ*,h„-Š-&-V0ย([˜lw›Ž!Cล` BŒภš™ขศ6b,%ู4Ž ฎU‡&[ฤถF2l6ฅ€MFG\R H4l „ภ†ั”UBฑ]l1’Mฒ˜ู\ะi‹‰4f™ J6TŒย6ฎธ๎v“blŽมfิ!bถ(!ƒmะA(rd›6ฌำฦ†ฐl ่dƒpฺ„ฦ™ณ ุถ‘ถ*‡ …4baqํ` ย3บ1ม(‚ม„ภ&DH#ะau๏ด„ฬ†L)ฒ•bˆAlปWวภBeั–qะŒภ 5ecš1"(สต[qL&›]ข2l6%„ Kal:fอ`›LถำVœQšmณHP[ฬยZฤ\›ขkษb ะL$ุBษ@ฤ$ถํjดmฌ›Y–T" !3 SEถ-+›ย2k “‘fl™sฌะ@€อ1Tสฌ…ด@Xูึ‰ัXต1bฤ`ky 2HH™ฃฑmI9l–ู2#ศ3˜lsฏฒf…ิฦฦิูŠฒ•อส@i†‰’มRd+› „ฒอFค"˜ฉข ่tmcณDh16ŒถีDAb์า$ฐ“c4wฆ่nษะบ„0 Qฒ”3:fปkieyh3jmฦFQb€Jฺ˜5‰tB“)4ต)i5Nfl(LภาlHฌศH–4CXู†ดAšctsๆ`“››“€หd$‚1c.่8ณ& ฺ2X อญ˜อะ€%กฬ6ฆ‹@ู„fไ๒ˆKm Fยฺตt’ญ8)P#Y„PƒกCชˆBคTยšดฌ™ญฑmŒAjณ(4fC อดReƒVศp ำฦฦFชฦtea ช`ฌFSค-ภถญุ6˜1€ภภl6 "@l "’ฺ@ JรฦfS[J0†ษ €U4ัIฑ*ภNKฑvl˜ฅสุ lj‘ lD†”š]„4ถA`ฃิ`ฐBBcภPแฎ` ยถปฅ`A—M„ิIถ`6ณ *lh[$e…&RฉYฤขู6›;3†Um7ฦfฃะ f(ฬก›V0 h0 5 ฤHใ2Šฐชc5*Kั@ุ6ุ ฦอ€lpา€อ†ูl@ะlM$FdUb6รุlŒตhƒึ ฦฆ:gV ฑ–ดRธc+ษ"FดmSŠด1‰1 ฐ5 U“ %…PSj`ิม ccยถ2Œ u’อl6—ฦPฑกm0แHTj†ภk6ณอŒa๊lหฦ0j๊†อ•  ฑ-คLRbต’@ช–ฑบ„ฃจฦณฐ0`HภŒlFภ.0ถุฬด ดŠฌ"ณอ&[ฒB `ร A 06้œYŠ€-ิฌVป&ml m3 €ํ$ั`P›ปก“ิlEaำ€ฆ1ฤฑ…(šศ4๊ฤfยุ "fฦ@ Ueู†l6ƒตู`รู8(ภNิb ตC3›อf\ณU˜ลฦฌ&ะฦ8M(›จ…ฒh…›F‰š`รfl ๖๖อ๏{{๊๛็0c-ะศF*หถ6ูฮ(]f'อ˜5TŠ]Œ้ฐf,5ื!ีุึl;ตmk”h๑ดฐmาc“]@N็ถ]=Žลf•๗œuwว๎ฆๅค12.%›j•=7@คQub!˜Kจ$•™*.#k–2ื‰ ฑYUmDถฆ*ถ2lฺะดvฌ%ถ•vDiQmถซ๋ถs†นS†2Ža8ZหุฤIgฃ›ฮ ป›TAณ032ฆฆู’9Aฆณง“ #Ž„๎}Zณvซอ˜%ณฃฒฑูๅฎcงมฐฉ\˜™k'ณถLีฑุZถ\ฺH0CƒtษŒ:์ุดi[S kฅ จjฆ„M!ญcfhฎC w+ณส6VMissทZ็ะฬ†Qงf:‘™UZm›kฌ่™a ุฒ๊žธ›Sฃ8'รAf@Iิฑ;U6ปสšฅฌมฃคmวf•Xฃฺš้ 
T†mร„ุดข๒ุvทbG–’UๆnบอคณอศPŒ†ํฮIK.‹9œณลขฒญ3ฯm7Qa:๎ึ5ภฺถสลช*้์ aณ8lฝu4;ม`6kง\Kaถฑ๎ฝ็ัRด"w2K+œำู5ฬ$lนชŽล0ร-Qฐ‘Y้ยF-ฃ]ูขฉtญ fจŒ–โZe ิ\Ÿ 8Fขmหช+˜cG8U l<๚ชP !่D™’๓๏žส$ฆ)Vฝฝ™ Z‚‰›m๏=8ีฐgvršูษY๓*Ÿf^•0,ฎ๐WlœA {‹GN3ชบุ!3wซ™6f7Kตฦ๚Žjฟm:bmชญ๊ ณ ผu7ซ5uu๎้MฑHœฌ2ณy่ๅป™G†[ูŒHหbŽŽถIํฝŽ7ลำgณอHฦ[แฉb)1ฆถš’=ร'มๆ๗๛๋๎žE6Vฝwš2Xลฐ๖Vsม้”‡ู”L฿u{˜หถฮSสฎa3ถผ-ก`–Y1=ฺLฬฺhšขT3f† "๑ฐ0ƒbต^YVฝSิ(ก)lุŒ9ีdฯ๘๚๓พึRUoฏl’T๖Ÿ๚?๛ฟม6`pvผe‹$ี=ผบT›แไ๗๖%ฉจ์ฝงwwษ]ฺ๏ีถBDfฝd‘ccอ5+a๙บ๎ํ9{จา~฿ตฎฑอ6็ึo-‡ฤnfuo4วดง›F%๋† ฏพRZถัื2E›ๅmวR mlว˜ด๐80 ฐ^U0fbHpฺl;{;=ฅรwฝm[‹ฒผึxำTหขF๛VSŒี๛0ไž๒*c๐ปถ1ี—ฟ๒งeVร^]†Joตญ‘Yฟดkcxฎ฿๖ต–๊ฎz{e“dm~W๋ป7I-ถํy{re4€m@om ฺF[ปฑ=:˜อ4ฅ๚๓๑ผG๋4ตฎ— ๛mใ์ำw0์ฦ6tอT๋น™ีdณท๕๖!wูถต>Ÿ฿+cCลึr6›5[๋ซ๕(Aฬ๖sทX2/ฉ[[k†fฎg\t6๗ฏŽf๖ศบMี์7ฦVvŠกMฌyP›RถฉํTถ7S๎1x—ๅ )a๖vฃํ6uwฬlฤZjฉํๅ)ฅถ๖[˜-œ๊{ฏขอ~›Ÿw๛๓ีฑม*๏ฝvจิ๑šถ=$ื๖ฃฉƒ:†7CŽ–=]๊ํEำ๚"}ฝ๖อle mjซฬ0j3ห6ฏTf‘๖Kคฌ™]ทีฬ&ŒPmoำZ‘ค|5ฺึRLซ–ศ`™N6ฉอๆ`h[Tผฝฆ0๋™ุF!ƒฅ6Zช5†Q…mบYลˆีlห๊`Cฉaฑ๖ป๛ำจ€1ฏภ€ฤT้ถIi[Œ[/rฏตA‰mซƒw+ํ03ยS›ป6`EูXฌ6ฌชฉMŒmส ดฅ๒จgIc ฤบน`S˜ดDX,Z`‹Eคฺ›้ชl๋ฝ]SสุP7หž*ห–ภbš"ษฌKSฝ ถyreญ-#eอถjซ ฯh–ฆoี[ฃ&ฎาW–ภŒศหฎŠQ'๏นaภ๐ภkถฒ๕6ุหมƒย6u/ฌL- ฃ 3์f,] V1{+ถ"Z๐๋ฎB1“Lแ ้๒–า64 0ึnฅaƒR0`4ž๗ญPšุžฺT1fEูิ˜U…ู™ ฦ6ฑDซ ˜ฉg‰16Š€คกQ2aายd๒R*6น2ฺ๓elฃšโ]ูฒQdAo–รา”7ˆmำปพxฐฬ–ฤšQhำ๓6‚`oN5oญณ'๕UBmk[Z2ฺขeข ]๙=‘รfฤีถอ*Oอšaุf–lŒต์^ฬ6ณ›ี,ช S1X*ถE‘ oOี5*Š*˜)ร›rlb1ดf›m†V[Lš™•Vป™”fŒ˜ญืพYlฤ๖ จ์5)ซYฯIถGํฯ฿๑ฏ๘็๛'\๗๏๖๏๚˜EZe๓6uุ–ๆy9gร๓0qีฬฌ ’%l ญฬHc˜^฿ู@*šอVaซ.ณฑ2ฬฆาจ&[นl5$jŒฐอ ฉko›Mณญอฌ1•fณฌ๒RŒถฤjs3lAส@อผ@ฬV@–%อ@aณ™PUh@fc-H๑ึศถuดŒQKc#ษfŠ` 6]C‹A5[ู์้„šู‚ฉy4™ิ#ฝ (b3S•aL ฬXeดfฌม 3[cื‡XJk๓rฺ๋m&u%˜ก™ญฆPต23’มzืm#‚!dk{ป‚ฑฏฐmูbภ้Qัฬ–ัe 6„Rำ lณํญ[ีุถQลถ-Tฌ ซl•ดm(-ช`!6,1L†l จ%ษฦยศk–A…f“PฦN0…yขBๅษึcขึยนั6ƒ [ื„1ูD1hDษ H–lcFL๖”$lดe+ฺ$F๕c”Mถ4ฃสุf fธR ๐* a†™‰ฌ!‘ํNjราฒอ[%gญทม”‚จภห ##6ณXณM™XOศ&†@ฦ6„ฺฺ&y๚0ึ&›; E3[†D`&”ฤdถทท.uc&*ล\,Lํฤด+โE™M†ษ€กุ4‰l<ฺ[ูm •4€‡ฺสxWd)ถูŒชTว รr๘`PiyS 1g•ec-%3D2j“ทสXูƒJุh ™ฑImUซA๋1ฺ6cš%elฃ 3\)›W ยXlhJCJ{อ–*3›•4lฎต6ฌrsผSล@F‘ญ!ตู”‰yNฒฝ406A็DŠถ,‚,_ภŸ๛๓Ÿ๋?๖ 3Tf€@ฐ "lรL2hdpJข,ฒ B6ม[สl, ฦ€€€€ˆ Q-า‘ฤ&ฐถ ฅ&H@T"ุุ6ภฦ€mฆYD‘ฑก3•0mCd›Z"ƒตK0ฃ T˜ฐทL๔ ‚ท• ิฒ ฃใzพY3[‰‘ณ6รย‚<ท:กN1=hEaHก{7ภ`ณaณmรƒa$„m`" c@4az†D  สยผm;Bณฦศณฬ  ฦฦ$ 0ƒ4fะh€ษl,kS˜R’ลข4 (03˜Œ ™ ’06ำ@J€ภฺย6ฦH$Dศ@l€ต…Aj  T 6F ฺฦd3อ$ค@`MeฝRLำ˜1dAด+ล `X“… "0› รlc2ะ ถ-cภPl36pภxŒ,2Qฦ˜0Ja 0Fญm€ `ฬฦ@@รถฐ"ฬfCC HฤŒอLๅ& † `ษlL`‚iษ0€@[0 0RJl› @["8H‰ศJ… ˆllขlถ…LถVlMี๎mƒlณYd(d746ำ*  ›7ฌ0ˆึ†a6…06lFีเU2 Xฬฐ€1fT€l &ณ- ำ4ีึณŠŒอฐ€0ฆมฒฑ56ภHa6[ำa‰`Œิถํ)'(ฺฒ€m๘FณH[F€~๐ฝ๘‡?ลโ7๏7@๕๛ฟ๓๛?๎งฟ๗อ๗ž–V‘-&ถai&์Y+[จ ภŠ6PฯŠX&–Yภฺaž ตVถ!lsˆXšถ-บfผสจํูสข3H*f.`ฦฆ&รฐŒX๖ถ‘0ฮA€-Lล#ๆ=“Jลdบ` ู93ซ…ๆ B-fSณQ%ำˆe›" ฝG’Xถ}2˜ดm%-ถฒGึi€6Z™4ซ`ฎ,@ l,ฅ™P…-ด-‹ Xšฒy{๊ˆ‹h6‹ส˜ J@šmlI)`e>ูfoฒj@ูาHRถฝJศ่ดg!€qอ3„%+Fx›‹"H6kฃ(2oฃmณe‹ฒPˆ c;„›šFllQb ฬ6ฦนภฺุ@ 0/ัSTb๋ญา2อRฦfs  ‰IR‹ฬ6eจZอf€๓Iฌm.@ศ$JŠe–b] `ยKค u lกm–‚^kUสLmŽ% V0oษEึ>m‚fณL,ึˆRทj†m•ภส\ถmซ6A6‚`[ฑMำI/3T `1ˆ%†so•jkm0Bช6 ูšูฒAส า 0\xฐฉ˜ฑู๖™‚ู++ ๖Fด^ข€ตอ 2^ฅไฺุ  ›E&ึ ภฤ@ีภ†ู&fHU5‹e6Mƒl๖Vฉึถ]d“ฒmยbd0ฬR,noภ`ณ–H4@ตf"hอRลฺ4๕ธA(ซัl–ซฺk ช1/K.[า Š ฏฬeร์qซ6ฎภŠ’bุฌ&iหจะb‚oCSชJึๆOพ๗ƒฟ๙ั฿~๗G฿]|๛๙๖/๘/~๚฿}๛อ๗ภ‹ยl8ฑE@…AUUฐ66JA๋ดฎeŒiช–‰!JU%cฬpSชJึุ)›ใม^Fฦ์EQ-c"‘Jฅ Xcƒi(™ญฉZmฃZQ"ณึ6M)ujmm{PญYึžlห 6›mM4V Y ฉๅูŒ™ญญVไฦ‚!kห บ2fณmtRดm†ˆyooฉcตlEิbcณฎ2ร4Vo ง(™eW'&]อถอ;สUU,ณm^ฝ,ซฐhซIฦ›M2ณBcs๔†ู0–dคTJถถิf3ึดกDสฺ<VU )Cซ+ฆ4ฦHŸซl3ฦฒ๖”ฝ(6Œูฒ$dุ๖@าึ‘AU%˜อcSEา)Qฯ–iขjLHJ—’MkoฌR๊ชeูRี "ใญ๗0ฃฦf›(=ัคVิิ"มlฆ™-ฤP+r˜1Yดต”bc{ถ”ฌhู[J`ฌ–ญ i f6q€i5ฌž6-‹Zfq:A๓)l›‡UwUฦผฝzต ณ•‰ผy“ฬ6”i[๊ฑู„AFชJAฆlmญฐkปฝQ)ชยXXช“•@ิ4อX@ฅXˆ&`คฯu566kk1€ฑˆ`+"ถmรI[P˜ชŠ [›m*Hญคzถ6Mจ–i›…RUecผ Mฉ*Y›อRี€ฯ?ร?ภ—อฆ ‹๏๏~๗พฯ๑ฯฟoฎ๛๓?๚๓ฟซฟ๎tu:…*็2dvrlCUfิ๓ถta[ูบŠX•†ั +ุนทW ณง๊:0ฏ๛ฆฝูP•ฌ๒,-O$ฃ… กŠถ9V–Mvฆฺถฐ,SิิๅŠึุ†•Faฅฦ:๎LีUWuํ(ฺRฺฑดู์ฉ์ฺJ{ผvgฎโสฬTŸžU 
†โ‰ฉ.<ซZkช๖ž;rUu๎ฌ,ํฝ๎าต6›(บkฌะว}ฎgิฒส•B-จgŸ๊:าžง\5B(๑†Nธ6ฺไ*%e6(ณ›!Šถ-VUญS๊บํแบ:ล๊rkrf'ฑอTี<ู๊fB{๋l]ฒฒฉ4ฺk%^ฮ&กfS]ชf}u฿4๓Uฒส3Rซ้๒ฐ Jดญ ฮmืคูl–e ีUZgภ6–ๅ6 ำ ฏc+Uี•บ๖ต—า๎"˜m›ั:vญฑTณท™ซˆb[๕นQ‘กxbc่Šgช% ณชmโ.Tw.ึbีซปบ„7MQ…Uๅนำ]ิ2WŠJFฆzฆแ๊ชฦa&J2ีพ–ยฦึ็๒\ัHmUาต={M[ecldIVVฅึ)UทmV]งcUฎ™œY*ฐฝิ5Cหl‹ฺ[QฉŠ•Mั€MฏWbถbฉ˜m่tzฝบ˜*ฌ2fด…ตฬ•"hdช™เr]7ปO˜อ”“ฉผ*๓๕|๎<ชอศUา5๓žMhตUถฦFcี๊ฌS*ฅํฅปำAUuสL2ฒJผฝS—ภ2o;j]ลสV!†MฏWB`ลR้tฬ*.›M–เyน—ง๚็?€ ณ5‚ž฿้›?ƒู่?็฿›_ฟ=ภท฿|๛“?ษฯ๒g?๙แ_>uืุ:ณmชiีซ, $ลณBถํํํบ๖บฆ‘Tž•4ขoDU๒\ูึ€งะฐผ]”;ษบ†”ฐmwŸฦชvyoผึU—์ี1TJm™ปLOฆฉฎถช")ฯ.Iak๋ห“ ๅ"จค(bZ๒jQัNบ7•นป๎๔ถAเฉG 34YMฆqฅtฅc›ป U†˜’*ซู]iฝฝ‡งช6U' •-V…k๋sHFxตซHีถฝGบ„กL[มนผก*Hฃmž$a_~[งZ†EkY๖Vuฅ=โBRวต๕ถ^w๎ฆท-ัYฉiี*iZŠ!ีุปjœี&ฉ<+iลnฤVˆะฆJ›lผjšF,[(—ฬ๎Nฤp้ํญ.m›ๅ8์ฒอ^\wW็ฝ HีjหTาฃIWVUi๕ถrbZlm=Jล•HUอดd™$zWcstm๎๊’ผภ“ตj˜กษŠl=ฎ'ตึ]๊R1%Reํํฎดถ}y]๗)U ดnsฆจY[ ™ VซR๊nืDU กL„,‘ฒ™ฎซyนัฦx๊2{พDUษิkท,›ถฎ๋๖l‹(ชโฺzใu}ิ–$๒l[ชI™$์4 1คšถmKฎ7Ieฐยอ>#ณLูMํฉZงท' ูฑLvต;ืฌ "ฑูUVw2o/qดŠmหnwWว‹ tตฺB*ๆ^zRUYuๆ๊ตธฐตckk‹าแN0RR*ฆฅฦด”QGฦ›ฃksUu๑ตG`ฒ0ฌk˜šผ๒ึH•Nีbฃ๎ยตPคสฺ[Tูผฝฏงป จ:อkY…jfซ*I&ˆฑr]ึ}6ณ้ช@ฒีสKคฦ๎๎ฎy Ve&W`6l5ฺฎmช‡DF[mfOรƒฮ,7ภ<›>…lจญL[Œ,Wถ ฐa}>ทฝfL˜g๎n[`…€(ณญ;{›R†ฝ๏ลFฒญyQ‡๗ฦซKŒ4_k๊d[!…6š/qฤฦั๕c*7c4šะถU[ต™ฅb{ใFc&^ฤ+ดย6ะ &ีfช20oปถ“mŒฎo53*ณ๊g{›\าw“Y๐qฯ์›{ฬโjc$†…๕ฌ๛dุ6ฃVM6$ ฃ1ไถ`˜ป03Tฬlบ˜ฝมGL๖ธะf[W0ร6ึตwท=ี%ฃญ ูF๓ไ„งO๓ f๖]Ÿ+ฒqถcตƒ๒^]ู†ม†ul4ณUฤbต`›jVa[)c๋Swณฎ๋›0kฆช๛๎ูซKkทึ|ตีฉmจ ฺˆ7ใ86ข`๏นrƒa˜CถWmek†ฺ๋{ณ\ฬ4O,‹iIƒmอL*lจbšทiDตQลlXืฬจฬ6ซ{Dุธ“ถ4fg|แ™๙\ 0™๕จ ถอใ่ŒภฐFcHฃš™ร”jfจfอฆ๖๘ฌš์Qาf[U˜aุ๋์]ƒQฃสฃ-d ๊ํัŒUว—พบึรพ๋ช ํญ>2™ ƒฒ—” oXWmอƒf,)[F+Ly{u๓Œยถ\ฐUๅฝ๗ๆ๚ฝ7 kท๊69‡ฒต3ๆตWe•a3JฐyวF์=ฉ›ฃอ!VmWmo–า67O,‹ฑข‘m€ตSe3ecถกL<Uฐ‡uMืฃค1cฃ4ฬLบ>ว?๚‡ห/๙ฏŸ?๘฿|ฏ5{–xจซฦZeร\hด7 ’แUฐ I{{ทฯX ‚T6จะ๓Xข‘™มˆภิ`-ฌI2ฬl-5ำจ…มR<ห๊6 ( ›ลSอ€ซm มาŠูซ,ˆY3YF€m$มถM๊ฬ( Faร 5ณ) ซซlตmiU6&#,กฯฆkHLŸ๎—๔—?หฯ_๖หoทmZa 6ฃฬLศ[ ภZUƒฉ‰:ึถF)ะถคVถboยŠfZหLšฐ1จ &“yv สถ•Jƒฤ‹UรkVฃM-@ตฝNNม`ณาฺอ$ม†TtฯรTูhfฉอ(ฺHfMŽิ6ณžŽMฤ˜ม(™ฑฬ๊&ž…ีฉยุชุFณ˜jFฺฬ ภคฮา `ค 2lc*mณ16 Šศ†AทํL B2๖. 
ณm3Wำฝ๙d[Y-–ี-ฬb๓ถo>Œ*ๅ-ฑมhุ6ญโ วึLi&ีพCฃ:›ซูf*ลBLLYวยL ฉhฦจต†xrม$mฝฝ[›ฺ๖๊S& ลณถี”gฃ:พ’„,mป์ํ™vหผ|6š ร’คeฦh&m3นอฌ#(fม…lู{m!3ภš1+a3ถ้ฐมUef[Iถ' ถ€`}Lš’๖๖,;)Šm@eถญMฌฐ-U53ƒๅfYต•šY[อfฦi™zsึ ัh๊ ฤšgT€Šl ฦ`Iตตm{u๒Cอ’1ะภ<ฃฅP‚mถ–mB,ึทอ@–N3CKู4]mX3บชŠ t๕l–HยC`€้% h6…ฐอหถห›(ฒฑ›]k`2cญ)ฬฮž•k–Zp ƒKิ6dVลถM๊ึX”Qยณค๏๊ฒ1ฒVๆก%ุศVญุ(ูฆฐฑ%3ฎb,ำ๖ญLฤpผอ6 tญyฺฆถูน…‡D˜‹ๆ™0บ๖6Ÿ๙T J์Y2Œrแl •mdŠB[๋ณญ ะ็า3x3’˜ฮถมศ่์eะR6V ถfำ–fYูค#TCซMัCภ &ฑn6คLํ=2ฺ0.๋ตYฃุ0ภf–้˜!ฑถ!{$AœYJะ*nฦP๕ฌaVมlcwถ cRฬ’B„๖ฦใc”ฦรึบˆ5›น&ฆZIถLa[2›ฃ+6ร—™หฐฃrำ&mณDŒ%eณ)™ู’ตฝjฃญ๔ๅๆชQถMมFนp€Y–ฒ"า[kfฑ Lcถ5I่S๕0ลl›‹!มh-Qร-eQlk๖ๆำFˆbHฦศจX(V#ส6šูษX†‰ัอ&*ิฑฒลX[kcรภlfYl$ด๗(fPฌaขZeX2ำฎ6-ฏ‹“[˜yยญ vf‘M0๐่’๗dง]้Yอ›eiV-6“šQ"#๕ถฆB]3จู8ฑอ:0orXXcoฏตฮฐYr„mc&'ฤ–bšTmŒ‘ง6!ุclฎดฦไบ{›1‚ฌŠลึฌ*.[ [หx•e+‹ฒmฬผy:m>๊aต-ทml€ฌ2H0่1บzภฐญ๒ฦฃ*ฐํMคx<ฺ%Q๏๓.๎ˆš63XZ๓้บed›6qจ์ฉIŒ7ืxk)ๅด‰นNBฆUošษ0<๒ญ fูWm?ึฮฐ•“_Mrkํ=‡–๔z— ”อ๏AษรS๖•šXอถ—ฅตjอุจQD่ทT‘ณอFSs`A›ขฝงuW6ดญ—ใฐmณณ๖d&ณVห5šDฑูบzd lหุ\อifฃjููสjฑ˜vuqณฌฺดj+อ"ณmฝEำmีญถๅ6š{๗ฐŽฝวQ*ร`S6›4า{CŠW3–r4ฟฝตฎ๕ตš 0ll›yชชูhฐฃ์จlXฆทwšบyฦถˆušนnญrZ๕@f O<๊†#ุW๑lฆ[œ:ดZcดš1ฝM6ก๑ๆ`ณญdผทหฎ„ƒv{๏—/อP†๗83โV6qถญห†ฉ๒>-ฆดูFญkXZ"ยžื๎<ฤึฮCQ๖LiBข<‚x๏๎fŒ1Kถlฑึฎ.B,fฝ D&ฅฺ6ศy๏๕ำ‰ฮ@๊ญŒญ6c๋จึี๖ฬส 2 `๐ึญT(›m{๎Š1lึNTอ^ฏฺ}ญ&aุcKƒ็MGื`–-]*fV๋ฆฒ]฿ 0Y6ง™บLCถV6ƒ™9ฤŒ๔ญuy?v€zgย๊ีัห-mใ)ฺํู๕ฬ๖คZ[หกtk็ฝ_ไุา5๓Qฯrชทฆฐ๋Œ…R๖Vฬ[9™์อํฎษฺึ#์YœY2slvีnhEู35ดีถฆ›e ใฒ๗บu๗{3%ึz ฉธl1k,ฑj๏2้๓Vฦ,๓้ฏฺฺฌ]“Uณa‚‘—;Cฺ˜๗^+ท=›4—พ0ฃ9แวฝM€]=ƒfm๋์ฉทp๖ฬZร@gK.ห0๊๚ปลYหฺvืZP๓e7™ะ0,ตฺฦจbุTmC™็พno ู]Iผ ีšตฝi[tม ๐๚ฎkfFr๛™ฒฟYํ=[/15ฮศ1(ูvOm!#ลK๐<ฎืถyI๎–ผ„ia!๏๕๗mฦXumซ2P•๕fญsหo+ๅค˜‰j$6+ฑ๕ญทช;ๅMŒafurฯ2y๔ใ—ถmฮบ†ษิ๏ฝฯฎfl[–:›)oปใใ๊ถ๖6ป;ืฆู&y้ฤาฦฬKญf{Z#]2ฦœ๐˜5ำ[ หึณšตญ8ถlฯณVฅ™1๎lจมb[]_ำlsศฺViyญญฏํๆฺฺ–ฅV6†๚fIš]ๆน+J๖ึใฎ„ถทํบ5kf˜ัm๋ืw-bfฯNา>ฅ7ณoฎถycษtฯา‚์ิฝญส˜nsFJ‹ซžว๕6ณGา-รPZ›ผืw“ู›vumรกทอ]emฝึ-๕J‰ ฬถ(cฤ๔บ›z™Iบ”7mƒ1{wทI2“๑b๓ข}บฺปmฒBฃ฿\๏ฝRฺ{ซ6Uถx[ฑ๘ช๗z~wฐจf›ณ,——ฒ ดK{ดFค I๑€ทนWฑ6tฺ›ฟz›ี’x๏UX<›ถw€หfZ0hUหL›‘คs;๋gq|ผUฆฦศศวลlOg!#ฐsณ)yถ฿ˆZeู4ศ-ึฬ๔}๖›ฆฮสV๗6I˜mึป๋ถ„:นxถ!ฅmOw๓4„u๋ฝIU ฃmาถ๓.c˜c๒mWd†ผฎhฃู8๛ฎ๗ึ6Œ`ฉล˜eO_fณv}LV๑—–™aKฎวล๊“ูผบ bไฒCฦ\M๏msํj1ฉZ‹ ุBร2<–ƒ=ต66aบธYฑํmฑŠ-a˜๋ถmƒล–ใma-8kสlึQo5FT$๖ผ™๕:ฐ dหvดfFœfู่!x#ทHmํM#‚ iyสถmUญ! าl๏=Nฬ†น–l’Vƒ๗]ฃ4ึจๆlถšjฝฝ*ูฆ‘L(ุๆ|ภš9gฮํ aไjEศ)d{LรฝZฮ?;;ิL;y;lฆlRฑ†ตQถI4สฬV‰ฺถ๗ฦuว‚4ญ๔"‘šmหถตศน๓”ี6ิ6)vRถฬ๗{S.:“b‘f{z+Lฦšๅ—6`ท‡iŠnฦฒฝ-&€ุ0ฆย`ไmงุœ แqd1ญุfต๒ดษ๊š7ฯผ>ฐ$™ณึฬ•6z–…ฺาส†ต‘ว๒Cฺช๑”m W Cn{›Eแ™๕ตค1PzwฝทกยฉูTƒy;sz{ a“ํ-ิ€ฤผอน4kW{’ ช-H;(นฎM6lยฺทzษฯ๐QๆึRoธL›ถฒ‰ดฺค=ฒฑก4สaaถJaอหษ๘!ิค‡”ฐlหฌิGฯ“ึฆถ-K)x•๔ถแN†ŽEเM[€5๒ห‘Mdทgฦ›]๖Bิฬฎร˜™‘mXฏkยˆhซVl›nดlwoeฦ๓ถ๗๚€ ฒษD 3›อXqg๛ฅ ทผ=‹ึTc ถ็อ.U™5ิ าถ™โ5ฏkI1ฆาŠทก—^ฉfถsฺอ ษฆู&‰jถูตYปWูยฬ’-ฦPM%uถภlยzทป—ผผํฯ๎vZดp—nำh6œVoศffi2šดm•dj๏ Iฑ-ก&&‘‹wx†I๕งžว๊๖ุ\$Ÿ‹3)ฦ๛m๖6T"ฉ=†kดง3jgl*6++ŠฝuฃAฮ6#$ๆ๕5aรn‹jC+๊ญ{ำFi๖l{ฏ%ฐ$&4ภฒ7ฉถYุ่#ZP๏ๅmศึZSmฑฝท5u*ฬP7 ›m\ฑlฃณไfSiฅyoปnVตM(“ํ60ไ๎ถ'lkI&Uf6สaฌต &ฝ๗ฎผ &iUฅ25ู6ฐvำฒvู฿ึฑ44ถ;mคlNซ ™EถทY 2”›š2ฤฺ๋›จb[Bmค†\,3ร–ิใ >™อซศ฿6[Wอฒa”j1-ฬฺผv1Sl,ฐ`eถวธ)ถ%ซ“ ƒโ6PMฬƒ$-Hถจ่ถl๒€ีถฺฬึ’6ูŒKRd3Šอžฎ:jCถ c6ฑ†dรฒaญมJBเ๎`0~–ฎฤLm๏ญX;X `"R™6 ถPdๆlกKk†ส6+*›l†D™ตmดล„บi’2Fถึข ฺx#sตlƒZ๛ิ&-ำY›Šะถญw>ZฦํฑD#›้ะภ&ั<lฉฺึlณฉฐิฦฐybฑuh&vต™ฒฬ/#ํํญ Y™ณ,Cถ%ซ” Pะจกฒ6ฬคB`กาa ์lZูC(๏ฦŒUV…๐(6{iฎv0+S{0bฦRIh0[ชใ%ิ…ม6เั„3e๏ญ4dHุฆ8ฺีfฃYฟาF[R›m&tีุ6M—M6Xnี˜ 2หTด™eeฆ`_ for more examples. Creating indexes ---------------- .. autosummary:: :toctree: ../generated/ cftime_range date_range date_range_like indexes.RangeIndex.arange indexes.RangeIndex.linspace Built-in Indexes ---------------- Default, pandas-backed indexes built-in to Xarray: .. autosummary:: :toctree: ../generated/ indexes.PandasIndex indexes.PandasMultiIndex More complex indexes built-in to Xarray: .. 
autosummary:: :toctree: ../generated/ CFTimeIndex indexes.RangeIndex indexes.NDPointIndex indexes.CoordinateTransformIndex Building custom indexes ----------------------- These classes are building blocks for more complex Indexes: .. autosummary:: :toctree: ../generated/ indexes.CoordinateTransform indexes.CoordinateTransformIndex indexes.NDPointIndex indexes.TreeAdapter The Index base class for building custom indexes: .. autosummary:: :toctree: ../generated/ Index Index.from_variables Index.concat Index.stack Index.unstack Index.create_variables Index.should_add_coord_to_array Index.to_pandas_index Index.isel Index.sel Index.join Index.reindex_like Index.equals Index.roll Index.rename Index.copy The following are useful when building custom Indexes .. autosummary:: :toctree: ../generated/ IndexSelResult xarray-2025.12.0/doc/api/io.rst000066400000000000000000000030001511464676000160130ustar00rootroot00000000000000.. currentmodule:: xarray IO / Conversion =============== Dataset methods --------------- .. autosummary:: :toctree: ../generated/ load_dataset open_dataset open_mfdataset open_zarr save_mfdataset Dataset.as_numpy Dataset.from_dataframe Dataset.from_dict Dataset.to_dataarray Dataset.to_dataframe Dataset.to_dask_dataframe Dataset.to_dict Dataset.to_netcdf Dataset.to_pandas Dataset.to_zarr Dataset.chunk Dataset.close Dataset.compute Dataset.filter_by_attrs Dataset.info Dataset.load Dataset.persist Dataset.unify_chunks DataArray methods ----------------- .. autosummary:: :toctree: ../generated/ load_dataarray open_dataarray DataArray.as_numpy DataArray.from_dict DataArray.from_iris DataArray.from_series DataArray.to_dask_dataframe DataArray.to_dataframe DataArray.to_dataset DataArray.to_dict DataArray.to_index DataArray.to_iris DataArray.to_masked_array DataArray.to_netcdf DataArray.to_numpy DataArray.to_pandas DataArray.to_series DataArray.to_zarr DataArray.chunk DataArray.close DataArray.compute DataArray.persist DataArray.load DataArray.unify_chunks DataTree methods ---------------- .. autosummary:: :toctree: ../generated/ load_datatree open_datatree open_groups DataTree.to_dict DataTree.to_netcdf DataTree.to_zarr DataTree.chunk DataTree.load DataTree.compute DataTree.persist .. .. .. Missing: .. ``open_mfdatatree`` xarray-2025.12.0/doc/api/plotting.rst000066400000000000000000000022051511464676000172520ustar00rootroot00000000000000.. currentmodule:: xarray Plotting ======== Dataset ------- .. autosummary:: :toctree: ../generated/ :template: autosummary/accessor_method.rst Dataset.plot.scatter Dataset.plot.quiver Dataset.plot.streamplot DataArray --------- .. autosummary:: :toctree: ../generated/ :template: autosummary/accessor_callable.rst DataArray.plot .. autosummary:: :toctree: ../generated/ :template: autosummary/accessor_method.rst DataArray.plot.contourf DataArray.plot.contour DataArray.plot.hist DataArray.plot.imshow DataArray.plot.line DataArray.plot.pcolormesh DataArray.plot.step DataArray.plot.scatter DataArray.plot.surface Faceting -------- .. autosummary:: :toctree: ../generated/ plot.FacetGrid plot.FacetGrid.add_colorbar plot.FacetGrid.add_legend plot.FacetGrid.add_quiverkey plot.FacetGrid.map plot.FacetGrid.map_dataarray plot.FacetGrid.map_dataarray_line plot.FacetGrid.map_dataset plot.FacetGrid.map_plot1d plot.FacetGrid.set_axis_labels plot.FacetGrid.set_ticks plot.FacetGrid.set_titles plot.FacetGrid.set_xlabels plot.FacetGrid.set_ylabels xarray-2025.12.0/doc/api/resample.rst000066400000000000000000000034311511464676000172240ustar00rootroot00000000000000.. 
currentmodule:: xarray Resample objects ================ .. currentmodule:: xarray.core.resample Dataset ------- .. autosummary:: :toctree: ../generated/ DatasetResample DatasetResample.asfreq DatasetResample.backfill DatasetResample.interpolate DatasetResample.nearest DatasetResample.pad DatasetResample.all DatasetResample.any DatasetResample.apply DatasetResample.assign DatasetResample.assign_coords DatasetResample.bfill DatasetResample.count DatasetResample.ffill DatasetResample.fillna DatasetResample.first DatasetResample.last DatasetResample.map DatasetResample.max DatasetResample.mean DatasetResample.median DatasetResample.min DatasetResample.prod DatasetResample.quantile DatasetResample.reduce DatasetResample.std DatasetResample.sum DatasetResample.var DatasetResample.where DatasetResample.dims DatasetResample.groups DataArray --------- .. autosummary:: :toctree: ../generated/ DataArrayResample DataArrayResample.asfreq DataArrayResample.backfill DataArrayResample.interpolate DataArrayResample.nearest DataArrayResample.pad DataArrayResample.all DataArrayResample.any DataArrayResample.apply DataArrayResample.assign_coords DataArrayResample.bfill DataArrayResample.count DataArrayResample.ffill DataArrayResample.fillna DataArrayResample.first DataArrayResample.last DataArrayResample.map DataArrayResample.max DataArrayResample.mean DataArrayResample.median DataArrayResample.min DataArrayResample.prod DataArrayResample.quantile DataArrayResample.reduce DataArrayResample.std DataArrayResample.sum DataArrayResample.var DataArrayResample.where DataArrayResample.dims DataArrayResample.groups xarray-2025.12.0/doc/api/rolling-exp.rst000066400000000000000000000003471511464676000176570ustar00rootroot00000000000000.. currentmodule:: xarray Exponential rolling objects =========================== .. currentmodule:: xarray.computation.rolling_exp .. autosummary:: :toctree: ../generated/ RollingExp RollingExp.mean RollingExp.sum xarray-2025.12.0/doc/api/rolling.rst000066400000000000000000000016611511464676000170650ustar00rootroot00000000000000.. currentmodule:: xarray Rolling objects =============== .. currentmodule:: xarray.computation.rolling Dataset ------- .. autosummary:: :toctree: ../generated/ DatasetRolling DatasetRolling.construct DatasetRolling.reduce DatasetRolling.argmax DatasetRolling.argmin DatasetRolling.count DatasetRolling.max DatasetRolling.mean DatasetRolling.median DatasetRolling.min DatasetRolling.prod DatasetRolling.std DatasetRolling.sum DatasetRolling.var DataArray --------- .. autosummary:: :toctree: ../generated/ DataArrayRolling DataArrayRolling.__iter__ DataArrayRolling.construct DataArrayRolling.reduce DataArrayRolling.argmax DataArrayRolling.argmin DataArrayRolling.count DataArrayRolling.max DataArrayRolling.mean DataArrayRolling.median DataArrayRolling.min DataArrayRolling.prod DataArrayRolling.std DataArrayRolling.sum DataArrayRolling.var xarray-2025.12.0/doc/api/testing.rst000066400000000000000000000017151511464676000170740ustar00rootroot00000000000000.. currentmodule:: xarray Testing ======= .. autosummary:: :toctree: ../generated/ testing.assert_equal testing.assert_identical testing.assert_allclose testing.assert_chunks_equal Test that two ``DataTree`` objects are similar. .. autosummary:: :toctree: ../generated/ testing.assert_isomorphic testing.assert_equal testing.assert_identical Hypothesis Testing Strategies ============================= .. currentmodule:: xarray See the :ref:`documentation page on testing ` for a guide on how to use these strategies. .. 
warning:: These strategies should be considered highly experimental, and liable to change at any time. .. autosummary:: :toctree: ../generated/ testing.strategies.supported_dtypes testing.strategies.names testing.strategies.dimension_names testing.strategies.dimension_sizes testing.strategies.attrs testing.strategies.variables testing.strategies.unique_subset_of xarray-2025.12.0/doc/api/top-level.rst000066400000000000000000000012501511464676000173200ustar00rootroot00000000000000.. currentmodule:: xarray Top-level functions =================== Computation ----------- .. autosummary:: :toctree: ../generated/ apply_ufunc cov corr cross dot map_blocks polyval unify_chunks where Combining Data -------------- .. autosummary:: :toctree: ../generated/ align broadcast concat merge combine_by_coords combine_nested Creation -------- .. autosummary:: :toctree: ../generated/ DataArray Dataset DataTree full_like zeros_like ones_like Miscellaneous ------------- .. autosummary:: :toctree: ../generated/ decode_cf infer_freq show_versions set_options get_options xarray-2025.12.0/doc/api/tutorial.rst000066400000000000000000000003011511464676000172500ustar00rootroot00000000000000.. currentmodule:: xarray Tutorial ======== .. autosummary:: :toctree: ../generated/ tutorial.open_dataset tutorial.load_dataset tutorial.open_datatree tutorial.load_datatree xarray-2025.12.0/doc/api/ufuncs.rst000066400000000000000000000042451511464676000167230ustar00rootroot00000000000000.. currentmodule:: xarray Universal functions =================== These functions are equivalent to their NumPy versions, but for xarray objects backed by non-NumPy array types (e.g. ``cupy``, ``sparse``, or ``jax``), they will ensure that the computation is dispatched to the appropriate backend. You can find them in the ``xarray.ufuncs`` module: .. autosummary:: :toctree: ../generated/ ufuncs.abs ufuncs.absolute ufuncs.acos ufuncs.acosh ufuncs.arccos ufuncs.arccosh ufuncs.arcsin ufuncs.arcsinh ufuncs.arctan ufuncs.arctanh ufuncs.asin ufuncs.asinh ufuncs.atan ufuncs.atanh ufuncs.bitwise_count ufuncs.bitwise_invert ufuncs.bitwise_not ufuncs.cbrt ufuncs.ceil ufuncs.conj ufuncs.conjugate ufuncs.cos ufuncs.cosh ufuncs.deg2rad ufuncs.degrees ufuncs.exp ufuncs.exp2 ufuncs.expm1 ufuncs.fabs ufuncs.floor ufuncs.invert ufuncs.isfinite ufuncs.isinf ufuncs.isnan ufuncs.isnat ufuncs.log ufuncs.log10 ufuncs.log1p ufuncs.log2 ufuncs.logical_not ufuncs.negative ufuncs.positive ufuncs.rad2deg ufuncs.radians ufuncs.reciprocal ufuncs.rint ufuncs.sign ufuncs.signbit ufuncs.sin ufuncs.sinh ufuncs.spacing ufuncs.sqrt ufuncs.square ufuncs.tan ufuncs.tanh ufuncs.trunc ufuncs.add ufuncs.arctan2 ufuncs.atan2 ufuncs.bitwise_and ufuncs.bitwise_left_shift ufuncs.bitwise_or ufuncs.bitwise_right_shift ufuncs.bitwise_xor ufuncs.copysign ufuncs.divide ufuncs.equal ufuncs.float_power ufuncs.floor_divide ufuncs.fmax ufuncs.fmin ufuncs.fmod ufuncs.gcd ufuncs.greater ufuncs.greater_equal ufuncs.heaviside ufuncs.hypot ufuncs.lcm ufuncs.ldexp ufuncs.left_shift ufuncs.less ufuncs.less_equal ufuncs.logaddexp ufuncs.logaddexp2 ufuncs.logical_and ufuncs.logical_or ufuncs.logical_xor ufuncs.maximum ufuncs.minimum ufuncs.mod ufuncs.multiply ufuncs.nextafter ufuncs.not_equal ufuncs.pow ufuncs.power ufuncs.remainder ufuncs.right_shift ufuncs.subtract ufuncs.true_divide ufuncs.angle ufuncs.isreal ufuncs.iscomplex xarray-2025.12.0/doc/api/weighted.rst000066400000000000000000000012371511464676000172160ustar00rootroot00000000000000.. currentmodule:: xarray Weighted objects ================ .. 
currentmodule:: xarray.computation.weighted Dataset ------- .. autosummary:: :toctree: ../generated/ DatasetWeighted DatasetWeighted.mean DatasetWeighted.quantile DatasetWeighted.sum DatasetWeighted.std DatasetWeighted.var DatasetWeighted.sum_of_weights DatasetWeighted.sum_of_squares DataArray --------- .. autosummary:: :toctree: ../generated/ DataArrayWeighted DataArrayWeighted.mean DataArrayWeighted.quantile DataArrayWeighted.sum DataArrayWeighted.std DataArrayWeighted.var DataArrayWeighted.sum_of_weights DataArrayWeighted.sum_of_squares xarray-2025.12.0/doc/badge.json000066400000000000000000000023521511464676000160470ustar00rootroot00000000000000{ "label": "", "message": "xarray", "logoSvg": "", "logoWidth": 14, "labelColor": "#4a4a4a", "color": "#0e4666" } xarray-2025.12.0/doc/combined.json000066400000000000000000000017041511464676000165650ustar00rootroot00000000000000{ "version": 1, "refs": { ".zgroup": "{\"zarr_format\":2}", "foo/.zarray": "{\"chunks\":[4,5],\"compressor\":null,\"dtype\":\">> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.{3,}: | {5,8}: " copybutton_prompt_is_regexp = True # NBSphinx configuration nbsphinx_timeout = 600 nbsphinx_execute = "always" nbsphinx_allow_errors = False nbsphinx_requirejs_path = "" # png2x/retina rendering of figues in docs would also need to modify custom.css: # https://github.com/spatialaudio/nbsphinx/issues/464#issuecomment-652729126 # .rst-content .image-reference img { # max-width: unset; # width: 100% !important; # height: auto !important; # } # nbsphinx_execute_arguments = [ # "--InlineBackend.figure_formats=['png2x']", # ] nbsphinx_prolog = """ {% set docname = env.doc2path(env.docname, base=None) %} You can run this notebook in a `live session `_ |Binder| or view it `on Github `_. .. |Binder| image:: https://mybinder.org/badge.svg :target: https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/{{ docname }} """ # AutoDoc configuration autosummary_generate = True autodoc_typehints = "none" # Napoleon configuration napoleon_google_docstring = False napoleon_numpy_docstring = True napoleon_use_param = False napoleon_use_rtype = False napoleon_preprocess_types = True napoleon_type_aliases = { # general terms "sequence": ":term:`sequence`", "iterable": ":term:`iterable`", "callable": ":py:func:`callable`", "dict_like": ":term:`dict-like `", "dict-like": ":term:`dict-like `", "path-like": ":term:`path-like `", "mapping": ":term:`mapping`", "file-like": ":term:`file-like `", # special terms # "same type as caller": "*same type as caller*", # does not work, yet # "same type as values": "*same type as values*", # does not work, yet # stdlib type aliases "MutableMapping": "~collections.abc.MutableMapping", "sys.stdout": ":obj:`sys.stdout`", "timedelta": "~datetime.timedelta", "string": ":class:`string `", # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", "scalar": ":term:`scalar`", "array": ":term:`array`", "hashable": ":term:`hashable `", # matplotlib terms "color-like": ":py:func:`color-like `", "matplotlib colormap name": ":doc:`matplotlib colormap name `", "matplotlib axes object": ":py:class:`matplotlib axes object `", "colormap": ":py:class:`colormap `", # xarray terms "dim name": ":term:`dimension name `", "var name": ":term:`variable name `", # objects without namespace: xarray "DataArray": "~xarray.DataArray", "Dataset": "~xarray.Dataset", "Variable": "~xarray.Variable", "DataTree": "~xarray.DataTree", "DatasetGroupBy": "~xarray.core.groupby.DatasetGroupBy", "DataArrayGroupBy": 
"~xarray.core.groupby.DataArrayGroupBy", "Grouper": "~xarray.groupers.Grouper", "Resampler": "~xarray.groupers.Resampler", # objects without namespace: numpy "ndarray": "~numpy.ndarray", "MaskedArray": "~numpy.ma.MaskedArray", "dtype": "~numpy.dtype", "ComplexWarning": "~numpy.ComplexWarning", # objects without namespace: pandas "Index": "~pandas.Index", "MultiIndex": "~pandas.MultiIndex", "CategoricalIndex": "~pandas.CategoricalIndex", "TimedeltaIndex": "~pandas.TimedeltaIndex", "DatetimeIndex": "~pandas.DatetimeIndex", "IntervalIndex": "~pandas.IntervalIndex", "Series": "~pandas.Series", "DataFrame": "~pandas.DataFrame", "Categorical": "~pandas.Categorical", "Path": "~~pathlib.Path", # objects with abbreviated namespace (from pandas) "pd.Index": "~pandas.Index", "pd.NaT": "~pandas.NaT", } autodoc_type_aliases = napoleon_type_aliases # Keep both in sync # mermaid config mermaid_version = "11.6.0" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates", sphinx_autosummary_accessors.templates_path] # The master toctree document. master_doc = "index" remove_from_toctrees = ["generated/*"] # The language for content autogenerated by Sphinx. language = "en" # General information about the project. project = "xarray" copyright = f"2014-{datetime.datetime.now().year}, xarray Developers" # The short Y.M.D version. v = packaging.version.parse(xarray.__version__) version = ".".join(str(p) for p in v.release) # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = "%Y-%m-%d" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build", "debug.ipynb", "**.ipynb_checkpoints"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pydata_sphinx_theme" html_title = "" html_context = { "github_user": "pydata", "github_repo": "xarray", "github_version": "main", "doc_path": "doc", } # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/layout.html#references html_theme_options = { #"announcement":"๐Ÿพ Xarray is now 10 years old! ๐ŸŽ‰", "logo": {"image_dark": "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.svg"}, "github_url":"https://github.com/pydata/xarray", "show_version_warning_banner":True, "use_edit_page_button":True, "header_links_before_dropdown": 8, "navbar_align": "left", "footer_center":["last-updated"], # Instead of adding these to the header bar they are linked in 'getting help' and 'contributing' # "icon_links": [ # { # "name": "Discord", # "url": "https://discord.com/invite/wEKPCt4PDu", # "icon": "fa-brands fa-discord", # }, # { # "name": "X", # "url": "https://x.com/xarray_dev", # "icon": "fa-brands fa-x-twitter", # }, # { # "name": "Bluesky", # "url": "https://bsky.app/profile/xarray.bsky.social", # "icon": "fa-brands fa-bluesky", # }, # ] } # pydata_sphinx_theme use_edit_page_button with github link seems better html_show_sourcelink = False # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
html_logo = "_static/logos/Xarray_Logo_RGB_Final.svg" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "_static/logos/Xarray_Icon_Final.svg" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_css_files = ["style.css"] # configuration for sphinxext.opengraph ogp_site_url = "https://docs.xarray.dev/en/latest/" ogp_image = "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_RGB_Final.png" ogp_custom_meta_tags = ( '', '', '', ) # Redirects for pages that were moved to new locations rediraffe_redirects = { "terminology.rst": "user-guide/terminology.rst", "data-structures.rst": "user-guide/data-structures.rst", "indexing.rst": "user-guide/indexing.rst", "interpolation.rst": "user-guide/interpolation.rst", "computation.rst": "user-guide/computation.rst", "groupby.rst": "user-guide/groupby.rst", "reshaping.rst": "user-guide/reshaping.rst", "combining.rst": "user-guide/combining.rst", "time-series.rst": "user-guide/time-series.rst", "weather-climate.rst": "user-guide/weather-climate.rst", "pandas.rst": "user-guide/pandas.rst", "io.rst": "user-guide/io.rst", "dask.rst": "user-guide/dask.rst", "plotting.rst": "user-guide/plotting.rst", "duckarrays.rst": "user-guide/duckarrays.rst", "related-projects.rst": "user-guide/ecosystem.rst", "faq.rst": "get-help/faq.rst", "why-xarray.rst": "getting-started-guide/why-xarray.rst", "installing.rst": "getting-started-guide/installing.rst", "quick-overview.rst": "getting-started-guide/quick-overview.rst", "contributing.rst": "contribute/contributing.rst", "developers-meeting.rst": "contribute/developers-meeting.rst", } # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = today_fmt # Output file base name for HTML help builder. htmlhelp_basename = "xarraydoc" # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "cftime": ("https://unidata.github.io/cftime", None), "cubed": ("https://cubed-dev.github.io/cubed/", None), "dask": ("https://docs.dask.org/en/latest", None), "flox": ("https://flox.readthedocs.io/en/latest/", None), "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", None), "iris": ("https://scitools-iris.readthedocs.io/en/latest", None), "matplotlib": ("https://matplotlib.org/stable/", None), "numba": ("https://numba.readthedocs.io/en/stable/", None), "numpy": ("https://numpy.org/doc/stable", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), "python": ("https://docs.python.org/3/", None), "scipy": ("https://docs.scipy.org/doc/scipy", None), "sparse": ("https://sparse.pydata.org/en/latest/", None), "xarray-tutorial": ("https://tutorial.xarray.dev/", None), "zarr": ("https://zarr.readthedocs.io/en/stable/", None), "xarray-lmfit": ("https://xarray-lmfit.readthedocs.io/stable", None), } # based on numpy doc/source/conf.py def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != "py": return None modname = info["module"] fullname = info["fullname"] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split("."): try: obj = getattr(obj, part) except AttributeError: return None try: fn = inspect.getsourcefile(inspect.unwrap(obj)) except TypeError: fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) except OSError: lineno = None if lineno: linespec = f"#L{lineno}-L{lineno + len(source) - 1}" else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(xarray.__file__)) if "+" in xarray.__version__: return f"https://github.com/pydata/xarray/blob/main/xarray/{fn}{linespec}" else: return ( f"https://github.com/pydata/xarray/blob/" f"v{xarray.__version__}/xarray/{fn}{linespec}" ) def html_page_context(app, pagename, templatename, context, doctree): # Disable edit button for docstring generated pages if "generated" in pagename: context["theme_use_edit_page_button"] = False def update_gallery(app: Sphinx): """Update the gallery page.""" LOGGER.info("Updating gallery page...") gallery = yaml.safe_load(pathlib.Path(app.srcdir, "gallery.yml").read_bytes()) for key in gallery: items = [ f""" .. grid-item-card:: :text-align: center :link: {item['path']} .. image:: {item['thumbnail']} :alt: {item['title']} +++ {item['title']} """ for item in gallery[key] ] items_md = indent(dedent("\n".join(items)), prefix=" ") markdown = f""" .. grid:: 1 2 2 2 :gutter: 2 {items_md} """ pathlib.Path(app.srcdir, f"{key}-gallery.txt").write_text(markdown) LOGGER.info(f"{key} gallery page updated.") LOGGER.info("Gallery page updated.") def update_videos(app: Sphinx): """Update the videos page.""" LOGGER.info("Updating videos page...") videos = yaml.safe_load(pathlib.Path(app.srcdir, "videos.yml").read_bytes()) items = [] for video in videos: authors = " | ".join(video["authors"]) item = f""" .. grid-item-card:: {" ".join(video["title"].split())} :text-align: center .. raw:: html {video['src']} +++ {authors} """ items.append(item) items_md = indent(dedent("\n".join(items)), prefix=" ") markdown = f""" .. 
grid:: 1 2 2 2 :gutter: 2 {items_md} """ pathlib.Path(app.srcdir, "videos-gallery.txt").write_text(markdown) LOGGER.info("Videos page updated.") def setup(app: Sphinx): app.connect("html-page-context", html_page_context) app.connect("builder-inited", update_gallery) app.connect("builder-inited", update_videos) xarray-2025.12.0/doc/contribute/000077500000000000000000000000001511464676000162665ustar00rootroot00000000000000xarray-2025.12.0/doc/contribute/contributing.rst000066400000000000000000001240551511464676000215360ustar00rootroot00000000000000.. _contributing: ********************** Contributing to xarray ********************** .. note:: Large parts of this document came from the `Pandas Contributing Guide `_. Overview ======== We welcome your skills and enthusiasm at the xarray project!. There are numerous opportunities to contribute beyond just writing code. All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions, and other ideas are welcome. If you have any questions on the process or how to fix something feel free to ask us! The recommended place to ask a question is on `GitHub Discussions `_ , but we also have a `Discord `_ and a `mailing list `_. There is also a `"python-xarray" tag on Stack Overflow `_ which we monitor for questions. We also have a biweekly community call, details of which are announced on the `Developers meeting `_. You are very welcome to join! Though we would love to hear from you, there is no expectation to contribute during the meeting either - you are always welcome to just sit in and listen. This project is a community effort, and everyone is welcome to contribute. Everyone within the community is expected to abide by our `code of conduct `_. Where to start? =============== If you are brand new to *xarray* or open-source development, we recommend going through the `GitHub "issues" tab `_ to find issues that interest you. Some issues are particularly suited for new contributors by the label `Documentation `__ and `good first issue `_ where you could start out. These are well documented issues, that do not require a deep understanding of the internals of xarray. Once you've found an interesting issue, you can return here to get your development environment setup. The xarray project does not assign issues. Issues are "assigned" by opening a Pull Request(PR). .. _contributing.bug_reports: Bug reports and enhancement requests ==================================== Bug reports are an important part of making *xarray* more stable. Having a complete bug report will allow others to reproduce the bug and provide insight into fixing. Trying out the bug-producing code on the *main* branch is often a worthwhile exercise to confirm that the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. Submitting a bug report ----------------------- If you find a bug in the code or documentation, do not hesitate to submit a ticket to the `Issue Tracker `_. You are also welcome to post feature requests or pull requests. If you are reporting a bug, please use the provided template which includes the following: #. Include a short, self-contained Python snippet reproducing the problem. You can format the code nicely by using `GitHub Flavored Markdown `_:: ```python import xarray as xr ds = xr.Dataset(...) ... ``` #. Include the full version string of *xarray* and its dependencies. 
You can use the built in function:: ```python import xarray as xr xr.show_versions() ... ``` #. Explain why the current behavior is wrong/not desired and what you expect instead. The issue will then show up to the *xarray* community and be open to comments/ideas from others. See this `stackoverflow article for tips on writing a good bug report `_ . .. _contributing.github: Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *xarray* code base. .. _contributing.version_control: Version control, Git, and GitHub ================================ The code is hosted on `GitHub `_. To contribute you will need to sign up for a `free GitHub account `_. We use `Git `_ for version control to allow many people to work together on the project. Some great resources for learning Git: * the `GitHub help pages `_. * the `NumPy's documentation `_. * Matthew Brett's `Pydagogue `_. Getting started with Git ------------------------ `GitHub has instructions for setting up Git `__ including installing git, setting up your SSH key, and configuring git. All these steps need to be completed before you can work seamlessly between your local repository and GitHub. .. note:: The following instructions assume you want to learn how to interact with github via the git command-line utility, but contributors who are new to git may find it easier to use other tools instead such as `Github Desktop `_. .. _contributing.dev_workflow: Development workflow ==================== To keep your work well organized, with readable history, and in turn make it easier for project maintainers to see what you've done, and why you did it, we recommend you to follow workflow: 1. `Create an account `_ on GitHub if you do not already have one. 2. You will need your own fork to work on the code. Go to the `xarray project page `_ and hit the ``Fork`` button near the top of the page. This creates a copy of the code under your account on the GitHub server. 3. Clone your fork to your machine:: git clone https://github.com/your-user-name/xarray.git cd xarray git remote add upstream https://github.com/pydata/xarray.git This creates the directory ``xarray`` and connects your repository to the upstream (main project) *xarray* repository. 4. Copy tags across from the xarray repository:: git fetch --tags upstream This will ensure that when you create a development environment a reasonable version number is created. .. _contributing.dev_env: Creating a development environment ---------------------------------- To test out code changes locally, you'll need to build *xarray* from source, which requires a Python environment. If you're making documentation changes, you can skip to :ref:`contributing.documentation` but you won't be able to build the documentation locally before pushing your changes. .. note:: For small changes, such as fixing a typo, you don't necessarily need to build and test xarray locally. If you make your changes then :ref:`commit and push them to a new branch `, xarray's automated :ref:`continuous integration tests ` will run and check your code in various ways. You can then try to fix these problems by committing and pushing more commits to the same branch. You can also avoid building the documentation locally by instead :ref:`viewing the updated documentation via the CI `. To speed up this feedback loop or for more complex development tasks you should build and test xarray locally. .. 
_contributing.dev_python: Creating a Python Environment ----------------------------- .. attention:: Xarray recently switched development workflows to use `Pixi `_ instead of Conda (PR https://github.com/pydata/xarray/pull/10888 ). If there are any edits to the contributing instructions that would improve clarity, please open a PR! Xarray uses `Pixi `_ to manage development environments. Before starting any development, you'll need to create an isolated xarray development environment: - Install `Pixi `_ - Make sure your Pixi is up to date (``pixi self-update``) - Make sure that you have :ref:`cloned the repository ` - ``cd`` to the *xarray* source directory That's it! Now you're ready to contribute to Xarray. Pixi defines multiple environments as well as tasks to help you with development. These include tasks for: - running the test suite - building the documentation - running the static type checker - running code formatters and linters Some of these tasks can be run in several environments (e.g., the test suite is run in environments with different, dependencies as well as different Python versions to make sure we have wide support for Xarray). Some of these tasks are only run in a single environment (e.g., building the documentation or running pre-commit hooks). You can see all available environments and tasks by running:: pixi list For example: - ``pixi run doc`` will build the documentation - ``pixi run mypy`` will run the static type checker - ``pixi run test`` will run the test suite - ``pixi run pre-commit`` will run all code formatters and linters - defined via the pre-commit hooks When running ``pixi run test`` you will be prompted to select which environment you want to use. You can specify the environment directly by providing the ``-e`` flag, e.g., ``pixi run -e my_environment test`` . Our CI setup uses Pixi as well - you can easily reproduce CI tests by running the same tasks in the same environments as defined in the CI. You can enter any of the defined environments with:: pixi shell -e my_environment This is similar to "activating" an environment in Conda. To exit this shell type ``exit`` or press ``Ctrl-D``. All these Pixi environments and tasks are defined in the ``pixi.toml`` file in the root of the repository. Install pre-commit hooks ------------------------- You can either run pre-commit manually via Pixi as described above, or set up git hooks to run pre-commit automatically. This is done by: .. code-block:: sh pixi shell -e pre-commit # enter the pre-commit environment pre-commit install # install the git hooks # or pre-commit uninstall # uninstall the git hooks Now, every time you make a git commit, all the pre-commit hooks will be run automatically using the pre-commit that comes with Pixi. Alternatively you can use a separate installation of ``pre-commit`` (e.g., install globally using Pixi (``pixi install -g pre_commit``), or via `Homebrew `_ ). If you want to commit without running ``pre-commit`` hooks, you can use ``git commit --no-verify``. Update the ``main`` branch -------------------------- First make sure you have :ref:`created a development environment `. Before starting a new set of changes, fetch all changes from ``upstream/main``, and start a new feature branch from that. From time to time you should fetch the upstream changes from GitHub: :: git fetch --tags upstream git merge upstream/main This will combine your commits with the latest *xarray* git ``main``. 
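If you prefer to review the incoming changes before running the merge, you can
first list the upstream commits that are not yet in your branch (these are
plain ``git`` commands, nothing xarray-specific)::

    git fetch --tags upstream
    git log --oneline HEAD..upstream/main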
If this leads to merge conflicts, you must resolve these before submitting your pull request. If you have uncommitted changes, you will need to ``git stash`` them prior to updating. This will effectively store your changes, which can be reapplied after updating. Create a new feature branch --------------------------- Create a branch to save your changes, even before you start making changes. You want your ``main branch`` to contain only production-ready code:: git checkout -b shiny-new-feature This changes your working directory to the ``shiny-new-feature`` branch. Keep any changes in this branch specific to one bug or feature so it is clear what the branch brings to *xarray*. You can have many "shiny-new-features" and switch in between them using the ``git checkout`` command. Generally, you will want to keep your feature branches on your public GitHub fork of xarray. To do this, you ``git push`` this new branch up to your GitHub repo. Generally (if you followed the instructions in these pages, and by default), git will have a link to your fork of the GitHub repo, called ``origin``. You push up to your own fork with: :: git push origin shiny-new-feature In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option: :: git push --set-upstream origin shiny-new-feature From now on git will know that ``shiny-new-feature`` is related to the ``shiny-new-feature branch`` in the GitHub repo. The editing workflow -------------------- 1. Make some changes 2. See which files have changed with ``git status``. You'll see a listing like this one: :: # On branch shiny-new-feature # Changed but not updated: # (use "git add ..." to update what will be committed) # (use "git checkout -- ..." to discard changes in working directory) # # modified: README 3. Check what the actual changes are with ``git diff``. 4. Build the `documentation `__ for the documentation changes. 5. `Run the test suite `_ for code changes. Commit and push your changes ---------------------------- 1. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``. 2. To push the changes up to your forked repo on GitHub, do a ``git push``. Open a pull request ------------------- When you're ready or need feedback on your code, open a Pull Request (PR) so that the xarray developers can give feedback and eventually include your suggested code into the ``main`` branch. `Pull requests (PRs) on GitHub `_ are the mechanism for contributing to xarray's code and documentation. Enter a title for the set of changes with some explanation of what you've done. Follow the PR template, which looks like this. :: [ ]Closes #xxxx [ ]Tests added [ ]User visible changes (including notable bug fixes) are documented in whats-new.rst [ ]New functions/methods are listed in api.rst Mention anything you'd like particular attention for - such as a complicated change or some code you are not happy with. If you don't think your request is ready to be merged, just say so in your pull request message and use the "Draft PR" feature of GitHub. This is a good way of getting some preliminary code review. .. _contributing.documentation: Contributing to the documentation ================================= If you're not the developer type, contributing to the documentation is still of huge value. You don't even have to be an expert on *xarray* to do so! In fact, there are sections of the docs that are worse off after being written by experts. 
If something in the docs doesn't make sense to you, updating the relevant section after you figure it out is a great way to ensure it will help the next person. .. contents:: Documentation: :local: About the *xarray* documentation -------------------------------- The documentation is written in **reStructuredText**, which is almost like writing in plain English, and built using `Sphinx `__. The Sphinx Documentation has an excellent `introduction to reST `__. Review the Sphinx docs to perform more complex changes to the documentation as well. Some other important things to know about the docs: - The *xarray* documentation consists of two parts: the docstrings in the code itself and the docs in this folder ``xarray/doc/``. The docstrings are meant to provide a clear explanation of the usage of the individual functions, while the documentation in this folder consists of tutorial-like overviews per topic together with some other information (what's new, installation, etc). - The docstrings follow the **NumPy Docstring Standard**, which is used widely in the Scientific Python community. This standard specifies the format of the different sections of the docstring. Refer to the `documentation for the Numpy docstring format `_ for a detailed explanation, or look at some of the existing functions to extend it in a similar manner. - The documentation makes heavy use of the `jupyter-sphinx extension `_. The ``jupyter-execute`` directive lets you put code in the documentation which will be run during the doc build. For example: .. code:: rst .. jupyter-execute:: x = 2 x**3 will be rendered as: .. jupyter-execute:: x = 2 x**3 Almost all code examples in the docs are run (and the output saved) during the doc build. This approach means that code examples will always be up to date, but it does make building the docs a bit more complex. - Our API documentation in ``doc/api.rst`` houses the auto-generated documentation from the docstrings. For classes, there are a few subtleties around controlling which methods and attributes have pages auto-generated. Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx will emit a warning. How to build the *xarray* documentation --------------------------------------- Requirements ~~~~~~~~~~~~ Make sure to follow the instructions on :ref:`creating a development environment` above. Once you have Pixi installed - you can build the documentation using the command:: pixi run doc Then you can find the HTML output files in the folder ``xarray/doc/_build/html/``. To see what the documentation now looks like with your changes, you can view the HTML build locally by opening the files in your local browser. For example, if you normally use Google Chrome as your browser, you could enter:: google-chrome _build/html/quick-overview.html in the terminal, running from within the ``doc/`` folder. You should now see a new tab pop open in your local browser showing the ``quick-overview`` page of the documentation. The different pages of this local build of the documentation are linked together, so you can browse the whole documentation by following links the same way you would on the officially-hosted xarray docs site. The first time you build the docs, it will take quite a while because it has to run all the code examples and build all the generated docstring pages. In subsequent evocations, Sphinx will try to only build the pages that have been modified. 
If you want to do a full clean build, do:: pixi run doc-clean Writing ReST pages ------------------ Most documentation is either in the docstrings of individual classes and methods, in explicit ``.rst`` files, or in examples and tutorials. All of these use the `ReST `_ syntax and are processed by `Sphinx `_. This section contains additional information and conventions how ReST is used in the xarray documentation. Section formatting ~~~~~~~~~~~~~~~~~~ We aim to follow the recommendations from the `Python documentation `_ and the `Sphinx reStructuredText documentation `_ for section markup characters, - ``*`` with overline, for chapters - ``=``, for heading - ``-``, for sections - ``~``, for subsections - ``**`` text ``**``, for **bold** text Referring to other documents and sections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Sphinx `_ allows internal `references `_ between documents. Documents can be linked with the ``:doc:`` directive: :: See the :doc:`/getting-started-guide/installing` See the :doc:`/getting-started-guide/quick-overview` will render as: See the `Installation `_ See the `Quick Overview `_ Including figures and files ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Image files can be directly included in pages with the ``image::`` directive. .. _contributing.code: Contributing to the code base ============================= .. contents:: Code Base: :local: Code standards -------------- Writing good code is not just about what you write. It is also about *how* you write it. During :ref:`Continuous Integration ` testing, several tools will be run to check your code for stylistic errors. Generating any warnings will cause the test to fail. Thus, good style is a requirement for submitting code to *xarray*. In addition, because a lot of people use our library, it is important that we do not make sudden changes to the code that could have the potential to break a lot of user code as a result, that is, we need it to be as *backwards compatible* as possible to avoid mass breakages. Code Formatting ~~~~~~~~~~~~~~~ xarray uses several tools to ensure a consistent code format throughout the project: - `ruff `_ for formatting, code quality checks and standardized order in imports, and - `mypy `_ for static type checking on `type hints `_. We highly recommend that you setup `pre-commit hooks `_ to automatically run all the above tools every time you make a git commit. This can be done by running:: pre-commit install from the root of the xarray repository. You can skip the pre-commit checks with ``git commit --no-verify``. Backwards Compatibility ~~~~~~~~~~~~~~~~~~~~~~~ Please try to maintain backwards compatibility. *xarray* has a growing number of users with lots of existing code, so don't break it if at all possible. If you think breakage is required, clearly state why as part of the pull request. Be especially careful when changing function and method signatures, because any change may require a deprecation warning. For example, if your pull request means that the argument ``old_arg`` to ``func`` is no longer valid, instead of simply raising an error if a user passes ``old_arg``, we would instead catch it: .. code-block:: python def func(new_arg, old_arg=None): if old_arg is not None: from xarray.core.utils import emit_user_level_warning emit_user_level_warning( "`old_arg` has been deprecated, and in the future will raise an error." "Please use `new_arg` from now on.", DeprecationWarning, ) # Still do what the user intended here This temporary check would then be removed in a subsequent version of xarray. 
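From the user's side, calling the function with the deprecated argument still
works but now emits a warning. A minimal sketch, assuming ``func`` is defined
as in the snippet above:

.. code-block:: python

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        func(new_arg=1, old_arg=2)  # still works, but warns about old_arg

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)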
This process of first warning users before actually breaking their code is known as a "deprecation cycle", and makes changes significantly easier to handle both for users of xarray, and for developers of other libraries that depend on xarray. .. _contributing.ci: Testing With Continuous Integration ----------------------------------- The *xarray* test suite runs automatically via the `GitHub Actions `__, continuous integration service, once your pull request is submitted. A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing, then you will get a red 'X', where you can click through to see the individual failed tests. This is an example of a green build. .. image:: ../_static/ci.png .. note:: Each time you push to your PR branch, a new run of the tests will be triggered on the CI. If they haven't already finished, tests for any older commits on the same branch will be automatically cancelled. .. _contributing.tdd: Test-driven development/code writing ------------------------------------ *xarray* is serious about testing and strongly encourages contributors to embrace `test-driven development (TDD) `_. This development process "relies on the repetition of a very short development cycle: first the developer writes an (initially failing) automated test case that defines a desired improvement or new function, then produces the minimum amount of code to pass that test." So, before actually writing any code, you should write your tests. Often the test can be taken from the original GitHub issue. However, it is always worth considering additional use cases and writing corresponding tests. Adding tests is one of the most common requests after code is pushed to *xarray*. Therefore, it is worth getting in the habit of writing tests ahead of time so that this is never an issue. Like many packages, *xarray* uses `pytest `_ and the convenient extensions in `numpy.testing `_. Writing tests ~~~~~~~~~~~~~ All tests should go into the ``tests`` subdirectory of the specific package. This folder contains many current examples of tests, and we suggest looking to these for inspiration. The ``xarray.testing`` module has many special ``assert`` functions that make it easier to make statements about whether DataArray or Dataset objects are equivalent. The easiest way to verify that your code is correct is to explicitly construct the result you expect, then compare the actual result to the expected correct result:: def test_constructor_from_0d(): expected = Dataset({None: ([], 0)})[None] actual = DataArray(0) assert_identical(expected, actual) Transitioning to ``pytest`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ *xarray* existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class. .. code-block:: python class TestReallyCoolFeature: ... Going forward, we are moving to a more *functional* style using the `pytest `__ framework, which offers a richer testing framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this: .. code-block:: python def test_really_cool_feature(): ... Using ``pytest`` ~~~~~~~~~~~~~~~~ Here is an example of a self-contained set of tests that illustrate multiple features that we like to use. - functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters - ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``. 
- using ``parametrize``: allow testing of multiple cases - to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used - ``fixture``, code for object construction, on a per-test basis - using bare ``assert`` for scalars and truth-testing - ``assert_equal`` and ``assert_identical`` from the ``xarray.testing`` module for xarray object comparisons. - the typical pattern of constructing an ``expected`` and comparing versus the ``result`` We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``xarray/tests/`` structure. .. code-block:: python import pytest import numpy as np import xarray as xr from xarray.testing import assert_equal @pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"]) def test_dtypes(dtype): assert str(np.dtype(dtype)) == dtype @pytest.mark.parametrize( "dtype", [ "float32", pytest.param("int16", marks=pytest.mark.skip), pytest.param( "int32", marks=pytest.mark.xfail(reason="to show how it works") ), ], ) def test_mark(dtype): assert str(np.dtype(dtype)) == "float32" @pytest.fixture def dataarray(): return xr.DataArray([1, 2, 3]) @pytest.fixture(params=["int8", "int16", "int32", "int64"]) def dtype(request): return request.param def test_series(dataarray, dtype): result = dataarray.astype(dtype) assert result.dtype == dtype expected = xr.DataArray(np.array([1, 2, 3], dtype=dtype)) assert_equal(result, expected) A test run of this yields .. code-block:: shell ((xarray) $ pytest test_cool_feature.py -v ================================= test session starts ================================== platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 -- cachedir: .pytest_cache plugins: hypothesis-6.56.3, cov-4.0.0 collected 11 items xarray/tests/test_cool_feature.py::test_dtypes[int8] PASSED [ 9%] xarray/tests/test_cool_feature.py::test_dtypes[int16] PASSED [ 18%] xarray/tests/test_cool_feature.py::test_dtypes[int32] PASSED [ 27%] xarray/tests/test_cool_feature.py::test_dtypes[int64] PASSED [ 36%] xarray/tests/test_cool_feature.py::test_mark[float32] PASSED [ 45%] xarray/tests/test_cool_feature.py::test_mark[int16] SKIPPED (unconditional skip) [ 54%] xarray/tests/test_cool_feature.py::test_mark[int32] XFAIL (to show how it works) [ 63%] xarray/tests/test_cool_feature.py::test_series[int8] PASSED [ 72%] xarray/tests/test_cool_feature.py::test_series[int16] PASSED [ 81%] xarray/tests/test_cool_feature.py::test_series[int32] PASSED [ 90%] xarray/tests/test_cool_feature.py::test_series[int64] PASSED [100%] ==================== 9 passed, 1 skipped, 1 xfailed in 1.83 seconds ==================== Tests that we have ``parametrized`` are now accessible via the test name, for example we could run these with ``-k int8`` to sub-select *only* those tests which match ``int8``. .. code-block:: shell ((xarray) bash-3.2$ pytest test_cool_feature.py -v -k int8 ================================== test session starts ================================== platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 -- cachedir: .pytest_cache plugins: hypothesis-6.56.3, cov-4.0.0 collected 11 items test_cool_feature.py::test_dtypes[int8] PASSED test_cool_feature.py::test_series[int8] PASSED Running the test suite ---------------------- The tests can then be run directly inside your Git clone (without having to install *xarray*) by typing:: pytest xarray The tests suite is exhaustive and takes a few minutes. Often it is worth running only a subset of tests first around your changes before running the entire suite. 
The easiest way to do this is with:: pytest xarray/path/to/test.py -k regex_matching_test_name Or with one of the following constructs:: pytest xarray/tests/[test-module].py pytest xarray/tests/[test-module].py::[TestClass] pytest xarray/tests/[test-module].py::[TestClass]::[test_method] Using `pytest-xdist `_, one can speed up local testing on multicore machines, by running pytest with the optional -n argument:: pytest xarray -n 4 This can significantly reduce the time it takes to locally run tests before submitting a pull request. For more, see the `pytest `_ documentation. Running the performance test suite ---------------------------------- Performance matters and it is worth considering whether your code has introduced performance regressions. *xarray* is starting to write a suite of benchmarking tests using `asv `__ to enable easy monitoring of the performance of critical *xarray* operations. These benchmarks are all found in the ``xarray/asv_bench`` directory. To use all features of asv, you will need either ``conda`` or ``virtualenv``. For more details please check the `asv installation webpage `_. To install asv:: python -m pip install asv If you need to run a benchmark, change your directory to ``asv_bench/`` and run:: asv continuous -f 1.1 upstream/main HEAD You can replace ``HEAD`` with the name of the branch you are working on, and report benchmarks that changed by more than 10%. The command uses ``conda`` by default for creating the benchmark environments. If you want to use virtualenv instead, write:: asv continuous -f 1.1 -E virtualenv upstream/main HEAD The ``-E virtualenv`` option should be added to all ``asv`` commands that run benchmarks. The default value is defined in ``asv.conf.json``. Running the full benchmark suite can take up to one hour and use up a few GBs of RAM. Usually it is sufficient to paste only a subset of the results into the pull request to show that the committed changes do not cause unexpected performance regressions. You can run specific benchmarks using the ``-b`` flag, which takes a regular expression. For example, this will only run tests from a ``xarray/asv_bench/benchmarks/groupby.py`` file:: asv continuous -f 1.1 upstream/main HEAD -b ^groupby If you want to only run a specific group of tests from a file, you can do it using ``.`` as a separator. For example:: asv continuous -f 1.1 upstream/main HEAD -b groupby.GroupByMethods will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``. You can also run the benchmark suite using the version of *xarray* already installed in your current Python environment. This can be useful if you do not have ``virtualenv`` or ``conda``, or are using the ``setup.py develop`` approach discussed above; for the in-place build you need to set ``PYTHONPATH``, e.g. ``PYTHONPATH="$PWD/.." asv [remaining arguments]``. You can run benchmarks using an existing Python environment by:: asv run -e -E existing or, to use a specific Python interpreter,:: asv run -e -E existing:python3.10 This will display stderr from the benchmarks, and use your local ``python`` that comes from your ``$PATH``. Learn `how to write a benchmark and how to use asv from the documentation `_ . .. TODO: uncomment once we have a working setup see https://github.com/pydata/xarray/pull/5066 The *xarray* benchmarking suite is run remotely and the results are available `here `_. Documenting your code --------------------- Changes should be reflected in the release notes located in ``doc/whats-new.rst``. 
This file contains an ongoing change log for each release. Add an entry to this file to document your fix, enhancement or (unavoidable) breaking change. Make sure to include the GitHub issue number when adding your entry (using ``:issue:`1234```, where ``1234`` is the issue/pull request number). If your code is an enhancement, it is most likely necessary to add usage examples to the existing documentation. This can be done by following the :ref:`guidelines for contributing to the documentation `. .. _contributing.changes: Contributing your changes to *xarray* ===================================== .. _contributing.committing: Committing your code -------------------- Keep style fixes to a separate commit to make your pull request more readable. Once you've made changes, you can see them by typing:: git status If you have created a new file, it is not being tracked by git. Add it by typing:: git add path/to/file-to-be-added.py Doing 'git status' again should give something like:: # On branch shiny-new-feature # # modified: /relative/path/to/file-you-added.py # The following defines how a commit message should ideally be structured: * A subject line with ``< 72`` chars. * One blank line. * Optionally, a commit message body. Please reference the relevant GitHub issues in your commit message using ``GH1234`` or ``#1234``. Either style is fine, but the former is generally preferred. Now you can commit your changes in your local repository:: git commit -m .. _contributing.pushing: Pushing your changes -------------------- When you want your changes to appear publicly on your GitHub page, push your forked feature branch's commits:: git push origin shiny-new-feature Here ``origin`` is the default name given to your remote repository on GitHub. You can see the remote repositories:: git remote -v If you added the upstream repository as described above you will see something like:: origin git@github.com:yourname/xarray.git (fetch) origin git@github.com:yourname/xarray.git (push) upstream git://github.com/pydata/xarray.git (fetch) upstream git://github.com/pydata/xarray.git (push) Now your code is on GitHub, but it is not yet a part of the *xarray* project. For that to happen, a pull request needs to be submitted on GitHub. .. _contributing.review: Review your code ---------------- When you're ready to ask for a code review, file a pull request. Before you do, once again make sure that you have followed all the guidelines outlined in this document regarding code style, tests, performance tests, and documentation. You should also double check your branch changes against the branch it was based on: #. Navigate to your repository on GitHub -- https://github.com/your-user-name/xarray #. Click on ``Branches`` #. Click on the ``Compare`` button for your feature branch #. Select the ``base`` and ``compare`` branches, if necessary. This will be ``main`` and ``shiny-new-feature``, respectively. .. _contributing.pr: Finally, make the pull request ------------------------------ If everything looks good, you are ready to make a pull request. A pull request is how code from a local repository becomes available to the GitHub community and can be looked at and eventually merged into the ``main`` version. This pull request and its associated changes will eventually be committed to the ``main`` branch and available in the next release. To submit a pull request: #. Navigate to your repository on GitHub #. Click on the ``Pull Request`` button #. 
You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks okay one last time #. Write a description of your changes in the ``Preview Discussion`` tab #. Click ``Send Pull Request``. This request then goes to the repository maintainers, and they will review the code. If you have made updates to the documentation, you can now see a preview of the updated docs by clicking on "Details" under the ``docs/readthedocs.org`` check near the bottom of the list of checks that run automatically when submitting a PR, then clicking on the "View Docs" button on the right (not the big green button, the small black one further down). .. image:: ../_static/view-docs.png If you need to make more changes, you can make them in your branch, add them to a new commit, push them to GitHub, and the pull request will automatically be updated. Pushing them to GitHub again is done by:: git push origin shiny-new-feature This will automatically update your pull request with the latest code and restart the :ref:`Continuous Integration ` tests. .. _contributing.delete: Delete your merged branch (optional) ------------------------------------ Once your feature branch is accepted into upstream, you'll probably want to get rid of the branch. First, update your ``main`` branch to check that the merge was successful:: git fetch upstream git checkout main git merge upstream/main Then you can do:: git branch -D shiny-new-feature You need to use an upper-case ``-D`` because the branch was squashed into a single commit before merging. Be careful with this because ``git`` won't warn you if you accidentally delete an unmerged branch. If you didn't delete your branch using GitHub's interface, then it will still exist on GitHub. To delete it there do:: git push origin --delete shiny-new-feature .. _contributing.checklist: PR checklist ------------ - **Properly comment and document your code.** See `"Documenting your code" `_. - **Test that the documentation builds correctly** by typing ``make html`` in the ``doc`` directory. This is not strictly necessary, but this may be easier than waiting for CI to catch a mistake. See `"Contributing to the documentation" `_. - **Test your code**. - Write new tests if needed. See `"Test-driven development/code writing" `_. - Test the code using `Pytest `_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests. - By default, the upstream dev CI is disabled on pull request and push events. You can override this behavior per commit by adding a ``[test-upstream]`` tag to the first line of the commit message. For documentation-only commits, you can skip the CI per commit by adding a ``[skip-ci]`` tag to the first line of the commit message. - **Properly format your code** and verify that it passes the formatting guidelines set by `ruff `_. See `"Code formatting" `_. You can use `pre-commit `_ to run these automatically on each commit. - Run ``pre-commit run --all-files`` in the root directory. This may modify some files. Confirm and commit any formatting changes. - **Push your code** and `create a PR on GitHub `_. - **Use a helpful title for your pull request** by summarizing the main contributions rather than using the latest commit message. If the PR addresses an `issue `_, please `reference it `_. 
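As a final reference point, release-note entries in ``doc/whats-new.rst`` are
short bullet items under the relevant section heading; a sketch of the typical
shape (section name, issue/pull-request numbers and wording are placeholders)::

    Bug fixes
    ~~~~~~~~~

    - Fix incorrect handling of missing values in ``Dataset.mean`` (:issue:`1234`, :pull:`5678`).
      By `Your Name <https://github.com/your-github-handle>`_.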
xarray-2025.12.0/doc/contribute/developers-meeting.rst000066400000000000000000000024041511464676000226160ustar00rootroot00000000000000.. _developers-meeting: Developers meeting ------------------ Xarray developers meet bi-weekly every other Wednesday. The meeting occurs on `Zoom `__. Find the `notes for the meeting here `__. There is a :issue:`GitHub issue for changes to the meeting<4001>`. You can subscribe to this calendar to be notified of changes: * `Google Calendar `__ * `iCal `__ .. raw:: html xarray-2025.12.0/doc/contribute/index.rst000066400000000000000000000014271511464676000201330ustar00rootroot00000000000000######################## Xarray Developer's Guide ######################## We welcome your skills and enthusiasm at the Xarray project! There are numerous opportunities to contribute beyond just writing code. All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions, and other ideas are welcome. Please review our Contributor's guide for more guidance. In this section you will also find documentation on the internal organization of Xarray's source code, the roadmap for current development priorities, as well as how to engage with core maintainers of the Xarray codebase. .. toctree:: :maxdepth: 2 :hidden: contributing ../internals/index ../roadmap ../whats-new developers-meeting Team xarray-2025.12.0/doc/examples/000077500000000000000000000000001511464676000157265ustar00rootroot00000000000000xarray-2025.12.0/doc/examples/ERA5-GRIB-example.ipynb000066400000000000000000000054431511464676000216450ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# GRIB Data Example " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "GRIB format is commonly used to disseminate atmospheric model data. With xarray and the cfgrib engine, GRIB data can easily be analyzed and visualized." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import xarray as xr\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To read GRIB data, you can use `xarray.load_dataset`. The only extra code you need is to specify the engine as `cfgrib`." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"era5-2mt-2019-03-uk.grib\", engine=\"cfgrib\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's create a simple plot of 2-m air temperature in degrees Celsius:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = ds - 273.15\n", "ds.t2m[0].plot(cmap=plt.cm.coolwarm)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "With CartoPy, we can create a more detailed plot, using built-in shapefiles to help provide geographic context:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import cartopy.crs as ccrs\n", "import cartopy\n", "\n", "fig = plt.figure(figsize=(10, 10))\n", "ax = plt.axes(projection=ccrs.Robinson())\n", "ax.coastlines(resolution=\"10m\")\n", "plot = ds.t2m[0].plot(\n", " cmap=plt.cm.coolwarm, transform=ccrs.PlateCarree(), cbar_kwargs={\"shrink\": 0.6}\n", ")\n", "plt.title(\"ERA5 - 2m temperature British Isles March 2019\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, we can also pull out a time series for a given location easily:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds.t2m.sel(longitude=0, latitude=51.5).plot()\n", "plt.title(\"ERA5 - London 2m temperature March 2019\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.12.0/doc/examples/ROMS_ocean_model.ipynb000066400000000000000000000161531511464676000221040ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ROMS Ocean Model Example" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The Regional Ocean Modeling System ([ROMS](https://www.myroms.org/)) is an open source hydrodynamic model that is used for simulating currents and water properties in coastal and estuarine regions. ROMS is one of a few standard ocean models, and it has an active user community.\n", "\n", "ROMS uses a regular C-Grid in the horizontal, similar to other structured grid ocean and atmospheric models, and a stretched vertical coordinate (see [the ROMS documentation](https://www.myroms.org/wiki/Vertical_S-coordinate) for more details). Both of these require special treatment when using `xarray` to analyze ROMS ocean model output. This example notebook shows how to create a lazily evaluated vertical coordinate, and make some basic plots. The `xgcm` package is required to do analysis that is aware of the horizontal C-Grid." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import cartopy.crs as ccrs\n", "import cartopy.feature as cfeature\n", "import matplotlib.pyplot as plt\n", "\n", "%matplotlib inline\n", "\n", "import xarray as xr" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Load a sample ROMS file. 
This is a subset of a full model available at \n", "\n", " http://barataria.tamu.edu/thredds/catalog.html?dataset=txla_hindcast_agg\n", " \n", "The subsetting was done using the following command on one of the output files:\n", "\n", " #open dataset\n", " ds = xr.open_dataset('/d2/shared/TXLA_ROMS/output_20yr_obc/2001/ocean_his_0015.nc')\n", " \n", " # Turn on chunking to activate dask and parallelize read/write.\n", " ds = ds.chunk({'ocean_time': 1})\n", " \n", " # Pick out some of the variables that will be included as coordinates\n", " ds = ds.set_coords(['Cs_r', 'Cs_w', 'hc', 'h', 'Vtransform'])\n", " \n", " # Select a subset of variables. Salt will be visualized, zeta is used to \n", " # calculate the vertical coordinate\n", " variables = ['salt', 'zeta']\n", " ds[variables].isel(ocean_time=slice(47, None, 7*24), \n", " xi_rho=slice(300, None)).to_netcdf('ROMS_example.nc', mode='w')\n", "\n", "So, the `ROMS_example.nc` file contains a subset of the grid, one 3D variable, and two time steps." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load in ROMS dataset as an xarray object" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# load in the file\n", "ds = xr.tutorial.open_dataset(\"ROMS_example.nc\", chunks={\"ocean_time\": 1})\n", "\n", "# This is a way to turn on chunking and lazy evaluation. Opening with mfdataset, or\n", "# setting the chunking in the open_dataset would also achieve this.\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Add a lazilly calculated vertical coordinates\n", "\n", "Write equations to calculate the vertical coordinate. These will be only evaluated when data is requested. Information about the ROMS vertical coordinate can be found [here](https://www.myroms.org/wiki/Vertical_S-coordinate).\n", "\n", "In short, for `Vtransform==2` as used in this example, \n", "\n", "$Z_0 = (h_c \\, S + h \\,C) / (h_c + h)$\n", "\n", "$z = Z_0 (\\zeta + h) + \\zeta$\n", "\n", "where the variables are defined as in the link above." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if ds.Vtransform == 1:\n", " Zo_rho = ds.hc * (ds.s_rho - ds.Cs_r) + ds.Cs_r * ds.h\n", " z_rho = Zo_rho + ds.zeta * (1 + Zo_rho / ds.h)\n", "elif ds.Vtransform == 2:\n", " Zo_rho = (ds.hc * ds.s_rho + ds.Cs_r * ds.h) / (ds.hc + ds.h)\n", " z_rho = ds.zeta + (ds.zeta + ds.h) * Zo_rho\n", "\n", "ds.coords[\"z_rho\"] = z_rho.transpose() # needing transpose seems to be an xarray bug\n", "ds.salt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### A naive vertical slice\n", "\n", "Creating a slice using the s-coordinate as the vertical dimension is typically not very informative." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "ds.salt.isel(xi_rho=50, ocean_time=0).plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can feed coordinate information to the plot method to give a more informative cross-section that uses the depths. Note that we did not need to slice the depth or longitude information separately, this was done automatically as the variable was sliced." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "section = ds.salt.isel(xi_rho=50, eta_rho=slice(0, 167), ocean_time=0)\n", "section.plot(x=\"lon_rho\", y=\"z_rho\", figsize=(15, 6), clim=(25, 35))\n", "plt.ylim([-100, 1]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### A plan view\n", "\n", "Now make a naive plan view, without any projection information, just using lon/lat as x/y. This looks OK, but will appear compressed because lon and lat do not have an aspect constrained by the projection." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds.salt.isel(s_rho=-1, ocean_time=0).plot(x=\"lon_rho\", y=\"lat_rho\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And let's use a projection to make it nicer, and add a coast." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "proj = ccrs.LambertConformal(central_longitude=-92, central_latitude=29)\n", "fig = plt.figure(figsize=(15, 5))\n", "ax = plt.axes(projection=proj)\n", "ds.salt.isel(s_rho=-1, ocean_time=0).plot(\n", " x=\"lon_rho\", y=\"lat_rho\", transform=ccrs.PlateCarree()\n", ")\n", "\n", "coast_10m = cfeature.NaturalEarthFeature(\n", " \"physical\", \"land\", \"10m\", edgecolor=\"k\", facecolor=\"0.8\"\n", ")\n", "ax.add_feature(coast_10m)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.12.0/doc/examples/_code/000077500000000000000000000000001511464676000167775ustar00rootroot00000000000000xarray-2025.12.0/doc/examples/_code/accessor_example.py000066400000000000000000000012771511464676000226750ustar00rootroot00000000000000import xarray as xr @xr.register_dataset_accessor("geo") class GeoAccessor: def __init__(self, xarray_obj): self._obj = xarray_obj self._center = None @property def center(self): """Return the geographic center point of this dataset.""" if self._center is None: # we can use a cache on our accessor objects, because accessors # themselves are cached on instances that access them. lon = self._obj.latitude lat = self._obj.longitude self._center = (float(lon.mean()), float(lat.mean())) return self._center def plot(self): """Plot data on a map.""" return "plotting!" xarray-2025.12.0/doc/examples/apply_ufunc_vectorize_1d.ipynb000066400000000000000000000671061511464676000240060ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Applying unvectorized functions with `apply_ufunc`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This example will illustrate how to conveniently apply an unvectorized function `func` to xarray objects using `apply_ufunc`. `func` expects 1D numpy arrays and returns a 1D numpy array. 
Our goal is to conveniently apply this function along a dimension of xarray objects that may or may not wrap dask arrays with a signature.\n", "\n", "We will illustrate this using `np.interp`: \n", "\n", " Signature: np.interp(x, xp, fp, left=None, right=None, period=None)\n", " Docstring:\n", " One-dimensional linear interpolation.\n", "\n", " Returns the one-dimensional piecewise linear interpolant to a function\n", " with given discrete data points (`xp`, `fp`), evaluated at `x`.\n", "\n", "and write an `xr_interp` function with signature\n", "\n", " xr_interp(xarray_object, dimension_name, new_coordinate_to_interpolate_to)\n", "\n", "### Load data\n", "\n", "First let's load an example dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:51.659160Z", "start_time": "2020-01-15T14:45:50.528742Z" } }, "outputs": [], "source": [ "import xarray as xr\n", "import numpy as np\n", "\n", "xr.set_options(display_style=\"html\") # fancy HTML repr\n", "\n", "air = (\n", " xr.tutorial.load_dataset(\"air_temperature\")\n", " .air.sortby(\"lat\") # np.interp needs coordinate in ascending order\n", " .isel(time=slice(4), lon=slice(3))\n", ") # choose a small subset for convenience\n", "air" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The function we will apply is `np.interp` which expects 1D numpy arrays. This functionality is already implemented in xarray so we use that capability to make sure we are not making mistakes." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:55.431708Z", "start_time": "2020-01-15T14:45:55.104701Z" } }, "outputs": [], "source": [ "newlat = np.linspace(15, 75, 100)\n", "air.interp(lat=newlat)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's define a function that works with one vector of data along `lat` at a time." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:57.889496Z", "start_time": "2020-01-15T14:45:57.792269Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = interp1d_np(air.isel(time=0, lon=0), air.lat, newlat)\n", "expected = air.interp(lat=newlat)\n", "\n", "# no errors are raised if values are equal to within floating point precision\n", "np.testing.assert_allclose(expected.isel(time=0, lon=0).values, interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### No errors are raised so our interpolation is working.\n", "\n", "This function consumes and returns numpy arrays, which means we need to do a lot of work to convert the result back to an xarray object with meaningful metadata. This is where `apply_ufunc` is very useful.\n", "\n", "### `apply_ufunc`\n", "\n", " Apply a vectorized function for unlabeled arrays on xarray objects.\n", "\n", " The function will be mapped over the data variable(s) of the input arguments using \n", " xarrayโ€™s standard rules for labeled computation, including alignment, broadcasting, \n", " looping over GroupBy/Dataset variables, and merging of coordinates.\n", " \n", "`apply_ufunc` has many capabilities but for simplicity this example will focus on the common task of vectorizing 1D functions over nD xarray objects. We will iteratively build up the right set of arguments to `apply_ufunc` and read through many error messages in doing so." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:59.768626Z", "start_time": "2020-01-15T14:45:59.543808Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`apply_ufunc` needs to know a lot of information about what our function does so that it can reconstruct the outputs. In this case, the size of dimension lat has changed and we need to explicitly specify that this will happen. xarray helpfully tells us that we need to specify the kwarg `exclude_dims`." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### `exclude_dims`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "\n", "```\n", "exclude_dims : set, optional\n", " Core dimensions on the inputs to exclude from alignment and\n", " broadcasting entirely. Any input coordinates along these dimensions\n", " will be dropped. Each excluded dimension must also appear in\n", " ``input_core_dims`` for at least one argument. Only dimensions listed\n", " here are allowed to change size between input and output objects.\n", "```" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:02.187012Z", "start_time": "2020-01-15T14:46:02.105563Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Core dimensions\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Core dimensions are central to using `apply_ufunc`. In our case, our function expects to receive a 1D vector along `lat` — this is the dimension that is \"core\" to the function's functionality. Multiple core dimensions are possible. `apply_ufunc` needs to know which dimensions of each variable are core dimensions.\n", "\n", " input_core_dims : Sequence[Sequence], optional\n", " List of the same length as ``args`` giving the list of core dimensions\n", " on each input argument that should not be broadcast. By default, we\n", " assume there are no core dimensions on any input arguments.\n", "\n", " For example, ``input_core_dims=[[], ['time']]`` indicates that all\n", " dimensions on the first argument and all dimensions other than 'time'\n", " on the second argument should be broadcast.\n", "\n", " Core dimensions are automatically moved to the last axes of input\n", " variables before applying ``func``, which facilitates using NumPy style\n", " generalized ufuncs [2]_.\n", " \n", " output_core_dims : List[tuple], optional\n", " List of the same length as the number of output arguments from\n", " ``func``, giving the list of core dimensions on each output that were\n", " not broadcast on the inputs. 
By default, we assume that ``func``\n", " outputs exactly one array, with axes corresponding to each broadcast\n", " dimension.\n", "\n", " Core dimensions are assumed to appear as the last dimensions of each\n", " output in the provided order.\n", " \n", "Next we specify `\"lat\"` as `input_core_dims` on both `air` and `air.lat`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:05.031672Z", "start_time": "2020-01-15T14:46:04.947588Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "xarray is telling us that it expected to receive back a numpy array with 0 dimensions but instead received an array with 1 dimension corresponding to `newlat`. We can fix this by specifying `output_core_dims`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:09.325218Z", "start_time": "2020-01-15T14:46:09.303020Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally we get some output! Let's check that this is right\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:11.295440Z", "start_time": "2020-01-15T14:46:11.226553Z" } }, "outputs": [], "source": [ "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.isel(time=0, lon=0), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "No errors are raised so it is right!" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Vectorization with `np.vectorize`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now our function currently only works on one vector of data which is not so useful given our 3D dataset.\n", "Let's try passing the whole dataset. We add a `print` statement so we can see what our function receives." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:13.808646Z", "start_time": "2020-01-15T14:46:13.680098Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(\n", " lon=slice(3), time=slice(4)\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.isel(time=0, lon=0), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That's a hard-to-interpret error but our `print` call helpfully printed the shapes of the input data: \n", "\n", " data: (10, 53, 25) | x: (25,) | xi: (100,)\n", "\n", "We see that `air` has been passed as a 3D numpy array which is not what `np.interp` expects. Instead we want loop over all combinations of `lon` and `time`; and apply our function to each corresponding vector of data along `lat`.\n", "`apply_ufunc` makes this easy by specifying `vectorize=True`:\n", "\n", " vectorize : bool, optional\n", " If True, then assume ``func`` only takes arrays defined over core\n", " dimensions as input and vectorize it automatically with\n", " :py:func:`numpy.vectorize`. This option exists for convenience, but is\n", " almost always slower than supplying a pre-vectorized function.\n", " Using this option requires NumPy version 1.12 or newer.\n", " \n", "Also see the documentation for `np.vectorize`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.vectorize.html. Most importantly\n", "\n", " The vectorize function is provided primarily for convenience, not for performance. \n", " The implementation is essentially a for loop." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:26.633233Z", "start_time": "2020-01-15T14:46:26.515209Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air, # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", " vectorize=True, # loop over non-core dims\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected, interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This unfortunately is another cryptic error from numpy. \n", "\n", "Notice that `newlat` is not an xarray object. Let's add a dimension name `new_lat` and modify the call. Note this cannot be `lat` because xarray expects dimensions to be the same size (or broadcastable) among all inputs. `output_core_dims` needs to be modified appropriately. We'll manually rename `new_lat` back to `lat` for easy checking." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:30.026663Z", "start_time": "2020-01-15T14:46:29.893267Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air, # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " vectorize=True, # loop over non-core dims\n", ")\n", "interped = interped.rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(\n", " expected.transpose(*interped.dims), interped # order of dims is different\n", ")\n", "interped" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Notice that the printed input shapes are all 1D and correspond to one vector along the `lat` dimension.\n", "\n", "The result is now an xarray object with coordinate values copied over from `data`. This is why `apply_ufunc` is so convenient; it takes care of a lot of boilerplate necessary to apply functions that consume and produce numpy arrays to xarray objects.\n", "\n", "One final point: `lat` is now the *last* dimension in `interped`. This is a \"property\" of core dimensions: they are moved to the end before being sent to `interp1d_np` as was noted in the docstring for `input_core_dims`\n", "\n", " Core dimensions are automatically moved to the last axes of input\n", " variables before applying ``func``, which facilitates using NumPy style\n", " generalized ufuncs [2]_." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Parallelization with dask\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So far our function can only handle numpy arrays. A real benefit of `apply_ufunc` is the ability to easily parallelize over dask chunks _when needed_. \n", "\n", "We want to apply this function in a vectorized fashion over each chunk of the dask array. This is possible using dask's `blockwise`, `map_blocks`, or `apply_gufunc`. Xarray's `apply_ufunc` wraps dask's `apply_gufunc` and asking it to map the function over chunks using `apply_gufunc` is as simple as specifying `dask=\"parallelized\"`. With this level of flexibility we need to provide dask with some extra information: \n", " 1. `output_dtypes`: dtypes of all returned objects, and \n", " 2. `output_sizes`: lengths of any new dimensions. \n", " \n", "Here we need to specify `output_dtypes` since `apply_ufunc` can infer the size of the new dimension `new_lat` from the argument corresponding to the third element in `input_core_dims`. Here I choose the chunk sizes to illustrate that `np.vectorize` is still applied so that our function receives 1D vectors even though the blocks are 3D." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:42.469341Z", "start_time": "2020-01-15T14:48:42.344209Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.chunk(\n", " {\"time\": 2, \"lon\": 2}\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " vectorize=True, # loop over non-core dims\n", " dask=\"parallelized\",\n", " output_dtypes=[air.dtype], # one per output\n", ").rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.transpose(*interped.dims), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Yay! our function is receiving 1D vectors, so we've successfully parallelized applying a 1D function over a block. If you have a distributed dashboard up, you should see computes happening as equality is checked.\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### High performance vectorization: gufuncs, numba & guvectorize\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`np.vectorize` is a very convenient function but is unfortunately slow. It is only marginally faster than writing a for loop in Python and looping. A common way to get around this is to write a base interpolation function that can handle nD arrays in a compiled language like Fortran and then pass that to `apply_ufunc`.\n", "\n", "Another option is to use the numba package which provides a very convenient `guvectorize` decorator: https://numba.pydata.org/numba-doc/latest/user/vectorize.html#the-guvectorize-decorator\n", "\n", "Any decorated function gets compiled and will loop over any non-core dimension in parallel when necessary. We need to specify some extra information:\n", "\n", " 1. Our function cannot return a variable any more. Instead it must receive a variable (the last argument) whose contents the function will modify. So we change from `def interp1d_np(data, x, xi)` to `def interp1d_np_gufunc(data, x, xi, out)`. Our computed results must be assigned to `out`. All values of `out` must be assigned explicitly.\n", " \n", " 2. `guvectorize` needs to know the dtypes of the input and output. This is specified in string form as the first argument. Each element of the tuple corresponds to each argument of the function. In this case, we specify `float64` for all inputs and outputs: `\"(float64[:], float64[:], float64[:], float64[:])\"` corresponding to `data, x, xi, out`\n", " \n", " 3. Now we need to tell numba the size of the dimensions the function takes as inputs and returns as output i.e. core dimensions. This is done in symbolic form i.e. `data` and `x` are vectors of the same length, say `n`; `xi` and the output `out` have a different length, say `m`. 
So the second argument is (again as a string)\n", " `\"(n), (n), (m) -> (m).\"` corresponding again to `data, x, xi, out`\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:45.267633Z", "start_time": "2020-01-15T14:48:44.943939Z" } }, "outputs": [], "source": [ "from numba import float64, guvectorize\n", "\n", "\n", "@guvectorize(\"(float64[:], float64[:], float64[:], float64[:])\", \"(n), (n), (m) -> (m)\")\n", "def interp1d_np_gufunc(data, x, xi, out):\n", " # numba doesn't really like this.\n", " # seem to support fstrings so do it the old way\n", " print(\n", " \"data: \" + str(data.shape) + \" | x:\" + str(x.shape) + \" | xi: \" + str(xi.shape)\n", " )\n", " out[:] = np.interp(xi, x, data)\n", " # gufuncs don't return data\n", " # instead you assign to a the last arg\n", " # return np.interp(xi, x, data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The warnings are about object-mode compilation relating to the `print` statement. This means we don't get much speed up: https://numba.pydata.org/numba-doc/latest/user/performance-tips.html#no-python-mode-vs-object-mode. We'll keep the `print` statement temporarily to make sure that `guvectorize` acts like we want it to." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:54.755405Z", "start_time": "2020-01-15T14:48:54.634724Z" } }, "outputs": [], "source": [ "interped = xr.apply_ufunc(\n", " interp1d_np_gufunc, # first the function\n", " air.chunk(\n", " {\"time\": 2, \"lon\": 2}\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " # vectorize=True, # not needed since numba takes care of vectorizing\n", " dask=\"parallelized\",\n", " output_dtypes=[air.dtype], # one per output\n", ").rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.transpose(*interped.dims), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Yay! Our function is receiving 1D vectors and is working automatically with dask arrays. Finally let's comment out the print line and wrap everything up in a nice reusable function" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:49:28.667528Z", "start_time": "2020-01-15T14:49:28.103914Z" } }, "outputs": [], "source": [ "from numba import float64, guvectorize\n", "\n", "\n", "@guvectorize(\n", " \"(float64[:], float64[:], float64[:], float64[:])\",\n", " \"(n), (n), (m) -> (m)\",\n", " nopython=True,\n", ")\n", "def interp1d_np_gufunc(data, x, xi, out):\n", " out[:] = np.interp(xi, x, data)\n", "\n", "\n", "def xr_interp(data, dim, newdim):\n", " interped = xr.apply_ufunc(\n", " interp1d_np_gufunc, # first the function\n", " data, # now arguments in the order expected by 'interp1_np'\n", " data[dim], # as above\n", " newdim, # as above\n", " input_core_dims=[[dim], [dim], [\"__newdim__\"]], # list with one entry per arg\n", " output_core_dims=[[\"__newdim__\"]], # returned data has one dimension\n", " exclude_dims=set((dim,)), # dimensions allowed to change size. 
Must be a set!\n", " # vectorize=True, # not needed since numba takes care of vectorizing\n", " dask=\"parallelized\",\n", " output_dtypes=[\n", " data.dtype\n", " ], # one per output; could also be float or np.dtype(\"float64\")\n", " ).rename({\"__newdim__\": dim})\n", " interped[dim] = newdim # need to add this manually\n", "\n", " return interped\n", "\n", "\n", "xr.testing.assert_allclose(\n", " expected.transpose(*interped.dims),\n", " xr_interp(air.chunk({\"time\": 2, \"lon\": 2}), \"lat\", newlat),\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This technique is generalizable to any 1D function." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" }, "nbsphinx": { "allow_errors": true }, "org": null, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": false, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.12.0/doc/examples/area_weighted_temperature.ipynb000066400000000000000000000140531511464676000242010ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": { "toc": true }, "source": [ "

<h1>Table of Contents<span class=\"tocSkip\"></span></h1>
\n", "" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Compare weighted and unweighted mean temperature\n", "\n", "\n", "Author: [Mathias Hauser](https://github.com/mathause/)\n", "\n", "\n", "We use the `air_temperature` example dataset to calculate the area-weighted temperature over its domain. This dataset has a regular latitude/ longitude grid, thus the grid cell area decreases towards the pole. For this grid we can use the cosine of the latitude as proxy for the grid cell area.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:57.222351Z", "start_time": "2020-03-17T14:43:56.147541Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "\n", "import cartopy.crs as ccrs\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", "import xarray as xr" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Data\n", "\n", "Load the data, convert to celsius, and resample to daily values" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:57.831734Z", "start_time": "2020-03-17T14:43:57.651845Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"air_temperature\")\n", "\n", "# to celsius\n", "air = ds.air - 273.15\n", "\n", "# resample from 6-hourly to daily values\n", "air = air.resample(time=\"D\").mean()\n", "\n", "air" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Plot the first timestep:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:59.887120Z", "start_time": "2020-03-17T14:43:59.582894Z" } }, "outputs": [], "source": [ "projection = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n", "\n", "f, ax = plt.subplots(subplot_kw=dict(projection=projection))\n", "\n", "air.isel(time=0).plot(transform=ccrs.PlateCarree(), cbar_kwargs=dict(shrink=0.7))\n", "ax.coastlines()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Creating weights\n", "\n", "For a rectangular grid the cosine of the latitude is proportional to the grid cell area." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:18.777092Z", "start_time": "2020-03-17T14:44:18.736587Z" } }, "outputs": [], "source": [ "weights = np.cos(np.deg2rad(air.lat))\n", "weights.name = \"weights\"\n", "weights" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Weighted mean" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:52.607120Z", "start_time": "2020-03-17T14:44:52.564674Z" } }, "outputs": [], "source": [ "air_weighted = air.weighted(weights)\n", "air_weighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:54.334279Z", "start_time": "2020-03-17T14:44:54.280022Z" } }, "outputs": [], "source": [ "weighted_mean = air_weighted.mean((\"lon\", \"lat\"))\n", "weighted_mean" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Plot: comparison with unweighted mean\n", "\n", "Note how the weighted mean temperature is higher than the unweighted." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:45:08.877307Z", "start_time": "2020-03-17T14:45:08.673383Z" } }, "outputs": [], "source": [ "weighted_mean.plot(label=\"weighted\")\n", "air.mean((\"lon\", \"lat\")).plot(label=\"unweighted\")\n", "\n", "plt.legend()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.6" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.12.0/doc/examples/blank_template.ipynb000066400000000000000000000021231511464676000217510ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "id": "d8f54f6a", "metadata": {}, "source": [ "# Blank template\n", "\n", "Use this notebook from Binder to test an issue or reproduce a bug report" ] }, { "cell_type": "code", "execution_count": null, "id": "41b90ede", "metadata": {}, "outputs": [], "source": [ "import xarray as xr\n", "import numpy as np\n", "import pandas as pd\n", "\n", "ds = xr.tutorial.load_dataset(\"air_temperature\")\n", "da = ds[\"air\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "effd9aeb", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 5 } xarray-2025.12.0/doc/examples/monthly-means.ipynb000066400000000000000000000165261511464676000215760ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Calculating Seasonal Averages from Time Series of Monthly Means \n", "=====\n", "\n", "Author: [Joe Hamman](https://github.com/jhamman/)\n", "\n", "The data used for this example can be found in the [xarray-data](https://github.com/pydata/xarray-data) repository. You may need to change the path to `rasm.nc` below.\n", "\n", "Suppose we have a netCDF or `xarray.Dataset` of monthly mean data and we want to calculate the seasonal average. To do this properly, we need to calculate the weighted average considering that each month has a different number of days." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:35.958210Z", "start_time": "2018-11-28T20:51:35.936966Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "import matplotlib.pyplot as plt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Open the `Dataset`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.072316Z", "start_time": "2018-11-28T20:51:36.016594Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.open_dataset(\"rasm\").load()\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Now for the heavy lifting:\n", "We first have to come up with the weights,\n", "- calculate the month length for each monthly data record\n", "- calculate weights using `groupby('time.season')`\n", "\n", "Finally, we just need to multiply our weights by the `Dataset` and sum along the time dimension. Creating a `DataArray` for the month length is as easy as using the `days_in_month` accessor on the time coordinate. The calendar type, in this case `'noleap'`, is automatically considered in this operation." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "month_length = ds.time.dt.days_in_month\n", "month_length" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.132413Z", "start_time": "2018-11-28T20:51:36.073708Z" } }, "outputs": [], "source": [ "# Calculate the weights by grouping by 'time.season'.\n", "weights = (\n", " month_length.groupby(\"time.season\") / month_length.groupby(\"time.season\").sum()\n", ")\n", "\n", "# Test that the sum of the weights for each season is 1.0\n", "np.testing.assert_allclose(weights.groupby(\"time.season\").sum().values, np.ones(4))\n", "\n", "# Calculate the weighted average\n", "ds_weighted = (ds * weights).groupby(\"time.season\").sum(dim=\"time\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.152913Z", "start_time": "2018-11-28T20:51:36.133997Z" } }, "outputs": [], "source": [ "ds_weighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.190765Z", "start_time": "2018-11-28T20:51:36.154416Z" } }, "outputs": [], "source": [ "# only used for comparisons\n", "ds_unweighted = ds.groupby(\"time.season\").mean(\"time\")\n", "ds_diff = ds_weighted - ds_unweighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:40.264871Z", "start_time": "2018-11-28T20:51:36.192467Z" } }, "outputs": [], "source": [ "# Quick plot to show the results\n", "notnull = pd.notnull(ds_unweighted[\"Tair\"][0])\n", "\n", "fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14, 12))\n", "for i, season in enumerate((\"DJF\", \"MAM\", \"JJA\", \"SON\")):\n", " ds_weighted[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 0],\n", " vmin=-30,\n", " vmax=30,\n", " cmap=\"Spectral_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " ds_unweighted[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 1],\n", " vmin=-30,\n", " vmax=30,\n", " cmap=\"Spectral_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " 
ds_diff[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 2],\n", " vmin=-0.1,\n", " vmax=0.1,\n", " cmap=\"RdBu_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " axes[i, 0].set_ylabel(season)\n", " axes[i, 1].set_ylabel(\"\")\n", " axes[i, 2].set_ylabel(\"\")\n", "\n", "for ax in axes.flat:\n", " ax.axes.get_xaxis().set_ticklabels([])\n", " ax.axes.get_yaxis().set_ticklabels([])\n", " ax.axes.axis(\"tight\")\n", " ax.set_xlabel(\"\")\n", "\n", "axes[0, 0].set_title(\"Weighted by DPM\")\n", "axes[0, 1].set_title(\"Equal Weighting\")\n", "axes[0, 2].set_title(\"Difference\")\n", "\n", "plt.tight_layout()\n", "\n", "fig.suptitle(\"Seasonal Surface Air Temperature\", fontsize=16, y=1.02)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:40.284898Z", "start_time": "2018-11-28T20:51:40.266406Z" } }, "outputs": [], "source": [ "# Wrap it into a simple function\n", "def season_mean(ds, calendar=\"standard\"):\n", " # Make a DataArray with the number of days in each month, size = len(time)\n", " month_length = ds.time.dt.days_in_month\n", "\n", " # Calculate the weights by grouping by 'time.season'\n", " weights = (\n", " month_length.groupby(\"time.season\") / month_length.groupby(\"time.season\").sum()\n", " )\n", "\n", " # Test that the sum of the weights for each season is 1.0\n", " np.testing.assert_allclose(weights.groupby(\"time.season\").sum().values, np.ones(4))\n", "\n", " # Calculate the weighted average\n", " return (ds * weights).groupby(\"time.season\").sum(dim=\"time\")" ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.12.0/doc/examples/multidimensional-coords.ipynb000066400000000000000000000147421511464676000236450ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Working with Multidimensional Coordinates\n", "\n", "Author: [Ryan Abernathey](https://github.com/rabernat)\n", "\n", "Many datasets have _physical coordinates_ which differ from their _logical coordinates_. Xarray provides several ways to plot and analyze such datasets." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:49:56.068395Z", "start_time": "2018-11-28T20:49:56.035349Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "import cartopy.crs as ccrs\n", "from matplotlib import pyplot as plt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "As an example, consider this dataset from the [xarray-data](https://github.com/pydata/xarray-data) repository." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:13.629720Z", "start_time": "2018-11-28T20:50:13.484542Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.open_dataset(\"rasm\").load()\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In this example, the _logical coordinates_ are `x` and `y`, while the _physical coordinates_ are `xc` and `yc`, which represent the longitudes and latitudes of the data." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:15.836061Z", "start_time": "2018-11-28T20:50:15.768376Z" } }, "outputs": [], "source": [ "print(ds.xc.attrs)\n", "print(ds.yc.attrs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Plotting ##\n", "\n", "Let's examine these coordinate variables by plotting them." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:17.928556Z", "start_time": "2018-11-28T20:50:17.031211Z" } }, "outputs": [], "source": [ "fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(14, 4))\n", "ds.xc.plot(ax=ax1)\n", "ds.yc.plot(ax=ax2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Note that the variables `xc` (longitude) and `yc` (latitude) are two-dimensional scalar fields.\n", "\n", "If we try to plot the data variable `Tair`, by default we get the logical coordinates." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:20.567749Z", "start_time": "2018-11-28T20:50:19.999393Z" } }, "outputs": [], "source": [ "ds.Tair[0].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply [cartopy](https://cartopy.readthedocs.io/stable/) map projections." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:31.131708Z", "start_time": "2018-11-28T20:50:30.444697Z" } }, "outputs": [], "source": [ "plt.figure(figsize=(14, 6))\n", "ax = plt.axes(projection=ccrs.PlateCarree())\n", "ax.set_global()\n", "ds.Tair[0].plot.pcolormesh(\n", " ax=ax, transform=ccrs.PlateCarree(), x=\"xc\", y=\"yc\", add_colorbar=False\n", ")\n", "ax.coastlines()\n", "ax.set_ylim([0, 90]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multidimensional Groupby ##\n", "\n", "The above example allowed us to visualize the data on a regular latitude-longitude grid. But what if we want to do a calculation that involves grouping over one of these physical coordinates (rather than the logical coordinates), for example, calculating the mean temperature at each latitude. This can be achieved using xarray's `groupby` function, which accepts multidimensional variables. By default, `groupby` will use every unique value in the variable, which is probably not what we want. Instead, we can use the `groupby_bins` function to specify the output coordinates of the group. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:43.670463Z", "start_time": "2018-11-28T20:50:43.245501Z" } }, "outputs": [], "source": [ "# define two-degree wide latitude bins\n", "lat_bins = np.arange(0, 91, 2)\n", "# define a label for each bin corresponding to the central latitude\n", "lat_center = np.arange(1, 90, 2)\n", "# group according to those bins and take the mean\n", "Tair_lat_mean = ds.Tair.groupby_bins(\"yc\", lat_bins, labels=lat_center).mean(\n", " dim=xr.ALL_DIMS\n", ")\n", "# plot the result\n", "Tair_lat_mean.plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The resulting coordinate for the `groupby_bins` operation got the `_bins` suffix appended: `yc_bins`. This help us distinguish it from the original multidimensional variable `yc`.\n", "\n", "**Note**: This group-by-latitude approach does not take into account the finite-size geometry of grid cells. It simply bins each value according to the coordinates at the cell center. Xarray has no understanding of grid cells and their geometry. More precise geographic regridding for xarray data is available via the [xesmf](https://xesmf.readthedocs.io) package." ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.12.0/doc/examples/visualization_gallery.ipynb000066400000000000000000000137441511464676000234220ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Visualization Gallery\n", "\n", "This notebook shows common visualization issues encountered in xarray." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import cartopy.crs as ccrs\n", "import matplotlib.pyplot as plt\n", "import xarray as xr\n", "\n", "%matplotlib inline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Load example dataset:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"air_temperature\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple plots and map projections\n", "\n", "Control the map projection parameters on multiple axes\n", "\n", "This example illustrates how to plot multiple maps and control their extent\n", "and aspect ratio.\n", "\n", "For more details see [this discussion](https://github.com/pydata/xarray/issues/1397#issuecomment-299190567) on github." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air.isel(time=[0, 724]) - 273.15\n", "\n", "# This is the map projection we want to plot *onto*\n", "map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n", "\n", "p = air.plot(\n", " transform=ccrs.PlateCarree(), # the data's projection\n", " col=\"time\",\n", " col_wrap=1, # multiplot settings\n", " aspect=ds.dims[\"lon\"] / ds.dims[\"lat\"], # for a sensible figsize\n", " subplot_kws={\"projection\": map_proj},\n", ") # the plot's projection\n", "\n", "# We have to set the map's options on all axes\n", "for ax in p.axes.flat:\n", " ax.coastlines()\n", " ax.set_extent([-160, -30, 5, 75])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Centered colormaps\n", "\n", "Xarray's automatic colormaps choice" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air.isel(time=0)\n", "\n", "f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))\n", "\n", "# The first plot (in kelvins) chooses \"viridis\" and uses the data's min/max\n", "air.plot(ax=ax1, cbar_kwargs={\"label\": \"K\"})\n", "ax1.set_title(\"Kelvins: default\")\n", "ax2.set_xlabel(\"\")\n", "\n", "# The second plot (in celsius) now chooses \"BuRd\" and centers min/max around 0\n", "airc = air - 273.15\n", "airc.plot(ax=ax2, cbar_kwargs={\"label\": \"ยฐC\"})\n", "ax2.set_title(\"Celsius: default\")\n", "ax2.set_xlabel(\"\")\n", "ax2.set_ylabel(\"\")\n", "\n", "# The center doesn't have to be 0\n", "air.plot(ax=ax3, center=273.15, cbar_kwargs={\"label\": \"K\"})\n", "ax3.set_title(\"Kelvins: center=273.15\")\n", "\n", "# Or it can be ignored\n", "airc.plot(ax=ax4, center=False, cbar_kwargs={\"label\": \"ยฐC\"})\n", "ax4.set_title(\"Celsius: center=False\")\n", "ax4.set_ylabel(\"\")\n", "\n", "# Make it nice\n", "plt.tight_layout()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Control the plot's colorbar\n", "\n", "Use ``cbar_kwargs`` keyword to specify the number of ticks.\n", "The ``spacing`` kwarg can be used to draw proportional ticks." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air2d = ds.air.isel(time=500)\n", "\n", "# Prepare the figure\n", "f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n", "\n", "# Irregular levels to illustrate the use of a proportional colorbar\n", "levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]\n", "\n", "# Plot data\n", "air2d.plot(ax=ax1, levels=levels)\n", "air2d.plot(ax=ax2, levels=levels, cbar_kwargs={\"ticks\": levels})\n", "air2d.plot(\n", " ax=ax3, levels=levels, cbar_kwargs={\"ticks\": levels, \"spacing\": \"proportional\"}\n", ")\n", "\n", "# Show plots\n", "plt.tight_layout()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple lines from a 2d DataArray\n", "\n", "Use ``xarray.plot.line`` on a 2d DataArray to plot selections as\n", "multiple lines.\n", "\n", "See ``plotting.multiplelines`` for more details." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air - 273.15 # to celsius\n", "\n", "# Prepare the figure\n", "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n", "\n", "# Selected latitude indices\n", "isel_lats = [10, 15, 20]\n", "\n", "# Temperature vs longitude plot - illustrates the \"hue\" kwarg\n", "air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue=\"lat\")\n", "ax1.set_ylabel(\"ยฐC\")\n", "\n", "# Temperature vs time plot - illustrates the \"x\" and \"add_legend\" kwargs\n", "air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x=\"time\", add_legend=False)\n", "ax2.set_ylabel(\"\")\n", "\n", "# Show\n", "plt.tight_layout()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": {}, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.12.0/doc/examples/weather-data.ipynb000066400000000000000000000212431511464676000213410ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Toy weather data\n", "\n", "Here is an example of how to easily manipulate a toy weather dataset using\n", "xarray and other recommended Python libraries:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import seaborn as sns\n", "\n", "import xarray as xr\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:43:36.127628Z", "start_time": "2020-01-27T15:43:36.081733Z" } }, "outputs": [], "source": [ "np.random.seed(123)\n", "\n", "xr.set_options(display_style=\"html\")\n", "\n", "times = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\n", "annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n", "\n", "base = 10 + 15 * annual_cycle.reshape(-1, 1)\n", "tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\n", "tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n", "\n", "ds = xr.Dataset(\n", " {\n", " \"tmin\": ((\"time\", \"location\"), tmin_values),\n", " \"tmax\": ((\"time\", \"location\"), tmax_values),\n", " },\n", " {\"time\": times, \"location\": [\"IA\", \"IN\", \"IL\"]},\n", ")\n", "\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Examine a dataset with pandas and seaborn" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Convert to a pandas DataFrame" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:14.160297Z", "start_time": "2020-01-27T15:47:14.126738Z" } }, "outputs": [], "source": [ "df = ds.to_dataframe()\n", "df.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:32.682065Z", "start_time": "2020-01-27T15:47:32.652629Z" } }, "outputs": [], "source": [ "df.describe()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Visualize using pandas" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": 
"2020-01-27T15:47:34.617042Z", "start_time": "2020-01-27T15:47:34.282605Z" } }, "outputs": [], "source": [ "ds.mean(dim=\"location\").to_dataframe().plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Visualize using seaborn" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:37.643175Z", "start_time": "2020-01-27T15:47:37.202479Z" } }, "outputs": [], "source": [ "sns.pairplot(df.reset_index(), vars=ds.data_vars)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Probability of freeze by calendar month" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:11.241224Z", "start_time": "2020-01-27T15:48:11.211156Z" } }, "outputs": [], "source": [ "freeze = (ds[\"tmin\"] <= 0).groupby(\"time.month\").mean(\"time\")\n", "freeze" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:13.131247Z", "start_time": "2020-01-27T15:48:12.924985Z" } }, "outputs": [], "source": [ "freeze.to_pandas().plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Monthly averaging" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:08.498259Z", "start_time": "2020-01-27T15:48:08.210890Z" } }, "outputs": [], "source": [ "monthly_avg = ds.resample(time=\"1MS\").mean()\n", "monthly_avg.sel(location=\"IA\").to_dataframe().plot(style=\"s-\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Note that ``MS`` here refers to Month-Start; ``M`` labels Month-End (the last day of the month)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate monthly anomalies" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In climatology, \"anomalies\" refer to the difference between observations and\n", "typical weather for a particular season. Unlike observations, anomalies should\n", "not show any seasonal cycle." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:49:34.855086Z", "start_time": "2020-01-27T15:49:34.406439Z" } }, "outputs": [], "source": [ "climatology = ds.groupby(\"time.month\").mean(\"time\")\n", "anomalies = ds.groupby(\"time.month\") - climatology\n", "anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate standardized monthly anomalies" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can create standardized anomalies where the difference between the\n", "observations and the climatological monthly mean is\n", "divided by the climatological standard deviation." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:50:09.144586Z", "start_time": "2020-01-27T15:50:08.734682Z" } }, "outputs": [], "source": [ "climatology_mean = ds.groupby(\"time.month\").mean(\"time\")\n", "climatology_std = ds.groupby(\"time.month\").std(\"time\")\n", "stand_anomalies = xr.apply_ufunc(\n", " lambda x, m, s: (x - m) / s,\n", " ds.groupby(\"time.month\"),\n", " climatology_mean,\n", " climatology_std,\n", ")\n", "\n", "stand_anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Fill missing values with climatology" ] }, { "cell_type": "markdown", "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:50:46.192491Z", "start_time": "2020-01-27T15:50:46.174554Z" } }, "source": [ "The ``fillna`` method on grouped objects lets you easily fill missing values by group:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:51:40.279299Z", "start_time": "2020-01-27T15:51:40.220342Z" } }, "outputs": [], "source": [ "# throw away the first half of every month\n", "some_missing = ds.tmin.sel(time=ds[\"time.day\"] > 15).reindex_like(ds)\n", "filled = some_missing.groupby(\"time.month\").fillna(climatology.tmin)\n", "both = xr.Dataset({\"some_missing\": some_missing, \"filled\": filled})\n", "both" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:52:11.815769Z", "start_time": "2020-01-27T15:52:11.770825Z" } }, "outputs": [], "source": [ "df = both.sel(time=\"2000\").mean(\"location\").reset_coords(drop=True).to_dataframe()\n", "df.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:52:14.867866Z", "start_time": "2020-01-27T15:52:14.449684Z" } }, "outputs": [], "source": [ "df[[\"filled\", \"some_missing\"]].plot()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.12.0/doc/gallery.rst000066400000000000000000000013531511464676000163030ustar00rootroot00000000000000Gallery ======= Here's a list of examples on how to use xarray. We will be adding more examples soon. Contributions are highly welcomed and appreciated. So, if you are interested in contributing, please consult the :ref:`contributing` guide. Notebook Examples ----------------- .. include:: notebooks-examples-gallery.txt .. toctree:: :maxdepth: 1 :hidden: examples/weather-data examples/monthly-means examples/area_weighted_temperature examples/multidimensional-coords examples/visualization_gallery examples/ROMS_ocean_model examples/ERA5-GRIB-example examples/apply_ufunc_vectorize_1d examples/blank_template External Examples ----------------- .. 
include:: external-examples-gallery.txt xarray-2025.12.0/doc/gallery.yml000066400000000000000000000034641511464676000163010ustar00rootroot00000000000000notebooks-examples: - title: Toy weather data path: examples/weather-data.html thumbnail: _static/thumbnails/toy-weather-data.png - title: Calculating Seasonal Averages from Timeseries of Monthly Means path: examples/monthly-means.html thumbnail: _static/thumbnails/monthly-means.png - title: Compare weighted and unweighted mean temperature path: examples/area_weighted_temperature.html thumbnail: _static/thumbnails/area_weighted_temperature.png - title: Working with Multidimensional Coordinates path: examples/multidimensional-coords.html thumbnail: _static/thumbnails/multidimensional-coords.png - title: Visualization Gallery path: examples/visualization_gallery.html thumbnail: _static/thumbnails/visualization_gallery.png - title: GRIB Data Example path: examples/ERA5-GRIB-example.html thumbnail: _static/thumbnails/ERA5-GRIB-example.png - title: Applying unvectorized functions with apply_ufunc path: examples/apply_ufunc_vectorize_1d.html thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg external-examples: - title: Managing raster data with rioxarray path: https://corteva.github.io/rioxarray/stable/examples/examples.html thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg - title: Xarray and dask on the cloud with Pangeo path: https://gallery.pangeo.io/ thumbnail: https://avatars.githubusercontent.com/u/60833341?s=200&v=4 - title: Xarray with Dask Arrays path: https://examples.dask.org/xarray.html_ thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg - title: Project Pythia Foundations Book path: https://foundations.projectpythia.org/core/xarray.html thumbnail: https://raw.githubusercontent.com/ProjectPythia/projectpythia.github.io/main/portal/_static/images/logos/pythia_logo-blue-btext-twocolor.svg xarray-2025.12.0/doc/gallery/000077500000000000000000000000001511464676000155475ustar00rootroot00000000000000xarray-2025.12.0/doc/gallery/README.txt000066400000000000000000000000361511464676000172440ustar00rootroot00000000000000.. _recipes: Gallery ======= xarray-2025.12.0/doc/gallery/plot_cartopy_facetgrid.py000066400000000000000000000024061511464676000226520ustar00rootroot00000000000000""" ================================== Multiple plots and map projections ================================== Control the map projection parameters on multiple axes This example illustrates how to plot multiple maps and control their extent and aspect ratio. For more details see `this discussion`_ on github. .. 
_this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567 """ import cartopy.crs as ccrs import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air.isel(time=[0, 724]) - 273.15 # This is the map projection we want to plot *onto* map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45) p = air.plot( transform=ccrs.PlateCarree(), # the data's projection col="time", col_wrap=1, # multiplot settings aspect=ds.sizes["lon"] / ds.sizes["lat"], # for a sensible figsize subplot_kws={"projection": map_proj}, # the plot's projection ) # We have to set the map's options on all four axes for ax in p.axes.flat: ax.coastlines() ax.set_extent([-160, -30, 5, 75]) # Without this aspect attributes the maps will look chaotic and the # "extent" attribute above will be ignored ax.set_aspect("equal") plt.show() xarray-2025.12.0/doc/gallery/plot_colorbar_center.py000066400000000000000000000020131511464676000223160ustar00rootroot00000000000000""" ================== Centered colormaps ================== xarray's automatic colormaps choice """ import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air.isel(time=0) f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6)) # The first plot (in kelvins) chooses "viridis" and uses the data's min/max air.plot(ax=ax1, cbar_kwargs={"label": "K"}) ax1.set_title("Kelvins: default") ax2.set_xlabel("") # The second plot (in celsius) now chooses "BuRd" and centers min/max around 0 airc = air - 273.15 airc.plot(ax=ax2, cbar_kwargs={"label": "ยฐC"}) ax2.set_title("Celsius: default") ax2.set_xlabel("") ax2.set_ylabel("") # The center doesn't have to be 0 air.plot(ax=ax3, center=273.15, cbar_kwargs={"label": "K"}) ax3.set_title("Kelvins: center=273.15") # Or it can be ignored airc.plot(ax=ax4, center=False, cbar_kwargs={"label": "ยฐC"}) ax4.set_title("Celsius: center=False") ax4.set_ylabel("") # Make it nice plt.tight_layout() plt.show() xarray-2025.12.0/doc/gallery/plot_control_colorbar.py000066400000000000000000000015211511464676000225210ustar00rootroot00000000000000""" =========================== Control the plot's colorbar =========================== Use ``cbar_kwargs`` keyword to specify the number of ticks. The ``spacing`` kwarg can be used to draw proportional ticks. """ import matplotlib.pyplot as plt import xarray as xr # Load the data air_temp = xr.tutorial.load_dataset("air_temperature") air2d = air_temp.air.isel(time=500) # Prepare the figure f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4)) # Irregular levels to illustrate the use of a proportional colorbar levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340] # Plot data air2d.plot(ax=ax1, levels=levels) air2d.plot(ax=ax2, levels=levels, cbar_kwargs={"ticks": levels}) air2d.plot( ax=ax3, levels=levels, cbar_kwargs={"ticks": levels, "spacing": "proportional"} ) # Show plots plt.tight_layout() plt.show() xarray-2025.12.0/doc/gallery/plot_lines_from_2d.py000066400000000000000000000016141511464676000217030ustar00rootroot00000000000000""" ================================== Multiple lines from a 2d DataArray ================================== Use :py:func:`xarray.plot.line` on a 2d DataArray to plot selections as multiple lines. See :ref:`plotting.multiplelines` for more details. 
""" import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air - 273.15 # to celsius # Prepare the figure f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True) # Selected latitude indices isel_lats = [10, 15, 20] # Temperature vs longitude plot - illustrates the "hue" kwarg air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue="lat") ax1.set_ylabel("ยฐC") # Temperature vs time plot - illustrates the "x" and "add_legend" kwargs air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x="time", add_legend=False) ax2.set_ylabel("") # Show plt.tight_layout() plt.show() xarray-2025.12.0/doc/get-help/000077500000000000000000000000001511464676000156155ustar00rootroot00000000000000xarray-2025.12.0/doc/get-help/faq.rst000066400000000000000000000517761511464676000171360ustar00rootroot00000000000000.. _faq: Frequently Asked Questions ========================== .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Your documentation keeps mentioning pandas. What is pandas? ----------------------------------------------------------- pandas_ is a very popular data analysis package in Python with wide usage in many fields. Our API is heavily inspired by pandas โ€” this is why there are so many references to pandas. .. _pandas: https://pandas.pydata.org Do I need to know pandas to use xarray? --------------------------------------- No! Our API is heavily inspired by pandas so while knowing pandas will let you become productive more quickly, knowledge of pandas is not necessary to use xarray. Should I use xarray instead of pandas? -------------------------------------- It's not an either/or choice! xarray provides robust support for converting back and forth between the tabular data-structures of pandas and its own multi-dimensional data-structures. That said, you should only bother with xarray if some aspect of data is fundamentally multi-dimensional. If your data is unstructured or one-dimensional, pandas is usually the right choice: it has better performance for common operations such as ``groupby`` and you'll find far more usage examples online. Why is pandas not enough? ------------------------- pandas is a fantastic library for analysis of low-dimensional labelled data - if it can be sensibly described as "rows and columns", pandas is probably the right choice. However, sometimes we want to use higher dimensional arrays (`ndim > 2`), or arrays for which the order of dimensions (e.g., columns vs rows) shouldn't really matter. For example, the images of a movie can be natively represented as an array with four dimensions: time, row, column and color. pandas has historically supported N-dimensional panels, but deprecated them in version 0.20 in favor of xarray data structures. There are now built-in methods on both sides to convert between pandas and xarray, allowing for more focused development effort. Xarray objects have a much richer model of dimensionality - if you were using Panels: - You need to create a new factory type for each dimensionality. - You can't do math between NDPanels with different dimensionality. - Each dimension in an NDPanel has a name (e.g., 'labels', 'items', 'major_axis', etc.) but the dimension names refer to order, not their meaning. You can't specify an operation as to be applied along the "time" axis. - You often have to manually convert collections of pandas arrays (Series, DataFrames, etc) to have the same number of dimensions. 
In contrast, this sort of data structure fits very naturally in an xarray ``Dataset``. You can :ref:`read about switching from Panels to xarray here `. pandas gets a lot of things right, but many science, engineering and complex analytics use cases need fully multi-dimensional data structures. How do xarray data structures differ from those found in pandas? ---------------------------------------------------------------- The main distinguishing feature of xarray's ``DataArray`` over labeled arrays in pandas is that dimensions can have names (e.g., "time", "latitude", "longitude"). Names are much easier to keep track of than axis numbers, and xarray uses dimension names for indexing, aggregation and broadcasting. Not only can you write ``x.sel(time='2000-01-01')`` and ``x.mean(dim='time')``, but operations like ``x - x.mean(dim='time')`` always work, no matter the order of the "time" dimension. You never need to reshape arrays (e.g., with ``np.newaxis``) to align them for arithmetic operations in xarray. Why don't aggregations return Python scalars? --------------------------------------------- Xarray tries hard to be self-consistent: operations on a ``DataArray`` (resp. ``Dataset``) return another ``DataArray`` (resp. ``Dataset``) object. In particular, operations returning scalar values (e.g. indexing or aggregations like ``mean`` or ``sum`` applied to all axes) will also return xarray objects. Unfortunately, this means we sometimes have to explicitly cast our results from xarray when using them in other libraries. As an illustration, the following code fragment .. jupyter-execute:: arr = xr.DataArray([1, 2, 3]) pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}) does not yield the pandas DataFrame we expected. We need to specify the type conversion ourselves: .. jupyter-execute:: pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}, dtype=float) Alternatively, we could use the ``item`` method or the ``float`` constructor to convert values one at a time .. jupyter-execute:: pd.Series({"x": arr[0].item(), "mean": float(arr.mean())}) .. _approach to metadata: What is your approach to metadata? ---------------------------------- We are firm believers in the power of labeled data! In addition to dimensions and coordinates, xarray supports arbitrary metadata in the form of global (Dataset) and variable specific (DataArray) attributes (``attrs``). Automatic interpretation of labels is powerful but also reduces flexibility. With xarray, we draw a firm line between labels that the library understands (``dims`` and ``coords``) and labels for users and user code (``attrs``). For example, we do not automatically interpret and enforce units or `CF conventions`_. (An exception is serialization to and from netCDF files.) .. _CF conventions: https://cfconventions.org/latest.html An implication of this choice is that we do not propagate ``attrs`` through most operations unless explicitly flagged (some methods have a ``keep_attrs`` option, and there is a global flag, accessible with :py:func:`xarray.set_options`, for setting this to be always True or False). Similarly, xarray does not check for conflicts between ``attrs`` when combining arrays and datasets, unless explicitly requested with the option ``compat='identical'``. The guiding principle is that metadata should not be allowed to get in the way. In general xarray uses the capabilities of the backends for reading and writing attributes. That has some implications on roundtripping. 
One example for such inconsistency is that size-1 lists will roundtrip as single element (for netcdf4 backends). What other netCDF related Python libraries should I know about? --------------------------------------------------------------- `netCDF4-python`__ provides a lower level interface for working with netCDF and OpenDAP datasets in Python. We use netCDF4-python internally in xarray, and have contributed a number of improvements and fixes upstream. Xarray does not yet support all of netCDF4-python's features, such as modifying files on-disk. __ https://unidata.github.io/netcdf4-python/ Iris_ (supported by the UK Met office) provides similar tools for in- memory manipulation of labeled arrays, aimed specifically at weather and climate data needs. Indeed, the Iris :py:class:`~iris.cube.Cube` was direct inspiration for xarray's :py:class:`~xarray.DataArray`. Xarray and Iris take very different approaches to handling metadata: Iris strictly interprets `CF conventions`_. Iris particularly shines at mapping, thanks to its integration with Cartopy_. .. _Iris: https://scitools-iris.readthedocs.io/en/stable/ .. _Cartopy: https://cartopy.readthedocs.io/stable/ We think the design decisions we have made for xarray (namely, basing it on pandas) make it a faster and more flexible data analysis tool. That said, Iris has some great domain specific functionality, and there are dedicated methods for converting back and forth between xarray and Iris. See :ref:`Reading and Writing Iris data ` for more details. What other projects leverage xarray? ------------------------------------ See section :ref:`ecosystem`. How do I open format X file as an xarray dataset? ------------------------------------------------- To open format X file in xarray, you need to know the `format of the data `_ you want to read. If the format is supported, you can use the appropriate function provided by xarray. The following table provides functions used for different file formats in xarray, as well as links to other packages that can be used: .. csv-table:: :header: "File Format", "Open via", " Related Packages" :widths: 15, 45, 15 "NetCDF (.nc, .nc4, .cdf)","``open_dataset()`` OR ``open_mfdataset()``", "`netCDF4 `_, `cdms2 `_" "HDF5 (.h5, .hdf5)","``open_dataset()`` OR ``open_mfdataset()``", "`h5py `_, `pytables `_ " "GRIB (.grb, .grib)", "``open_dataset()``", "`cfgrib `_, `pygrib `_" "CSV (.csv)","``open_dataset()``", "`pandas`_ , `dask `_" "Zarr (.zarr)","``open_dataset()`` OR ``open_mfdataset()``", "`zarr `_ , `dask `_ " .. _pandas: https://pandas.pydata.org If you are unable to open a file in xarray: - You should check that you are having all necessary dependencies installed, including any optional dependencies (like scipy, h5netcdf, cfgrib etc as mentioned below) that may be required for the specific use case. - If all necessary dependencies are installed but the file still cannot be opened, you must check if there are any specialized backends available for the specific file format you are working with. You can consult the xarray documentation or the documentation for the file format to determine if a specialized backend is required, and if so, how to install and use it with xarray. - If the file format is not supported by xarray or any of its available backends, the user may need to use a different library or tool to work with the file. You can consult the documentation for the file format to determine which tools are recommended for working with it. 
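If you are not sure which backends are currently registered in your environment (and therefore which ``engine`` values you can pass), a quick check is to list them. A minimal sketch, using the :py:func:`xarray.backends.list_engines` function described in more detail below:

.. code:: python

    import xarray as xr

    # Dictionary mapping engine names (e.g. "netcdf4", "h5netcdf", "zarr")
    # to their BackendEntrypoint objects; engines whose optional dependencies
    # are not installed will typically not appear here.
    print(xr.backends.list_engines())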
Xarray provides a default engine to read files, which is usually determined by the file extension or type. If you don't specify the engine, xarray will try to guess it based on the file extension or type, and may fall back to a different engine if it cannot determine the correct one. Therefore, it's good practice to always specify the engine explicitly, to ensure that the correct backend is used, especially when working with complex data formats or non-standard file extensions.

:py:func:`xarray.backends.list_engines` is a function in xarray that returns a dictionary of available engines and their BackendEntrypoint objects.

You can use the ``engine`` argument to specify the backend when calling ``open_dataset()`` or other reading functions in xarray, as shown below:

NetCDF
~~~~~~

If you are reading a netCDF file with a ".nc" extension, the default engine is ``netcdf4``. However, if you have files with non-standard extensions or the file format is ambiguous, specify the engine explicitly to ensure that the correct backend is used.

Use :py:func:`~xarray.open_dataset` to open a NetCDF file and return an xarray Dataset object.

.. code:: python

    import xarray as xr

    # use xarray to open the file and return an xarray.Dataset object using netcdf4 engine
    ds = xr.open_dataset("/path/to/my/file.nc", engine="netcdf4")

    # Print Dataset object
    print(ds)

    # use xarray to open the file and return an xarray.Dataset object using scipy engine
    ds = xr.open_dataset("/path/to/my/file.nc", engine="scipy")

We recommend installing ``scipy`` via conda:

::

    conda install scipy

HDF5
~~~~

Use :py:func:`~xarray.open_dataset` to open an HDF5 file and return an xarray Dataset object.

You should specify the ``engine`` keyword argument when reading HDF5 files with xarray, as there are multiple backends that can be used to read HDF5 files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.

To read HDF5 files with xarray, you can use the :py:func:`~xarray.open_dataset` function from the ``h5netcdf`` backend, as follows:

.. code:: python

    import xarray as xr

    # Open HDF5 file as an xarray Dataset
    ds = xr.open_dataset("path/to/hdf5/file.hdf5", engine="h5netcdf")

    # Print Dataset object
    print(ds)

We recommend installing the ``h5netcdf`` library via conda:

::

    conda install -c conda-forge h5netcdf

If you want to use the ``netCDF4`` backend to read a file with a ".h5" extension (which is typically associated with the HDF5 file format), you can specify the engine argument as follows:

.. code:: python

    ds = xr.open_dataset("path/to/file.h5", engine="netcdf4")

GRIB
~~~~

You should specify the ``engine`` keyword argument when reading GRIB files with xarray, as there are multiple backends that can be used to read GRIB files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.

Use the :py:func:`~xarray.open_dataset` function from the ``cfgrib`` package to open a GRIB file as an xarray Dataset.

..
code:: python import xarray as xr # define the path to your GRIB file and the engine you want to use to open the file # use ``open_dataset()`` to open the file with the specified engine and return an xarray.Dataset object ds = xr.open_dataset("path/to/your/file.grib", engine="cfgrib") # Print Dataset object print(ds) We recommend installing ``cfgrib`` via conda using the below given code: :: conda install -c conda-forge cfgrib CSV ~~~ By default, xarray uses the built-in ``pandas`` library to read CSV files. In general, you don't need to specify the engine keyword argument when reading CSV files with xarray, as the default ``pandas`` engine is usually sufficient for most use cases. If you are working with very large CSV files or if you need to perform certain types of data processing that are not supported by the default ``pandas`` engine, you may want to use a different backend. In such cases, you can specify the engine argument when reading the CSV file with xarray. To read CSV files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the CSV file as follows: .. code:: python import xarray as xr import pandas as pd # Load CSV file into pandas DataFrame using the "c" engine df = pd.read_csv("your_file.csv", engine="c") # Convert `:py:func:pandas` DataFrame to xarray.Dataset ds = xr.Dataset.from_dataframe(df) # Prints the resulting xarray dataset print(ds) Zarr ~~~~ When opening a Zarr dataset with xarray, the ``engine`` is automatically detected based on the file extension or the type of input provided. If the dataset is stored in a directory with a ".zarr" extension, xarray will automatically use the "zarr" engine. To read zarr files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the zarr file as follows: .. code:: python import xarray as xr # use xarray to open the file and return an xarray.Dataset object using zarr engine ds = xr.open_dataset("path/to/your/file.zarr", engine="zarr") # Print Dataset object print(ds) We recommend installing ``zarr`` via conda using the below given code: :: conda install -c conda-forge zarr There may be situations where you need to specify the engine manually using the ``engine`` keyword argument. For example, if you have a Zarr dataset stored in a file with a different extension (e.g., ".npy"), you will need to specify the engine as "zarr" explicitly when opening the dataset. Some packages may have additional functionality beyond what is shown here. You can refer to the documentation for each package for more information. How does xarray handle missing values? -------------------------------------- **xarray can handle missing values using ``np.nan``** - ``np.nan`` is used to represent missing values in labeled arrays and datasets. It is a commonly used standard for representing missing or undefined numerical data in scientific computing. ``np.nan`` is a constant value in NumPy that represents "Not a Number" or missing values. - Most of xarray's computation methods are designed to automatically handle missing values appropriately. For example, when performing operations like addition or multiplication on arrays that contain missing values, xarray will automatically ignore the missing values and only perform the operation on the valid data. This makes it easy to work with data that may contain missing or undefined values without having to worry about handling them explicitly. 
- Many of xarray's `aggregation methods `_, such as ``sum()``, ``mean()``, ``min()``, ``max()``, and others, have a skipna argument that controls whether missing values (represented by NaN) should be skipped (True) or treated as NaN (False) when performing the calculation. By default, ``skipna`` is set to ``True``, so missing values are ignored when computing the result. However, you can set ``skipna`` to ``False`` if you want missing values to be treated as NaN and included in the calculation. - On `plotting `_ an xarray dataset or array that contains missing values, xarray will simply leave the missing values as blank spaces in the plot. - We have a set of `methods `_ for manipulating missing and filling values. How should I cite xarray? ------------------------- If you are using xarray and would like to cite it in academic publication, we would certainly appreciate it. We recommend two citations. 1. At a minimum, we recommend citing the xarray overview journal article, published in the Journal of Open Research Software. - Hoyer, S. & Hamman, J., (2017). xarray: N-D labeled Arrays and Datasets in Python. Journal of Open Research Software. 5(1), p.10. DOI: https://doi.org/10.5334/jors.148 Hereโ€™s an example of a BibTeX entry:: @article{hoyer2017xarray, title = {xarray: {N-D} labeled arrays and datasets in {Python}}, author = {Hoyer, S. and J. Hamman}, journal = {Journal of Open Research Software}, volume = {5}, number = {1}, year = {2017}, publisher = {Ubiquity Press}, doi = {10.5334/jors.148}, url = {https://doi.org/10.5334/jors.148} } 2. You may also want to cite a specific version of the xarray package. We provide a `Zenodo citation and DOI `_ for this purpose: .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.598201.svg :target: https://doi.org/10.5281/zenodo.598201 An example BibTeX entry:: @misc{xarray_v0_8_0, author = {Stephan Hoyer and Clark Fitzgerald and Joe Hamman and others}, title = {xarray: v0.8.0}, month = aug, year = 2016, doi = {10.5281/zenodo.59499}, url = {https://doi.org/10.5281/zenodo.59499} } .. _api-stability: How stable is Xarray's API? --------------------------- Xarray tries very hard to maintain backwards compatibility in our :ref:`api` between released versions. Whilst we do occasionally make breaking changes in order to improve the library, we `signpost changes `_ with ``DeprecationWarnings`` for many releases in advance. (An exception is bugs - whose behaviour we try to fix as soon as we notice them.) Our `test-driven development practices `_ helps to ensure any accidental regressions are caught. This philosophy applies to everything in the `public API `_. .. _public-api: What parts of xarray are considered public API? ----------------------------------------------- As a rule, only functions/methods documented in our :ref:`api` are considered part of xarray's public API. Everything else (in particular, everything in ``xarray.core`` that is not also exposed in the top level ``xarray`` namespace) is considered a private implementation detail that may change at any time. Objects that exist to facilitate xarray's fluent interface on ``DataArray`` and ``Dataset`` objects are a special case. For convenience, we document them in the API docs, but only their methods and the ``DataArray``/``Dataset`` methods/properties to construct them (e.g., ``.plot()``, ``.groupby()``, ``.str``) are considered public API. 
Constructors and other details of the internal classes used to implemented them (i.e., ``xarray.plot.plotting._PlotMethods``, ``xarray.core.groupby.DataArrayGroupBy``, ``xarray.core.accessor_str.StringAccessor``) are not. xarray-2025.12.0/doc/get-help/help-diagram.rst000066400000000000000000000107251511464676000207060ustar00rootroot00000000000000Getting Help ============ Navigating the wealth of resources available for Xarray can be overwhelming. We've created this flow chart to help guide you towards the best way to get help, depending on what you're working towards. Also be sure to check out our :ref:`faq`. and :ref:`howdoi` pages for solutions to common questions. A major strength of Xarray is in the user community. Sometimes you might not yet have a concrete question but would simply like to connect with other Xarray users. We have a few accounts on different social platforms for that! :ref:`socials`. We look forward to hearing from you! Help Flowchart -------------- .. _comment: mermaid Flowcharg "link" text gets secondary color background, SVG icon fill gets primary color .. raw:: html .. mermaid:: :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}} :alt: Flowchart illustrating the different ways to access help using or contributing to Xarray. flowchart TD intro[Welcome to Xarray! How can we help?]:::quesNodefmt usage([fa:fa-chalkboard-user Xarray Tutorial fab:fa-readme Xarray Docs fab:fa-stack-overflow Stack Exchange fab:fa-google Ask Google fa:fa-robot Ask AI ChatBot]):::ansNodefmt extensions([Extension docs: fab:fa-readme Dask fab:fa-readme Rioxarray]):::ansNodefmt help([fab:fa-github Xarray Discussions fab:fa-discord Xarray Discord fa:fa-globe Pangeo Discourse]):::ansNodefmt bug([Let us know: fab:fa-github Xarray Issues]):::ansNodefmt contrib([fa:fa-book-open Xarray Contributor's Guide]):::ansNodefmt pr([fab:fa-github Pull Request]):::ansNodefmt dev([fab:fa-github Add PR Comment fa:fa-users Attend Developer's Meeting ]):::ansNodefmt report[Thanks for letting us know!]:::quesNodefmt merged[fa:fa-hands-clapping Thanks for contributing to Xarray!]:::quesNodefmt intro -->|How do I use Xarray?| usage usage -->|"With extensions (like Dask, Rioxarray, etc.)"| extensions usage -->|I still have questions or could use some guidance | help intro -->|I think I found a bug| bug bug contrib bug -->|I just wanted to tell you| report bug<-->|I'd like to fix the bug!| contrib pr -->|my PR was approved| merged intro -->|I wish Xarray could...| bug pr <-->|my PR is quiet| dev contrib -->pr classDef quesNodefmt font-size:20pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3 classDef ansNodefmt font-size:18pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3 linkStyle default font-size:16pt,stroke-width:4 Flowchart links --------------- - `Xarray Tutorials `__ - `Xarray Docs `__ - `Stack Exchange `__ - `Xarray Discussions `__ - `Xarray Discord `__ - `Xarray Office Hours `__ - `Pangeo Discourse `__ - `Xarray Issues `__ - :ref:`contributing` - :ref:`developers-meeting` .. toctree:: :maxdepth: 1 :hidden: faq howdoi socials xarray-2025.12.0/doc/get-help/howdoi.rst000066400000000000000000000110341511464676000176370ustar00rootroot00000000000000.. currentmodule:: xarray .. _howdoi: How do I ... ============ .. list-table:: :header-rows: 1 :widths: 40 60 * - How do I... 
- Solution * - add a DataArray to my dataset as a new variable - ``my_dataset[varname] = my_dataArray`` or :py:meth:`Dataset.assign` (see also :ref:`dictionary_like_methods`) * - add variables from other datasets to my dataset - :py:meth:`Dataset.merge` * - add a new dimension and/or coordinate - :py:meth:`DataArray.expand_dims`, :py:meth:`Dataset.expand_dims` * - add a new coordinate variable - :py:meth:`DataArray.assign_coords` * - change a data variable to a coordinate variable - :py:meth:`Dataset.set_coords` * - change the order of dimensions - :py:meth:`DataArray.transpose`, :py:meth:`Dataset.transpose` * - reshape dimensions - :py:meth:`DataArray.stack`, :py:meth:`Dataset.stack`, :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct` * - remove a variable from my object - :py:meth:`Dataset.drop_vars`, :py:meth:`DataArray.drop_vars` * - remove dimensions of length 1 or 0 - :py:meth:`DataArray.squeeze`, :py:meth:`Dataset.squeeze` * - remove all variables with a particular dimension - :py:meth:`Dataset.drop_dims` * - convert non-dimension coordinates to data variables or remove them - :py:meth:`DataArray.reset_coords`, :py:meth:`Dataset.reset_coords` * - rename a variable, dimension or coordinate - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename`, :py:meth:`Dataset.rename_vars`, :py:meth:`Dataset.rename_dims`, * - convert a DataArray to Dataset or vice versa - :py:meth:`DataArray.to_dataset`, :py:meth:`Dataset.to_dataarray`, :py:meth:`Dataset.to_stacked_array`, :py:meth:`DataArray.to_unstacked_dataset` * - extract variables that have certain attributes - :py:meth:`Dataset.filter_by_attrs` * - extract the underlying array (e.g. NumPy or Dask arrays) - :py:attr:`DataArray.data` * - convert to and extract the underlying NumPy array - :py:attr:`DataArray.to_numpy` * - convert to a pandas DataFrame - :py:attr:`Dataset.to_dataframe` * - sort values - :py:attr:`Dataset.sortby` * - find out if my xarray object is wrapping a Dask Array - :py:func:`dask.is_dask_collection` * - know how much memory my object requires - :py:attr:`DataArray.nbytes`, :py:attr:`Dataset.nbytes` * - Get axis number for a dimension - :py:meth:`DataArray.get_axis_num` * - convert a possibly irregularly sampled timeseries to a regularly sampled timeseries - :py:meth:`DataArray.resample`, :py:meth:`Dataset.resample` (see :ref:`resampling` for more) * - apply a function on all data variables in a Dataset - :py:meth:`Dataset.map` * - write xarray objects with complex values to a netCDF file - :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="h5netcdf"`` or :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="netCDF4", auto_complex=True`` * - make xarray objects look like other xarray objects - :py:func:`~xarray.ones_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.full_like`, :py:meth:`Dataset.reindex_like`, :py:meth:`Dataset.interp_like`, :py:meth:`Dataset.broadcast_like`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`, :py:meth:`DataArray.broadcast_like` * - Make sure my datasets have values at the same coordinate locations - ``xr.align(dataset_1, dataset_2, join="exact")`` * - replace NaNs with other values - :py:meth:`Dataset.fillna`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, :py:meth:`Dataset.interpolate_na`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.interpolate_na` * - extract the year, month, day or similar from a DataArray of time 
values - ``obj.dt.month`` for example where ``obj`` is a :py:class:`~xarray.DataArray` containing ``datetime64`` or ``cftime`` values. See :ref:`dt_accessor` for more. * - round off time values to a specified frequency - ``obj.dt.ceil``, ``obj.dt.floor``, ``obj.dt.round``. See :ref:`dt_accessor` for more. * - make a mask that is ``True`` where an object contains any of the values in an array - :py:meth:`Dataset.isin`, :py:meth:`DataArray.isin` * - Index using a boolean mask - :py:meth:`Dataset.query`, :py:meth:`DataArray.query`, :py:meth:`Dataset.where`, :py:meth:`DataArray.where` * - preserve ``attrs`` during (most) xarray operations - ``xr.set_options(keep_attrs=True)`` xarray-2025.12.0/doc/get-help/socials.rst000066400000000000000000000005111511464676000200010ustar00rootroot00000000000000.. _socials: Social Media ============ Xarray is active on several social media platforms. We use these platforms to share updates and connect with the user community. - `Discord `__ - `Bluesky `__ - `Twitter(X) `__ xarray-2025.12.0/doc/getting-started-guide/000077500000000000000000000000001511464676000203105ustar00rootroot00000000000000xarray-2025.12.0/doc/getting-started-guide/index.rst000066400000000000000000000005371511464676000221560ustar00rootroot00000000000000################ Getting Started ################ The getting started guide aims to get you using Xarray productively as quickly as possible. It is designed as an entry point for new users, and it provided an introduction to Xarray's main concepts. .. toctree:: :maxdepth: 2 why-xarray installing quick-overview tutorials-and-videos xarray-2025.12.0/doc/getting-started-guide/installing.rst000066400000000000000000000144421511464676000232130ustar00rootroot00000000000000.. _installing: Installation ============ Required dependencies --------------------- - Python (3.11 or later) - `numpy `__ (1.26 or later) - `packaging `__ (24.1 or later) - `pandas `__ (2.2 or later) .. _optional-dependencies: Optional dependencies --------------------- .. note:: If you are using pip to install xarray, optional dependencies can be installed by specifying *extras*. :ref:`installation-instructions` for both pip and conda are given below. For netCDF and IO ~~~~~~~~~~~~~~~~~ - `netCDF4 `__: recommended if you want to use xarray for reading or writing netCDF files - `scipy `__: used as a fallback for reading/writing netCDF3 - `pydap `__: used as a fallback for accessing OPeNDAP - `h5netcdf `__: an alternative library for reading and writing netCDF4 files that does not use the netCDF-C libraries - `zarr `__: for chunked, compressed, N-dimensional arrays. - `cftime `__: recommended if you want to encode/decode datetimes for non-standard calendars or dates before year 1678 or after year 2262. - `iris `__: for conversion to and from iris' Cube objects For accelerating xarray ~~~~~~~~~~~~~~~~~~~~~~~ - `scipy `__: necessary to enable the interpolation features for xarray objects - `bottleneck `__: speeds up NaN-skipping and rolling window aggregations by a large factor - `numbagg `_: for exponential rolling window operations For parallel computing ~~~~~~~~~~~~~~~~~~~~~~ - `dask.array `__: required for :ref:`dask`. 
For plotting ~~~~~~~~~~~~ - `matplotlib `__: required for :ref:`plotting` - `cartopy `__: recommended for :ref:`plot-maps` - `seaborn `__: for better color palettes - `nc-time-axis `__: for plotting cftime.datetime objects Alternative data containers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - `sparse `_: for sparse arrays - `pint `_: for units of measure - Any numpy-like objects that support `NEP-18 `_. Note that while such libraries theoretically should work, they are untested. Integration tests are in the process of being written for individual libraries. .. _mindeps_policy: Minimum dependency versions --------------------------- Xarray adopts a rolling policy regarding the minimum supported version of its dependencies: - **Python:** 30 months (`NEP-29 `_) - **numpy:** 18 months (`NEP-29 `_) - **all other libraries:** 12 months This means the latest minor (X.Y) version from N months prior. Patch versions (x.y.Z) are not pinned, and only the latest available at the moment of publishing the xarray release is guaranteed to work. You can see the actual minimum tested versions: ``_ .. _installation-instructions: Instructions ------------ Xarray itself is a pure Python package, but its dependencies are not. The easiest way to get everything installed is to use conda_. To install xarray with its recommended dependencies using the conda command line tool:: $ conda install -c conda-forge xarray dask netCDF4 bottleneck .. _conda: https://docs.conda.io If you require other :ref:`optional-dependencies` add them to the line above. We recommend using the community maintained `conda-forge `__ channel, as some of the dependencies are difficult to build. New releases may also appear in conda-forge before being updated in the default channel. If you don't use conda, be sure you have the required dependencies (numpy and pandas) installed first. Then, install xarray with pip:: $ python -m pip install xarray We also maintain other dependency sets for different subsets of functionality:: $ python -m pip install "xarray[io]" # Install optional dependencies for handling I/O $ python -m pip install "xarray[accel]" # Install optional dependencies for accelerating xarray $ python -m pip install "xarray[parallel]" # Install optional dependencies for dask arrays $ python -m pip install "xarray[viz]" # Install optional dependencies for visualization $ python -m pip install "xarray[complete]" # Install all the above The above commands should install most of the `optional dependencies`_. However, some packages which are either not listed on PyPI or require extra installation steps are excluded. To know which dependencies would be installed, take a look at the ``[project.optional-dependencies]`` section in ``pyproject.toml``: .. literalinclude:: ../../pyproject.toml :language: toml :start-at: [project.optional-dependencies] :end-before: [build-system] Development versions -------------------- To install the most recent development version, install from github:: $ python -m pip install git+https://github.com/pydata/xarray.git or from TestPyPI:: $ python -m pip install --index-url https://test.pypi.org/simple --extra-index-url https://pypi.org/simple --pre xarray Testing ------- To run the test suite after installing xarray, install (via pypi or conda) `py.test `__ and run ``pytest`` in the root directory of the xarray repository. Performance Monitoring ~~~~~~~~~~~~~~~~~~~~~~ .. 
TODO: uncomment once we have a working setup see https://github.com/pydata/xarray/pull/5066 A fixed-point performance monitoring of (a part of) our code can be seen on `this page `__. To run these benchmark tests in a local machine, first install - `airspeed-velocity `__: a tool for benchmarking Python packages over their lifetime. and run ``asv run # this will install some conda environments in ./.asv/envs`` xarray-2025.12.0/doc/getting-started-guide/quick-overview.rst000066400000000000000000000302711511464676000240250ustar00rootroot00000000000000############## Quick overview ############## Here are some quick examples of what you can do with :py:class:`xarray.DataArray` objects. Everything is explained in much more detail in the rest of the documentation. To begin, import numpy, pandas and xarray using their customary abbreviations: .. jupyter-execute:: import numpy as np import pandas as pd import xarray as xr Create a DataArray ------------------ You can make a DataArray from scratch by supplying data in the form of a numpy array or list, with optional *dimensions* and *coordinates*: .. jupyter-execute:: data = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]}) data In this case, we have generated a 2D array, assigned the names *x* and *y* to the two dimensions respectively and associated two *coordinate labels* '10' and '20' with the two locations along the x dimension. If you supply a pandas :py:class:`~pandas.Series` or :py:class:`~pandas.DataFrame`, metadata is copied directly: .. jupyter-execute:: xr.DataArray(pd.Series(range(3), index=list("abc"), name="foo")) Here are the key properties for a ``DataArray``: .. jupyter-execute:: # like in pandas, values is a numpy array that you can modify in-place data.values data.dims data.coords # you can use this dictionary to store arbitrary metadata data.attrs Indexing -------- Xarray supports four kinds of indexing. Since we have assigned coordinate labels to the x dimension we can use label-based indexing along that dimension just like pandas. The four examples below all yield the same result (the value at ``x=10``) but at varying levels of convenience and intuitiveness. .. jupyter-execute:: # positional and by integer label, like numpy data[0, :] # loc or "location": positional and coordinate label, like pandas data.loc[10] # isel or "integer select": by dimension name and integer label data.isel(x=0) # sel or "select": by dimension name and coordinate label data.sel(x=10) Unlike positional indexing, label-based indexing frees us from having to know how our array is organized. All we need to know are the dimension name and the label we wish to index i.e. ``data.sel(x=10)`` works regardless of whether ``x`` is the first or second dimension of the array and regardless of whether ``10`` is the first or second element of ``x``. We have already told xarray that x is the first dimension when we created ``data``: xarray keeps track of this so we don't have to. For more, see :ref:`indexing`. Attributes ---------- While you're setting up your DataArray, it's often a good idea to set metadata attributes. A useful choice is to set ``data.attrs['long_name']`` and ``data.attrs['units']`` since xarray will use these, if present, to automatically label your plots. These special names were chosen following the `NetCDF Climate and Forecast (CF) Metadata Conventions `_. ``attrs`` is just a Python dictionary, so you can assign anything you wish. .. 
jupyter-execute:: data.attrs["long_name"] = "random velocity" data.attrs["units"] = "metres/sec" data.attrs["description"] = "A random variable created as an example." data.attrs["random_attribute"] = 123 data.attrs # you can add metadata to coordinates too data.x.attrs["units"] = "x units" Computation ----------- Data arrays work very similarly to numpy ndarrays: .. jupyter-execute:: data + 10 np.sin(data) # transpose data.T data.sum() However, aggregation operations can use dimension names instead of axis numbers: .. jupyter-execute:: data.mean(dim="x") Arithmetic operations broadcast based on dimension name. This means you don't need to insert dummy dimensions for alignment: .. jupyter-execute:: a = xr.DataArray(np.random.randn(3), [data.coords["y"]]) b = xr.DataArray(np.random.randn(4), dims="z") a b a + b It also means that in most cases you do not need to worry about the order of dimensions: .. jupyter-execute:: data - data.T Operations also align based on index labels: .. jupyter-execute:: data[:-1] - data[:1] For more, see :ref:`compute`. GroupBy ------- Xarray supports grouped operations using a very similar API to pandas (see :ref:`groupby`): .. jupyter-execute:: labels = xr.DataArray(["E", "F", "E"], [data.coords["y"]], name="labels") labels data.groupby(labels).mean("y") data.groupby(labels).map(lambda x: x - x.min()) Plotting -------- Visualizing your datasets is quick and convenient: .. jupyter-execute:: data.plot() Note the automatic labeling with names and units. Our effort in adding metadata attributes has paid off! Many aspects of these figures are customizable: see :ref:`plotting`. pandas ------ Xarray objects can be easily converted to and from pandas objects using the :py:meth:`~xarray.DataArray.to_series`, :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~pandas.DataFrame.to_xarray` methods: .. jupyter-execute:: series = data.to_series() series # convert back series.to_xarray() Datasets -------- :py:class:`xarray.Dataset` is a dict-like container of aligned ``DataArray`` objects. You can think of it as a multi-dimensional generalization of the :py:class:`pandas.DataFrame`: .. jupyter-execute:: ds = xr.Dataset(dict(foo=data, bar=("x", [1, 2]), baz=np.pi)) ds This creates a dataset with three DataArrays named ``foo``, ``bar`` and ``baz``. Use dictionary or dot indexing to pull out ``Dataset`` variables as ``DataArray`` objects but note that assignment only works with dictionary indexing: .. jupyter-execute:: ds["foo"] ds.foo When creating ``ds``, we specified that ``foo`` is identical to ``data`` created earlier, ``bar`` is one-dimensional with single dimension ``x`` and associated values '1' and '2', and ``baz`` is a scalar not associated with any dimension in ``ds``. Variables in datasets can have different ``dtype`` and even different dimensions, but all dimensions are assumed to refer to points in the same shared coordinate system i.e. if two variables have dimension ``x``, that dimension must be identical in both variables. For example, when creating ``ds`` xarray automatically *aligns* ``bar`` with ``DataArray`` ``foo``, i.e., they share the same coordinate system so that ``ds.bar['x'] == ds.foo['x'] == ds['x']``. Consequently, the following works without explicitly specifying the coordinate ``x`` when creating ``ds['bar']``: .. jupyter-execute:: ds.bar.sel(x=10) You can do almost everything you can do with ``DataArray`` objects with ``Dataset`` objects (including indexing and arithmetic) if you prefer to work with multiple variables at once. 
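For example, adding a new variable to ``ds`` uses dictionary-style assignment (the variable name ``qux`` and its values below are arbitrary, chosen purely for illustration):

.. jupyter-execute::

    # add a new data variable along the existing "x" dimension;
    # note that dictionary indexing, not dot indexing, is used for assignment
    ds["qux"] = ("x", [4, 5])
    ds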
Read & write netCDF files ------------------------- NetCDF is the recommended file format for xarray objects. Users from the geosciences will recognize that the :py:class:`~xarray.Dataset` data model looks very similar to a netCDF file (which, in fact, inspired it). You can directly read and write xarray objects to disk using :py:meth:`~xarray.Dataset.to_netcdf`, :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_dataarray`: .. jupyter-execute:: ds.to_netcdf("example.nc") reopened = xr.open_dataset("example.nc") reopened .. jupyter-execute:: :hide-code: import os reopened.close() os.remove("example.nc") It is common for datasets to be distributed across multiple files (commonly one file per timestep). Xarray supports this use-case by providing the :py:meth:`~xarray.open_mfdataset` and the :py:meth:`~xarray.save_mfdataset` methods. For more, see :ref:`io`. .. _quick-overview-datatrees: DataTrees --------- :py:class:`xarray.DataTree` is a tree-like container of :py:class:`~xarray.DataArray` objects, organised into multiple mutually alignable groups. You can think of it like a (recursive) ``dict`` of :py:class:`~xarray.Dataset` objects, where coordinate variables and their indexes are inherited down to children. Let's first make some example xarray datasets: .. jupyter-execute:: import numpy as np import xarray as xr data = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]}) ds = xr.Dataset({"foo": data, "bar": ("x", [1, 2]), "baz": np.pi}) ds ds2 = ds.interp(coords={"x": [10, 12, 14, 16, 18, 20]}) ds2 ds3 = xr.Dataset( {"people": ["alice", "bob"], "heights": ("people", [1.57, 1.82])}, coords={"species": "human"}, ) ds3 Now we'll put these datasets into a hierarchical DataTree: .. jupyter-execute:: dt = xr.DataTree.from_dict( {"simulation/coarse": ds, "simulation/fine": ds2, "/": ds3} ) dt This created a DataTree with nested groups. We have one root group, containing information about individual people. This root group can be named, but here it is unnamed, and is referenced with ``"/"``. This structure is similar to a unix-like filesystem. The root group then has one subgroup ``simulation``, which contains no data itself but does contain another two subgroups, named ``fine`` and ``coarse``. The (sub)subgroups ``fine`` and ``coarse`` contain two very similar datasets. They both have an ``"x"`` dimension, but the dimension is of different lengths in each group, which makes the data in each group unalignable. In the root group we placed some completely unrelated information, in order to show how a tree can store heterogeneous data. Remember to keep unalignable dimensions in sibling groups because a DataTree inherits coordinates down through its child nodes. You can see this inheritance in the above representation of the DataTree. The coordinates ``people`` and ``species`` defined in the root ``/`` node are shown in the child nodes both ``/simulation/coarse`` and ``/simulation/fine``. All coordinates in parent-descendent lineage must be alignable to form a DataTree. If your input data is not aligned, you can still get a nested ``dict`` of :py:class:`~xarray.Dataset` objects with :py:func:`~xarray.open_groups` and then apply any required changes to ensure alignment before converting to a :py:class:`~xarray.DataTree`. The constraints on each group are the same as the constraint on DataArrays within a single dataset with the addition of requiring parent-descendent coordinate agreement. 
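A rough sketch of that repair workflow (not executed here, and assuming a hypothetical netCDF file ``unaligned_groups.nc`` whose groups are not yet alignable) could look like:

.. code:: python

    # open every group in the file as an independent Dataset; no alignment is enforced
    groups = xr.open_groups("unaligned_groups.nc")

    # ... adjust coordinates / dimensions of the individual Datasets here so that
    # parent and child groups become mutually alignable ...

    # once the groups are consistent, build the tree
    dt_fixed = xr.DataTree.from_dict(groups)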
We created the subgroups using a filesystem-like syntax, and accessing groups works the same way. We can access individual DataArrays in a similar fashion. .. jupyter-execute:: dt["simulation/coarse/foo"] We can also view the data in a particular group as a read-only :py:class:`~xarray.Datatree.DatasetView` using :py:attr:`xarray.Datatree.dataset`: .. jupyter-execute:: dt["simulation/coarse"].dataset We can get a copy of the :py:class:`~xarray.Dataset` including the inherited coordinates by calling the :py:class:`~xarray.datatree.to_dataset` method: .. jupyter-execute:: ds_inherited = dt["simulation/coarse"].to_dataset() ds_inherited And you can get a copy of just the node local values of :py:class:`~xarray.Dataset` by setting the ``inherit`` keyword to ``False``: .. jupyter-execute:: ds_node_local = dt["simulation/coarse"].to_dataset(inherit=False) ds_node_local .. note:: We intend to eventually implement most :py:class:`~xarray.Dataset` methods (indexing, aggregation, arithmetic, etc) on :py:class:`~xarray.DataTree` objects, but many methods have not been implemented yet. .. Operations map over subtrees, so we can take a mean over the ``x`` dimension of both the ``fine`` and ``coarse`` groups just by: .. .. jupyter-execute:: .. avg = dt["simulation"].mean(dim="x") .. avg .. Here the ``"x"`` dimension used is always the one local to that subgroup. .. You can do almost everything you can do with :py:class:`~xarray.Dataset` objects with :py:class:`~xarray.DataTree` objects .. (including indexing and arithmetic), as operations will be mapped over every subgroup in the tree. .. This allows you to work with multiple groups of non-alignable variables at once. .. tip:: If all of your variables are mutually alignable (i.e., they live on the same grid, such that every common dimension name maps to the same length), then you probably don't need :py:class:`xarray.DataTree`, and should consider just sticking with :py:class:`xarray.Dataset`. xarray-2025.12.0/doc/getting-started-guide/tutorials-and-videos.rst000066400000000000000000000025201511464676000251160ustar00rootroot00000000000000 Tutorials and Videos ==================== There are an abundance of tutorials and videos available for learning how to use *xarray*. Often, these tutorials are taught to workshop attendees at conferences or other events. We highlight a number of these resources below, but this is by no means an exhaustive list! Tutorials ---------- - `Xarray's Tutorials`_ repository - The `UW eScience Institute's Geohackweek`_ tutorial on xarray for geospatial data scientists. - `Nicolas Fauchereau's 2015 tutorial`_ on xarray for netCDF users. Videos ------- .. include:: ../videos-gallery.txt Books, Chapters and Articles ----------------------------- - Stephan Hoyer and Joe Hamman's `Journal of Open Research Software paper`_ describing the xarray project. .. _Xarray's Tutorials: https://xarray-contrib.github.io/xarray-tutorial/ .. _Journal of Open Research Software paper: https://doi.org/10.5334/jors.148 .. _UW eScience Institute's Geohackweek : https://geohackweek.github.io/nDarrays/ .. _tutorial: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial.ipynb .. _with answers: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial-with-answers.ipynb .. 
_Nicolas Fauchereau's 2015 tutorial: https://nbviewer.iPython.org/github/nicolasfauchereau/metocean/blob/master/notebooks/xray.ipynb xarray-2025.12.0/doc/getting-started-guide/why-xarray.rst000066400000000000000000000133371511464676000231640ustar00rootroot00000000000000Overview: Why xarray? ===================== Xarray introduces labels in the form of dimensions, coordinates and attributes on top of raw NumPy-like multidimensional arrays, which allows for a more intuitive, more concise, and less error-prone developer experience. What labels enable ------------------ Multi-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called "tensors") are an essential part of computational science. They are encountered in a wide range of fields, including physics, astronomy, geoscience, bioinformatics, engineering, finance, and deep learning. In Python, NumPy_ provides the fundamental data structure and API for working with raw ND arrays. However, real-world datasets are usually more than just raw numbers; they have labels which encode information about how the array values map to locations in space, time, etc. Xarray doesn't just keep track of labels on arrays -- it uses them to provide a powerful and concise interface. For example: - Apply operations over dimensions by name: ``x.sum('time')``. - Select values by label (or logical location) instead of integer location: ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``. - Mathematical operations (e.g., ``x - y``) vectorize across multiple dimensions (array broadcasting) based on dimension names, not shape. - Easily use the `split-apply-combine `_ paradigm with ``groupby``: ``x.groupby('time.dayofyear').mean()``. - Database-like alignment based on coordinate labels that smoothly handles missing values: ``x, y = xr.align(x, y, join='outer')``. - Keep track of arbitrary metadata in the form of a Python dictionary: ``x.attrs``. The N-dimensional nature of xarray's data structures makes it suitable for dealing with multi-dimensional scientific data, and its use of dimension names instead of axis labels (``dim='time'`` instead of ``axis=0``) makes such arrays much more manageable than the raw numpy ndarray: with xarray, you don't need to keep track of the order of an array's dimensions or insert dummy dimensions of size 1 to align arrays (e.g., using ``np.newaxis``). The immediate payoff of using xarray is that you'll write less code. The long-term payoff is that you'll understand what you were thinking when you come back to look at it weeks or months later. Core data structures -------------------- Xarray has two core data structures, which build upon and extend the core strengths of NumPy_ and pandas_. Both data structures are fundamentally N-dimensional: - :py:class:`~xarray.DataArray` is our implementation of a labeled, N-dimensional array. It is an N-D generalization of a :py:class:`pandas.Series`. The name ``DataArray`` itself is borrowed from Fernando Perez's datarray_ project, which prototyped a similar data structure. - :py:class:`~xarray.Dataset` is a multi-dimensional, in-memory array database. It is a dict-like container of ``DataArray`` objects aligned along any number of shared dimensions, and serves a similar purpose in xarray to the :py:class:`pandas.DataFrame`. The value of attaching labels to numpy's :py:class:`numpy.ndarray` may be fairly obvious, but the dataset may need more motivation. 
The power of the dataset over a plain dictionary is that, in addition to pulling out arrays by name, it is possible to select or combine data along a dimension across all arrays simultaneously. Like a :py:class:`~pandas.DataFrame`, datasets facilitate array operations with heterogeneous data -- the difference is that the arrays in a dataset can have not only different data types, but also different numbers of dimensions. This data model is borrowed from the netCDF_ file format, which also provides xarray with a natural and portable serialization format. NetCDF is very popular in the geosciences, and there are existing libraries for reading and writing netCDF in many programming languages, including Python. Xarray distinguishes itself from many tools for working with netCDF data in-so-far as it provides data structures for in-memory analytics that both utilize and preserve labels. You only need to do the tedious work of adding metadata once, not every time you save a file. Goals and aspirations --------------------- Xarray contributes domain-agnostic data-structures and tools for labeled multi-dimensional arrays to Python's SciPy_ ecosystem for numerical computing. In particular, xarray builds upon and integrates with NumPy_ and pandas_: - Our user-facing interfaces aim to be more explicit versions of those found in NumPy/pandas. - Compatibility with the broader ecosystem is a major goal: it should be easy to get your data in and out. - We try to keep a tight focus on functionality and interfaces related to labeled data, and leverage other Python libraries for everything else, e.g., NumPy/pandas for fast arrays/indexing (xarray itself contains no compiled code), Dask_ for parallel computing, matplotlib_ for plotting, etc. Xarray is a collaborative and community driven project, run entirely on volunteer effort (see :ref:`contributing`). Our target audience is anyone who needs N-dimensional labeled arrays in Python. Originally, development was driven by the data analysis needs of physical scientists (especially geoscientists who already know and love netCDF_), but it has become a much more broadly useful tool, and is still under active development. See our technical :ref:`roadmap` for more details, and feel free to reach out with questions about whether xarray is the right tool for your needs. .. _datarray: https://github.com/BIDS/datarray .. _Dask: https://www.dask.org .. _matplotlib: https://matplotlib.org .. _netCDF: https://www.unidata.ucar.edu/software/netcdf .. _NumPy: https://numpy.org .. _pandas: https://pandas.pydata.org .. _SciPy: https://www.scipy.org xarray-2025.12.0/doc/index.rst000066400000000000000000000044351511464676000157570ustar00rootroot00000000000000:html_theme.sidebar_secondary.remove: true .. module:: xarray Xarray documentation ==================== Xarray makes working with labelled multi-dimensional arrays in Python simple, efficient, and fun! **Version**: |version| - :ref:`whats-new` **Useful links**: `Home `__ | `Code Repository `__ | `Issues `__ | `Discussions `__ | `Releases `__ | `Tutorial `__ | `Stack Overflow `__ | `Blog `__ | .. grid:: 1 1 2 2 :gutter: 2 .. grid-item-card:: Get started! :img-top: _static/index_getting_started.svg :class-card: intro-card :link: getting-started-guide/index :link-type: doc *New to Xarray?* Start here with our installation instructions and a brief overview of Xarray. .. 
grid-item-card:: User guide :img-top: _static/index_user_guide.svg :class-card: intro-card :link: user-guide/index :link-type: doc *Ready to deepen your understanding of Xarray?* Visit the user guide for detailed explanations of the data model, common computational patterns, and more. .. grid-item-card:: API reference :img-top: _static/index_api.svg :class-card: intro-card :link: api :link-type: doc *Need to learn more about a specific Xarray function?* Go here to review the documentation of all public functions and classes in Xarray. .. grid-item-card:: Contribute :img-top: _static/index_contribute.svg :class-card: intro-card :link: contribute/contributing :link-type: doc *Saw a typo in the documentation? Want to improve existing functionalities?* Please review our guide on improving Xarray. .. toctree:: :maxdepth: 2 :hidden: :caption: For users Get Started User Guide Tutorial Gallery API Reference Get Help Development Release Notes xarray-2025.12.0/doc/internals/000077500000000000000000000000001511464676000161075ustar00rootroot00000000000000xarray-2025.12.0/doc/internals/chunked-arrays.rst000066400000000000000000000137551511464676000215740ustar00rootroot00000000000000.. currentmodule:: xarray .. _internals.chunkedarrays: Alternative chunked array types =============================== .. warning:: This is a *highly* experimental feature. Please report any bugs or other difficulties on `xarray's issue tracker `_. In particular see discussion on `xarray issue #6807 `_ Xarray can wrap chunked dask arrays (see :ref:`dask`), but can also wrap any other chunked array type that exposes the correct interface. This allows us to support using other frameworks for distributed and out-of-core processing, with user code still written as xarray commands. In particular xarray also supports wrapping :py:class:`cubed.Array` objects (see `Cubed's documentation `_ and the `cubed-xarray package `_). The basic idea is that by wrapping an array that has an explicit notion of ``.chunks``, xarray can expose control over the choice of chunking scheme to users via methods like :py:meth:`DataArray.chunk` whilst the wrapped array actually implements the handling of processing all of the chunks. Chunked array methods and "core operations" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A chunked array needs to meet all the :ref:`requirements for normal duck arrays `, but must also implement additional features. Chunked arrays have additional attributes and methods, such as ``.chunks`` and ``.rechunk``. Furthermore, Xarray dispatches chunk-aware computations across one or more chunked arrays using special functions known as "core operations". Examples include ``map_blocks``, ``blockwise``, and ``apply_gufunc``. The core operations are generalizations of functions first implemented in :py:mod:`dask.array`. The implementation of these functions is specific to the type of arrays passed to them. For example, when applying the ``map_blocks`` core operation, :py:class:`dask.array.Array` objects must be processed by :py:func:`dask.array.map_blocks`, whereas :py:class:`cubed.Array` objects must be processed by :py:func:`cubed.map_blocks`. In order to use the correct implementation of a core operation for the array type encountered, xarray dispatches to the corresponding subclass of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint`, also known as a "Chunk Manager". 
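As a small, concrete illustration of this dispatch layer, you can list the chunk managers registered in your environment with the :py:func:`~xarray.namedarray.parallelcompat.list_chunkmanagers` function discussed further below (a sketch; the exact contents of the mapping depend on which chunked array libraries you have installed):

.. code:: python

    from xarray.namedarray.parallelcompat import list_chunkmanagers

    # Mapping of registered chunk manager names to ChunkManagerEntrypoint
    # instances, e.g. {"dask": <DaskManager ...>} when dask is installed.
    print(list_chunkmanagers())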
Therefore **a full list of the operations that need to be defined is set by the API of the** :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` **abstract base class**. Note that chunked array methods are also currently dispatched using this class. Chunked array creation is also handled by this class. As chunked array objects have a one-to-one correspondence with in-memory numpy arrays, it should be possible to create a chunked array from a numpy array by passing the desired chunking pattern to an implementation of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint.from_array``. .. note:: The :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` abstract base class is mostly just acting as a namespace for containing the chunked-aware function primitives. Ideally in the future we would have an API standard for chunked array types which codified this structure, making the entrypoint system unnecessary. .. currentmodule:: xarray.namedarray.parallelcompat .. autoclass:: xarray.namedarray.parallelcompat.ChunkManagerEntrypoint :members: Registering a new ChunkManagerEntrypoint subclass ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rather than hard-coding various chunk managers to deal with specific chunked array implementations, xarray uses an entrypoint system to allow developers of new chunked array implementations to register their corresponding subclass of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint`. To register a new entrypoint you need to add an entry to the ``setup.cfg`` like this:: [options.entry_points] xarray.chunkmanagers = dask = xarray.namedarray.daskmanager:DaskManager See also `cubed-xarray `_ for another example. To check that the entrypoint has worked correctly, you may find it useful to display the available chunkmanagers using the internal function :py:func:`~xarray.namedarray.parallelcompat.list_chunkmanagers`. .. autofunction:: list_chunkmanagers User interface ~~~~~~~~~~~~~~ Once the chunkmanager subclass has been registered, xarray objects wrapping the desired array type can be created in 3 ways: #. By manually passing the array type to the :py:class:`~xarray.DataArray` constructor, see the examples for :ref:`numpy-like arrays `, #. Calling :py:meth:`~xarray.DataArray.chunk`, passing the keyword arguments ``chunked_array_type`` and ``from_array_kwargs``, #. Calling :py:func:`~xarray.open_dataset`, passing the keyword arguments ``chunked_array_type`` and ``from_array_kwargs``. The latter two methods ultimately call the chunkmanager's implementation of ``.from_array``, to which they pass the ``from_array_kwargs`` dict. The ``chunked_array_type`` kwarg selects which registered chunkmanager subclass to dispatch to. It defaults to ``'dask'`` if Dask is installed, otherwise it defaults to whichever chunkmanager is registered if only one is registered. If multiple chunkmanagers are registered, the ``chunk_manager`` configuration option (which can be set using :py:func:`set_options`) will be used to determine which chunkmanager to use, defaulting to ``'dask'``. Parallel processing without chunks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use a parallel array type that does not expose a concept of chunks explicitly, none of the information on this page is theoretically required. Such an array type (e.g. `Ramba `_ or `Arkouda `_) could be wrapped using xarray's existing support for :ref:`numpy-like "duck" arrays `. 
xarray-2025.12.0/doc/internals/duck-arrays-integration.rst000066400000000000000000000065641511464676000234220ustar00rootroot00000000000000 .. _internals.duckarrays: Integrating with duck arrays ============================= .. warning:: This is an experimental feature. Please report any bugs or other difficulties on `xarray's issue tracker `_. Xarray can wrap custom numpy-like arrays (":term:`duck array`\s") - see the :ref:`user guide documentation `. This page is intended for developers who are interested in wrapping a new custom array type with xarray. .. _internals.duckarrays.requirements: Duck array requirements ~~~~~~~~~~~~~~~~~~~~~~~ Xarray does not explicitly check that required methods are defined by the underlying duck array object before attempting to wrap the given array. However, a wrapped array type should at a minimum define these attributes: * ``shape`` property, * ``dtype`` property, * ``ndim`` property, * ``__array__`` method, * ``__array_ufunc__`` method, * ``__array_function__`` method. These need to be defined consistently with :py:class:`numpy.ndarray`, for example the array ``shape`` property needs to obey `numpy's broadcasting rules `_ (see also the `Python Array API standard's explanation `_ of these same rules). .. _internals.duckarrays.array_api_standard: Python Array API standard support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As an integration library xarray benefits greatly from the standardization of duck-array libraries' APIs, and so is a big supporter of the `Python Array API Standard `_. We aim to support any array libraries that follow the Array API standard out-of-the-box. However, xarray does occasionally call some numpy functions which are not (yet) part of the standard (e.g. :py:meth:`xarray.DataArray.pad` calls :py:func:`numpy.pad`). See `xarray issue #7848 `_ for a list of such functions. We can still support dispatching on these functions through the array protocols above, it just means that if you exclusively implement the methods in the Python Array API standard then some features in xarray will not work. Custom inline reprs ~~~~~~~~~~~~~~~~~~~ In certain situations (e.g. when printing the collapsed preview of variables of a ``Dataset``), xarray will display the repr of a :term:`duck array` in a single line, truncating it to a certain number of characters. If that would drop too much information, the :term:`duck array` may define a ``_repr_inline_`` method that takes ``max_width`` (number of characters) as an argument .. code:: python class MyDuckArray: ... def _repr_inline_(self, max_width): """format to a single line with at most max_width characters""" ... ... To avoid duplicated information, this method must omit information about the shape and :term:`dtype`. For example, the string representation of a ``dask`` array or a ``sparse`` matrix would be: .. jupyter-execute:: import dask.array as da import xarray as xr import numpy as np import sparse .. jupyter-execute:: a = da.linspace(0, 1, 20, chunks=2) a .. jupyter-execute:: b = np.eye(10) b[[5, 7, 3, 0], [6, 8, 2, 9]] = 2 b = sparse.COO.from_numpy(b) b .. jupyter-execute:: xr.Dataset(dict(a=("x", a), b=(("y", "z"), b))) xarray-2025.12.0/doc/internals/extending-xarray.rst000066400000000000000000000106361511464676000221400ustar00rootroot00000000000000 .. _internals.accessors: Extending xarray using accessors ================================ .. 
jupyter-execute:: :hide-code: import xarray as xr import numpy as np Xarray is designed as a general purpose library and hence tries to avoid including overly domain specific functionality. But inevitably, the need for more domain specific logic arises. .. _internals.accessors.composition: Composition over Inheritance ---------------------------- One potential solution to this problem is to subclass Dataset and/or DataArray to add domain specific functionality. However, inheritance is not very robust. It's easy to inadvertently use internal APIs when subclassing, which means that your code may break when xarray upgrades. Furthermore, many builtin methods will only return native xarray objects. The standard advice is to use :issue:`composition over inheritance <706>`, but reimplementing an API as large as xarray's on your own objects can be an onerous task, even if most methods are only forwarding to xarray implementations. (For an example of a project which took this approach of subclassing see `UXarray `_). If you simply want the ability to call a function with the syntax of a method call, then the builtin :py:meth:`~xarray.DataArray.pipe` method (copied from pandas) may suffice. .. _internals.accessors.writing accessors: Writing Custom Accessors ------------------------ To resolve this issue for more complex cases, xarray has the :py:func:`~xarray.register_dataset_accessor`, :py:func:`~xarray.register_dataarray_accessor` and :py:func:`~xarray.register_datatree_accessor` decorators for adding custom "accessors" on xarray objects, thereby "extending" the functionality of your xarray object. Here's how you might use these decorators to write a custom "geo" accessor implementing a geography specific extension to xarray: .. literalinclude:: ../examples/_code/accessor_example.py In general, the only restriction on the accessor class is that the ``__init__`` method must have a single parameter: the ``Dataset`` or ``DataArray`` object it is supposed to work on. This achieves the same result as if the ``Dataset`` class had a cached property defined that returns an instance of your class: .. code-block:: python class Dataset: ... @property def geo(self): return GeoAccessor(self) However, using the register accessor decorators is preferable to simply adding your own ad-hoc property (i.e., ``Dataset.geo = property(...)``), for several reasons: 1. It ensures that the name of your property does not accidentally conflict with any other attributes or methods (including other accessors). 2. Instances of accessor object will be cached on the xarray object that creates them. This means you can save state on them (e.g., to cache computed properties). 3. Using an accessor provides an implicit namespace for your custom functionality that clearly identifies it as separate from built-in xarray methods. .. note:: Accessors are created once per DataArray and Dataset instance. New instances, like those created from arithmetic operations or when accessing a DataArray from a Dataset (ex. ``ds[var_name]``), will have new accessors created. Back in an interactive IPython session, we can use these properties: .. jupyter-execute:: :hide-code: exec(open("examples/_code/accessor_example.py").read()) .. jupyter-execute:: ds = xr.Dataset({"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)}) ds.geo.center .. 
jupyter-execute:: ds.geo.plot() The intent here is that libraries that extend xarray could add such an accessor to implement subclass specific functionality rather than using actual subclasses or patching in a large number of domain specific methods. For further reading on ways to write new accessors and the philosophy behind the approach, see https://github.com/pydata/xarray/issues/1080. To help users keep things straight, please `let us know `_ if you plan to write a new accessor for an open source library. Existing open source accessors and the libraries that implement them are available in the list on the :ref:`ecosystem` page. To make documenting accessors with ``sphinx`` and ``sphinx.ext.autosummary`` easier, you can use `sphinx-autosummary-accessors`_. .. _sphinx-autosummary-accessors: https://sphinx-autosummary-accessors.readthedocs.io/ xarray-2025.12.0/doc/internals/how-to-add-new-backend.rst000066400000000000000000000451171511464676000227700ustar00rootroot00000000000000.. _add_a_backend: How to add a new backend ------------------------ Adding a new backend for read support to Xarray does not require one to integrate any code in Xarray; all you need to do is: - Create a class that inherits from Xarray :py:class:`~xarray.backends.BackendEntrypoint` and implements the method ``open_dataset`` see :ref:`RST backend_entrypoint` - Declare this class as an external plugin in your project configuration, see :ref:`RST backend_registration` If you also want to support lazy loading and dask see :ref:`RST lazy_loading`. Note that the new interface for backends is available from Xarray version >= 0.18 onwards. You can see what backends are currently available in your working environment with :py:class:`~xarray.backends.list_engines()`. .. _RST backend_entrypoint: BackendEntrypoint subclassing +++++++++++++++++++++++++++++ Your ``BackendEntrypoint`` sub-class is the primary interface with Xarray, and it should implement the following attributes and methods: - the ``open_dataset`` method (mandatory) - the ``open_dataset_parameters`` attribute (optional) - the ``guess_can_open`` method (optional) - the ``description`` attribute (optional) - the ``url`` attribute (optional). This is what a ``BackendEntrypoint`` subclass should look like: .. code-block:: python from xarray.backends import BackendEntrypoint class MyBackendEntrypoint(BackendEntrypoint): def open_dataset( self, filename_or_obj, *, drop_variables=None, # other backend specific keyword arguments # `chunks` and `cache` DO NOT go here, they are handled by xarray ): return my_open_dataset(filename_or_obj, drop_variables=drop_variables) open_dataset_parameters = ["filename_or_obj", "drop_variables"] def guess_can_open(self, filename_or_obj): try: _, ext = os.path.splitext(filename_or_obj) except TypeError: return False return ext in {".my_format", ".my_fmt"} description = "Use .my_format files in Xarray" url = "https://link_to/your_backend/documentation" ``BackendEntrypoint`` subclass methods and attributes are detailed in the following. .. _RST open_dataset: open_dataset ^^^^^^^^^^^^ The backend ``open_dataset`` shall implement reading from file, the variables decoding and it shall instantiate the output Xarray class :py:class:`~xarray.Dataset`. The following is an example of the high level processing steps: .. 
code-block:: python def open_dataset( self, filename_or_obj, *, drop_variables=None, decode_times=True, decode_timedelta=True, decode_coords=True, my_backend_option=None, ): vars, attrs, coords = my_reader( filename_or_obj, drop_variables=drop_variables, my_backend_option=my_backend_option, ) vars, attrs, coords = my_decode_variables( vars, attrs, decode_times, decode_timedelta, decode_coords ) # see also conventions.decode_cf_variables ds = xr.Dataset(vars, attrs=attrs, coords=coords) ds.set_close(my_close_method) return ds The output :py:class:`~xarray.Dataset` shall implement the additional custom method ``close``, used by Xarray to ensure the related files are eventually closed. This method shall be set by using :py:meth:`~xarray.Dataset.set_close`. The input of ``open_dataset`` method are one argument (``filename_or_obj``) and one keyword argument (``drop_variables``): - ``filename_or_obj``: can be any object but usually it is a string containing a path or an instance of :py:class:`pathlib.Path`. - ``drop_variables``: can be ``None`` or an iterable containing the variable names to be dropped when reading the data. If it makes sense for your backend, your ``open_dataset`` method should implement in its interface the following boolean keyword arguments, called **decoders**, which default to ``None``: - ``mask_and_scale`` - ``decode_times`` - ``decode_timedelta`` - ``use_cftime`` - ``concat_characters`` - ``decode_coords`` Note: all the supported decoders shall be declared explicitly in backend ``open_dataset`` signature and adding a ``**kwargs`` is not allowed. These keyword arguments are explicitly defined in Xarray :py:func:`~xarray.open_dataset` signature. Xarray will pass them to the backend only if the User explicitly sets a value different from ``None``. For more details on decoders see :ref:`RST decoders`. Your backend can also take as input a set of backend-specific keyword arguments. All these keyword arguments can be passed to :py:func:`~xarray.open_dataset` grouped either via the ``backend_kwargs`` parameter or explicitly using the syntax ``**kwargs``. If you don't want to support the lazy loading, then the :py:class:`~xarray.Dataset` shall contain values as a :py:class:`numpy.ndarray` and your work is almost done. .. _RST open_dataset_parameters: open_dataset_parameters ^^^^^^^^^^^^^^^^^^^^^^^ ``open_dataset_parameters`` is the list of backend ``open_dataset`` parameters. It is not a mandatory parameter, and if the backend does not provide it explicitly, Xarray creates a list of them automatically by inspecting the backend signature. If ``open_dataset_parameters`` is not defined, but ``**kwargs`` and ``*args`` are in the backend ``open_dataset`` signature, Xarray raises an error. On the other hand, if the backend provides the ``open_dataset_parameters``, then ``**kwargs`` and ``*args`` can be used in the signature. However, this practice is discouraged unless there is a good reasons for using ``**kwargs`` or ``*args``. .. _RST guess_can_open: guess_can_open ^^^^^^^^^^^^^^ ``guess_can_open`` is used to identify the proper engine to open your data file automatically in case the engine is not specified explicitly. If you are not interested in supporting this feature, you can skip this step since :py:class:`~xarray.backends.BackendEntrypoint` already provides a default :py:meth:`~xarray.backends.BackendEntrypoint.guess_can_open` that always returns ``False``. 
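As an illustration only, a slightly more defensive variant of the ``guess_can_open`` method sketched earlier could also recognise already-open file-like objects by peeking at a leading magic number. The ``MAGIC`` marker and the file extensions below are invented for this example:

.. code-block:: python

    import os

    MAGIC = b"MYFMT"  # hypothetical on-disk marker, purely for illustration


    def guess_can_open(self, filename_or_obj):
        # file-like objects: peek at the first bytes without consuming them
        if hasattr(filename_or_obj, "read") and hasattr(filename_or_obj, "seek"):
            position = filename_or_obj.tell()
            header = filename_or_obj.read(len(MAGIC))
            filename_or_obj.seek(position)
            return header == MAGIC
        # strings and paths: fall back to the extension check
        try:
            _, ext = os.path.splitext(filename_or_obj)
        except TypeError:
            return False
        return ext in {".my_format", ".my_fmt"}
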
Backend ``guess_can_open`` takes as input the ``filename_or_obj`` parameter of Xarray :py:meth:`~xarray.open_dataset`, and returns a boolean. .. _RST properties: description and url ^^^^^^^^^^^^^^^^^^^^ ``description`` is used to provide a short text description of the backend. ``url`` is used to include a link to the backend's documentation or code. These attributes are surfaced when a user prints :py:class:`~xarray.backends.BackendEntrypoint`. If ``description`` or ``url`` are not defined, an empty string is returned. .. _RST decoders: Decoders ^^^^^^^^ The decoders implement specific operations to transform data from on-disk representation to Xarray representation. A classic example is the โ€œtimeโ€ variable decoding operation. In NetCDF, the elements of the โ€œtimeโ€ variable are stored as integers, and the unit contains an origin (for example: "seconds since 1970-1-1"). In this case, Xarray transforms the pair integer-unit in a :py:class:`numpy.datetime64`. The standard coders implemented in Xarray are: - :py:class:`xarray.coding.strings.CharacterArrayCoder()` - :py:class:`xarray.coding.strings.EncodedStringCoder()` - :py:class:`xarray.coding.variables.UnsignedIntegerCoder()` - :py:class:`xarray.coding.variables.CFMaskCoder()` - :py:class:`xarray.coding.variables.CFScaleOffsetCoder()` - :py:class:`xarray.coding.times.CFTimedeltaCoder()` - :py:class:`xarray.coding.times.CFDatetimeCoder()` Xarray coders all have the same interface. They have two methods: ``decode`` and ``encode``. The method ``decode`` takes a ``Variable`` in on-disk format and returns a ``Variable`` in Xarray format. Variable attributes no more applicable after the decoding, are dropped and stored in the ``Variable.encoding`` to make them available to the ``encode`` method, which performs the inverse transformation. In the following an example on how to use the coders ``decode`` method: .. jupyter-execute:: :hide-code: import xarray as xr import numpy as np .. jupyter-execute:: var = xr.Variable( dims=("x",), data=np.arange(10.0), attrs={"scale_factor": 10, "add_offset": 2} ) var .. jupyter-execute:: coder = xr.coding.variables.CFScaleOffsetCoder() decoded_var = coder.decode(var) decoded_var .. jupyter-execute:: decoded_var.encoding Some of the transformations can be common to more backends, so before implementing a new decoder, be sure Xarray does not already implement that one. The backends can reuse Xarrayโ€™s decoders, either instantiating the coders and using the method ``decode`` directly or using the higher-level function :py:func:`~xarray.conventions.decode_cf_variables` that groups Xarray decoders. In some cases, the transformation to apply strongly depends on the on-disk data format. Therefore, you may need to implement your own decoder. An example of such a case is when you have to deal with the time format of a grib file. grib format is very different from the NetCDF one: in grib, the time is stored in two attributes dataDate and dataTime as strings. Therefore, it is not possible to reuse the Xarray time decoder, and implementing a new one is mandatory. Decoders can be activated or deactivated using the boolean keywords of Xarray :py:meth:`~xarray.open_dataset` signature: ``mask_and_scale``, ``decode_times``, ``decode_timedelta``, ``use_cftime``, ``concat_characters``, ``decode_coords``. Such keywords are passed to the backend only if the User sets a value different from ``None``. 
Note that the backend does not necessarily have to implement all the decoders, but it shall declare in its ``open_dataset`` interface only the boolean keywords related to the supported decoders. .. _RST backend_registration: How to register a backend +++++++++++++++++++++++++ Define a new entrypoint in your ``pyproject.toml`` (or ``setup.cfg/setup.py`` for older configurations), with: - group: ``xarray.backends`` - name: the name to be passed to :py:meth:`~xarray.open_dataset` as ``engine`` - object reference: the reference of the class that you have implemented. You can declare the entrypoint in your project configuration like so: .. tab:: pyproject.toml .. code:: toml [project.entry-points."xarray.backends"] my_engine = "my_package.my_module:MyBackendEntrypoint" .. tab:: pyproject.toml [Poetry] .. code-block:: toml [tool.poetry.plugins."xarray.backends"] my_engine = "my_package.my_module:MyBackendEntrypoint" .. tab:: setup.cfg .. code-block:: cfg [options.entry_points] xarray.backends = my_engine = my_package.my_module:MyBackendEntrypoint .. tab:: setup.py .. code-block:: setuptools.setup( entry_points={ "xarray.backends": [ "my_engine=my_package.my_module:MyBackendEntrypoint" ], }, ) See the `Python Packaging User Guide `_ for more information on entrypoints and details of the syntax. If you're using Poetry, note that table name in ``pyproject.toml`` is slightly different. See `the Poetry docs `_ for more information on plugins. .. _RST lazy_loading: How to support lazy loading +++++++++++++++++++++++++++ If you want to make your backend effective with big datasets, then you should take advantage of xarray's support for lazy loading and indexing. Basically, when your backend constructs the ``Variable`` objects, you need to replace the :py:class:`numpy.ndarray` inside the variables with a custom :py:class:`~xarray.backends.BackendArray` subclass that supports lazy loading and indexing. See the example below: .. code-block:: python backend_array = MyBackendArray() data = indexing.LazilyIndexedArray(backend_array) var = xr.Variable(dims, data, attrs=attrs, encoding=encoding) Where: - :py:class:`~xarray.core.indexing.LazilyIndexedArray` is a wrapper class provided by Xarray that manages the lazy loading and indexing. - ``MyBackendArray`` should be implemented by the backend and must inherit from :py:class:`~xarray.backends.BackendArray`. BackendArray subclassing ^^^^^^^^^^^^^^^^^^^^^^^^ The BackendArray subclass must implement the following method and attributes: - the ``__getitem__`` method that takes an index as an input and returns a `NumPy `__ array, - the ``shape`` attribute, - the ``dtype`` attribute. It may also optionally implement an additional ``async_getitem`` method. Xarray supports different types of :doc:`/user-guide/indexing`, that can be grouped in three types of indexes: :py:class:`~xarray.core.indexing.BasicIndexer`, :py:class:`~xarray.core.indexing.OuterIndexer`, and :py:class:`~xarray.core.indexing.VectorizedIndexer`. This implies that the implementation of the method ``__getitem__`` can be tricky. In order to simplify this task, Xarray provides a helper function, :py:func:`~xarray.core.indexing.explicit_indexing_adapter`, that transforms all the input indexer types (basic, outer, vectorized) in a tuple which is interpreted correctly by your backend. This is an example ``BackendArray`` subclass implementation: .. 
code-block:: python from xarray.backends import BackendArray class MyBackendArray(BackendArray): def __init__( self, shape, dtype, lock, # other backend specific keyword arguments ): self.shape = shape self.dtype = dtype self.lock = lock def __getitem__( self, key: xarray.core.indexing.ExplicitIndexer ) -> np.typing.ArrayLike: return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.BASIC, self._raw_indexing_method, ) def _raw_indexing_method(self, key: tuple) -> np.typing.ArrayLike: # thread safe method that access to data on disk with self.lock: ... return item Note that ``BackendArray.__getitem__`` must be thread safe to support multi-thread processing. The :py:func:`~xarray.core.indexing.explicit_indexing_adapter` method takes in input the ``key``, the array ``shape`` and the following parameters: - ``indexing_support``: the type of index supported by ``raw_indexing_method`` - ``raw_indexing_method``: a method that shall take in input a key in the form of a tuple and return an indexed :py:class:`numpy.ndarray`. For more details see :py:class:`~xarray.core.indexing.IndexingSupport` and :ref:`RST indexing`. Async support ^^^^^^^^^^^^^ Backends can also optionally support loading data asynchronously via xarray's asynchronous loading methods (e.g. ``~xarray.Dataset.load_async``). To support async loading the ``BackendArray`` subclass must additionally implement the ``BackendArray.async_getitem`` method. Note that implementing this method is only necessary if you want to be able to load data from different xarray objects concurrently. Even without this method your ``BackendArray`` implementation is still free to concurrently load chunks of data for a single ``Variable`` itself, so long as it does so behind the synchronous ``__getitem__`` interface. Dask support ^^^^^^^^^^^^ In order to support `Dask Distributed `__ and :py:mod:`multiprocessing`, the ``BackendArray`` subclass should be serializable either with :ref:`io.pickle` or `cloudpickle `__. That implies that all the reference to open files should be dropped. For opening files, we therefore suggest to use the helper class provided by Xarray :py:class:`~xarray.backends.CachingFileManager`. .. _RST indexing: Indexing examples ^^^^^^^^^^^^^^^^^ **BASIC** In the ``BASIC`` indexing support, numbers and slices are supported. Example: .. jupyter-input:: # () shall return the full array backend_array._raw_indexing_method(()) .. jupyter-output:: array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) .. jupyter-input:: # shall support integers backend_array._raw_indexing_method(1, 1) .. jupyter-output:: 5 .. jupyter-input:: # shall support slices backend_array._raw_indexing_method(slice(0, 3), slice(2, 4)) .. jupyter-output:: array([[2, 3], [6, 7], [10, 11]]) **OUTER** The ``OUTER`` indexing shall support number, slices and in addition it shall support also lists of integers. The outer indexing is equivalent to combining multiple input list with ``itertools.product()``: .. jupyter-input:: backend_array._raw_indexing_method([0, 1], [0, 1, 2]) .. jupyter-output:: array([[0, 1, 2], [4, 5, 6]]) .. jupyter-input:: # shall support integers backend_array._raw_indexing_method(1, 1) .. jupyter-output:: 5 **OUTER_1VECTOR** The ``OUTER_1VECTOR`` indexing shall supports number, slices and at most one list. The behaviour with the list shall be the same as ``OUTER`` indexing. 
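If it helps to experiment with these indexing modes outside of a real backend, the self-contained sketch below emulates outer indexing on a plain in-memory numpy array. The ``_on_disk`` array stands in for data that a real backend would only read on demand, and dropping dimensions indexed with bare integers is ignored for brevity:

.. code-block:: python

    import numpy as np

    # stand-in for data that a real backend would only read on demand
    _on_disk = np.arange(12).reshape(3, 4)


    def _raw_indexing_method(key: tuple) -> np.ndarray:
        # each entry of ``key`` is an integer, a slice or a list of integers;
        # outer indexing combines the per-dimension selections like
        # itertools.product (bare integers are not dropped in this sketch)
        per_dim = []
        for size, k in zip(_on_disk.shape, key):
            if isinstance(k, slice):
                per_dim.append(np.arange(size)[k])
            else:
                per_dim.append(np.atleast_1d(k))
        return _on_disk[np.ix_(*per_dim)]


    # rows 0 and 2 combined with columns 1 and 2
    _raw_indexing_method(([0, 2], slice(1, 3)))
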
If you support more complex indexing as explicit indexing or numpy indexing, you can have a look to the implementation of Zarr backend and Scipy backend, currently available in :py:mod:`~xarray.backends` module. .. _RST preferred_chunks: Preferred chunk sizes ^^^^^^^^^^^^^^^^^^^^^ To potentially improve performance with lazy loading, the backend may define for each variable the chunk sizes that it prefers---that is, sizes that align with how the variable is stored. (Note that the backend is not directly involved in `Dask `__ chunking, because Xarray internally manages chunking.) To define the preferred chunk sizes, store a mapping within the variable's encoding under the key ``"preferred_chunks"`` (that is, ``var.encoding["preferred_chunks"]``). The mapping's keys shall be the names of dimensions with preferred chunk sizes, and each value shall be the corresponding dimension's preferred chunk sizes expressed as either an integer (such as ``{"dim1": 1000, "dim2": 2000}``) or a tuple of integers (such as ``{"dim1": (1000, 100), "dim2": (2000, 2000, 2000)}``). Xarray uses the preferred chunk sizes in some special cases of the ``chunks`` argument of the :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_mfdataset` functions. If ``chunks`` is a ``dict``, then for any dimensions missing from the keys or whose value is ``None``, Xarray sets the chunk sizes to the preferred sizes. If ``chunks`` equals ``"auto"``, then Xarray seeks ideal chunk sizes informed by the preferred chunk sizes. Specifically, it determines the chunk sizes using :py:func:`dask.array.core.normalize_chunks` with the ``previous_chunks`` argument set according to the preferred chunk sizes. xarray-2025.12.0/doc/internals/how-to-create-custom-index.rst000066400000000000000000000226161511464676000237430ustar00rootroot00000000000000.. currentmodule:: xarray .. _internals.custom indexes: How to create a custom index ============================ .. warning:: This feature is highly experimental. Support for custom indexes has been introduced in v2022.06.0 and is still incomplete. API is subject to change without deprecation notice. However we encourage you to experiment and report issues that arise. Xarray's built-in support for label-based indexing (e.g. ``ds.sel(latitude=40, method="nearest")``) and alignment operations relies on :py:class:`pandas.Index` objects. Pandas Indexes are powerful and suitable for many applications but also have some limitations: - it only works with 1-dimensional coordinates where explicit labels are fully loaded in memory - it is hard to reuse it with irregular data for which there exist more efficient, tree-based structures to perform data selection - it doesn't support extra metadata that may be required for indexing and alignment (e.g., a coordinate reference system) Fortunately, Xarray now allows extending this functionality with custom indexes, which can be implemented in 3rd-party libraries. The Index base class -------------------- Every Xarray index must inherit from the :py:class:`Index` base class. It is for example the case of Xarray built-in ``PandasIndex`` and ``PandasMultiIndex`` subclasses, which wrap :py:class:`pandas.Index` and :py:class:`pandas.MultiIndex` respectively. 
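Schematically, a custom index is a class with a handful of well-known hooks, each of which unlocks a family of Dataset/DataArray operations. The skeleton below is only an orientation map for the sections that follow; method signatures are abbreviated, see the documented :py:class:`Index` API for the exact parameters:

.. code-block:: python

    from xarray import Index


    class MySkeletonIndex(Index):
        # the only strictly required hook (see "Minimal requirements" below)
        @classmethod
        def from_variables(cls, variables, *, options):
            raise NotImplementedError

        # optionally reuse the index data as coordinate data
        def create_variables(self, variables=None):
            return {}

        # optionally support label-based selection (Dataset.sel / DataArray.sel)
        def sel(self, labels):
            raise NotImplementedError

        # optionally support alignment
        def equals(self, other):
            raise NotImplementedError

        def join(self, other, how="inner"):
            raise NotImplementedError

        def reindex_like(self, other):
            raise NotImplementedError
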
The ``Index`` API closely follows the :py:class:`Dataset` and :py:class:`DataArray` API, e.g., for an index to support :py:meth:`DataArray.sel` it needs to implement :py:meth:`Index.sel`, to support :py:meth:`DataArray.stack` and :py:meth:`DataArray.unstack` it needs to implement :py:meth:`Index.stack` and :py:meth:`Index.unstack`, etc. Some guidelines and examples are given below. More details can be found in the documented :py:class:`Index` API. Minimal requirements -------------------- Every index must at least implement the :py:meth:`Index.from_variables` class method, which is used by Xarray to build a new index instance from one or more existing coordinates in a Dataset or DataArray. Since any collection of coordinates can be passed to that method (i.e., the number, order and dimensions of the coordinates are all arbitrary), it is the responsibility of the index to check the consistency and validity of those input coordinates. For example, :py:class:`~xarray.indexes.PandasIndex` accepts only one coordinate and :py:class:`~xarray.indexes.PandasMultiIndex` accepts one or more 1-dimensional coordinates that must all share the same dimension. Other, custom indexes need not have the same constraints, e.g., - a georeferenced raster index which only accepts two 1-d coordinates with distinct dimensions - a staggered grid index which takes coordinates with different dimension name suffixes (e.g., "_c" and "_l" for center and left) Optional requirements --------------------- Pretty much everything else is optional. Depending on the method, in the absence of a (re)implementation, an index will either raise a ``NotImplementedError`` or won't do anything specific (just drop, pass or copy itself from/to the resulting Dataset or DataArray). For example, you can just skip re-implementing :py:meth:`Index.rename` if there is no internal attribute or object to rename according to the new desired coordinate or dimension names. In the case of ``PandasIndex``, we rename the underlying ``pandas.Index`` object and/or update the ``PandasIndex.dim`` attribute since the associated dimension name has been changed. Wrap index data as coordinate data ---------------------------------- In some cases it is possible to reuse the index's underlying object or structure as coordinate data and hence avoid data duplication. For ``PandasIndex`` and ``PandasMultiIndex``, we leverage the fact that ``pandas.Index`` objects expose some array-like API. In Xarray we use some wrappers around those underlying objects as a thin compatibility layer to preserve dtypes, handle explicit and n-dimensional indexing, etc. Other structures like tree-based indexes (e.g., kd-tree) may differ too much from arrays to reuse it as coordinate data. If the index data can be reused as coordinate data, the ``Index`` subclass should implement :py:meth:`Index.create_variables`. This method accepts a dictionary of variable names as keys and :py:class:`Variable` objects as values (used for propagating variable metadata) and should return a dictionary of new :py:class:`Variable` or :py:class:`IndexVariable` objects. Data selection -------------- For an index to support label-based selection, it needs to at least implement :py:meth:`Index.sel`. This method accepts a dictionary of labels where the keys are coordinate names (already filtered for the current index) and the values can be pretty much anything (e.g., a slice, a tuple, a list, a numpy array, a :py:class:`Variable` or a :py:class:`DataArray`). 
It is the responsibility of the index to properly handle those input labels. :py:meth:`Index.sel` must return an instance of :py:class:`IndexSelResult`. The latter is a small data class that holds positional indexers (indices) and that may also hold new variables, new indexes, names of variables or indexes to drop, names of dimensions to rename, etc. For example, this is useful in the case of ``PandasMultiIndex`` as it allows Xarray to convert it into a single ``PandasIndex`` when only one level remains after the selection. The :py:class:`IndexSelResult` class is also used to merge results from label-based selection performed by different indexes. Note that it is now possible to have two distinct indexes for two 1-d coordinates sharing the same dimension, but it is not currently possible to use those two indexes in the same call to :py:meth:`Dataset.sel`. Optionally, the index may also implement :py:meth:`Index.isel`. In the case of ``PandasIndex`` we use it to create a new index object by just indexing the underlying ``pandas.Index`` object. In other cases this may not be possible, e.g., a kd-tree object may not be easily indexed. If ``Index.isel()`` is not implemented, the index in just dropped in the DataArray or Dataset resulting from the selection. Alignment --------- For an index to support alignment, it needs to implement: - :py:meth:`Index.equals`, which compares the index with another index and returns either ``True`` or ``False`` - :py:meth:`Index.join`, which combines the index with another index and returns a new Index object - :py:meth:`Index.reindex_like`, which queries the index with another index and returns positional indexers that are used to re-index Dataset or DataArray variables along one or more dimensions Xarray ensures that those three methods are called with an index of the same type as argument. Meta-indexes ------------ Nothing prevents writing a custom Xarray index that itself encapsulates other Xarray index(es). We call such index a "meta-index". Here is a small example of a meta-index for geospatial, raster datasets (i.e., regularly spaced 2-dimensional data) that internally relies on two ``PandasIndex`` instances for the x and y dimensions respectively: .. code-block:: python from xarray import Index from xarray.core.indexes import PandasIndex from xarray.core.indexing import merge_sel_results class RasterIndex(Index): def __init__(self, xy_indexes): assert len(xy_indexes) == 2 # must have two distinct dimensions dim = [idx.dim for idx in xy_indexes.values()] assert dim[0] != dim[1] self._xy_indexes = xy_indexes @classmethod def from_variables(cls, variables): assert len(variables) == 2 xy_indexes = { k: PandasIndex.from_variables({k: v}) for k, v in variables.items() } return cls(xy_indexes) def create_variables(self, variables): idx_variables = {} for index in self._xy_indexes.values(): idx_variables.update(index.create_variables(variables)) return idx_variables def sel(self, labels): results = [] for k, index in self._xy_indexes.items(): if k in labels: results.append(index.sel({k: labels[k]})) return merge_sel_results(results) This basic index only supports label-based selection. Providing a full-featured index by implementing the other ``Index`` methods should be pretty straightforward for this example, though. This example is also not very useful unless we add some extra functionality on top of the two encapsulated ``PandasIndex`` objects, such as a coordinate reference system. 
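As a hint of how such extensions might look, methods like the following could be added to the ``RasterIndex`` class above to support positional selection and alignment checks by delegating to the wrapped ``PandasIndex`` objects. This is an untested sketch that follows the ``Index`` API described earlier:

.. code-block:: python

    # additional methods for the RasterIndex class defined above

    def isel(self, indexers):
        # delegate integer-based selection to each wrapped PandasIndex and
        # drop the whole RasterIndex if any component cannot be preserved
        new_indexes = {}
        for k, index in self._xy_indexes.items():
            new_index = index.isel({k: indexers[k]}) if k in indexers else index
            if new_index is None:
                return None
            new_indexes[k] = new_index
        return type(self)(new_indexes)

    def equals(self, other):
        # two RasterIndex objects are equal if both wrapped indexes are equal
        return all(
            index.equals(other._xy_indexes[k])
            for k, index in self._xy_indexes.items()
        )
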
How to use a custom index ------------------------- You can use :py:meth:`Dataset.set_xindex` or :py:meth:`DataArray.set_xindex` to assign a custom index to a Dataset or DataArray, e.g., using the ``RasterIndex`` above: .. code-block:: python import numpy as np import xarray as xr da = xr.DataArray( np.random.uniform(size=(100, 50)), coords={"x": ("x", np.arange(50)), "y": ("y", np.arange(100))}, dims=("y", "x"), ) # Xarray create default indexes for the 'x' and 'y' coordinates # we first need to explicitly drop it da = da.drop_indexes(["x", "y"]) # Build a RasterIndex from the 'x' and 'y' coordinates da_raster = da.set_xindex(["x", "y"], RasterIndex) # RasterIndex now takes care of label-based selection selected = da_raster.sel(x=10, y=slice(20, 50)) xarray-2025.12.0/doc/internals/index.rst000066400000000000000000000017661511464676000177620ustar00rootroot00000000000000.. _internals: Xarray Internals ================ Xarray builds upon two of the foundational libraries of the scientific Python stack, NumPy and pandas. It is written in pure Python (no C or Cython extensions), which makes it easy to develop and extend. Instead, we push compiled code to :ref:`optional dependencies`. The pages in this section are intended for: * Contributors to xarray who wish to better understand some of the internals, * Developers from other fields who wish to extend xarray with domain-specific logic, perhaps to support a new scientific community of users, * Developers of other packages who wish to interface xarray with their existing tools, e.g. by creating a backend for reading a new file format, or wrapping a custom array type. .. toctree:: :maxdepth: 2 :hidden: internal-design interoperability duck-arrays-integration chunked-arrays extending-xarray how-to-add-new-backend how-to-create-custom-index zarr-encoding-spec time-coding xarray-2025.12.0/doc/internals/internal-design.rst000066400000000000000000000236261511464676000217350ustar00rootroot00000000000000.. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) np.set_printoptions(threshold=10, edgeitems=2) .. _internal design: Internal Design =============== This page gives an overview of the internal design of xarray. In totality, the Xarray project defines 4 key data structures. In order of increasing complexity, they are: - :py:class:`xarray.Variable`, - :py:class:`xarray.DataArray`, - :py:class:`xarray.Dataset`, - :py:class:`xarray.DataTree`. The user guide lists only :py:class:`xarray.DataArray` and :py:class:`xarray.Dataset`, but :py:class:`~xarray.Variable` is the fundamental object internally, and :py:class:`~xarray.DataTree` is a natural generalisation of :py:class:`xarray.Dataset`. .. note:: Our :ref:`roadmap` includes plans to document :py:class:`~xarray.Variable` as fully public API. Internally private :ref:`lazy indexing classes ` are used to avoid loading more data than necessary, and flexible indexes classes (derived from :py:class:`~xarray.indexes.Index`) provide performant label-based lookups. .. _internal design.data structures: Data Structures --------------- The :ref:`data structures` page in the user guide explains the basics and concentrates on user-facing behavior, whereas this section explains how xarray's data structure classes actually work internally. .. 
_internal design.data structures.variable: Variable Objects ~~~~~~~~~~~~~~~~ The core internal data structure in xarray is the :py:class:`~xarray.Variable`, which is used as the basic building block behind xarray's :py:class:`~xarray.Dataset`, :py:class:`~xarray.DataArray` types. A :py:class:`~xarray.Variable` consists of: - ``dims``: A tuple of dimension names. - ``data``: The N-dimensional array (typically a NumPy or Dask array) storing the Variable's data. It must have the same number of dimensions as the length of ``dims``. - ``attrs``: A dictionary of metadata associated with this array. By convention, xarray's built-in operations never use this metadata. - ``encoding``: Another dictionary used to store information about how these variable's data is represented on disk. See :ref:`io.encoding` for more details. :py:class:`~xarray.Variable` has an interface similar to NumPy arrays, but extended to make use of named dimensions. For example, it uses ``dim`` in preference to an ``axis`` argument for methods like ``mean``, and supports :ref:`compute.broadcasting`. However, unlike ``Dataset`` and ``DataArray``, the basic ``Variable`` does not include coordinate labels along each axis. :py:class:`~xarray.Variable` is public API, but because of its incomplete support for labeled data, it is mostly intended for advanced uses, such as in xarray itself, for writing new backends, or when creating custom indexes. You can access the variable objects that correspond to xarray objects via the (readonly) :py:attr:`Dataset.variables ` and :py:attr:`DataArray.variable ` attributes. .. _internal design.dataarray: DataArray Objects ~~~~~~~~~~~~~~~~~ The simplest data structure used by most users is :py:class:`~xarray.DataArray`. A :py:class:`~xarray.DataArray` is a composite object consisting of multiple :py:class:`~xarray.Variable` objects which store related data. A single :py:class:`~xarray.Variable` is referred to as the "data variable", and stored under the :py:attr:`~xarray.DataArray.variable`` attribute. A :py:class:`~xarray.DataArray` inherits all of the properties of this data variable, i.e. ``dims``, ``data``, ``attrs`` and ``encoding``, all of which are implemented by forwarding on to the underlying ``Variable`` object. In addition, a :py:class:`~xarray.DataArray` stores additional ``Variable`` objects stored in a dict under the private ``_coords`` attribute, each of which is referred to as a "Coordinate Variable". These coordinate variable objects are only allowed to have ``dims`` that are a subset of the data variable's ``dims``, and each dim has a specific length. This means that the full :py:attr:`~xarray.DataArray.size` of the dataarray can be represented by a dictionary mapping dimension names to integer sizes. The underlying data variable has this exact same size, and the attached coordinate variables have sizes which are some subset of the size of the data variable. Another way of saying this is that all coordinate variables must be "alignable" with the data variable. When a coordinate is accessed by the user (e.g. via the dict-like :py:class:`~xarray.DataArray.__getitem__` syntax), then a new ``DataArray`` is constructed by finding all coordinate variables that have compatible dimensions and re-attaching them before the result is returned. This is why most users never see the ``Variable`` class underlying each coordinate variable - it is always promoted to a ``DataArray`` before returning. 
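These relationships are easy to see in an interactive session. The small example below (which pokes at the private ``_coords`` attribute purely for illustration) shows the wrapped data variable and the promotion of a coordinate to a ``DataArray`` on access:

.. code-block:: python

    import numpy as np
    import xarray as xr

    # a Variable is just dims + data + attrs (+ encoding)
    var = xr.Variable(dims=("x",), data=np.arange(3), attrs={"units": "m"})

    da = xr.DataArray(var, coords={"x": [10, 20, 30]}, name="distance")

    # the DataArray forwards dims, data and attrs to its wrapped data variable
    print(da.variable.dims, da.variable.attrs)

    # internally the coordinate is stored as another (unnamed) Variable ...
    print(type(da._coords["x"]))

    # ... but is promoted to a DataArray when accessed by the user
    print(type(da["x"]))
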
Lookups are performed by special :py:class:`~xarray.indexes.Index` objects, which are stored in a dict under the private ``_indexes`` attribute. Indexes must be associated with one or more coordinates, and essentially act by translating a query given in physical coordinate space (typically via the :py:meth:`~xarray.DataArray.sel` method) into a set of integer indices in array index space that can be used to index the underlying n-dimensional array-like ``data``. Indexing in array index space (typically performed via the :py:meth:`~xarray.DataArray.isel` method) does not require consulting an ``Index`` object. Finally a :py:class:`~xarray.DataArray` defines a :py:attr:`~xarray.DataArray.name` attribute, which refers to its data variable but is stored on the wrapping ``DataArray`` class. The ``name`` attribute is primarily used when one or more :py:class:`~xarray.DataArray` objects are promoted into a :py:class:`~xarray.Dataset` (e.g. via :py:meth:`~xarray.DataArray.to_dataset`). Note that the underlying :py:class:`~xarray.Variable` objects are all unnamed, so they can always be referred to uniquely via a dict-like mapping. .. _internal design.dataset: Dataset Objects ~~~~~~~~~~~~~~~ The :py:class:`~xarray.Dataset` class is a generalization of the :py:class:`~xarray.DataArray` class that can hold multiple data variables. Internally all data variables and coordinate variables are stored under a single ``variables`` dict, and coordinates are specified by storing their names in a private ``_coord_names`` dict. The dataset's ``dims`` are the set of all dims present across any variable, but (similar to in dataarrays) coordinate variables cannot have a dimension that is not present on any data variable. When a data variable or coordinate variable is accessed, a new ``DataArray`` is again constructed from all compatible coordinates before returning. .. _internal design.subclassing: .. note:: The way that selecting a variable from a ``DataArray`` or ``Dataset`` actually involves internally wrapping the ``Variable`` object back up into a ``DataArray``/``Dataset`` is the primary reason :ref:`we recommend against subclassing ` Xarray objects. The main problem it creates is that we currently cannot easily guarantee that for example selecting a coordinate variable from your ``SubclassedDataArray`` would return an instance of ``SubclassedDataArray`` instead of just an :py:class:`xarray.DataArray`. See `GH issue `_ for more details. .. _internal design.lazy indexing: Lazy Indexing Classes --------------------- Lazy Loading ~~~~~~~~~~~~ If we open a ``Variable`` object from disk using :py:func:`~xarray.open_dataset` we can see that the actual values of the array wrapped by the data variable are not displayed. .. jupyter-execute:: da = xr.tutorial.open_dataset("air_temperature")["air"] var = da.variable var We can see the size, and the dtype of the underlying array, but not the actual values. This is because the values have not yet been loaded. If we look at the private attribute :py:meth:`~xarray.Variable._data` containing the underlying array object, we see something interesting: .. jupyter-execute:: var._data You're looking at one of xarray's internal Lazy Indexing Classes. These powerful classes are hidden from the user, but provide important functionality. Calling the public :py:attr:`~xarray.Variable.data` property loads the underlying array into memory. .. jupyter-execute:: var.data This array is now cached, which we can see by accessing the private attribute again: .. 
jupyter-execute:: var._data Lazy Indexing ~~~~~~~~~~~~~ The purpose of these lazy indexing classes is to prevent more data being loaded into memory than is necessary for the subsequent analysis, by deferring loading data until after indexing is performed. Let's open the data from disk again. .. jupyter-execute:: da = xr.tutorial.open_dataset("air_temperature")["air"] var = da.variable Now, notice how even after subsetting the data has does not get loaded: .. jupyter-execute:: var.isel(time=0) The shape has changed, but the values are still not shown. Looking at the private attribute again shows how this indexing information was propagated via the hidden lazy indexing classes: .. jupyter-execute:: var.isel(time=0)._data .. note:: Currently only certain indexing operations are lazy, not all array operations. For discussion of making all array operations lazy see `GH issue #5081 `_. Lazy Dask Arrays ~~~~~~~~~~~~~~~~ Note that xarray's implementation of Lazy Indexing classes is completely separate from how :py:class:`dask.array.Array` objects evaluate lazily. Dask-backed xarray objects delay almost all operations until :py:meth:`~xarray.DataArray.compute` is called (either explicitly or implicitly via :py:meth:`~xarray.DataArray.plot` for example). The exceptions to this laziness are operations whose output shape is data-dependent, such as when calling :py:meth:`~xarray.DataArray.where`. xarray-2025.12.0/doc/internals/interoperability.rst000066400000000000000000000063451511464676000222360ustar00rootroot00000000000000.. _interoperability: Interoperability of Xarray ========================== Xarray is designed to be extremely interoperable, in many orthogonal ways. Making xarray as flexible as possible is the common theme of most of the goals on our :ref:`roadmap`. This interoperability comes via a set of flexible abstractions into which the user can plug in. The current full list is: - :ref:`Custom file backends ` via the :py:class:`~xarray.backends.BackendEntrypoint` system, - Numpy-like :ref:`"duck" array wrapping `, which supports the `Python Array API Standard `_, - :ref:`Chunked distributed array computation ` via the :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` system, - Custom :py:class:`~xarray.Index` objects for :ref:`flexible label-based lookups `, - Extending xarray objects with domain-specific methods via :ref:`custom accessors `. .. warning:: One obvious way in which xarray could be more flexible is that whilst subclassing xarray objects is possible, we currently don't support it in most transformations, instead recommending composition over inheritance. See the :ref:`internal design page ` for the rationale and look at the corresponding `GH issue `_ if you're interested in improving support for subclassing! .. note:: If you think there is another way in which xarray could become more generically flexible then please tell us your ideas by `raising an issue to request the feature `_! Whilst xarray was originally designed specifically to open ``netCDF4`` files as :py:class:`numpy.ndarray` objects labelled by :py:class:`pandas.Index` objects, it is entirely possible today to: - lazily open an xarray object directly from a custom binary file format (e.g. using ``xarray.open_dataset(path, engine='my_custom_format')``, - handle the data as any API-compliant numpy-like array type (e.g. sparse or GPU-backed), - distribute out-of-core computation across that array type in parallel (e.g. 
via :ref:`dask`), - track the physical units of the data through computations (e.g via `pint-xarray `_), - query the data via custom index logic optimized for specific applications (e.g. an :py:class:`~xarray.Index` object backed by a KDTree structure), - attach domain-specific logic via accessor methods (e.g. to understand geographic Coordinate Reference System metadata), - organize hierarchical groups of xarray data in a :py:class:`xarray.DataTree` (e.g. to treat heterogeneous simulation and observational data together during analysis). All of these features can be provided simultaneously, using libraries compatible with the rest of the scientific python ecosystem. In this situation xarray would be essentially a thin wrapper acting as pure-python framework, providing a common interface and separation of concerns via various domain-agnostic abstractions. Most of the remaining pages in the documentation of xarray's internals describe these various types of interoperability in more detail. xarray-2025.12.0/doc/internals/time-coding.rst000066400000000000000000000556571511464676000210620ustar00rootroot00000000000000.. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) np.set_printoptions(threshold=20) int64_max = np.iinfo("int64").max int64_min = np.iinfo("int64").min + 1 uint64_max = np.iinfo("uint64").max .. _internals.timecoding: Time Coding =========== This page gives an overview how xarray encodes and decodes times and which conventions and functions are used. Pandas functionality -------------------- to_datetime ~~~~~~~~~~~ The function :py:func:`pandas.to_datetime` is used within xarray for inferring units and for testing purposes. In normal operation :py:func:`pandas.to_datetime` returns a :py:class:`pandas.Timestamp` (for scalar input) or :py:class:`pandas.DatetimeIndex` (for array-like input) which are related to ``np.datetime64`` values with a resolution inherited from the input (can be one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``). If no resolution can be inherited ``'ns'`` is assumed. That has the implication that the maximum usable time range for those cases is approximately +/- 292 years centered around the Unix epoch (1970-01-01). To accommodate that, we carefully check the units/resolution in the encoding and decoding step. When the arguments are numeric (not strings or ``np.datetime64`` values) ``"unit"`` can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. .. jupyter-execute:: print(f"Minimum datetime: {pd.to_datetime(int64_min, unit="ns")}") print(f"Maximum datetime: {pd.to_datetime(int64_max, unit="ns")}") For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsDatetime` exception is raised: .. jupyter-execute:: try: dtime = pd.to_datetime(int64_max, unit="us") except Exception as err: print(err) .. jupyter-execute:: try: dtime = pd.to_datetime(uint64_max, unit="ns") print("Wrong:", dtime) dtime = pd.to_datetime([uint64_max], unit="ns") except Exception as err: print(err) ``np.datetime64`` values can be extracted with :py:meth:`pandas.Timestamp.to_numpy` and :py:meth:`pandas.DatetimeIndex.to_numpy`. The returned resolution depends on the internal representation. This representation can be changed using :py:meth:`pandas.Timestamp.as_unit` and :py:meth:`pandas.DatetimeIndex.as_unit` respectively. 
``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent datetimes with second, millisecond, microsecond or nanosecond resolution. .. jupyter-execute:: time = pd.to_datetime(np.datetime64(0, "D")) print("Datetime:", time, np.asarray([time.to_numpy()]).dtype) print("Datetime as_unit('ms'):", time.as_unit("ms")) print("Datetime to_numpy():", time.as_unit("ms").to_numpy()) .. jupyter-execute:: time = pd.to_datetime(np.array([-1000, 1, 2], dtype="datetime64[Y]")) print("DatetimeIndex:", time) print("DatetimeIndex as_unit('us'):", time.as_unit("us")) print("DatetimeIndex to_numpy():", time.as_unit("us").to_numpy()) .. warning:: Input data with resolution higher than ``'ns'`` (eg. ``'ps'``, ``'fs'``, ``'as'``) is truncated (not rounded) at the ``'ns'``-level. This is `currently broken `_ for the ``'ps'`` input, where it is interpreted as ``'ns'``. .. jupyter-execute:: print("Good:", pd.to_datetime([np.datetime64(1901901901901, "as")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "fs")])) print(" Bad:", pd.to_datetime([np.datetime64(1901901901901, "ps")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "ns")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "us")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "ms")])) .. warning:: Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_datetime` when providing :py:class:`numpy.datetime64` as scalar or numpy array as input. .. jupyter-execute:: print( "Works:", np.datetime64(1901901901901, "s"), pd.to_datetime(np.datetime64(1901901901901, "s")), ) print( "Works:", np.array([np.datetime64(1901901901901, "s")]), pd.to_datetime(np.array([np.datetime64(1901901901901, "s")])), ) try: pd.to_datetime([np.datetime64(1901901901901, "s")]) except Exception as err: print("Raises:", err) try: pd.to_datetime(1901901901901, unit="s") except Exception as err: print("Raises:", err) try: pd.to_datetime([1901901901901], unit="s") except Exception as err: print("Raises:", err) try: pd.to_datetime(np.array([1901901901901]), unit="s") except Exception as err: print("Raises:", err) to_timedelta ~~~~~~~~~~~~ The function :py:func:`pandas.to_timedelta` is used within xarray for inferring units and for testing purposes. In normal operation :py:func:`pandas.to_timedelta` returns a :py:class:`pandas.Timedelta` (for scalar input) or :py:class:`pandas.TimedeltaIndex` (for array-like input) which are ``np.timedelta64`` values with ``ns`` resolution internally. That has the implication, that the usable timedelta covers only roughly 585 years. To accommodate for that, we are working around that limitation in the encoding and decoding step. .. jupyter-execute:: f"Maximum timedelta range: ({pd.to_timedelta(int64_min, unit="ns")}, {pd.to_timedelta(int64_max, unit="ns")})" For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsTimedelta` exception is raised: .. jupyter-execute:: try: delta = pd.to_timedelta(int64_max, unit="us") except Exception as err: print("First:", err) .. jupyter-execute:: try: delta = pd.to_timedelta(uint64_max, unit="ns") except Exception as err: print("Second:", err) When arguments are numeric (not strings or ``np.timedelta64`` values) "unit" can be anything from ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. 
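A short illustration of this behaviour:

.. code-block:: python

    import pandas as pd

    # the unit only scales the numeric input; the returned Timedelta is
    # nevertheless backed by a nanosecond-resolution np.timedelta64
    delta = pd.to_timedelta(7, unit="D")
    print(delta, delta.to_numpy().dtype)
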
``np.timedelta64`` values can be extracted with :py:meth:`pandas.Timedelta.to_numpy` and :py:meth:`pandas.TimedeltaIndex.to_numpy`. The returned resolution depends on the internal representation. This representation can be changed using :py:meth:`pandas.Timedelta.as_unit` and :py:meth:`pandas.TimedeltaIndex.as_unit` respectively. ``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent timedeltas with second, millisecond, microsecond or nanosecond resolution. .. jupyter-execute:: delta = pd.to_timedelta(np.timedelta64(1, "D")) print("Timedelta:", delta, np.asarray([delta.to_numpy()]).dtype) print("Timedelta as_unit('ms'):", delta.as_unit("ms")) print("Timedelta to_numpy():", delta.as_unit("ms").to_numpy()) .. jupyter-execute:: delta = pd.to_timedelta([0, 1, 2], unit="D") print("TimedeltaIndex:", delta) print("TimedeltaIndex as_unit('ms'):", delta.as_unit("ms")) print("TimedeltaIndex to_numpy():", delta.as_unit("ms").to_numpy()) .. warning:: Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_timedelta` when providing :py:class:`numpy.timedelta64` as scalar or numpy array as input. .. jupyter-execute:: print( "Works:", np.timedelta64(1901901901901, "s"), pd.to_timedelta(np.timedelta64(1901901901901, "s")), ) print( "Works:", np.array([np.timedelta64(1901901901901, "s")]), pd.to_timedelta(np.array([np.timedelta64(1901901901901, "s")])), ) try: pd.to_timedelta([np.timedelta64(1901901901901, "s")]) except Exception as err: print("Raises:", err) try: pd.to_timedelta(1901901901901, unit="s") except Exception as err: print("Raises:", err) try: pd.to_timedelta([1901901901901], unit="s") except Exception as err: print("Raises:", err) try: pd.to_timedelta(np.array([1901901901901]), unit="s") except Exception as err: print("Raises:", err) Timestamp ~~~~~~~~~ :py:class:`pandas.Timestamp` is used within xarray to wrap strings of CF encoding reference times and datetime.datetime. When arguments are numeric (not strings) "unit" can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. In normal operation :py:class:`pandas.Timestamp` holds the timestamp in the provided resolution, but only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is truncated to ``'ns'``. The same conversion rules apply here as for :py:func:`pandas.to_timedelta` (see `to_timedelta`_). Depending on the internal resolution Timestamps can be represented in the range: .. jupyter-execute:: for unit in ["s", "ms", "us", "ns"]: print( f"unit: {unit!r} time range ({pd.Timestamp(int64_min, unit=unit)}, {pd.Timestamp(int64_max, unit=unit)})" ) Since relaxing the resolution, this enhances the range to several hundreds of thousands of centuries with microsecond representation. ``NaT`` will be at ``np.iinfo("int64").min`` for all of the different representations. .. warning:: When initialized with a datetime string this is only defined from ``-9999-01-01`` to ``9999-12-31``. .. jupyter-execute:: try: print("Works:", pd.Timestamp("-9999-01-01 00:00:00")) print("Works, too:", pd.Timestamp("9999-12-31 23:59:59")) print(pd.Timestamp("10000-01-01 00:00:00")) except Exception as err: print("Errors:", err) .. note:: :py:class:`pandas.Timestamp` is the only current possibility to correctly import time reference strings. 
It handles non-ISO formatted strings, keeps the resolution of the strings (``'s'``, ``'ms'`` etc.) and imports time zones. When initialized with :py:class:`numpy.datetime64` instead of a string it even overcomes the above limitation of the possible time range. .. jupyter-execute:: try: print("Handles non-ISO:", pd.Timestamp("92-1-8 151542")) print( "Keeps resolution 1:", pd.Timestamp("1992-10-08 15:15:42"), pd.Timestamp("1992-10-08 15:15:42").unit, ) print( "Keeps resolution 2:", pd.Timestamp("1992-10-08 15:15:42.5"), pd.Timestamp("1992-10-08 15:15:42.5").unit, ) print( "Keeps timezone:", pd.Timestamp("1992-10-08 15:15:42.5 -6:00"), pd.Timestamp("1992-10-08 15:15:42.5 -6:00").unit, ) print( "Extends timerange :", pd.Timestamp(np.datetime64("-10000-10-08 15:15:42.5001")), pd.Timestamp(np.datetime64("-10000-10-08 15:15:42.5001")).unit, ) except Exception as err: print("Errors:", err) DatetimeIndex ~~~~~~~~~~~~~ :py:class:`pandas.DatetimeIndex` is used to wrap ``np.datetime64`` values or other datetime-likes when encoding. The resolution of the DatetimeIndex depends on the input, but can be only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is cut to ``'ns'``. :py:class:`pandas.DatetimeIndex` will raise :py:class:`pandas.OutOfBoundsDatetime` if the input can't be represented in the given resolution. .. jupyter-execute:: try: print( "Works:", pd.DatetimeIndex( np.array(["1992-01-08", "1992-01-09"], dtype="datetime64[D]") ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1992-01-08 15:15:42", "1992-01-09 15:15:42"], dtype="datetime64[s]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1992-01-08 15:15:42.5", "1992-01-09 15:15:42.0"], dtype="datetime64[ms]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1970-01-01 00:00:00.401501601701801901", "1970-01-01 00:00:00"], dtype="datetime64[as]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["-10000-01-01 00:00:00.401501", "1970-01-01 00:00:00"], dtype="datetime64[us]", ) ), ) except Exception as err: print("Errors:", err) CF Conventions Time Handling ---------------------------- Xarray tries to adhere to the latest version of the `CF Conventions`_. Relevant is the section on `Time Coordinate`_ and the `Calendar`_ subsection. .. _CF Conventions: https://cfconventions.org .. _Time Coordinate: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.11/cf-conventions.html#time-coordinate .. _Calendar: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.11/cf-conventions.html#calendar CF time decoding ~~~~~~~~~~~~~~~~ Decoding of ``values`` with a time unit specification like ``"seconds since 1992-10-8 15:15:42.5 -6:00"`` into datetimes using the CF conventions is a multistage process. 1. If we have a non-standard calendar (e.g. ``"noleap"``) decoding is done with the ``cftime`` package, which is not covered in this section. For the ``"standard"``/``"gregorian"`` calendar as well as the ``"proleptic_gregorian"`` calendar the above outlined pandas functionality is used. 2. The ``"standard"``/``"gregorian"`` calendar and the ``"proleptic_gregorian"`` are equivalent for any dates and reference times >= ``"1582-10-15"``. First the reference time is checked and any timezone information stripped off. In a second step, the minimum and maximum ``values`` are checked if they can be represented in the current reference time resolution. At the same time integer overflow would be caught. 
For the ``"standard"``/``"gregorian"`` calendar the dates are checked to be >= ``"1582-10-15"``. If anything fails, the decoding is attempted with ``cftime``. 3. As the unit (here ``"seconds"``) and the resolution of the reference time ``"1992-10-8 15:15:42.5 -6:00"`` (here ``"milliseconds"``) might be different, the decoding resolution is aligned to the higher resolution of the two. Users may also specify their wanted target resolution by setting the ``time_unit`` keyword argument to one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` (default ``'ns'``). This will be included in the alignment process. This is done by multiplying the ``values`` by the ratio of nanoseconds per time unit and nanoseconds per reference time unit. To retain consistency for ``NaT`` values a mask is kept and re-introduced after the multiplication. 4. Times encoded as floating point values are checked for fractional parts and the resolution is enhanced in an iterative process until a fitting resolution (or ``'ns'``) is found. A ``SerializationWarning`` is issued to make the user aware of the possibly problematic encoding. 5. Finally, the ``values`` (at this point converted to ``int64`` values) are cast to ``datetime64[unit]`` (using the above retrieved unit) and added to the reference time :py:class:`pandas.Timestamp`. .. jupyter-execute:: calendar = "proleptic_gregorian" values = np.array([-1000 * 365, 0, 1000 * 365], dtype="int64") units = "days since 2000-01-01 00:00:00.000001" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[us]" dt .. jupyter-execute:: units = "microseconds since 2000-01-01 00:00:00" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[us]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "days since 2000-01-01 00:00:00.001" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[ms]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "hours since 2000-01-01" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "hours since 2000-01-01 00:00:00 03:30" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt .. jupyter-execute:: values = np.array([-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64") units = "days since 0001-01-01 00:00:00" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt CF time encoding ~~~~~~~~~~~~~~~~ For encoding the process is more or less a reversal of the above, but we have to make some decisions on default values. 1. Infer ``data_units`` from the given ``dates``. 2. Infer ``units`` (either cleanup given ``units`` or use ``data_units`` 3. Infer the calendar name from the given ``dates``. 4. If dates are :py:class:`cftime.datetime` objects then encode with ``cftime.date2num`` 5. Retrieve ``time_units`` and ``ref_date`` from ``units`` 6. Check ``ref_date`` >= ``1582-10-15``, otherwise -> ``cftime`` 7. Wrap ``dates`` with pd.DatetimeIndex 8. Subtracting ``ref_date`` (:py:class:`pandas.Timestamp`) from above :py:class:`pandas.DatetimeIndex` will return :py:class:`pandas.TimedeltaIndex` 9. 
Align resolution of :py:class:`pandas.TimedeltaIndex` with resolution of ``time_units`` 10. Retrieve needed ``units`` and ``delta`` to faithfully encode into int64 11. Divide ``time_deltas`` by ``delta``, use floor division (integer) or normal division (float) 12. Return result .. jupyter-execute:: calendar = "proleptic_gregorian" dates = np.array( [ "-2000-01-01T00:00:00", "0000-01-01T00:00:00", "0002-01-01T00:00:00", "2000-01-01T00:00:00", ], dtype="datetime64[s]", ) orig_values = np.array( [-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64" ) units = "days since 0001-01-01 00:00:00" values, _, _ = xr.coding.times.encode_cf_datetime( dates, units, calendar, dtype=np.dtype("int64") ) print(values, units) np.testing.assert_array_equal(values, orig_values) .. jupyter-execute:: :stderr: dates = np.array( [ "-2000-01-01T01:00:00", "0000-01-01T00:00:00", "0002-01-01T00:00:00", "2000-01-01T00:00:00", ], dtype="datetime64[s]", ) orig_values = np.array( [-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64" ) orig_values *= 24 # Convert to hours orig_values[0] += 1 # Adjust for the hour offset in dates above units = "days since 0001-01-01 00:00:00" values, units, _ = xr.coding.times.encode_cf_datetime( dates, units, calendar, dtype=np.dtype("int64") ) print(values, units) np.testing.assert_array_equal(values, orig_values) .. _internals.default_timeunit: Default Time Unit ~~~~~~~~~~~~~~~~~ The current default time unit of xarray is ``'ns'``. When setting keyword argument ``time_unit`` unit to ``'s'`` (the lowest resolution pandas allows) datetimes will be converted to at least ``'s'``-resolution, if possible. The same holds true for ``'ms'`` and ``'us'``. .. jupyter-execute:: attrs = {"units": "hours since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-datetimes1.nc") .. jupyter-execute:: xr.open_dataset("test-datetimes1.nc") .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-datetimes1.nc", decode_times=coder) If a coarser unit is requested the datetimes are decoded into their native on-disk resolution, if possible. .. jupyter-execute:: attrs = {"units": "milliseconds since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-datetimes2.nc") .. jupyter-execute:: xr.open_dataset("test-datetimes2.nc") .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-datetimes2.nc", decode_times=coder) Similar logic applies for decoding timedelta values. The default resolution is ``"ns"``: .. jupyter-execute:: attrs = {"units": "hours"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-timedeltas1.nc") .. jupyter-execute:: :stderr: xr.open_dataset("test-timedeltas1.nc") By default, timedeltas will be decoded to the same resolution as datetimes: .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-timedeltas1.nc", decode_times=coder, decode_timedelta=True) but if one would like to decode timedeltas to a different resolution, one can provide a coder specifically for timedeltas to ``decode_timedelta``: .. jupyter-execute:: timedelta_coder = xr.coders.CFTimedeltaCoder(time_unit="ms") xr.open_dataset( "test-timedeltas1.nc", decode_times=coder, decode_timedelta=timedelta_coder ) As with datetimes, if a coarser unit is requested the timedeltas are decoded into their native on-disk resolution, if possible: .. 
jupyter-execute:: attrs = {"units": "milliseconds"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-timedeltas2.nc") .. jupyter-execute:: xr.open_dataset("test-timedeltas2.nc", decode_timedelta=True) .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-timedeltas2.nc", decode_times=coder, decode_timedelta=True) To opt-out of timedelta decoding (see issue `Undesired decoding to timedelta64 `_) pass ``False`` to ``decode_timedelta``: .. jupyter-execute:: xr.open_dataset("test-timedeltas2.nc", decode_timedelta=False) .. note:: Note that in the future the default value of ``decode_timedelta`` will be ``False`` rather than ``None``. .. jupyter-execute:: :hide-code: # Cleanup import os for f in [ "test-datetimes1.nc", "test-datetimes2.nc", "test-timedeltas1.nc", "test-timedeltas2.nc", ]: if os.path.exists(f): os.remove(f) xarray-2025.12.0/doc/internals/zarr-encoding-spec.rst000066400000000000000000000160621511464676000223400ustar00rootroot00000000000000.. currentmodule:: xarray .. _zarr_encoding: Zarr Encoding Specification ============================ In implementing support for the `Zarr `_ storage format, Xarray developers made some *ad hoc* choices about how to store NetCDF data in Zarr. Future versions of the Zarr spec will likely include a more formal convention for the storage of the NetCDF data model in Zarr; see `Zarr spec repo `_ for ongoing discussion. First, Xarray can only read and write Zarr groups. There is currently no support for reading / writing individual Zarr arrays. Zarr groups are mapped to Xarray ``Dataset`` objects. Second, from Xarray's point of view, the key difference between NetCDF and Zarr is that all NetCDF arrays have *dimension names* while Zarr arrays do not. In Zarr v2, Xarray uses an ad-hoc convention to encode and decode the name of each array's dimensions. However, starting with Zarr v3, the ``dimension_names`` attribute provides a formal convention for storing the NetCDF data model in Zarr. Dimension Encoding in Zarr Formats ----------------------------------- Xarray encodes array dimensions differently depending on the Zarr format version: **Zarr V2 Format:** Xarray uses a special Zarr array attribute: ``_ARRAY_DIMENSIONS``. The value of this attribute is a list of dimension names (strings), for example ``["time", "lon", "lat"]``. When writing data to Zarr V2, Xarray sets this attribute on all variables based on the variable dimensions. This attribute is visible when accessing arrays directly with zarr-python. **Zarr V3 Format:** Xarray uses the native ``dimension_names`` field in the array metadata. This is part of the official Zarr V3 specification and is not stored as a regular attribute. When accessing arrays with zarr-python, this information is available in the array's metadata but not in the attributes dictionary. When reading a Zarr group, Xarray looks for dimension information in the appropriate location based on the format version, raising an error if it can't be found. The dimension information is used to define the variable dimension names and then (for Zarr V2) removed from the attributes dictionary returned to the user. CF Conventions -------------- Xarray uses its standard CF encoding/decoding functionality for handling metadata (see :py:func:`decode_cf`). This includes encoding concepts such as dimensions and coordinates. 
The ``coordinates`` attribute, which lists coordinate variables (e.g., ``"yc xc"`` for spatial coordinates), is one part of the broader CF conventions used to describe metadata in NetCDF and Zarr. Compatibility and Reading ------------------------- Because of these encoding choices, Xarray cannot read arbitrary Zarr arrays, but only Zarr data with valid dimension metadata. Xarray supports: - Zarr V2 arrays with ``_ARRAY_DIMENSIONS`` attributes - Zarr V3 arrays with ``dimension_names`` metadata - `NCZarr `_ format (dimension names are defined in the ``.zarray`` file) After decoding the dimension information and assigning the variable dimensions, Xarray proceeds to [optionally] decode each variable using its standard CF decoding machinery used for NetCDF data. Finally, it's worth noting that Xarray writes (and attempts to read) "consolidated metadata" by default (the ``.zmetadata`` file), which is another non-standard Zarr extension, albeit one implemented upstream in Zarr-Python. You do not need to write consolidated metadata to make Zarr stores readable in Xarray, but because Xarray can open these stores much faster, users will see a warning about poor performance when reading non-consolidated stores unless they explicitly set ``consolidated=False``. See :ref:`io.zarr.consolidated_metadata` for more details. Examples: Zarr Format Differences ---------------------------------- The following examples demonstrate how dimension and coordinate encoding differs between Zarr format versions. We'll use the same tutorial dataset but write it in different formats to show what users will see when accessing the files directly with zarr-python. **Example 1: Zarr V2 Format** .. jupyter-execute:: import os import xarray as xr import zarr # Load tutorial dataset and write as Zarr V2 ds = xr.tutorial.load_dataset("rasm") ds.to_zarr("rasm_v2.zarr", mode="w", consolidated=False, zarr_format=2) # Open with zarr-python and examine attributes zgroup = zarr.open("rasm_v2.zarr") print("Zarr V2 - Tair attributes:") tair_attrs = dict(zgroup["Tair"].attrs) for key, value in tair_attrs.items(): print(f" '{key}': {repr(value)}") .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("rasm_v2.zarr") **Example 2: Zarr V3 Format** .. jupyter-execute:: # Write the same dataset as Zarr V3 ds.to_zarr("rasm_v3.zarr", mode="w", consolidated=False, zarr_format=3) # Open with zarr-python and examine attributes zgroup = zarr.open("rasm_v3.zarr") print("Zarr V3 - Tair attributes:") tair_attrs = dict(zgroup["Tair"].attrs) for key, value in tair_attrs.items(): print(f" '{key}': {repr(value)}") # For Zarr V3, dimension information is in metadata tair_array = zgroup["Tair"] print(f"\nZarr V3 - dimension_names in metadata: {tair_array.metadata.dimension_names}") .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("rasm_v3.zarr") Chunk Key Encoding ------------------ When writing data to Zarr stores, Xarray supports customizing how chunk keys are encoded through the ``chunk_key_encoding`` parameter in the variable's encoding dictionary. This is particularly useful when working with Zarr V2 arrays and you need to control the dimension separator in chunk keys. For example, to specify a custom separator for chunk keys: .. 
jupyter-execute:: import xarray as xr import numpy as np from zarr.core.chunk_key_encodings import V2ChunkKeyEncoding # Create a custom chunk key encoding with "/" as separator enc = V2ChunkKeyEncoding(separator="/").to_dict() # Create and write a dataset with custom chunk key encoding arr = np.ones((42, 100)) ds = xr.DataArray(arr, name="var1").to_dataset() ds.to_zarr( "example.zarr", zarr_format=2, mode="w", encoding={"var1": {"chunks": (42, 50), "chunk_key_encoding": enc}}, ) The ``chunk_key_encoding`` option accepts a dictionary that specifies the encoding configuration. For Zarr V2 arrays, you can use the ``V2ChunkKeyEncoding`` class from ``zarr.core.chunk_key_encodings`` to generate this configuration. This is particularly useful when you need to ensure compatibility with specific Zarr V2 storage layouts or when working with tools that expect a particular chunk key format. .. note:: The ``chunk_key_encoding`` option is only relevant when writing to Zarr stores. When reading Zarr arrays, Xarray automatically detects and uses the appropriate chunk key encoding based on the store's format and configuration. .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("example.zarr") xarray-2025.12.0/doc/roadmap.rst000066400000000000000000000270331511464676000162720ustar00rootroot00000000000000.. _roadmap: Development roadmap =================== Authors: Xarray developers Date: September 7, 2021 Xarray is an open source Python library for labeled multidimensional arrays and datasets. Our philosophy -------------- Why has xarray been successful? In our opinion: - Xarray does a great job of solving **specific use-cases** for multidimensional data analysis: - The dominant use-case for xarray is for analysis of gridded dataset in the geosciences, e.g., as part of the `Pangeo `__ project. - Xarray is also used more broadly in the physical sciences, where we've found the needs for analyzing multidimensional datasets are remarkably consistent (e.g., see `SunPy `__ and `PlasmaPy `__). - Finally, xarray is used in a variety of other domains, including finance, `probabilistic programming `__ and genomics. - Xarray is also a **domain agnostic** solution: - We focus on providing a flexible set of functionality related labeled multidimensional arrays, rather than solving particular problems. - This facilitates collaboration between users with different needs, and helps us attract a broad community of contributors. - Importantly, this retains flexibility, for use cases that don't fit particularly well into existing frameworks. - Xarray **integrates well** with other libraries in the scientific Python stack. - We leverage first-class external libraries for core features of xarray (e.g., NumPy for ndarrays, pandas for indexing, dask for parallel computing) - We expose our internal abstractions to users (e.g., ``apply_ufunc()``), which facilitates extending xarray in various ways. Together, these features have made xarray a first-class choice for labeled multidimensional arrays in Python. We want to double-down on xarray's strengths by making it an even more flexible and powerful tool for multidimensional data analysis. We want to continue to engage xarray's core geoscience users, and to also reach out to new domains to learn from other successful data models like those of `yt `__ or the `OLAP cube `__. Specific needs -------------- The user community has voiced a number specific needs related to how xarray interfaces with domain specific problems. 
Xarray may not solve all of these issues directly, but these areas provide opportunities for xarray to offer better, more extensible interfaces. Some examples of these common needs are: - Non-regular grids (e.g., staggered and unstructured meshes). - Physical units. - Lazily computed arrays (e.g., for coordinate systems). - New file-formats. Technical vision ---------------- We think the right approach to extending xarray's user community and the usefulness of the project is to focus on improving key interfaces that can be used externally to meet domain-specific needs. We can generalize the community's needs into four main categories: - More flexible grids/indexing. - More flexible arrays/computing. - More flexible storage backends. - More flexible data structures. Each of these is detailed further in the subsections below. Flexible indexes ~~~~~~~~~~~~~~~~ .. note:: Work on flexible grids and indexes is currently underway. See `GH Project #1 `__ for more detail. Xarray currently keeps track of indexes associated with coordinates by storing them in the form of a ``pandas.Index`` in special ``xarray.IndexVariable`` objects. The limitations of this model became clear with the addition of ``pandas.MultiIndex`` support in xarray 0.9, where a single index corresponds to multiple xarray variables. MultiIndex support is highly useful, but xarray now has numerous special cases to check for MultiIndex levels. A cleaner model would be to elevate ``indexes`` to an explicit part of xarray's data model, e.g., as attributes on the ``Dataset`` and ``DataArray`` classes. Indexes would need to be propagated along with coordinates in xarray operations, but would no longer need to have a one-to-one correspondence with coordinate variables. Instead, an index should be able to refer to multiple (possibly multidimensional) coordinates that define it. See :issue:`1603` for full details. Specific tasks: - Add an ``indexes`` attribute to ``xarray.Dataset`` and ``xarray.DataArray``, as dictionaries that map from coordinate names to xarray index objects. - Use the new index interface to write wrappers for ``pandas.Index``, ``pandas.MultiIndex`` and ``scipy.spatial.KDTree``. - Expose the interface externally to allow third-party libraries to implement custom indexing routines, e.g., for geospatial look-ups on the surface of the Earth. In addition to the new features it directly enables, this cleanup will allow xarray to more easily implement some long-awaited features that build upon indexing, such as groupby operations with multiple variables. Flexible arrays ~~~~~~~~~~~~~~~ .. note:: Work on flexible arrays is currently underway. See `GH Project #2 `__ for more detail. Xarray currently supports wrapping multidimensional arrays defined by NumPy, dask and, to a limited extent, pandas. It would be nice to have interfaces that allow xarray to wrap alternative N-D array implementations, e.g.: - Arrays holding physical units. - Lazily computed arrays. - Other ndarray objects, e.g., sparse, xnd, xtensor. Our strategy has been to pursue upstream improvements in NumPy (see `NEP-22 `__) for supporting a complete duck-typing interface using NumPy's higher-level array API. Improvements in NumPy's support for custom data types would also be highly useful for xarray users. By pursuing these improvements in NumPy we hope to extend the benefits to the full scientific Python community, and avoid tight coupling between xarray and specific third-party libraries (e.g., for implementing units).
This will allow xarray to maintain its domain agnostic strengths. We expect that we may eventually add some minimal interfaces in xarray for features that we delegate to external array libraries (e.g., for getting units and changing units). If we do add these features, we expect them to be thin wrappers, with core functionality implemented by third-party libraries. Flexible storage ~~~~~~~~~~~~~~~~ The xarray backends module has grown in size and complexity. Much of this growth has been "organic" and mostly to support incremental additions to the supported backends. This has left us with a fragile internal API that is difficult for even experienced xarray developers to use. Moreover, the lack of a public facing API for building xarray backends means that users can not easily build backend interface for xarray in third-party libraries. The idea of refactoring the backends API and exposing it to users was originally proposed in :issue:`1970`. The idea would be to develop a well tested and generic backend base class and associated utilities for external use. Specific tasks for this development would include: - Exposing an abstract backend for writing new storage systems. - Exposing utilities for features like automatic closing of files, LRU-caching and explicit/lazy indexing. - Possibly moving some infrequently used backends to third-party packages. Flexible data structures ~~~~~~~~~~~~~~~~~~~~~~~~ Xarray provides two primary data structures, the ``xarray.DataArray`` and the ``xarray.Dataset``. This section describes two possible data model extensions. Tree-like data structure ++++++++++++++++++++++++ .. note:: After some time, the community DataTree project has now been updated and merged into xarray exposing :py:class:`xarray.DataTree`. This is just released and a bit experimental, but please try it out and let us know what you think. Take a look at our :ref:`quick-overview-datatrees` quickstart. Xarrayโ€™s highest-level object was previously an ``xarray.Dataset``, whose data model echoes that of a single netCDF group. However real-world datasets are often better represented by a collection of related Datasets. Particular common examples include: - Multi-resolution datasets, - Collections of time series datasets with differing lengths, - Heterogeneous datasets comprising multiple different types of related observational or simulation data, - Bayesian workflows involving various statistical distributions over multiple variables, - Whole netCDF files containing multiple groups. - Comparison of output from many similar models (such as in the IPCC's Coupled Model Intercomparison Projects) A new tree-like data structure, ``xarray.DataTree``, which is essentially a structured hierarchical collection of Datasets, represents these cases and instead maps to multiple netCDF groups (see :issue:`4118`). Currently there are several libraries which have wrapped xarray in order to build domain-specific data structures (e.g. `xarray-multiscale `__.), but the general ``xarray.DataTree`` object obviates the need for these and consolidates effort in a single domain-agnostic tool, much as xarray has already achieved. Labeled array without coordinates +++++++++++++++++++++++++++++++++ There is a need for a lightweight array structure with named dimensions for convenient indexing and broadcasting. Xarray includes such a structure internally (``xarray.Variable``). 
We want to factor out xarray's โ€œVariableโ€ object into a standalone package with minimal dependencies for integration with libraries that don't want to inherit xarray's dependency on pandas (e.g. scikit-learn). The new โ€œVariableโ€ class will follow established array protocols and the new data-apis standard. It will be capable of wrapping multiple array-like objects (e.g. NumPy, Dask, Sparse, Pint, CuPy, Pytorch). While โ€œDataArrayโ€ fits some of these requirements, it offers a more complex data model than is desired for many applications and depends on pandas. Engaging more users ------------------- Like many open-source projects, the documentation of xarray has grown together with the library's features. While we think that the xarray documentation is comprehensive already, we acknowledge that the adoption of xarray might be slowed down because of the substantial time investment required to learn its working principles. In particular, non-computer scientists or users less familiar with the pydata ecosystem might find it difficult to learn xarray and realize how xarray can help them in their daily work. In order to lower this adoption barrier, we propose to: - Develop entry-level tutorials for users with different backgrounds. For example, we would like to develop tutorials for users with or without previous knowledge of pandas, NumPy, netCDF, etc. These tutorials may be built as part of xarray's documentation or included in a separate repository to enable interactive use (e.g. mybinder.org). - Document typical user workflows in a dedicated website, following the example of `dask-stories `__. - Write a basic glossary that defines terms that might not be familiar to all (e.g. "lazy", "labeled", "serialization", "indexing", "backend"). Administrative -------------- NumFOCUS ~~~~~~~~ On July 16, 2018, Joe and Stephan submitted xarray's fiscal sponsorship application to NumFOCUS. xarray-2025.12.0/doc/user-guide/000077500000000000000000000000001511464676000161615ustar00rootroot00000000000000xarray-2025.12.0/doc/user-guide/combining.rst000066400000000000000000000274211511464676000206660ustar00rootroot00000000000000.. _combining data: Combining data -------------- .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal * For combining datasets or data arrays along a single dimension, see concatenate_. * For combining datasets with different variables, see merge_. * For combining datasets or data arrays with different indexes or missing values, see combine_. * For combining datasets or data arrays along multiple dimensions see combining.multi_. .. _concatenate: Concatenate ~~~~~~~~~~~ To combine :py:class:`~xarray.Dataset` / :py:class:`~xarray.DataArray` objects along an existing or new dimension into a larger object, you can use :py:func:`~xarray.concat`. ``concat`` takes an iterable of ``DataArray`` or ``Dataset`` objects, as well as a dimension name, and concatenates along that dimension: .. jupyter-execute:: da = xr.DataArray( np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])] ) da.isel(y=slice(0, 1)) # same as da[:, :1] .. jupyter-execute:: # This resembles how you would use np.concatenate: xr.concat([da[:, :1], da[:, 1:]], dim="y") .. 
jupyter-execute:: # For more friendly pandas-like indexing you can use: xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") In addition to combining along an existing dimension, ``concat`` can create a new dimension by stacking lower dimensional arrays together: .. jupyter-execute:: da.sel(x="a") .. jupyter-execute:: xr.concat([da.isel(x=0), da.isel(x=1)], "x") If the second argument to ``concat`` is a new dimension name, the arrays will be concatenated along that new dimension, which is always inserted as the first dimension: .. jupyter-execute:: da0 = da.isel(x=0, drop=True) da1 = da.isel(x=1, drop=True) xr.concat([da0, da1], "new_dim") The second argument to ``concat`` can also be an :py:class:`~pandas.Index` or :py:class:`~xarray.DataArray` object as well as a string, in which case it is used to label the values along the new dimension: .. jupyter-execute:: xr.concat([da0, da1], pd.Index([-90, -100], name="new_dim")) Of course, ``concat`` also works on ``Dataset`` objects: .. jupyter-execute:: ds = da.to_dataset(name="foo") xr.concat([ds.sel(x="a"), ds.sel(x="b")], "x") :py:func:`~xarray.concat` has a number of options which provide deeper control over which variables are concatenated and how it handles conflicting variables between datasets. With the default parameters, xarray will load some coordinate variables into memory to compare them between datasets. This may be prohibitively expensive if you are manipulating your dataset lazily using :ref:`dask`. .. note:: In a future version of xarray the default values for many of these options will change. You can opt into the new default values early using ``xr.set_options(use_new_combine_kwarg_defaults=True)``. .. _merge: Merge ~~~~~ To combine variables and coordinates between multiple ``DataArray`` and/or ``Dataset`` objects, use :py:func:`~xarray.merge`. It can merge a list of ``Dataset``, ``DataArray`` or dictionaries of objects convertible to ``DataArray`` objects: .. jupyter-execute:: xr.merge([ds, ds.rename({"foo": "bar"})]) .. jupyter-execute:: xr.merge([xr.DataArray(n, name="var%d" % n) for n in range(5)]) If you merge another dataset (or a dictionary including data array objects), by default the resulting dataset will be aligned on the **union** of all index coordinates: .. note:: In a future version of xarray the default value for ``join`` and ``compat`` will change. This change will mean that xarray will no longer attempt to align the indices of the merged dataset. You can opt into the new default values early using ``xr.set_options(use_new_combine_kwarg_defaults=True)``. Or explicitly set ``join='outer'`` to preserve old behavior. .. jupyter-execute:: other = xr.Dataset({"bar": ("x", [1, 2, 3, 4]), "x": list("abcd")}) xr.merge([ds, other], join="outer") This ensures that ``merge`` is non-destructive. ``xarray.MergeError`` is raised if you attempt to merge two variables with the same name but different values: .. jupyter-execute:: :raises: xr.merge([ds, ds + 1]) .. note:: In a future version of xarray the default value for ``compat`` will change from ``compat='no_conflicts'`` to ``compat='override'``. In this scenario the values in the first object override all the values in other objects. .. jupyter-execute:: xr.merge([ds, ds + 1], compat="override") The same non-destructive merging between ``DataArray`` index coordinates is used in the :py:class:`~xarray.Dataset` constructor: .. jupyter-execute:: xr.Dataset({"a": da.isel(x=slice(0, 1)), "b": da.isel(x=slice(1, 2))}) .. 
_combine: Combine ~~~~~~~ The instance method :py:meth:`~xarray.DataArray.combine_first` combines two datasets/data arrays and defaults to non-null values in the calling object, using values from the called object to fill holes. The resulting coordinates are the union of coordinate labels. Vacant cells as a result of the outer-join are filled with ``NaN``. For example: .. jupyter-execute:: ar0 = xr.DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])]) ar1 = xr.DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])]) ar0.combine_first(ar1) .. jupyter-execute:: ar1.combine_first(ar0) For datasets, ``ds0.combine_first(ds1)`` works similarly to ``xr.merge([ds0, ds1])``, except that ``xr.merge`` raises ``MergeError`` when there are conflicting values in variables to be merged, whereas ``.combine_first`` defaults to the calling object's values. .. note:: In a future version of xarray the default options for ``xr.merge`` will change such that the behavior matches ``combine_first``. .. _update: Update ~~~~~~ In contrast to ``merge``, :py:meth:`~xarray.Dataset.update` modifies a dataset in-place without checking for conflicts, and will overwrite any existing variables with new values: .. jupyter-execute:: ds.update({"space": ("space", [10.2, 9.4, 3.9])}) However, dimensions are still required to be consistent between different Dataset variables, so you cannot change the size of a dimension unless you replace all dataset variables that use it. ``update`` also performs automatic alignment if necessary. Unlike ``merge``, it maintains the alignment of the original array instead of merging indexes: .. jupyter-execute:: ds.update(other) The exact same alignment logic when setting a variable with ``__setitem__`` syntax: .. jupyter-execute:: ds["baz"] = xr.DataArray([9, 9, 9, 9, 9], coords=[("x", list("abcde"))]) ds.baz Equals and identical ~~~~~~~~~~~~~~~~~~~~ Xarray objects can be compared by using the :py:meth:`~xarray.Dataset.equals`, :py:meth:`~xarray.Dataset.identical` and :py:meth:`~xarray.Dataset.broadcast_equals` methods. These methods are used by the optional ``compat`` argument on ``concat`` and ``merge``. :py:attr:`~xarray.Dataset.equals` checks dimension names, indexes and array values: .. jupyter-execute:: da.equals(da.copy()) :py:attr:`~xarray.Dataset.identical` also checks attributes, and the name of each object: .. jupyter-execute:: da.identical(da.rename("bar")) :py:attr:`~xarray.Dataset.broadcast_equals` does a more relaxed form of equality check that allows variables to have different dimensions, as long as values are constant along those new dimensions: .. jupyter-execute:: left = xr.Dataset(coords={"x": 0}) right = xr.Dataset({"x": [0, 0, 0]}) left.broadcast_equals(right) Like pandas objects, two xarray objects are still equal or identical if they have missing values marked by ``NaN`` in the same locations. In contrast, the ``==`` operation performs element-wise comparison (like numpy): .. jupyter-execute:: da == da.copy() Note that ``NaN`` does not compare equal to ``NaN`` in element-wise comparison; you may need to deal with missing values explicitly. .. _combining.no_conflicts: Merging with 'no_conflicts' ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``compat`` argument ``'no_conflicts'`` is only available when combining xarray objects with ``merge``. In addition to the above comparison methods it allows the merging of xarray objects with locations where *either* have ``NaN`` values. 
This can be used to combine data with overlapping coordinates as long as any non-missing values agree or are disjoint: .. jupyter-execute:: ds1 = xr.Dataset({"a": ("x", [10, 20, 30, np.nan])}, {"x": [1, 2, 3, 4]}) ds2 = xr.Dataset({"a": ("x", [np.nan, 30, 40, 50])}, {"x": [2, 3, 4, 5]}) xr.merge([ds1, ds2], join="outer", compat="no_conflicts") Note that due to the underlying representation of missing values as floating point numbers (``NaN``), variable data type is not always preserved when merging in this manner. .. _combining.multi: Combining along multiple dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For combining many objects along multiple dimensions xarray provides :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`. These functions use a combination of ``concat`` and ``merge`` across different variables to combine many objects into one. :py:func:`~xarray.combine_nested` requires specifying the order in which the objects should be combined, while :py:func:`~xarray.combine_by_coords` attempts to infer this ordering automatically from the coordinates in the data. :py:func:`~xarray.combine_nested` is useful when you know the spatial relationship between each object in advance. The datasets must be provided in the form of a nested list, which specifies their relative position and ordering. A common task is collecting data from a parallelized simulation where each processor wrote out data to a separate file. A domain which was decomposed into 4 parts, 2 each along both the x and y axes, requires organising the datasets into a doubly-nested list, e.g: .. jupyter-execute:: arr = xr.DataArray( name="temperature", data=np.random.randint(5, size=(2, 2)), dims=["x", "y"] ) arr .. jupyter-execute:: ds_grid = [[arr, arr], [arr, arr]] xr.combine_nested(ds_grid, concat_dim=["x", "y"]) :py:func:`~xarray.combine_nested` can also be used to explicitly merge datasets with different variables. For example if we have 4 datasets, which are divided along two times, and contain two different variables, we can pass ``None`` to ``'concat_dim'`` to specify the dimension of the nested list over which we wish to use ``merge`` instead of ``concat``: .. jupyter-execute:: temp = xr.DataArray(name="temperature", data=np.random.randn(2), dims=["t"]) precip = xr.DataArray(name="precipitation", data=np.random.randn(2), dims=["t"]) ds_grid = [[temp, precip], [temp, precip]] xr.combine_nested(ds_grid, concat_dim=["t", None]) :py:func:`~xarray.combine_by_coords` is for combining objects which have dimension coordinates which specify their relationship to and order relative to one another, for example a linearly-increasing 'time' dimension coordinate. Here we combine two datasets using their common dimension coordinates. Notice they are concatenated in order based on the values in their dimension coordinates, not on their position in the list passed to ``combine_by_coords``. .. jupyter-execute:: x1 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [0, 1, 2])]) x2 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [3, 4, 5])]) xr.combine_by_coords([x2, x1]) These functions are used by :py:func:`~xarray.open_mfdataset` to open many files as one dataset. The particular function used is specified by setting the argument ``'combine'`` to ``'by_coords'`` or ``'nested'``. This is useful for situations where your data is split across many files in multiple locations, which have some known relationship between one another. 
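To make that last workflow concrete, here is a minimal, self-contained sketch (not from the xarray test suite) of opening several files as one dataset with ``combine="by_coords"``; it assumes the netCDF4 or h5netcdf backend and dask are installed, and the file names ``part0.nc`` / ``part1.nc`` are purely illustrative:

.. code:: python

    import numpy as np
    import xarray as xr

    # write two pieces of a dataset that share the dimension coordinate "x"
    xr.Dataset({"foo": ("x", np.arange(3))}, coords={"x": [0, 1, 2]}).to_netcdf("part0.nc")
    xr.Dataset({"foo": ("x", np.arange(3, 6))}, coords={"x": [3, 4, 5]}).to_netcdf("part1.nc")

    # combine="by_coords" orders the pieces by their dimension coordinates,
    # regardless of the order in which the paths are given
    combined = xr.open_mfdataset(["part1.nc", "part0.nc"], combine="by_coords")
    print(combined.foo.values)  # [0 1 2 3 4 5]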
xarray-2025.12.0/doc/user-guide/complex-numbers.rst000066400000000000000000000072141511464676000220370ustar00rootroot00000000000000.. currentmodule:: xarray .. _complex: Complex Numbers =============== .. jupyter-execute:: :hide-code: import numpy as np import xarray as xr Xarray leverages NumPy to seamlessly handle complex numbers in :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects. In the examples below, we are using a DataArray named ``da`` with complex elements (of :math:`\mathbb{C}`): .. jupyter-execute:: data = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]]) da = xr.DataArray( data, dims=["x", "y"], coords={"x": ["a", "b"], "y": [1, 2]}, name="complex_nums", ) Operations on Complex Data -------------------------- You can access real and imaginary components using the ``.real`` and ``.imag`` attributes. Most NumPy universal functions (ufuncs) like :py:doc:`numpy.abs ` or :py:doc:`numpy.angle ` work directly. .. jupyter-execute:: da.real .. jupyter-execute:: np.abs(da) .. note:: Like NumPy, ``.real`` and ``.imag`` typically return *views*, not copies, of the original data. Reading and Writing Complex Data -------------------------------- Writing complex data to NetCDF files (see :ref:`io.netcdf`) is supported via :py:meth:`~xarray.DataArray.to_netcdf` using specific backend engines that handle complex types: .. tab:: h5netcdf This requires the `h5netcdf `_ library to be installed. .. jupyter-execute:: # write the data to disk da.to_netcdf("complex_nums_h5.nc", engine="h5netcdf") # read the file back into memory ds_h5 = xr.open_dataset("complex_nums_h5.nc", engine="h5netcdf") # check the dtype ds_h5[da.name].dtype .. tab:: netcdf4 Requires the `netcdf4-python (>= 1.7.1) `_ library and you have to enable ``auto_complex=True``. .. jupyter-execute:: # write the data to disk da.to_netcdf("complex_nums_nc4.nc", engine="netcdf4", auto_complex=True) # read the file back into memory ds_nc4 = xr.open_dataset( "complex_nums_nc4.nc", engine="netcdf4", auto_complex=True ) # check the dtype ds_nc4[da.name].dtype .. warning:: The ``scipy`` engine only supports NetCDF V3 and does *not* support complex arrays; writing with ``engine="scipy"`` raises a ``TypeError``. Alternative: Manual Handling ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If direct writing is not supported (e.g., targeting NetCDF3), you can manually split the complex array into separate real and imaginary variables before saving: .. jupyter-execute:: # Write data to file ds_manual = xr.Dataset( { f"{da.name}_real": da.real, f"{da.name}_imag": da.imag, } ) ds_manual.to_netcdf("complex_manual.nc", engine="scipy") # Example # Read data from file ds = xr.open_dataset("complex_manual.nc", engine="scipy") reconstructed = ds[f"{da.name}_real"] + 1j * ds[f"{da.name}_imag"] Recommendations ^^^^^^^^^^^^^^^ - Use ``engine="netcdf4"`` with ``auto_complex=True`` for full compliance and ease. - Use ``h5netcdf`` for HDF5-based storage when interoperability with HDF5 is desired. - For maximum legacy support (NetCDF3), manually handle real/imaginary components. .. jupyter-execute:: :hide-code: # Cleanup import os for f in ["complex_nums_nc4.nc", "complex_nums_h5.nc", "complex_manual.nc"]: if os.path.exists(f): os.remove(f) See also -------- - :ref:`io.netcdf` โ€” full NetCDF I/O guide - `NumPy complex numbers `__ xarray-2025.12.0/doc/user-guide/computation.rst000066400000000000000000000715231511464676000212650ustar00rootroot00000000000000.. currentmodule:: xarray .. 
_compute: ########### Computation ########### The labels associated with :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects enables some powerful shortcuts for computation, notably including aggregation and broadcasting by dimension names. Basic array math ================ Arithmetic operations with a single DataArray automatically vectorize (like numpy) over all array values: .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal .. jupyter-execute:: arr = xr.DataArray( np.random.default_rng(0).random((2, 3)), [("x", ["a", "b"]), ("y", [10, 20, 30])], ) arr - 3 .. jupyter-execute:: abs(arr) You can also use any of numpy's or scipy's many `ufunc`__ functions directly on a DataArray: __ https://numpy.org/doc/stable/reference/ufuncs.html .. jupyter-execute:: np.sin(arr) Use :py:func:`~xarray.where` to conditionally switch between values: .. jupyter-execute:: xr.where(arr > 0, "positive", "negative") Use ``@`` to compute the :py:func:`~xarray.dot` product: .. jupyter-execute:: arr @ arr Data arrays also implement many :py:class:`numpy.ndarray` methods: .. jupyter-execute:: arr.round(2) .. jupyter-execute:: arr.T .. jupyter-execute:: intarr = xr.DataArray([0, 1, 2, 3, 4, 5]) intarr << 2 # only supported for int types .. jupyter-execute:: intarr >> 1 .. _missing_values: Missing values ============== Xarray represents missing values using the "NaN" (Not a Number) value from NumPy, which is a special floating-point value that indicates a value that is undefined or unrepresentable. There are several methods for handling missing values in xarray: Xarray objects borrow the :py:meth:`~xarray.DataArray.isnull`, :py:meth:`~xarray.DataArray.notnull`, :py:meth:`~xarray.DataArray.count`, :py:meth:`~xarray.DataArray.dropna`, :py:meth:`~xarray.DataArray.fillna`, :py:meth:`~xarray.DataArray.ffill`, and :py:meth:`~xarray.DataArray.bfill` methods for working with missing data from pandas: :py:meth:`~xarray.DataArray.isnull` is a method in xarray that can be used to check for missing or null values in an xarray object. It returns a new xarray object with the same dimensions as the original object, but with boolean values indicating where **missing values** are present. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.isnull() In this example, the third and fourth elements of 'x' are NaN, so the resulting :py:class:`~xarray.DataArray` object has 'True' values in the third and fourth positions and 'False' values in the other positions. :py:meth:`~xarray.DataArray.notnull` is a method in xarray that can be used to check for non-missing or non-null values in an xarray object. It returns a new xarray object with the same dimensions as the original object, but with boolean values indicating where **non-missing values** are present. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.notnull() In this example, the first two and the last elements of x are not NaN, so the resulting :py:class:`~xarray.DataArray` object has 'True' values in these positions, and 'False' values in the third and fourth positions where NaN is located. :py:meth:`~xarray.DataArray.count` is a method in xarray that can be used to count the number of non-missing values along one or more dimensions of an xarray object. 
It returns a new xarray object with the same dimensions as the original object, but with each element replaced by the count of non-missing values along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.count() In this example, 'x' has five elements, but two of them are NaN, so the resulting :py:class:`~xarray.DataArray` object having a single element containing the value '3', which represents the number of non-null elements in x. :py:meth:`~xarray.DataArray.dropna` is a method in xarray that can be used to remove missing or null values from an xarray object. It returns a new xarray object with the same dimensions as the original object, but with missing values removed. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.dropna(dim="x") In this example, on calling x.dropna(dim="x") removes any missing values and returns a new :py:class:`~xarray.DataArray` object with only the non-null elements [0, 1, 2] of 'x', in the original order. :py:meth:`~xarray.DataArray.fillna` is a method in xarray that can be used to fill missing or null values in an xarray object with a specified value or method. It returns a new xarray object with the same dimensions as the original object, but with missing values filled. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.fillna(-1) In this example, there are two NaN values in 'x', so calling x.fillna(-1) replaces these values with -1 and returns a new :py:class:`~xarray.DataArray` object with five elements, containing the values [0, 1, -1, -1, 2] in the original order. :py:meth:`~xarray.DataArray.ffill` is a method in xarray that can be used to forward fill (or fill forward) missing values in an xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the original object, but with missing values replaced by the last non-missing value along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.ffill("x") In this example, there are two NaN values in 'x', so calling x.ffill("x") fills these values with the last non-null value in the same dimension, which are 0 and 1, respectively. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0, 1, 1, 1, 2] in the original order. :py:meth:`~xarray.DataArray.bfill` is a method in xarray that can be used to backward fill (or fill backward) missing values in an xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the original object, but with missing values replaced by the next non-missing value along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.bfill("x") In this example, there are two NaN values in 'x', so calling x.bfill("x") fills these values with the next non-null value in the same dimension, which are 2 and 2, respectively. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0, 1, 2, 2, 2] in the original order. Like pandas, xarray uses the float value ``np.nan`` (not-a-number) to represent missing values. Xarray objects also have an :py:meth:`~xarray.DataArray.interpolate_na` method for filling missing values via 1D interpolation. It returns a new xarray object with the same dimensions as the original object, but with missing values interpolated. .. 
jupyter-execute:: x = xr.DataArray( [0, 1, np.nan, np.nan, 2], dims=["x"], coords={"xx": xr.Variable("x", [0, 1, 1.1, 1.9, 3])}, ) x.interpolate_na(dim="x", method="linear", use_coordinate="xx") In this example, there are two NaN values in 'x', so calling x.interpolate_na(dim="x", method="linear", use_coordinate="xx") fills these values with interpolated values along the "x" dimension using linear interpolation based on the values of the xx coordinate. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0., 1., 1.05, 1.45, 2.] in the original order. Note that the interpolated values are calculated based on the values of the 'xx' coordinate, which has non-integer values, resulting in non-integer interpolated values. Note that xarray slightly diverges from the pandas ``interpolate`` syntax by providing the ``use_coordinate`` keyword which facilitates a clear specification of which values to use as the index in the interpolation. Xarray also provides the ``max_gap`` keyword argument to limit the interpolation to data gaps of length ``max_gap`` or smaller. See :py:meth:`~xarray.DataArray.interpolate_na` for more. .. _agg: Aggregation =========== Aggregation methods have been updated to take a ``dim`` argument instead of ``axis``. This allows for very intuitive syntax for aggregation methods that are applied along particular dimension(s): .. jupyter-execute:: arr.sum(dim="x") .. jupyter-execute:: arr.std(["x", "y"]) .. jupyter-execute:: arr.min() If you need to figure out the axis number for a dimension yourself (say, for wrapping code designed to work with numpy arrays), you can use the :py:meth:`~xarray.DataArray.get_axis_num` method: .. jupyter-execute:: arr.get_axis_num("y") These operations automatically skip missing values, like in pandas: .. jupyter-execute:: xr.DataArray([1, 2, np.nan, 3]).mean() If desired, you can disable this behavior by invoking the aggregation method with ``skipna=False``. .. _compute.rolling: Rolling window operations ========================= ``DataArray`` objects include a :py:meth:`~xarray.DataArray.rolling` method. This method supports rolling window aggregation: .. jupyter-execute:: arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y")) arr :py:meth:`~xarray.DataArray.rolling` is applied along one dimension using the name of the dimension as a key (e.g. ``y``) and the window size as the value (e.g. ``3``). We get back a ``Rolling`` object: .. jupyter-execute:: arr.rolling(y=3) Aggregation and summary methods can be applied directly to the ``Rolling`` object: .. jupyter-execute:: r = arr.rolling(y=3) r.reduce(np.std) .. jupyter-execute:: r.mean() Aggregation results are assigned the coordinate at the end of each window by default, but can be centered by passing ``center=True`` when constructing the ``Rolling`` object: .. jupyter-execute:: r = arr.rolling(y=3, center=True) r.mean() As can be seen above, aggregations of windows which overlap the border of the array produce ``nan``\s. Setting ``min_periods`` in the call to ``rolling`` changes the minimum number of observations within the window required to have a value when aggregating: .. jupyter-execute:: r = arr.rolling(y=3, min_periods=2) r.mean() .. jupyter-execute:: r = arr.rolling(y=3, center=True, min_periods=2) r.mean() From version 0.17, xarray supports multidimensional rolling, .. jupyter-execute:: r = arr.rolling(x=2, y=3, min_periods=2) r.mean() .. 
tip:: Note that rolling window aggregations are faster and use less memory when bottleneck_ is installed. This only applies to numpy-backed xarray objects with 1d-rolling. .. _bottleneck: https://github.com/pydata/bottleneck We can also manually iterate through ``Rolling`` objects: .. code:: python for label, arr_window in r: # arr_window is a view of x ... .. _compute.rolling_exp: While ``rolling`` provides a simple moving average, ``DataArray`` also supports an exponential moving average with :py:meth:`~xarray.DataArray.rolling_exp`. This is similar to pandas' ``ewm`` method. numbagg_ is required. .. _numbagg: https://github.com/numbagg/numbagg .. code:: python arr.rolling_exp(y=3).mean() The ``rolling_exp`` method takes a ``window_type`` kwarg, which can be ``'alpha'``, ``'com'`` (for ``center-of-mass``), ``'span'``, and ``'halflife'``. The default is ``span``. Finally, the rolling object has a ``construct`` method which returns a view of the original ``DataArray`` with the windowed dimension in the last position. You can use this for more advanced rolling operations such as strided rolling, windowed rolling, convolution, short-time FFT etc. .. jupyter-execute:: # rolling with 2-point stride rolling_da = r.construct(x="x_win", y="y_win", stride=2) rolling_da .. jupyter-execute:: rolling_da.mean(["x_win", "y_win"], skipna=False) Because the ``DataArray`` given by ``r.construct('window_dim')`` is a view of the original array, it is memory efficient. You can also use ``construct`` to compute a weighted rolling sum: .. jupyter-execute:: weight = xr.DataArray([0.25, 0.5, 0.25], dims=["window"]) arr.rolling(y=3).construct(y="window").dot(weight) .. note:: numpy's Nan-aggregation functions such as ``nansum`` copy the original array. In xarray, we internally use these functions in our aggregation methods (such as ``.sum()``) if ``skipna`` argument is not specified or set to True. This means ``rolling_da.mean('window_dim')`` is memory inefficient. To avoid this, use ``skipna=False`` as the above example. .. _compute.weighted: Weighted array reductions ========================= :py:class:`DataArray` and :py:class:`Dataset` objects include :py:meth:`DataArray.weighted` and :py:meth:`Dataset.weighted` array reduction methods. They currently support weighted ``sum``, ``mean``, ``std``, ``var`` and ``quantile``. .. jupyter-execute:: coords = dict(month=("month", [1, 2, 3])) prec = xr.DataArray([1.1, 1.0, 0.9], dims=("month",), coords=coords) weights = xr.DataArray([31, 28, 31], dims=("month",), coords=coords) Create a weighted object: .. jupyter-execute:: weighted_prec = prec.weighted(weights) weighted_prec Calculate the weighted sum: .. jupyter-execute:: weighted_prec.sum() Calculate the weighted mean: .. jupyter-execute:: weighted_prec.mean(dim="month") Calculate the weighted quantile: .. jupyter-execute:: weighted_prec.quantile(q=0.5, dim="month") The weighted sum corresponds to: .. jupyter-execute:: weighted_sum = (prec * weights).sum() weighted_sum the weighted mean to: .. jupyter-execute:: weighted_mean = weighted_sum / weights.sum() weighted_mean the weighted variance to: .. jupyter-execute:: weighted_var = weighted_prec.sum_of_squares() / weights.sum() weighted_var and the weighted standard deviation to: .. jupyter-execute:: weighted_std = np.sqrt(weighted_var) weighted_std However, the functions also take missing values in the data into account: .. 
jupyter-execute:: data = xr.DataArray([np.nan, 2, 4]) weights = xr.DataArray([8, 1, 1]) data.weighted(weights).mean() Using ``(data * weights).sum() / weights.sum()`` would (incorrectly) result in 0.6. If the weights add up to 0, ``sum`` returns 0: .. jupyter-execute:: data = xr.DataArray([1.0, 1.0]) weights = xr.DataArray([-1.0, 1.0]) data.weighted(weights).sum() and ``mean``, ``std`` and ``var`` return ``nan``: .. jupyter-execute:: data.weighted(weights).mean() .. note:: ``weights`` must be a :py:class:`DataArray` and cannot contain missing values. Missing values can be replaced manually by ``weights.fillna(0)``. .. _compute.coarsen: Coarsen large arrays ==================== :py:class:`DataArray` and :py:class:`Dataset` objects include :py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.Dataset.coarsen` methods, which support block aggregation along multiple dimensions, .. jupyter-execute:: x = np.linspace(0, 10, 300) t = pd.date_range("1999-12-15", periods=364) da = xr.DataArray( np.sin(x) * np.cos(np.linspace(0, 1, 364)[:, np.newaxis]), dims=["time", "x"], coords={"time": t, "x": x}, ) da To take a block mean for every 7 days along the ``time`` dimension and every 2 points along the ``x`` dimension, .. jupyter-execute:: da.coarsen(time=7, x=2).mean() :py:meth:`~xarray.DataArray.coarsen` raises a ``ValueError`` if the data length is not a multiple of the corresponding window size. You can choose the ``boundary='trim'`` or ``boundary='pad'`` options to trim the excess entries or pad insufficient entries with ``nan``, .. jupyter-execute:: da.coarsen(time=30, x=2, boundary="trim").mean() If you want to apply a specific function to a coordinate, you can pass the function or method name to the ``coord_func`` option, .. jupyter-execute:: da.coarsen(time=7, x=2, coord_func={"time": "min"}).mean() You can also :ref:`use coarsen to reshape` without applying a computation. .. _compute.using_coordinates: Computation using Coordinates ============================= Xarray objects have some handy methods for computation with their coordinates. :py:meth:`~xarray.DataArray.differentiate` computes derivatives by central finite differences using their coordinates, .. jupyter-execute:: a = xr.DataArray([0, 1, 2, 3], dims=["x"], coords=[[0.1, 0.11, 0.2, 0.3]]) a.differentiate("x") This method can also be used for multidimensional arrays, .. jupyter-execute:: a = xr.DataArray( np.arange(8).reshape(4, 2), dims=["x", "y"], coords={"x": [0.1, 0.11, 0.2, 0.3]} ) a.differentiate("x") :py:meth:`~xarray.DataArray.integrate` computes integration based on the trapezoidal rule using their coordinates, .. jupyter-execute:: a.integrate("x") .. note:: These methods are limited to simple cartesian geometry. Differentiation and integration along a multidimensional coordinate are not supported. .. _compute.polyfit: Fitting polynomials =================== Xarray objects provide an interface for performing linear or polynomial regressions using the least-squares method. :py:meth:`~xarray.DataArray.polyfit` computes the best fitting coefficients along a given dimension and for a given order, .. jupyter-execute:: x = xr.DataArray(np.arange(10), dims=["x"], name="x") a = xr.DataArray(3 + 4 * x, dims=["x"], coords={"x": x}) out = a.polyfit(dim="x", deg=1, full=True) out The method outputs a dataset containing the coefficients (and more if ``full=True``). The inverse operation is done with :py:meth:`~xarray.polyval`, .. jupyter-execute:: xr.polyval(coord=x, coeffs=out.polyfit_coefficients) ..
note:: These methods replicate the behaviour of :py:func:`numpy.polyfit` and :py:func:`numpy.polyval`. .. _compute.curvefit: Fitting arbitrary functions =========================== Xarray objects also provide an interface for fitting more complex functions using :py:func:`scipy.optimize.curve_fit`. :py:meth:`~xarray.DataArray.curvefit` accepts user-defined functions and can fit along multiple coordinates. For example, we can fit a relationship between two ``DataArray`` objects, maintaining a unique fit at each spatial coordinate but aggregating over the time dimension: .. jupyter-execute:: def exponential(x, a, xc): return np.exp((x - xc) / a) x = np.arange(-5, 5, 0.1) t = np.arange(-5, 5, 0.1) X, T = np.meshgrid(x, t) Z1 = np.random.uniform(low=-5, high=5, size=X.shape) Z2 = exponential(Z1, 3, X) Z3 = exponential(Z1, 1, -X) ds = xr.Dataset( data_vars=dict( var1=(["t", "x"], Z1), var2=(["t", "x"], Z2), var3=(["t", "x"], Z3) ), coords={"t": t, "x": x}, ) ds[["var2", "var3"]].curvefit( coords=ds.var1, func=exponential, reduce_dims="t", bounds={"a": (0.5, 5), "xc": (-5, 5)}, ) We can also fit multi-dimensional functions, and even use a wrapper function to simultaneously fit a summation of several functions, such as this field containing two gaussian peaks: .. jupyter-execute:: def gaussian_2d(coords, a, xc, yc, xalpha, yalpha): x, y = coords z = a * np.exp( -np.square(x - xc) / 2 / np.square(xalpha) - np.square(y - yc) / 2 / np.square(yalpha) ) return z def multi_peak(coords, *args): z = np.zeros(coords[0].shape) for i in range(len(args) // 5): z += gaussian_2d(coords, *args[i * 5 : i * 5 + 5]) return z x = np.arange(-5, 5, 0.1) y = np.arange(-5, 5, 0.1) X, Y = np.meshgrid(x, y) n_peaks = 2 names = ["a", "xc", "yc", "xalpha", "yalpha"] names = [f"{name}{i}" for i in range(n_peaks) for name in names] Z = gaussian_2d((X, Y), 3, 1, 1, 2, 1) + gaussian_2d((X, Y), 2, -1, -2, 1, 1) Z += np.random.normal(scale=0.1, size=Z.shape) da = xr.DataArray(Z, dims=["y", "x"], coords={"y": y, "x": x}) da.curvefit( coords=["x", "y"], func=multi_peak, param_names=names, kwargs={"maxfev": 10000}, ) .. note:: This method replicates the behavior of :py:func:`scipy.optimize.curve_fit`. .. _compute.broadcasting: Broadcasting by dimension name ============================== ``DataArray`` objects automatically align themselves ("broadcasting" in the numpy parlance) by dimension name instead of axis order. With xarray, you do not need to transpose arrays or insert dimensions of length 1 to get array operations to work, as commonly done in numpy with :py:func:`numpy.reshape` or :py:data:`numpy.newaxis`. This is best illustrated by a few examples. Consider two one-dimensional arrays with different sizes aligned along different dimensions: .. jupyter-execute:: a = xr.DataArray([1, 2], [("x", ["a", "b"])]) a .. jupyter-execute:: b = xr.DataArray([-1, -2, -3], [("y", [10, 20, 30])]) b With xarray, we can apply binary mathematical operations to these arrays, and their dimensions are expanded automatically: .. jupyter-execute:: a * b Moreover, dimensions are always reordered to the order in which they first appeared: .. jupyter-execute:: c = xr.DataArray(np.arange(6).reshape(3, 2), [b["y"], a["x"]]) c .. jupyter-execute:: a + c This means, for example, that you always subtract an array from its transpose: .. jupyter-execute:: c - c.T You can explicitly broadcast xarray data structures by using the :py:func:`~xarray.broadcast` function: .. jupyter-execute:: a2, b2 = xr.broadcast(a, b) a2 .. jupyter-execute:: b2 .. 
_math automatic alignment: Automatic alignment =================== Xarray enforces alignment between *index* :ref:`coordinates` (that is, coordinates with the same name as a dimension, marked by ``*``) on objects used in binary operations. Similarly to pandas, this alignment is automatic for arithmetic on binary operations. The default result of a binary operation is by the *intersection* (not the union) of coordinate labels: .. jupyter-execute:: arr = xr.DataArray(np.arange(3), [("x", range(3))]) arr + arr[:-1] If coordinate values for a dimension are missing on either argument, all matching dimensions must have the same size: .. jupyter-execute:: :raises: arr + xr.DataArray([1, 2], dims="x") However, one can explicitly change this default automatic alignment type ("inner") via :py:func:`~xarray.set_options()` in context manager: .. jupyter-execute:: with xr.set_options(arithmetic_join="outer"): arr + arr[:1] arr + arr[:1] Before loops or performance critical code, it's a good idea to align arrays explicitly (e.g., by putting them in the same Dataset or using :py:func:`~xarray.align`) to avoid the overhead of repeated alignment with each operation. See :ref:`align and reindex` for more details. .. note:: There is no automatic alignment between arguments when performing in-place arithmetic operations such as ``+=``. You will need to use :ref:`manual alignment`. This ensures in-place arithmetic never needs to modify data types. .. _coordinates math: Coordinates =========== Although index coordinates are aligned, other coordinates are not, and if their values conflict, they will be dropped. This is necessary, for example, because indexing turns 1D coordinates into scalar coordinates: .. jupyter-execute:: arr[0] .. jupyter-execute:: arr[1] .. jupyter-execute:: # notice that the scalar coordinate 'x' is silently dropped arr[1] - arr[0] Still, xarray will persist other coordinates in arithmetic, as long as there are no conflicting values: .. jupyter-execute:: # only one argument has the 'x' coordinate arr[0] + 1 .. jupyter-execute:: # both arguments have the same 'x' coordinate arr[0] - arr[0] Math with datasets ================== Datasets support arithmetic operations by automatically looping over all data variables: .. jupyter-execute:: ds = xr.Dataset( { "x_and_y": (("x", "y"), np.random.randn(3, 5)), "x_only": ("x", np.random.randn(3)), }, coords=arr.coords, ) ds > 0 Datasets support most of the same methods found on data arrays: .. jupyter-execute:: ds.mean(dim="x") .. jupyter-execute:: abs(ds) Datasets also support NumPy ufuncs (requires NumPy v1.13 or newer), or alternatively you can use :py:meth:`~xarray.Dataset.map` to map a function to each variable in a dataset: .. jupyter-execute:: np.sin(ds) # equivalent to ds.map(np.sin) Datasets also use looping over variables for *broadcasting* in binary arithmetic. You can do arithmetic between any ``DataArray`` and a dataset: .. jupyter-execute:: ds + arr Arithmetic between two datasets matches data variables of the same name: .. jupyter-execute:: ds2 = xr.Dataset({"x_and_y": 0, "x_only": 100}) ds - ds2 Similarly to index based alignment, the result has the intersection of all matching data variables. .. _compute.wrapping-custom: Wrapping custom computation =========================== It doesn't always make sense to do computation directly with xarray objects: - In the inner loop of performance limited code, using xarray can add considerable overhead compared to using NumPy or native Python types. 
This is particularly true when working with scalars or small arrays (less than ~1e6 elements). Keeping track of labels and ensuring their consistency adds overhead, and xarray's core itself is not especially fast, because it's written in Python rather than a compiled language like C. Also, xarray's high level label-based APIs removes low-level control over how operations are implemented. - Even if speed doesn't matter, it can be important to wrap existing code, or to support alternative interfaces that don't use xarray objects. For these reasons, it is often well-advised to write low-level routines that work with NumPy arrays, and to wrap these routines to work with xarray objects. However, adding support for labels on both :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` can be a bit of a chore. To make this easier, xarray supplies the :py:func:`~xarray.apply_ufunc` helper function, designed for wrapping functions that support broadcasting and vectorization on unlabeled arrays in the style of a NumPy `universal function `_ ("ufunc" for short). ``apply_ufunc`` takes care of everything needed for an idiomatic xarray wrapper, including alignment, broadcasting, looping over ``Dataset`` variables (if needed), and merging of coordinates. In fact, many internal xarray functions/methods are written using ``apply_ufunc``. Simple functions that act independently on each value should work without any additional arguments: .. jupyter-execute:: squared_error = lambda x, y: (x - y) ** 2 arr1 = xr.DataArray([0, 1, 2, 3], dims="x") xr.apply_ufunc(squared_error, arr1, 1) For using more complex operations that consider some array values collectively, it's important to understand the idea of "core dimensions" from NumPy's `generalized ufuncs `_. Core dimensions are defined as dimensions that should *not* be broadcast over. Usually, they correspond to the fundamental dimensions over which an operation is defined, e.g., the summed axis in ``np.sum``. A good clue that core dimensions are needed is the presence of an ``axis`` argument on the corresponding NumPy function. With ``apply_ufunc``, core dimensions are recognized by name, and then moved to the last dimension of any input arguments before applying the given function. This means that for functions that accept an ``axis`` argument, you usually need to set ``axis=-1``. As an example, here is how we would wrap :py:func:`numpy.linalg.norm` to calculate the vector norm: .. code-block:: python def vector_norm(x, dim, ord=None): return xr.apply_ufunc( np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1} ) .. jupyter-execute:: :hide-code: def vector_norm(x, dim, ord=None): return xr.apply_ufunc( np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1} ) .. jupyter-execute:: vector_norm(arr1, dim="x") Because ``apply_ufunc`` follows a standard convention for ufuncs, it plays nicely with tools for building vectorized functions, like :py:func:`numpy.broadcast_arrays` and :py:class:`numpy.vectorize`. For high performance needs, consider using :doc:`Numba's vectorize and guvectorize `. In addition to wrapping functions, ``apply_ufunc`` can automatically parallelize many functions when using dask by setting ``dask='parallelized'``. See :ref:`dask.automatic-parallelization` for details. :py:func:`~xarray.apply_ufunc` also supports some advanced options for controlling alignment of variables and the form of the result. See the docstring for full details and more examples. 
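As a rough sketch (the ``quantiles`` helper and its argument names are illustrative, not part of xarray's API), ``output_core_dims`` lets the wrapped function return a dimension that is not present on the inputs, such as a small ``quantile`` dimension computed along ``x``:

.. code-block:: python

    def quantiles(obj, q, dim):
        # np.quantile reduces the last axis and prepends a new quantile axis,
        # so move that new axis to the end to match output_core_dims
        return xr.apply_ufunc(
            lambda arr: np.moveaxis(np.quantile(arr, q, axis=-1), 0, -1),
            obj,
            input_core_dims=[[dim]],
            output_core_dims=[["quantile"]],
        ).assign_coords(quantile=list(q))


    quantiles(arr1, q=[0.25, 0.5, 0.75], dim="x")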
xarray-2025.12.0/doc/user-guide/dask.rst000066400000000000000000000553021511464676000176420ustar00rootroot00000000000000.. currentmodule:: xarray .. _dask: Parallel Computing with Dask ============================ .. jupyter-execute:: # Note that it's not necessary to import dask to use xarray with dask. import numpy as np import pandas as pd import xarray as xr import bottleneck .. jupyter-execute:: :hide-code: import os np.random.seed(123456) # limit the amount of information printed to screen xr.set_options(display_expand_data=False) np.set_printoptions(precision=3, linewidth=100, threshold=10, edgeitems=2) ds = xr.Dataset( { "temperature": ( ("time", "latitude", "longitude"), np.random.randn(30, 180, 180), ), "time": pd.date_range("2015-01-01", periods=30), "longitude": np.arange(180), "latitude": np.arange(89.5, -90.5, -1), } ) ds.to_netcdf("example-data.nc") Xarray integrates with `Dask `__, a general purpose library for parallel computing, to handle larger-than-memory computations. If youโ€™ve been using Xarray to read in large datasets or split up data across a number of files, you may already be using Dask: .. code-block:: python ds = xr.open_zarr("/path/to/data.zarr") timeseries = ds["temp"].mean(dim=["x", "y"]).compute() # Compute result Using Dask with Xarray feels similar to working with NumPy arrays, but on much larger datasets. The Dask integration is transparent, so you usually donโ€™t need to manage the parallelism directly; Xarray and Dask handle these aspects behind the scenes. This makes it easy to write code that scales from small, in-memory datasets on a single machine to large datasets that are distributed across a cluster, with minimal code changes. Examples -------- If you're new to using Xarray with Dask, we recommend the `Xarray + Dask Tutorial `_. Here are some examples for using Xarray with Dask at scale: - `Zonal averaging with the NOAA National Water Model `_ - `CMIP6 Precipitation Frequency Analysis `_ - `Using Dask + Cloud Optimized GeoTIFFs `_ Find more examples at the `Project Pythia cookbook gallery `_. Using Dask with Xarray ---------------------- .. image:: ../_static/dask-array.svg :width: 50 % :align: right :alt: A Dask array Dask divides arrays into smaller parts called chunks. These chunks are small, manageable pieces of the larger dataset, that Dask is able to process in parallel (see the `Dask Array docs on chunks `_). Commonly chunks are set when reading data, but you can also set the chunksize manually at any point in your workflow using :py:meth:`Dataset.chunk` and :py:meth:`DataArray.chunk`. See :ref:`dask.chunks` for more. Xarray operations on Dask-backed arrays are lazy. This means computations are not executed immediately, but are instead queued up as tasks in a Dask graph. When a result is requested (e.g., for plotting, writing to disk, or explicitly computing), Dask executes the task graph. The computations are carried out in parallel, with each chunk being processed independently. This parallel execution is key to handling large datasets efficiently. Nearly all Xarray methods have been extended to work automatically with Dask Arrays. This includes things like indexing, concatenating, rechunking, grouped operations, etc. Common operations are covered in more detail in each of the sections below. .. _dask.io: Reading and writing data ~~~~~~~~~~~~~~~~~~~~~~~~ When reading data, Dask divides your dataset into smaller chunks. You can specify the size of chunks with the ``chunks`` argument. 
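For example (a minimal sketch; the file name and dimension names are illustrative), chunk sizes can be given per dimension when opening a file, and the resulting chunking can be inspected afterwards via the ``chunksizes`` property:

.. code-block:: python

    ds = xr.open_dataset("example-data.nc", chunks={"time": 10, "latitude": 90})
    ds.temperature.chunksizes  # mapping from dimension name to tuple of chunk lengths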
Specifying ``chunks="auto"`` will set the dask chunk sizes to be a multiple of the on-disk chunk sizes. This can be a good idea, but usually the appropriate dask chunk size will depend on your workflow. .. tab:: Zarr The `Zarr `_ format is ideal for working with large datasets. Each chunk is stored in a separate file, allowing parallel reading and writing with Dask. You can also use Zarr to read/write directly from cloud storage buckets (see the `Dask documentation on connecting to remote data `__) When you open a Zarr dataset with :py:func:`~xarray.open_zarr`, it is loaded as a Dask array by default (if Dask is installed):: ds = xr.open_zarr("path/to/directory.zarr") See :ref:`io.zarr` for more details. .. tab:: NetCDF Open a single netCDF file with :py:func:`~xarray.open_dataset` and supplying a ``chunks`` argument:: ds = xr.open_dataset("example-data.nc", chunks={"time": 10}) Or open multiple files in parallel with py:func:`~xarray.open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) .. tip:: When reading in many netCDF files with py:func:`~xarray.open_mfdataset`, using ``engine="h5netcdf"`` can be faster than the default which uses the netCDF4 package. Save larger-than-memory netCDF files:: ds.to_netcdf("my-big-file.nc") Or set ``compute=False`` to return a dask.delayed object that can be computed later:: delayed_write = ds.to_netcdf("my-big-file.nc", compute=False) delayed_write.compute() .. note:: When using Daskโ€™s distributed scheduler to write NETCDF4 files, it may be necessary to set the environment variable ``HDF5_USE_FILE_LOCKING=FALSE`` to avoid competing locks within the HDF5 SWMR file locking scheme. Note that writing netCDF files with Daskโ€™s distributed scheduler is only supported for the netcdf4 backend. See :ref:`io.netcdf` for more details. .. tab:: HDF5 Open HDF5 files with :py:func:`~xarray.open_dataset`:: xr.open_dataset("/path/to/my/file.h5", chunks='auto') See :ref:`io.hdf5` for more details. .. tab:: GeoTIFF Open large geoTIFF files with rioxarray:: xds = rioxarray.open_rasterio("my-satellite-image.tif", chunks='auto') See :ref:`io.rasterio` for more details. Loading Dask Arrays ~~~~~~~~~~~~~~~~~~~ There are a few common cases where you may want to convert lazy Dask arrays into eager, in-memory Xarray data structures: - You want to inspect smaller intermediate results when working interactively or debugging - You've reduced the dataset (by filtering or with a groupby, for example) and now have something much smaller that fits in memory - You need to compute intermediate results since Dask is unable (or struggles) to perform a certain computation. The canonical example of this is normalizing a dataset, e.g., ``ds - ds.mean()``, when ``ds`` is larger than memory. Typically, you should either save ``ds`` to disk or compute ``ds.mean()`` eagerly. To do this, you can use :py:meth:`Dataset.compute` or :py:meth:`DataArray.compute`: .. jupyter-execute:: ds.compute() .. note:: Using :py:meth:`Dataset.compute` is preferred to :py:meth:`Dataset.load`, which changes the results in-place. You can also access :py:attr:`DataArray.values`, which will always be a NumPy array: .. jupyter-input:: ds.temperature.values .. jupyter-output:: array([[[ 4.691e-01, -2.829e-01, ..., -5.577e-01, 3.814e-01], [ 1.337e+00, -1.531e+00, ..., 8.726e-01, -1.538e+00], ... # truncated for brevity NumPy ufuncs like :py:func:`numpy.sin` transparently work on all xarray objects, including those that store lazy Dask arrays: .. 
jupyter-execute:: np.sin(ds) To access Dask arrays directly, use the :py:attr:`DataArray.data` attribute which exposes the DataArray's underlying array type. If you're using a Dask cluster, you can also use :py:meth:`Dataset.persist` for quickly accessing intermediate outputs. This is most helpful after expensive operations like rechunking or setting an index. It's a way of telling the cluster that it should start executing the computations that you have defined so far, and that it should try to keep those results in memory. You will get back a new Dask array that is semantically equivalent to your old array, but now points to running data. .. code-block:: python ds = ds.persist() .. tip:: Remember to save the dataset returned by persist! This is a common mistake. .. _dask.chunks: Chunking and performance ~~~~~~~~~~~~~~~~~~~~~~~~ The way a dataset is chunked can be critical to performance when working with large datasets. You'll want chunk sizes large enough to reduce the number of chunks that Dask has to think about (to reduce overhead from the task graph) but also small enough so that many of them can fit in memory at once. .. tip:: A good rule of thumb is to create arrays with a minimum chunk size of at least one million elements (e.g., a 1000x1000 matrix). With large arrays (10+ GB), you may need larger chunks. See `Choosing good chunk sizes in Dask `_. It can be helpful to choose chunk sizes based on your downstream analyses and to chunk as early as possible. Datasets with smaller chunks along the time axis, for example, can make time domain problems easier to parallelize since Dask can perform the same operation on each time chunk. If you're working with a large dataset with chunks that make downstream analyses challenging, you may need to rechunk your data. This is an expensive operation though, so is only recommended when needed. You can chunk or rechunk a dataset by: - Specifying the ``chunks`` kwarg when reading in your dataset. If you know you'll want to do some spatial subsetting, for example, you could use ``chunks={'latitude': 10, 'longitude': 10}`` to specify small chunks across space. This can avoid loading subsets of data that span multiple chunks, thus reducing the number of file reads. Note that this will only work, though, for chunks that are similar to how the data is chunked on disk. Otherwise, it will be very slow and require a lot of network bandwidth. - Many array file formats are chunked on disk. You can specify ``chunks={}`` to have a single dask chunk map to a single on-disk chunk, and ``chunks="auto"`` to have a single dask chunk be an automatically chosen multiple of the on-disk chunks. - Using :py:meth:`Dataset.chunk` after you've already read in your dataset. For time domain problems, for example, you can use ``ds.chunk(time=TimeResampler())`` to rechunk according to a specified unit of time. ``ds.chunk(time=TimeResampler("MS"))``, for example, will set the chunks so that a month of data is contained in one chunk. For large-scale rechunking tasks (e.g., converting a simulation dataset stored with chunking only along time to a dataset with chunking only across space), consider writing another copy of your data on disk and/or using dedicated tools such as `Rechunker `_. .. _dask.automatic-parallelization: Parallelize custom functions with ``apply_ufunc`` and ``map_blocks`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Almost all of Xarray's built-in operations work on Dask arrays. 
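For instance (a minimal sketch; ``ds`` is assumed to be a dataset like the one used throughout this page), a built-in reduction on a chunked dataset stays lazy until it is explicitly computed:

.. code-block:: python

    chunked = ds.chunk({"time": 10})
    monthly = chunked.temperature.groupby("time.month").mean()  # lazy; builds a task graph
    monthly.compute()  # executes the graph, processing chunks in parallel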
If you want to use a function that isn't wrapped by Xarray, and have it applied in parallel on each block of your xarray object, you have three options: 1. Use :py:func:`~xarray.apply_ufunc` to apply functions that consume and return NumPy arrays. 2. Use :py:func:`~xarray.map_blocks`, :py:meth:`Dataset.map_blocks` or :py:meth:`DataArray.map_blocks` to apply functions that consume and return xarray objects. 3. Extract Dask Arrays from xarray objects with :py:attr:`DataArray.data` and use Dask directly. .. tip:: See the extensive Xarray tutorial on `apply_ufunc `_. ``apply_ufunc`` ############### :py:func:`~xarray.apply_ufunc` automates `embarrassingly parallel `__ "map" type operations where a function written for processing NumPy arrays should be repeatedly applied to Xarray objects containing Dask Arrays. It works similarly to :py:func:`dask.array.map_blocks` and :py:func:`dask.array.blockwise`, but without requiring an intermediate layer of abstraction. See the `Dask documentation `__ for more details. For the best performance when using Dask's multi-threaded scheduler, wrap a function that already releases the global interpreter lock, which fortunately already includes most NumPy and Scipy functions. Here we show an example using NumPy operations and a fast function from `bottleneck `__, which we use to calculate `Spearman's rank-correlation coefficient `__: .. code-block:: python def covariance_gufunc(x, y): return ( (x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True)) ).mean(axis=-1) def pearson_correlation_gufunc(x, y): return covariance_gufunc(x, y) / (x.std(axis=-1) * y.std(axis=-1)) def spearman_correlation_gufunc(x, y): x_ranks = bottleneck.rankdata(x, axis=-1) y_ranks = bottleneck.rankdata(y, axis=-1) return pearson_correlation_gufunc(x_ranks, y_ranks) def spearman_correlation(x, y, dim): return xr.apply_ufunc( spearman_correlation_gufunc, x, y, input_core_dims=[[dim], [dim]], dask="parallelized", output_dtypes=[float], ) The only aspect of this example that is different from standard usage of ``apply_ufunc()`` is that we needed to supply the ``output_dtypes`` arguments. (Read up on :ref:`compute.wrapping-custom` for an explanation of the "core dimensions" listed in ``input_core_dims``.) Our new ``spearman_correlation()`` function achieves near linear speedup when run on large arrays across the four cores on my laptop. It would also work as a streaming operation, when run on arrays loaded from disk: .. jupyter-input:: rs = np.random.default_rng(0) array1 = xr.DataArray(rs.randn(1000, 100000), dims=["place", "time"]) # 800MB array2 = array1 + 0.5 * rs.randn(1000, 100000) # using one core, on NumPy arrays %time _ = spearman_correlation(array1, array2, 'time') # CPU times: user 21.6 s, sys: 2.84 s, total: 24.5 s # Wall time: 24.9 s chunked1 = array1.chunk({"place": 10}) chunked2 = array2.chunk({"place": 10}) # using all my laptop's cores, with Dask r = spearman_correlation(chunked1, chunked2, "time").compute() %time _ = r.compute() # CPU times: user 30.9 s, sys: 1.74 s, total: 32.6 s # Wall time: 4.59 s One limitation of ``apply_ufunc()`` is that it cannot be applied to arrays with multiple chunks along a core dimension: .. jupyter-input:: spearman_correlation(chunked1, chunked2, "place") .. jupyter-output:: ValueError: dimension 'place' on 0th function argument to apply_ufunc with dask='parallelized' consists of multiple chunks, but is also a core dimension. 
To fix, rechunk into a single Dask array chunk along this dimension, i.e., ``.rechunk({'place': -1})``, but beware that this may significantly increase memory usage. This reflects the nature of core dimensions, in contrast to broadcast (non-core) dimensions that allow operations to be split into arbitrary chunks for application. .. tip:: When possible, it's recommended to use pre-existing ``dask.array`` functions, either with existing xarray methods or :py:func:`~xarray.apply_ufunc()` with ``dask='allowed'``. Dask can often have a more efficient implementation that makes use of the specialized structure of a problem, unlike the generic speedups offered by ``dask='parallelized'``. ``map_blocks`` ############## Functions that consume and return Xarray objects can be easily applied in parallel using :py:func:`map_blocks`. Your function will receive an Xarray Dataset or DataArray subset to one chunk along each chunked dimension. .. jupyter-execute:: ds.temperature This DataArray has 3 chunks each with length 10 along the time dimension. At compute time, a function applied with :py:func:`map_blocks` will receive a DataArray corresponding to a single block of shape 10x180x180 (time x latitude x longitude) with values loaded. The following snippet illustrates how to check the shape of the object received by the applied function. .. jupyter-execute:: def func(da): print(da.sizes) return da.time mapped = xr.map_blocks(func, ds.temperature) mapped Notice that the :py:meth:`map_blocks` call printed ``Frozen({'time': 0, 'latitude': 0, 'longitude': 0})`` to screen. ``func`` is received 0-sized blocks! :py:meth:`map_blocks` needs to know what the final result looks like in terms of dimensions, shapes etc. It does so by running the provided function on 0-shaped inputs (*automated inference*). This works in many cases, but not all. If automatic inference does not work for your function, provide the ``template`` kwarg (see :ref:`below `). In this case, automatic inference has worked so let's check that the result is as expected. .. jupyter-execute:: mapped.load(scheduler="single-threaded") mapped.identical(ds.time) Note that we use ``.load(scheduler="single-threaded")`` to execute the computation. This executes the Dask graph in serial using a for loop, but allows for printing to screen and other debugging techniques. We can easily see that our function is receiving blocks of shape 10x180x180 and the returned result is identical to ``ds.time`` as expected. Here is a common example where automated inference will not work. .. jupyter-execute:: :raises: def func(da): print(da.sizes) return da.isel(time=[1]) mapped = xr.map_blocks(func, ds.temperature) ``func`` cannot be run on 0-shaped inputs because it is not possible to extract element 1 along a dimension of size 0. In this case we need to tell :py:func:`map_blocks` what the returned result looks like using the ``template`` kwarg. ``template`` must be an xarray Dataset or DataArray (depending on what the function returns) with dimensions, shapes, chunk sizes, attributes, coordinate variables *and* data variables that look exactly like the expected result. The variables should be dask-backed and hence not incur much memory cost. .. _template-note: .. note:: Note that when ``template`` is provided, ``attrs`` from ``template`` are copied over to the result. Any ``attrs`` set in ``func`` will be ignored. .. 
jupyter-execute:: template = ds.temperature.isel(time=[1, 11, 21]) mapped = xr.map_blocks(func, ds.temperature, template=template) Notice that the 0-shaped sizes were not printed to screen. Since ``template`` has been provided :py:func:`map_blocks` does not need to infer it by running ``func`` on 0-shaped inputs. .. jupyter-execute:: mapped.identical(template) :py:func:`map_blocks` also allows passing ``args`` and ``kwargs`` down to the user function ``func``. ``func`` will be executed as ``func(block_xarray, *args, **kwargs)`` so ``args`` must be a list and ``kwargs`` must be a dictionary. .. jupyter-execute:: def func(obj, a, b=0): return obj + a + b mapped = ds.map_blocks(func, args=[10], kwargs={"b": 10}) expected = ds + 10 + 10 mapped.identical(expected) .. jupyter-execute:: :hide-code: ds.close() # Closes "example-data.nc". os.remove("example-data.nc") .. tip:: As :py:func:`map_blocks` loads each block into memory, reduce as much as possible objects consumed by user functions. For example, drop useless variables before calling ``func`` with :py:func:`map_blocks`. Deploying Dask -------------- By default, Dask uses the multi-threaded scheduler, which distributes work across multiple cores on a single machine and allows for processing some datasets that do not fit into memory. However, this has two limitations: - You are limited by the size of your hard drive - Downloading data can be slow and expensive Instead, it can be faster and cheaper to run your computations close to where your data is stored, distributed across many machines on a Dask cluster. Often, this means deploying Dask on HPC clusters or on the cloud. See the `Dask deployment documentation `__ for more details. Best Practices -------------- Dask is pretty easy to use but there are some gotchas, many of which are under active development. Here are some tips we have found through experience. We also recommend checking out the `Dask best practices `_. 1. Do your spatial and temporal indexing (e.g. ``.sel()`` or ``.isel()``) early, especially before calling ``resample()`` or ``groupby()``. Grouping and resampling triggers some computation on all the blocks, which in theory should commute with indexing, but this optimization hasn't been implemented in Dask yet. (See `Dask issue #746 `_). 2. More generally, ``groupby()`` is a costly operation and will perform a lot better if the ``flox`` package is installed. See the `flox documentation `_ for more. By default Xarray will use ``flox`` if installed. 3. Save intermediate results to disk as a netCDF files (using ``to_netcdf()``) and then load them again with ``open_dataset()`` for further computations. For example, if subtracting temporal mean from a dataset, save the temporal mean to disk before subtracting. Again, in theory, Dask should be able to do the computation in a streaming fashion, but in practice this is a fail case for the Dask scheduler, because it tries to keep every chunk of an array that it computes in memory. (See `Dask issue #874 `_) 4. Use the `Dask dashboard `_ to identify performance bottlenecks. Here's an example of a simplified workflow putting some of these tips together: .. 
code-block:: python ds = xr.open_zarr( # Since we're doing a spatial reduction, increase chunk size in x, y "my-data.zarr", chunks={"x": 100, "y": 100} ) time_subset = ds.sea_temperature.sel( time=slice("2020-01-01", "2020-12-31") # Filter early ) # faster resampling when flox is installed daily = ds.resample(time="D").mean() daily.load() # Pull smaller results into memory after reducing the dataset xarray-2025.12.0/doc/user-guide/data-structures.rst000066400000000000000000001040611511464676000220470ustar00rootroot00000000000000.. _data structures: Data Structures =============== .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt np.random.seed(123456) np.set_printoptions(threshold=10) %xmode minimal DataArray --------- :py:class:`xarray.DataArray` is xarray's implementation of a labeled, multi-dimensional array. It has several key properties: - ``values``: a :py:class:`numpy.ndarray` or :ref:`numpy-like array ` holding the array's values - ``dims``: dimension names for each axis (e.g., ``('x', 'y', 'z')``) - ``coords``: a dict-like container of arrays (*coordinates*) that label each point (e.g., 1-dimensional arrays of numbers, datetime objects or strings) - ``attrs``: :py:class:`dict` to hold arbitrary metadata (*attributes*) Xarray uses ``dims`` and ``coords`` to enable its core metadata aware operations. Dimensions provide names that xarray uses instead of the ``axis`` argument found in many numpy functions. Coordinates enable fast label based indexing and alignment, building on the functionality of the ``index`` found on a pandas :py:class:`~pandas.DataFrame` or :py:class:`~pandas.Series`. DataArray objects also can have a ``name`` and can hold arbitrary metadata in the form of their ``attrs`` property. Names and attributes are strictly for users and user-written code: xarray makes no attempt to interpret them, and propagates them only in unambiguous cases. For reading and writing attributes xarray relies on the capabilities of the supported backends. (see FAQ, :ref:`approach to metadata`). .. _creating a dataarray: Creating a DataArray ~~~~~~~~~~~~~~~~~~~~ The :py:class:`~xarray.DataArray` constructor takes: - ``data``: a multi-dimensional array of values (e.g., a numpy ndarray, a :ref:`numpy-like array `, :py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or ``pandas.Panel``) - ``coords``: a list or dictionary of coordinates. If a list, it should be a list of tuples where the first element is the dimension name and the second element is the corresponding coordinate array_like object. - ``dims``: a list of dimension names. If omitted and ``coords`` is a list of tuples, dimension names are taken from ``coords``. - ``attrs``: a dictionary of attributes to add to the instance - ``name``: a string that names the instance .. jupyter-execute:: data = np.random.rand(4, 3) locs = ["IA", "IL", "IN"] times = pd.date_range("2000-01-01", periods=4) foo = xr.DataArray(data, coords=[times, locs], dims=["time", "space"]) foo Only ``data`` is required; all of other arguments will be filled in with default values: .. jupyter-execute:: xr.DataArray(data) As you can see, dimension names are always present in the xarray data model: if you do not provide them, defaults of the form ``dim_N`` will be created. However, coordinates are always optional, and dimensions do not have automatic coordinate labels. .. 
note:: This is different from pandas, where axes always have tick labels, which default to the integers ``[0, ..., n-1]``. Prior to xarray v0.9, xarray copied this behavior: default coordinates for each dimension would be created if coordinates were not supplied explicitly. This is no longer the case. Coordinates can be specified in the following ways: - A list of values with length equal to the number of dimensions, providing coordinate labels for each dimension. Each value must be of one of the following forms: * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable` * A tuple of the form ``(dims, data[, attrs])``, which is converted into arguments for :py:class:`~xarray.Variable` * A pandas object or scalar value, which is converted into a ``DataArray`` * A 1D array or list, which is interpreted as values for a one dimensional coordinate variable along the same dimension as its name - A dictionary of ``{coord_name: coord}`` where values are of the same form as the list. Supplying coordinates as a dictionary allows other coordinates than those corresponding to dimensions (more on these later). If you supply ``coords`` as a dictionary, you must explicitly provide ``dims``. As a list of tuples: .. jupyter-execute:: xr.DataArray(data, coords=[("time", times), ("space", locs)]) As a dictionary: .. jupyter-execute:: xr.DataArray( data, coords={ "time": times, "space": locs, "const": 42, "ranking": ("space", [1, 2, 3]), }, dims=["time", "space"], ) As a dictionary with coords across multiple dimensions: .. jupyter-execute:: xr.DataArray( data, coords={ "time": times, "space": locs, "const": 42, "ranking": (("time", "space"), np.arange(12).reshape(4, 3)), }, dims=["time", "space"], ) If you create a ``DataArray`` by supplying a pandas :py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or ``pandas.Panel``, any non-specified arguments in the ``DataArray`` constructor will be filled in from the pandas object: .. jupyter-execute:: df = pd.DataFrame({"x": [0, 1], "y": [2, 3]}, index=["a", "b"]) df.index.name = "abc" df.columns.name = "xyz" df .. jupyter-execute:: xr.DataArray(df) DataArray properties ~~~~~~~~~~~~~~~~~~~~ Let's take a look at the important properties on our array: .. jupyter-execute:: foo.values .. jupyter-execute:: foo.dims .. jupyter-execute:: foo.coords .. jupyter-execute:: foo.attrs .. jupyter-execute:: print(foo.name) You can modify ``values`` inplace: .. jupyter-execute:: foo.values = 1.0 * foo.values .. note:: The array values in a :py:class:`~xarray.DataArray` have a single (homogeneous) data type. To work with heterogeneous or structured data types in xarray, use coordinates, or put separate ``DataArray`` objects in a single :py:class:`~xarray.Dataset` (see below). Now fill in some of that missing metadata: .. jupyter-execute:: foo.name = "foo" foo.attrs["units"] = "meters" foo The :py:meth:`~xarray.DataArray.rename` method is another option, returning a new data array: .. jupyter-execute:: foo.rename("bar") DataArray Coordinates ~~~~~~~~~~~~~~~~~~~~~ The ``coords`` property is ``dict`` like. Individual coordinates can be accessed from the coordinates by name, or even by indexing the data array itself: .. jupyter-execute:: foo.coords["time"] .. jupyter-execute:: foo["time"] These are also :py:class:`~xarray.DataArray` objects, which contain tick-labels for each dimension. Coordinates can also be set or removed by using the dictionary like syntax: .. jupyter-execute:: foo["ranking"] = ("space", [1, 2, 3]) foo.coords .. 
jupyter-execute:: del foo["ranking"] foo.coords For more details, see :ref:`coordinates` below. Dataset ------- :py:class:`xarray.Dataset` is xarray's multi-dimensional equivalent of a :py:class:`~pandas.DataFrame`. It is a dict-like container of labeled arrays (:py:class:`~xarray.DataArray` objects) with aligned dimensions. It is designed as an in-memory representation of the data model from the `netCDF`__ file format. __ https://www.unidata.ucar.edu/software/netcdf/ In addition to the dict-like interface of the dataset itself, which can be used to access any variable in a dataset, datasets have four key properties: - ``dims``: a dictionary mapping from dimension names to the fixed length of each dimension (e.g., ``{'x': 6, 'y': 6, 'time': 8}``) - ``data_vars``: a dict-like container of DataArrays corresponding to variables - ``coords``: another dict-like container of DataArrays intended to label points used in ``data_vars`` (e.g., arrays of numbers, datetime objects or strings) - ``attrs``: :py:class:`dict` to hold arbitrary metadata The distinction between whether a variable falls in data or coordinates (borrowed from `CF conventions`_) is mostly semantic, and you can probably get away with ignoring it if you like: dictionary like access on a dataset will supply variables found in either category. However, xarray does make use of the distinction for indexing and computations. Coordinates indicate constant/fixed/independent quantities, unlike the varying/measured/dependent quantities that belong in data. .. _CF conventions: https://cfconventions.org/ Here is an example of how we might structure a dataset for a weather forecast: .. image:: ../_static/dataset-diagram.png In this example, it would be natural to call ``temperature`` and ``precipitation`` "data variables" and all the other arrays "coordinate variables" because they label the points along the dimensions. (see [1]_ for more background on this example). Creating a Dataset ~~~~~~~~~~~~~~~~~~ To make an :py:class:`~xarray.Dataset` from scratch, supply dictionaries for any variables (``data_vars``), coordinates (``coords``) and attributes (``attrs``). - ``data_vars`` should be a dictionary with each key as the name of the variable and each value as one of: * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable` * A tuple of the form ``(dims, data[, attrs])``, which is converted into arguments for :py:class:`~xarray.Variable` * A pandas object, which is converted into a ``DataArray`` * A 1D array or list, which is interpreted as values for a one dimensional coordinate variable along the same dimension as its name - ``coords`` should be a dictionary of the same form as ``data_vars``. - ``attrs`` should be a dictionary. Let's create some fake data for the example we show above. In this example dataset, we will represent measurements of the temperature and pressure that were made under various conditions: * the measurements were made on four different days; * they were made at two separate locations, which we will represent using their latitude and longitude; and * they were made using instruments by three different manufacturers, which we will refer to as ``'manufac1'``, ``'manufac2'``, and ``'manufac3'``. .. 
jupyter-execute:: np.random.seed(0) temperature = 15 + 8 * np.random.randn(2, 3, 4) precipitation = 10 * np.random.rand(2, 3, 4) lon = [-99.83, -99.32] lat = [42.25, 42.21] instruments = ["manufac1", "manufac2", "manufac3"] time = pd.date_range("2014-09-06", periods=4) reference_time = pd.Timestamp("2014-09-05") # for real use cases, its good practice to supply array attributes such as # units, but we won't bother here for the sake of brevity ds = xr.Dataset( { "temperature": (["loc", "instrument", "time"], temperature), "precipitation": (["loc", "instrument", "time"], precipitation), }, coords={ "lon": (["loc"], lon), "lat": (["loc"], lat), "instrument": instruments, "time": time, "reference_time": reference_time, }, ) ds Here we pass :py:class:`xarray.DataArray` objects or a pandas object as values in the dictionary: .. jupyter-execute:: xr.Dataset(dict(bar=foo)) .. jupyter-execute:: xr.Dataset(dict(bar=foo.to_pandas())) Where a pandas object is supplied as a value, the names of its indexes are used as dimension names, and its data is aligned to any existing dimensions. You can also create a dataset from: - A :py:class:`pandas.DataFrame` or ``pandas.Panel`` along its columns and items respectively, by passing it into the :py:class:`~xarray.Dataset` directly - A :py:class:`pandas.DataFrame` with :py:meth:`Dataset.from_dataframe `, which will additionally handle MultiIndexes See :ref:`pandas` - A netCDF file on disk with :py:func:`~xarray.open_dataset`. See :ref:`io`. Dataset contents ~~~~~~~~~~~~~~~~ :py:class:`~xarray.Dataset` implements the Python mapping interface, with values given by :py:class:`xarray.DataArray` objects: .. jupyter-execute:: print("temperature" in ds) ds["temperature"] Valid keys include each listed coordinate and data variable. Data and coordinate variables are also contained separately in the :py:attr:`~xarray.Dataset.data_vars` and :py:attr:`~xarray.Dataset.coords` dictionary-like attributes: .. jupyter-execute:: ds.data_vars .. jupyter-execute:: ds.coords Finally, like data arrays, datasets also store arbitrary metadata in the form of ``attributes``: .. jupyter-execute:: print(ds.attrs) ds.attrs["title"] = "example attribute" ds Xarray does not enforce any restrictions on attributes, but serialization to some file formats may fail if you use objects that are not strings, numbers or :py:class:`numpy.ndarray` objects. As a useful shortcut, you can use attribute style access for reading (but not setting) variables and attributes: .. jupyter-execute:: ds.temperature This is particularly useful in an exploratory context, because you can tab-complete these variable names with tools like IPython. .. _dictionary_like_methods: Dictionary like methods ~~~~~~~~~~~~~~~~~~~~~~~ We can update a dataset in-place using Python's standard dictionary syntax. For example, to create this example dataset from scratch, we could have written: .. jupyter-execute:: ds = xr.Dataset() ds["temperature"] = (("loc", "instrument", "time"), temperature) ds["temperature_double"] = (("loc", "instrument", "time"), temperature * 2) ds["precipitation"] = (("loc", "instrument", "time"), precipitation) ds.coords["lat"] = (("loc",), lat) ds.coords["lon"] = (("loc",), lon) ds.coords["time"] = pd.date_range("2014-09-06", periods=4) ds.coords["reference_time"] = pd.Timestamp("2014-09-05") To change the variables in a ``Dataset``, you can use all the standard dictionary methods, including ``values``, ``items``, ``__delitem__``, ``get`` and :py:meth:`~xarray.Dataset.update`. 
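For instance (a small, non-executed sketch working on a copy of the ``ds`` built above; the ``humidity`` variable is made up for illustration), deleting and updating variables works just like with a plain dictionary:

.. code-block:: python

    ds_copy = ds.copy()
    del ds_copy["temperature_double"]  # remove a variable
    ds_copy.update(
        {"humidity": (("loc", "instrument", "time"), 10 * np.random.rand(2, 3, 4))}
    )
    list(ds_copy.keys())  # data variable and coordinate names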
Note that assigning a ``DataArray`` or pandas object to a ``Dataset`` variable using ``__setitem__`` or ``update`` will :ref:`automatically align` the array(s) to the original dataset's indexes. You can copy a ``Dataset`` by calling the :py:meth:`~xarray.Dataset.copy` method. By default, the copy is shallow, so only the container will be copied: the arrays in the ``Dataset`` will still be stored in the same underlying :py:class:`numpy.ndarray` objects. You can copy all data by calling ``ds.copy(deep=True)``. .. _transforming datasets: Transforming datasets ~~~~~~~~~~~~~~~~~~~~~ In addition to dictionary-like methods (described above), xarray has additional methods (like pandas) for transforming datasets into new objects. For removing variables, you can select and drop an explicit list of variables by indexing with a list of names or using the :py:meth:`~xarray.Dataset.drop_vars` methods to return a new ``Dataset``. These operations keep around coordinates: .. jupyter-execute:: ds[["temperature"]] .. jupyter-execute:: ds[["temperature", "temperature_double"]] .. jupyter-execute:: ds.drop_vars("temperature") To remove a dimension, you can use :py:meth:`~xarray.Dataset.drop_dims` method. Any variables using that dimension are dropped: .. jupyter-execute:: ds.drop_dims("time") As an alternate to dictionary-like modifications, you can use :py:meth:`~xarray.Dataset.assign` and :py:meth:`~xarray.Dataset.assign_coords`. These methods return a new dataset with additional (or replaced) values: .. jupyter-execute:: ds.assign(temperature2=2 * ds.temperature) There is also the :py:meth:`~xarray.Dataset.pipe` method that allows you to use a method call with an external function (e.g., ``ds.pipe(func)``) instead of simply calling it (e.g., ``func(ds)``). This allows you to write pipelines for transforming your data (using "method chaining") instead of writing hard to follow nested function calls: .. jupyter-input:: # these lines are equivalent, but with pipe we can make the logic flow # entirely from left to right plt.plot((2 * ds.temperature.sel(loc=0)).mean("instrument")) (ds.temperature.sel(loc=0).pipe(lambda x: 2 * x).mean("instrument").pipe(plt.plot)) Both ``pipe`` and ``assign`` replicate the pandas methods of the same names (:py:meth:`DataFrame.pipe ` and :py:meth:`DataFrame.assign `). With xarray, there is no performance penalty for creating new datasets, even if variables are lazily loaded from a file on disk. Creating new objects instead of mutating existing objects often results in easier to understand code, so we encourage using this approach. Renaming variables ~~~~~~~~~~~~~~~~~~ Another useful option is the :py:meth:`~xarray.Dataset.rename` method to rename dataset variables: .. jupyter-execute:: ds.rename({"temperature": "temp", "precipitation": "precip"}) The related :py:meth:`~xarray.Dataset.swap_dims` method allows you do to swap dimension and non-dimension variables: .. jupyter-execute:: ds.coords["day"] = ("time", [6, 7, 8, 9]) ds.swap_dims({"time": "day"}) DataTree -------- :py:class:`~xarray.DataTree` is ``xarray``'s highest-level data structure, able to organise heterogeneous data which could not be stored inside a single :py:class:`~xarray.Dataset` object. This includes representing the recursive structure of multiple `groups`_ within a netCDF file or `Zarr Store`_. .. _groups: https://www.unidata.ucar.edu/software/netcdf/workshops/2011/groups-types/GroupsIntro.html .. 
_Zarr Store: https://zarr.readthedocs.io/en/stable/tutorial.html#groups Each :py:class:`~xarray.DataTree` object (or "node") contains the same data that a single :py:class:`xarray.Dataset` would (i.e. :py:class:`~xarray.DataArray` objects stored under hashable keys), and so has the same key properties: - ``dims``: a dictionary mapping of dimension names to lengths, for the variables in this node, and this node's ancestors, - ``data_vars``: a dict-like container of DataArrays corresponding to variables in this node, - ``coords``: another dict-like container of DataArrays, corresponding to coordinate variables in this node, and this node's ancestors, - ``attrs``: dict to hold arbitrary metadata relevant to data in this node. A single :py:class:`~xarray.DataTree` object acts much like a single :py:class:`~xarray.Dataset` object, and has a similar set of dict-like methods defined upon it. However, :py:class:`~xarray.DataTree`\s can also contain other :py:class:`~xarray.DataTree` objects, so they can be thought of as nested dict-like containers of both :py:class:`xarray.DataArray`\s and :py:class:`~xarray.DataTree`\s. A single datatree object is known as a "node", and its position relative to other nodes is defined by two more key properties: - ``children``: A dictionary mapping from names to other :py:class:`~xarray.DataTree` objects, known as its "child nodes". - ``parent``: The single :py:class:`~xarray.DataTree` object whose children this datatree is a member of, known as its "parent node". Each child automatically knows about its parent node, and a node without a parent is known as a "root" node (represented by the ``parent`` attribute pointing to ``None``). Nodes can have multiple children, but as each child node has at most one parent, there can only ever be one root node in a given tree. The overall structure is technically a connected acyclic undirected rooted graph, otherwise known as a `"Tree" `_. :py:class:`~xarray.DataTree` objects can also optionally have a ``name`` as well as ``attrs``, just like a :py:class:`~xarray.DataArray`. Again these are not normally used unless explicitly accessed by the user. .. _creating a datatree: Creating a DataTree ~~~~~~~~~~~~~~~~~~~ One way to create a :py:class:`~xarray.DataTree` from scratch is to create each node individually, specifying the nodes' relationship to one another as you create each one. The :py:class:`~xarray.DataTree` constructor takes: - ``dataset``: The data that will be stored in this node, represented by a single :py:class:`xarray.Dataset`. - ``children``: The various child nodes (if there are any), given as a mapping from string keys to :py:class:`~xarray.DataTree` objects. - ``name``: A string to use as the name of this node. Let's make a single datatree node with some example data in it: .. jupyter-execute:: ds1 = xr.Dataset({"foo": "orange"}) dt = xr.DataTree(name="root", dataset=ds1) dt At this point we have created a single node datatree with no parent and no children. .. jupyter-execute:: print(dt.parent is None) dt.children We can add a second node to this tree, assigning it to the parent node ``dt``: .. jupyter-execute:: dataset2 = xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}) dt2 = xr.DataTree(name="a", dataset=dataset2) # Add the child Datatree to the root node dt.children = {"child-node": dt2} dt More idiomatically you can create a tree from a dictionary of ``Datasets`` and ``DataTrees``. 
In this case we add a new node under ``dt["child-node"]`` by providing the explicit path under ``"child-node"`` as the dictionary key: .. jupyter-execute:: # create a third Dataset ds3 = xr.Dataset({"zed": np.nan}) # create a tree from a dictionary of DataTrees and Datasets dt = xr.DataTree.from_dict({"/": dt, "/child-node/new-zed-node": ds3}) We have created a tree with three nodes in it: .. jupyter-execute:: dt Consistency checks are enforced. For instance, if we try to create a cycle, where the root node is also a child of a descendant, the constructor will raise an (:py:class:`~xarray.InvalidTreeError`): .. jupyter-execute:: :raises: dt["child-node"].children = {"new-child": dt} Alternatively you can also create a :py:class:`~xarray.DataTree` object from: - A dictionary mapping directory-like paths to either :py:class:`~xarray.DataTree` nodes or data, using :py:meth:`xarray.DataTree.from_dict()`, - A well formed netCDF or Zarr file on disk with :py:func:`~xarray.open_datatree()`. See :ref:`reading and writing files `. For data files with groups that do not align see :py:func:`xarray.open_groups` or target each group individually :py:func:`xarray.open_dataset(group='groupname') `. For more information about coordinate alignment see :ref:`datatree-inheritance` DataTree Contents ~~~~~~~~~~~~~~~~~ Like :py:class:`~xarray.Dataset`, :py:class:`~xarray.DataTree` implements the python mapping interface, but with values given by either :py:class:`~xarray.DataArray` objects or other :py:class:`~xarray.DataTree` objects. .. jupyter-execute:: dt["child-node"] .. jupyter-execute:: dt["foo"] Iterating over keys will iterate over both the names of variables and child nodes. We can also access all the data in a single node, and its inherited coordinates, through a dataset-like view .. jupyter-execute:: dt["child-node"].dataset This demonstrates the fact that the data in any one node is equivalent to the contents of a single :py:class:`~xarray.Dataset` object. The :py:attr:`DataTree.dataset ` property returns an immutable view, but we can instead extract the node's data contents as a new and mutable :py:class:`~xarray.Dataset` object via :py:meth:`DataTree.to_dataset() `: .. jupyter-execute:: dt["child-node"].to_dataset() Like with :py:class:`~xarray.Dataset`, you can access the data and coordinate variables of a node separately via the :py:attr:`~xarray.DataTree.data_vars` and :py:attr:`~xarray.DataTree.coords` attributes: .. jupyter-execute:: dt["child-node"].data_vars .. jupyter-execute:: dt["child-node"].coords Dictionary-like methods ~~~~~~~~~~~~~~~~~~~~~~~ We can update a datatree in-place using Python's standard dictionary syntax, similar to how we can for Dataset objects. For example, to create this example DataTree from scratch, we could have written: .. jupyter-execute:: dt = xr.DataTree(name="root") dt["foo"] = "orange" dt["child-node"] = xr.DataTree( dataset=xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}) ) dt["child-node/new-zed-node/zed"] = np.nan dt To change the variables in a node of a :py:class:`~xarray.DataTree`, you can use all the standard dictionary methods, including ``values``, ``items``, ``__delitem__``, ``get`` and :py:meth:`xarray.DataTree.update`. Note that assigning a :py:class:`~xarray.DataTree` object to a :py:class:`~xarray.DataTree` variable using ``__setitem__`` or :py:meth:`~xarray.DataTree.update` will :ref:`automatically align ` the array(s) to the original node's indexes. 
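As a short, non-executed sketch (the added names are purely illustrative), variables and whole child nodes can be added or removed with the same dictionary idioms:

.. code-block:: python

    dt["child-node/extra"] = xr.DataArray([1, 2, 3], dims="y")  # add a variable via a path
    del dt["foo"]  # remove a variable from the root node
    dt.update({"another-node": xr.DataTree(xr.Dataset({"baz": 0}))})  # add a child node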
If you copy a :py:class:`~xarray.DataTree` using the :py:func:`copy` function or the :py:meth:`xarray.DataTree.copy` method it will copy the subtree, meaning that node and the children below it, but no parents above it.
Like for :py:class:`~xarray.Dataset`, this copy is shallow by default, but you can copy all the underlying data arrays by calling ``dt.copy(deep=True)``.

.. _datatree-inheritance:

DataTree Inheritance
~~~~~~~~~~~~~~~~~~~~

DataTree implements a simple inheritance mechanism. Coordinates, dimensions and their associated indices are propagated downward from the root node to all descendent nodes.
Coordinate inheritance was inspired by the NetCDF-CF inherited dimensions, but DataTree's inheritance is slightly stricter yet easier to reason about.

The constraint that this puts on a DataTree is that dimensions and indices that are inherited must be aligned with any direct descendant node's existing dimension or index.
This allows descendants to use dimensions defined in ancestor nodes, without duplicating that information.
But as a consequence, if a dimension-name is defined on a node and that same dimension-name exists in one of its ancestors, they must align (have the same index and size).

Some examples:

.. jupyter-execute::

    # Set up coordinates
    time = xr.DataArray(data=["2022-01", "2023-01"], dims="time")
    stations = xr.DataArray(data=list("abcdef"), dims="station")
    lon = [-100, -80, -60]
    lat = [10, 20, 30]

    # Set up fake data
    wind_speed = xr.DataArray(np.ones((2, 6)) * 2, dims=("time", "station"))
    pressure = xr.DataArray(np.ones((2, 6)) * 3, dims=("time", "station"))
    air_temperature = xr.DataArray(np.ones((2, 6)) * 4, dims=("time", "station"))
    dewpoint = xr.DataArray(np.ones((2, 6)) * 5, dims=("time", "station"))
    infrared = xr.DataArray(np.ones((2, 3, 3)) * 6, dims=("time", "lon", "lat"))
    true_color = xr.DataArray(np.ones((2, 3, 3)) * 7, dims=("time", "lon", "lat"))

    dt2 = xr.DataTree.from_dict(
        {
            "/": xr.Dataset(
                coords={"time": time},
            ),
            "/weather": xr.Dataset(
                coords={"station": stations},
                data_vars={
                    "wind_speed": wind_speed,
                    "pressure": pressure,
                },
            ),
            "/weather/temperature": xr.Dataset(
                data_vars={
                    "air_temperature": air_temperature,
                    "dewpoint": dewpoint,
                },
            ),
            "/satellite": xr.Dataset(
                coords={"lat": lat, "lon": lon},
                data_vars={
                    "infrared": infrared,
                    "true_color": true_color,
                },
            ),
        },
    )
    dt2

Here there are four different coordinate variables, which apply to variables in the DataTree in different ways:

- ``time`` is a shared coordinate used by both ``weather`` and ``satellite`` variables,
- ``station`` is used only for ``weather`` variables,
- ``lat`` and ``lon`` are only used for ``satellite`` images.

Coordinate variables are inherited by descendent nodes, which is only possible because variables at different levels of a hierarchical DataTree are always aligned.
Placing the ``time`` variable at the root node automatically indicates that it applies to all descendent nodes.
Similarly, ``station`` is in the base ``weather`` node, because it applies to all weather variables, both directly in ``weather`` and in the ``temperature`` sub-tree.

Notice the inherited coordinates are explicitly shown in the tree representation under ``Inherited coordinates:``.

.. jupyter-execute::

    dt2["/weather"]

Accessing any of the lower level trees through the :py:func:`.dataset ` property automatically includes coordinates from higher levels (e.g., ``time`` and ``station``):
.. jupyter-execute::

    dt2["/weather/temperature"].dataset

Similarly, when you retrieve a Dataset through :py:func:`~xarray.DataTree.to_dataset`, the inherited coordinates are included by default unless you exclude them with the ``inherit`` flag:

.. jupyter-execute::

    dt2["/weather/temperature"].to_dataset()

.. jupyter-execute::

    dt2["/weather/temperature"].to_dataset(inherit=False)

For more examples and further discussion see :ref:`alignment and coordinate inheritance `.

.. _coordinates:

Coordinates
-----------

Coordinates are ancillary variables stored for ``DataArray`` and ``Dataset`` objects in the ``coords`` attribute:

.. jupyter-execute::

    ds.coords

Unlike attributes, xarray *does* interpret and persist coordinates in operations that transform xarray objects.
There are two types of coordinates in xarray:

- **dimension coordinates** are one dimensional coordinates with a name equal to their sole dimension (marked by ``*`` when printing a dataset or data array).
  They are used for label based indexing and alignment, like the ``index`` found on a pandas :py:class:`~pandas.DataFrame` or :py:class:`~pandas.Series`.
  Indeed, these "dimension" coordinates use a :py:class:`pandas.Index` internally to store their values.

- **non-dimension coordinates** are variables that contain coordinate data, but are not a dimension coordinate.
  They can be multidimensional (see :ref:`/examples/multidimensional-coords.ipynb`), and there is no relationship between the name of a non-dimension coordinate and the name(s) of its dimension(s).
  Non-dimension coordinates can be useful for indexing or plotting; otherwise, xarray does not make any direct use of the values associated with them.
  They are not used for alignment or automatic indexing, nor are they required to match when doing arithmetic (see :ref:`coordinates math`).

.. note::

    Xarray's terminology differs from the `CF terminology`_, where the "dimension coordinates" are called "coordinate variables", and the "non-dimension coordinates" are called "auxiliary coordinate variables" (see :issue:`1295` for more details).

.. _CF terminology: https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#terminology

Modifying coordinates
~~~~~~~~~~~~~~~~~~~~~

To entirely add or remove coordinate arrays, you can use dictionary-like syntax, as shown above.

To convert back and forth between data and coordinates, you can use the :py:meth:`~xarray.Dataset.set_coords` and :py:meth:`~xarray.Dataset.reset_coords` methods:

.. jupyter-execute::

    ds.reset_coords()

.. jupyter-execute::

    ds.set_coords(["temperature", "precipitation"])

.. jupyter-execute::

    ds["temperature"].reset_coords(drop=True)

Notice that these operations skip coordinates with names given by dimensions, as used for indexing.
This is mostly because we are not entirely sure how to design the interface around the fact that xarray cannot store a coordinate and a variable with the same name but different values in the same dictionary.
But we do recognize that supporting something like this would be useful.

Coordinates methods
~~~~~~~~~~~~~~~~~~~

``Coordinates`` objects also have a few useful methods, mostly for converting them into dataset objects:

.. jupyter-execute::

    ds.coords.to_dataset()

The merge method is particularly interesting, because it implements the same logic used for merging coordinates in arithmetic operations (see :ref:`compute`):

..
jupyter-execute:: alt = xr.Dataset(coords={"z": [10], "lat": 0, "lon": 0}) ds.coords.merge(alt.coords) The ``coords.merge`` method may be useful if you want to implement your own binary operations that act on xarray objects. In the future, we hope to write more helper functions so that you can easily make your functions act like xarray's built-in arithmetic. Indexes ~~~~~~~ To convert a coordinate (or any ``DataArray``) into an actual :py:class:`pandas.Index`, use the :py:meth:`~xarray.DataArray.to_index` method: .. jupyter-execute:: ds["time"].to_index() A useful shortcut is the ``indexes`` property (on both ``DataArray`` and ``Dataset``), which lazily constructs a dictionary whose keys are given by each dimension and whose the values are ``Index`` objects: .. jupyter-execute:: ds.indexes MultiIndex coordinates ~~~~~~~~~~~~~~~~~~~~~~ Xarray supports labeling coordinate values with a :py:class:`pandas.MultiIndex`: .. jupyter-execute:: midx = pd.MultiIndex.from_arrays( [["R", "R", "V", "V"], [0.1, 0.2, 0.7, 0.9]], names=("band", "wn") ) mda = xr.DataArray(np.random.rand(4), coords={"spec": midx}, dims="spec") mda For convenience multi-index levels are directly accessible as "virtual" or "derived" coordinates (marked by ``-`` when printing a dataset or data array): .. jupyter-execute:: mda["band"] .. jupyter-execute:: mda.wn Indexing with multi-index levels is also possible using the ``sel`` method (see :ref:`multi-level indexing`). Unlike other coordinates, "virtual" level coordinates are not stored in the ``coords`` attribute of ``DataArray`` and ``Dataset`` objects (although they are shown when printing the ``coords`` attribute). Consequently, most of the coordinates related methods don't apply for them. It also can't be used to replace one particular level. Because in a ``DataArray`` or ``Dataset`` object each multi-index level is accessible as a "virtual" coordinate, its name must not conflict with the names of the other levels, coordinates and data variables of the same object. Even though xarray sets default names for multi-indexes with unnamed levels, it is recommended that you explicitly set the names of the levels. .. [1] Latitude and longitude are 2D arrays because the dataset uses `projected coordinates`__. ``reference_time`` refers to the reference time at which the forecast was made, rather than ``time`` which is the valid time for which the forecast applies. __ https://en.wikipedia.org/wiki/Map_projection xarray-2025.12.0/doc/user-guide/duckarrays.rst000066400000000000000000000252741511464676000210750ustar00rootroot00000000000000.. currentmodule:: xarray .. _userguide.duckarrays: Working with numpy-like arrays ============================== NumPy-like arrays (often known as :term:`duck array`\s) are drop-in replacements for the :py:class:`numpy.ndarray` class but with different features, such as propagating physical units or a different layout in memory. Xarray can often wrap these array types, allowing you to use labelled dimensions and indexes whilst benefiting from the additional features of these array libraries. Some numpy-like array types that xarray already has some support for: * `Cupy `_ - GPU support (see `cupy-xarray `_), * `Sparse `_ - for performant arrays with many zero elements, * `Pint `_ - for tracking the physical units of your data (see `pint-xarray `_), * `Dask `_ - parallel computing on larger-than-memory arrays (see :ref:`using dask with xarray `), * `Cubed `_ - another parallel computing framework that emphasises reliability (see `cubed-xarray `_). .. 
warning:: This feature should be considered somewhat experimental. Please report any bugs you find on `xarrayโ€™s issue tracker `_. .. note:: For information on wrapping dask arrays see :ref:`dask`. Whilst xarray wraps dask arrays in a similar way to that described on this page, chunked array types like :py:class:`dask.array.Array` implement additional methods that require slightly different user code (e.g. calling ``.chunk`` or ``.compute``). See the docs on :ref:`wrapping chunked arrays `. Why "duck"? ----------- Why is it also called a "duck" array? This comes from a common statement of object-oriented programming - "If it walks like a duck, and quacks like a duck, treat it like a duck". In other words, a library like xarray that is capable of using multiple different types of arrays does not have to explicitly check that each one it encounters is permitted (e.g. ``if dask``, ``if numpy``, ``if sparse`` etc.). Instead xarray can take the more permissive approach of simply treating the wrapped array as valid, attempting to call the relevant methods (e.g. ``.mean()``) and only raising an error if a problem occurs (e.g. the method is not found on the wrapped class). This is much more flexible, and allows objects and classes from different libraries to work together more easily. What is a numpy-like array? --------------------------- A "numpy-like array" (also known as a "duck array") is a class that contains array-like data, and implements key numpy-like functionality such as indexing, broadcasting, and computation methods. For example, the `sparse `_ library provides a sparse array type which is useful for representing nD array objects like sparse matrices in a memory-efficient manner. We can create a sparse array object (of the :py:class:`sparse.COO` type) from a numpy array like this: .. jupyter-execute:: from sparse import COO import xarray as xr import numpy as np %xmode minimal .. jupyter-execute:: x = np.eye(4, dtype=np.uint8) # create diagonal identity matrix s = COO.from_numpy(x) s This sparse object does not attempt to explicitly store every element in the array, only the non-zero elements. This approach is much more efficient for large arrays with only a few non-zero elements (such as tri-diagonal matrices). Sparse array objects can be converted back to a "dense" numpy array by calling :py:meth:`sparse.COO.todense`. Just like :py:class:`numpy.ndarray` objects, :py:class:`sparse.COO` arrays support indexing .. jupyter-execute:: s[1, 1] # diagonal elements should be ones .. jupyter-execute:: s[2, 3] # off-diagonal elements should be zero broadcasting, .. jupyter-execute:: x2 = np.zeros( (4, 1), dtype=np.uint8 ) # create second sparse array of different shape s2 = COO.from_numpy(x2) (s * s2) # multiplication requires broadcasting and various computation methods .. jupyter-execute:: s.sum(axis=1) This numpy-like array also supports calling so-called `numpy ufuncs `_ ("universal functions") on it directly: .. jupyter-execute:: np.sum(s, axis=1) Notice that in each case the API for calling the operation on the sparse array is identical to that of calling it on the equivalent numpy array - this is the sense in which the sparse array is "numpy-like". .. note:: For discussion on exactly which methods a class needs to implement to be considered "numpy-like", see :ref:`internals.duckarrays`. Wrapping numpy-like arrays in xarray ------------------------------------ :py:class:`DataArray`, :py:class:`Dataset`, and :py:class:`Variable` objects can wrap these numpy-like arrays. 
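As a small aside (a hedged sketch rather than anything the following sections rely on), the same ``s`` array from above can also be handed to the lower-level :py:class:`Variable` class directly:

.. code-block:: python

    # wrap the sparse.COO array in a Variable; the sparse layout is preserved
    var = xr.Variable(dims=("i", "j"), data=s)
    type(var.data)  # sparse.COO, not numpy.ndarray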
Constructing xarray objects which wrap numpy-like arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The primary way to create an xarray object which wraps a numpy-like array is to pass that numpy-like array instance directly to the constructor of the xarray class. The :ref:`page on xarray data structures ` shows how :py:class:`DataArray` and :py:class:`Dataset` both accept data in various forms through their ``data`` argument, but in fact this data can also be any wrappable numpy-like array. For example, we can wrap the sparse array we created earlier inside a new DataArray object: .. jupyter-execute:: s_da = xr.DataArray(s, dims=["i", "j"]) s_da We can see what's inside - the printable representation of our xarray object (the repr) automatically uses the printable representation of the underlying wrapped array. Of course our sparse array object is still there underneath - it's stored under the ``.data`` attribute of the dataarray: .. jupyter-execute:: s_da.data Array methods ~~~~~~~~~~~~~ We saw above that numpy-like arrays provide numpy methods. Xarray automatically uses these when you call the corresponding xarray method: .. jupyter-execute:: s_da.sum(dim="j") Converting wrapped types ~~~~~~~~~~~~~~~~~~~~~~~~ If you want to change the type inside your xarray object you can use :py:meth:`DataArray.as_numpy`: .. jupyter-execute:: s_da.as_numpy() This returns a new :py:class:`DataArray` object, but now wrapping a normal numpy array. If instead you want to convert to numpy and return that numpy array you can use either :py:meth:`DataArray.to_numpy` or :py:meth:`DataArray.values`, where the former is strongly preferred. The difference is in the way they coerce to numpy - :py:meth:`~DataArray.values` always uses :py:func:`numpy.asarray` which will fail for some array types (e.g. ``cupy``), whereas :py:meth:`~DataArray.to_numpy` uses the correct method depending on the array type. .. jupyter-execute:: s_da.to_numpy() .. jupyter-execute:: :raises: s_da.values This illustrates the difference between :py:meth:`~DataArray.data` and :py:meth:`~DataArray.values`, which is sometimes a point of confusion for new xarray users. Explicitly: :py:meth:`DataArray.data` returns the underlying numpy-like array, regardless of type, whereas :py:meth:`DataArray.values` converts the underlying array to a numpy array before returning it. (This is another reason to use :py:meth:`~DataArray.to_numpy` over :py:meth:`~DataArray.values` - the intention is clearer.) Conversion to numpy as a fallback ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a wrapped array does not implement the corresponding array method then xarray will often attempt to convert the underlying array to a numpy array so that the operation can be performed. You may want to watch out for this behavior, and report any instances in which it causes problems. 
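A simple way to keep an eye on this (a rough sketch, not an official xarray API) is to compare the type of ``.data`` before and after an operation; a silent fallback shows up as the result wrapping a plain :py:class:`numpy.ndarray`:

.. code-block:: python

    result = s_da.sum(dim="j")

    # if the duck type survived, both of these report sparse.COO
    print(type(s_da.data))
    print(type(result.data))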
Most of xarray's API does support using :term:`duck array` objects, but there are a few areas where the code will still convert to ``numpy`` arrays: - Dimension coordinates, and thus all indexing operations: * :py:meth:`Dataset.sel` and :py:meth:`DataArray.sel` * :py:meth:`Dataset.loc` and :py:meth:`DataArray.loc` * :py:meth:`Dataset.drop_sel` and :py:meth:`DataArray.drop_sel` * :py:meth:`Dataset.reindex`, :py:meth:`Dataset.reindex_like`, :py:meth:`DataArray.reindex` and :py:meth:`DataArray.reindex_like`: duck arrays in data variables and non-dimension coordinates won't be casted - Functions and methods that depend on external libraries or features of ``numpy`` not covered by ``__array_function__`` / ``__array_ufunc__``: * :py:meth:`Dataset.ffill` and :py:meth:`DataArray.ffill` (uses ``bottleneck``) * :py:meth:`Dataset.bfill` and :py:meth:`DataArray.bfill` (uses ``bottleneck``) * :py:meth:`Dataset.interp`, :py:meth:`Dataset.interp_like`, :py:meth:`DataArray.interp` and :py:meth:`DataArray.interp_like` (uses ``scipy``): duck arrays in data variables and non-dimension coordinates will be casted in addition to not supporting duck arrays in dimension coordinates * :py:meth:`Dataset.rolling` and :py:meth:`DataArray.rolling` (requires ``numpy>=1.20``) * :py:meth:`Dataset.rolling_exp` and :py:meth:`DataArray.rolling_exp` (uses ``numbagg``) * :py:meth:`Dataset.interpolate_na` and :py:meth:`DataArray.interpolate_na` (uses :py:class:`numpy.vectorize`) * :py:func:`apply_ufunc` with ``vectorize=True`` (uses :py:class:`numpy.vectorize`) - Incompatibilities between different :term:`duck array` libraries: * :py:meth:`Dataset.chunk` and :py:meth:`DataArray.chunk`: this fails if the data was not already chunked and the :term:`duck array` (e.g. a ``pint`` quantity) should wrap the new ``dask`` array; changing the chunk sizes works however. Extensions using duck arrays ---------------------------- Whilst the features above allow many numpy-like array libraries to be used pretty seamlessly with xarray, it often also makes sense to use an interfacing package to make certain tasks easier. For example the `pint-xarray package `_ offers a custom ``.pint`` accessor (see :ref:`internals.accessors`) which provides convenient access to information stored within the wrapped array (e.g. ``.units`` and ``.magnitude``), and makes creating wrapped pint arrays (and especially xarray-wrapping-pint-wrapping-dask arrays) simpler for the user. We maintain a list of libraries extending ``xarray`` to make working with particular wrapped duck arrays easier. If you know of more that aren't on this list please raise an issue to add them! - `pint-xarray `_ - `cupy-xarray `_ - `cubed-xarray `_ xarray-2025.12.0/doc/user-guide/ecosystem.rst000066400000000000000000000253411511464676000207330ustar00rootroot00000000000000.. _ecosystem: Xarray related projects ----------------------- Below is a list of existing open source projects that build functionality upon xarray. See also section :ref:`internals` for more details on how to build xarray extensions. We also maintain the `xarray-contrib `_ GitHub organization as a place to curate projects that build upon xarray. Geosciences ~~~~~~~~~~~ - `aospy `_: Automated analysis and management of gridded climate data. - `argopy `_: xarray-based Argo data access, manipulation and visualisation for standard users as well as Argo experts. 
- `cf_xarray `_: Provides an accessor (DataArray.cf or Dataset.cf) that allows you to interpret Climate and Forecast metadata convention attributes present on xarray objects. - `climpred `_: Analysis of ensemble forecast models for climate prediction. - `geocube `_: Tool to convert geopandas vector data into rasterized xarray data. - `GeoWombat `_: Utilities for analysis of remotely sensed and gridded raster data at scale (easily tame Landsat, Sentinel, Quickbird, and PlanetScope). - `grib2io `_: Utility to work with GRIB2 files including an xarray backend, DASK support for parallel reading in open_mfdataset, lazy loading of data, editing of GRIB2 attributes and GRIB2IO DataArray attrs, and spatial interpolation and reprojection of GRIB2 messages and GRIB2IO Datasets/DataArrays for both grid to grid and grid to stations. - `gsw-xarray `_: a wrapper around `gsw `_ that adds CF compliant attributes when possible, units, name. - `infinite-diff `_: xarray-based finite-differencing, focused on gridded climate/meteorology data - `marc_analysis `_: Analysis package for CESM/MARC experiments and output. - `MetPy `_: A collection of tools in Python for reading, visualizing, and performing calculations with weather data. - `MPAS-Analysis `_: Analysis for simulations produced with Model for Prediction Across Scales (MPAS) components and the Accelerated Climate Model for Energy (ACME). - `OGGM `_: Open Global Glacier Model - `Oocgcm `_: Analysis of large gridded geophysical datasets - `Open Data Cube `_: Analysis toolkit of continental scale Earth Observation data from satellites. - `Pangaea `_: xarray extension for gridded land surface & weather model output). - `Pangeo `_: A community effort for big data geoscience in the cloud. - `PyGDX `_: Python 3 package for accessing data stored in GAMS Data eXchange (GDX) files. Also uses a custom subclass. - `pyinterp `_: Python 3 package for interpolating geo-referenced data used in the field of geosciences. - `pyXpcm `_: xarray-based Profile Classification Modelling (PCM), mostly for ocean data. - `Regionmask `_: plotting and creation of masks of spatial regions - `rioxarray `_: geospatial xarray extension powered by rasterio - `salem `_: Adds geolocalised subsetting, masking, and plotting operations to xarray's data structures via accessors. - `SatPy `_ : Library for reading and manipulating meteorological remote sensing data and writing it to various image and data file formats. - `SARXarray `_: xarray extension for reading and processing large Synthetic Aperture Radar (SAR) data stacks. - `shxarray `_: Convert, filter,and map geodesy related spherical harmonic representations of gravity and terrestrial water storage through an xarray extension. - `Spyfit `_: FTIR spectroscopy of the atmosphere - `windspharm `_: Spherical harmonic wind analysis in Python. - `wradlib `_: An Open Source Library for Weather Radar Data Processing. - `wrf-python `_: A collection of diagnostic and interpolation routines for use with output of the Weather Research and Forecasting (WRF-ARW) Model. - `xarray-eopf `_: An xarray backend implementation for opening ESA EOPF data products in Zarr format. - `xarray-regrid `_: xarray extension for regridding rectilinear data. - `xarray-simlab `_: xarray extension for computer model simulations. - `xarray-spatial `_: Numba-accelerated raster-based spatial processing tools (NDVI, curvature, zonal-statistics, proximity, hillshading, viewshed, etc.) - `xarray-topo `_: xarray extension for topographic analysis and modelling. 
- `xbpch `_: xarray interface for bpch files. - `xCDAT `_: An extension of xarray for climate data analysis on structured grids. - `xclim `_: A library for calculating climate science indices with unit handling built from xarray and dask. - `xESMF `_: Universal regridder for geospatial data. - `xgcm `_: Extends the xarray data model to understand finite volume grid cells (common in General Circulation Models) and provides interpolation and difference operations for such grids. - `xmitgcm `_: a python package for reading `MITgcm `_ binary MDS files into xarray data structures. - `xnemogcm `_: a package to read `NEMO `_ output files and add attributes to interface with xgcm. Machine Learning ~~~~~~~~~~~~~~~~ - `ArviZ `_: Exploratory analysis of Bayesian models, built on top of xarray. - `Darts `_: User-friendly modern machine learning for time series in Python. - `Elm `_: Parallel machine learning on xarray data structures - `sklearn-xarray (1) `_: Combines scikit-learn and xarray (1). - `sklearn-xarray (2) `_: Combines scikit-learn and xarray (2). - `xbatcher `_: Batch Generation from Xarray Datasets. Other domains ~~~~~~~~~~~~~ - `ptsa `_: EEG Time Series Analysis - `pycalphad `_: Computational Thermodynamics in Python - `pyomeca `_: Python framework for biomechanical analysis - `movement `_: A Python toolbox for analysing animal body movements Extend xarray capabilities ~~~~~~~~~~~~~~~~~~~~~~~~~~ - `Collocate `_: Collocate xarray trajectories in arbitrary physical dimensions - `eofs `_: EOF analysis in Python. - `hypothesis-gufunc `_: Extension to hypothesis. Makes it easy to write unit tests with xarray objects as input. - `ntv-pandas `_ : A tabular analyzer and a semantic, compact and reversible converter for multidimensional and tabular data - `nxarray `_: NeXus input/output capability for xarray. - `xarray-compare `_: xarray extension for data comparison. - `xarray-dataclasses `_: xarray extension for typed DataArray and Dataset creation. - `xarray_einstats `_: Statistics, linear algebra and einops for xarray - `xarray_extras `_: Advanced algorithms for xarray objects (e.g. integrations/interpolations). - `xeofs `_: PCA/EOF analysis and related techniques, integrated with xarray and Dask for efficient handling of large-scale data. - `xpublish `_: Publish Xarray Datasets via a Zarr compatible REST API. - `xrft `_: Fourier transforms for xarray data. - `xr-scipy `_: A lightweight scipy wrapper for xarray. - `X-regression `_: Multiple linear regression from Statsmodels library coupled with Xarray library. - `xskillscore `_: Metrics for verifying forecasts. - `xyzpy `_: Easily generate high dimensional data, including parallelization. - `xarray-lmfit `_: xarray extension for curve fitting using `lmfit `_. Visualization ~~~~~~~~~~~~~ - `datashader `_, `geoviews `_, `holoviews `_, : visualization packages for large data. - `hvplot `_ : A high-level plotting API for the PyData ecosystem built on HoloViews. - `psyplot `_: Interactive data visualization with python. - `xarray-leaflet `_: An xarray extension for tiled map plotting based on ipyleaflet. - `xtrude `_: An xarray extension for 3D terrain visualization based on pydeck. - `pyvista-xarray `_: xarray DataArray accessor for 3D visualization with `PyVista `_ and DataSet engines for reading VTK data formats. Non-Python projects ~~~~~~~~~~~~~~~~~~~ - `xframe `_: C++ data structures inspired by xarray. - `AxisArrays `_, `NamedArrays `_ and `YAXArrays.jl `_: similar data structures for Julia. 
More projects can be found at the `"xarray" Github topic `_. xarray-2025.12.0/doc/user-guide/groupby.rst000066400000000000000000000251331511464676000204060ustar00rootroot00000000000000.. currentmodule:: xarray .. _groupby: GroupBy: Group and Bin Data --------------------------- Often we want to bin or group data, produce statistics (mean, variance) on the groups, and then return a reduced data set. To do this, Xarray supports `"group by"`__ operations with the same API as pandas to implement the `split-apply-combine`__ strategy: __ https://pandas.pydata.org/pandas-docs/stable/groupby.html __ https://www.jstatsoft.org/v40/i01/paper - Split your data into multiple independent groups. - Apply some function to each group. - Combine your groups back into a single data object. Group by operations work on both :py:class:`Dataset` and :py:class:`DataArray` objects. Most of the examples focus on grouping by a single one-dimensional variable, although support for grouping over a multi-dimensional variable has recently been implemented. Note that for one-dimensional data, it is usually faster to rely on pandas' implementation of the same pipeline. .. tip:: `Install the flox package `_ to substantially improve the performance of GroupBy operations, particularly with dask. flox `extends Xarray's in-built GroupBy capabilities `_ by allowing grouping by multiple variables, and lazy grouping by dask arrays. If installed, Xarray will automatically use flox by default. Split ~~~~~ Let's create a simple example dataset: .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 3))}, coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ) arr = ds["foo"] ds If we groupby the name of a variable or coordinate in a dataset (we can also use a DataArray directly), we get back a ``GroupBy`` object: .. jupyter-execute:: ds.groupby("letters") This object works very similarly to a pandas GroupBy object. You can view the group indices with the ``groups`` attribute: .. jupyter-execute:: ds.groupby("letters").groups You can also iterate over groups in ``(label, group)`` pairs: .. jupyter-execute:: list(ds.groupby("letters")) You can index out a particular group: .. jupyter-execute:: ds.groupby("letters")["b"] To group by multiple variables, see :ref:`this section `. Binning ~~~~~~~ Sometimes you don't want to use all the unique values to determine the groups but instead want to "bin" the data into coarser groups. You could always create a customized coordinate, but xarray facilitates this via the :py:meth:`Dataset.groupby_bins` method. .. jupyter-execute:: x_bins = [0, 25, 50] ds.groupby_bins("x", x_bins).groups The binning is implemented via :func:`pandas.cut`, whose documentation details how the bins are assigned. As seen in the example above, by default, the bins are labeled with strings using set notation to precisely identify the bin limits. To override this behavior, you can specify the bin labels explicitly. Here we choose ``float`` labels which identify the bin centers: .. jupyter-execute:: x_bin_labels = [12.5, 37.5] ds.groupby_bins("x", x_bins, labels=x_bin_labels).groups Apply ~~~~~ To apply a function to each group, you can use the flexible :py:meth:`core.groupby.DatasetGroupBy.map` method. The resulting objects are automatically concatenated back together along the group axis: .. 
jupyter-execute:: def standardize(x): return (x - x.mean()) / x.std() arr.groupby("letters").map(standardize) GroupBy objects also have a :py:meth:`core.groupby.DatasetGroupBy.reduce` method and methods like :py:meth:`core.groupby.DatasetGroupBy.mean` as shortcuts for applying an aggregation function: .. jupyter-execute:: arr.groupby("letters").mean(dim="x") Using a groupby is thus also a convenient shortcut for aggregating over all dimensions *other than* the provided one: .. jupyter-execute:: ds.groupby("x").std(...) .. note:: We use an ellipsis (`...`) here to indicate we want to reduce over all other dimensions First and last ~~~~~~~~~~~~~~ There are two special aggregation operations that are currently only found on groupby objects: first and last. These provide the first or last example of values for group along the grouped dimension: .. jupyter-execute:: ds.groupby("letters").first(...) By default, they skip missing values (control this with ``skipna``). Grouped arithmetic ~~~~~~~~~~~~~~~~~~ GroupBy objects also support a limited set of binary arithmetic operations, as a shortcut for mapping over all unique labels. Binary arithmetic is supported for ``(GroupBy, Dataset)`` and ``(GroupBy, DataArray)`` pairs, as long as the dataset or data array uses the unique grouped values as one of its index coordinates. For example: .. jupyter-execute:: alt = arr.groupby("letters").mean(...) alt .. jupyter-execute:: ds.groupby("letters") - alt This last line is roughly equivalent to the following:: results = [] for label, group in ds.groupby('letters'): results.append(group - alt.sel(letters=label)) xr.concat(results, dim='x') .. _groupby.multidim: Multidimensional Grouping ~~~~~~~~~~~~~~~~~~~~~~~~~ Many datasets have a multidimensional coordinate variable (e.g. longitude) which is different from the logical grid dimensions (e.g. nx, ny). Such variables are valid under the `CF conventions`__. Xarray supports groupby operations over multidimensional coordinate variables: __ https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_two_dimensional_latitude_longitude_coordinate_variables .. jupyter-execute:: da = xr.DataArray( [[0, 1], [2, 3]], coords={ "lon": (["ny", "nx"], [[30, 40], [40, 50]]), "lat": (["ny", "nx"], [[10, 10], [20, 20]]), }, dims=["ny", "nx"], ) da .. jupyter-execute:: da.groupby("lon").sum(...) .. jupyter-execute:: da.groupby("lon").map(lambda x: x - x.mean(), shortcut=False) Because multidimensional groups have the ability to generate a very large number of bins, coarse-binning via :py:meth:`Dataset.groupby_bins` may be desirable: .. jupyter-execute:: da.groupby_bins("lon", [0, 45, 50]).sum() These methods group by ``lon`` values. It is also possible to groupby each cell in a grid, regardless of value, by stacking multiple dimensions, applying your function, and then unstacking the result: .. jupyter-execute:: stacked = da.stack(gridcell=["ny", "nx"]) stacked.groupby("gridcell").sum(...).unstack("gridcell") Alternatively, you can groupby both ``lat`` and ``lon`` at the :ref:`same time `. .. _groupby.groupers: Grouper Objects ~~~~~~~~~~~~~~~ Both ``groupby_bins`` and ``resample`` are specializations of the core ``groupby`` operation for binning, and time resampling. 
Many problems demand more complex GroupBy application: for example, grouping by multiple variables with a combination of categorical grouping, binning, and resampling; or more specializations like spatial resampling; or more complex time grouping like special handling of seasons, or the ability to specify custom seasons. To handle these use-cases and more, Xarray is evolving to providing an extension point using ``Grouper`` objects. .. tip:: See the `grouper design`_ doc for more detail on the motivation and design ideas behind Grouper objects. .. _grouper design: https://github.com/pydata/xarray/blob/main/design_notes/grouper_objects.md For now Xarray provides three specialized Grouper objects: 1. :py:class:`groupers.UniqueGrouper` for categorical grouping 2. :py:class:`groupers.BinGrouper` for binned grouping 3. :py:class:`groupers.TimeResampler` for resampling along a datetime coordinate These provide functionality identical to the existing ``groupby``, ``groupby_bins``, and ``resample`` methods. That is, .. code-block:: python ds.groupby("x") is identical to .. code-block:: python from xarray.groupers import UniqueGrouper ds.groupby(x=UniqueGrouper()) Similarly, .. code-block:: python ds.groupby_bins("x", bins=bins) is identical to .. code-block:: python from xarray.groupers import BinGrouper ds.groupby(x=BinGrouper(bins)) and .. code-block:: python ds.resample(time="ME") is identical to .. code-block:: python from xarray.groupers import TimeResampler ds.resample(time=TimeResampler("ME")) The :py:class:`groupers.UniqueGrouper` accepts an optional ``labels`` kwarg that is not present in :py:meth:`DataArray.groupby` or :py:meth:`Dataset.groupby`. Specifying ``labels`` is required when grouping by a lazy array type (e.g. dask or cubed). The ``labels`` are used to construct the output coordinate (say for a reduction), and aggregations will only be run over the specified labels. You may use ``labels`` to also specify the ordering of groups to be used during iteration. The order will be preserved in the output. .. _groupby.multiple: Grouping by multiple variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use grouper objects to group by multiple dimensions: .. jupyter-execute:: from xarray.groupers import UniqueGrouper da.groupby(["lat", "lon"]).sum() The above is sugar for using ``UniqueGrouper`` objects directly: .. jupyter-execute:: da.groupby(lat=UniqueGrouper(), lon=UniqueGrouper()).sum() Different groupers can be combined to construct sophisticated GroupBy operations. .. jupyter-execute:: from xarray.groupers import BinGrouper ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Time Grouping and Resampling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. seealso:: See :ref:`resampling`. Shuffling ~~~~~~~~~ Shuffling is a generalization of sorting a DataArray or Dataset by another DataArray, named ``label`` for example, that follows from the idea of grouping by ``label``. Shuffling reorders the DataArray or the DataArrays in a Dataset such that all members of a group occur sequentially. For example, Shuffle the object using either :py:class:`DatasetGroupBy` or :py:class:`DataArrayGroupBy` as appropriate. .. jupyter-execute:: da = xr.DataArray( dims="x", data=[1, 2, 3, 4, 5, 6], coords={"label": ("x", "a b c a b c".split(" "))}, ) da.groupby("label").shuffle_to_chunks() For chunked array types (e.g. dask or cubed), shuffle may result in a more optimized communication pattern when compared to direct indexing by the appropriate indexer. 
Shuffling also makes GroupBy operations on chunked arrays an embarrassingly parallel problem, and may significantly improve workloads that use :py:meth:`DatasetGroupBy.map` or :py:meth:`DataArrayGroupBy.map`.

xarray-2025.12.0/doc/user-guide/hierarchical-data.rst000066400000000000000000000760451511464676000222520ustar00rootroot00000000000000

.. _userguide.hierarchical-data:

Hierarchical data
=================

.. jupyter-execute::
    :hide-code:
    :hide-output:

    import numpy as np
    import pandas as pd
    import xarray as xr

    np.random.seed(123456)
    np.set_printoptions(threshold=10)

    %xmode minimal

.. _why:

Why Hierarchical Data?
----------------------

Many real-world datasets are composed of multiple differing components, and it can often be useful to think of these in terms of a hierarchy of related groups of data.
Examples of data which one might want to organise in a grouped or hierarchical manner include:

- Simulation data at multiple resolutions,
- Observational data about the same system but from multiple different types of sensors,
- Mixed experimental and theoretical data,
- A systematic study recording the same experiment but with different parameters,
- Heterogeneous data, such as demographic and meteorological data,

or even any combination of the above.

Often datasets like this cannot easily fit into a single :py:class:`~xarray.Dataset` object, or are more usefully thought of as groups of related :py:class:`~xarray.Dataset` objects.
For this purpose we provide the :py:class:`xarray.DataTree` class.

This page explains in detail how to understand and use the different features of the :py:class:`~xarray.DataTree` class for your own hierarchical data needs.

.. _node relationships:

Node Relationships
------------------

.. _creating a family tree:

Creating a Family Tree
~~~~~~~~~~~~~~~~~~~~~~

The three main ways of creating a :py:class:`~xarray.DataTree` object are described briefly in :ref:`creating a datatree`.
Here we go into more detail about how to create a tree node-by-node, using a famous family tree from the Simpsons cartoon as an example.

Let's start by defining nodes representing the two siblings, Bart and Lisa Simpson:

.. jupyter-execute::

    bart = xr.DataTree(name="Bart")
    lisa = xr.DataTree(name="Lisa")

Each of these node objects knows its own :py:class:`~xarray.DataTree.name`, but they currently have no relationship to one another.
We can connect them by creating another node representing a common parent, Homer Simpson:

.. jupyter-execute::

    homer = xr.DataTree(name="Homer", children={"Bart": bart, "Lisa": lisa})

Here we set the children of Homer in the node's constructor.

We now have a small family tree where we can see how these individual Simpson family members are related to one another:

.. jupyter-execute::

    print(homer)

.. note::

    We use ``print()`` above to show the compact tree hierarchy.
    :py:class:`~xarray.DataTree` objects also have an interactive HTML representation that is enabled by default in editors such as JupyterLab and VSCode.
    The HTML representation is especially helpful for larger trees and exploring new datasets, as it allows you to expand and collapse nodes.
    If you prefer the text representations you can also set ``xr.set_options(display_style="text")``.

.. Comment:: may remove note and print()s after upstream theme changes https://github.com/pydata/pydata-sphinx-theme/pull/2187

The nodes representing Bart and Lisa are now connected - we can confirm their sibling rivalry by examining the :py:class:`~xarray.DataTree.siblings` property:

..
jupyter-execute:: list(homer["Bart"].siblings) But oops, we forgot Homer's third daughter, Maggie! Let's add her by updating Homer's :py:class:`~xarray.DataTree.children` property to include her: .. jupyter-execute:: maggie = xr.DataTree(name="Maggie") homer.children = {"Bart": bart, "Lisa": lisa, "Maggie": maggie} print(homer) Let's check that Maggie knows who her Dad is: .. jupyter-execute:: maggie.parent.name That's good - updating the properties of our nodes does not break the internal consistency of our tree, as changes of parentage are automatically reflected on both nodes. These children obviously have another parent, Marge Simpson, but :py:class:`~xarray.DataTree` nodes can only have a maximum of one parent. Genealogical `family trees are not even technically trees `_ in the mathematical sense - the fact that distant relatives can mate makes them directed acyclic graphs. Trees of :py:class:`~xarray.DataTree` objects cannot represent this. Homer is currently listed as having no parent (the so-called "root node" of this tree), but we can update his :py:class:`~xarray.DataTree.parent` property: .. jupyter-execute:: abe = xr.DataTree(name="Abe") abe.children = {"Homer": homer} Abe is now the "root" of this tree, which we can see by examining the :py:class:`~xarray.DataTree.root` property of any node in the tree .. jupyter-execute:: maggie.root.name We can see the whole tree by printing Abe's node or just part of the tree by printing Homer's node: .. jupyter-execute:: print(abe) .. jupyter-execute:: print(abe["Homer"]) In episode 28, Abe Simpson reveals that he had another son, Herbert "Herb" Simpson. We can add Herbert to the family tree without displacing Homer by :py:meth:`~xarray.DataTree.assign`-ing another child to Abe: .. jupyter-execute:: herbert = xr.DataTree(name="Herb") abe = abe.assign({"Herbert": herbert}) print(abe) .. jupyter-execute:: print(abe["Herbert"].name) print(herbert.name) .. note:: This example shows a subtlety - the returned tree has Homer's brother listed as ``"Herbert"``, but the original node was named "Herb". Not only are names overridden when stored as keys like this, but the new node is a copy, so that the original node that was referenced is unchanged (i.e. ``herbert.name == "Herb"`` still). In other words, nodes are copied into trees, not inserted into them. This is intentional, and mirrors the behaviour when storing named :py:class:`~xarray.DataArray` objects inside datasets. Certain manipulations of our tree are forbidden, if they would create an inconsistent result. In episode 51 of the show Futurama, Philip J. Fry travels back in time and accidentally becomes his own Grandfather. If we try similar time-travelling hijinks with Homer, we get a :py:class:`~xarray.InvalidTreeError` raised: .. jupyter-execute:: :raises: abe["Homer"].children = {"Abe": abe} .. _evolutionary tree: Ancestry in an Evolutionary Tree ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Let's use a different example of a tree to discuss more complex relationships between nodes - the phylogenetic tree, or tree of life. .. 
jupyter-execute:: vertebrates = xr.DataTree.from_dict( { "/Sharks": None, "/Bony Skeleton/Ray-finned Fish": None, "/Bony Skeleton/Four Limbs/Amphibians": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Primates": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Rodents & Rabbits": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Dinosaurs": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Birds": None, }, name="Vertebrae", ) primates = vertebrates["/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Primates"] dinosaurs = vertebrates[ "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Dinosaurs" ] We have used the :py:meth:`~xarray.DataTree.from_dict` constructor method as a preferred way to quickly create a whole tree, and :ref:`filesystem paths` (to be explained shortly) to select two nodes of interest. .. jupyter-execute:: print(vertebrates) This tree shows various families of species, grouped by their common features (making it technically a `"Cladogram" `_, rather than an evolutionary tree). Here both the species and the features used to group them are represented by :py:class:`~xarray.DataTree` node objects - there is no distinction in types of node. We can however get a list of only the nodes we used to represent species by using the fact that all those nodes have no children - they are "leaf nodes". We can check if a node is a leaf with :py:meth:`~xarray.DataTree.is_leaf`, and get a list of all leaves with the :py:class:`~xarray.DataTree.leaves` property: .. jupyter-execute:: print(primates.is_leaf) [node.name for node in vertebrates.leaves] Pretending that this is a true evolutionary tree for a moment, we can find the features of the evolutionary ancestors (so-called "ancestor" nodes), the distinguishing feature of the common ancestor of all vertebrate life (the root node), and even the distinguishing feature of the common ancestor of any two species (the common ancestor of two nodes): .. jupyter-execute:: print([node.name for node in reversed(primates.parents)]) print(primates.root.name) print(primates.find_common_ancestor(dinosaurs).name) We can only find a common ancestor between two nodes that lie in the same tree. If we try to find the common evolutionary ancestor between primates and an Alien species that has no relationship to Earth's evolutionary tree, an error will be raised. .. jupyter-execute:: :raises: alien = xr.DataTree(name="Xenomorph") primates.find_common_ancestor(alien) .. _navigating trees: Navigating Trees ---------------- There are various ways to access the different nodes in a tree. Properties ~~~~~~~~~~ We can navigate trees using the :py:class:`~xarray.DataTree.parent` and :py:class:`~xarray.DataTree.children` properties of each node, for example: .. jupyter-execute:: lisa.parent.children["Bart"].name but there are also more convenient ways to access nodes. Dictionary-like interface ~~~~~~~~~~~~~~~~~~~~~~~~~ Children are stored on each node as a key-value mapping from name to child node. They can be accessed and altered via the :py:class:`~xarray.DataTree.__getitem__` and :py:class:`~xarray.DataTree.__setitem__` syntax. In general :py:class:`~xarray.DataTree.DataTree` objects support almost the entire set of dict-like methods, including :py:meth:`~xarray.DataTree.keys`, :py:class:`~xarray.DataTree.values`, :py:class:`~xarray.DataTree.items`, :py:meth:`~xarray.DataTree.__delitem__` and :py:meth:`~xarray.DataTree.update`. .. 
jupyter-execute:: print(vertebrates["Bony Skeleton"]["Ray-finned Fish"]) Note that the dict-like interface combines access to child :py:class:`~xarray.DataTree` nodes and stored :py:class:`~xarray.DataArrays`, so if we have a node that contains both children and data, calling :py:meth:`~xarray.DataTree.keys` will list both names of child nodes and names of data variables: .. jupyter-execute:: dt = xr.DataTree( dataset=xr.Dataset({"foo": 0, "bar": 1}), children={"a": xr.DataTree(), "b": xr.DataTree()}, ) print(dt) list(dt.keys()) This also means that the names of variables and of child nodes must be different to one another. Attribute-like access ~~~~~~~~~~~~~~~~~~~~~ You can also select both variables and child nodes through dot indexing .. jupyter-execute:: print(dt.foo) print(dt.a) .. _filesystem paths: Filesystem-like Paths ~~~~~~~~~~~~~~~~~~~~~ Hierarchical trees can be thought of as analogous to file systems. Each node is like a directory, and each directory can contain both more sub-directories and data. .. note:: Future development will allow you to make the filesystem analogy concrete by using :py:func:`~xarray.DataTree.open_mfdatatree` or :py:func:`~xarray.DataTree.save_mfdatatree`. (`See related issue in GitHub `_) Datatree objects support a syntax inspired by unix-like filesystems, where the "path" to a node is specified by the keys of each intermediate node in sequence, separated by forward slashes. This is an extension of the conventional dictionary ``__getitem__`` syntax to allow navigation across multiple levels of the tree. Like with filepaths, paths within the tree can either be relative to the current node, e.g. .. jupyter-execute:: print(abe["Homer/Bart"].name) print(abe["./Homer/Bart"].name) # alternative syntax or relative to the root node. A path specified from the root (as opposed to being specified relative to an arbitrary node in the tree) is sometimes also referred to as a `"fully qualified name" `_, or as an "absolute path". The root node is referred to by ``"/"``, so the path from the root node to its grand-child would be ``"/child/grandchild"``, e.g. .. jupyter-execute:: # access lisa's sibling by a relative path. print(lisa["../Bart"]) # or from absolute path print(lisa["/Homer/Bart"]) Relative paths between nodes also support the ``"../"`` syntax to mean the parent of the current node. We can use this with ``__setitem__`` to add a missing entry to our evolutionary tree, but add it relative to a more familiar node of interest: .. jupyter-execute:: primates["../../Two Fenestrae/Crocodiles"] = xr.DataTree() print(vertebrates) Given two nodes in a tree, we can also find their relative path: .. jupyter-execute:: bart.relative_to(lisa) You can use this filepath feature to build a nested tree from a dictionary of filesystem-like paths and corresponding :py:class:`~xarray.Dataset` objects in a single step. If we have a dictionary where each key is a valid path, and each value is either valid data or ``None``, we can construct a complex tree quickly using the alternative constructor :py:meth:`~xarray.DataTree.from_dict()`: .. jupyter-execute:: d = { "/": xr.Dataset({"foo": "orange"}), "/a": xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}), "/a/b": xr.Dataset({"zed": np.nan}), "a/c/d": None, } dt = xr.DataTree.from_dict(d) print(dt) .. note:: Notice that using the path-like syntax will also create any intermediate empty nodes necessary to reach the end of the specified path (i.e. the node labelled ``"/a/c"`` in this case.) 
This is to help avoid lots of redundant entries when creating deeply-nested trees using :py:meth:`xarray.DataTree.from_dict`. .. _iterating over trees: Iterating over trees ~~~~~~~~~~~~~~~~~~~~ You can iterate over every node in a tree using the subtree :py:class:`~xarray.DataTree.subtree` property. This returns an iterable of nodes, which yields them in depth-first order. .. jupyter-execute:: for node in vertebrates.subtree: print(node.path) Similarly, :py:class:`~xarray.DataTree.subtree_with_keys` returns an iterable of relative paths and corresponding nodes. A very useful pattern is to iterate over :py:class:`~xarray.DataTree.subtree_with_keys` to manipulate nodes however you wish, then rebuild a new tree using :py:meth:`xarray.DataTree.from_dict()`. For example, we could keep only the nodes containing data by looping over all nodes, checking if they contain any data using :py:class:`~xarray.DataTree.has_data`, then rebuilding a new tree using only the paths of those nodes: .. jupyter-execute:: non_empty_nodes = { path: node.dataset for path, node in dt.subtree_with_keys if node.has_data } print(xr.DataTree.from_dict(non_empty_nodes)) You can see this tree is similar to the ``dt`` object above, except that it is missing the empty nodes ``a/c`` and ``a/c/d``. (If you want to keep the name of the root node, you will need to add the ``name`` kwarg to :py:class:`~xarray.DataTree.from_dict`, i.e. ``DataTree.from_dict(non_empty_nodes, name=dt.name)``.) .. _manipulating trees: Manipulating Trees ------------------ Subsetting Tree Nodes ~~~~~~~~~~~~~~~~~~~~~ We can subset our tree to select only nodes of interest in various ways. Similarly to on a real filesystem, matching nodes by common patterns in their paths is often useful. We can use :py:meth:`xarray.DataTree.match` for this: .. jupyter-execute:: dt = xr.DataTree.from_dict( { "/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None, } ) result = dt.match("*/B") print(result) We can also subset trees by the contents of the nodes. :py:meth:`xarray.DataTree.filter` retains only the nodes of a tree that meet a certain condition. For example, we could recreate the Simpson's family tree with the ages of each individual, then filter for only the adults: First let's recreate the tree but with an ``age`` data variable in every node: .. jupyter-execute:: simpsons = xr.DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) print(simpsons) Now let's filter out the minors: .. jupyter-execute:: print(simpsons.filter(lambda node: node["age"] > 18)) The result is a new tree, containing only the nodes matching the condition. (Yes, under the hood :py:meth:`~xarray.DataTree.filter` is just syntactic sugar for the pattern we showed you in :ref:`iterating over trees` !) If you want to filter out empty nodes you can use :py:meth:`~xarray.DataTree.prune`. .. _Tree Contents: Tree Contents ------------- Hollow Trees ~~~~~~~~~~~~ A concept that can sometimes be useful is that of a "Hollow Tree", which means a tree with data stored only at the leaf nodes. This is useful because certain useful tree manipulation operations only make sense for hollow trees. You can check if a tree is a hollow tree by using the :py:class:`~xarray.DataTree.is_hollow` property. 
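For contrast with the Simpsons example below, here is a minimal sketch of a tree that *is* hollow, because data only appears at its leaf nodes (the intermediate ``"a"`` group is left empty):

.. code-block:: python

    hollow = xr.DataTree.from_dict(
        {
            "/a/b": xr.Dataset({"foo": 1}),
            "/a/c": xr.Dataset({"bar": 2}),
        }
    )
    hollow.is_hollow  # True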
We can see that the Simpson's family is not hollow because the data variable ``"age"`` is present at some nodes which have children (i.e. Abe and Homer). .. jupyter-execute:: simpsons.is_hollow .. _tree computation: Computation ----------- :py:class:`~xarray.DataTree` objects are also useful for performing computations, not just for organizing data. Operations and Methods on Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To show how applying operations across a whole tree at once can be useful, let's first create an example scientific dataset. .. jupyter-execute:: def time_stamps(n_samples, T): """Create an array of evenly-spaced time stamps""" return xr.DataArray( data=np.linspace(0, 2 * np.pi * T, n_samples), dims=["time"] ) def signal_generator(t, f, A, phase): """Generate an example electrical-like waveform""" return A * np.sin(f * t.data + phase) time_stamps1 = time_stamps(n_samples=15, T=1.5) time_stamps2 = time_stamps(n_samples=10, T=1.0) voltages = xr.DataTree.from_dict( { "/oscilloscope1": xr.Dataset( { "potential": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=0.5), ), "current": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=1), ), }, coords={"time": time_stamps1}, ), "/oscilloscope2": xr.Dataset( { "potential": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.2), ), "current": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.7), ), }, coords={"time": time_stamps2}, ), } ) print(voltages) Most xarray computation methods also exist as methods on datatree objects, so you can for example take the mean value of these two timeseries at once: .. jupyter-execute:: print(voltages.mean(dim="time")) This works by mapping the standard :py:meth:`xarray.Dataset.mean()` method over the dataset stored in each node of the tree one-by-one. The arguments passed to the method are used for every node, so the values of the arguments you pass might be valid for one node and invalid for another .. jupyter-execute:: :raises: voltages.isel(time=12) Notice that the error raised helpfully indicates which node of the tree the operation failed on. Arithmetic Methods on Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Arithmetic methods are also implemented, so you can e.g. add a scalar to every dataset in the tree at once. For example, we can advance the timeline of the Simpsons by a decade just by .. jupyter-execute:: print(simpsons + 10) See that the same change (fast-forwarding by adding 10 years to the age of each character) has been applied to every node. Mapping Custom Functions Over Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can map custom computation over each node in a tree using :py:meth:`xarray.DataTree.map_over_datasets`. You can map any function, so long as it takes :py:class:`xarray.Dataset` objects as one (or more) of the input arguments, and returns one (or more) xarray datasets. .. note:: Functions passed to :py:func:`~xarray.DataTree.map_over_datasets` cannot alter nodes in-place. Instead they must return new :py:class:`xarray.Dataset` objects. For example, we can define a function to calculate the Root Mean Square of a timeseries .. jupyter-execute:: def rms(signal): return np.sqrt(np.mean(signal**2)) Then calculate the RMS value of these signals: .. jupyter-execute:: print(voltages.map_over_datasets(rms)) .. _multiple trees: We can also use :py:func:`~xarray.map_over_datasets` to apply a function over the data in multiple trees, by passing the trees as positional arguments. 
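Before moving on to multiple trees, one more hedged sketch for mapping over a single tree (the ``detrend`` function and offset value are invented for illustration): extra positional arguments given to :py:meth:`~xarray.DataTree.map_over_datasets` are forwarded to the mapped function, which must still return a new :py:class:`xarray.Dataset` rather than modifying its input in place:

.. code-block:: python

    def detrend(ds, offset):
        # build and return a new Dataset; in-place modification is not supported
        return ds - offset

    print(voltages.map_over_datasets(detrend, 0.1))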
Operating on Multiple Trees --------------------------- The examples so far have involved mapping functions or methods over the nodes of a single tree, but we can generalize this to mapping functions over multiple trees at once. Iterating Over Multiple Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To iterate over the corresponding nodes in multiple trees, use :py:func:`~xarray.group_subtrees` instead of :py:class:`~xarray.DataTree.subtree_with_keys`. This combines well with :py:meth:`xarray.DataTree.from_dict()` to build a new tree: .. jupyter-execute:: dt1 = xr.DataTree.from_dict({"a": xr.Dataset({"x": 1}), "b": xr.Dataset({"x": 2})}) dt2 = xr.DataTree.from_dict( {"a": xr.Dataset({"x": 10}), "b": xr.Dataset({"x": 20})} ) result = {} for path, (node1, node2) in xr.group_subtrees(dt1, dt2): result[path] = node1.dataset + node2.dataset dt3 = xr.DataTree.from_dict(result) print(dt3) Alternatively, you apply a function directly to paired datasets at every node using :py:func:`xarray.map_over_datasets`: .. jupyter-execute:: dt3 = xr.map_over_datasets(lambda x, y: x + y, dt1, dt2) print(dt3) Comparing Trees for Isomorphism ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For it to make sense to map a single non-unary function over the nodes of multiple trees at once, each tree needs to have the same structure. Specifically two trees can only be considered similar, or "isomorphic", if the full paths to all of their descendent nodes are the same. Applying :py:func:`~xarray.group_subtrees` to trees with different structures raises :py:class:`~xarray.TreeIsomorphismError`: .. jupyter-execute:: :raises: tree = xr.DataTree.from_dict({"a": None, "a/b": None, "a/c": None}) simple_tree = xr.DataTree.from_dict({"a": None}) for _ in xr.group_subtrees(tree, simple_tree): ... We can explicitly also check if any two trees are isomorphic using the :py:meth:`~xarray.DataTree.isomorphic` method: .. jupyter-execute:: tree.isomorphic(simple_tree) Corresponding tree nodes do not need to have the same data in order to be considered isomorphic: .. jupyter-execute:: tree_with_data = xr.DataTree.from_dict({"a": xr.Dataset({"foo": 1})}) simple_tree.isomorphic(tree_with_data) They also do not need to define child nodes in the same order: .. jupyter-execute:: reordered_tree = xr.DataTree.from_dict({"a": None, "a/c": None, "a/b": None}) tree.isomorphic(reordered_tree) Arithmetic Between Multiple Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Arithmetic operations like multiplication are binary operations, so as long as we have two isomorphic trees, we can do arithmetic between them. .. jupyter-execute:: currents = xr.DataTree.from_dict( { "/oscilloscope1": xr.Dataset( { "current": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=1), ), }, coords={"time": time_stamps1}, ), "/oscilloscope2": xr.Dataset( { "current": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.7), ), }, coords={"time": time_stamps2}, ), } ) print(currents) .. jupyter-execute:: currents.isomorphic(voltages) We could use this feature to quickly calculate the electrical power in our signal, P=IV. .. jupyter-execute:: power = currents * voltages print(power) .. _hierarchical-data.alignment-and-coordinate-inheritance: Alignment and Coordinate Inheritance ------------------------------------ .. _data-alignment: Data Alignment ~~~~~~~~~~~~~~ The data in different datatree nodes are not totally independent. In particular dimensions (and indexes) in child nodes must be exactly aligned with those in their parent nodes. 
Exact alignment means that shared dimensions must be the same length, and indexes along those dimensions must be equal. .. note:: If you were a previous user of the prototype `xarray-contrib/datatree `_ package, this is different from what you're used to! In that package the data model was that the data stored in each node actually was completely unrelated. The data model is now slightly stricter. This allows us to provide features like :ref:`coordinate-inheritance`. To demonstrate, let's first generate some example datasets which are not aligned with one another: .. jupyter-execute:: # (drop the attributes just to make the printed representation shorter) ds = xr.tutorial.open_dataset("air_temperature").drop_attrs() ds_daily = ds.resample(time="D").mean("time") ds_weekly = ds.resample(time="W").mean("time") ds_monthly = ds.resample(time="ME").mean("time") These datasets have different lengths along the ``time`` dimension, and are therefore not aligned along that dimension. .. jupyter-execute:: print(ds_daily.sizes) print(ds_weekly.sizes) print(ds_monthly.sizes) We cannot store these non-alignable variables on a single :py:class:`~xarray.Dataset` object, because they do not exactly align: .. jupyter-execute:: :raises: xr.align(ds_daily, ds_weekly, ds_monthly, join="exact") But we :ref:`previously said ` that multi-resolution data is a good use case for :py:class:`~xarray.DataTree`, so surely we should be able to store these in a single :py:class:`~xarray.DataTree`? If we first try to create a :py:class:`~xarray.DataTree` with these different-length time dimensions present in both parents and children, we will still get an alignment error: .. jupyter-execute:: :raises: xr.DataTree.from_dict({"daily": ds_daily, "daily/weekly": ds_weekly}) This is because DataTree checks that data in child nodes align exactly with their parents. .. note:: This requirement of aligned dimensions is similar to netCDF's concept of `inherited dimensions `_, as in netCDF-4 files dimensions are `visible to all child groups `_. This alignment check is performed up through the tree, all the way to the root, and so is therefore equivalent to requiring that this :py:func:`~xarray.align` command succeeds: .. code:: python xr.align(child.dataset, *(parent.dataset for parent in child.parents), join="exact") To represent our unalignable data in a single :py:class:`~xarray.DataTree`, we must instead place all variables which are a function of these different-length dimensions into nodes that are not direct descendents of one another, e.g. organize them as siblings. .. jupyter-execute:: dt = xr.DataTree.from_dict( {"daily": ds_daily, "weekly": ds_weekly, "monthly": ds_monthly} ) print(dt) Now we have a valid :py:class:`~xarray.DataTree` structure which contains all the data at each different time frequency, stored in a separate group. This is a useful way to organise our data because we can still operate on all the groups at once. For example we can extract all three timeseries at a specific lat-lon location: .. jupyter-execute:: dt_sel = dt.sel(lat=75, lon=300) print(dt_sel) or compute the standard deviation of each timeseries to find out how it varies with sampling frequency: .. jupyter-execute:: dt_std = dt.std(dim="time") print(dt_std) .. _coordinate-inheritance: Coordinate Inheritance ~~~~~~~~~~~~~~~~~~~~~~ Notice that in the trees we constructed above there is some redundancy - the ``lat`` and ``lon`` variables appear in each sibling group, but are identical across the groups. .. 
jupyter-execute:: dt We can use "Coordinate Inheritance" to define them only once in a parent group and remove this redundancy, whilst still being able to access those coordinate variables from the child groups. .. note:: This is also a new feature relative to the prototype `xarray-contrib/datatree `_ package. Let's instead place only the time-dependent variables in the child groups, and put the non-time-dependent ``lat`` and ``lon`` variables in the parent (root) group: .. jupyter-execute:: dt = xr.DataTree.from_dict( { "/": ds.drop_dims("time"), "daily": ds_daily.drop_vars(["lat", "lon"]), "weekly": ds_weekly.drop_vars(["lat", "lon"]), "monthly": ds_monthly.drop_vars(["lat", "lon"]), } ) dt This is preferred to the previous representation because it now makes it clear that all of these datasets share common spatial grid coordinates. Defining the common coordinates just once also ensures that the spatial coordinates for each group cannot become out of sync with one another during operations. We can still access the coordinates defined in the parent groups from any of the child groups as if they were actually present on the child groups: .. jupyter-execute:: dt.daily.coords .. jupyter-execute:: dt["daily/lat"] As we can still access them, we say that the ``lat`` and ``lon`` coordinates in the child groups have been "inherited" from their common parent group. If we print just one of the child nodes, it will still display inherited coordinates, but explicitly mark them as such: .. jupyter-execute:: dt["/daily"] This helps to differentiate which variables are defined on the datatree node that you are currently looking at, and which were defined somewhere above it. We can also still perform all the same operations on the whole tree: .. jupyter-execute:: dt.sel(lat=[75], lon=[300]) .. jupyter-execute:: dt.std(dim="time") xarray-2025.12.0/doc/user-guide/index.rst000066400000000000000000000015441511464676000200260ustar00rootroot00000000000000########### User Guide ########### In this user guide, you will find detailed descriptions and examples that describe many common tasks that you can accomplish with Xarray. .. toctree:: :maxdepth: 2 :caption: Data model terminology data-structures hierarchical-data dask .. toctree:: :maxdepth: 2 :caption: Core operations indexing combining reshaping computation groupby interpolation .. toctree:: :maxdepth: 2 :caption: I/O io complex-numbers .. toctree:: :maxdepth: 2 :caption: Visualization plotting .. toctree:: :maxdepth: 2 :caption: Interoperability pandas duckarrays ecosystem .. toctree:: :maxdepth: 2 :caption: Domain-specific workflows time-series weather-climate .. toctree:: :maxdepth: 2 :caption: Options and Testing options testing xarray-2025.12.0/doc/user-guide/indexing.rst000066400000000000000000000670171511464676000205330ustar00rootroot00000000000000.. _indexing: Indexing and selecting data =========================== .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal Xarray offers extremely flexible indexing routines that combine the best features of NumPy and pandas for data selection. The most basic way to access elements of a :py:class:`~xarray.DataArray` object is to use Python's ``[]`` syntax, such as ``array[i, j]``, where ``i`` and ``j`` are both integers. As xarray objects can store coordinates corresponding to each dimension of an array, label-based indexing similar to ``pandas.DataFrame.loc`` is also possible. 
In label-based indexing, the element position ``i`` is automatically
looked up from the coordinate values.
Dimensions of xarray objects have names, so you can also look up the
dimensions by name, instead of remembering their positional order.

Quick overview
--------------

In total, xarray supports four different kinds of indexing, as described
below and summarized in this table:

.. |br| raw:: html

   <br/>
+------------------+--------------+---------------------------------+--------------------------------+ | Dimension lookup | Index lookup | ``DataArray`` syntax | ``Dataset`` syntax | +==================+==============+=================================+================================+ | Positional | By integer | ``da[:, 0]`` | *not available* | +------------------+--------------+---------------------------------+--------------------------------+ | Positional | By label | ``da.loc[:, 'IA']`` | *not available* | +------------------+--------------+---------------------------------+--------------------------------+ | By name | By integer | ``da.isel(space=0)`` or |br| | ``ds.isel(space=0)`` or |br| | | | | ``da[dict(space=0)]`` | ``ds[dict(space=0)]`` | +------------------+--------------+---------------------------------+--------------------------------+ | By name | By label | ``da.sel(space='IA')`` or |br| | ``ds.sel(space='IA')`` or |br| | | | | ``da.loc[dict(space='IA')]`` | ``ds.loc[dict(space='IA')]`` | +------------------+--------------+---------------------------------+--------------------------------+ More advanced indexing is also possible for all the methods by supplying :py:class:`~xarray.DataArray` objects as indexer. See :ref:`vectorized_indexing` for the details. Positional indexing ------------------- Indexing a :py:class:`~xarray.DataArray` directly works (mostly) just like it does for numpy arrays, except that the returned object is always another DataArray: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) da[:2] .. jupyter-execute:: da[0, 0] .. jupyter-execute:: da[:, [2, 1]] Attributes are persisted in all indexing operations. .. warning:: Positional indexing deviates from the NumPy when indexing with multiple arrays like ``da[[0, 1], [0, 1]]``, as described in :ref:`vectorized_indexing`. Xarray also supports label-based indexing, just like pandas. Because we use a :py:class:`pandas.Index` under the hood, label based indexing is very fast. To do label based indexing, use the :py:attr:`~xarray.DataArray.loc` attribute: .. jupyter-execute:: da.loc["2000-01-01":"2000-01-02", "IA"] In this example, the selected is a subpart of the array in the range '2000-01-01':'2000-01-02' along the first coordinate ``time`` and with 'IA' value from the second coordinate ``space``. You can perform any of the `label indexing operations supported by pandas`__, including indexing with individual, slices and lists/arrays of labels, as well as indexing with boolean arrays. Like pandas, label based indexing in xarray is *inclusive* of both the start and stop bounds. __ https://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-label Setting values with label based indexing is also supported: .. jupyter-execute:: da.loc["2000-01-01", ["IL", "IN"]] = -10 da Indexing with dimension names ----------------------------- With the dimension names, we do not have to rely on dimension order and can use them explicitly to slice data. There are two ways to do this: 1. Use the :py:meth:`~xarray.DataArray.sel` and :py:meth:`~xarray.DataArray.isel` convenience methods: .. jupyter-execute:: # index by integer array indices da.isel(space=0, time=slice(None, 2)) .. jupyter-execute:: # index by dimension coordinate labels da.sel(time=slice("2000-01-01", "2000-01-02")) 2. Use a dictionary as the argument for array positional or label based array indexing: .. 
jupyter-execute:: # index by integer array indices da[dict(space=0, time=slice(None, 2))] .. jupyter-execute:: # index by dimension coordinate labels da.loc[dict(time=slice("2000-01-01", "2000-01-02"))] The arguments to these methods can be any objects that could index the array along the dimension given by the keyword, e.g., labels for an individual value, :py:class:`Python slice` objects or 1-dimensional arrays. .. note:: We would love to be able to do indexing with labeled dimension names inside brackets, but unfortunately, `Python does not yet support indexing with keyword arguments`__ like ``da[space=0]`` __ https://legacy.python.org/dev/peps/pep-0472/ .. _nearest neighbor lookups: Nearest neighbor lookups ------------------------ The label based selection methods :py:meth:`~xarray.Dataset.sel`, :py:meth:`~xarray.Dataset.reindex` and :py:meth:`~xarray.Dataset.reindex_like` all support ``method`` and ``tolerance`` keyword argument. The method parameter allows for enabling nearest neighbor (inexact) lookups by use of the methods ``'pad'``, ``'backfill'`` or ``'nearest'``: .. jupyter-execute:: da = xr.DataArray([1, 2, 3], [("x", [0, 1, 2])]) da.sel(x=[1.1, 1.9], method="nearest") .. jupyter-execute:: da.sel(x=0.1, method="backfill") .. jupyter-execute:: da.reindex(x=[0.5, 1, 1.5, 2, 2.5], method="pad") Tolerance limits the maximum distance for valid matches with an inexact lookup: .. jupyter-execute:: da.reindex(x=[1.1, 1.5], method="nearest", tolerance=0.2) The method parameter is not yet supported if any of the arguments to ``.sel()`` is a ``slice`` object: .. jupyter-execute:: :raises: da.sel(x=slice(1, 3), method="nearest") However, you don't need to use ``method`` to do inexact slicing. Slicing already returns all values inside the range (inclusive), as long as the index labels are monotonic increasing: .. jupyter-execute:: da.sel(x=slice(0.9, 3.1)) Indexing axes with monotonic decreasing labels also works, as long as the ``slice`` or ``.loc`` arguments are also decreasing: .. jupyter-execute:: reversed_da = da[::-1] reversed_da.loc[3.1:0.9] .. note:: If you want to interpolate along coordinates rather than looking up the nearest neighbors, use :py:meth:`~xarray.Dataset.interp` and :py:meth:`~xarray.Dataset.interp_like`. See :ref:`interpolation ` for the details. Dataset indexing ---------------- We can also use these methods to index all variables in a dataset simultaneously, returning a new dataset: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) ds = da.to_dataset(name="foo") ds.isel(space=[0], time=[0]) .. jupyter-execute:: ds.sel(time="2000-01-01") Positional indexing on a dataset is not supported because the ordering of dimensions in a dataset is somewhat ambiguous (it can vary between different arrays). However, you can do normal indexing with dimension names: .. jupyter-execute:: ds[dict(space=[0], time=[0])] .. jupyter-execute:: ds.loc[dict(time="2000-01-01")] Dropping labels and dimensions ------------------------------ The :py:meth:`~xarray.Dataset.drop_sel` method returns a new object with the listed index labels along a dimension dropped: .. jupyter-execute:: ds.drop_sel(space=["IN", "IL"]) ``drop_sel`` is both a ``Dataset`` and ``DataArray`` method. Use :py:meth:`~xarray.Dataset.drop_dims` to drop a full dimension from a Dataset. Any variables with these dimensions are also dropped: .. jupyter-execute:: ds.drop_dims("time") .. 
_masking with where:

Masking with ``where``
----------------------

Indexing methods on xarray objects generally return a subset of the original data.
However, it is sometimes useful to select an object with the same shape as the
original data, but with some elements masked.
To do this type of selection in xarray, use :py:meth:`~xarray.DataArray.where`:

.. jupyter-execute::

    da = xr.DataArray(np.arange(16).reshape(4, 4), dims=["x", "y"])
    da.where(da.x + da.y < 4)

This is particularly useful for ragged indexing of multi-dimensional data,
e.g., to apply a 2D mask to an image. Note that ``where`` follows all the
usual xarray broadcasting and alignment rules for binary operations (e.g.,
``+``) between the object being indexed and the condition, as described in
:ref:`compute`:

.. jupyter-execute::

    da.where(da.y < 2)

By default ``where`` maintains the original size of the data. For cases
where the selected data size is much smaller than the original data, use of
the option ``drop=True`` clips coordinate elements that are fully masked:

.. jupyter-execute::

    da.where(da.y < 2, drop=True)

.. _selecting values with isin:

Selecting values with ``isin``
------------------------------

To check whether elements of an xarray object contain a single object, you can
compare with the equality operator ``==`` (e.g., ``arr == 3``). To check
multiple values, use :py:meth:`~xarray.DataArray.isin`:

.. jupyter-execute::

    da = xr.DataArray([1, 2, 3, 4, 5], dims=["x"])
    da.isin([2, 4])

:py:meth:`~xarray.DataArray.isin` works particularly well with
:py:meth:`~xarray.DataArray.where` to support indexing by arrays that are not
already labels of an array:

.. jupyter-execute::

    lookup = xr.DataArray([-1, -2, -3, -4, -5], dims=["x"])
    da.where(lookup.isin([-2, -4]), drop=True)

However, some caution is in order: when done repeatedly, this type of indexing
is significantly slower than using :py:meth:`~xarray.DataArray.sel`.

.. _vectorized_indexing:

Vectorized Indexing
-------------------

Like numpy and pandas, xarray supports indexing many array elements at once in a
vectorized manner.

If you only provide integers, slices, or unlabeled arrays (arrays without
dimension names, such as ``np.ndarray`` or ``list``, but not
:py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable`), indexing can be
understood as orthogonal.
Each indexer component selects independently along the corresponding dimension,
similar to how vector indexing works in Fortran or MATLAB, or after using the
:py:func:`numpy.ix_` helper:

.. jupyter-execute::

    da = xr.DataArray(
        np.arange(12).reshape((3, 4)),
        dims=["x", "y"],
        coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
    )
    da

.. jupyter-execute::

    da[[0, 2, 2], [1, 3]]

For more flexibility, you can supply :py:class:`~xarray.DataArray` objects
as indexers.
Dimensions on resultant arrays are given by the ordered union of the indexers'
dimensions:

.. jupyter-execute::

    ind_x = xr.DataArray([0, 1], dims=["x"])
    ind_y = xr.DataArray([0, 1], dims=["y"])
    da[ind_x, ind_y]  # orthogonal indexing

Slices or sequences/arrays without named dimensions are treated as if they have
the same dimension as the one being indexed along:

.. jupyter-execute::

    # Because [0, 1] is used to index along dimension 'x',
    # it is assumed to have dimension 'x'
    da[[0, 1], ind_x]

Furthermore, you can use multi-dimensional :py:class:`~xarray.DataArray` objects
as indexers, where the dimensions of the resultant array are also determined by
the indexers' dimensions:

..
jupyter-execute:: ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"]) da[ind] Similar to how `NumPy's advanced indexing`_ works, vectorized indexing for xarray is based on our :ref:`broadcasting rules `. See :ref:`indexing.rules` for the complete specification. .. _NumPy's advanced indexing: https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing Vectorized indexing also works with ``isel``, ``loc``, and ``sel``: .. jupyter-execute:: ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"]) da.isel(y=ind) # same as da[:, ind] .. jupyter-execute:: ind = xr.DataArray([["a", "b"], ["b", "a"]], dims=["a", "b"]) da.loc[:, ind] # same as da.sel(y=ind) These methods may also be applied to ``Dataset`` objects .. jupyter-execute:: ds = da.to_dataset(name="bar") ds.isel(x=xr.DataArray([0, 1, 2], dims=["points"])) Vectorized indexing may be used to extract information from the nearest grid cells of interest, for example, the nearest climate model grid cells to a collection specified weather station latitudes and longitudes. To trigger vectorized indexing behavior you will need to provide the selection dimensions with a new shared output dimension name. In the example below, the selections of the closest latitude and longitude are renamed to an output dimension named "points": .. jupyter-execute:: ds = xr.tutorial.open_dataset("air_temperature") # Define target latitude and longitude (where weather stations might be) target_lon = xr.DataArray([200, 201, 202, 205], dims="points") target_lat = xr.DataArray([31, 41, 42, 42], dims="points") # Retrieve data at the grid cells nearest to the target latitudes and longitudes da = ds["air"].sel(lon=target_lon, lat=target_lat, method="nearest") da .. tip:: If you are lazily loading your data from disk, not every form of vectorized indexing is supported (or if supported, may not be supported efficiently). You may find increased performance by loading your data into memory first, e.g., with :py:meth:`~xarray.Dataset.load`. .. note:: If an indexer is a :py:meth:`~xarray.DataArray`, its coordinates should not conflict with the selected subpart of the target array (except for the explicitly indexed dimensions with ``.loc``/``.sel``). Otherwise, ``IndexError`` will be raised. .. _assigning_values: Assigning values with indexing ------------------------------ To select and assign values to a portion of a :py:meth:`~xarray.DataArray` you can use indexing with ``.loc`` : .. jupyter-execute:: ds = xr.tutorial.open_dataset("air_temperature") # add an empty 2D dataarray ds["empty"] = xr.full_like(ds.air.mean("time"), fill_value=0) # modify one grid point using loc() ds["empty"].loc[dict(lon=260, lat=30)] = 100 # modify a 2D region using loc() lc = ds.coords["lon"] la = ds.coords["lat"] ds["empty"].loc[ dict(lon=lc[(lc > 220) & (lc < 260)], lat=la[(la > 20) & (la < 60)]) ] = 100 or :py:meth:`~xarray.where`: .. jupyter-execute:: # modify one grid point using xr.where() ds["empty"] = xr.where( (ds.coords["lat"] == 20) & (ds.coords["lon"] == 260), 100, ds["empty"] ) # or modify a 2D region using xr.where() mask = ( (ds.coords["lat"] > 20) & (ds.coords["lat"] < 60) & (ds.coords["lon"] > 220) & (ds.coords["lon"] < 260) ) ds["empty"] = xr.where(mask, 100, ds["empty"]) Vectorized indexing can also be used to assign values to xarray object. .. jupyter-execute:: da = xr.DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]}, ) da .. jupyter-execute:: da[0] = -1 # assignment with broadcasting da .. 
jupyter-execute:: ind_x = xr.DataArray([0, 1], dims=["x"]) ind_y = xr.DataArray([0, 1], dims=["y"]) da[ind_x, ind_y] = -2 # assign -2 to (ix, iy) = (0, 0) and (1, 1) da .. jupyter-execute:: da[ind_x, ind_y] += 100 # increment is also possible da Like ``numpy.ndarray``, value assignment sometimes works differently from what one may expect. .. jupyter-execute:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) ind = xr.DataArray([0, 0, 0], dims=["x"]) da[ind] -= 1 da Where the 0th element will be subtracted 1 only once. This is because ``v[0] = v[0] - 1`` is called three times, rather than ``v[0] = v[0] - 1 - 1 - 1``. See `Assigning values to indexed arrays`__ for the details. __ https://numpy.org/doc/stable/user/basics.indexing.html#assigning-values-to-indexed-arrays .. note:: Dask array does not support value assignment (see :ref:`dask` for the details). .. note:: Coordinates in both the left- and right-hand-side arrays should not conflict with each other. Otherwise, ``IndexError`` will be raised. .. warning:: Do not try to assign values when using any of the indexing methods ``isel`` or ``sel``:: # DO NOT do this da.isel(space=0) = 0 Instead, values can be assigned using dictionary-based indexing:: da[dict(space=0)] = 0 Assigning values with the chained indexing using ``.sel`` or ``.isel`` fails silently. .. jupyter-execute:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) # DO NOT do this da.isel(x=[0, 1, 2])[1] = -1 da You can also assign values to all variables of a :py:class:`Dataset` at once: .. jupyter-execute:: :stderr: ds_org = xr.tutorial.open_dataset("eraint_uvz").isel( latitude=slice(56, 59), longitude=slice(255, 258), level=0 ) # set all values to 0 ds = xr.zeros_like(ds_org) ds .. jupyter-execute:: # by integer ds[dict(latitude=2, longitude=2)] = 1 ds["u"] .. jupyter-execute:: ds["v"] .. jupyter-execute:: # by label ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = 100 ds["u"] .. jupyter-execute:: # dataset as new values new_dat = ds_org.loc[dict(latitude=48, longitude=[11.25, 12])] new_dat .. jupyter-execute:: ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = new_dat ds["u"] The dimensions can differ between the variables in the dataset, but all variables need to have at least the dimensions specified in the indexer dictionary. The new values must be either a scalar, a :py:class:`DataArray` or a :py:class:`Dataset` itself that contains all variables that also appear in the dataset to be modified. .. _more_advanced_indexing: More advanced indexing ----------------------- The use of :py:meth:`~xarray.DataArray` objects as indexers enables very flexible indexing. The following is an example of the pointwise indexing: .. jupyter-execute:: da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=["x", "y"]) da .. jupyter-execute:: da.isel(x=xr.DataArray([0, 1, 6], dims="z"), y=xr.DataArray([0, 1, 0], dims="z")) where three elements at ``(ix, iy) = ((0, 0), (1, 1), (6, 0))`` are selected and mapped along a new dimension ``z``. If you want to add a coordinate to the new dimension ``z``, you can supply a :py:class:`~xarray.DataArray` with a coordinate, .. jupyter-execute:: da.isel( x=xr.DataArray([0, 1, 6], dims="z", coords={"z": ["a", "b", "c"]}), y=xr.DataArray([0, 1, 0], dims="z"), ) Analogously, label-based pointwise-indexing is also possible by the ``.sel`` method: .. 
jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) times = xr.DataArray( pd.to_datetime(["2000-01-03", "2000-01-02", "2000-01-01"]), dims="new_time" ) da.sel(space=xr.DataArray(["IA", "IL", "IN"], dims=["new_time"]), time=times) .. _align and reindex: Align and reindex ----------------- Xarray's ``reindex``, ``reindex_like`` and ``align`` impose a ``DataArray`` or ``Dataset`` onto a new set of coordinates corresponding to dimensions. The original values are subset to the index labels still found in the new labels, and values corresponding to new labels not found in the original object are in-filled with ``NaN``. Xarray operations that combine multiple objects generally automatically align their arguments to share the same indexes. However, manual alignment can be useful for greater control and for increased performance. To reindex a particular dimension, use :py:meth:`~xarray.DataArray.reindex`: .. jupyter-execute:: da.reindex(space=["IA", "CA"]) The :py:meth:`~xarray.DataArray.reindex_like` method is a useful shortcut. To demonstrate, we will make a subset DataArray with new values: .. jupyter-execute:: foo = da.rename("foo") baz = (10 * da[:2, :2]).rename("baz") baz Reindexing ``foo`` with ``baz`` selects out the first two values along each dimension: .. jupyter-execute:: foo.reindex_like(baz) The opposite operation asks us to reindex to a larger shape, so we fill in the missing values with ``NaN``: .. jupyter-execute:: baz.reindex_like(foo) The :py:func:`~xarray.align` function lets us perform more flexible database-like ``'inner'``, ``'outer'``, ``'left'`` and ``'right'`` joins: .. jupyter-execute:: xr.align(foo, baz, join="inner") .. jupyter-execute:: xr.align(foo, baz, join="outer") Both ``reindex_like`` and ``align`` work interchangeably between :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects, and with any number of matching dimension names: .. jupyter-execute:: ds .. jupyter-execute:: ds.reindex_like(baz) .. jupyter-execute:: other = xr.DataArray(["a", "b", "c"], dims="other") # this is a no-op, because there are no shared dimension names ds.reindex_like(other) .. _indexing.missing_coordinates: Missing coordinate labels ------------------------- Coordinate labels for each dimension are optional (as of xarray v0.9). Label based indexing with ``.sel`` and ``.loc`` uses standard positional, integer-based indexing as a fallback for dimensions without a coordinate label: .. jupyter-execute:: da = xr.DataArray([1, 2, 3], dims="x") da.sel(x=[0, -1]) Alignment between xarray objects where one or both do not have coordinate labels succeeds only if all dimensions of the same name have the same length. Otherwise, it raises an informative error: .. jupyter-execute:: :raises: xr.align(da, da[:2]) Underlying Indexes ------------------ Xarray uses the :py:class:`pandas.Index` internally to perform indexing operations. If you need to access the underlying indexes, they are available through the :py:attr:`~xarray.DataArray.indexes` attribute. .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) da .. jupyter-execute:: da.indexes .. jupyter-execute:: da.indexes["time"] Use :py:meth:`~xarray.DataArray.get_index` to get an index for a dimension, falling back to a default :py:class:`pandas.RangeIndex` if it has no coordinate labels: .. 
jupyter-execute:: da = xr.DataArray([1, 2, 3], dims="x") da .. jupyter-execute:: da.get_index("x") .. _copies_vs_views: Copies vs. Views ---------------- Whether array indexing returns a view or a copy of the underlying data depends on the nature of the labels. For positional (integer) indexing, xarray follows the same `rules`_ as NumPy: * Positional indexing with only integers and slices returns a view. * Positional indexing with arrays or lists returns a copy. The rules for label based indexing are more complex: * Label-based indexing with only slices returns a view. * Label-based indexing with arrays returns a copy. * Label-based indexing with scalars returns a view or a copy, depending upon if the corresponding positional indexer can be represented as an integer or a slice object. The exact rules are determined by pandas. Whether data is a copy or a view is more predictable in xarray than in pandas, so unlike pandas, xarray does not produce `SettingWithCopy warnings`_. However, you should still avoid assignment with chained indexing. Note that other operations (such as :py:meth:`~xarray.DataArray.values`) may also return views rather than copies. .. _SettingWithCopy warnings: https://pandas.pydata.org/pandas-docs/stable/indexing.html#returning-a-view-versus-a-copy .. _rules: https://numpy.org/doc/stable/user/basics.copies.html .. _multi-level indexing: Multi-level indexing -------------------- Just like pandas, advanced indexing on multi-level indexes is possible with ``loc`` and ``sel``. You can slice a multi-index by providing multiple indexers, i.e., a tuple of slices, labels, list of labels, or any selector allowed by pandas: .. jupyter-execute:: midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two")) mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))]) mda .. jupyter-execute:: mda.sel(x=(list("ab"), [0])) You can also select multiple elements by providing a list of labels or tuples or a slice of tuples: .. jupyter-execute:: mda.sel(x=[("a", 0), ("b", 1)]) Additionally, xarray supports dictionaries: .. jupyter-execute:: mda.sel(x={"one": "a", "two": 0}) For convenience, ``sel`` also accepts multi-index levels directly as keyword arguments: .. jupyter-execute:: mda.sel(one="a", two=0) Note that using ``sel`` it is not possible to mix a dimension indexer with level indexers for that dimension (e.g., ``mda.sel(x={'one': 'a'}, two=0)`` will raise a ``ValueError``). Like pandas, xarray handles partial selection on multi-index (level drop). As shown below, it also renames the dimension / coordinate when the multi-index is reduced to a single index. .. jupyter-execute:: mda.loc[{"one": "a"}, ...] Unlike pandas, xarray does not guess whether you provide index levels or dimensions when using ``loc`` in some ambiguous cases. For example, for ``mda.loc[{'one': 'a', 'two': 0}]`` and ``mda.loc['a', 0]`` xarray always interprets ('one', 'two') and ('a', 0) as the names and labels of the 1st and 2nd dimension, respectively. You must specify all dimensions or use the ellipsis in the ``loc`` specifier, e.g. in the example above, ``mda.loc[{'one': 'a', 'two': 0}, :]`` or ``mda.loc[('a', 0), ...]``. .. _indexing.rules: Indexing rules -------------- Here we describe the full rules xarray uses for vectorized indexing. Note that this is for the purposes of explanation: for the sake of efficiency and to support various backends, the actual implementation is different. 0. (Only for label based indexing.) 
Look up positional indexes along each dimension from the corresponding :py:class:`pandas.Index`. 1. A full slice object ``:`` is inserted for each dimension without an indexer. 2. ``slice`` objects are converted into arrays, given by ``np.arange(*slice.indices(...))``. 3. Assume dimension names for array indexers without dimensions, such as ``np.ndarray`` and ``list``, from the dimensions to be indexed along. For example, ``v.isel(x=[0, 1])`` is understood as ``v.isel(x=xr.DataArray([0, 1], dims=['x']))``. 4. For each variable in a ``Dataset`` or ``DataArray`` (the array and its coordinates): a. Broadcast all relevant indexers based on their dimension names (see :ref:`compute.broadcasting` for full details). b. Index the underling array by the broadcast indexers, using NumPy's advanced indexing rules. 5. If any indexer DataArray has coordinates and no coordinate with the same name exists, attach them to the indexed object. .. note:: Only 1-dimensional boolean arrays can be used as indexers. xarray-2025.12.0/doc/user-guide/interpolation.rst000066400000000000000000000247601511464676000216130ustar00rootroot00000000000000.. _interp: Interpolating data ================== .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt np.random.seed(123456) Xarray offers flexible interpolation routines, which have a similar interface to our :ref:`indexing `. .. note:: ``interp`` requires ``scipy`` installed. Scalar and 1-dimensional interpolation -------------------------------------- Interpolating a :py:class:`~xarray.DataArray` works mostly like labeled indexing of a :py:class:`~xarray.DataArray`, .. jupyter-execute:: da = xr.DataArray( np.sin(0.3 * np.arange(12).reshape(4, 3)), [("time", np.arange(4)), ("space", [0.1, 0.2, 0.3])], ) # label lookup da.sel(time=3) .. jupyter-execute:: # interpolation da.interp(time=2.5) Similar to the indexing, :py:meth:`~xarray.DataArray.interp` also accepts an array-like, which gives the interpolated result as an array. .. jupyter-execute:: # label lookup da.sel(time=[2, 3]) .. jupyter-execute:: # interpolation da.interp(time=[2.5, 3.5]) To interpolate data with a :py:doc:`numpy.datetime64 ` coordinate you can pass a string. .. jupyter-execute:: da_dt64 = xr.DataArray( [1, 3], [("time", pd.date_range("1/1/2000", "1/3/2000", periods=2))] ) da_dt64.interp(time="2000-01-02") The interpolated data can be merged into the original :py:class:`~xarray.DataArray` by specifying the time periods required. .. jupyter-execute:: da_dt64.interp(time=pd.date_range("1/1/2000", "1/3/2000", periods=3)) Interpolation of data indexed by a :py:class:`~xarray.CFTimeIndex` is also allowed. See :ref:`CFTimeIndex` for examples. .. note:: Currently, our interpolation only works for regular grids. Therefore, similarly to :py:meth:`~xarray.DataArray.sel`, only 1D coordinates along a dimension can be used as the original coordinate to be interpolated. Multi-dimensional Interpolation ------------------------------- Like :py:meth:`~xarray.DataArray.sel`, :py:meth:`~xarray.DataArray.interp` accepts multiple coordinates. In this case, multidimensional interpolation is carried out. .. jupyter-execute:: # label lookup da.sel(time=2, space=0.1) .. jupyter-execute:: # interpolation da.interp(time=2.5, space=0.15) Array-like coordinates are also accepted: .. jupyter-execute:: # label lookup da.sel(time=[2, 3], space=[0.1, 0.2]) .. 
jupyter-execute:: # interpolation da.interp(time=[1.5, 2.5], space=[0.15, 0.25]) :py:meth:`~xarray.DataArray.interp_like` method is a useful shortcut. This method interpolates an xarray object onto the coordinates of another xarray object. For example, if we want to compute the difference between two :py:class:`~xarray.DataArray` s (``da`` and ``other``) staying on slightly different coordinates, .. jupyter-execute:: other = xr.DataArray( np.sin(0.4 * np.arange(9).reshape(3, 3)), [("time", [0.9, 1.9, 2.9]), ("space", [0.15, 0.25, 0.35])], ) it might be a good idea to first interpolate ``da`` so that it will stay on the same coordinates of ``other``, and then subtract it. :py:meth:`~xarray.DataArray.interp_like` can be used for such a case, .. jupyter-execute:: # interpolate da along other's coordinates interpolated = da.interp_like(other) interpolated It is now possible to safely compute the difference ``other - interpolated``. Interpolation methods --------------------- We use either :py:class:`scipy.interpolate.interp1d` or special interpolants from :py:class:`scipy.interpolate` for 1-dimensional interpolation (see :py:meth:`~xarray.Dataset.interp`). For multi-dimensional interpolation, an attempt is first made to decompose the interpolation in a series of 1-dimensional interpolations, in which case the relevant 1-dimensional interpolator is used. If a decomposition cannot be made (e.g. with advanced interpolation), :py:func:`scipy.interpolate.interpn` is used. The interpolation method can be specified by the optional ``method`` argument. .. jupyter-execute:: da = xr.DataArray( np.sin(np.linspace(0, 2 * np.pi, 10)), dims="x", coords={"x": np.linspace(0, 1, 10)}, ) da.plot.line("o", label="original") da.interp(x=np.linspace(0, 1, 100)).plot.line(label="linear (default)") da.interp(x=np.linspace(0, 1, 100), method="cubic").plot.line(label="cubic") plt.legend(); Additional keyword arguments can be passed to scipy's functions. .. jupyter-execute:: # fill 0 for the outside of the original coordinates. da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": 0.0}) .. jupyter-execute:: # 1-dimensional extrapolation da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": "extrapolate"}) .. jupyter-execute:: # multi-dimensional extrapolation da = xr.DataArray( np.sin(0.3 * np.arange(12).reshape(4, 3)), [("time", np.arange(4)), ("space", [0.1, 0.2, 0.3])], ) da.interp( time=4, space=np.linspace(-0.1, 0.5, 10), kwargs={"fill_value": "extrapolate"} ) Advanced Interpolation ---------------------- :py:meth:`~xarray.DataArray.interp` accepts :py:class:`~xarray.DataArray` as similar to :py:meth:`~xarray.DataArray.sel`, which enables us more advanced interpolation. Based on the dimension of the new coordinate passed to :py:meth:`~xarray.DataArray.interp`, the dimension of the result are determined. For example, if you want to interpolate a two dimensional array along a particular dimension, as illustrated below, you can pass two 1-dimensional :py:class:`~xarray.DataArray` s with a common dimension as new coordinate. .. image:: ../_static/advanced_selection_interpolation.svg :height: 200px :width: 400 px :alt: advanced indexing and interpolation :align: center For example: .. jupyter-execute:: da = xr.DataArray( np.sin(0.3 * np.arange(20).reshape(5, 4)), [("x", np.arange(5)), ("y", [0.1, 0.2, 0.3, 0.4])], ) # advanced indexing x = xr.DataArray([0, 2, 4], dims="z") y = xr.DataArray([0.1, 0.2, 0.3], dims="z") da.sel(x=x, y=y) .. 
jupyter-execute:: # advanced interpolation, without extrapolation x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z") y = xr.DataArray([0.15, 0.25, 0.35, 0.45], dims="z") da.interp(x=x, y=y) where values on the original coordinates ``(x, y) = ((0.5, 0.15), (1.5, 0.25), (2.5, 0.35), (3.5, 0.45))`` are obtained by the 2-dimensional interpolation and mapped along a new dimension ``z``. Since no keyword arguments are passed to the interpolation routine, no extrapolation is performed resulting in a ``nan`` value. If you want to add a coordinate to the new dimension ``z``, you can supply :py:class:`~xarray.DataArray` s with a coordinate. Extrapolation can be achieved by passing additional arguments to SciPy's ``interpnd`` function, .. jupyter-execute:: x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z", coords={"z": ["a", "b", "c", "d"]}) y = xr.DataArray( [0.15, 0.25, 0.35, 0.45], dims="z", coords={"z": ["a", "b", "c", "d"]} ) da.interp(x=x, y=y, kwargs={"fill_value": None}) For the details of the advanced indexing, see :ref:`more advanced indexing `. Interpolating arrays with NaN ----------------------------- Our :py:meth:`~xarray.DataArray.interp` works with arrays with NaN the same way that `scipy.interpolate.interp1d `_ and `scipy.interpolate.interpn `_ do. ``linear`` and ``nearest`` methods return arrays including NaN, while other methods such as ``cubic`` or ``quadratic`` return all NaN arrays. .. jupyter-execute:: da = xr.DataArray([0, 2, np.nan, 3, 3.25], dims="x", coords={"x": range(5)}) da.interp(x=[0.5, 1.5, 2.5]) .. jupyter-execute:: da.interp(x=[0.5, 1.5, 2.5], method="cubic") To avoid this, you can drop NaN by :py:meth:`~xarray.DataArray.dropna`, and then make the interpolation .. jupyter-execute:: dropped = da.dropna("x") dropped .. jupyter-execute:: dropped.interp(x=[0.5, 1.5, 2.5], method="cubic") If NaNs are distributed randomly in your multidimensional array, dropping all the columns containing more than one NaNs by :py:meth:`~xarray.DataArray.dropna` may lose a significant amount of information. In such a case, you can fill NaN by :py:meth:`~xarray.DataArray.interpolate_na`, which is similar to :py:meth:`pandas.Series.interpolate`. .. jupyter-execute:: filled = da.interpolate_na(dim="x") filled This fills NaN by interpolating along the specified dimension. After filling NaNs, you can interpolate: .. jupyter-execute:: filled.interp(x=[0.5, 1.5, 2.5], method="cubic") For the details of :py:meth:`~xarray.DataArray.interpolate_na`, see :ref:`Missing values `. Example ------- Let's see how :py:meth:`~xarray.DataArray.interp` works on real data. .. jupyter-execute:: # Raw data ds = xr.tutorial.open_dataset("air_temperature").isel(time=0) fig, axes = plt.subplots(ncols=2, figsize=(10, 4)) ds.air.plot(ax=axes[0]) axes[0].set_title("Raw data") # Interpolated data new_lon = np.linspace(ds.lon[0].item(), ds.lon[-1].item(), ds.sizes["lon"] * 4) new_lat = np.linspace(ds.lat[0].item(), ds.lat[-1].item(), ds.sizes["lat"] * 4) dsi = ds.interp(lat=new_lat, lon=new_lon) dsi.air.plot(ax=axes[1]) axes[1].set_title("Interpolated data"); Our advanced interpolation can be used to remap the data to the new coordinate. Consider the new coordinates x and z on the two dimensional plane. The remapping can be done as follows .. 
jupyter-execute:: # new coordinate x = np.linspace(240, 300, 100) z = np.linspace(20, 70, 100) # relation between new and original coordinates lat = xr.DataArray(z, dims=["z"], coords={"z": z}) lon = xr.DataArray( (x[:, np.newaxis] - 270) / np.cos(z * np.pi / 180) + 270, dims=["x", "z"], coords={"x": x, "z": z}, ) fig, axes = plt.subplots(ncols=2, figsize=(10, 4)) ds.air.plot(ax=axes[0]) # draw the new coordinate on the original coordinates. for idx in [0, 33, 66, 99]: axes[0].plot(lon.isel(x=idx), lat, "--k") for idx in [0, 33, 66, 99]: axes[0].plot(*xr.broadcast(lon.isel(z=idx), lat.isel(z=idx)), "--k") axes[0].set_title("Raw data") dsi = ds.interp(lon=lon, lat=lat) dsi.air.plot(ax=axes[1]) axes[1].set_title("Remapped data"); xarray-2025.12.0/doc/user-guide/io.rst000066400000000000000000001715451511464676000173370ustar00rootroot00000000000000.. currentmodule:: xarray .. _io: Reading and writing files ========================= Xarray supports direct serialization and IO to several file formats, from simple :ref:`io.pickle` files to the more flexible :ref:`io.netcdf` format (recommended). .. jupyter-execute:: :hide-code: import os import iris import ncdata.iris_xarray import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) You can read different types of files in ``xr.open_dataset`` by specifying the engine to be used: .. code:: python xr.open_dataset("example.nc", engine="netcdf4") The "engine" provides a set of instructions that tells xarray how to read the data and pack them into a ``Dataset`` (or ``Dataarray``). These instructions are stored in an underlying "backend". Xarray comes with several backends that cover many common data formats. Many more backends are available via external libraries, or you can `write your own `_. This diagram aims to help you determine - based on the format of the file you'd like to read - which type of backend you're using and how to use it. Text and boxes are clickable for more information. Following the diagram is detailed information on many popular backends. You can learn more about using and developing backends in the `Xarray tutorial JupyterBook `_. .. _comment: mermaid Flowcharg "link" text gets secondary color background, SVG icon fill gets primary color .. raw:: html .. mermaid:: :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}} :alt: Flowchart illustrating how to choose the right backend engine to read your data flowchart LR built-in-eng["`**Is your data stored in one of these formats?** - netCDF4 - netCDF3 - Zarr - DODS/OPeNDAP - HDF5 `"] built-in("`**You're in luck!** Xarray bundles a backend to automatically read these formats. Open data using xr.open_dataset(). We recommend explicitly setting engine='xxxx' for faster loading.`") installed-eng["""One of these formats? - GRIB - TileDB - GeoTIFF, JPEG-2000, etc. (via GDAL) - Sentinel-1 SAFE """] installed("""Install the linked backend library and use it with xr.open_dataset(file, engine='xxxx').""") other["`**Options:** - Look around to see if someone has created an Xarray backend for your format! 
- Create your own backend - Convert your data to a supported format `"] built-in-eng -->|Yes| built-in built-in-eng -->|No| installed-eng installed-eng -->|Yes| installed installed-eng -->|No| other click built-in-eng "https://docs.xarray.dev/en/stable/get-help/faq.html#how-do-i-open-format-x-file-as-an-xarray-dataset" classDef quesNodefmt font-size:12pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3 class built-in-eng,installed-eng quesNodefmt classDef ansNodefmt font-size:12pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3 class built-in,installed,other ansNodefmt linkStyle default font-size:18pt,stroke-width:4 .. _io.backend_resolution: Backend Selection ----------------- When opening a file or URL without explicitly specifying the ``engine`` parameter, xarray automatically selects an appropriate backend based on the file path or URL. The backends are tried in order: **netcdf4 โ†’ h5netcdf โ†’ scipy โ†’ pydap โ†’ zarr**. .. note:: You can customize the order in which netCDF backends are tried using the ``netcdf_engine_order`` option in :py:func:`~xarray.set_options`: .. code-block:: python # Prefer h5netcdf over netcdf4 xr.set_options(netcdf_engine_order=["h5netcdf", "netcdf4", "scipy"]) See :ref:`options` for more details on configuration options. The following tables show which backend will be selected for different types of URLs and files. .. important:: โœ… means the backend will **guess it can open** the URL or file based on its path, extension, or magic number, but this doesn't guarantee success. For example, not all Zarr stores are xarray-compatible. โŒ means the backend will not attempt to open it. Remote URL Resolution ~~~~~~~~~~~~~~~~~~~~~ .. list-table:: :header-rows: 1 :widths: 50 10 10 10 10 10 * - URL - :ref:`netcdf4 ` - :ref:`h5netcdf ` - :ref:`scipy ` - :ref:`pydap ` - :ref:`zarr ` * - ``https://example.com/store.zarr`` - โŒ - โŒ - โŒ - โŒ - โœ… * - ``https://example.com/data.nc`` - โœ… - โœ… - โŒ - โŒ - โŒ * - ``http://example.com/data.nc?var=temp`` - โœ… - โŒ - โŒ - โŒ - โŒ * - ``http://example.com/dap4/data.nc?var=x`` - โœ… - โŒ - โŒ - โœ… - โŒ * - ``dap2://opendap.nasa.gov/dataset`` - โŒ - โŒ - โŒ - โœ… - โŒ * - ``https://example.com/DAP4/data`` - โŒ - โŒ - โŒ - โœ… - โŒ * - ``http://test.opendap.org/dap4/file.nc4`` - โœ… - โœ… - โŒ - โœ… - โŒ * - ``https://example.com/DAP4/data.nc`` - โœ… - โœ… - โŒ - โœ… - โŒ Local File Resolution ~~~~~~~~~~~~~~~~~~~~~ For local files, backends first try to read the file's **magic number** (first few bytes). If the magic number **cannot be read** (e.g., file doesn't exist, no permissions), they fall back to checking the file **extension**. If the magic number is readable but invalid, the backend returns False (does not fall back to extension). .. list-table:: :header-rows: 1 :widths: 40 20 10 10 10 10 * - File Path - Magic Number - :ref:`netcdf4 ` - :ref:`h5netcdf ` - :ref:`scipy ` - :ref:`zarr ` * - ``/path/to/file.nc`` - ``CDF\x01`` (netCDF3) - โœ… - โŒ - โœ… - โŒ * - ``/path/to/file.nc4`` - ``\x89HDF\r\n\x1a\n`` (HDF5/netCDF4) - โœ… - โœ… - โŒ - โŒ * - ``/path/to/file.nc.gz`` - ``\x1f\x8b`` + ``CDF`` inside - โŒ - โŒ - โœ… - โŒ * - ``/path/to/store.zarr/`` - (directory) - โŒ - โŒ - โŒ - โœ… * - ``/path/to/file.nc`` - *(no magic number)* - โœ… - โœ… - โœ… - โŒ * - ``/path/to/file.xyz`` - ``CDF\x01`` (netCDF3) - โœ… - โŒ - โœ… - โŒ * - ``/path/to/file.xyz`` - ``\x89HDF\r\n\x1a\n`` (HDF5/netCDF4) - โœ… - โœ… - โŒ - โŒ * - ``/path/to/file.xyz`` - *(no magic number)* - โŒ - โŒ - โŒ - โŒ .. 
note:: Remote URLs ending in ``.nc`` are **ambiguous**: - They could be netCDF files stored on a remote HTTP server (readable by ``netcdf4`` or ``h5netcdf``) - They could be OPeNDAP/DAP endpoints (readable by ``netcdf4`` with DAP support or ``pydap``) These interpretations are fundamentally incompatible. If xarray's automatic selection chooses the wrong backend, you must explicitly specify the ``engine`` parameter: .. code-block:: python # Force interpretation as a DAP endpoint ds = xr.open_dataset("http://example.com/data.nc", engine="pydap") # Force interpretation as a remote netCDF file ds = xr.open_dataset("https://example.com/data.nc", engine="netcdf4") .. _io.netcdf: netCDF ------ The recommended way to store xarray data structures is `netCDF`__, which is a binary file format for self-described datasets that originated in the geosciences. Xarray is based on the netCDF data model, so netCDF files on disk directly correspond to :py:class:`Dataset` objects (more accurately, a group in a netCDF file directly corresponds to a :py:class:`Dataset` object. See :ref:`io.netcdf_groups` for more.) NetCDF is supported on almost all platforms, and parsers exist for the vast majority of scientific programming languages. Recent versions of netCDF are based on the even more widely used HDF5 file-format. __ https://www.unidata.ucar.edu/software/netcdf/ .. tip:: If you aren't familiar with this data format, the `netCDF FAQ`_ is a good place to start. .. _netCDF FAQ: https://www.unidata.ucar.edu/software/netcdf/docs/faq.html#What-Is-netCDF Reading and writing netCDF files with xarray requires scipy, h5netcdf, or the `netCDF4-Python`__ library to be installed. SciPy only supports reading and writing of netCDF V3 files. __ https://github.com/Unidata/netcdf4-python We can save a Dataset to disk using the :py:meth:`Dataset.to_netcdf` method: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_netcdf("saved_on_disk.nc") By default, the file is saved as netCDF4 (assuming netCDF4-Python is installed). You can control the format and engine used to write the file with the ``format`` and ``engine`` arguments. .. tip:: Using the `h5netcdf `_ package by passing ``engine='h5netcdf'`` to :py:meth:`open_dataset` can sometimes be quicker than the default ``engine='netcdf4'`` that uses the `netCDF4 `_ package. We can load netCDF files to create a new Dataset using :py:func:`open_dataset`: .. jupyter-execute:: ds_disk = xr.open_dataset("saved_on_disk.nc") ds_disk .. jupyter-execute:: :hide-code: # Close "saved_on_disk.nc", but retain the file until after closing or deleting other # datasets that will refer to it. ds_disk.close() Similarly, a DataArray can be saved to disk using the :py:meth:`DataArray.to_netcdf` method, and loaded from disk using the :py:func:`open_dataarray` function. As netCDF files correspond to :py:class:`Dataset` objects, these functions internally convert the ``DataArray`` to a ``Dataset`` before saving, and then convert back when loading, ensuring that the ``DataArray`` that is loaded is always exactly the same as the one that was saved. A dataset can also be loaded or written to a specific group within a netCDF file. To load from a group, pass a ``group`` keyword argument to the ``open_dataset`` function. The group can be specified as a path-like string, e.g., to access subgroup 'bar' within group 'foo' pass '/foo/bar' as the ``group`` argument. 
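For example, a minimal sketch (the file name and group paths here are only
illustrative):

.. code:: python

    # read one group of a hierarchical file as a Dataset
    ds_bar = xr.open_dataset("hierarchy.nc", group="/foo/bar")

    # write a Dataset into another group of the same file
    ds_bar.to_netcdf("hierarchy.nc", group="/foo/baz", mode="a")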
When writing multiple groups in one file, pass ``mode='a'`` to ``to_netcdf`` to ensure that each call does not delete the file. .. tip:: It is recommended to use :py:class:`~xarray.DataTree` to represent hierarchical data, and to use the :py:meth:`xarray.DataTree.to_netcdf` method when writing hierarchical data to a netCDF file. Data is *always* loaded lazily from netCDF files. You can manipulate, slice and subset Dataset and DataArray objects, and no array values are loaded into memory until you try to perform some sort of actual computation. For an example of how these lazy arrays work, see the OPeNDAP section below. There may be minor differences in the :py:class:`Dataset` object returned when reading a NetCDF file with different engines. It is important to note that when you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. .. tip:: Xarray's lazy loading of remote or on-disk datasets is often but not always desirable. Before performing computationally intense operations, it is often a good idea to load a Dataset (or DataArray) entirely into memory by invoking the :py:meth:`Dataset.load` method. Datasets have a :py:meth:`Dataset.close` method to close the associated netCDF file. However, it's often cleaner to use a ``with`` statement: .. jupyter-execute:: # this automatically closes the dataset after use with xr.open_dataset("saved_on_disk.nc") as ds: print(ds.keys()) Although xarray provides reasonable support for incremental reads of files on disk, it does not support incremental writes, which can be a useful strategy for dealing with datasets too big to fit into memory. Instead, xarray integrates with dask.array (see :ref:`dask`), which provides a fully featured engine for streaming computation. It is possible to append or overwrite netCDF variables using the ``mode='a'`` argument. When using this option, all variables in the dataset will be written to the original netCDF file, regardless if they exist in the original dataset. .. _io.netcdf_groups: Groups ~~~~~~ Whilst netCDF groups can only be loaded individually as ``Dataset`` objects, a whole file of many nested groups can be loaded as a single :py:class:`xarray.DataTree` object. To open a whole netCDF file as a tree of groups use the :py:func:`xarray.open_datatree` function. To save a DataTree object as a netCDF file containing many groups, use the :py:meth:`xarray.DataTree.to_netcdf` method. .. _netcdf.root_group.note: .. note:: Due to file format specifications the on-disk root group name is always ``"/"``, overriding any given ``DataTree`` root node name. .. _netcdf.group.warning: .. warning:: ``DataTree`` objects do not follow the exact same data model as netCDF files, which means that perfect round-tripping is not always possible. In particular in the netCDF data model dimensions are entities that can exist regardless of whether any variable possesses them. This is in contrast to `xarray's data model `_ (and hence :ref:`DataTree's data model `) in which the dimensions of a (Dataset/Tree) object are simply the set of dimensions present across all variables in that dataset. This means that if a netCDF file contains dimensions but no variables which possess those dimensions, these dimensions will not be present when that file is opened as a DataTree object. Saving this DataTree object to file will therefore not preserve these "unused" dimensions. .. 
_io.encoding: Reading encoded data ~~~~~~~~~~~~~~~~~~~~ NetCDF files follow some conventions for encoding datetime arrays (as numbers with a "units" attribute) and for packing and unpacking data (as described by the "scale_factor" and "add_offset" attributes). If the argument ``decode_cf=True`` (default) is given to :py:func:`open_dataset`, xarray will attempt to automatically decode the values in the netCDF objects according to `CF conventions`_. Sometimes this will fail, for example, if a variable has an invalid "units" or "calendar" attribute. For these cases, you can turn this decoding off manually. .. _CF conventions: https://cfconventions.org/ You can view this encoding information (among others) in the :py:attr:`DataArray.encoding` and :py:attr:`Dataset.encoding` attributes: .. jupyter-execute:: ds_disk["y"].encoding .. jupyter-execute:: ds_disk.encoding Note that all operations that manipulate variables other than indexing will remove encoding information. In some cases it is useful to intentionally reset a dataset's original encoding values. This can be done with either the :py:meth:`Dataset.drop_encoding` or :py:meth:`DataArray.drop_encoding` methods. .. jupyter-execute:: ds_no_encoding = ds_disk.drop_encoding() ds_no_encoding.encoding .. _combining multiple files: Reading multi-file datasets ........................... NetCDF files are often encountered in collections, e.g., with different files corresponding to different model runs or one file per timestamp. Xarray can straightforwardly combine such files into a single Dataset by making use of :py:func:`concat`, :py:func:`merge`, :py:func:`combine_nested` and :py:func:`combine_by_coords`. For details on the difference between these functions see :ref:`combining data`. Xarray includes support for manipulating datasets that don't fit into memory with dask_. If you have dask installed, you can open multiple files simultaneously in parallel using :py:func:`open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) This function automatically concatenates and merges multiple files into a single xarray dataset. It is the recommended way to open multiple files with xarray. For more details on parallel reading, see :ref:`combining.multi`, :ref:`dask.io` and a `blog post`_ by Stephan Hoyer. :py:func:`open_mfdataset` takes many kwargs that allow you to control its behaviour (e.g. ``parallel``, ``combine``, ``compat``, ``join``, ``concat_dim``). See its docstring for more details. .. note:: A common use-case involves a dataset distributed across a large number of files with each file containing a large number of variables. Commonly, a few of these variables need to be concatenated along a dimension (say ``"time"``), while the rest are equal across the datasets (ignoring floating point differences). The following command with suitable modifications (such as ``parallel=True``) works well with such datasets:: xr.open_mfdataset('my/files/*.nc', concat_dim="time", combine="nested", data_vars='minimal', coords='minimal', compat='override') This command concatenates variables along the ``"time"`` dimension, but only those that already contain the ``"time"`` dimension (``data_vars='minimal', coords='minimal'``). Variables that lack the ``"time"`` dimension are taken from the first dataset (``compat='override'``). .. _dask: https://www.dask.org ..
_blog post: https://stephanhoyer.com/2015/06/11/xray-dask-out-of-core-labeled-arrays/ Sometimes multi-file datasets are not conveniently organized for easy use of :py:func:`open_mfdataset`. One can use the ``preprocess`` argument to provide a function that takes a dataset and returns a modified Dataset. :py:func:`open_mfdataset` will call ``preprocess`` on every dataset (corresponding to each file) prior to combining them. If :py:func:`open_mfdataset` does not meet your needs, other approaches are possible. The general pattern for parallel reading of multiple files using dask, modifying those datasets and then combining into a single ``Dataset`` is:: def modify(ds): # modify ds here return ds # this is basically what open_mfdataset does open_kwargs = dict(decode_cf=True, decode_times=False) open_tasks = [dask.delayed(xr.open_dataset)(f, **open_kwargs) for f in file_names] tasks = [dask.delayed(modify)(task) for task in open_tasks] datasets = dask.compute(tasks) # get a list of xarray.Datasets combined = xr.combine_nested(datasets) # or some combination of concat, merge As an example, here's how we could approximate ``MFDataset`` from the netCDF4 library:: from glob import glob import xarray as xr def read_netcdfs(files, dim): # glob expands paths with * to a list of files, like the unix shell paths = sorted(glob(files)) datasets = [xr.open_dataset(p) for p in paths] combined = xr.concat(datasets, dim) return combined combined = read_netcdfs('/all/my/files/*.nc', dim='time') This function will work in many cases, but it's not very robust. First, it never closes files, which means it will fail if you need to load more than a few thousand files. Second, it assumes that you want all the data from each file and that it can all fit into memory. In many situations, you only need a small subset or an aggregated summary of the data from each file. Here's a slightly more sophisticated example of how to remedy these deficiencies:: def read_netcdfs(files, dim, transform_func=None): def process_one_path(path): # use a context manager, to ensure the file gets closed after use with xr.open_dataset(path) as ds: # transform_func should do some sort of selection or # aggregation if transform_func is not None: ds = transform_func(ds) # load all data from the transformed dataset, to ensure we can # use it after closing each original file ds.load() return ds paths = sorted(glob(files)) datasets = [process_one_path(p) for p in paths] combined = xr.concat(datasets, dim) return combined # here we suppose we only care about the combined mean of each file; # you might also use indexing operations like .sel to subset datasets combined = read_netcdfs('/all/my/files/*.nc', dim='time', transform_func=lambda ds: ds.mean()) This pattern works well and is very robust. We've used similar code to process tens of thousands of files constituting 100s of GB of data. .. _io.netcdf.writing_encoded: Writing encoded data ~~~~~~~~~~~~~~~~~~~~ Conversely, you can customize how xarray writes netCDF files on disk by providing explicit encodings for each dataset variable. The ``encoding`` argument takes a dictionary with variable names as keys and variable specific encodings as values. These encodings are saved as attributes on the netCDF variables on disk, which allows xarray to faithfully read encoded data back into memory. 
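As a minimal sketch (reusing the ``ds`` created above; the file name and encoding values are only illustrative), an explicit per-variable encoding can be passed like this:

.. code-block:: python

    # write "foo" as 32-bit floats with zlib compression enabled
    ds.to_netcdf(
        "encoded_example.nc",
        encoding={"foo": {"dtype": "float32", "zlib": True, "complevel": 4}},
    )

The individual encoding options are described in the sections below.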
It is important to note that using encodings is entirely optional: if you do not supply any of these encoding options, xarray will write data to disk using a default encoding, or the options in the ``encoding`` attribute, if set. This works perfectly fine in most cases, but encoding can be useful for additional control, especially for enabling compression. In the file on disk, these encodings are saved as attributes on each variable, which allow xarray and other CF-compliant tools for working with netCDF files to correctly read the data. Scaling and type conversions ............................ These encoding options (based on `CF Conventions on packed data`_) work on any version of the netCDF file format: - ``dtype``: Any valid NumPy dtype or string convertible to a dtype, e.g., ``'int16'`` or ``'float32'``. This controls the type of the data written on disk. - ``_FillValue``: Values of ``NaN`` in xarray variables are remapped to this value when saved on disk. This is important when converting floating point with missing values to integers on disk, because ``NaN`` is not a valid value for integer dtypes. By default, variables with float types are attributed a ``_FillValue`` of ``NaN`` in the output file, unless explicitly disabled with an encoding ``{'_FillValue': None}``. - ``scale_factor`` and ``add_offset``: Used to convert from encoded data on disk to to the decoded data in memory, according to the formula ``decoded = scale_factor * encoded + add_offset``. Please note that ``scale_factor`` and ``add_offset`` must be of same type and determine the type of the decoded data. These parameters can be fruitfully combined to compress discretized data on disk. For example, to save the variable ``foo`` with a precision of 0.1 in 16-bit integers while converting ``NaN`` to ``-9999``, we would use ``encoding={'foo': {'dtype': 'int16', 'scale_factor': 0.1, '_FillValue': -9999}}``. Compression and decompression with such discretization is extremely fast. .. _CF Conventions on packed data: https://cfconventions.org/cf-conventions/cf-conventions.html#packed-data .. _io.string-encoding: String encoding ............... Xarray can write unicode strings to netCDF files in two ways: - As variable length strings. This is only supported on netCDF4 (HDF5) files. - By encoding strings into bytes, and writing encoded bytes as a character array. The default encoding is UTF-8. By default, we use variable length strings for compatible files and fall-back to using encoded character arrays. Character arrays can be selected even for netCDF4 files by setting the ``dtype`` field in ``encoding`` to ``S1`` (corresponding to NumPy's single-character bytes dtype). If character arrays are used: - The string encoding that was used is stored on disk in the ``_Encoding`` attribute, which matches an ad-hoc convention `adopted by the netCDF4-Python library `_. At the time of this writing (October 2017), a standard convention for indicating string encoding for character arrays in netCDF files was `still under discussion `_. Technically, you can use `any string encoding recognized by Python `_ if you feel the need to deviate from UTF-8, by setting the ``_Encoding`` field in ``encoding``. But `we don't recommend it `_. - The character dimension name can be specified by the ``char_dim_name`` field of a variable's ``encoding``. If the name of the character dimension is not specified, the default is ``f'string{data.shape[-1]}'``. 
When decoding character arrays from existing files, the ``char_dim_name`` is added to the variables ``encoding`` to preserve if encoding happens, but the field can be edited by the user. .. warning:: Missing values in bytes or unicode string arrays (represented by ``NaN`` in xarray) are currently written to disk as empty strings ``''``. This means missing values will not be restored when data is loaded from disk. This behavior is likely to change in the future (:issue:`1647`). Unfortunately, explicitly setting a ``_FillValue`` for string arrays to handle missing values doesn't work yet either, though we also hope to fix this in the future. Chunk based compression ....................... ``zlib``, ``complevel``, ``fletcher32``, ``contiguous`` and ``chunksizes`` can be used for enabling netCDF4/HDF5's chunk based compression, as described in the `documentation for createVariable`_ for netCDF4-Python. This only works for netCDF4 files and thus requires using ``format='netCDF4'`` and either ``engine='netcdf4'`` or ``engine='h5netcdf'``. .. _documentation for createVariable: https://unidata.github.io/netcdf4-python/#netCDF4.Dataset.createVariable Chunk based gzip compression can yield impressive space savings, especially for sparse data, but it comes with significant performance overhead. HDF5 libraries can only read complete chunks back into memory, and maximum decompression speed is in the range of 50-100 MB/s. Worse, HDF5's compression and decompression currently cannot be parallelized with dask. For these reasons, we recommend trying discretization based compression (described above) first. Time units .......... The ``units`` and ``calendar`` attributes control how xarray serializes ``datetime64`` and ``timedelta64`` arrays to datasets on disk as numeric values. The ``units`` encoding should be a string like ``'days since 1900-01-01'`` for ``datetime64`` data or a string like ``'days'`` for ``timedelta64`` data. ``calendar`` should be one of the calendar types supported by netCDF4-python: ``'standard'``, ``'gregorian'``, ``'proleptic_gregorian'``, ``'noleap'``, ``'365_day'``, ``'360_day'``, ``'julian'``, ``'all_leap'``, ``'366_day'``. By default, xarray uses the ``'proleptic_gregorian'`` calendar and units of the smallest time difference between values, with a reference time of the first time value. .. _io.coordinates: Coordinates ........... You can control the ``coordinates`` attribute written to disk by specifying ``DataArray.encoding["coordinates"]``. If not specified, xarray automatically sets ``DataArray.encoding["coordinates"]`` to a space-delimited list of names of coordinate variables that share dimensions with the ``DataArray`` being written. This allows perfect roundtripping of xarray datasets but may not be desirable. When an xarray ``Dataset`` contains non-dimensional coordinates that do not share dimensions with any of the variables, these coordinate variable names are saved under a "global" ``"coordinates"`` attribute. This is not CF-compliant but again facilitates roundtripping of xarray datasets. Invalid netCDF files ~~~~~~~~~~~~~~~~~~~~ The library ``h5netcdf`` allows writing some dtypes that aren't allowed in netCDF4 (see `h5netcdf documentation `_). This feature is available through :py:meth:`DataArray.to_netcdf` and :py:meth:`Dataset.to_netcdf` when used with ``engine="h5netcdf"``, only if ``invalid_netcdf=True`` is explicitly set. .. warning:: Note that this produces a file that is likely to be not readable by other netCDF libraries! .. 
_io.hdf5: HDF5 ---- `HDF5`_ is both a file format and a data model for storing information. HDF5 stores data hierarchically, using groups to create a nested structure. HDF5 is a more general version of the netCDF4 data model, so the nested structure is one of many similarities between the two data formats. Reading HDF5 files in xarray requires the ``h5netcdf`` engine, which can be installed with ``conda install h5netcdf``. Once installed we can use xarray to open HDF5 files: .. code:: python xr.open_dataset("/path/to/my/file.h5") The similarities between HDF5 and netCDF4 mean that HDF5 data can be written with the same :py:meth:`Dataset.to_netcdf` method as used for netCDF4 data: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_netcdf("saved_on_disk.h5") Groups ~~~~~~ If you have multiple or highly nested groups, xarray by default may not read the group that you want. A particular group of an HDF5 file can be specified using the ``group`` argument: .. code:: python xr.open_dataset("/path/to/my/file.h5", group="/my/group") While xarray cannot interrogate an HDF5 file to determine which groups are available, the HDF5 Python reader `h5py`_ can be used instead. Natively the xarray data structures can only handle one level of nesting, organized as DataArrays inside of Datasets. If your HDF5 file has additional levels of hierarchy you can only access one group and a time and will need to specify group names. .. _HDF5: https://hdfgroup.github.io/hdf5/index.html .. _h5py: https://www.h5py.org/ .. _io.zarr: Zarr ---- `Zarr`_ is a Python package that provides an implementation of chunked, compressed, N-dimensional arrays. Zarr has the ability to store arrays in a range of ways, including in memory, in files, and in cloud-based object storage such as `Amazon S3`_ and `Google Cloud Storage`_. Xarray's Zarr backend allows xarray to leverage these capabilities, including the ability to store and analyze datasets far too large fit onto disk (particularly :ref:`in combination with dask `). Xarray can't open just any zarr dataset, because xarray requires special metadata (attributes) describing the dataset dimensions and coordinates. At this time, xarray can only open zarr datasets with these special attributes, such as zarr datasets written by xarray, `netCDF `_, or `GDAL `_. For implementation details, see :ref:`zarr_encoding`. To write a dataset with zarr, we use the :py:meth:`Dataset.to_zarr` method. To write to a local directory, we pass a path to a directory: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. jupyter-execute:: :stderr: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_zarr("path/to/directory.zarr", zarr_format=2, consolidated=False) (The suffix ``.zarr`` is optional--just a reminder that a zarr store lives there.) If the directory does not exist, it will be created. If a zarr store is already present at that path, an error will be raised, preventing it from being overwritten. To override this behavior and overwrite an existing store, add ``mode='w'`` when invoking :py:meth:`~Dataset.to_zarr`. DataArrays can also be saved to disk using the :py:meth:`DataArray.to_zarr` method, and loaded from disk using the :py:func:`open_dataarray` function with ``engine='zarr'``. 
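A minimal sketch of that round trip (the store name ``foo_da.zarr`` is just an illustration):

.. code-block:: python

    # write a single DataArray to its own zarr store, then read it back
    ds["foo"].to_zarr("foo_da.zarr", mode="w")
    foo = xr.open_dataarray("foo_da.zarr", engine="zarr")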
Similar to :py:meth:`DataArray.to_netcdf`, :py:meth:`DataArray.to_zarr` will convert the ``DataArray`` to a ``Dataset`` before saving, and then convert back when loading, ensuring that the ``DataArray`` that is loaded is always exactly the same as the one that was saved. .. note:: xarray does not write `NCZarr `_ attributes. Therefore, NCZarr data must be opened in read-only mode. To store variable length strings, convert them to object arrays first with ``dtype=object``. To read back a zarr dataset that has been created this way, we use the :py:func:`open_zarr` method: .. jupyter-execute:: ds_zarr = xr.open_zarr("path/to/directory.zarr", consolidated=False) ds_zarr Cloud Storage Buckets ~~~~~~~~~~~~~~~~~~~~~ It is possible to read and write xarray datasets directly from / to cloud storage buckets using zarr. This example uses the `gcsfs`_ package to provide an interface to `Google Cloud Storage`_. General `fsspec`_ URLs, those that begin with ``s3://`` or ``gcs://`` for example, are parsed and the store set up for you automatically when reading. You should include any arguments to the storage backend as the key ```storage_options``, part of ``backend_kwargs``. .. code:: python ds_gcs = xr.open_dataset( "gcs:///path.zarr", backend_kwargs={ "storage_options": {"project": "", "token": None} }, engine="zarr", ) This also works with ``open_mfdataset``, allowing you to pass a list of paths or a URL to be interpreted as a glob string. For writing, you may either specify a bucket URL or explicitly set up a ``zarr.abc.store.Store`` instance, as follows: .. tab:: URL .. code:: python # write to the bucket via GCS URL ds.to_zarr("gs://") # read it back ds_gcs = xr.open_zarr("gs://") .. tab:: fsspec .. code:: python import gcsfs import zarr # manually manage the cloud filesystem connection -- useful, for example, # when you need to manage permissions to cloud resources fs = gcsfs.GCSFileSystem(project="", token=None) zstore = zarr.storage.FsspecStore(fs, path="") # write to the bucket ds.to_zarr(store=zstore) # read it back ds_gcs = xr.open_zarr(zstore) .. tab:: obstore .. code:: python import obstore import zarr # alternatively, obstore offers a modern, performant interface for # cloud buckets gcsstore = obstore.store.GCSStore( "", prefix="", skip_signature=True ) zstore = zarr.store.ObjectStore(gcsstore) # write to the bucket ds.to_zarr(store=zstore) # read it back ds_gcs = xr.open_zarr(zstore) .. _fsspec: https://filesystem-spec.readthedocs.io/en/latest/ .. _obstore: https://developmentseed.org/obstore/latest/ .. _Zarr: https://zarr.readthedocs.io/ .. _Amazon S3: https://aws.amazon.com/s3/ .. _Google Cloud Storage: https://cloud.google.com/storage/ .. _gcsfs: https://github.com/fsspec/gcsfs .. _io.zarr.distributed_writes: Distributed writes ~~~~~~~~~~~~~~~~~~ Xarray will natively use dask to write in parallel to a zarr store, which should satisfy most moderately sized datasets. For more flexible parallelization, we can use ``region`` to write to limited regions of arrays in an existing Zarr store. To scale this up to writing large datasets, first create an initial Zarr store without writing all of its array data. This can be done by first creating a ``Dataset`` with dummy values stored in :ref:`dask `, and then calling ``to_zarr`` with ``compute=False`` to write only metadata (including ``attrs``) to Zarr: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. 
jupyter-execute:: import dask.array # The values of this dask array are entirely irrelevant; only the dtype, # shape and chunks are used dummies = dask.array.zeros(30, chunks=10) ds = xr.Dataset({"foo": ("x", dummies)}, coords={"x": np.arange(30)}) path = "path/to/directory.zarr" # Now we write the metadata without computing any array values ds.to_zarr(path, compute=False, consolidated=False) Now, a Zarr store with the correct variable shapes and attributes exists that can be filled out by subsequent calls to ``to_zarr``. Setting ``region="auto"`` will open the existing store and determine the correct alignment of the new data with the existing dimensions, or as an explicit mapping from dimension names to Python ``slice`` objects indicating where the data should be written (in index space, not label space), e.g., .. jupyter-execute:: # For convenience, we'll slice a single dataset, but in the real use-case # we would create them separately possibly even from separate processes. ds = xr.Dataset({"foo": ("x", np.arange(30))}, coords={"x": np.arange(30)}) # Any of the following region specifications are valid ds.isel(x=slice(0, 10)).to_zarr(path, region="auto", consolidated=False) ds.isel(x=slice(10, 20)).to_zarr(path, region={"x": "auto"}, consolidated=False) ds.isel(x=slice(20, 30)).to_zarr(path, region={"x": slice(20, 30)}, consolidated=False) Concurrent writes with ``region`` are safe as long as they modify distinct chunks in the underlying Zarr arrays (or use an appropriate ``lock``). As a safety check to make it harder to inadvertently override existing values, if you set ``region`` then *all* variables included in a Dataset must have dimensions included in ``region``. Other variables (typically coordinates) need to be explicitly dropped and/or written in a separate calls to ``to_zarr`` with ``mode='a'``. Zarr Compressors and Filters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are many different `options for compression and filtering possible with zarr `_. These options can be passed to the ``to_zarr`` method as variable encoding. For example: .. jupyter-execute:: :hide-code: ! rm -rf foo.zarr .. jupyter-execute:: import zarr from zarr.codecs import BloscCodec compressor = BloscCodec(cname="zstd", clevel=3, shuffle="shuffle") ds.to_zarr("foo.zarr", consolidated=False, encoding={"foo": {"compressors": [compressor]}}) .. note:: Not all native zarr compression and filtering options have been tested with xarray. .. _io.zarr.appending: Modifying existing Zarr stores ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray supports several ways of incrementally writing variables to a Zarr store. These options are useful for scenarios when it is infeasible or undesirable to write your entire dataset at once. 1. Use ``mode='a'`` to add or overwrite entire variables, 2. Use ``append_dim`` to resize and append to existing variables, and 3. Use ``region`` to write to limited regions of existing arrays. .. tip:: For ``Dataset`` objects containing dask arrays, a single call to ``to_zarr()`` will write all of your data in parallel. .. warning:: Alignment of coordinates is currently not checked when modifying an existing Zarr store. It is up to the user to ensure that coordinates are consistent. To add or overwrite entire variables, simply call :py:meth:`~Dataset.to_zarr` with ``mode='a'`` on a Dataset containing the new variables, passing in an existing Zarr store or path to a Zarr store. To resize and then append values along an existing dimension in a store, set ``append_dim``. 
This is a good option if data always arrives in a particular order, e.g., for time-stepping a simulation: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. jupyter-execute:: ds1 = xr.Dataset( {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))}, coords={ "x": [10, 20, 30, 40], "y": [1, 2, 3, 4, 5], "t": pd.date_range("2001-01-01", periods=2), }, ) ds1.to_zarr("path/to/directory.zarr", consolidated=False) .. jupyter-execute:: ds2 = xr.Dataset( {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))}, coords={ "x": [10, 20, 30, 40], "y": [1, 2, 3, 4, 5], "t": pd.date_range("2001-01-03", periods=2), }, ) ds2.to_zarr("path/to/directory.zarr", append_dim="t", consolidated=False) .. _io.zarr.writing_chunks: Specifying chunks in a zarr store ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Chunk sizes may be specified in one of three ways when writing to a zarr store: 1. Manual chunk sizing through the use of the ``encoding`` argument in :py:meth:`Dataset.to_zarr`: 2. Automatic chunking based on chunks in dask arrays 3. Default chunk behavior determined by the zarr library The resulting chunks will be determined based on the order of the above list; dask chunks will be overridden by manually-specified chunks in the encoding argument, and the presence of either dask chunks or chunks in the ``encoding`` attribute will supersede the default chunking heuristics in zarr. Importantly, this logic applies to every array in the zarr store individually, including coordinate arrays. Therefore, if a dataset contains one or more dask arrays, it may still be desirable to specify a chunk size for the coordinate arrays (for example, with a chunk size of ``-1`` to include the full coordinate). To specify chunks manually using the ``encoding`` argument, provide a nested dictionary with the structure ``{'variable_or_coord_name': {'chunks': chunks_tuple}}``. .. note:: The positional ordering of the chunks in the encoding argument must match the positional ordering of the dimensions in each array. Watch out for arrays with differently-ordered dimensions within a single Dataset. For example, let's say we're working with a dataset with dimensions ``('time', 'x', 'y')``, a variable ``Tair`` which is chunked in ``x`` and ``y``, and two multi-dimensional coordinates ``xc`` and ``yc``: .. jupyter-execute:: ds = xr.tutorial.open_dataset("rasm") ds["Tair"] = ds["Tair"].chunk({"x": 100, "y": 100}) ds These multi-dimensional coordinates are only two-dimensional and take up very little space on disk or in memory, yet when writing to disk the default zarr behavior is to split them into chunks: .. jupyter-execute:: ds.to_zarr("path/to/directory.zarr", consolidated=False, mode="w") !tree -I zarr.json path/to/directory.zarr This may cause unwanted overhead on some systems, such as when reading from a cloud storage provider. To disable this chunking, we can specify a chunk size equal to the shape of each coordinate array in the ``encoding`` argument: .. jupyter-execute:: ds.to_zarr( "path/to/directory.zarr", encoding={"xc": {"chunks": ds.xc.shape}, "yc": {"chunks": ds.yc.shape}}, consolidated=False, mode="w", ) !tree -I zarr.json path/to/directory.zarr The number of chunks on Tair matches our dask chunks, while there is now only a single chunk in the directory stores of each coordinate. Groups ~~~~~~ Nested groups in zarr stores can be represented by loading the store as a :py:class:`xarray.DataTree` object, similarly to netCDF. To open a whole zarr store as a tree of groups use the :py:func:`open_datatree` function. 
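For instance (a minimal sketch; the store name is hypothetical and assumes the store was written with nested groups):

.. code-block:: python

    # load every group in the store as a single DataTree
    tree = xr.open_datatree("path/to/hierarchical.zarr", engine="zarr")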
To save a ``DataTree`` object as a zarr store containing many groups, use the :py:meth:`xarray.DataTree.to_zarr()` method. .. note:: Note that perfect round-tripping should always be possible with a zarr store (:ref:`unlike for netCDF files `), as zarr does not support "unused" dimensions. For the root group the same restrictions (:ref:`as for netCDF files `) apply. Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. .. _io.zarr.consolidated_metadata: Consolidated Metadata ~~~~~~~~~~~~~~~~~~~~~ Xarray needs to read all of the zarr metadata when it opens a dataset. In some storage mediums, such as with cloud object storage (e.g. `Amazon S3`_), this can introduce significant overhead, because two separate HTTP calls to the object store must be made for each variable in the dataset. By default Xarray uses a feature called *consolidated metadata*, storing all metadata for the entire dataset with a single key (by default called ``.zmetadata``). This typically drastically speeds up opening the store. (For more information on this feature, consult the `zarr docs on consolidating metadata `_.) By default, xarray writes consolidated metadata and attempts to read stores with consolidated metadata, falling back to use non-consolidated metadata for reads. Because this fall-back option is so much slower, xarray issues a ``RuntimeWarning`` with guidance when reading with consolidated metadata fails: Failed to open Zarr store with consolidated metadata, falling back to try reading non-consolidated metadata. This is typically much slower for opening a dataset. To silence this warning, consider: 1. Consolidating metadata in this existing store with :py:func:`zarr.consolidate_metadata`. 2. Explicitly setting ``consolidated=False``, to avoid trying to read consolidate metadata. 3. Explicitly setting ``consolidated=True``, to raise an error in this case instead of falling back to try reading non-consolidated metadata. Fill Values ~~~~~~~~~~~ Zarr arrays have a ``fill_value`` that is used for chunks that were never written to disk. For the Zarr version 2 format, Xarray will set ``fill_value`` to be equal to the CF/NetCDF ``"_FillValue"``. This is ``np.nan`` by default for floats, and unset otherwise. Note that the Zarr library will set a default ``fill_value`` if not specified (usually ``0``). For the Zarr version 3 format, ``_FillValue`` and ```fill_value`` are decoupled. So you can set ``fill_value`` in ``encoding`` as usual. Note that at read-time, you can control whether ``_FillValue`` is masked using the ``mask_and_scale`` kwarg; and whether Zarr's ``fill_value`` is treated as synonymous with ``_FillValue`` using the ``use_zarr_fill_value_as_mask`` kwarg to :py:func:`xarray.open_zarr`. .. _io.kerchunk: Kerchunk -------- `Kerchunk `_ is a Python library that allows you to access chunked and compressed data formats (such as NetCDF3, NetCDF4, HDF5, GRIB2, TIFF & FITS), many of which are primary data formats for many data archives, by viewing the whole archive as an ephemeral `Zarr`_ dataset which allows for parallel, chunk-specific access. Instead of creating a new copy of the dataset in the Zarr spec/format or downloading the files locally, Kerchunk reads through the data archive and extracts the byte range and compression information of each chunk and saves as a ``reference``. These references are then saved as ``json`` files or ``parquet`` (more efficient) for later use. 
You can view some of these stored in the ``references`` directory `here `_. .. note:: These references follow this `specification `_. Packages like `kerchunk`_ and `virtualizarr `_ help in creating and reading these references. Reading these data archives becomes really easy with ``kerchunk`` in combination with ``xarray``, especially when these archives are large in size. A single combined reference can refer to thousands of the original data files present in these archives. You can view the whole dataset with from this combined reference using the above packages. The following example shows opening a single ``json`` reference to the ``saved_on_disk.h5`` file created above. If the file were instead stored remotely (e.g. ``s3://saved_on_disk.h5``) you can use ``storage_options`` that are used to `configure fsspec `_: .. jupyter-execute:: ds_kerchunked = xr.open_dataset( "./combined.json", engine="kerchunk", storage_options={}, ) ds_kerchunked .. note:: You can refer to the `project pythia kerchunk cookbook `_ and the `pangeo guide on kerchunk `_ for more information. .. _io.iris: Iris ---- The Iris_ tool allows easy reading of common meteorological and climate model formats (including GRIB and UK MetOffice PP files) into ``Cube`` objects which are in many ways very similar to ``DataArray`` objects, while enforcing a CF-compliant data model. DataArray ``to_iris`` and ``from_iris`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If iris is installed, xarray can convert a ``DataArray`` into a ``Cube`` using :py:meth:`DataArray.to_iris`: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 5), dims=["x", "y"], coords=dict(x=[10, 20, 30, 40], y=pd.date_range("2000-01-01", periods=5)), ) cube = da.to_iris() print(cube) Conversely, we can create a new ``DataArray`` object from a ``Cube`` using :py:meth:`DataArray.from_iris`: .. jupyter-execute:: da_cube = xr.DataArray.from_iris(cube) da_cube Ncdata ~~~~~~ Ncdata_ provides more sophisticated means of transferring data, including entire datasets. It uses the file saving and loading functions in both projects to provide a more "correct" translation between them, but still with very low overhead and not using actual disk files. Here we load an xarray dataset and convert it to Iris cubes: .. jupyter-execute:: :stderr: ds = xr.tutorial.open_dataset("air_temperature_gradient") cubes = ncdata.iris_xarray.cubes_from_xarray(ds) print(cubes) .. jupyter-execute:: print(cubes[1]) And we can convert the cubes back to an xarray dataset: .. jupyter-execute:: # ensure dataset-level and variable-level attributes loaded correctly iris.FUTURE.save_split_attrs = True ds = ncdata.iris_xarray.cubes_to_xarray(cubes) ds Ncdata can also adjust file data within load and save operations, to fix data loading problems or provide exact save formatting without needing to modify files on disk. See for example : `ncdata usage examples`_ .. _Iris: https://scitools-iris.readthedocs.io .. _Ncdata: https://ncdata.readthedocs.io/en/latest/index.html .. _ncdata usage examples: https://github.com/pp-mo/ncdata/tree/v0.1.2?tab=readme-ov-file#correct-a-miscoded-attribute-in-iris-input .. _io.opendap: OPeNDAP ------- Xarray includes support for `OPeNDAP`__ (via the netCDF4 library or Pydap), which lets us access large datasets over HTTP. __ https://www.opendap.org/ For example, we can open a connection to GBs of weather data produced by the `PRISM`__ project, and hosted by `IRI`__ at Columbia: __ https://www.prism.oregonstate.edu/ __ https://iri.columbia.edu/ .. 
jupyter-input:: remote_data = xr.open_dataset( "http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods", decode_times=False, ) remote_data .. jupyter-output:: Dimensions: (T: 1422, X: 1405, Y: 621) Coordinates: * X (X) float32 -125.0 -124.958 -124.917 -124.875 -124.833 -124.792 -124.75 ... * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 -772.5 -771.5 ... * Y (Y) float32 49.9167 49.875 49.8333 49.7917 49.75 49.7083 49.6667 49.625 ... Data variables: ppt (T, Y, X) float64 ... tdmean (T, Y, X) float64 ... tmax (T, Y, X) float64 ... tmin (T, Y, X) float64 ... Attributes: Conventions: IRIDL expires: 1375315200 .. TODO: update this example to show off decode_cf? .. note:: Like many real-world datasets, this dataset does not entirely follow `CF conventions`_. Unexpected formats will usually cause xarray's automatic decoding to fail. The way to work around this is to either set ``decode_cf=False`` in ``open_dataset`` to turn off all use of CF conventions, or by only disabling the troublesome parser. In this case, we set ``decode_times=False`` because the time axis here provides the calendar attribute in a format that xarray does not expect (the integer ``360`` instead of a string like ``'360_day'``). We can select and slice this data any number of times, and nothing is loaded over the network until we look at particular values: .. jupyter-input:: tmax = remote_data["tmax"][:500, ::3, ::3] tmax .. jupyter-output:: [48541500 values with dtype=float64] Coordinates: * Y (Y) float32 49.9167 49.7917 49.6667 49.5417 49.4167 49.2917 ... * X (X) float32 -125.0 -124.875 -124.75 -124.625 -124.5 -124.375 ... * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 ... Attributes: pointwidth: 120 standard_name: air_temperature units: Celsius_scale expires: 1443657600 .. jupyter-input:: # the data is downloaded automatically when we make the plot tmax[0].plot() .. image:: ../_static/opendap-prism-tmax.png Some servers require authentication before we can access the data. Pydap uses a `Requests`__ session object (which the user can pre-define), and this session object can recover `authentication`__` credentials from a locally stored ``.netrc`` file. For example, to connect to a server that requires NASA's URS authentication, with the username/password credentials stored on a locally accessible ``.netrc``, access to OPeNDAP data should be as simple as this:: import xarray as xr import requests my_session = requests.Session() ds_url = 'https://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' ds = xr.open_dataset(ds_url, session=my_session, engine="pydap") Moreover, a bearer token header can be included in a `Requests`__ session object, allowing for token-based authentication which OPeNDAP servers can use to avoid some redirects. Lastly, OPeNDAP servers may provide endpoint URLs for different OPeNDAP protocols, DAP2 and DAP4. To specify which protocol between the two options to use, you can replace the scheme of the url with the name of the protocol. For example:: # dap2 url ds_url = 'dap2://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' # dap4 url ds_url = 'dap4://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' While most OPeNDAP servers implement DAP2, not all servers implement DAP4. It is recommended to check if the URL you are using `supports DAP4`__ by checking the URL on a browser. __ https://docs.python-requests.org __ https://pydap.github.io/pydap/en/notebooks/Authentication.html __ https://pydap.github.io/pydap/en/faqs/dap2_or_dap4_url.html .. 
_io.pickle: Pickle ------ The simplest way to serialize an xarray object is to use Python's built-in pickle module: .. jupyter-execute:: import pickle # use the highest protocol (-1) because it is way faster than the default # text based pickle format pkl = pickle.dumps(ds, protocol=-1) pickle.loads(pkl) Pickling is important because it doesn't require any external libraries and lets you use xarray objects with Python modules like :py:mod:`multiprocessing` or :ref:`Dask `. However, pickling is **not recommended for long-term storage**. Restoring a pickle requires that the internal structure of the types for the pickled data remain unchanged. Because the internal design of xarray is still being refined, we make no guarantees (at this point) that objects pickled with this version of xarray will work in future versions. .. note:: When pickling an object opened from a NetCDF file, the pickle file will contain a reference to the file on disk. If you want to store the actual array values, load it into memory first with :py:meth:`Dataset.load` or :py:meth:`Dataset.compute`. .. _dictionary io: Dictionary ---------- We can convert a ``Dataset`` (or a ``DataArray``) to a dict using :py:meth:`Dataset.to_dict`: .. jupyter-execute:: ds = xr.Dataset({"foo": ("x", np.arange(30))}) d = ds.to_dict() d We can create a new xarray object from a dict using :py:meth:`Dataset.from_dict`: .. jupyter-execute:: ds_dict = xr.Dataset.from_dict(d) ds_dict Dictionary support allows for flexible use of xarray objects. It doesn't require external libraries and dicts can easily be pickled, or converted to json, or geojson. All the values are converted to lists, so dicts might be quite large. To export just the dataset schema without the data itself, use the ``data=False`` option: .. jupyter-execute:: ds.to_dict(data=False) .. jupyter-execute:: :hide-code: # We're now done with the dataset named `ds`. Although the `with` statement closed # the dataset, displaying the unpickled pickle of `ds` re-opened "saved_on_disk.nc". # However, `ds` (rather than the unpickled dataset) refers to the open file. Delete # `ds` to close the file. del ds for f in ["saved_on_disk.nc", "saved_on_disk.h5"]: if os.path.exists(f): os.remove(f) This can be useful for generating indices of dataset contents to expose to search indices or other automated data discovery tools. .. _io.rasterio: Rasterio -------- GDAL readable raster data using `rasterio`_ such as GeoTIFFs can be opened using the `rioxarray`_ extension. `rioxarray`_ can also handle geospatial related tasks such as re-projecting and clipping. .. jupyter-input:: import rioxarray rds = rioxarray.open_rasterio("RGB.byte.tif") rds .. jupyter-output:: [1703814 values with dtype=uint8] Coordinates: * band (band) int64 1 2 3 * y (y) float64 2.827e+06 2.826e+06 ... 2.612e+06 2.612e+06 * x (x) float64 1.021e+05 1.024e+05 ... 3.389e+05 3.392e+05 spatial_ref int64 0 Attributes: STATISTICS_MAXIMUM: 255 STATISTICS_MEAN: 29.947726688477 STATISTICS_MINIMUM: 0 STATISTICS_STDDEV: 52.340921626611 transform: (300.0379266750948, 0.0, 101985.0, 0.0, -300.0417827... _FillValue: 0.0 scale_factor: 1.0 add_offset: 0.0 grid_mapping: spatial_ref .. jupyter-input:: rds.rio.crs # CRS.from_epsg(32618) rds4326 = rds.rio.reproject("epsg:4326") rds4326.rio.crs # CRS.from_epsg(4326) rds4326.rio.to_raster("RGB.byte.4326.tif") .. _rasterio: https://rasterio.readthedocs.io/en/latest/ .. _rioxarray: https://corteva.github.io/rioxarray/stable/ .. 
_test files: https://github.com/rasterio/rasterio/blob/master/tests/data/RGB.byte.tif .. _pyproj: https://github.com/pyproj4/pyproj .. _io.cfgrib: .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("foo.zarr") shutil.rmtree("path/to/directory.zarr") GRIB format via cfgrib ---------------------- Xarray supports reading GRIB files via ECMWF cfgrib_ python driver, if it is installed. To open a GRIB file supply ``engine='cfgrib'`` to :py:func:`open_dataset` after installing cfgrib_: .. jupyter-input:: ds_grib = xr.open_dataset("example.grib", engine="cfgrib") We recommend installing cfgrib via conda:: conda install -c conda-forge cfgrib .. _cfgrib: https://github.com/ecmwf/cfgrib CSV and other formats supported by pandas ----------------------------------------- For more options (tabular formats and CSV files in particular), consider exporting your objects to pandas and using its broad range of `IO tools`_. For CSV files, one might also consider `xarray_extras`_. .. _xarray_extras: https://xarray-extras.readthedocs.io/en/latest/api/csv.html .. _IO tools: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html Third party libraries --------------------- More formats are supported by extension libraries: - `xarray-mongodb `_: Store xarray objects on MongoDB xarray-2025.12.0/doc/user-guide/options.rst000066400000000000000000000015451511464676000204130ustar00rootroot00000000000000.. currentmodule:: xarray .. _options: Configuration ============= Xarray offers a small number of configuration options through :py:func:`set_options`. With these, you can 1. Control the ``repr``: - ``display_expand_attrs`` - ``display_expand_coords`` - ``display_expand_data`` - ``display_expand_data_vars`` - ``display_max_rows`` - ``display_style`` 2. Control behaviour during operations: ``arithmetic_join``, ``keep_attrs``, ``use_bottleneck``. 3. Control colormaps for plots:``cmap_divergent``, ``cmap_sequential``. 4. Aspects of file reading: ``file_cache_maxsize``, ``netcdf_engine_order``, ``warn_on_unclosed_files``. You can set these options either globally :: xr.set_options(arithmetic_join="exact") or locally as a context manager: :: with xr.set_options(arithmetic_join="exact"): # do operation here pass xarray-2025.12.0/doc/user-guide/pandas.rst000066400000000000000000000232611511464676000201650ustar00rootroot00000000000000.. currentmodule:: xarray .. _pandas: =================== Working with pandas =================== One of the most important features of xarray is the ability to convert to and from :py:mod:`pandas` objects to interact with the rest of the PyData ecosystem. For example, for plotting labeled data, we highly recommend using the `visualization built in to pandas itself`__ or provided by the pandas aware libraries such as `Seaborn`__. __ https://pandas.pydata.org/pandas-docs/stable/visualization.html __ https://seaborn.pydata.org/ .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Hierarchical and tidy data ~~~~~~~~~~~~~~~~~~~~~~~~~~ Tabular data is easiest to work with when it meets the criteria for `tidy data`__: * Each column holds a different variable. * Each rows holds a different observation. __ https://www.jstatsoft.org/v59/i10/ In this "tidy data" format, we can represent any :py:class:`Dataset` and :py:class:`DataArray` in terms of :py:class:`~pandas.DataFrame` and :py:class:`~pandas.Series`, respectively (and vice-versa). 
The representation works by flattening non-coordinates to 1D, and turning the tensor product of coordinate indexes into a :py:class:`pandas.MultiIndex`. Dataset and DataFrame --------------------- To convert any dataset to a ``DataFrame`` in tidy form, use the :py:meth:`Dataset.to_dataframe()` method: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.randn(2, 3))}, coords={ "x": [10, 20], "y": ["a", "b", "c"], "along_x": ("x", np.random.randn(2)), "scalar": 123, }, ) ds .. jupyter-execute:: df = ds.to_dataframe() df We see that each variable and coordinate in the Dataset is now a column in the DataFrame, with the exception of indexes which are in the index. To convert the ``DataFrame`` to any other convenient representation, use ``DataFrame`` methods like :py:meth:`~pandas.DataFrame.reset_index`, :py:meth:`~pandas.DataFrame.stack` and :py:meth:`~pandas.DataFrame.unstack`. For datasets containing dask arrays where the data should be lazily loaded, see the :py:meth:`Dataset.to_dask_dataframe()` method. To create a ``Dataset`` from a ``DataFrame``, use the :py:meth:`Dataset.from_dataframe` class method or the equivalent :py:meth:`pandas.DataFrame.to_xarray` method: .. jupyter-execute:: xr.Dataset.from_dataframe(df) Notice that the dimensions of variables in the ``Dataset`` have now expanded after the round-trip conversion to a ``DataFrame``. This is because every object in a ``DataFrame`` must have the same indices, so we need to broadcast the data of each array to the full size of the new ``MultiIndex``. Likewise, all the coordinates (other than indexes) ended up as variables, because pandas does not distinguish non-index coordinates. DataArray and Series -------------------- ``DataArray`` objects have a complementary representation in terms of a :py:class:`~pandas.Series`. Using a Series preserves the ``Dataset`` to ``DataArray`` relationship, because ``DataFrames`` are dict-like containers of ``Series``. The methods are very similar to those for working with DataFrames: .. jupyter-execute:: s = ds["foo"].to_series() s .. jupyter-execute:: # or equivalently, with Series.to_xarray() xr.DataArray.from_series(s) Both the ``from_series`` and ``from_dataframe`` methods use reindexing, so they work even if the hierarchical index is not a full tensor product: .. jupyter-execute:: s[::2] .. jupyter-execute:: s[::2].to_xarray() Lossless and reversible conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The previous ``Dataset`` example shows that the conversion is not reversible (lossy roundtrip) and that the size of the ``Dataset`` increases. Particularly after a roundtrip, the following deviations are noted: - a non-dimension Dataset ``coordinate`` is converted into ``variable`` - a non-dimension DataArray ``coordinate`` is not converted - ``dtype`` is not always the same (e.g. "str" is converted to "object") - ``attrs`` metadata is not conserved To avoid these problems, the third-party `ntv-pandas `__ library offers lossless and reversible conversions between ``Dataset``/ ``DataArray`` and pandas ``DataFrame`` objects. This solution is particularly interesting for converting any ``DataFrame`` into a ``Dataset`` (the converter finds the multidimensional structure hidden by the tabular structure). The `ntv-pandas examples `__ show how to improve the conversion for the previous ``Dataset`` example and for more complex examples. 
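To make the first of these deviations concrete, here is a minimal sketch (reusing the ``ds`` with the ``along_x`` coordinate built earlier in this section) showing that a non-dimension coordinate comes back as a data variable after a ``DataFrame`` round trip:

.. code-block:: python

    # "along_x" started out as a coordinate, but is a data variable after the round trip
    roundtripped = xr.Dataset.from_dataframe(ds.to_dataframe())
    print("along_x" in roundtripped.data_vars)  # True
    print("along_x" in roundtripped.coords)  # False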
Multi-dimensional data ~~~~~~~~~~~~~~~~~~~~~~ Tidy data is great, but sometimes you want to preserve dimensions instead of automatically stacking them into a ``MultiIndex``. :py:meth:`DataArray.to_pandas()` is a shortcut that lets you convert a DataArray directly into a pandas object with the same dimensionality, if available in pandas (i.e., a 1D array is converted to a :py:class:`~pandas.Series` and 2D to :py:class:`~pandas.DataFrame`): .. jupyter-execute:: arr = xr.DataArray( np.random.randn(2, 3), coords=[("x", [10, 20]), ("y", ["a", "b", "c"])] ) df = arr.to_pandas() df To perform the inverse operation of converting any pandas objects into a data array with the same shape, simply use the :py:class:`DataArray` constructor: .. jupyter-execute:: xr.DataArray(df) Both the ``DataArray`` and ``Dataset`` constructors directly convert pandas objects into xarray objects with the same shape. This means that they preserve all use of multi-indexes: .. jupyter-execute:: index = pd.MultiIndex.from_arrays( [["a", "a", "b"], [0, 1, 2]], names=["one", "two"] ) df = pd.DataFrame({"x": 1, "y": 2}, index=index) ds = xr.Dataset(df) ds However, you will need to set dimension names explicitly, either with the ``dims`` argument in the ``DataArray`` constructor or by calling :py:meth:`~Dataset.rename` on the new object. .. _panel transition: Transitioning from pandas.Panel to xarray ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``Panel``, pandas' data structure for 3D arrays, was always a second-class data structure compared to the Series and DataFrame. To allow pandas developers to focus more on its core functionality built around the DataFrame, pandas removed ``Panel`` in favor of directing users who use multi-dimensional arrays to xarray. Xarray has most of ``Panel``'s features, a more explicit API (particularly around indexing), and the ability to scale to >3 dimensions with the same interface. As discussed in the :ref:`data structures section of the docs `, there are two primary data structures in xarray: ``DataArray`` and ``Dataset``. You can imagine a ``DataArray`` as an n-dimensional pandas ``Series`` (i.e. a single typed array), and a ``Dataset`` as the ``DataFrame`` equivalent (i.e. a dict of aligned ``DataArray`` objects). So you can represent a Panel in two ways: - As a 3-dimensional ``DataArray``, - Or as a ``Dataset`` containing a number of 2-dimensional DataArray objects. Let's take a look: .. jupyter-execute:: data = np.random.default_rng(0).random((2, 3, 4)) items = list("ab") major_axis = list("mno") minor_axis = pd.date_range(start="2000", periods=4, name="date") With old versions of pandas (prior to 0.25), this could be stored in a ``Panel``: .. jupyter-input:: pd.Panel(data, items, major_axis, minor_axis) .. jupyter-output:: Dimensions: 2 (items) x 3 (major_axis) x 4 (minor_axis) Items axis: a to b Major_axis axis: m to o Minor_axis axis: 2000-01-01 00:00:00 to 2000-01-04 00:00:00 To put this data in a ``DataArray``, write: .. jupyter-execute:: array = xr.DataArray(data, [items, major_axis, minor_axis]) array As you can see, there are three dimensions (each is also a coordinate). Two of the axes were unnamed, so have been assigned ``dim_0`` and ``dim_1`` respectively, while the third retains its name ``date``. You can also easily convert this data into a ``Dataset``: .. jupyter-execute:: array.to_dataset(dim="dim_0") Here, there are two data variables, each representing a DataFrame on the panel's ``items`` axis, and labeled as such.
Each variable is a 2D array of the respective values along the ``items`` dimension. While the xarray docs are relatively complete, a few items stand out for Panel users: - A DataArray's data is stored as a numpy array, and so can only contain a single type. As a result, a Panel that contains :py:class:`~pandas.DataFrame` objects with multiple types will be converted to ``dtype=object``. A ``Dataset`` of multiple ``DataArray`` objects each with its own dtype will allow original types to be preserved. - :ref:`Indexing ` is similar to pandas, but more explicit and leverages xarray's naming of dimensions. - Because of those features, making much higher dimensional data is very practical. - Variables in ``Dataset`` objects can use a subset of its dimensions. For example, you can have one dataset with Person x Score x Time, and another with Person x Score. - You can use coordinates are used for both dimensions and for variables which _label_ the data variables, so you could have a coordinate Age, that labelled the Person dimension of a Dataset of Person x Score x Time. While xarray may take some getting used to, it's worth it! If anything is unclear, please `post an issue on GitHub `__ or `StackOverflow `__, and we'll endeavor to respond to the specific case or improve the general docs. xarray-2025.12.0/doc/user-guide/plotting.rst000066400000000000000000000666101511464676000205640ustar00rootroot00000000000000.. currentmodule:: xarray .. _plotting: Plotting ======== Introduction ------------ Labeled data enables expressive computations. These same labels can also be used to easily create informative plots. Xarray's plotting capabilities are centered around :py:class:`DataArray` objects. To plot :py:class:`Dataset` objects simply access the relevant DataArrays, i.e. ``dset['var1']``. Dataset specific plotting routines are also available (see :ref:`plot-dataset`). Here we focus mostly on arrays 2d or larger. If your data fits nicely into a pandas DataFrame then you're better off using one of the more developed tools there. Xarray plotting functionality is a thin wrapper around the popular `matplotlib `_ library. Matplotlib syntax and function names were copied as much as possible, which makes for an easy transition between the two. Matplotlib must be installed before xarray can plot. To use xarray's plotting capabilities with time coordinates containing ``cftime.datetime`` objects `nc-time-axis `_ v1.3.0 or later needs to be installed. For more extensive plotting applications consider the following projects: - `Seaborn `_: "provides a high-level interface for drawing attractive statistical graphics." Integrates well with pandas. - `HoloViews `_ and `GeoViews `_: "Composable, declarative data structures for building even complex visualizations easily." Includes native support for xarray objects. - `hvplot `_: ``hvplot`` makes it very easy to produce dynamic plots (backed by ``Holoviews`` or ``Geoviews``) by adding a ``hvplot`` accessor to DataArrays. - `Cartopy `_: Provides cartographic tools. Imports ~~~~~~~ .. jupyter-execute:: :hide-code: # Use defaults so we don't get gridlines in generated docs import matplotlib as mpl mpl.rcdefaults() The following imports are necessary for all of the examples. .. jupyter-execute:: import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr For these examples we'll use the North American air temperature dataset. .. jupyter-execute:: airtemps = xr.tutorial.open_dataset("air_temperature") airtemps .. 
jupyter-execute:: # Convert to celsius air = airtemps.air - 273.15 # copy attributes to get nice figure labels and change Kelvin to Celsius air.attrs = airtemps.air.attrs air.attrs["units"] = "deg C" .. note:: Until :issue:`1614` is solved, you might need to copy over the metadata in ``attrs`` to get informative figure labels (as was done above). DataArrays ---------- One Dimension ~~~~~~~~~~~~~ ================ Simple Example ================ The simplest way to make a plot is to call the :py:func:`DataArray.plot()` method. .. jupyter-execute:: air1d = air.isel(lat=10, lon=10) air1d.plot(); Xarray uses the coordinate name along with metadata ``attrs.long_name``, ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available) to label the axes. The names ``long_name``, ``standard_name`` and ``units`` are copied from the `CF-conventions spec `_. When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``. The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``. .. jupyter-execute:: air1d.attrs ====================== Additional Arguments ====================== Additional arguments are passed directly to the matplotlib function which does the work. For example, :py:func:`xarray.plot.line` calls matplotlib.pyplot.plot_ passing in the index and the array values as x and y, respectively. So to make a line plot with blue triangles a matplotlib format string can be used: .. _matplotlib.pyplot.plot: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot .. jupyter-execute:: air1d[:200].plot.line("b-^"); .. note:: Not all xarray plotting methods support passing positional arguments to the wrapped matplotlib functions, but they do all support keyword arguments. Keyword arguments work the same way, and are more explicit. .. jupyter-execute:: air1d[:200].plot.line(color="purple", marker="o"); ========================= Adding to Existing Axis ========================= To add the plot to an existing axis pass in the axis as a keyword argument ``ax``. This works for all xarray plotting methods. In this example ``axs`` is an array consisting of the left and right axes created by ``plt.subplots``. .. jupyter-execute:: fig, axs = plt.subplots(ncols=2) print(axs) air1d.plot(ax=axs[0]) air1d.plot.hist(ax=axs[1]); On the right is a histogram created by :py:func:`xarray.plot.hist`. .. _plotting.figsize: ============================= Controlling the figure size ============================= You can pass a ``figsize`` argument to all xarray's plotting methods to control the figure size. For convenience, xarray's plotting methods also support the ``aspect`` and ``size`` arguments which control the size of the resulting image via the formula ``figsize = (aspect * size, size)``: .. jupyter-execute:: air1d.plot(aspect=2, size=3); This feature also works with :ref:`plotting.faceting`. For facet plots, ``size`` and ``aspect`` refer to a single panel (so that ``aspect * size`` gives the width of each facet in inches), while ``figsize`` refers to the entire figure (as for matplotlib's ``figsize`` argument). .. note:: If ``figsize`` or ``size`` are used, a new figure is created, so this is mutually exclusive with the ``ax`` argument. .. note:: The convention used by xarray (``figsize = (aspect * size, size)``) is borrowed from seaborn: it is therefore `not equivalent to matplotlib's`_. .. _not equivalent to matplotlib's: https://github.com/mwaskom/seaborn/issues/746 .. 
_plotting.multiplelines: ========================= Determine x-axis values ========================= Per default dimension coordinates are used for the x-axis (here the time coordinates). However, you can also use non-dimension coordinates, MultiIndex levels, and dimensions without coordinates along the x-axis. To illustrate this, let's calculate a 'decimal day' (epoch) from the time and assign it as a non-dimension coordinate: .. jupyter-execute:: decimal_day = (air1d.time - air1d.time[0]) / pd.Timedelta("1d") air1d_multi = air1d.assign_coords(decimal_day=("time", decimal_day.data)) air1d_multi To use ``'decimal_day'`` as x coordinate it must be explicitly specified: .. jupyter-execute:: air1d_multi.plot(x="decimal_day"); Creating a new MultiIndex named ``'date'`` from ``'time'`` and ``'decimal_day'``, it is also possible to use a MultiIndex level as x-axis: .. jupyter-execute:: air1d_multi = air1d_multi.set_index(date=("time", "decimal_day")) air1d_multi.plot(x="decimal_day"); Finally, if a dataset does not have any coordinates it enumerates all data points: .. jupyter-execute:: air1d_multi = air1d_multi.drop_vars(["date", "time", "decimal_day"]) air1d_multi.plot(); The same applies to 2D plots below. ==================================================== Multiple lines showing variation along a dimension ==================================================== It is possible to make line plots of two-dimensional data by calling :py:func:`xarray.plot.line` with appropriate arguments. Consider the 3D variable ``air`` defined above. We can use line plots to check the variation of air temperature at three different latitudes along a longitude line: .. jupyter-execute:: air.isel(lon=10, lat=[19, 21, 22]).plot.line(x="time"); It is required to explicitly specify either 1. ``x``: the dimension to be used for the x-axis, or 2. ``hue``: the dimension you want to represent by multiple lines. Thus, we could have made the previous plot by specifying ``hue='lat'`` instead of ``x='time'``. If required, the automatic legend can be turned off using ``add_legend=False``. Alternatively, ``hue`` can be passed directly to :py:func:`xarray.plot.line` as ``air.isel(lon=10, lat=[19,21,22]).plot.line(hue='lat')``. ======================== Dimension along y-axis ======================== It is also possible to make line plots such that the data are on the x-axis and a dimension is on the y-axis. This can be done by specifying the appropriate ``y`` keyword argument. .. jupyter-execute:: air.isel(time=10, lon=[10, 11]).plot(y="lat", hue="lon"); ============ Step plots ============ As an alternative, also a step plot similar to matplotlib's ``plt.step`` can be made using 1D data. .. jupyter-execute:: air1d[:20].plot.step(where="mid"); The argument ``where`` defines where the steps should be placed, options are ``'pre'`` (default), ``'post'``, and ``'mid'``. This is particularly handy when plotting data grouped with :py:meth:`Dataset.groupby_bins`. .. jupyter-execute:: air_grp = air.mean(["time", "lon"]).groupby_bins("lat", [0, 23.5, 66.5, 90]) air_mean = air_grp.mean() air_std = air_grp.std() air_mean.plot.step() (air_mean + air_std).plot.step(ls=":") (air_mean - air_std).plot.step(ls=":") plt.ylim(-20, 30) plt.title("Zonal mean temperature"); In this case, the actual boundaries of the bins are used and the ``where`` argument is ignored. Other axes kwargs ~~~~~~~~~~~~~~~~~ The keyword arguments ``xincrease`` and ``yincrease`` let you control the axes direction. .. 
jupyter-execute:: air.isel(time=10, lon=[10, 11]).plot.line( y="lat", hue="lon", xincrease=False, yincrease=False ); In addition, one can use ``xscale, yscale`` to set axes scaling; ``xticks, yticks`` to set axes ticks and ``xlim, ylim`` to set axes limits. These accept the same values as the matplotlib methods ``ax.set_(x,y)scale()``, ``ax.set_(x,y)ticks()``, ``ax.set_(x,y)lim()``, respectively. Two Dimensions ~~~~~~~~~~~~~~ ================ Simple Example ================ The default method :py:meth:`DataArray.plot` calls :py:func:`xarray.plot.pcolormesh` by default when the data is two-dimensional. .. jupyter-execute:: air2d = air.isel(time=500) air2d.plot(); All 2d plots in xarray allow the use of the keyword arguments ``yincrease`` and ``xincrease``. .. jupyter-execute:: air2d.plot(yincrease=False); .. note:: We use :py:func:`xarray.plot.pcolormesh` as the default two-dimensional plot method because it is more flexible than :py:func:`xarray.plot.imshow`. However, for large arrays, ``imshow`` can be much faster than ``pcolormesh``. If speed is important to you and you are plotting a regular mesh, consider using ``imshow``. ================ Missing Values ================ Xarray plots data with :ref:`missing_values`. .. jupyter-execute:: bad_air2d = air2d.copy() bad_air2d[dict(lat=slice(0, 10), lon=slice(0, 25))] = np.nan bad_air2d.plot(); ======================== Nonuniform Coordinates ======================== It's not necessary for the coordinates to be evenly spaced. Both :py:func:`xarray.plot.pcolormesh` (default) and :py:func:`xarray.plot.contourf` can produce plots with nonuniform coordinates. .. jupyter-execute:: b = air2d.copy() # Apply a nonlinear transformation to one of the coords b.coords["lat"] = np.log(b.coords["lat"]) b.plot(); ==================== Other types of plot ==================== There are several other options for plotting 2D data. Contour plot using :py:meth:`DataArray.plot.contour()` .. jupyter-execute:: air2d.plot.contour(); Filled contour plot using :py:meth:`DataArray.plot.contourf()` .. jupyter-execute:: air2d.plot.contourf(); Surface plot using :py:meth:`DataArray.plot.surface()` .. jupyter-execute:: # transpose just to make the example look a bit nicer air2d.T.plot.surface(); ==================== Calling Matplotlib ==================== Since this is a thin wrapper around matplotlib, all the functionality of matplotlib is available. .. jupyter-execute:: air2d.plot(cmap=plt.cm.Blues) plt.title("These colors prove North America\nhas fallen in the ocean") plt.ylabel("latitude") plt.xlabel("longitude"); .. note:: Xarray methods update label information and generally play around with the axes. So any kind of updates to the plot should be done *after* the call to the xarray's plot. In the example below, ``plt.xlabel`` effectively does nothing, since ``d_ylog.plot()`` updates the xlabel. .. jupyter-execute:: plt.xlabel("Never gonna see this.") air2d.plot(); =========== Colormaps =========== Xarray borrows logic from Seaborn to infer what kind of color map to use. For example, consider the original data in Kelvins rather than Celsius: .. jupyter-execute:: airtemps.air.isel(time=0).plot(); The Celsius data contain 0, so a diverging color map was used. The Kelvins do not have 0, so the default color map was used. .. _robust-plotting: ======== Robust ======== Outliers often have an extreme effect on the output of the plot. Here we add two bad data points. This affects the color scale, washing out the plot. .. 
jupyter-execute:: air_outliers = airtemps.air.isel(time=0).copy() air_outliers[0, 0] = 100 air_outliers[-1, -1] = 400 air_outliers.plot(); This plot shows that we have outliers. The easy way to visualize the data without the outliers is to pass the parameter ``robust=True``. This will use the 2nd and 98th percentiles of the data to compute the color limits. .. jupyter-execute:: air_outliers.plot(robust=True); Observe that the ranges of the color bar have changed. The arrows on the color bar indicate that the colors include data points outside the bounds. ==================== Discrete Colormaps ==================== It is often useful, when visualizing 2d data, to use a discrete colormap, rather than the default continuous colormaps that matplotlib uses. The ``levels`` keyword argument can be used to generate plots with discrete colormaps. For example, to make a plot with 8 discrete color intervals: .. jupyter-execute:: air2d.plot(levels=8); It is also possible to use a list of levels to specify the boundaries of the discrete colormap: .. jupyter-execute:: air2d.plot(levels=[0, 12, 18, 30]); You can also specify a list of discrete colors through the ``colors`` argument: .. jupyter-execute:: flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"] air2d.plot(levels=[0, 12, 18, 30], colors=flatui); Finally, if you have `Seaborn `_ installed, you can also specify a seaborn color palette to the ``cmap`` argument. Note that ``levels`` *must* be specified with seaborn color palettes if using ``imshow`` or ``pcolormesh`` (but not with ``contour`` or ``contourf``, since levels are chosen automatically). .. jupyter-execute:: air2d.plot(levels=10, cmap="husl"); .. _plotting.faceting: Faceting ~~~~~~~~ Faceting here refers to splitting an array along one or two dimensions and plotting each group. Xarray's basic plotting is useful for plotting two dimensional arrays. What about three or four dimensional arrays? That's where facets become helpful. The general approach to plotting here is called โ€œsmall multiplesโ€, where the same kind of plot is repeated multiple times, and the specific use of small multiples to display the same relationship conditioned on one or more other variables is often called a โ€œtrellis plotโ€. Consider the temperature data set. There are 4 observations per day for two years which makes for 2920 values along the time dimension. One way to visualize this data is to make a separate plot for each time period. The faceted dimension should not have too many values; faceting on the time dimension will produce 2920 plots. That's too much to be helpful. To handle this situation try performing an operation that reduces the size of the data in some way. For example, we could compute the average air temperature for each month and reduce the size of this dimension from 2920 -> 12. A simpler way is to just take a slice on that dimension. So let's use a slice to pick 6 times throughout the first year. .. jupyter-execute:: t = air.isel(time=slice(0, 365 * 4, 250)) t.coords ================ Simple Example ================ The easiest way to create faceted plots is to pass in ``row`` or ``col`` arguments to the xarray plotting methods/functions. This returns a :py:class:`xarray.plot.FacetGrid` object. .. jupyter-execute:: g_simple = t.plot(x="lon", y="lat", col="time", col_wrap=3); Faceting also works for line plots. .. 
jupyter-execute:: g_simple_line = t.isel(lat=slice(0, None, 4)).plot( x="lon", hue="lat", col="time", col_wrap=3 ); =============== 4 dimensional =============== For 4 dimensional arrays we can use the rows and columns of the grids. Here we create a 4 dimensional array by taking the original data and adding a fixed amount. Now we can see how the temperature maps would compare if one were much hotter. .. jupyter-execute:: t2 = t.isel(time=slice(0, 2)) t4d = xr.concat([t2, t2 + 40], pd.Index(["normal", "hot"], name="fourth_dim")) # This is a 4d array t4d.coords t4d.plot(x="lon", y="lat", col="time", row="fourth_dim"); ================ Other features ================ Faceted plotting supports other arguments common to xarray 2d plots. .. jupyter-execute:: hasoutliers = t.isel(time=slice(0, 5)).copy() hasoutliers[0, 0, 0] = -100 hasoutliers[-1, -1, -1] = 400 g = hasoutliers.plot.pcolormesh( x="lon", y="lat", col="time", col_wrap=3, robust=True, cmap="viridis", cbar_kwargs={"label": "this has outliers"}, ) =================== FacetGrid Objects =================== The object returned, ``g`` in the above examples, is a :py:class:`~xarray.plot.FacetGrid` object that links a :py:class:`DataArray` to a matplotlib figure with a particular structure. This object can be used to control the behavior of the multiple plots. It borrows an API and code from `Seaborn's FacetGrid `_. The structure is contained within the ``axs`` and ``name_dicts`` attributes, both 2d NumPy object arrays. .. jupyter-execute:: g.axs .. jupyter-execute:: g.name_dicts It's possible to select the :py:class:`xarray.DataArray` or :py:class:`xarray.Dataset` corresponding to the FacetGrid through the ``name_dicts``. .. jupyter-execute:: g.data.loc[g.name_dicts[0, 0]] Here is an example of using the lower level API and then modifying the axes after they have been plotted. .. jupyter-execute:: g = t.plot.imshow(x="lon", y="lat", col="time", col_wrap=3, robust=True) for i, ax in enumerate(g.axs.flat): ax.set_title("Air Temperature %d" % i) bottomright = g.axs[-1, -1] bottomright.annotate("bottom right", (240, 40)); :py:class:`~xarray.plot.FacetGrid` objects have methods that let you customize the automatically generated axis labels, axis ticks and plot titles. See :py:meth:`~xarray.plot.FacetGrid.set_titles`, :py:meth:`~xarray.plot.FacetGrid.set_xlabels`, :py:meth:`~xarray.plot.FacetGrid.set_ylabels` and :py:meth:`~xarray.plot.FacetGrid.set_ticks` for more information. Plotting functions can be applied to each subset of the data by calling :py:meth:`~xarray.plot.FacetGrid.map_dataarray` or to each subplot by calling :py:meth:`~xarray.plot.FacetGrid.map`. TODO: add an example of using the ``map`` method to plot dataset variables (e.g., with ``plt.quiver``). .. _plot-dataset: Datasets -------- Xarray has limited support for plotting Dataset variables against each other. Consider this dataset .. jupyter-execute:: ds = xr.tutorial.scatter_example_dataset(seed=42) ds Scatter ~~~~~~~ Let's plot the ``A`` DataArray as a function of the ``y`` coord .. jupyter-execute:: with xr.set_options(display_expand_data=False): display(ds.A) .. jupyter-execute:: ds.A.plot.scatter(x="y"); Same plot can be displayed using the dataset: .. jupyter-execute:: ds.plot.scatter(x="y", y="A"); Now suppose we want to scatter the ``A`` DataArray against the ``B`` DataArray .. jupyter-execute:: ds.plot.scatter(x="A", y="B"); The ``hue`` kwarg lets you vary the color by variable value .. 
jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w"); You can force a legend instead of a colorbar by setting ``add_legend=True, add_colorbar=False``. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w", add_legend=True, add_colorbar=False); .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w", add_legend=False, add_colorbar=True); The ``markersize`` kwarg lets you vary the point's size by variable value. You can additionally pass ``size_norm`` to control how the variable's values are mapped to point sizes. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="y", markersize="z"); The ``z`` kwarg lets you plot the data along the z-axis as well. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x"); Faceting is also possible .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="y", markersize="x", row="x", col="w"); And adding the z-axis .. jupyter-execute:: ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x", row="x", col="w"); For more advanced scatter plots, we recommend converting the relevant data variables to a pandas DataFrame and using the extensive plotting capabilities of ``seaborn``. Quiver ~~~~~~ Visualizing vector fields is supported with quiver plots: .. jupyter-execute:: ds.isel(w=1, z=1).plot.quiver(x="x", y="y", u="A", v="B"); where ``u`` and ``v`` denote the x and y direction components of the arrow vectors. Again, faceting is also possible: .. jupyter-execute:: ds.plot.quiver(x="x", y="y", u="A", v="B", col="w", row="z", scale=4); ``scale`` is required for faceted quiver plots. The scale determines the number of data units per arrow length unit, i.e. a smaller scale parameter makes the arrow longer. Streamplot ~~~~~~~~~~ Visualizing vector fields is also supported with streamline plots: .. jupyter-execute:: ds.isel(w=1, z=1).plot.streamplot(x="x", y="y", u="A", v="B"); where ``u`` and ``v`` denote the x and y direction components of the vectors tangent to the streamlines. Again, faceting is also possible: .. jupyter-execute:: ds.plot.streamplot(x="x", y="y", u="A", v="B", col="w", row="z"); .. _plot-maps: Maps ---- To follow this section you'll need to have Cartopy installed and working. This script will plot the air temperature on a map. .. jupyter-execute:: :stderr: air = xr.tutorial.open_dataset("air_temperature").air p = air.isel(time=0).plot( subplot_kws=dict(projection=ccrs.Orthographic(-80, 35), facecolor="gray"), transform=ccrs.PlateCarree(), ) p.axes.set_global() p.axes.coastlines(); When faceting on maps, the projection can be transferred to the ``plot`` function using the ``subplot_kws`` keyword. The axes for the subplots created by faceting are accessible in the object returned by ``plot``: .. jupyter-execute:: p = air.isel(time=[0, 4]).plot( transform=ccrs.PlateCarree(), col="time", subplot_kws={"projection": ccrs.Orthographic(-80, 35)}, ) for ax in p.axs.flat: ax.coastlines() ax.gridlines() Details ------- Ways to Use ~~~~~~~~~~~ There are three ways to use the xarray plotting functionality: 1. Use ``plot`` as a convenience method for a DataArray. 2. Access a specific plotting method from the ``plot`` attribute of a DataArray. 3. Directly from the xarray plot submodule. These are provided for user convenience; they all call the same code. .. jupyter-execute:: da = xr.DataArray(range(5)) fig, axs = plt.subplots(ncols=2, nrows=2) da.plot(ax=axs[0, 0]) da.plot.line(ax=axs[0, 1]) xr.plot.plot(da, ax=axs[1, 0]) xr.plot.line(da, ax=axs[1, 1]); Here the output is the same. 
Since the data is 1 dimensional the line plot was used. The convenience method :py:meth:`xarray.DataArray.plot` dispatches to an appropriate plotting function based on the dimensions of the ``DataArray`` and whether the coordinates are sorted and uniformly spaced. This table describes what gets plotted: =============== =========================== Dimensions Plotting function --------------- --------------------------- 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Coordinates ~~~~~~~~~~~ If you'd like to find out what's really going on in the coordinate system, read on. .. jupyter-execute:: a0 = xr.DataArray(np.zeros((4, 3, 2)), dims=("y", "x", "z"), name="temperature") a0[0, 0, 0] = 1 a = a0.isel(z=0) a The plot will produce an image corresponding to the values of the array. Hence the top left pixel will be a different color than the others. Before reading on, you may want to look at the coordinates and think carefully about what the limits, labels, and orientation for each of the axes should be. .. jupyter-execute:: a.plot(); It may seem strange that the values on the y axis are decreasing with -0.5 on the top. This is because the pixels are centered over their coordinates, and the axis labels and ranges correspond to the values of the coordinates. Multidimensional coordinates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also: :ref:`/examples/multidimensional-coords.ipynb`. You can plot irregular grids defined by multidimensional coordinates with xarray, but you'll have to tell the plot function to use these coordinates instead of the default ones: .. jupyter-execute:: lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4)) lon += lat / 10 lat += lon / 10 da = xr.DataArray( np.arange(20).reshape(4, 5), dims=["y", "x"], coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)}, ) da.plot.pcolormesh(x="lon", y="lat"); Note that in this case, xarray still follows the pixel centered convention. This might be undesirable in some cases, for example when your data is defined on a polar projection (:issue:`781`). This is why the default is to not follow this convention when plotting on a map: .. jupyter-execute:: :stderr: ax = plt.subplot(projection=ccrs.PlateCarree()) da.plot.pcolormesh(x="lon", y="lat", ax=ax) ax.scatter(lon, lat, transform=ccrs.PlateCarree()) ax.coastlines() ax.gridlines(draw_labels=True); You can however decide to infer the cell boundaries and use the ``infer_intervals`` keyword: .. jupyter-execute:: ax = plt.subplot(projection=ccrs.PlateCarree()) da.plot.pcolormesh(x="lon", y="lat", ax=ax, infer_intervals=True) ax.scatter(lon, lat, transform=ccrs.PlateCarree()) ax.coastlines() ax.gridlines(draw_labels=True); .. note:: The data model of xarray does not support datasets with `cell boundaries`_ yet. If you want to use these coordinates, you'll have to make the plots outside the xarray framework. .. _cell boundaries: https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#cell-boundaries One can also make line plots with multidimensional coordinates. In this case, ``hue`` must be a dimension name, not a coordinate name. .. jupyter-execute:: f, ax = plt.subplots(2, 1) da.plot.line(x="lon", hue="y", ax=ax[0]) da.plot.line(x="lon", hue="x", ax=ax[1]); xarray-2025.12.0/doc/user-guide/reshaping.rst000066400000000000000000000264151511464676000207030ustar00rootroot00000000000000.. 
_reshape: ############################### Reshaping and reorganizing data ############################### Reshaping and reorganizing data refers to the process of changing the structure or organization of data by modifying dimensions, array shapes, order of values, or indexes. Xarray provides several methods to accomplish these tasks. These methods are particularly useful for reshaping xarray objects for use in machine learning packages, such as scikit-learn, that usually require two-dimensional numpy arrays as inputs. Reshaping can also be required before passing data to external visualization tools, for example geospatial data might expect input organized into a particular format corresponding to stacks of satellite images. Importing the library --------------------- .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) # Use defaults so we don't get gridlines in generated docs import matplotlib as mpl mpl.rcdefaults() Reordering dimensions --------------------- To reorder dimensions on a :py:class:`~xarray.DataArray` or across all variables on a :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.transpose`. An ellipsis (`...`) can be used to represent all other dimensions: .. jupyter-execute:: ds = xr.Dataset({"foo": (("x", "y", "z"), [[[42]]]), "bar": (("y", "z"), [[24]])}) ds.transpose("y", "z", "x") # equivalent to ds.transpose(..., "x") .. jupyter-execute:: ds.transpose() # reverses all dimensions Expand and squeeze dimensions ----------------------------- To expand a :py:class:`~xarray.DataArray` or all variables on a :py:class:`~xarray.Dataset` along a new dimension, use :py:meth:`~xarray.DataArray.expand_dims` .. jupyter-execute:: expanded = ds.expand_dims("w") expanded This method attaches a new dimension with size 1 to all data variables. To remove such a size-1 dimension from the :py:class:`~xarray.DataArray` or :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.squeeze` .. jupyter-execute:: expanded.squeeze("w") Converting between datasets and arrays -------------------------------------- To convert from a Dataset to a DataArray, use :py:meth:`~xarray.Dataset.to_dataarray`: .. jupyter-execute:: arr = ds.to_dataarray() arr This method broadcasts all data variables in the dataset against each other, then concatenates them along a new dimension into a new array while preserving coordinates. To convert back from a DataArray to a Dataset, use :py:meth:`~xarray.DataArray.to_dataset`: .. jupyter-execute:: arr.to_dataset(dim="variable") The broadcasting behavior of ``to_dataarray`` means that the resulting array includes the union of data variable dimensions: .. jupyter-execute:: ds2 = xr.Dataset({"a": 0, "b": ("x", [3, 4, 5])}) # the input dataset has 4 elements ds2 .. jupyter-execute:: # the resulting array has 6 elements ds2.to_dataarray() Otherwise, the result could not be represented as an orthogonal array. If you use ``to_dataset`` without supplying the ``dim`` argument, the DataArray will be converted into a Dataset of one variable: .. jupyter-execute:: arr.to_dataset(name="combined") .. _reshape.stack: Stack and unstack ----------------- As part of xarray's nascent support for :py:class:`pandas.MultiIndex`, we have implemented :py:meth:`~xarray.DataArray.stack` and :py:meth:`~xarray.DataArray.unstack` method, for combining or splitting dimensions: .. 
jupyter-execute:: array = xr.DataArray( np.random.randn(2, 3), coords=[("x", ["a", "b"]), ("y", [0, 1, 2])] ) stacked = array.stack(z=("x", "y")) stacked .. jupyter-execute:: stacked.unstack("z") As elsewhere in xarray, an ellipsis (`...`) can be used to represent all unlisted dimensions: .. jupyter-execute:: stacked = array.stack(z=[..., "x"]) stacked These methods are modeled on the :py:class:`pandas.DataFrame` methods of the same name, although in xarray they always create new dimensions rather than adding to the existing index or columns. Like :py:meth:`DataFrame.unstack`, xarray's ``unstack`` always succeeds, even if the multi-index being unstacked does not contain all possible levels. Missing levels are filled in with ``NaN`` in the resulting object: .. jupyter-execute:: stacked2 = stacked[::2] stacked2 .. jupyter-execute:: stacked2.unstack("z") However, xarray's ``stack`` has an important difference from pandas: unlike pandas, it does not automatically drop missing values. Compare: .. jupyter-execute:: array = xr.DataArray([[np.nan, 1], [2, 3]], dims=["x", "y"]) array.stack(z=("x", "y")) .. jupyter-execute:: array.to_pandas().stack() We departed from pandas's behavior here because predictable shapes for new array dimensions are necessary for :ref:`dask`. .. _reshape.stacking_different: Stacking different variables together ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These stacking and unstacking operations are particularly useful for reshaping xarray objects for use in machine learning packages, such as `scikit-learn `_, that usually require two-dimensional numpy arrays as inputs. For datasets with only one variable, we only need ``stack`` and ``unstack``, but combining multiple variables in a :py:class:`xarray.Dataset` is more complicated. If the variables in the dataset have matching numbers of dimensions, we can call :py:meth:`~xarray.Dataset.to_dataarray` and then stack along the new coordinate. But :py:meth:`~xarray.Dataset.to_dataarray` will broadcast the dataarrays together, which will effectively tile the lower dimensional variable along the missing dimensions. The method :py:meth:`xarray.Dataset.to_stacked_array` allows combining variables of differing dimensions without this wasteful copying while :py:meth:`xarray.DataArray.to_unstacked_dataset` reverses this operation. Just as with :py:meth:`xarray.Dataset.stack`, the stacked coordinate is represented by a :py:class:`pandas.MultiIndex` object. These methods are used like this: .. jupyter-execute:: data = xr.Dataset( data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])}, coords={"y": ["u", "v", "w"]}, ) data .. jupyter-execute:: stacked = data.to_stacked_array("z", sample_dims=["x"]) stacked .. jupyter-execute:: unstacked = stacked.to_unstacked_dataset("z") unstacked In this example, ``stacked`` is a two dimensional array that we can easily pass to scikit-learn or another generic numerical method. .. note:: Unlike with ``stack``, in ``to_stacked_array``, the user specifies the dimensions they **do not** want stacked. For a machine learning task, these unstacked dimensions can be interpreted as the dimensions over which samples are drawn, whereas the stacked coordinates are the features. Naturally, all variables should possess these sampling dimensions. ..
_reshape.set_index: Set and reset index ------------------- Complementary to stack / unstack, xarray's ``.set_index``, ``.reset_index`` and ``.reorder_levels`` allow easy manipulation of ``DataArray`` or ``Dataset`` multi-indexes without modifying the data and its dimensions. You can create a multi-index from several 1-dimensional variables and/or coordinates using :py:meth:`~xarray.DataArray.set_index`: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4), coords={ "band": ("x", ["a", "a", "b", "b"]), "wavenumber": ("x", np.linspace(200, 400, 4)), }, dims="x", ) da .. jupyter-execute:: mda = da.set_index(x=["band", "wavenumber"]) mda These coordinates can now be used for indexing, e.g., .. jupyter-execute:: mda.sel(band="a") Conversely, you can use :py:meth:`~xarray.DataArray.reset_index` to extract multi-index levels as coordinates (this is mainly useful for serialization): .. jupyter-execute:: mda.reset_index("x") :py:meth:`~xarray.DataArray.reorder_levels` allows changing the order of multi-index levels: .. jupyter-execute:: mda.reorder_levels(x=["wavenumber", "band"]) As of xarray v0.9 coordinate labels for each dimension are optional. You can also use ``.set_index`` / ``.reset_index`` to add / remove labels for one or several dimensions: .. jupyter-execute:: array = xr.DataArray([1, 2, 3], dims="x") array .. jupyter-execute:: array["c"] = ("x", ["a", "b", "c"]) array.set_index(x="c") .. jupyter-execute:: array = array.set_index(x="c") array = array.reset_index("x", drop=True) .. _reshape.shift_and_roll: Shift and roll -------------- To adjust coordinate labels, you can use the :py:meth:`~xarray.Dataset.shift` and :py:meth:`~xarray.Dataset.roll` methods: .. jupyter-execute:: array = xr.DataArray([1, 2, 3, 4], dims="x") array.shift(x=2) .. jupyter-execute:: array.roll(x=2, roll_coords=True) .. _reshape.sort: Sort ---- One may sort a DataArray/Dataset via :py:meth:`~xarray.DataArray.sortby` and :py:meth:`~xarray.Dataset.sortby`. The input can be an individual or list of 1D ``DataArray`` objects: .. jupyter-execute:: ds = xr.Dataset( { "A": (("x", "y"), [[1, 2], [3, 4]]), "B": (("x", "y"), [[5, 6], [7, 8]]), }, coords={"x": ["b", "a"], "y": [1, 0]}, ) dax = xr.DataArray([100, 99], [("x", [0, 1])]) day = xr.DataArray([90, 80], [("y", [0, 1])]) ds.sortby([day, dax]) As a shortcut, you can refer to existing coordinates by name: .. jupyter-execute:: ds.sortby("x") .. jupyter-execute:: ds.sortby(["y", "x"]) .. jupyter-execute:: ds.sortby(["y", "x"], ascending=False) .. _reshape.coarsen: Reshaping via coarsen --------------------- Whilst :py:class:`~xarray.DataArray.coarsen` is normally used for reducing your data's resolution by applying a reduction function (see the :ref:`page on computation`), it can also be used to reorganise your data without applying a computation via :py:meth:`~xarray.computation.rolling.DataArrayCoarsen.construct`. Taking our example tutorial air temperature dataset over the Northern US .. jupyter-execute:: air = xr.tutorial.open_dataset("air_temperature")["air"] air.isel(time=0).plot(x="lon", y="lat"); we can split this up into sub-regions of size ``(9, 18)`` points using :py:meth:`~xarray.computation.rolling.DataArrayCoarsen.construct`: .. jupyter-execute:: regions = air.coarsen(lat=9, lon=18, boundary="pad").construct( lon=("x_coarse", "x_fine"), lat=("y_coarse", "y_fine") ) with xr.set_options(display_expand_data=False): regions 9 new regions have been created, each of size 9 by 18 points. 
The ``boundary="pad"`` kwarg ensured that all regions are the same size even though the data does not evenly divide into these sizes. By plotting these 9 regions together via :ref:`faceting` we can see how they relate to the original data. .. jupyter-execute:: regions.isel(time=0).plot( x="x_fine", y="y_fine", col="x_coarse", row="y_coarse", yincrease=False ); We are now free to easily apply any custom computation to each coarsened region of our new dataarray. This would involve specifying that applied functions should act over the ``"x_fine"`` and ``"y_fine"`` dimensions, but broadcast over the ``"x_coarse"`` and ``"y_coarse"`` dimensions. xarray-2025.12.0/doc/user-guide/terminology.rst000066400000000000000000000353211511464676000212670ustar00rootroot00000000000000.. currentmodule:: xarray .. _terminology: Terminology =========== *Xarray terminology differs slightly from CF, mathematical conventions, and pandas; so we've put together a glossary of its terms. Here,* ``arr`` *refers to an xarray* :py:class:`DataArray` *in the examples. For more complete examples, please consult the relevant documentation.* .. jupyter-execute:: :hide-code: import numpy as np import xarray as xr .. glossary:: DataArray A multi-dimensional array with labeled or named dimensions. ``DataArray`` objects add metadata such as dimension names, coordinates, and attributes (defined below) to underlying "unlabeled" data structures such as numpy and Dask arrays. If its optional ``name`` property is set, it is a *named DataArray*. Dataset A dict-like collection of ``DataArray`` objects with aligned dimensions. Thus, most operations that can be performed on the dimensions of a single ``DataArray`` can be performed on a dataset. Datasets have data variables (see **Variable** below), dimensions, coordinates, and attributes. Variable A `NetCDF-like variable `_ consisting of dimensions, data, and attributes which describe a single array. The main functional difference between variables and numpy arrays is that numerical operations on variables implement array broadcasting by dimension name. Each ``DataArray`` has an underlying variable that can be accessed via ``arr.variable``. However, a variable is not fully described outside of either a ``Dataset`` or a ``DataArray``. .. note:: The :py:class:`Variable` class is a low-level interface and can typically be ignored. However, the word "variable" appears often enough in the code and documentation that it is useful to understand. Dimension In mathematics, the *dimension* of data is loosely the number of degrees of freedom for it. A *dimension axis* is a set of all points in which all but one of these degrees of freedom is fixed. We can think of each dimension axis as having a name, for example the "x dimension". In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes ``da.dims``, and the name of the ``i``-th dimension is ``da.dims[i]``. If an array is created without specifying dimension names, the default dimension names will be ``dim_0``, ``dim_1``, and so forth. Coordinate An array that labels a dimension or set of dimensions of another ``DataArray``. In the usual one-dimensional case, the coordinate array's values can loosely be thought of as tick labels along a dimension. We distinguish :term:`Dimension coordinate` vs. :term:`Non-dimension coordinate` and :term:`Indexed coordinate` vs. :term:`Non-indexed coordinate`. A coordinate named ``x`` can be retrieved from ``arr.coords["x"]``.
A ``DataArray`` can have more coordinates than dimensions because a single dimension can be labeled by multiple coordinate arrays. However, only one coordinate array can be assigned as a particular dimension's dimension coordinate array. Dimension coordinate A one-dimensional coordinate array assigned to ``arr`` with both a name and dimension name in ``arr.dims``. Usually (but not always), a dimension coordinate is also an :term:`Indexed coordinate` so that it can be used for label-based indexing and alignment, like the index found on a :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. Non-dimension coordinate A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but *not* in ``arr.dims``. These coordinates arrays can be one-dimensional or multidimensional, and they are useful for auxiliary labeling. As an example, multidimensional coordinates are often used in geoscience datasets when :doc:`the data's physical coordinates (such as latitude and longitude) differ from their logical coordinates <../examples/multidimensional-coords>`. Printing ``arr.coords`` will print all of ``arr``'s coordinate names, with the corresponding dimension(s) in parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``. Indexed coordinate A coordinate which has an associated :term:`Index`. Generally this means that the coordinate labels can be used for indexing (selection) and/or alignment. An indexed coordinate may have one or more arbitrary dimensions although in most cases it is also a :term:`Dimension coordinate`. It may or may not be grouped with other indexed coordinates depending on whether they share the same index. Indexed coordinates are marked by an asterisk ``*`` when printing a ``DataArray`` or ``Dataset``. Non-indexed coordinate A coordinate which has no associated :term:`Index`. It may still represent fixed labels along one or more dimensions but it cannot be used for label-based indexing and alignment. Index An *index* is a data structure optimized for efficient data selection and alignment within a discrete or continuous space that is defined by coordinate labels (unless it is a functional index). By default, Xarray creates a :py:class:`~xarray.indexes.PandasIndex` object (i.e., a :py:class:`pandas.Index` wrapper) for each :term:`Dimension coordinate`. For more advanced use cases (e.g., staggered or irregular grids, geospatial indexes), Xarray also accepts any instance of a specialized :py:class:`~xarray.indexes.Index` subclass that is associated to one or more arbitrary coordinates. The index associated with the coordinate ``x`` can be retrieved by ``arr.xindexes[x]`` (or ``arr.indexes["x"]`` if the index is convertible to a :py:class:`pandas.Index` object). If two coordinates ``x`` and ``y`` share the same index, ``arr.xindexes[x]`` and ``arr.xindexes[y]`` both return the same :py:class:`~xarray.indexes.Index` object. name The names of dimensions, coordinates, DataArray objects and data variables can be anything as long as they are :term:`hashable`. However, it is preferred to use :py:class:`str` typed names. scalar By definition, a scalar is not an :term:`array` and when converted to one, it has 0 dimensions. That means that, e.g., :py:class:`int`, :py:class:`float`, and :py:class:`str` objects are "scalar" while :py:class:`list` or :py:class:`tuple` are not. duck array `Duck arrays`__ are array implementations that behave like numpy arrays. They have to define the ``shape``, ``dtype`` and ``ndim`` properties. 
For integration with ``xarray``, the ``__array__``, ``__array_ufunc__`` and ``__array_function__`` protocols are also required. __ https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html Aligning Aligning refers to the process of ensuring that two or more DataArrays or Datasets have the same dimensions and coordinates, so that they can be combined or compared properly. .. jupyter-execute:: x = xr.DataArray( [[25, 35], [10, 24]], dims=("lat", "lon"), coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ) y = xr.DataArray( [[20, 5], [7, 13]], dims=("lat", "lon"), coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]}, ) a, b = xr.align(x, y) # By default, an "inner join" is performed # so "a" is a copy of "x" where coordinates match "y" a Broadcasting A technique that allows operations to be performed on arrays with different shapes and dimensions. When performing operations on arrays with different shapes and dimensions, xarray will automatically attempt to broadcast the arrays to a common shape before the operation is applied. .. jupyter-execute:: # 'a' has shape (3,) and 'b' has shape (4,) a = xr.DataArray(np.array([1, 2, 3]), dims=["x"]) b = xr.DataArray(np.array([4, 5, 6, 7]), dims=["y"]) # 2D array with shape (3, 4) a + b Merging Merging is used to combine two or more Datasets or DataArrays that have different variables or coordinates along the same dimensions. When merging, xarray aligns the variables and coordinates of the different datasets along the specified dimensions and creates a new ``Dataset`` containing all the variables and coordinates. .. jupyter-execute:: # create two 1D arrays with names arr1 = xr.DataArray( [1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}, name="arr1" ) arr2 = xr.DataArray( [4, 5, 6], dims=["x"], coords={"x": [20, 30, 40]}, name="arr2" ) # merge the two arrays into a new dataset merged_ds = xr.Dataset({"arr1": arr1, "arr2": arr2}) merged_ds Concatenating Concatenating is used to combine two or more Datasets or DataArrays along a dimension. When concatenating, xarray arranges the datasets or dataarrays along a new dimension, and the resulting ``Dataset`` or ``Dataarray`` will have the same variables and coordinates along the other dimensions. .. jupyter-execute:: a = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y")) b = xr.DataArray([[5, 6], [7, 8]], dims=("x", "y")) c = xr.concat([a, b], dim="c") c Combining Combining is the process of arranging two or more DataArrays or Datasets into a single ``DataArray`` or ``Dataset`` using some combination of merging and concatenation operations. .. jupyter-execute:: ds1 = xr.Dataset( {"data": xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"))}, coords={"x": [1, 2], "y": [3, 4]}, ) ds2 = xr.Dataset( {"data": xr.DataArray([[5, 6], [7, 8]], dims=("x", "y"))}, coords={"x": [2, 3], "y": [4, 5]}, ) # combine the datasets combined_ds = xr.combine_by_coords([ds1, ds2], join="outer") combined_ds lazy Lazily-evaluated operations do not load data into memory until necessary. Instead of doing calculations right away, xarray lets you plan what calculations you want to do, like finding the average temperature in a dataset. This planning is called "lazy evaluation." Later, when you're ready to see the final result, you tell xarray, "Okay, go ahead and do those calculations now!" That's when xarray starts working through the steps you planned and gives you the answer you wanted. This lazy approach helps save time and memory because xarray only does the work when you actually need the results. 
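A minimal sketch of this behaviour, assuming the optional ``dask`` dependency is installed (the variable names below are purely illustrative):

.. jupyter-execute::

    # Chunking the array with dask makes it lazy: building up the computation
    # is cheap and nothing is evaluated at this point.
    lazy_arr = xr.DataArray(np.arange(12.0).reshape(3, 4), dims=("x", "y")).chunk({"x": 1})
    planned_mean = lazy_arr.mean(dim="x")

    # The values are only computed when explicitly requested.
    planned_mean.compute()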
labeled Labeled data has metadata describing the context of the data, not just the raw data values. This contextual information can be labels for array axes (i.e. dimension names), tick labels along axes (stored as Coordinate variables), or unique names for each array. These labels provide context and meaning to the data, making it easier to understand and work with. For example, if you have temperature data for different cities over time, you can use xarray to label the dimensions: one for cities and another for time. serialization Serialization is the process of converting your data into a format that makes it easy to save and share. When you serialize data in xarray, you're taking all those temperature measurements, along with their labels and other information, and turning them into a format that can be stored in a file or sent over the internet. xarray objects can be serialized into formats which store the labels alongside the data. Some supported serialization formats are files that can then be stored or transferred (e.g. netCDF), whilst others are protocols that allow for data access over a network (e.g. Zarr). indexing :ref:`Indexing` is how you select the subsets of your data that you are interested in. - Label-based Indexing: Selecting data by passing a specific label and comparing it to the labels stored in the associated coordinates. You can use labels to specify what you want, like "Give me the temperature for New York on July 15th." - Positional Indexing: You can use numbers to refer to positions in the data, like "Give me the third temperature value". This is useful when you know the order of your data but don't need to remember the exact labels. - Slicing: You can take a "slice" of your data, like you might want all temperatures from July 1st to July 10th. xarray supports slicing for both positional and label-based indexing. DataTree A tree-like collection of ``Dataset`` objects. A *tree* is made up of one or more *nodes*, each of which can store the same information as a single ``Dataset`` (accessed via ``.dataset``). This data is stored in the same way as in a ``Dataset``, i.e. in the form of data :term:`variables`, :term:`dimensions`, :term:`coordinates`, and attributes. The nodes in a tree are linked to one another, and each node is itself a ``DataTree`` object. Each node can have zero or more *children* (stored in a dictionary-like manner under their corresponding *names*), and those child nodes can themselves have children. If a node is a child of another node, that other node is said to be its *parent*. Nodes can have a maximum of one parent, and if a node has no parent it is said to be the *root* node of that *tree*. Subtree A section of a *tree*, consisting of a *node* along with all the child nodes below it (and the child nodes below them, i.e. all so-called *descendant* nodes). Excludes the parent node and all nodes above. Group Another word for a subtree, reflecting how the hierarchical structure of a ``DataTree`` allows for grouping related data together. Analogous to a single `netCDF group `_ or `Zarr group `_. xarray-2025.12.0/doc/user-guide/testing.rst000066400000000000000000000264001511464676000203720ustar00rootroot00000000000000.. _testing: Testing your code ================= .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) .. _testing.hypothesis: Hypothesis testing ------------------ .. note:: Testing with hypothesis is a fairly advanced topic.
Before reading this section it is recommended that you take a look at our guide to xarray's :ref:`data structures`, are familiar with conventional unit testing in `pytest `_, and have seen the `hypothesis library documentation `_. `The hypothesis library `_ is a powerful tool for property-based testing. Instead of writing tests for one example at a time, it allows you to write tests parameterized by a source of many dynamically generated examples. For example you might have written a test which you wish to be parameterized by the set of all possible integers via :py:func:`hypothesis.strategies.integers()`. Property-based testing is extremely powerful, because (unlike more conventional example-based testing) it can find bugs that you did not even think to look for! Strategies ~~~~~~~~~~ Each source of examples is called a "strategy", and xarray provides a range of custom strategies which produce xarray data structures containing arbitrary data. You can use these to efficiently test downstream code, quickly ensuring that your code can handle xarray objects of all possible structures and contents. These strategies are accessible in the :py:mod:`xarray.testing.strategies` module, which provides .. currentmodule:: xarray .. autosummary:: testing.strategies.supported_dtypes testing.strategies.names testing.strategies.dimension_names testing.strategies.dimension_sizes testing.strategies.attrs testing.strategies.variables testing.strategies.unique_subset_of These build upon the numpy and array API strategies offered in :py:mod:`hypothesis.extra.numpy` and :py:mod:`hypothesis.extra.array_api`: .. jupyter-execute:: import hypothesis.extra.numpy as npst Generating Examples ~~~~~~~~~~~~~~~~~~~ To see an example of what each of these strategies might produce, you can call one followed by the ``.example()`` method, which is a general hypothesis method valid for all strategies. .. jupyter-execute:: import xarray.testing.strategies as xrst xrst.variables().example() .. jupyter-execute:: xrst.variables().example() .. jupyter-execute:: xrst.variables().example() You can see that calling ``.example()`` multiple times will generate different examples, giving you an idea of the wide range of data that the xarray strategies can generate. In your tests however you should not use ``.example()`` - instead you should parameterize your tests with the :py:func:`hypothesis.given` decorator: .. jupyter-execute:: from hypothesis import given .. jupyter-execute:: @given(xrst.variables()) def test_function_that_acts_on_variables(var): assert func(var) == ... Chaining Strategies ~~~~~~~~~~~~~~~~~~~ Xarray's strategies can accept other strategies as arguments, allowing you to customise the contents of the generated examples. .. jupyter-execute:: # generate a Variable containing an array with a complex number dtype, but all other details still arbitrary from hypothesis.extra.numpy import complex_number_dtypes xrst.variables(dtype=complex_number_dtypes()).example() This also works with custom strategies, or strategies defined in other packages. For example you could imagine creating a ``chunks`` strategy to specify particular chunking patterns for a dask-backed array. Fixing Arguments ~~~~~~~~~~~~~~~~ If you want to fix one aspect of the data structure, whilst allowing variation in the generated examples over all other aspects, then use :py:func:`hypothesis.strategies.just()`. .. 
jupyter-execute:: import hypothesis.strategies as st # Generates only variable objects with dimensions ["x", "y"] xrst.variables(dims=st.just(["x", "y"])).example() (This is technically another example of chaining strategies - :py:func:`hypothesis.strategies.just()` is simply a special strategy that just contains a single example.) To fix the length of dimensions you can instead pass ``dims`` as a mapping of dimension names to lengths (i.e. following xarray objects' ``.sizes()`` property), e.g. .. jupyter-execute:: # Generates only variables with dimensions ["x", "y"], of lengths 2 & 3 respectively xrst.variables(dims=st.just({"x": 2, "y": 3})).example() You can also use this to specify that you want examples which are missing some part of the data structure, for instance .. jupyter-execute:: # Generates a Variable with no attributes xrst.variables(attrs=st.just({})).example() Through a combination of chaining strategies and fixing arguments, you can specify quite complicated requirements on the objects your chained strategy will generate. .. jupyter-execute:: fixed_x_variable_y_maybe_z = st.fixed_dictionaries( {"x": st.just(2), "y": st.integers(3, 4)}, optional={"z": st.just(2)} ) fixed_x_variable_y_maybe_z.example() .. jupyter-execute:: special_variables = xrst.variables(dims=fixed_x_variable_y_maybe_z) special_variables.example() .. jupyter-execute:: special_variables.example() Here we have used one of hypothesis' built-in strategies :py:func:`hypothesis.strategies.fixed_dictionaries` to create a strategy which generates mappings of dimension names to lengths (i.e. the ``size`` of the xarray object we want). This particular strategy will always generate an ``x`` dimension of length 2, and a ``y`` dimension of length either 3 or 4, and will sometimes also generate a ``z`` dimension of length 2. By feeding this strategy for dictionaries into the ``dims`` argument of xarray's :py:func:`~st.variables` strategy, we can generate arbitrary :py:class:`~xarray.Variable` objects whose dimensions will always match these specifications. Generating Duck-type Arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray objects don't have to wrap numpy arrays, in fact they can wrap any array type which presents the same API as a numpy array (so-called "duck array wrapping", see :ref:`wrapping numpy-like arrays `). Imagine we want to write a strategy which generates arbitrary ``Variable`` objects, each of which wraps a :py:class:`sparse.COO` array instead of a ``numpy.ndarray``. How could we do that? There are two ways: 1. Create a xarray object with numpy data and use the hypothesis' ``.map()`` method to convert the underlying array to a different type: .. jupyter-execute:: import sparse .. jupyter-execute:: def convert_to_sparse(var): return var.copy(data=sparse.COO.from_numpy(var.to_numpy())) .. jupyter-execute:: sparse_variables = xrst.variables(dims=xrst.dimension_names(min_dims=1)).map( convert_to_sparse ) sparse_variables.example() .. jupyter-execute:: sparse_variables.example() 2. Pass a function which returns a strategy which generates the duck-typed arrays directly to the ``array_strategy_fn`` argument of the xarray strategies: .. 
jupyter-execute:: def sparse_random_arrays(shape: tuple[int, ...]) -> sparse._coo.core.COO: """Strategy which generates random sparse.COO arrays""" if shape is None: shape = npst.array_shapes() else: shape = st.just(shape) density = st.integers(min_value=0, max_value=1) # note sparse.random does not accept a dtype kwarg return st.builds(sparse.random, shape=shape, density=density) def sparse_random_arrays_fn( *, shape: tuple[int, ...], dtype: np.dtype ) -> st.SearchStrategy[sparse._coo.core.COO]: return sparse_random_arrays(shape=shape) .. jupyter-execute:: sparse_random_variables = xrst.variables( array_strategy_fn=sparse_random_arrays_fn, dtype=st.just(np.dtype("float64")) ) sparse_random_variables.example() Either approach is fine, but one may be more convenient than the other depending on the type of the duck array which you want to wrap. Compatibility with the Python Array API Standard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray aims to be compatible with any duck-array type that conforms to the `Python Array API Standard `_ (see our :ref:`docs on Array API Standard support `). .. warning:: The strategies defined in :py:mod:`testing.strategies` are **not** guaranteed to use array API standard-compliant dtypes by default. For example arrays with the dtype ``np.dtype('float16')`` may be generated by :py:func:`testing.strategies.variables` (assuming the ``dtype`` kwarg was not explicitly passed), despite ``np.dtype('float16')`` not being in the array API standard. If the array type you want to generate has an array API-compliant top-level namespace (e.g. that which is conventionally imported as ``xp`` or similar), you can use this neat trick: .. jupyter-execute:: import numpy as xp # compatible in numpy 2.0 # use `import numpy.array_api as xp` in numpy>=1.23,<2.0 from hypothesis.extra.array_api import make_strategies_namespace xps = make_strategies_namespace(xp) xp_variables = xrst.variables( array_strategy_fn=xps.arrays, dtype=xps.scalar_dtypes(), ) xp_variables.example() Another array API-compliant duck array library would replace the import, e.g. ``import cupy as cp`` instead. Testing over Subsets of Dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A common task when testing xarray user code is checking that your function works for all valid input dimensions. We can chain strategies to achieve this, for which the helper strategy :py:func:`~testing.strategies.unique_subset_of` is useful. It works for lists of dimension names .. jupyter-execute:: dims = ["x", "y", "z"] xrst.unique_subset_of(dims).example() .. jupyter-execute:: xrst.unique_subset_of(dims).example() as well as for mappings of dimension names to sizes .. jupyter-execute:: dim_sizes = {"x": 2, "y": 3, "z": 4} xrst.unique_subset_of(dim_sizes).example() .. jupyter-execute:: xrst.unique_subset_of(dim_sizes).example() This is useful because operations like reductions can be performed over any subset of the xarray object's dimensions. For example we can write a pytest test that tests that a reduction gives the expected result when applying that reduction along any possible valid subset of the Variable's dimensions. .. 
code-block:: python import numpy.testing as npt @given(st.data(), xrst.variables(dims=xrst.dimension_names(min_dims=1))) def test_mean(data, var): """Test that the mean of an xarray Variable is always equal to the mean of the underlying array.""" # specify arbitrary reduction along at least one dimension reduction_dims = data.draw(xrst.unique_subset_of(var.dims, min_size=1)) # create expected result (using nanmean because arrays with Nans will be generated) reduction_axes = tuple(var.get_axis_num(dim) for dim in reduction_dims) expected = np.nanmean(var.data, axis=reduction_axes) # assert property is always satisfied result = var.mean(dim=reduction_dims).data npt.assert_equal(expected, result) xarray-2025.12.0/doc/user-guide/time-series.rst000066400000000000000000000301321511464676000211400ustar00rootroot00000000000000.. currentmodule:: xarray .. _time-series: ================ Time series data ================ A major use case for xarray is multi-dimensional time-series data. Accordingly, we've copied many of the features that make working with time-series data in pandas such a joy to xarray. In most cases, we rely on pandas for the core functionality. .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Creating datetime64 data ------------------------ Xarray uses the numpy dtypes :py:class:`numpy.datetime64` and :py:class:`numpy.timedelta64` with specified units (one of ``"s"``, ``"ms"``, ``"us"`` and ``"ns"``) to represent datetime data, which offer vectorized operations with numpy and smooth integration with pandas. To convert to or create regular arrays of :py:class:`numpy.datetime64` data, we recommend using :py:func:`pandas.to_datetime`, :py:class:`pandas.DatetimeIndex`, or :py:func:`xarray.date_range`: .. jupyter-execute:: pd.to_datetime(["2000-01-01", "2000-02-02"]) .. jupyter-execute:: pd.DatetimeIndex( ["2000-01-01 00:00:00", "2000-02-02 00:00:00"], dtype="datetime64[s]" ) .. jupyter-execute:: xr.date_range("2000-01-01", periods=365) .. jupyter-execute:: xr.date_range("2000-01-01", periods=365, unit="s") .. note:: Care has to be taken to create the output with the desired resolution. For :py:func:`pandas.date_range` the ``unit``-kwarg has to be specified and for :py:func:`pandas.to_datetime` the selection of the resolution isn't possible at all. For that, :py:class:`pd.DatetimeIndex` can be used directly. There is more in-depth information in section :ref:`internals.timecoding`. Alternatively, you can supply arrays of Python ``datetime`` objects. These get converted automatically when used as arguments in xarray objects (with us-resolution): .. jupyter-execute:: import datetime xr.Dataset({"time": datetime.datetime(2000, 1, 1)}) When reading or writing netCDF files, xarray automatically decodes datetime and timedelta arrays using `CF conventions`_ (that is, by using a ``units`` attribute like ``'days since 2000-01-01'``). .. _CF conventions: https://cfconventions.org .. note:: When decoding/encoding datetimes for non-standard calendars or for dates before `1582-10-15`_, xarray uses the `cftime`_ library by default. It was previously packaged with the ``netcdf4-python`` package under the name ``netcdftime`` but is now distributed separately. ``cftime`` is an :ref:`optional dependency` of xarray. .. _cftime: https://unidata.github.io/cftime .. _1582-10-15: https://en.wikipedia.org/wiki/Gregorian_calendar You can manually decode arrays in this form by passing a dataset to :py:func:`decode_cf`: ..
jupyter-execute:: attrs = {"units": "hours since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) # Default decoding to 'ns'-resolution xr.decode_cf(ds) .. jupyter-execute:: # Decoding to 's'-resolution coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.decode_cf(ds, decode_times=coder) From xarray 2025.01.2 the resolution of the dates can be one of ``"s"``, ``"ms"``, ``"us"`` or ``"ns"``. One limitation of using ``datetime64[ns]`` is that it limits the native representation of dates to those that fall between the years 1678 and 2262, which gets increased significantly with lower resolutions. When a store contains dates outside of these bounds (or dates < `1582-10-15`_ with a Gregorian, also known as standard, calendar), dates will be returned as arrays of :py:class:`cftime.datetime` objects and a :py:class:`CFTimeIndex` will be used for indexing. :py:class:`CFTimeIndex` enables most of the indexing functionality of a :py:class:`pandas.DatetimeIndex`. See :ref:`CFTimeIndex` for more information. Datetime indexing ----------------- Xarray borrows powerful indexing machinery from pandas (see :ref:`indexing`). This allows for several useful and succinct forms of indexing, particularly for ``datetime64`` data. For example, we support indexing with strings for single items and with the ``slice`` object: .. jupyter-execute:: time = pd.date_range("2000-01-01", freq="h", periods=365 * 24) ds = xr.Dataset({"foo": ("time", np.arange(365 * 24)), "time": time}) ds.sel(time="2000-01") .. jupyter-execute:: ds.sel(time=slice("2000-06-01", "2000-06-10")) You can also select a particular time by indexing with a :py:class:`datetime.time` object: .. jupyter-execute:: ds.sel(time=datetime.time(12)) For more details, read the pandas documentation and the section on :ref:`datetime_component_indexing` (i.e. using the ``.dt`` accessor). .. _dt_accessor: Datetime components ------------------- Similar to `pandas accessors`_, the components of datetime objects contained in a given ``DataArray`` can be quickly computed using a special ``.dt`` accessor. .. _pandas accessors: https://pandas.pydata.org/pandas-docs/stable/basics.html#basics-dt-accessors .. jupyter-execute:: time = pd.date_range("2000-01-01", freq="6h", periods=365 * 4) ds = xr.Dataset({"foo": ("time", np.arange(365 * 4)), "time": time}) ds.time.dt.hour .. jupyter-execute:: ds.time.dt.dayofweek The ``.dt`` accessor works on both coordinate dimensions as well as multi-dimensional data. Xarray also supports a notion of "virtual" or "derived" coordinates for `datetime components`__ implemented by pandas, including "year", "month", "day", "hour", "minute", "second", "dayofyear", "week", "dayofweek", "weekday" and "quarter": __ https://pandas.pydata.org/pandas-docs/stable/api.html#time-date-components .. jupyter-execute:: ds["time.month"] .. jupyter-execute:: ds["time.dayofyear"] For use as a derived coordinate, xarray adds ``'season'`` to the list of datetime components supported by pandas: .. jupyter-execute:: ds["time.season"] .. jupyter-execute:: ds["time"].dt.season The set of valid seasons consists of 'DJF', 'MAM', 'JJA' and 'SON', labeled by the first letters of the corresponding months. You can use these shortcuts with both Datasets and DataArray coordinates. In addition, xarray supports rounding operations ``floor``, ``ceil``, and ``round``. These operations require that you supply a `rounding frequency as a string argument.`__ __ https://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases .. 
jupyter-execute:: ds["time"].dt.floor("D") The ``.dt`` accessor can also be used to generate formatted datetime strings for arrays utilising the same formatting as the standard `datetime.strftime`_. .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior .. jupyter-execute:: ds["time"].dt.strftime("%a, %b %d %H:%M") .. _datetime_component_indexing: Indexing Using Datetime Components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can use use the ``.dt`` accessor when subsetting your data as well. For example, we can subset for the month of January using the following: .. jupyter-execute:: ds.isel(time=(ds.time.dt.month == 1)) You can also search for multiple months (in this case January through March), using ``isin``: .. jupyter-execute:: ds.isel(time=ds.time.dt.month.isin([1, 2, 3])) .. _resampling: Resampling and grouped operations --------------------------------- .. seealso:: For more generic documentation on grouping, see :ref:`groupby`. Datetime components couple particularly well with grouped operations for analyzing features that repeat over time. Here's how to calculate the mean by time of day: .. jupyter-execute:: ds.groupby("time.hour").mean() For upsampling or downsampling temporal resolutions, xarray offers a :py:meth:`Dataset.resample` method building on the core functionality offered by the pandas method of the same name. Resample uses essentially the same api as :py:meth:`pandas.DataFrame.resample` `in pandas`_. .. _in pandas: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling For example, we can downsample our dataset from hourly to 6-hourly: .. jupyter-execute:: ds.resample(time="6h") This will create a specialized :py:class:`~xarray.core.resample.DatasetResample` or :py:class:`~xarray.core.resample.DataArrayResample` object which saves information necessary for resampling. All of the reduction methods which work with :py:class:`Dataset` or :py:class:`DataArray` objects can also be used for resampling: .. jupyter-execute:: ds.resample(time="6h").mean() You can also supply an arbitrary reduction function to aggregate over each resampling group: .. jupyter-execute:: ds.resample(time="6h").reduce(np.mean) You can also resample on the time dimension while applying reducing along other dimensions at the same time by specifying the ``dim`` keyword argument .. code-block:: python ds.resample(time="6h").mean(dim=["time", "latitude", "longitude"]) For upsampling, xarray provides six methods: ``asfreq``, ``ffill``, ``bfill``, ``pad``, ``nearest`` and ``interpolate``. ``interpolate`` extends :py:func:`scipy.interpolate.interp1d` and supports all of its schemes. All of these resampling operations work on both Dataset and DataArray objects with an arbitrary number of dimensions. In order to limit the scope of the methods ``ffill``, ``bfill``, ``pad`` and ``nearest`` the ``tolerance`` argument can be set in coordinate units. Data that has indices outside of the given ``tolerance`` are set to ``NaN``. .. jupyter-execute:: ds.resample(time="1h").nearest(tolerance="1h") It is often desirable to center the time values after a resampling operation. That can be accomplished by updating the resampled dataset time coordinate values using time offset arithmetic via the :py:func:`pandas.tseries.frequencies.to_offset` function. .. jupyter-execute:: resampled_ds = ds.resample(time="6h").mean() offset = pd.tseries.frequencies.to_offset("6h") / 2 resampled_ds["time"] = resampled_ds.get_index("time") + offset resampled_ds .. 
seealso:: For more examples of using grouped operations on a time dimension, see :doc:`../examples/weather-data`. .. _seasonal_grouping: Handling Seasons ~~~~~~~~~~~~~~~~ Two extremely common time series operations are to group by seasons, and resample to a seasonal frequency. Xarray has historically supported some simple versions of these computations. For example, ``.groupby("time.season")`` (where the seasons are DJF, MAM, JJA, SON) and resampling to a seasonal frequency using Pandas syntax: ``.resample(time="QS-DEC")``. Quite commonly one wants more flexibility in defining seasons. For these use-cases, Xarray provides :py:class:`groupers.SeasonGrouper` and :py:class:`groupers.SeasonResampler`. .. currentmodule:: xarray.groupers .. jupyter-execute:: from xarray.groupers import SeasonGrouper ds.groupby(time=SeasonGrouper(["DJF", "MAM", "JJA", "SON"])).mean() Note how the seasons are in the specified order, unlike ``.groupby("time.season")`` where the seasons are sorted alphabetically. .. jupyter-execute:: ds.groupby("time.season").mean() :py:class:`SeasonGrouper` supports overlapping seasons: .. jupyter-execute:: ds.groupby(time=SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"])).mean() Skipping months is allowed: .. jupyter-execute:: ds.groupby(time=SeasonGrouper(["JJAS"])).mean() Use :py:class:`SeasonResampler` to specify custom seasons. .. jupyter-execute:: from xarray.groupers import SeasonResampler ds.resample(time=SeasonResampler(["DJF", "MAM", "JJA", "SON"])).mean() :py:class:`SeasonResampler` is smart enough to correctly handle years for seasons that span the end of the year (e.g. DJF). By default :py:class:`SeasonResampler` will skip any season that is incomplete (e.g. the first DJF season for a time series that starts in Jan). Pass the ``drop_incomplete=False`` kwarg to :py:class:`SeasonResampler` to disable this behaviour. .. jupyter-execute:: from xarray.groupers import SeasonResampler ds.resample( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False) ).mean() Seasons need not be of the same length: .. jupyter-execute:: ds.resample(time=SeasonResampler(["JF", "MAM", "JJAS", "OND"])).mean() xarray-2025.12.0/doc/user-guide/weather-climate.rst000066400000000000000000000312441511464676000217720ustar00rootroot00000000000000.. currentmodule:: xarray .. _weather-climate: Weather and climate data ======================== .. jupyter-execute:: :hide-code: import xarray as xr import numpy as np Xarray can leverage metadata that follows the `Climate and Forecast (CF) conventions`_ if present. Examples include :ref:`automatic labelling of plots` with descriptive names and units if proper metadata is present and support for non-standard calendars used in climate science through the ``cftime`` module (explained in the :ref:`CFTimeIndex` section). There are also a number of :ref:`geosciences-focused projects that build on xarray`. .. _Climate and Forecast (CF) conventions: https://cfconventions.org .. _cf_variables: Related Variables ----------------- Several CF variable attributes contain lists of other variables associated with the variable with the attribute. 
A few of these are now parsed by xarray, with the attribute value popped to encoding on read and the variables in that value interpreted as non-dimension coordinates: - ``coordinates`` - ``bounds`` - ``grid_mapping`` - ``climatology`` - ``geometry`` - ``node_coordinates`` - ``node_count`` - ``part_node_count`` - ``interior_ring`` - ``cell_measures`` - ``formula_terms`` This decoding is controlled by the ``decode_coords`` kwarg to :py:func:`open_dataset` and :py:func:`open_mfdataset`. The CF attribute ``ancillary_variables`` was not included in the list due to the variables listed there being associated primarily with the variable with the attribute, rather than with the dimensions. .. _metpy_accessor: CF-compliant coordinate variables --------------------------------- `MetPy`_ adds a ``metpy`` accessor that allows accessing coordinates with appropriate CF metadata using generic names ``x``, ``y``, ``vertical`` and ``time``. There is also a ``cartopy_crs`` attribute that provides projection information, parsed from the appropriate CF metadata, as a `Cartopy`_ projection object. See the `metpy documentation`_ for more information. .. _`MetPy`: https://unidata.github.io/MetPy/dev/index.html .. _`metpy documentation`: https://unidata.github.io/MetPy/dev/tutorials/xarray_tutorial.html#coordinates .. _`Cartopy`: https://cartopy.readthedocs.io/stable/reference/crs.html .. _CFTimeIndex: Non-standard calendars and dates outside the precision range ------------------------------------------------------------ Through the standalone ``cftime`` library and a custom subclass of :py:class:`pandas.Index`, xarray supports a subset of the indexing functionality enabled through the standard :py:class:`pandas.DatetimeIndex` for dates from non-standard calendars commonly used in climate science or dates using a standard calendar, but outside the `precision range`_ and dates prior to `1582-10-15`_. .. note:: As of xarray version 0.11, by default, :py:class:`cftime.datetime` objects will be used to represent times (either in indexes, as a :py:class:`~xarray.CFTimeIndex`, or in data arrays with dtype object) if any of the following are true: - The dates are from a non-standard calendar - Any dates are outside the nanosecond-precision range (prior xarray version 2025.01.2) - Any dates are outside the time span limited by the resolution (from xarray version 2025.01.2) Otherwise pandas-compatible dates from a standard calendar will be represented with the ``np.datetime64[unit]`` data type (where unit can be one of ``"s"``, ``"ms"``, ``"us"``, ``"ns"``), enabling the use of a :py:class:`pandas.DatetimeIndex` or arrays with dtype ``np.datetime64[unit]`` and their full set of associated features. As of pandas version 2.0.0, pandas supports non-nanosecond precision datetime values. From xarray version 2025.01.2 on, non-nanosecond precision datetime values are also supported in xarray (this can be parameterized via :py:class:`~xarray.coders.CFDatetimeCoder` and ``decode_times`` kwarg). See also :ref:`internals.timecoding`. For example, you can create a DataArray indexed by a time coordinate with dates from a no-leap calendar and a :py:class:`~xarray.CFTimeIndex` will automatically be used: .. 
jupyter-execute:: from itertools import product from cftime import DatetimeNoLeap dates = [ DatetimeNoLeap(year, month, 1) for year, month in product(range(1, 3), range(1, 13)) ] da = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") Xarray also includes a :py:func:`~xarray.date_range` function, which enables creating a :py:class:`~xarray.CFTimeIndex` with regularly-spaced dates. For instance, we can create the same dates and DataArray we created above using (note that ``use_cftime=True`` is not mandatory to return a :py:class:`~xarray.CFTimeIndex` for non-standard calendars, but can be nice to use to be explicit): .. jupyter-execute:: dates = xr.date_range( start="0001", periods=24, freq="MS", calendar="noleap", use_cftime=True ) da = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") Mirroring pandas' method with the same name, :py:meth:`~xarray.infer_freq` allows one to infer the sampling frequency of a :py:class:`~xarray.CFTimeIndex` or a 1-D :py:class:`~xarray.DataArray` containing cftime objects. It also works transparently with ``np.datetime64`` and ``np.timedelta64`` data (with "s", "ms", "us" or "ns" resolution). .. jupyter-execute:: xr.infer_freq(dates) With :py:meth:`~xarray.CFTimeIndex.strftime` we can also easily generate formatted strings from the datetime values of a :py:class:`~xarray.CFTimeIndex` directly or through the ``dt`` accessor for a :py:class:`~xarray.DataArray` using the same formatting as the standard `datetime.strftime`_ convention . .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior .. jupyter-execute:: dates.strftime("%c") .. jupyter-execute:: da["time"].dt.strftime("%Y%m%d") Conversion between non-standard calendar and to/from pandas DatetimeIndexes is facilitated with the :py:meth:`xarray.Dataset.convert_calendar` method (also available as :py:meth:`xarray.DataArray.convert_calendar`). Here, like elsewhere in xarray, the ``use_cftime`` argument controls which datetime backend is used in the output. The default (``None``) is to use ``pandas`` when possible, i.e. when the calendar is ``standard``/``gregorian`` and dates starting with `1582-10-15`_. There is no such restriction when converting to a ``proleptic_gregorian`` calendar. .. _1582-10-15: https://en.wikipedia.org/wiki/Gregorian_calendar .. jupyter-execute:: dates = xr.date_range( start="2001", periods=24, freq="MS", calendar="noleap", use_cftime=True ) da_nl = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") da_std = da.convert_calendar("standard", use_cftime=True) The data is unchanged, only the timestamps are modified. Further options are implemented for the special ``"360_day"`` calendar and for handling missing dates. There is also :py:meth:`xarray.Dataset.interp_calendar` (and :py:meth:`xarray.DataArray.interp_calendar`) for interpolating data between calendars. For data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports: - `Partial datetime string indexing`_: .. jupyter-execute:: da.sel(time="0001") .. jupyter-execute:: da.sel(time=slice("0001-05", "0002-02")) .. note:: For specifying full or partial datetime strings in cftime indexing, xarray supports two versions of the `ISO 8601 standard`_, the basic pattern (YYYYMMDDhhmmss) or the extended pattern (YYYY-MM-DDThh:mm:ss), as well as the default cftime string format (YYYY-MM-DD hh:mm:ss). 
This is somewhat more restrictive than pandas; in other words, some datetime strings that would be valid for a :py:class:`pandas.DatetimeIndex` are not valid for an :py:class:`~xarray.CFTimeIndex`. - Access of basic datetime components via the ``dt`` accessor (in this case just "year", "month", "day", "hour", "minute", "second", "microsecond", "season", "dayofyear", "dayofweek", and "days_in_month") with the addition of "calendar", absent from pandas: .. jupyter-execute:: da.time.dt.year .. jupyter-execute:: da.time.dt.month .. jupyter-execute:: da.time.dt.season .. jupyter-execute:: da.time.dt.dayofyear .. jupyter-execute:: da.time.dt.dayofweek .. jupyter-execute:: da.time.dt.days_in_month .. jupyter-execute:: da.time.dt.calendar - Rounding of datetimes to fixed frequencies via the ``dt`` accessor: .. jupyter-execute:: da.time.dt.ceil("3D").head() .. jupyter-execute:: da.time.dt.floor("5D").head() .. jupyter-execute:: da.time.dt.round("2D").head() - Group-by operations based on datetime accessor attributes (e.g. by month of the year): .. jupyter-execute:: da.groupby("time.month").sum() - Interpolation using :py:class:`cftime.datetime` objects: .. jupyter-execute:: da.interp(time=[DatetimeNoLeap(1, 1, 15), DatetimeNoLeap(1, 2, 15)]) - Interpolation using datetime strings: .. jupyter-execute:: da.interp(time=["0001-01-15", "0001-02-15"]) - Differentiation: .. jupyter-execute:: da.differentiate("time") - Serialization: .. jupyter-execute:: da.to_netcdf("example-no-leap.nc") reopened = xr.open_dataset("example-no-leap.nc") reopened .. jupyter-execute:: :hide-code: import os reopened.close() os.remove("example-no-leap.nc") - And resampling along the time dimension for data indexed by a :py:class:`~xarray.CFTimeIndex`: .. jupyter-execute:: da.resample(time="81min", closed="right", label="right", offset="3min").mean() .. _precision range: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations .. _ISO 8601 standard: https://en.wikipedia.org/wiki/ISO_8601 .. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#partial-string-indexing .. _cftime_arithmetic_limitations: Arithmetic limitations with ``cftime`` objects ---------------------------------------------- A current limitation when working with non-standard calendars and :py:class:`cftime.datetime` objects is that they support arithmetic with :py:class:`datetime.timedelta`, but **not** with :py:class:`numpy.timedelta64`. This means that certain xarray operations (such as :py:meth:`~xarray.DataArray.diff`) may produce ``timedelta64`` results that cannot be directly combined with ``cftime`` coordinates. For example, let's define a time axis using ``cftime`` objects: .. jupyter-execute:: import xarray as xr import numpy as np import pandas as pd import cftime time = xr.DataArray( xr.date_range("2000", periods=3, freq="MS", use_cftime=True), dims="time", ) If you want to compute, e.g., midpoints in the time intervals, this will not work: .. code-block:: python # Attempt to compute midpoints time[:-1] + 0.5 * time.diff("time") and result in an error like this: .. 
code-block:: none UFuncTypeError: ufunc 'add' cannot use operands with types dtype('O') and dtype('' authors: - Deepak Cherian - title: "Xdev Python Tutorial Seminar Series 2021 seminar introducing xarray (2 of 2)" src: '' authors: - Anderson Banihirwe - title: "Xdev Python Tutorial Seminar Series 2021 seminar introducing xarray (1 of 2)" src: '' authors: - Anderson Banihirwe - title: "Xarray's 2020 virtual tutorial" src: '' authors: - Anderson Banihirwe - Deepak Cherian - Martin Durant - title: "Xarray's Tutorial presented at the 2020 SciPy Conference" src: ' ' authors: - Joe Hamman - Deepak Cherian - Ryan Abernathey - Stephan Hoyer - title: "Scipy 2015 talk introducing xarray to a general audience" src: '' authors: - Stephan Hoyer - title: " 2015 Unidata Users Workshop talk and tutorial with (`with answers`_) introducing xarray to users familiar with netCDF" src: '' authors: - Stephan Hoyer xarray-2025.12.0/doc/whats-new.rst000066400000000000000000015775021511464676000166000ustar00rootroot00000000000000.. currentmodule:: xarray .. _whats-new: What's New ========== .. _whats-new.2025.12.0: v2025.12.0 (Dec 5, 2025) ------------------------ This release rolls back the default engine for HTTP urls, adds support for :py:class:`DataTree` objects in ``combine_nested`` and contains numerous bug fixes. Thanks to the 16 contributors to this release: Benoit Bovy, Christine P. Chai, Deepak Cherian, Dhruva Kumar Kaushal, Ian Hunt-Isaak, Ilan Gold, Illviljan, Julia Signell, Justus Magin, Lars Buntemeyer, Maximilian Roos, Miguel Jimenez, Nick Hodgskin, Richard Berg, Spencer Clark and Stephan Hoyer New Features ~~~~~~~~~~~~ - Improved ``pydap`` backend behavior and performance when using :py:func:`open_dataset`, :py:func:`open_datatree` when downloading dap4 (opendap) dimensions data (:issue:`10628`, :pull:`10629`). In addition ``checksums=True|False`` is added as optional argument to be passed to ``pydap`` backend. By `Miguel Jimenez-Urias `_. - :py:func:`combine_nested` now supports :py:class:`DataTree` objects (:pull:`10849`). By `Stephan Hoyer `_. Bug Fixes ~~~~~~~~~ - When assigning an indexed coordinate to a data variable or coordinate, coerce it from ``IndexVariable`` to ``Variable`` (:issue:`9859`, :issue:`10829`, :pull:`10909`). By `Julia Signell `_. - The NetCDF4 backend will now claim to be able to read any URL except for one that contains the substring zarr. This restores backward compatibility after :pull:`10804` broke workflows that relied on ``xr.open_dataset("http://...")`` (:pull:`10931`). By `Ian Hunt-Isaak `_. - Always normalize slices when indexing ``LazilyIndexedArray`` instances (:issue:`10941`, :pull:`10948`). By `Justus Magin `_. - Avoid casting custom indexes in ``Dataset.drop_attrs`` (:pull:`10961`) By `Justus Magin `_. - Support decoding unsigned integers to ``np.timedelta64``. By `Deepak Cherian `_. - Properly handle internal type promotion and ``NA`` objects for extension arrays (:pull:`10423`). By `Ilan Gold `_. Documentation ~~~~~~~~~~~~~ - Added section on the `limitations of cftime arithmetic `_ (:pull:`10653`). By `Lars Buntemeyer `_. Internal Changes ~~~~~~~~~~~~~~~~ - Change the development workflow to use ``pixi`` (:issue:`10732`, :pull:`10888`). By `Nick Nodgskin `_. .. 
_whats-new.2025.11.0: v2025.11.0 (Nov 17, 2025) ------------------------- This release changes the default for ``keep_attrs`` such that attributes are preserved by default, adds support for :py:class:`DataTree` in top-level functions, and contains several memory and performance improvements as well as a number of bug fixes. Thanks to the 21 contributors to this release: Aled Owen, Charles Turner, Christine P. Chai, David Huard, Deepak Cherian, Gregorio L. Trevisan, Ian Hunt-Isaak, Ilan Gold, Illviljan, Jan Meischner, Jemma Jeffree, Jonas Lundholm Bertelsen, Justus Magin, Kai Mรผhlbauer, Kristian Bodolai, Lukas Riedel, Max Jones, Maximilian Roos, Niclas Rieger, Stephan Hoyer and William Andrea New Features ~~~~~~~~~~~~ - :py:func:`merge` and :py:func:`concat` now support :py:class:`DataTree` objects (:issue:`9790`, :issue:`9778`). By `Stephan Hoyer `_. - The ``h5netcdf`` engine has support for pseudo ``NETCDF4_CLASSIC`` files, meaning variables and attributes are cast to supported types. Note that the saved files won't be recognized as genuine ``NETCDF4_CLASSIC`` files until ``h5netcdf`` adds support with version 1.7.0 (:issue:`10676`, :pull:`10686`). By `David Huard `_. - Support comparing :py:class:`DataTree` objects with :py:func:`testing.assert_allclose` (:pull:`10887`). By `Justus Magin `_. - Add support for ``chunks="auto"`` for cftime datasets (:issue:`9834`, :pull:`10527`). By `Charles Turner `_. Breaking Changes ~~~~~~~~~~~~~~~~ - All xarray operations now preserve attributes by default (:issue:`3891`, :issue:`2582`). Previously, operations would drop attributes unless explicitly told to preserve them via ``keep_attrs=True``. Additionally, when attributes are preserved in binary operations, they now combine attributes from both operands using ``drop_conflicts`` (keeping matching attributes, dropping conflicts), instead of keeping only the left operand's attributes. **What changed:** .. code-block:: python # Before (xarray <2025.11.0): data = xr.DataArray([1, 2, 3], attrs={"units": "meters", "long_name": "height"}) result = data.mean() result.attrs # {} - Attributes lost! # After (xarray โ‰ฅ2025.09.1): data = xr.DataArray([1, 2, 3], attrs={"units": "meters", "long_name": "height"}) result = data.mean() result.attrs # {"units": "meters", "long_name": "height"} - Attributes preserved! **Affected operations include:** *Computational operations:* - Reductions: ``mean()``, ``sum()``, ``std()``, ``var()``, ``min()``, ``max()``, ``median()``, ``quantile()``, etc. - Rolling windows: ``rolling().mean()``, ``rolling().sum()``, etc. - Groupby: ``groupby().mean()``, ``groupby().sum()``, etc. - Resampling: ``resample().mean()``, etc. - Weighted: ``weighted().mean()``, ``weighted().sum()``, etc. 
- ``apply_ufunc()`` and NumPy universal functions *Binary operations:* - Arithmetic: ``+``, ``-``, ``*``, ``/``, ``**``, ``//``, ``%`` (combines attributes using ``drop_conflicts``) - Comparisons: ``<``, ``>``, ``==``, ``!=``, ``<=``, ``>=`` (combines attributes using ``drop_conflicts``) - With scalars: ``data * 2``, ``10 - data`` (preserves data's attributes) *Data manipulation:* - Missing data: ``fillna()``, ``dropna()``, ``interpolate_na()``, ``ffill()``, ``bfill()`` - Indexing/selection: ``isel()``, ``sel()``, ``where()``, ``clip()`` - Alignment: ``interp()``, ``reindex()``, ``align()`` - Transformations: ``map()``, ``pipe()``, ``assign()``, ``assign_coords()`` - Shape operations: ``expand_dims()``, ``squeeze()``, ``transpose()``, ``stack()``, ``unstack()`` **Binary operations - combines attributes with** ``drop_conflicts``: .. code-block:: python a = xr.DataArray([1, 2], attrs={"units": "m", "source": "sensor_a"}) b = xr.DataArray([3, 4], attrs={"units": "m", "source": "sensor_b"}) (a + b).attrs # {"units": "m"} - Matching values kept, conflicts dropped (b + a).attrs # {"units": "m"} - Order doesn't matter for drop_conflicts **How to restore previous behavior:** 1. **Globally for your entire script:** .. code-block:: python import xarray as xr xr.set_options(keep_attrs=False) # Affects all subsequent operations 2. **For specific operations:** .. code-block:: python result = data.mean(dim="time", keep_attrs=False) 3. **For code blocks:** .. code-block:: python with xr.set_options(keep_attrs=False): # All operations in this block drop attrs result = data1 + data2 4. **Remove attributes after operations:** .. code-block:: python result = data.mean().drop_attrs() By `Maximilian Roos `_. Bug Fixes ~~~~~~~~~ - Fix h5netcdf backend for format=None, use same rule as netcdf4 backend (:pull:`10859`). By `Kai Mรผhlbauer `_. - ``netcdf4`` and ``pydap`` backends now use stricter URL detection to avoid incorrectly claiming remote URLs. The ``pydap`` backend now only claims URLs with explicit DAP protocol indicators (``dap2://`` or ``dap4://`` schemes, or ``/dap2/`` or ``/dap4/`` in the URL path). This prevents both backends from claiming remote Zarr stores and other non-DAP URLs without an explicit ``engine=`` argument (:pull:`10804`). By `Ian Hunt-Isaak `_. - Fix indexing with empty arrays for scipy & h5netcdf backends which now resolves to empty slices (:issue:`10867`, :pull:`10870`). By `Kai Mรผhlbauer `_ - Fix error handling issue in ``decode_cf_variables`` when decoding fails - the exception is now re-raised correctly, with a note added about the variable name that caused the error (:issue:`10873`, :pull:`10886`). By `Jonas L. Bertelsen `_. - Fix ``equivalent`` for numpy scalar nan comparison (:issue:`10833`, :pull:`10838`). By `Maximilian Roos `_. - Support non-``DataArray`` outputs in :py:meth:`Dataset.map` (:issue:`10835`, :pull:`10839`). By `Maximilian Roos `_. - Support ``drop_sel`` on ``MultiIndex`` objects (:issue:`10862`, :pull:`10863`). By `Aled Owen `_. Performance ~~~~~~~~~~~ - Speedup and reduce memory usage of :py:func:`concat`. Magnitude of improvement scales with size of the concatenation dimension (:issue:`10864`, :pull:`10866`). By `Deepak Cherian `_. - Speedup and reduce memory usage when coarsening along multiple dimensions (:pull:`10921`) By `Deepak Cherian `_. .. _whats-new.2025.10.1: v2025.10.1 (Oct 7, 2025) ------------------------ This release reverts a breaking change to Xarray's preferred netCDF backend. 
Breaking changes ~~~~~~~~~~~~~~~~ - Xarray's default engine for reading/writing netCDF files has been reverted to prefer netCDF4 over h5netcdf over scipy, which was the default before v2025.09.1. This change had larger implications for the ecosystem than we anticipated. We are still considering changing the default in the future, but will be a bit more careful about the implications. See :issue:`10657` and linked issues for discussion. The behavior can still be customized, e.g., with ``xr.set_options(netcdf_engine_order=['h5netcdf', 'netcdf4', 'scipy'])``. By `Stephan Hoyer `_. New features ~~~~~~~~~~~~ - Coordinates are ordered to match dims when displaying Xarray objects. (:pull:`10778`). By `Julia Signell `_. Bug fixes ~~~~~~~~~ - Fix error raised when writing scalar variables to Zarr with ``region={}`` (:pull:`10796`). By `Stephan Hoyer `_. .. _whats-new.2025.09.1: v2025.09.1 (Sep 29, 2025) ------------------------- This release contains improvements to netCDF IO and the :py:func:`DataTree.from_dict` constructor, as well as a variety of bug fixes. In particular, the default netCDF backend has switched from netCDF4 to h5netcdf, which is typically faster. Thanks to the 17 contributors to this release: Claude, Deepak Cherian, Dimitri Papadopoulos Orfanos, Dylan H. Morris, Emmanuel Mathot, Ian Hunt-Isaak, Joren Hammudoglu, Julia Signell, Justus Magin, Maximilian Roos, Nick Hodgskin, Spencer Clark, Stephan Hoyer, Tom Nicholas, gronniger, joseph nowak and pierre-manchon New Features ~~~~~~~~~~~~ - :py:func:`DataTree.from_dict` now supports passing in ``DataArray`` and nested dictionary values, and has a ``coords`` argument for specifying coordinates as ``DataArray`` objects (:pull:`10658`). - ``engine='netcdf4'`` now supports reading and writing in-memory netCDF files. All of Xarray's netCDF backends now support in-memory reads and writes (:pull:`10624`). By `Stephan Hoyer `_. Breaking changes ~~~~~~~~~~~~~~~~ - :py:meth:`Dataset.update` now returns ``None``, instead of the updated dataset. This completes the deprecation cycle started in version 0.17. The method still updates the dataset in-place. (:issue:`10167`) By `Maximilian Roos `_. - The default ``engine`` when reading/writing netCDF files is now h5netcdf or scipy, which are typically faster than the prior default of netCDF4-python. You can control this default behavior explicitly via the new ``netcdf_engine_order`` parameter in :py:func:`~xarray.set_options`, e.g., ``xr.set_options(netcdf_engine_order=['netcdf4', 'scipy', 'h5netcdf'])`` to restore the prior defaults (:issue:`10657`). By `Stephan Hoyer `_. - The HTML reprs for :py:class:`DataArray`, :py:class:`Dataset` and :py:class:`DataTree` have been tweaked to hide empty sections, consistent with the text reprs. The ``DataTree`` HTML repr also now automatically expands sub-groups (:pull:`10785`). By `Stephan Hoyer `_. - Zarr stores written with Xarray now consistently use a default Zarr fill value of ``NaN`` for float variables, for both Zarr v2 and v3 (:issue:`10646``). All other dtypes still use the Zarr default ``fill_value`` of zero. To customize, explicitly set encoding in :py:meth:`~Dataset.to_zarr`, e.g., ``encoding=dict.fromkey(ds.data_vars, {'fill_value': 0})``. By `Stephan Hoyer `_. Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ - Xarray objects opened from file-like objects with ``engine='h5netcdf'`` can now be pickled, as long as the underlying file-like object also supports pickle (:issue:`10712`). By `Stephan Hoyer `_. 
- Closing Xarray objects opened from file-like objects with ```engine='scipy'`` no longer closes the underlying file, consistent with the h5netcdf backend (:pull:`10624`). By `Stephan Hoyer `_. - Fix the ``align_chunks`` parameter on the :py:meth:`~xarray.Dataset.to_zarr` method, it was not being passed to the underlying :py:meth:`~xarray.backends.api` method (:issue:`10501`, :pull:`10516`). - Fix error when encoding an empty :py:class:`numpy.datetime64` array (:issue:`10722`, :pull:`10723`). By `Spencer Clark `_. - Propagate coordinate attrs in :py:meth:`xarray.Dataset.map` (:issue:`9317`, :pull:`10602`). - Fix error from ``to_netcdf(..., compute=False)`` when using Dask Distributed (:issue:`10725`). By `Stephan Hoyer `_. - Propagation coordinate attrs in :py:meth:`xarray.Dataset.map` (:issue:`9317`, :pull:`10602`). By `Justus Magin `_. - Allow ``combine_attrs="drop_conflicts"`` to handle objects with ``__eq__`` methods that return non-bool values (e.g., numpy arrays) without raising ``ValueError`` (:pull:`10726`). By `Maximilian Roos `_. Documentation ~~~~~~~~~~~~~ - Fixed Zarr encoding documentation with consistent examples and added comprehensive coverage of dimension and coordinate encoding differences between Zarr V2 and V3 formats. The documentation shows what users will see when accessing Zarr files with raw zarr-python, and explains the relationship between ``_ARRAY_DIMENSIONS`` (Zarr V2), ``dimension_names`` metadata (Zarr V3), and CF ``coordinates`` attributes. (:pull:`10720`) By `Emmanuel Mathot `_. Internal Changes ~~~~~~~~~~~~~~~~ - Refactor structure of ``backends`` module to separate code for reading data from code for writing data (:pull:`10771`). By `Tom Nicholas `_. - All test files now have full mypy type checking enabled (``check_untyped_defs = true``), improving type safety and making the test suite a better reference for type annotations. (:pull:`10768`) By `Maximilian Roos `_. .. _whats-new.2025.09.0: v2025.09.0 (Sep 2, 2025) ------------------------ This release brings a number of small improvements and fixes, especially related to writing DataTree objects and netCDF files to disk. Thanks to the 13 contributors to this release: Benoit Bovy, DHRUVA KUMAR KAUSHAL, Deepak Cherian, Dhruva Kumar Kaushal, Giacomo Caria, Ian Hunt-Isaak, Illviljan, Justus Magin, Kai Mรผhlbauer, Ruth Comer, Spencer Clark, Stephan Hoyer and Tom Nicholas New Features ~~~~~~~~~~~~ - Support rechunking by :py:class:`~xarray.groupers.SeasonResampler` for seasonal data analysis (:issue:`10425`, :pull:`10519`). By `Dhruva Kumar Kaushal `_. - Add convenience methods to :py:class:`~xarray.Coordinates` (:pull:`10318`) By `Justus Magin `_. - Added :py:func:`load_datatree` for loading ``DataTree`` objects into memory from disk. It has the same relationship to :py:func:`open_datatree`, as :py:func:`load_dataset` has to :py:func:`open_dataset`. By `Stephan Hoyer `_. - ``compute=False`` is now supported by :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr`. By `Stephan Hoyer `_. - ``open_dataset`` will now correctly infer a path ending in ``.zarr/`` as zarr By `Ian Hunt-Isaak `_. Breaking changes ~~~~~~~~~~~~~~~~ - Following pandas 3.0 (`pandas-dev/pandas#61985 `_), ``Day`` is no longer considered a ``Tick``-like frequency. Therefore non-``None`` values of ``offset`` and non-``"start_day"`` values of ``origin`` will have no effect when resampling to a daily frequency for objects indexed by a :py:class:`xarray.CFTimeIndex`. 
As in `pandas-dev/pandas#62101 `_ warnings will be emitted if non default values are provided in this context (:issue:`10640`, :pull:`10650`). By `Spencer Clark `_. - The default backend ``engine`` used by :py:meth:`Dataset.to_netcdf` and :py:meth:`DataTree.to_netcdf` is now chosen consistently with :py:func:`open_dataset` and :py:func:`open_datatree`, using whichever netCDF libraries are available and valid, and preferring netCDF4 to h5netcdf to scipy (:issue:`10654`). This will change the default backend in some edge cases (e.g., from scipy to netCDF4 when writing to a file-like object or bytes). To override these new defaults, set ``engine`` explicitly. By `Stephan Hoyer `_. - The return value of :py:meth:`Dataset.to_netcdf` without ``path`` is now a ``memoryview`` object instead of ``bytes`` (:pull:`10656`). This removes an unnecessary memory copy and ensures consistency when using either ``engine="scipy"`` or ``engine="h5netcdf"``. If you need a bytes object, simply wrap the return value of ``to_netcdf()`` with ``bytes()``. By `Stephan Hoyer `_. Bug fixes ~~~~~~~~~ - Fix contour plots not normalizing the colors correctly when using for example logarithmic norms. (:issue:`10551`, :pull:`10565`) By `Jimmy Westling `_. - Fix distribution of ``auto_complex`` keyword argument for open_datatree (:issue:`10631`, :pull:`10632`). By `Kai Mรผhlbauer `_. - Warn instead of raise in case of misconfiguration of ``unlimited_dims`` originating from dataset.encoding, to prevent breaking users workflows (:issue:`10647`, :pull:`10648`). By `Kai Mรผhlbauer `_. - :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr` now avoid redundant computation of Dask arrays with cross-group dependencies (:issue:`10637`). By `Stephan Hoyer `_. - :py:meth:`DataTree.to_netcdf` had h5netcdf hard-coded as default (:issue:`10654`). By `Stephan Hoyer `_. Internal Changes ~~~~~~~~~~~~~~~~ - Run ``TestNetCDF4Data`` as ``TestNetCDF4DataTree`` through ``open_datatree`` (:pull:`10632`). By `Kai Mรผhlbauer `_. .. _whats-new.2025.08.0: v2025.08.0 (Aug 14, 2025) ------------------------- This release brings the ability to load xarray objects asynchronously, write netCDF as bytes, fixes a number of bugs, and starts an important deprecation cycle for changing the default values of keyword arguments for various xarray combining functions. Thanks to the 24 contributors to this release: Alfonso Ladino, Brigitta Sipล‘cz, Claude, Deepak Cherian, Dimitri Papadopoulos Orfanos, Eric Jansen, Ian Hunt-Isaak, Ilan Gold, Illviljan, Julia Signell, Justus Magin, Kai Mรผhlbauer, Mathias Hauser, Matthew, Michael Niklas, Miguel Jimenez, Nick Hodgskin, Pratiman, Scott Staniewicz, Spencer Clark, Stephan Hoyer, Tom Nicholas, Yang Yang and jemmajeffree New Features ~~~~~~~~~~~~ - Added :py:meth:`DataTree.prune` method to remove empty nodes while preserving tree structure. Useful for cleaning up DataTree after time-based filtering operations (:issue:`10590`, :pull:`10598`). By `Alfonso Ladino `_. - Added new asynchronous loading methods :py:meth:`Dataset.load_async`, :py:meth:`DataArray.load_async`, :py:meth:`Variable.load_async`. Note that users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. (:issue:`10326`, :pull:`10327`) By `Tom Nicholas `_. - :py:meth:`DataTree.to_netcdf` can now write to a file-like object, or return bytes if called without a filepath. (:issue:`10570`) By `Matthew Willson `_. - Added exception handling for invalid files in :py:func:`open_mfdataset`. 
(:issue:`6736`) By `Pratiman Patel `_. Breaking changes ~~~~~~~~~~~~~~~~ - When writing to NetCDF files with groups, Xarray no longer redefines dimensions that have the same size in parent groups (:issue:`10241`). This conforms with `CF Conventions for group scrope `_ but may require adjustments for code that consumes NetCDF files produced by Xarray. By `Stephan Hoyer `_. Deprecations ~~~~~~~~~~~~ - Start a deprecation cycle for changing the default keyword arguments to :py:func:`concat`, :py:func:`merge`, :py:func:`combine_nested`, :py:func:`combine_by_coords`, and :py:func:`open_mfdataset`. Emits a :py:class:`FutureWarning` when using old defaults and new defaults would result in different behavior. Adds an option: ``use_new_combine_kwarg_defaults`` to opt in to new defaults immediately. New values are: - ``data_vars``: None which means ``all`` when concatenating along a new dimension, and ``"minimal"`` when concatenating along an existing dimension - ``coords``: "minimal" - ``compat``: "override" - ``join``: "exact" (:issue:`8778`, :issue:`1385`, :pull:`10062`). By `Julia Signell `_. Bug fixes ~~~~~~~~~ - Fix Pydap Datatree backend testing. Testing now compares elements of (unordered) two sets (before, lists) (:pull:`10525`). By `Miguel Jimenez-Urias `_. - Fix ``KeyError`` when passing a ``dim`` argument different from the default to ``convert_calendar`` (:pull:`10544`). By `Eric Jansen `_. - Fix transpose of boolean arrays read from disk. (:issue:`10536`) By `Deepak Cherian `_. - Fix detection of the ``h5netcdf`` backend. Xarray now selects ``h5netcdf`` if the default ``netCDF4`` engine is not available (:issue:`10401`, :pull:`10557`). By `Scott Staniewicz `_. - Fix :py:func:`merge` to prevent altering original object depending on join value (:pull:`10596`) By `Julia Signell `_. - Ensure ``unlimited_dims`` passed to :py:meth:`xarray.DataArray.to_netcdf`, :py:meth:`xarray.Dataset.to_netcdf` or :py:meth:`xarray.DataTree.to_netcdf` only contains dimensions present in the object; raise ``ValueError`` otherwise (:issue:`10549`, :pull:`10608`). By `Kai Mรผhlbauer `_. Documentation ~~~~~~~~~~~~~ - Clarify lazy behaviour and eager loading for ``chunks=None`` in :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_dataarray`, :py:func:`~xarray.open_datatree`, :py:func:`~xarray.open_groups` and :py:func:`~xarray.open_zarr` (:issue:`10612`, :pull:`10627`). By `Kai Mรผhlbauer `_. Performance ~~~~~~~~~~~ - Speed up non-numeric scalars when calling :py:meth:`Dataset.interp`. (:issue:`10054`, :pull:`10554`) By `Jimmy Westling `_. .. _whats-new.2025.07.1: v2025.07.1 (Jul 09, 2025) ------------------------- This release brings a lot of improvements to flexible indexes functionality, including new classes to ease building of new indexes with custom coordinate transforms (:py:class:`indexes.CoordinateTransformIndex`) and tree-like index structures (:py:class:`indexes.NDPointIndex`). See a `new gallery `_ showing off the possibilities enabled by flexible indexes. Thanks to the 7 contributors to this release: Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Illviljan, Justus Magin and Tom Nicholas New Features ~~~~~~~~~~~~ - New :py:class:`xarray.indexes.NDPointIndex`, which by default uses :py:class:`scipy.spatial.KDTree` under the hood for the selection of irregular, n-dimensional data (:pull:`10478`). By `Benoit Bovy `_. - Allow skipping the creation of default indexes when opening datasets (:pull:`8051`). By `Benoit Bovy `_ and `Justus Magin `_. 
Bug fixes ~~~~~~~~~ - :py:meth:`Dataset.set_xindex` now raises a helpful error when a custom index creates extra variables that don't match the provided coordinate names, instead of silently ignoring them. The error message suggests using the factory method pattern with :py:meth:`xarray.Coordinates.from_xindex` and :py:meth:`Dataset.assign_coords` for advanced use cases (:issue:`10499`, :pull:`10503`). By `Dhruva Kumar Kaushal `_. Documentation ~~~~~~~~~~~~~ - A `new gallery `_ showing off the possibilities enabled by flexible indexes. Internal Changes ~~~~~~~~~~~~~~~~ - Refactored the ``PandasIndexingAdapter`` and ``CoordinateTransformIndexingAdapter`` internal indexing classes. Coordinate variables that wrap a :py:class:`pandas.RangeIndex`, a :py:class:`pandas.MultiIndex` or a :py:class:`xarray.indexes.CoordinateTransform` are now displayed as lazy variables in the Xarray data reprs (:pull:`10355`). By `Benoit Bovy `_. .. _whats-new.2025.07.0: v2025.07.0 (Jul 3, 2025) ------------------------ This release extends xarray's support for custom index classes, restores support for reading netCDF3 files with SciPy, updates minimum dependencies, and fixes a number of bugs. Thanks to the 17 contributors to this release: Bas Nijholt, Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Ian Hunt-Isaak, Kai Mรผhlbauer, Mathias Hauser, Maximilian Roos, Miguel Jimenez, Nick Hodgskin, Scott Henderson, Shuhao Cao, Spencer Clark, Stephan Hoyer, Tom Nicholas and Zsolt Cserna New Features ~~~~~~~~~~~~ - Expose :py:class:`~xarray.indexes.RangeIndex`, and :py:class:`~xarray.indexes.CoordinateTransformIndex` as public api under the ``xarray.indexes`` namespace. By `Deepak Cherian `_. - Support zarr-python's new ``.supports_consolidated_metadata`` store property (:pull:`10457``). by `Tom Nicholas `_. - Better error messages when encoding data to be written to disk fails (:pull:`10464`). By `Stephan Hoyer `_ Breaking changes ~~~~~~~~~~~~~~~~ The minimum versions of some dependencies were changed (:issue:`10417`, :pull:`10438`): By `Dhruva Kumar Kaushal `_. .. list-table:: :header-rows: 1 :widths: 30 20 20 * - Dependency - Old Version - New Version * - Python - 3.10 - 3.11 * - array-api-strict - 1.0 - 1.1 * - boto3 - 1.29 - 1.34 * - bottleneck - 1.3 - 1.4 * - cartopy - 0.22 - 0.23 * - dask-core - 2023.11 - 2024.6 * - distributed - 2023.11 - 2024.6 * - flox - 0.7 - 0.9 * - h5py - 3.8 - 3.11 * - hdf5 - 1.12 - 1.14 * - iris - 3.7 - 3.9 * - lxml - 4.9 - 5.1 * - matplotlib-base - 3.7 - 3.8 * - numba - 0.57 - 0.60 * - numbagg - 0.6 - 0.8 * - numpy - 1.24 - 1.26 * - packaging - 23.2 - 24.1 * - pandas - 2.1 - 2.2 * - pint - 0.22 - 0.24 * - pydap - N/A - 3.5 * - scipy - 1.11 - 1.13 * - sparse - 0.14 - 0.15 * - typing_extensions - 4.8 - Removed * - zarr - 2.16 - 2.18 Bug fixes ~~~~~~~~~ - Fix Pydap test_cmp_local_file for numpy 2.3.0 changes, 1. do always return arrays for all versions and 2. skip astype(str) for numpy >= 2.3.0 for expected data. (:pull:`10421`) By `Kai Mรผhlbauer `_. - Fix the SciPy backend for netCDF3 files . (:issue:`8909`, :pull:`10376`) By `Deepak Cherian `_. - Check and fix character array string dimension names, issue warnings as needed (:issue:`6352`, :pull:`10395`). By `Kai Mรผhlbauer `_. - Fix the error message of :py:func:`testing.assert_equal` when two different :py:class:`DataTree` objects are passed (:pull:`10440`). By `Mathias Hauser `_. - Fix :py:func:`testing.assert_equal` with ``check_dim_order=False`` for :py:class:`DataTree` objects (:pull:`10442`). 
By `Mathias Hauser `_. - Fix Pydap backend testing. Now test forces string arrays to dtype "S" (pydap converts them to unicode type by default). Removes conditional to numpy version. (:issue:`10261`, :pull:`10482`) By `Miguel Jimenez-Urias `_. - Fix attribute overwriting bug when decoding encoded :py:class:`numpy.timedelta64` values from disk with a dtype attribute (:issue:`10468`, :pull:`10469`). By `Spencer Clark `_. - Fix default ``"_FillValue"`` dtype coercion bug when encoding :py:class:`numpy.timedelta64` values to an on-disk format that only supports 32-bit integers (:issue:`10466`, :pull:`10469`). By `Spencer Clark `_. Internal Changes ~~~~~~~~~~~~~~~~ - Forward variable name down to coders for AbstractWritableDataStore.encode_variable and subclasses. (:pull:`10395`). By `Kai Mรผhlbauer `_. .. _whats-new.2025.06.1: v2025.06.1 (Jun 11, 2025) ------------------------- This is quick bugfix release to remove an unintended dependency on ``typing_extensions``. Thanks to the 4 contributors to this release: Alex Merose, Deepak Cherian, Ilan Gold and Simon Perkins Bug fixes ~~~~~~~~~ - Remove dependency on ``typing_extensions`` (:pull:`10413`). By `Simon Perkins `_. .. _whats-new.2025.06.0: v2025.06.0 (Jun 10, 2025) ------------------------- This release brings HTML reprs to the documentation, fixes to flexible Xarray indexes, performance optimizations, more ergonomic seasonal grouping and resampling with new :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects, and bugfixes. Thanks to the 33 contributors to this release: Andrecho, Antoine Gibek, Benoit Bovy, Brian Michell, Christine P. Chai, David Huard, Davis Bennett, Deepak Cherian, Dimitri Papadopoulos Orfanos, Elliott Sales de Andrade, Erik, Erik Mรฅnsson, Giacomo Caria, Ilan Gold, Illviljan, Jesse Rusak, Jonathan Neuhauser, Justus Magin, Kai Mรผhlbauer, Kimoon Han, Konstantin Ntokas, Mark Harfouche, Michael Niklas, Nick Hodgskin, Niko Sirmpilatze, Pascal Bourgault, Scott Henderson, Simon Perkins, Spencer Clark, Tom Vo, Trevor James Smith, joseph nowak and micguerr-bopen New Features ~~~~~~~~~~~~ - Switch docs to jupyter-execute sphinx extension for HTML reprs. (:issue:`3893`, :pull:`10383`) By `Scott Henderson `_. - Allow an Xarray index that uses multiple dimensions checking equality with another index for only a subset of those dimensions (i.e., ignoring the dimensions that are excluded from alignment). (:issue:`10243`, :pull:`10293`) By `Benoit Bovy `_. - New :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects for ergonomic seasonal aggregation. See the docs on :ref:`seasonal_grouping` or `blog post `_ for more. By `Deepak Cherian `_. - Data corruption issues arising from misaligned Dask and Zarr chunks can now be prevented using the new ``align_chunks`` parameter in :py:meth:`~xarray.DataArray.to_zarr`. This option automatically rechunk the Dask array to align it with the Zarr storage chunks. For now, it is disabled by default, but this could change on the future. (:issue:`9914`, :pull:`10336`) By `Joseph Nowak `_. Documentation ~~~~~~~~~~~~~ - HTML reprs! By `Scott Henderson `_. Bug fixes ~~~~~~~~~ - Fix :py:class:`~xarray.groupers.BinGrouper` when ``labels`` is not specified (:issue:`10284`). By `Deepak Cherian `_. - Allow accessing arbitrary attributes on Pandas ExtensionArrays. By `Deepak Cherian `_. 
- Fix coding empty (zero-size) timedelta64 arrays, ``units`` taking precedence when encoding, fallback to default values when decoding (:issue:`10310`, :pull:`10313`). By `Kai Mรผhlbauer `_. - Use dtype from intermediate sum instead of source dtype or "int" for casting of count when calculating mean in rolling for correct operations (preserve float dtypes, correct mean of bool arrays) (:issue:`10340`, :pull:`10341`). By `Kai Mรผhlbauer `_. - Improve the html ``repr`` of Xarray objects (dark mode, icons and variable attribute / data dropdown sections). (:pull:`10353`, :pull:`10354`) By `Benoit Bovy `_. - Raise an error when attempting to encode :py:class:`numpy.datetime64` values prior to the Gregorian calendar reform date of 1582-10-15 with a ``"standard"`` or ``"gregorian"`` calendar. Previously we would warn and encode these as :py:class:`cftime.DatetimeGregorian` objects, but it is not clear that this is the user's intent, since this implicitly converts the calendar of the datetimes from ``"proleptic_gregorian"`` to ``"gregorian"`` and prevents round-tripping them as :py:class:`numpy.datetime64` values (:pull:`10352`). By `Spencer Clark `_. - Avoid unsafe casts from float to unsigned int in CFMaskCoder (:issue:`9815`, :pull:`9964`). By ` Elliott Sales de Andrade `_. Performance ~~~~~~~~~~~ - Lazily indexed arrays now use less memory to store keys by avoiding copies in :py:class:`~xarray.indexing.VectorizedIndexer` and :py:class:`~xarray.indexing.OuterIndexer` (:issue:`10316`). By `Jesse Rusak `_. - Fix performance regression in interp where more data was loaded than was necessary. (:issue:`10287`). By `Deepak Cherian `_. - Speed up encoding of :py:class:`cftime.datetime` objects by roughly a factor of three (:pull:`8324`). By `Antoine Gibek `_. .. _whats-new.2025.04.0: v2025.04.0 (Apr 29, 2025) ------------------------- This release brings bug fixes, better support for extension arrays including returning a :py:class:`pandas.IntervalArray` from ``groupby_bins``, and performance improvements. Thanks to the 24 contributors to this release: Alban Farchi, Andrecho, Benoit Bovy, Deepak Cherian, Dimitri Papadopoulos Orfanos, Florian Jetter, Giacomo Caria, Ilan Gold, Illviljan, Joren Hammudoglu, Julia Signell, Kai Muehlbauer, Kai Mรผhlbauer, Mathias Hauser, Mattia Almansi, Michael Sumner, Miguel Jimenez, Nick Hodgskin (๐ŸฆŽ Vecko), Pascal Bourgault, Philip Chmielowiec, Scott Henderson, Spencer Clark, Stephan Hoyer and Tom Nicholas New Features ~~~~~~~~~~~~ - By default xarray now encodes :py:class:`numpy.timedelta64` values by converting to :py:class:`numpy.int64` values and storing ``"dtype"`` and ``"units"`` attributes consistent with the dtype of the in-memory :py:class:`numpy.timedelta64` values, e.g. ``"timedelta64[s]"`` and ``"seconds"`` for second-resolution timedeltas. These values will always be decoded to timedeltas without a warning moving forward. Timedeltas encoded via the previous approach can still be roundtripped exactly, but in the future will not be decoded by default (:issue:`1621`, :issue:`10099`, :pull:`10101`). By `Spencer Clark `_. - Added `scipy-stubs `_ to the ``xarray[types]`` dependencies. By `Joren Hammudoglu `_. - Added a :mod:`xarray.typing` module to expose selected public types for use in downstream libraries and static type checking. (:issue:`10179`, :pull:`10215`). By `Michele Guerreri `_. - Improved compatibility with OPeNDAP DAP4 data model for backend engine ``pydap``. This includes ``datatree`` support, and removing slashes from dimension names. 
By `Miguel Jimenez-Urias `_. - Allow assigning index coordinates with non-array dimension(s) in a :py:class:`DataArray` by overriding :py:meth:`Index.should_add_coord_to_array`. For example, this enables support for CF boundaries coordinate (e.g., ``time(time)`` and ``time_bnds(time, nbnd)``) in a DataArray (:pull:`10137`). By `Benoit Bovy `_. - Improved support pandas categorical extension as indices (i.e., :py:class:`pandas.IntervalIndex`). (:issue:`9661`, :pull:`9671`) By `Ilan Gold `_. - Improved checks and errors raised when trying to align objects with conflicting indexes. It is now possible to align objects each with multiple indexes sharing common dimension(s). (:issue:`7695`, :pull:`10251`) By `Benoit Bovy `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed ===================== ========= ======= Package Old New ===================== ========= ======= pydap 3.4 3.5.0 ===================== ========= ======= - Reductions with ``groupby_bins`` or those that involve :py:class:`xarray.groupers.BinGrouper` now return objects indexed by :py:meth:`pandas.IntervalArray` objects, instead of numpy object arrays containing tuples. This change enables interval-aware indexing of such Xarray objects. (:pull:`9671`). By `Ilan Gold `_. - Remove ``PandasExtensionArrayIndex`` from :py:attr:`xarray.Variable.data` when the attribute is a :py:class:`pandas.api.extensions.ExtensionArray` (:pull:`10263`). By `Ilan Gold `_. - The html and text ``repr`` for ``DataTree`` are now truncated. Up to 6 children are displayed for each node -- the first 3 and the last 3 children -- with a ``...`` between them. The number of children to include in the display is configurable via options. For instance use ``set_options(display_max_children=8)`` to display 8 children rather than the default 6. (:pull:`10139`) By `Julia Signell `_. Deprecations ~~~~~~~~~~~~ - The deprecation cycle for the ``eagerly_compute_group`` kwarg to ``groupby`` and ``groupby_bins`` is now complete. By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - :py:meth:`~xarray.Dataset.to_stacked_array` now uses dimensions in order of appearance. This fixes the issue where using :py:meth:`~xarray.Dataset.transpose` before :py:meth:`~xarray.Dataset.to_stacked_array` had no effect. (Mentioned in :issue:`9921`) - Enable ``keep_attrs`` in ``DatasetView.map`` relevant for :py:func:`map_over_datasets` (:pull:`10219`) By `Mathias Hauser `_. - Variables with no temporal dimension are left untouched by :py:meth:`~xarray.Dataset.convert_calendar`. (:issue:`10266`, :pull:`10268`) By `Pascal Bourgault `_. - Enable ``chunk_key_encoding`` in :py:meth:`~xarray.Dataset.to_zarr` for Zarr v2 Datasets (:pull:`10274`) By `BrianMichell `_. Documentation ~~~~~~~~~~~~~ - Fix references to core classes in docs (:issue:`10195`, :pull:`10207`). By `Mattia Almansi `_. - Fix references to point to updated pydap documentation (:pull:`10182`). By `Miguel Jimenez-Urias `_. - Switch to `pydata-sphinx-theme `_ from `sphinx-book-theme `_ (:pull:`8708`). By `Scott Henderson `_. - Add a dedicated 'Complex Numbers' sections to the User Guide (:issue:`10213`, :pull:`10235`). By `Andre Wendlinger `_. Internal Changes ~~~~~~~~~~~~~~~~ - Avoid stacking when grouping by a chunked array. This can be a large performance improvement. By `Deepak Cherian `_. - The implementation of ``Variable.set_dims`` has changed to use array indexing syntax instead of ``np.broadcast_to`` to perform dimension expansions where all new dimensions have a size of 1. 
This should improve compatibility with duck arrays that do not support broadcasting (:issue:`9462`, :pull:`10277`). By `Mark Harfouche `_. .. _whats-new.2025.03.1: v2025.03.1 (Mar 30, 2025) ------------------------- This release brings the ability to specify ``fill_value`` and ``write_empty_chunks`` for Zarr V3 stores, and a few bug fixes. Thanks to the 10 contributors to this release: Andrecho, Deepak Cherian, Ian Hunt-Isaak, Karl Krauth, Mathias Hauser, Maximilian Roos, Nick Hodgskin (๐ŸฆŽ Vecko), Spencer Clark, Tom Nicholas and wpbonelli. New Features ~~~~~~~~~~~~ - Allow setting a ``fill_value`` for Zarr format 3 arrays. Specify ``fill_value`` in ``encoding`` as usual. (:issue:`10064`). By `Deepak Cherian `_. - Added :py:class:`indexes.RangeIndex` as an alternative, memory saving Xarray index representing a 1-dimensional bounded interval with evenly spaced floating values (:issue:`8473`, :pull:`10076`). By `Benoit Bovy `_. Breaking changes ~~~~~~~~~~~~~~~~ - Explicitly forbid appending a :py:class:`~xarray.DataTree` to zarr using :py:meth:`~xarray.DataTree.to_zarr` with ``append_dim``, because the expected behaviour is currently undefined. (:issue:`9858`, :pull:`10156`) By `Tom Nicholas `_. Bug fixes ~~~~~~~~~ - Update the parameters of :py:meth:`~xarray.DataArray.to_zarr` to match :py:meth:`~xarray.Dataset.to_zarr`. This fixes the issue where using the ``zarr_version`` parameter would raise a deprecation warning telling the user to use a non-existent ``zarr_format`` parameter instead. (:issue:`10163`, :pull:`10164`) By `Karl Krauth `_. - :py:meth:`DataTree.sel` and :py:meth:`DataTree.isel` display the path of the first failed node again (:pull:`10154`). By `Mathias Hauser `_. - Fix grouped and resampled ``first``, ``last`` with datetimes (:issue:`10169`, :pull:`10173`) By `Deepak Cherian `_. - FacetGrid plots now include units in their axis labels when available (:issue:`10184`, :pull:`10185`) By `Andre Wendlinger `_. .. _whats-new.2025.03.0: v2025.03.0 (Mar 20, 2025) ------------------------- This release brings tested support for Python 3.13, support for reading Zarr V3 datasets into a :py:class:`~xarray.DataTree`, significant improvements to datetime & timedelta encoding/decoding, and improvements to the :py:class:`~xarray.DataTree` API; in addition to the usual bug fixes and other improvements. Thanks to the 26 contributors to this release: Alfonso Ladino, Benoit Bovy, Chuck Daniels, Deepak Cherian, Eni, Florian Jetter, Ian Hunt-Isaak, Jan, Joe Hamman, Josh Kihm, Julia Signell, Justus Magin, Kai Mรผhlbauer, Kobe Vandelanotte, Mathias Hauser, Max Jones, Maximilian Roos, Oliver Watt-Meyer, Sam Levang, Sander van Rijn, Spencer Clark, Stephan Hoyer, Tom Nicholas, Tom White, Vecko and maddogghoek New Features ~~~~~~~~~~~~ - Added :py:meth:`tutorial.open_datatree` and :py:meth:`tutorial.load_datatree` By `Eni Awowale `_. - Added :py:meth:`DataTree.filter_like` to conveniently restructure a DataTree like another DataTree (:issue:`10096`, :pull:`10097`). By `Kobe Vandelanotte `_. - Added :py:meth:`Coordinates.from_xindex` as convenience for creating a new :py:class:`Coordinates` object directly from an existing Xarray index object if the latter supports it (:pull:`10000`) By `Benoit Bovy `_. - Allow kwargs in :py:meth:`DataTree.map_over_datasets` and :py:func:`map_over_datasets` (:issue:`10009`, :pull:`10012`). By `Kai Mรผhlbauer `_. - support python 3.13 (no free-threading) (:issue:`9664`, :pull:`9681`) By `Justus Magin `_. 
- Added experimental support for coordinate transforms (not ready for public use yet!) (:pull:`9543`) By `Benoit Bovy `_. - Similar to our :py:class:`numpy.datetime64` encoding path, automatically modify the units when an integer dtype is specified during eager cftime encoding, but the specified units would not allow for an exact round trip (:pull:`9498`). By `Spencer Clark `_. - Support reading to `GPU memory with Zarr `_ (:pull:`10078`). By `Deepak Cherian `_. Performance ~~~~~~~~~~~ - :py:meth:`DatasetGroupBy.first` and :py:meth:`DatasetGroupBy.last` can now use ``flox`` if available. (:issue:`9647`) By `Deepak Cherian `_. Breaking changes ~~~~~~~~~~~~~~~~ - Rolled back code that would attempt to catch integer overflow when encoding times with small integer dtypes (:issue:`8542`), since it was inconsistent with xarray's handling of standard integers, and interfered with encoding times with small integer dtypes and missing values (:pull:`9498`). By `Spencer Clark `_. - Warn instead of raise if phony_dims are detected when using h5netcdf-backend and ``phony_dims=None`` (:issue:`10049`, :pull:`10058`) By `Kai Mรผhlbauer `_. Deprecations ~~~~~~~~~~~~ - Deprecate :py:func:`~xarray.cftime_range` in favor of :py:func:`~xarray.date_range` with ``use_cftime=True`` (:issue:`9886`, :pull:`10024`). By `Josh Kihm `_. - Move from phony_dims=None to phony_dims="access" for h5netcdf-backend(:issue:`10049`, :pull:`10058`) By `Kai Mรผhlbauer `_. Bug fixes ~~~~~~~~~ - Fix ``open_datatree`` incompatibilities with Zarr-Python V3 and refactor ``TestZarrDatatreeIO`` accordingly (:issue:`9960`, :pull:`10020`). By `Alfonso Ladino-Rincon `_. - Default to resolution-dependent optimal integer encoding units when saving chunked non-nanosecond :py:class:`numpy.datetime64` or :py:class:`numpy.timedelta64` arrays to disk. Previously units of "nanoseconds" were chosen by default, which are optimal for nanosecond-resolution times, but not for times with coarser resolution. By `Spencer Clark `_ (:pull:`10017`). - Use mean of min/max years as offset in calculation of datetime64 mean (:issue:`10019`, :pull:`10035`). By `Kai Mรผhlbauer `_. - Fix ``DataArray().drop_attrs(deep=False)`` and add support for attrs to ``DataArray()._replace()``. (:issue:`10027`, :pull:`10030`). By `Jan Haacker `_. - Fix bug preventing encoding times with missing values with small integer dtype (:issue:`9134`, :pull:`9498`). By `Spencer Clark `_. - More robustly raise an error when lazily encoding times and an integer dtype is specified with units that do not allow for an exact round trip (:pull:`9498`). By `Spencer Clark `_. - Prevent false resolution change warnings from being emitted when decoding timedeltas encoded with floating point values, and make it clearer how to silence this warning message in the case that it is rightfully emitted (:issue:`10071`, :pull:`10072`). By `Spencer Clark `_. - Fix ``isel`` for multi-coordinate Xarray indexes (:issue:`10063`, :pull:`10066`). By `Benoit Bovy `_. - Fix dask tokenization when opening each node in :py:func:`xarray.open_datatree` (:issue:`10098`, :pull:`10100`). By `Sam Levang `_. - Improve handling of dtype and NaT when encoding/decoding masked and packaged datetimes and timedeltas (:issue:`8957`, :pull:`10050`). By `Kai Mรผhlbauer `_. Documentation ~~~~~~~~~~~~~ - Better expose the :py:class:`Coordinates` class in API reference (:pull:`10000`) By `Benoit Bovy `_. .. 
_whats-new.2025.01.2: v2025.01.2 (Jan 31, 2025) ------------------------- This release brings non-nanosecond datetime and timedelta resolution to xarray, sharded reading in zarr, suggestion of correct names when trying to access non-existent data variables and bug fixes! Thanks to the 16 contributors to this release: Deepak Cherian, Elliott Sales de Andrade, Jacob Prince-Bieker, Jimmy Westling, Joe Hamman, Joseph Nowak, Justus Magin, Kai Mühlbauer, Mattia Almansi, Michael Niklas, Roelof Rietbroek, Salaheddine EL FARISSI, Sam Levang, Spencer Clark, Stephan Hoyer and Tom Nicholas In the last couple of releases xarray has been prepared for allowing non-nanosecond datetime and timedelta resolution. The code had to be changed and adapted in numerous places, affecting especially the test suite. The documentation has been updated accordingly and a new internal chapter on :ref:`internals.timecoding` has been added. To make the transition as smooth as possible, this is designed to be fully backwards compatible, keeping the current default of ``'ns'`` resolution on decoding. To opt into decoding to other resolutions (``'us'``, ``'ms'`` or ``'s'``) an instance of the newly public :py:class:`coders.CFDatetimeCoder` class can be passed through the ``decode_times`` keyword argument (see also :ref:`internals.default_timeunit`): .. code-block:: python coder = xr.coders.CFDatetimeCoder(time_unit="s") ds = xr.open_dataset(filename, decode_times=coder) Similar control of the resolution of decoded timedeltas can be achieved through passing a :py:class:`coders.CFTimedeltaCoder` instance to the ``decode_timedelta`` keyword argument: .. code-block:: python coder = xr.coders.CFTimedeltaCoder(time_unit="s") ds = xr.open_dataset(filename, decode_timedelta=coder) though by default timedeltas will be decoded to the same ``time_unit`` as datetimes. There might be slight changes when encoding/decoding times as some warning and error messages have been removed or rewritten. Xarray will now also allow non-nanosecond datetimes (with ``'us'``, ``'ms'`` or ``'s'`` resolution) when creating DataArrays from scratch, picking the lowest possible resolution: .. code:: python xr.DataArray(data=[np.datetime64("2000-01-01", "D")], dims=("time",)) In a future release the current default of ``'ns'`` resolution on decoding will eventually be deprecated. New Features ~~~~~~~~~~~~ - Relax nanosecond resolution restriction in CF time coding and permit :py:class:`numpy.datetime64` or :py:class:`numpy.timedelta64` dtype arrays with ``"s"``, ``"ms"``, ``"us"``, or ``"ns"`` resolution throughout xarray (:issue:`7493`, :pull:`9618`, :pull:`9977`, :pull:`9966`, :pull:`9999`). By `Kai Mühlbauer `_ and `Spencer Clark `_. - Enable the ``compute=False`` option in :py:meth:`DataTree.to_zarr`. (:pull:`9958`). By `Sam Levang `_. - Improve the error message raised when no key matches the available variables in a dataset. (:pull:`9943`) By `Jimmy Westling `_. - Added a ``time_unit`` argument to :py:meth:`CFTimeIndex.to_datetimeindex`. Note that in a future version of xarray, :py:meth:`CFTimeIndex.to_datetimeindex` will return a microsecond-resolution :py:class:`pandas.DatetimeIndex` instead of a nanosecond-resolution :py:class:`pandas.DatetimeIndex` (:pull:`9965`). By `Spencer Clark `_ and `Kai Mühlbauer `_. - Adds shards to the list of valid_encodings in the zarr backend, so that sharded Zarr V3s can be written (:issue:`9947`, :pull:`9948`).
By `Jacob Prince_Bieker `_ Deprecations ~~~~~~~~~~~~ - In a future version of xarray decoding of variables into :py:class:`numpy.timedelta64` values will be disabled by default. To silence warnings associated with this, set ``decode_timedelta`` to ``True``, ``False``, or a :py:class:`coders.CFTimedeltaCoder` instance when opening data (:issue:`1621`, :pull:`9966`). By `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` when the limit is bigger than the chunksize (:issue:`9939`). By `Joseph Nowak `_. - Fix issues related to Pandas v3 ("us" vs. "ns" for python datetime, copy on write) and handling of 0d-numpy arrays in datetime/timedelta decoding (:pull:`9953`). By `Kai Mรผhlbauer `_. - Remove dask-expr from CI runs, add "pyarrow" dask dependency to windows CI runs, fix related tests (:issue:`9962`, :pull:`9971`). By `Kai Mรผhlbauer `_. - Use zarr-fixture to prevent thread leakage errors (:pull:`9967`). By `Kai Mรผhlbauer `_. - Fix weighted ``polyfit`` for arrays with more than two dimensions (:issue:`9972`, :pull:`9974`). By `Mattia Almansi `_. - Preserve order of variables in :py:func:`xarray.combine_by_coords` (:issue:`8828`, :pull:`9070`). By `Kai Mรผhlbauer `_. - Cast ``numpy`` scalars to arrays in :py:meth:`NamedArray.from_arrays` (:issue:`10005`, :pull:`10008`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - A chapter on :ref:`internals.timecoding` is added to the internal section (:pull:`9618`). By `Kai Mรผhlbauer `_. - Clarified xarray's policy on API stability in the FAQ. (:issue:`9854`, :pull:`9855`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Updated time coding tests to assert exact equality rather than equality with a tolerance, since xarray's minimum supported version of cftime is greater than 1.2.1 (:pull:`9961`). By `Spencer Clark `_. .. _whats-new.2025.01.1: v2025.01.1 (Jan 9, 2025) ------------------------ This is a quick release to bring compatibility with the Zarr V3 release. It also includes an update to the time decoding infrastructure as a step toward `enabling non-nanosecond datetime support `_! New Features ~~~~~~~~~~~~ - Split out :py:class:`coders.CFDatetimeCoder` as public API in ``xr.coders``, make ``decode_times`` keyword argument consume :py:class:`coders.CFDatetimeCoder` (:pull:`9901`). By `Kai Mรผhlbauer `_. Deprecations ~~~~~~~~~~~~ - Time decoding related kwarg ``use_cftime`` is deprecated. Use keyword argument ``decode_times=CFDatetimeCoder(use_cftime=True)`` in :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_dataarray`, :py:func:`~xarray.open_datatree`, :py:func:`~xarray.open_groups`, :py:func:`~xarray.open_zarr` and :py:func:`~xarray.decode_cf` instead (:pull:`9901`). By `Kai Mรผhlbauer `_. .. _whats-new.2025.01.0: v.2025.01.0 (Jan 3, 2025) ------------------------- This release brings much improved read performance with Zarr arrays (without consolidated metadata), better support for additional array types, as well as bugfixes and performance improvements. 
Thanks to the 20 contributors to this release: Bruce Merry, Davis Bennett, Deepak Cherian, Dimitri Papadopoulos Orfanos, Florian Jetter, Illviljan, Janukan Sivajeyan, Justus Magin, Kai Germaschewski, Kai Mรผhlbauer, Max Jones, Maximilian Roos, Michael Niklas, Patrick Peglar, Sam Levang, Scott Huberty, Spencer Clark, Stephan Hoyer, Tom Nicholas and Vecko New Features ~~~~~~~~~~~~ - Improve the error message raised when using chunked-array methods if no chunk manager is available or if the requested chunk manager is missing (:pull:`9676`) By `Justus Magin `_. (:pull:`9676`) - Better support wrapping additional array types (e.g. ``cupy`` or ``jax``) by calling generalized duck array operations throughout more xarray methods. (:issue:`7848`, :pull:`9798`). By `Sam Levang `_. - Better performance for reading Zarr arrays in the ``ZarrStore`` class by caching the state of Zarr storage and avoiding redundant IO operations. By default, ``ZarrStore`` stores a snapshot of names and metadata of the in-scope Zarr arrays; this cache is then used when iterating over those Zarr arrays, which avoids IO operations and thereby reduces latency. (:issue:`9853`, :pull:`9861`). By `Davis Bennett `_. - Add ``unit`` - keyword argument to :py:func:`date_range` and ``microsecond`` parsing to iso8601-parser (:pull:`9885`). By `Kai Mรผhlbauer `_. Breaking changes ~~~~~~~~~~~~~~~~ - Methods including ``dropna``, ``rank``, ``idxmax``, ``idxmin`` require non-dimension arguments to be passed as keyword arguments. The previous behavior, which allowed ``.idxmax('foo', 'all')`` was too easily confused with ``'all'`` being a dimension. The updated equivalent is ``.idxmax('foo', how='all')``. The previous behavior was deprecated in v2023.10.0. By `Maximilian Roos `_. Deprecations ~~~~~~~~~~~~ - Finalize deprecation of ``closed`` parameters of :py:func:`cftime_range` and :py:func:`date_range` (:pull:`9882`). By `Kai Mรผhlbauer `_. Performance ~~~~~~~~~~~ - Better preservation of chunksizes in :py:meth:`Dataset.idxmin` and :py:meth:`Dataset.idxmax` (:issue:`9425`, :pull:`9800`). By `Deepak Cherian `_. - Much better implementation of vectorized interpolation for dask arrays (:pull:`9881`). By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix type annotations for ``get_axis_num``. (:issue:`9822`, :pull:`9827`). By `Bruce Merry `_. - Fix unintended load on datasets when calling :py:meth:`DataArray.plot.scatter` (:pull:`9818`). By `Jimmy Westling `_. - Fix interpolation when non-numeric coordinate variables are present (:issue:`8099`, :issue:`9839`). By `Deepak Cherian `_. Internal Changes ~~~~~~~~~~~~~~~~ - Move non-CF related ``ensure_dtype_not_object`` from conventions to backends (:pull:`9828`). By `Kai Mรผhlbauer `_. - Move handling of scalar datetimes into ``_possibly_convert_objects`` within ``as_compatible_data``. This is consistent with how lists of these objects will be converted (:pull:`9900`). By `Kai Mรผhlbauer `_. - Move ISO-8601 parser from coding.cftimeindex to coding.times to make it available there (prevents circular import), add capability to parse negative and/or five-digit years (:pull:`9899`). By `Kai Mรผhlbauer `_. - Refactor of time coding to prepare for relaxing nanosecond restriction (:pull:`9906`). By `Kai Mรผhlbauer `_. .. 
_whats-new.2024.11.0: v.2024.11.0 (Nov 22, 2024) -------------------------- This release brings better support for wrapping JAX arrays and Astropy Quantity objects, :py:meth:`DataTree.persist`, algorithmic improvements to many methods with dask (:py:meth:`Dataset.polyfit`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, rolling reductions), and bug fixes. Thanks to the 22 contributors to this release: Benoit Bovy, Deepak Cherian, Dimitri Papadopoulos Orfanos, Holly Mandel, James Bourbeau, Joe Hamman, Justus Magin, Kai Mรผhlbauer, Lukas Trippe, Mathias Hauser, Maximilian Roos, Michael Niklas, Pascal Bourgault, Patrick Hoefler, Sam Levang, Sarah Charlotte Johnson, Scott Huberty, Stephan Hoyer, Tom Nicholas, Virgile Andreani, joseph nowak and tvo New Features ~~~~~~~~~~~~ - Added :py:meth:`DataTree.persist` method (:issue:`9675`, :pull:`9682`). By `Sam Levang `_. - Added ``write_inherited_coords`` option to :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr` (:pull:`9677`). By `Stephan Hoyer `_. - Support lazy grouping by dask arrays, and allow specifying ordered groups with ``UniqueGrouper(labels=["a", "b", "c"])`` (:issue:`2852`, :issue:`757`). By `Deepak Cherian `_. - Add new ``automatic_rechunk`` kwarg to :py:meth:`DataArrayRolling.construct` and :py:meth:`DatasetRolling.construct`. This is only useful on ``dask>=2024.11.0`` (:issue:`9550`). By `Deepak Cherian `_. - Optimize ffill, bfill with dask when limit is specified (:pull:`9771`). By `Joseph Nowak `_, and `Patrick Hoefler `_. - Allow wrapping ``np.ndarray`` subclasses, e.g. ``astropy.units.Quantity`` (:issue:`9704`, :pull:`9760`). By `Sam Levang `_ and `Tien Vo `_. - Optimize :py:meth:`DataArray.polyfit` and :py:meth:`Dataset.polyfit` with dask, when used with arrays with more than two dimensions. (:issue:`5629`). By `Deepak Cherian `_. - Support for directly opening remote files as string paths (for example, ``s3://bucket/data.nc``) with ``fsspec`` when using the ``h5netcdf`` engine (:issue:`9723`, :pull:`9797`). By `James Bourbeau `_. - Re-implement the :py:mod:`ufuncs` module, which now dynamically dispatches to the underlying array's backend. Provides better support for certain wrapped array types like ``jax.numpy.ndarray``. (:issue:`7848`, :pull:`9776`). By `Sam Levang `_. - Speed up loading of large zarr stores using dask arrays. (:issue:`8902`) By `Deepak Cherian `_. Breaking Changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed ===================== ========= ======= Package Old New ===================== ========= ======= boto3 1.28 1.29 dask-core 2023.9 2023.11 distributed 2023.9 2023.11 h5netcdf 1.2 1.3 numbagg 0.2.1 0.6 typing_extensions 4.7 4.8 ===================== ========= ======= Deprecations ~~~~~~~~~~~~ - Grouping by a chunked array (e.g. dask or cubed) currently eagerly loads that variable in to memory. This behaviour is deprecated. If eager loading was intended, please load such arrays manually using ``.load()`` or ``.compute()``. Else pass ``eagerly_compute_group=False``, and provide expected group labels using the ``labels`` kwarg to a grouper object such as :py:class:`grouper.UniqueGrouper` or :py:class:`grouper.BinGrouper`. Bug fixes ~~~~~~~~~ - Fix inadvertent deep-copying of child data in DataTree (:issue:`9683`, :pull:`9684`). By `Stephan Hoyer `_. - Avoid including parent groups when writing DataTree subgroups to Zarr or netCDF (:pull:`9682`). By `Stephan Hoyer `_. 
- Fix regression in the interoperability of :py:meth:`DataArray.polyfit` and :py:meth:`xr.polyval` for date-time coordinates. (:pull:`9691`). By `Pascal Bourgault `_. - Fix CF decoding of ``grid_mapping`` to allow all possible formats, add tests (:issue:`9761`, :pull:`9765`). By `Kai Mรผhlbauer `_. - Add ``User-Agent`` to request-headers when retrieving tutorial data (:issue:`9774`, :pull:`9782`) By `Kai Mรผhlbauer `_. Documentation ~~~~~~~~~~~~~ - Mention attribute peculiarities in docs/docstrings (:issue:`4798`, :pull:`9700`). By `Kai Mรผhlbauer `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``persist`` methods now route through the :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` (:pull:`9682`). By `Sam Levang `_. .. _whats-new.2024.10.0: v2024.10.0 (Oct 24th, 2024) --------------------------- This release brings official support for ``xarray.DataTree``, and compatibility with zarr-python v3! Aside from these two huge features, it also improves support for vectorised interpolation and fixes various bugs. Thanks to the 31 contributors to this release: Alfonso Ladino, DWesl, Deepak Cherian, Eni, Etienne Schalk, Holly Mandel, Ilan Gold, Illviljan, Joe Hamman, Justus Magin, Kai Mรผhlbauer, Karl Krauth, Mark Harfouche, Martey Dodoo, Matt Savoie, Maximilian Roos, Patrick Hoefler, Peter Hill, Renat Sibgatulin, Ryan Abernathey, Spencer Clark, Stephan Hoyer, Tom Augspurger, Tom Nicholas, Vecko, Virgile Andreani, Yvonne Frรถhlich, carschandler, joseph nowak, mgunyho and owenlittlejohns New Features ~~~~~~~~~~~~ - ``DataTree`` related functionality is now exposed in the main ``xarray`` public API. This includes: ``xarray.DataTree``, ``xarray.open_datatree``, ``xarray.open_groups``, ``xarray.map_over_datasets``, ``xarray.group_subtrees``, ``xarray.register_datatree_accessor`` and ``xarray.testing.assert_isomorphic``. By `Owen Littlejohns `_, `Eni Awowale `_, `Matt Savoie `_, `Stephan Hoyer `_, `Tom Nicholas `_, `Justus Magin `_, and `Alfonso Ladino `_. - A migration guide for users of the prototype `xarray-contrib/datatree repository `_ has been added, and can be found in the ``DATATREE_MIGRATION_GUIDE.md`` file in the repository root. By `Tom Nicholas `_. - Support for Zarr-Python 3 (:issue:`95515`, :pull:`9552`). By `Tom Augspurger `_, `Ryan Abernathey `_ and `Joe Hamman `_. - Added zarr backends for :py:func:`open_groups` (:issue:`9430`, :pull:`9469`). By `Eni Awowale `_. - Added support for vectorized interpolation using additional interpolators from the ``scipy.interpolate`` module (:issue:`9049`, :pull:`9526`). By `Holly Mandel `_. - Implement handling of complex numbers (netcdf4/h5netcdf) and enums (h5netcdf) (:issue:`9246`, :issue:`3297`, :pull:`9509`). By `Kai Mรผhlbauer `_. - Fix passing missing arguments to when opening hdf5 and netCDF4 datatrees (:issue:`9427`, :pull:`9428`). By `Alfonso Ladino `_. Bug fixes ~~~~~~~~~ - Make illegal path-like variable names when constructing a DataTree from a Dataset (:issue:`9339`, :pull:`9378`) By `Etienne Schalk `_. - Work around `upstream pandas issue `_ to ensure that we can decode times encoded with small integer dtype values (e.g. ``np.int32``) in environments with NumPy 2.0 or greater without needing to fall back to cftime (:pull:`9518`). By `Spencer Clark `_. - Fix bug when encoding times with missing values as floats in the case when the non-missing times could in theory be encoded with integers (:issue:`9488`, :pull:`9497`). By `Spencer Clark `_. - Fix a few bugs affecting groupby reductions with ``flox``. 
(:issue:`8090`, :issue:`9398`, :issue:`9648`). By `Deepak Cherian `_. - Fix the safe_chunks validation option on the to_zarr method (:issue:`5511`, :pull:`9559`). By `Joseph Nowak `_. - Fix binning by multiple variables where some bins have no observations. (:issue:`9630`). By `Deepak Cherian `_. - Fix issue where polyfit wouldn't handle non-dimension coordinates. (:issue:`4375`, :pull:`9369`) By `Karl Krauth `_. Documentation ~~~~~~~~~~~~~ - Migrate documentation for ``datatree`` into main ``xarray`` documentation (:pull:`9033`). For information on previous ``datatree`` releases, please see: `datatree's historical release notes `_. By `Owen Littlejohns `_, `Matt Savoie `_, and `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ .. _whats-new.2024.09.0: v2024.09.0 (Sept 11, 2024) -------------------------- This release drops support for Python 3.9, and adds support for grouping by :ref:`multiple arrays `, while providing numerous performance improvements and bug fixes. Thanks to the 33 contributors to this release: Alfonso Ladino, Andrew Scherer, Anurag Nayak, David Hoese, Deepak Cherian, Diogo Teles Sant'Anna, Dom, Elliott Sales de Andrade, Eni, Holly Mandel, Illviljan, Jack Kelly, Julius Busecke, Justus Magin, Kai Mühlbauer, Manish Kumar Gupta, Matt Savoie, Maximilian Roos, Michele Claus, Miguel Jimenez, Niclas Rieger, Pascal Bourgault, Philip Chmielowiec, Spencer Clark, Stephan Hoyer, Tao Xin, Tiago Sanona, TimothyCera-NOAA, Tom Nicholas, Tom White, Virgile Andreani, oliverhiggs and tiago New Features ~~~~~~~~~~~~ - Add :py:attr:`~core.accessor_dt.DatetimeAccessor.days_in_year` and :py:attr:`~core.accessor_dt.DatetimeAccessor.decimal_year` to the ``DatetimeAccessor`` on ``xr.DataArray``. (:pull:`9105`). By `Pascal Bourgault `_. Performance ~~~~~~~~~~~ - Make chunk manager an option in ``set_options`` (:pull:`9362`). By `Tom White `_. - Support for :ref:`grouping by multiple variables `. This is quite new, so please check your results and report bugs. Binary operations after grouping by multiple arrays are not supported yet. (:issue:`1056`, :issue:`9332`, :issue:`324`, :pull:`9372`). By `Deepak Cherian `_. - Allow data variable specific ``constant_values`` in the dataset ``pad`` function (:pull:`9353`). By `Tiago Sanona `_. - Speed up grouping by avoiding deep-copy of non-dimension coordinates (:issue:`9426`, :pull:`9393`) By `Deepak Cherian `_. Breaking changes ~~~~~~~~~~~~~~~~ - Support for ``python 3.9`` has been dropped (:pull:`8937`) - The minimum versions of some dependencies were changed ===================== ========= ======= Package Old New ===================== ========= ======= boto3 1.26 1.28 cartopy 0.21 0.22 dask-core 2023.4 2023.9 distributed 2023.4 2023.9 h5netcdf 1.1 1.2 iris 3.4 3.7 numba 0.56 0.57 numpy 1.23 1.24 pandas 2.0 2.1 scipy 1.10 1.11 typing_extensions 4.5 4.7 zarr 2.14 2.16 ===================== ========= ======= Bug fixes ~~~~~~~~~ - Fix bug with rechunking to a frequency when some periods contain no data (:issue:`9360`). By `Deepak Cherian `_. - Fix bug causing ``DataTree.from_dict`` to be sensitive to insertion order (:issue:`9276`, :pull:`9292`). By `Tom Nicholas `_. - Fix resampling error with monthly, quarterly, or yearly frequencies with cftime when the time bins straddle the date "0001-01-01". For example, this can happen in certain circumstances when the time coordinate contains the date "0001-01-01".
(:issue:`9108`, :pull:`9116`) By `Spencer Clark `_ and `Deepak Cherian `_. - Fix issue with passing parameters to ZarrStore.open_store when opening datatree in zarr format (:issue:`9376`, :pull:`9377`). By `Alfonso Ladino `_. - Fix deprecation warning that was raised when calling ``np.array`` on an ``xr.DataArray`` in NumPy 2.0 (:issue:`9312`, :pull:`9393`) By `Andrew Scherer `_. - Fix passing missing arguments when opening hdf5 and netCDF4 datatrees (:issue:`9427`, :pull:`9428`). By `Alfonso Ladino `_. - Fix support for using ``pandas.DateOffset``, ``pandas.Timedelta``, and ``datetime.timedelta`` objects as ``resample`` frequencies (:issue:`9408`, :pull:`9413`). By `Oliver Higgs `_. Internal Changes ~~~~~~~~~~~~~~~~ - Re-enable testing ``pydap`` backend with ``numpy>=2`` (:pull:`9391`). By `Miguel Jimenez `_. .. _whats-new.2024.07.0: v2024.07.0 (Jul 30, 2024) ------------------------- This release extends the API for groupby operations with various `grouper objects `_, and includes improvements to the documentation and numerous bugfixes. Thanks to the 22 contributors to this release: Alfonso Ladino, ChrisCleaner, David Hoese, Deepak Cherian, Dieter Werthmüller, Illviljan, Jessica Scheick, Joel Jaeschke, Justus Magin, K. Arthur Endsley, Kai Mühlbauer, Mark Harfouche, Martin Raspaud, Mathijs Verhaegh, Maximilian Roos, Michael Niklas, Michał Górny, Moritz Schreiber, Pontus Lurcock, Spencer Clark, Stephan Hoyer and Tom Nicholas New Features ~~~~~~~~~~~~ - Use fastpath when grouping by both monotonically increasing and decreasing variables in :py:class:`GroupBy` (:issue:`6220`, :pull:`7427`). By `Joel Jaeschke `_. - Introduce new :py:class:`groupers.UniqueGrouper`, :py:class:`groupers.BinGrouper`, and :py:class:`groupers.TimeResampler` objects as a step towards supporting grouping by multiple variables. See the `docs `_ and the `grouper design doc `_ for more. (:issue:`6610`, :pull:`8840`). By `Deepak Cherian `_. - Allow rechunking to a frequency using ``Dataset.chunk(time=TimeResampler("YE"))`` syntax. (:issue:`7559`, :pull:`9109`) Such rechunking allows many time domain analyses to be executed in an embarrassingly parallel fashion. By `Deepak Cherian `_. - Allow per-variable specification of ``mask_and_scale``, ``decode_times``, ``decode_timedelta``, ``use_cftime`` and ``concat_characters`` params in :py:func:`~xarray.open_dataset` (:pull:`9218`). By `Mathijs Verhaegh `_. - Allow chunking for arrays with duplicated dimension names (:issue:`8759`, :pull:`9099`). By `Martin Raspaud `_. - Extract the source url from fsspec objects (:issue:`9142`, :pull:`8923`). By `Justus Magin `_. - Add :py:meth:`DataArray.drop_attrs` & :py:meth:`Dataset.drop_attrs` methods, to return an object without ``attrs``. A ``deep`` parameter controls whether variables' ``attrs`` are also dropped. By `Maximilian Roos `_. (:pull:`8288`) - Added :py:func:`open_groups` for h5netcdf and netCDF4 backends (:issue:`9137`, :pull:`9243`). By `Eni Awowale `_. Breaking changes ~~~~~~~~~~~~~~~~ - The ``base`` and ``loffset`` parameters to :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample` are now removed. These parameters have been deprecated since v2023.03.0. Using the ``origin`` or ``offset`` parameters is recommended as a replacement for using the ``base`` parameter and using time offset arithmetic is recommended as a replacement for using the ``loffset`` parameter. (:pull:`9233`) By `Deepak Cherian `_. - The ``squeeze`` kwarg to ``groupby`` is now ignored.
This has been the source of some quite confusing behaviour and has been deprecated since v2024.01.0. ``groupby`` behavior is now always consistent with the existing ``.groupby(..., squeeze=False)`` behavior. No errors will be raised if ``squeeze=False``. (:pull:`9280`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix scatter plot broadcasting unnecessarily. (:issue:`9129`, :pull:`9206`) By `Jimmy Westling `_. - Don't convert custom indexes to ``pandas`` indexes when computing a diff (:pull:`9157`) By `Justus Magin `_. - Make :py:func:`testing.assert_allclose` work with numpy 2.0 (:issue:`9165`, :pull:`9166`). By `Pontus Lurcock `_. - Allow diffing objects with array attributes on variables (:issue:`9153`, :pull:`9169`). By `Justus Magin `_. - ``numpy>=2`` compatibility in the ``netcdf4`` backend (:pull:`9136`). By `Justus Magin `_ and `Kai Mรผhlbauer `_. - Promote floating-point numeric datetimes before decoding (:issue:`9179`, :pull:`9182`). By `Justus Magin `_. - Address regression introduced in :pull:`9002` that prevented objects returned by :py:meth:`DataArray.convert_calendar` to be indexed by a time index in certain circumstances (:issue:`9138`, :pull:`9192`). By `Mark Harfouche `_ and `Spencer Clark `_. - Fix static typing of tolerance arguments by allowing ``str`` type (:issue:`8892`, :pull:`9194`). By `Michael Niklas `_. - Dark themes are now properly detected for ``html[data-theme=dark]``-tags (:pull:`9200`). By `Dieter Werthmรผller `_. - Reductions no longer fail for ``np.complex_`` dtype arrays when numbagg is installed. (:pull:`9210`) By `Maximilian Roos `_. Documentation ~~~~~~~~~~~~~ - Adds intro to backend section of docs, including a flow-chart to navigate types of backends (:pull:`9175`). By `Jessica Scheick `_. - Adds a flow-chart diagram to help users navigate help resources (:discussion:`8990`, :pull:`9147`). By `Jessica Scheick `_. - Improvements to Zarr & chunking docs (:pull:`9139`, :pull:`9140`, :pull:`9132`) By `Maximilian Roos `_. - Fix copybutton for multi line examples and double digit ipython cell numbers (:pull:`9264`). By `Moritz Schreiber `_. Internal Changes ~~~~~~~~~~~~~~~~ - Enable typing checks of pandas (:pull:`9213`). By `Michael Niklas `_. .. _whats-new.2024.06.0: v2024.06.0 (Jun 13, 2024) ------------------------- This release brings various performance optimizations and compatibility with the upcoming numpy 2.0 release. Thanks to the 22 contributors to this release: Alfonso Ladino, David Hoese, Deepak Cherian, Eni Awowale, Ilan Gold, Jessica Scheick, Joe Hamman, Justus Magin, Kai Mรผhlbauer, Mark Harfouche, Mathias Hauser, Matt Savoie, Maximilian Roos, Mike Thramann, Nicolas Karasiak, Owen Littlejohns, Paul OckenfuรŸ, Philippe THOMY, Scott Henderson, Spencer Clark, Stephan Hoyer and Tom Nicholas Performance ~~~~~~~~~~~ - Small optimization to the netCDF4 and h5netcdf backends (:issue:`9058`, :pull:`9067`). By `Deepak Cherian `_. - Small optimizations to help reduce indexing speed of datasets (:pull:`9002`). By `Mark Harfouche `_. - Performance improvement in ``open_datatree`` method for Zarr, netCDF4 and h5netcdf backends (:issue:`8994`, :pull:`9014`). By `Alfonso Ladino `_. Bug fixes ~~~~~~~~~ - Preserve conversion of timezone-aware pandas Datetime arrays to numpy object arrays (:issue:`9026`, :pull:`9042`). By `Ilan Gold `_. - :py:meth:`DataArrayResample.interpolate` and :py:meth:`DatasetResample.interpolate` method now support arbitrary kwargs such as ``order`` for polynomial interpolation (:issue:`8762`). By `Nicolas Karasiak `_. 
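As a minimal sketch of the keyword passing described in the resample entry above (the example array, resampling frequencies and ``order`` value are invented for illustration and assume the ``kind``/``order`` spelling mentioned in that entry; SciPy is required):

.. code-block:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.arange(4.0),
        dims="time",
        coords={"time": xr.date_range("2000-01-01", periods=4, freq="6h")},
    )
    # upsample to hourly steps; `order` is forwarded to the underlying interpolator
    hourly = da.resample(time="1h").interpolate("polynomial", order=2)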
Documentation ~~~~~~~~~~~~~ - Add link to CF Conventions on packed data and sentence on type determination in the I/O user guide (:issue:`9041`, :pull:`9045`). By `Kai Mรผhlbauer `_. Internal Changes ~~~~~~~~~~~~~~~~ - Migrates remainder of ``io.py`` to ``xarray/core/datatree_io.py`` and ``TreeAttrAccessMixin`` into ``xarray/core/common.py`` (:pull:`9011`). By `Owen Littlejohns `_ and `Tom Nicholas `_. - Compatibility with numpy 2 (:issue:`8844`, :pull:`8854`, :pull:`8946`). By `Justus Magin `_ and `Stephan Hoyer `_. .. _whats-new.2024.05.0: v2024.05.0 (May 12, 2024) ------------------------- This release brings support for pandas ExtensionArray objects, optimizations when reading Zarr, the ability to concatenate datasets without pandas indexes, more compatibility fixes for the upcoming numpy 2.0, and the migration of most of the xarray-datatree project code into xarray ``main``! Thanks to the 18 contributors to this release: Aimilios Tsouvelekakis, Andrey Akinshin, Deepak Cherian, Eni Awowale, Ilan Gold, Illviljan, Justus Magin, Mark Harfouche, Matt Savoie, Maximilian Roos, Noah C. Benson, Pascal Bourgault, Ray Bell, Spencer Clark, Tom Nicholas, ignamv, owenlittlejohns, and saschahofmann. New Features ~~~~~~~~~~~~ - New "random" method for converting to and from 360_day calendars (:pull:`8603`). By `Pascal Bourgault `_. - Xarray now makes a best attempt not to coerce :py:class:`pandas.api.extensions.ExtensionArray` to a numpy array by supporting 1D ``ExtensionArray`` objects internally where possible. Thus, :py:class:`Dataset` objects initialized with a ``pd.Categorical``, for example, will retain the object. However, one cannot do operations that are not possible on the ``ExtensionArray`` then, such as broadcasting. (:issue:`5287`, :issue:`8463`, :pull:`8723`) By `Ilan Gold `_. - :py:func:`testing.assert_allclose` / :py:func:`testing.assert_equal` now accept a new argument ``check_dims="transpose"``, controlling whether a transposed array is considered equal. (:issue:`5733`, :pull:`8991`) By `Ignacio Martinez Vazquez `_. - Added the option to avoid automatically creating 1D pandas indexes in :py:meth:`Dataset.expand_dims()`, by passing the new kwarg ``create_index_for_new_dim=False``. (:pull:`8960`) By `Tom Nicholas `_. - Avoid automatically re-creating 1D pandas indexes in :py:func:`concat()`. Also added option to avoid creating 1D indexes for new dimension coordinates by passing the new kwarg ``create_index_for_new_dim=False``. (:issue:`8871`, :pull:`8872`) By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - The PyNIO backend has been deleted (:issue:`4491`, :pull:`7301`). By `Deepak Cherian `_. - The minimum versions of some dependencies were changed, in particular our minimum supported pandas version is now Pandas 2. ===================== ========= ======= Package Old New ===================== ========= ======= dask-core 2022.12 2023.4 distributed 2022.12 2023.4 h5py 3.7 3.8 matplotlib-base 3.6 3.7 packaging 22.0 23.1 pandas 1.5 2.0 pydap 3.3 3.4 sparse 0.13 0.14 typing_extensions 4.4 4.5 zarr 2.13 2.14 ===================== ========= ======= Bug fixes ~~~~~~~~~ - Following `an upstream bug fix `_ to :py:func:`pandas.date_range`, date ranges produced by :py:func:`xarray.cftime_range` with negative frequencies will now fall fully within the bounds of the provided start and end dates (:pull:`8999`). By `Spencer Clark `_. 
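A short sketch of the behaviour described in the ``cftime_range`` entry above; the dates, calendar and the negative monthly frequency string are chosen only for illustration (following the ``"-1YE"`` example given elsewhere in these notes) and are not part of the original entry:

.. code-block:: python

    import xarray as xr

    # a descending monthly-start range; the generated dates now all lie
    # within the provided start/end bounds instead of overshooting past `end`
    times = xr.cftime_range(
        start="2000-06-01", end="2000-01-10", freq="-1MS", calendar="noleap"
    )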
Internal Changes ~~~~~~~~~~~~~~~~ - Enforces failures on CI when tests raise warnings from within xarray (:pull:`8974`) By `Maximilian Roos `_ - Migrates ``formatting_html`` functionality for ``DataTree`` into ``xarray/core`` (:pull:`8930`) By `Eni Awowale `_, `Julia Signell `_ and `Tom Nicholas `_. - Migrates ``datatree_mapping`` functionality into ``xarray/core`` (:pull:`8948`) By `Matt Savoie `_ `Owen Littlejohns `_ and `Tom Nicholas `_. - Migrates ``extensions``, ``formatting`` and ``datatree_render`` functionality for ``DataTree`` into ``xarray/core``. Also migrates ``testing`` functionality into ``xarray/testing/assertions`` for ``DataTree``. (:pull:`8967`) By `Owen Littlejohns `_ and `Tom Nicholas `_. - Migrates ``ops.py`` functionality into ``xarray/core/datatree_ops.py`` (:pull:`8976`) By `Matt Savoie `_ and `Tom Nicholas `_. - Migrates ``iterator`` functionality into ``xarray/core`` (:pull:`8879`) By `Owen Littlejohns `_, `Matt Savoie `_ and `Tom Nicholas `_. - ``transpose``, ``set_dims``, ``stack`` & ``unstack`` now use a ``dim`` kwarg rather than ``dims`` or ``dimensions``. This is the final change to make xarray methods consistent with their use of ``dim``. Using the existing kwarg will raise a warning. By `Maximilian Roos `_ .. _whats-new.2024.03.0: v2024.03.0 (Mar 29, 2024) ------------------------- This release brings performance improvements for grouped and resampled quantile calculations, CF decoding improvements, minor optimizations to distributed Zarr writes, and compatibility fixes for Numpy 2.0 and Pandas 3.0. Thanks to the 18 contributors to this release: Anderson Banihirwe, Christoph Hasse, Deepak Cherian, Etienne Schalk, Justus Magin, Kai Mรผhlbauer, Kevin Schwarzwald, Mark Harfouche, Martin, Matt Savoie, Maximilian Roos, Ray Bell, Roberto Chang, Spencer Clark, Tom Nicholas, crusaderky, owenlittlejohns, saschahofmann New Features ~~~~~~~~~~~~ - Partial writes to existing chunks with ``region`` or ``append_dim`` will now raise an error (unless ``safe_chunks=False``); previously an error would only be raised on new variables. (:pull:`8459`, :issue:`8371`, :issue:`8882`) By `Maximilian Roos `_. - Grouped and resampling quantile calculations now use the vectorized algorithm in ``flox>=0.9.4`` if present. By `Deepak Cherian `_. - Do not broadcast in arithmetic operations when global option ``arithmetic_broadcast=False`` (:issue:`6806`, :pull:`8784`). By `Etienne Schalk `_ and `Deepak Cherian `_. - Add the ``.oindex`` property to Explicitly Indexed Arrays for orthogonal indexing functionality. (:issue:`8238`, :pull:`8750`) By `Anderson Banihirwe `_. - Add the ``.vindex`` property to Explicitly Indexed Arrays for vectorized indexing functionality. (:issue:`8238`, :pull:`8780`) By `Anderson Banihirwe `_. - Expand use of ``.oindex`` and ``.vindex`` properties. (:pull:`8790`) By `Anderson Banihirwe `_ and `Deepak Cherian `_. - Allow creating :py:class:`xr.Coordinates` objects with no indexes (:pull:`8711`) By `Benoit Bovy `_ and `Tom Nicholas `_. - Enable plotting of ``datetime.dates``. (:issue:`8866`, :pull:`8873`) By `Sascha Hofmann `_. Breaking changes ~~~~~~~~~~~~~~~~ - Don't allow overwriting index variables with ``to_zarr`` region writes. (:issue:`8589`, :pull:`8876`). By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - The default ``freq`` parameter in :py:meth:`xr.date_range` and :py:meth:`xr.cftime_range` is set to ``'D'`` only if ``periods``, ``start``, or ``end`` are ``None`` (:issue:`8770`, :pull:`8774`). By `Roberto Chang `_. 
- Ensure that non-nanosecond precision :py:class:`numpy.datetime64` and :py:class:`numpy.timedelta64` values are cast to nanosecond precision values when used in :py:meth:`DataArray.expand_dims` and ::py:meth:`Dataset.expand_dims` (:pull:`8781`). By `Spencer Clark `_. - CF conform handling of ``_FillValue``/``missing_value`` and ``dtype`` in ``CFMaskCoder``/``CFScaleOffsetCoder`` (:issue:`2304`, :issue:`5597`, :issue:`7691`, :pull:`8713`, see also discussion in :pull:`7654`). By `Kai Mรผhlbauer `_. - Do not cast ``_FillValue``/``missing_value`` in ``CFMaskCoder`` if ``_Unsigned`` is provided (:issue:`8844`, :pull:`8852`). - Adapt handling of copy keyword argument for numpy >= 2.0dev (:issue:`8844`, :pull:`8851`, :pull:`8865`). By `Kai Mรผhlbauer `_. - Import trapz/trapezoid depending on numpy version (:issue:`8844`, :pull:`8865`). By `Kai Mรผhlbauer `_. - Warn and return bytes undecoded in case of UnicodeDecodeError in h5netcdf-backend (:issue:`5563`, :pull:`8874`). By `Kai Mรผhlbauer `_. - Fix bug incorrectly disallowing creation of a dataset with a multidimensional coordinate variable with the same name as one of its dims. (:issue:`8884`, :pull:`8886`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Migrates ``treenode`` functionality into ``xarray/core`` (:pull:`8757`) By `Matt Savoie `_ and `Tom Nicholas `_. - Migrates ``datatree`` functionality into ``xarray/core``. (:pull:`8789`) By `Owen Littlejohns `_, `Matt Savoie `_ and `Tom Nicholas `_. .. _whats-new.2024.02.0: v2024.02.0 (Feb 19, 2024) ------------------------- This release brings size information to the text ``repr``, changes to the accepted frequency strings, and various bug fixes. Thanks to our 12 contributors: Anderson Banihirwe, Deepak Cherian, Eivind Jahren, Etienne Schalk, Justus Magin, Marco Wolsza, Mathias Hauser, Matt Savoie, Maximilian Roos, Rambaud Pierrick, Tom Nicholas New Features ~~~~~~~~~~~~ - Added a simple ``nbytes`` representation in DataArrays and Dataset ``repr``. (:issue:`8690`, :pull:`8702`). By `Etienne Schalk `_. - Allow negative frequency strings (e.g. ``"-1YE"``). These strings are for example used in :py:func:`date_range`, and :py:func:`cftime_range` (:pull:`8651`). By `Mathias Hauser `_. - Add :py:meth:`NamedArray.expand_dims`, :py:meth:`NamedArray.permute_dims` and :py:meth:`NamedArray.broadcast_to` (:pull:`8380`) By `Anderson Banihirwe `_. - Xarray now defers to `flox's heuristics `_ to set the default ``method`` for groupby problems. This only applies to ``flox>=0.9``. By `Deepak Cherian `_. - All ``quantile`` methods (e.g. :py:meth:`DataArray.quantile`) now use ``numbagg`` for the calculation of nanquantiles (i.e., ``skipna=True``) if it is installed. This is currently limited to the linear interpolation method (`method='linear'`). (:issue:`7377`, :pull:`8684`) By `Marco Wolsza `_. Breaking changes ~~~~~~~~~~~~~~~~ - :py:func:`infer_freq` always returns the frequency strings as defined in pandas 2.2 (:issue:`8612`, :pull:`8627`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ - The ``dt.weekday_name`` parameter wasn't functional on modern pandas versions and has been removed. (:issue:`8610`, :pull:`8664`) By `Sam Coleman `_. Bug fixes ~~~~~~~~~ - Fixed a regression that prevented multi-index level coordinates being serialized after resetting or dropping the multi-index (:issue:`8628`, :pull:`8672`). By `Benoit Bovy `_. - Fix bug with broadcasting when wrapping array API-compliant classes. (:issue:`8665`, :pull:`8669`) By `Tom Nicholas `_. 
- Ensure :py:meth:`DataArray.unstack` works when wrapping array API-compliant classes. (:issue:`8666`, :pull:`8668`) By `Tom Nicholas `_. - Fix negative slicing of Zarr arrays without dask installed. (:issue:`8252`) By `Deepak Cherian `_. - Preserve chunks when writing time-like variables to zarr by enabling lazy CF encoding of time-like variables (:issue:`7132`, :issue:`8230`, :issue:`8432`, :pull:`8575`). By `Spencer Clark `_ and `Mattia Almansi `_. - Preserve chunks when writing time-like variables to zarr by enabling their lazy encoding (:issue:`7132`, :issue:`8230`, :issue:`8432`, :pull:`8253`, :pull:`8575`; see also discussion in :pull:`8253`). By `Spencer Clark `_ and `Mattia Almansi `_. - Raise an informative error if dtype encoding of time-like variables would lead to integer overflow or unsafe conversion from floating point to integer values (:issue:`8542`, :pull:`8575`). By `Spencer Clark `_. - Raise an error when unstacking a MultiIndex that has duplicates as this would lead to silent data loss (:issue:`7104`, :pull:`8737`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - Fix ``variables`` arg typo in ``Dataset.sortby()`` docstring (:issue:`8663`, :pull:`8670`) By `Tom Vo `_. - Fixed documentation where the use of the depreciated pandas frequency string prevented the documentation from being built. (:pull:`8638`) By `Sam Coleman `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``DataArray.dt`` now raises an ``AttributeError`` rather than a ``TypeError`` when the data isn't datetime-like. (:issue:`8718`, :pull:`8724`) By `Maximilian Roos `_. - Move ``parallelcompat`` and ``chunk managers`` modules from ``xarray/core`` to ``xarray/namedarray``. (:pull:`8319`) By `Tom Nicholas `_ and `Anderson Banihirwe `_. - Imports ``datatree`` repository and history into internal location. (:pull:`8688`) By `Matt Savoie `_, `Justus Magin `_ and `Tom Nicholas `_. - Adds :py:func:`open_datatree` into ``xarray/backends`` (:pull:`8697`) By `Matt Savoie `_ and `Tom Nicholas `_. - Refactor :py:meth:`xarray.core.indexing.DaskIndexingAdapter.__getitem__` to remove an unnecessary rewrite of the indexer key (:issue:`8377`, :pull:`8758`) By `Anderson Banihirwe `_. .. _whats-new.2024.01.1: v2024.01.1 (23 Jan, 2024) ------------------------- This release is to fix a bug with the rendering of the documentation, but it also includes changes to the handling of pandas frequency strings. Breaking changes ~~~~~~~~~~~~~~~~ - Following pandas, :py:meth:`infer_freq` will return ``"YE"``, instead of ``"Y"`` (formerly ``"A"``). This is to be consistent with the deprecation of the latter frequency string in pandas 2.2. This is a follow up to :pull:`8415` (:issue:`8612`, :pull:`8642`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ - Following pandas, the frequency string ``"Y"`` (formerly ``"A"``) is deprecated in favor of ``"YE"``. These strings are used, for example, in :py:func:`date_range`, :py:func:`cftime_range`, :py:meth:`DataArray.resample`, and :py:meth:`Dataset.resample` among others (:issue:`8612`, :pull:`8629`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - Pin ``sphinx-book-theme`` to ``1.0.1`` to fix a rendering issue with the sidebar in the docs. (:issue:`8619`, :pull:`8632`) By `Tom Nicholas `_. .. _whats-new.2024.01.0: v2024.01.0 (17 Jan, 2024) ------------------------- This release brings support for weights in correlation and covariance functions, a new ``DataArray.cumulative`` aggregation, improvements to ``xr.map_blocks``, an update to our minimum dependencies, and various bugfixes. 
Thanks to our 17 contributors to this release: Abel Aoun, Deepak Cherian, Illviljan, Johan Mathe, Justus Magin, Kai Mรผhlbauer, Llorenรง Lledรณ, Mark Harfouche, Markel, Mathias Hauser, Maximilian Roos, Michael Niklas, Niclas Rieger, Sรฉbastien Celles, Tom Nicholas, Trinh Quoc Anh, and crusaderky. New Features ~~~~~~~~~~~~ - :py:meth:`xr.cov` and :py:meth:`xr.corr` now support using weights (:issue:`8527`, :pull:`7392`). By `Llorenรง Lledรณ `_. - Accept the compression arguments new in netCDF 1.6.0 in the netCDF4 backend. See `netCDF4 documentation `_ for details. Note that some new compression filters needs plugins to be installed which may not be available in all netCDF distributions. By `Markel Garcรญa-Dรญez `_. (:issue:`6929`, :pull:`7551`) - Add :py:meth:`DataArray.cumulative` & :py:meth:`Dataset.cumulative` to compute cumulative aggregations, such as ``sum``, along a dimension โ€” for example ``da.cumulative('time').sum()``. This is similar to pandas' ``.expanding``, and mostly equivalent to ``.cumsum`` methods, or to :py:meth:`DataArray.rolling` with a window length equal to the dimension size. By `Maximilian Roos `_. (:pull:`8512`) - Decode/Encode netCDF4 enums and store the enum definition in dataarrays' dtype metadata. If multiple variables share the same enum in netCDF4, each dataarray will have its own enum definition in their respective dtype metadata. By `Abel Aoun `_. (:issue:`8144`, :pull:`8147`) Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed (:pull:`8586`): ===================== ========= ======== Package Old New ===================== ========= ======== cartopy 0.20 0.21 dask-core 2022.7 2022.12 distributed 2022.7 2022.12 flox 0.5 0.7 iris 3.2 3.4 matplotlib-base 3.5 3.6 numpy 1.22 1.23 numba 0.55 0.56 packaging 21.3 22.0 seaborn 0.11 0.12 scipy 1.8 1.10 typing_extensions 4.3 4.4 zarr 2.12 2.13 ===================== ========= ======== Deprecations ~~~~~~~~~~~~ - The ``squeeze`` kwarg to GroupBy is now deprecated. (:issue:`2157`, :pull:`8507`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Support non-string hashable dimensions in :py:class:`xarray.DataArray` (:issue:`8546`, :pull:`8559`). By `Michael Niklas `_. - Reverse index output of bottleneck's rolling move_argmax/move_argmin functions (:issue:`8541`, :pull:`8552`). By `Kai Mรผhlbauer `_. - Vendor ``SerializableLock`` from dask and use as default lock for netcdf4 backends (:issue:`8442`, :pull:`8571`). By `Kai Mรผhlbauer `_. - Add tests and fixes for empty :py:class:`CFTimeIndex`, including broken html repr (:issue:`7298`, :pull:`8600`). By `Mathias Hauser `_. Internal Changes ~~~~~~~~~~~~~~~~ - The implementation of :py:func:`map_blocks` has changed to minimize graph size and duplication of data. This should be a strict improvement even though the graphs are not always embarrassingly parallel any more. Please open an issue if you spot a regression. (:pull:`8412`, :issue:`8409`). By `Deepak Cherian `_. - Remove null values before plotting. (:pull:`8535`). By `Jimmy Westling `_. - Redirect cumulative reduction functions internally through the :py:class:`ChunkManagerEntryPoint`, potentially allowing :py:meth:`~xarray.DataArray.ffill` and :py:meth:`~xarray.DataArray.bfill` to use non-dask chunked array types. (:pull:`8019`) By `Tom Nicholas `_. .. 
_whats-new.2023.12.0: v2023.12.0 (2023 Dec 08) ------------------------ This release brings new `hypothesis `_ strategies for testing, significantly faster rolling aggregations as well as ``ffill`` and ``bfill`` with ``numbagg``, a new :py:meth:`Dataset.eval` method, and improvements to reading and writing Zarr arrays (including a new ``"a-"`` mode). Thanks to our 16 contributors: Anderson Banihirwe, Ben Mares, Carl Andersson, Deepak Cherian, Doug Latornell, Gregorio L. Trevisan, Illviljan, Jens Hedegaard Nielsen, Justus Magin, Mathias Hauser, Max Jones, Maximilian Roos, Michael Niklas, Patrick Hoefler, Ryan Abernathey, Tom Nicholas New Features ~~~~~~~~~~~~ - Added hypothesis strategies for generating :py:class:`xarray.Variable` objects containing arbitrary data, useful for parametrizing downstream tests. Accessible under :py:mod:`testing.strategies`, and documented in a new page on testing in the User Guide. (:issue:`6911`, :pull:`8404`) By `Tom Nicholas `_. - :py:meth:`rolling` uses `numbagg `_ for most of its computations by default. Numbagg is up to 5x faster than bottleneck where parallelization is possible. Where parallelization isn't possible โ€” for example a 1D array โ€” it's about the same speed as bottleneck, and 2-5x faster than pandas' default functions. (:pull:`8493`). numbagg is an optional dependency, so requires installing separately. - Use a concise format when plotting datetime arrays. (:pull:`8449`). By `Jimmy Westling `_. - Avoid overwriting unchanged existing coordinate variables when appending with :py:meth:`Dataset.to_zarr` by setting ``mode='a-'``. By `Ryan Abernathey `_ and `Deepak Cherian `_. - :py:meth:`~xarray.DataArray.rank` now operates on dask-backed arrays, assuming the core dim has exactly one chunk. (:pull:`8475`). By `Maximilian Roos `_. - Add a :py:meth:`Dataset.eval` method, similar to the pandas' method of the same name. (:pull:`7163`). This is currently marked as experimental and doesn't yet support the ``numexpr`` engine. - :py:meth:`Dataset.drop_vars` & :py:meth:`DataArray.drop_vars` allow passing a callable, similar to :py:meth:`Dataset.where` & :py:meth:`Dataset.sortby` & others. (:pull:`8511`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - Explicitly warn when creating xarray objects with repeated dimension names. Such objects will also now raise when :py:meth:`DataArray.get_axis_num` is called, which means many functions will raise. This latter change is technically a breaking change, but whilst allowed, this behaviour was never actually supported! (:issue:`3731`, :pull:`8491`) By `Tom Nicholas `_. Deprecations ~~~~~~~~~~~~ - As part of an effort to standardize the API, we're renaming the ``dims`` keyword arg to ``dim`` for the minority of functions which current use ``dims``. This started with :py:func:`xarray.dot` & :py:meth:`DataArray.dot` and we'll gradually roll this out across all functions. The warnings are currently ``PendingDeprecationWarning``, which are silenced by default. We'll convert these to ``DeprecationWarning`` in a future release. By `Maximilian Roos `_. - Raise a ``FutureWarning`` warning that the type of :py:meth:`Dataset.dims` will be changed from a mapping of dimension names to lengths to a set of dimension names. This is to increase consistency with :py:meth:`DataArray.dims`. To access a mapping of dimension names to lengths please use :py:meth:`Dataset.sizes`. The same change also applies to ``DatasetGroupBy.dims``. (:issue:`8496`, :pull:`8500`) By `Tom Nicholas `_. 
- :py:meth:`Dataset.drop` & :py:meth:`DataArray.drop` are now deprecated, since pending deprecation for several years. :py:meth:`DataArray.drop_sel` & :py:meth:`DataArray.drop_var` replace them for labels & variables respectively. (:pull:`8497`) By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - Fix dtype inference for ``pd.CategoricalIndex`` when categories are backed by a ``pd.ExtensionDtype`` (:pull:`8481`) - Fix writing a variable that requires transposing when not writing to a region (:pull:`8484`) By `Maximilian Roos `_. - Static typing of ``p0`` and ``bounds`` arguments of :py:func:`xarray.DataArray.curvefit` and :py:func:`xarray.Dataset.curvefit` was changed to ``Mapping`` (:pull:`8502`). By `Michael Niklas `_. - Fix typing of :py:func:`xarray.DataArray.to_netcdf` and :py:func:`xarray.Dataset.to_netcdf` when ``compute`` is evaluated to bool instead of a Literal (:pull:`8268`). By `Jens Hedegaard Nielsen `_. Documentation ~~~~~~~~~~~~~ - Added illustration of updating the time coordinate values of a resampled dataset using time offset arithmetic. This is the recommended technique to replace the use of the deprecated ``loffset`` parameter in ``resample`` (:pull:`8479`). By `Doug Latornell `_. - Improved error message when attempting to get a variable which doesn't exist from a Dataset. (:pull:`8474`) By `Maximilian Roos `_. - Fix default value of ``combine_attrs`` in :py:func:`xarray.combine_by_coords` (:pull:`8471`) By `Gregorio L. Trevisan `_. Internal Changes ~~~~~~~~~~~~~~~~ - :py:meth:`DataArray.bfill` & :py:meth:`DataArray.ffill` now use numbagg `_ by default, which is up to 5x faster where parallelization is possible. (:pull:`8339`) By `Maximilian Roos `_. - Update mypy version to 1.7 (:issue:`8448`, :pull:`8501`). By `Michael Niklas `_. .. _whats-new.2023.11.0: v2023.11.0 (Nov 16, 2023) ------------------------- .. tip:: `This is our 10th year anniversary release! `_ Thank you for your love and support. This release brings the ability to use ``opt_einsum`` for :py:func:`xarray.dot` by default, support for auto-detecting ``region`` when writing partial datasets to Zarr, and the use of h5py drivers with ``h5netcdf``. Thanks to the 19 contributors to this release: Aman Bagrecha, Anderson Banihirwe, Ben Mares, Deepak Cherian, Dimitri Papadopoulos Orfanos, Ezequiel Cimadevilla Alvarez, Illviljan, Justus Magin, Katelyn FitzGerald, Kai Muehlbauer, Martin Durant, Maximilian Roos, Metamess, Sam Levang, Spencer Clark, Tom Nicholas, mgunyho, templiert New Features ~~~~~~~~~~~~ - Use `opt_einsum `_ for :py:func:`xarray.dot` by default if installed. By `Deepak Cherian `_. (:issue:`7764`, :pull:`8373`). - Add ``DataArray.dt.total_seconds()`` method to match the Pandas API. (:pull:`8435`). By `Ben Mares `_. - Allow passing ``region="auto"`` in :py:meth:`Dataset.to_zarr` to automatically infer the region to write in the original store. Also implement automatic transpose when dimension order does not match the original store. (:issue:`7702`, :issue:`8421`, :pull:`8434`). By `Sam Levang `_. - Allow the usage of h5py drivers (eg: ros3) via h5netcdf (:pull:`8360`). By `Ezequiel Cimadevilla `_. - Enable VLEN string fill_values, preserve VLEN string dtypes (:issue:`1647`, :issue:`7652`, :issue:`7868`, :pull:`7869`). By `Kai Mรผhlbauer `_. Breaking changes ~~~~~~~~~~~~~~~~ - drop support for `cdms2 `_. Please use `xcdat `_ instead (:pull:`8441`). By `Justus Magin `_. 
- Following pandas, :py:meth:`infer_freq` will return ``"Y"``, ``"YS"``, ``"QE"``, ``"ME"``, ``"h"``, ``"min"``, ``"s"``, ``"ms"``, ``"us"``, or ``"ns"`` instead of ``"A"``, ``"AS"``, ``"Q"``, ``"M"``, ``"H"``, ``"T"``, ``"S"``, ``"L"``, ``"U"``, or ``"N"``. This is to be consistent with the deprecation of the latter frequency strings (:issue:`8394`, :pull:`8415`). By `Spencer Clark `_. - Bump minimum tested pint version to ``>=0.22``. By `Deepak Cherian `_. - Minimum supported versions for the following packages have changed: ``h5py >=3.7``, ``h5netcdf>=1.1``. By `Kai Mรผhlbauer `_. Deprecations ~~~~~~~~~~~~ - The PseudoNetCDF backend has been removed. By `Deepak Cherian `_. - Supplying dimension-ordered sequences to :py:meth:`DataArray.chunk` & :py:meth:`Dataset.chunk` is deprecated in favor of supplying a dictionary of dimensions, or a single ``int`` or ``"auto"`` argument covering all dimensions. Xarray favors using dimensions names rather than positions, and this was one place in the API where dimension positions were used. (:pull:`8341`) By `Maximilian Roos `_. - Following pandas, the frequency strings ``"A"``, ``"AS"``, ``"Q"``, ``"M"``, ``"H"``, ``"T"``, ``"S"``, ``"L"``, ``"U"``, and ``"N"`` are deprecated in favor of ``"Y"``, ``"YS"``, ``"QE"``, ``"ME"``, ``"h"``, ``"min"``, ``"s"``, ``"ms"``, ``"us"``, and ``"ns"``, respectively. These strings are used, for example, in :py:func:`date_range`, :py:func:`cftime_range`, :py:meth:`DataArray.resample`, and :py:meth:`Dataset.resample` among others (:issue:`8394`, :pull:`8415`). By `Spencer Clark `_. - Rename :py:meth:`Dataset.to_array` to :py:meth:`Dataset.to_dataarray` for consistency with :py:meth:`DataArray.to_dataset` & :py:func:`open_dataarray` functions. This is a "soft" deprecation โ€” the existing methods work and don't raise any warnings, given the relatively small benefits of the change. By `Maximilian Roos `_. - Finally remove ``keep_attrs`` kwarg from :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample`. These were deprecated a long time ago. By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Port `bug fix from pandas `_ to eliminate the adjustment of resample bin edges in the case that the resampling frequency has units of days and is greater than one day (e.g. ``"2D"``, ``"3D"`` etc.) and the ``closed`` argument is set to ``"right"`` to xarray's implementation of resample for data indexed by a :py:class:`CFTimeIndex` (:pull:`8393`). By `Spencer Clark `_. - Fix to once again support date offset strings as input to the loffset parameter of resample and test this functionality (:pull:`8422`, :issue:`8399`). By `Katelyn FitzGerald `_. - Fix a bug where :py:meth:`DataArray.to_dataset` silently drops a variable if a coordinate with the same name already exists (:pull:`8433`, :issue:`7823`). By `Andrรกs Gunyhรณ `_. - Fix for :py:meth:`DataArray.to_zarr` & :py:meth:`Dataset.to_zarr` to close the created zarr store when passing a path with ``.zip`` extension (:pull:`8425`). By `Carl Andersson `_. Documentation ~~~~~~~~~~~~~ - Small updates to documentation on distributed writes: See :ref:`io.zarr.appending` to Zarr. By `Deepak Cherian `_. .. _whats-new.2023.10.1: v2023.10.1 (19 Oct, 2023) ------------------------- This release updates our minimum numpy version in ``pyproject.toml`` to 1.22, consistent with our documentation below. .. 
_whats-new.2023.10.0: v2023.10.0 (19 Oct, 2023) ------------------------- This release brings performance enhancements to reading Zarr datasets, the ability to use `numbagg `_ for reductions, an expansion in API for ``rolling_exp``, fixes for two regressions with datetime decoding, and many other bugfixes and improvements. Groupby reductions will also use ``numbagg`` if ``flox>=0.8.1`` and ``numbagg`` are both installed. Thanks to our 13 contributors: Anderson Banihirwe, Bart Schilperoort, Deepak Cherian, Illviljan, Kai Mühlbauer, Mathias Hauser, Maximilian Roos, Michael Niklas, Pieter Eendebak, Simon Høxbro Hansen, Spencer Clark, Tom White, olimcc New Features ~~~~~~~~~~~~ - Support high-performance reductions with `numbagg `_. This is enabled by default if ``numbagg`` is installed. By `Deepak Cherian `_. (:pull:`8316`) - Add ``corr``, ``cov``, ``std`` & ``var`` to ``.rolling_exp``. By `Maximilian Roos `_. (:pull:`8307`) - :py:meth:`DataArray.where` & :py:meth:`Dataset.where` accept a callable for the ``other`` parameter, passing the object as the only argument. Previously, this was only valid for the ``cond`` parameter (a short sketch is included after the bug fixes below). (:issue:`8255`) By `Maximilian Roos `_. - ``.rolling_exp`` functions can now take a ``min_weight`` parameter, to only output values when there are sufficient recent non-nan values. ``numbagg>=0.3.1`` is required. (:pull:`8285`) By `Maximilian Roos `_. - :py:meth:`DataArray.sortby` & :py:meth:`Dataset.sortby` accept a callable for the ``variables`` parameter, passing the object as the only argument. By `Maximilian Roos `_. - ``.rolling_exp`` functions can now operate on dask-backed arrays, assuming the core dim has exactly one chunk. (:pull:`8284`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - Made more arguments keyword-only (e.g. ``keep_attrs``, ``skipna``) for many :py:class:`xarray.DataArray` and :py:class:`xarray.Dataset` methods (:pull:`6403`). By `Mathias Hauser `_. - :py:meth:`Dataset.to_zarr` & :py:meth:`DataArray.to_zarr` require keyword arguments after the initial 7 positional arguments. By `Maximilian Roos `_. Deprecations ~~~~~~~~~~~~ - Rename :py:meth:`Dataset.reset_encoding` & :py:meth:`DataArray.reset_encoding` to :py:meth:`Dataset.drop_encoding` & :py:meth:`DataArray.drop_encoding` for consistency with other ``drop`` & ``reset`` methods: ``drop`` generally removes something, while ``reset`` generally resets to some default or standard value. (:pull:`8287`, :issue:`8259`) By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - :py:meth:`DataArray.rename` & :py:meth:`Dataset.rename` would emit a warning when the operation was a no-op. (:issue:`8266`) By `Simon Hansen `_. - Fixed a regression introduced in the previous release checking time-like units when encoding/decoding masked data (:issue:`8269`, :pull:`8277`). By `Kai Mühlbauer `_. - Fix datetime encoding precision loss regression introduced in the previous release for datetimes encoded with units requiring floating point values, and a reference date not equal to the first value of the datetime array (:issue:`8271`, :pull:`8272`). By `Spencer Clark `_. - Fix excess metadata requests when using a Zarr store. Prior to this, metadata was re-read every time data was retrieved from the array; now metadata is retrieved only once, when the array is initialized. (:issue:`8290`, :pull:`8297`). By `Oliver McCormack `_. - Fix ``to_zarr`` ending in a ``ReadOnlyError`` when consolidated metadata was used and ``write_empty_chunks`` was provided. (:issue:`8323`, :pull:`8326`) By `Matthijs Amesz `_.
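As a short illustration of the callable ``other`` support in ``.where`` listed under New Features above, a minimal sketch (the array values here are arbitrary; assumes xarray at least at this release)::

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))

    # ``cond`` could already be a callable; as of this release ``other`` may be
    # one too. Values failing the condition are replaced by the overall mean.
    filled = da.where(lambda obj: obj > 2, lambda obj: obj.mean())
    print(filled)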
Documentation ~~~~~~~~~~~~~ - Added page on the interoperability of xarray objects. (:pull:`7992`) By `Tom Nicholas `_. - Added xarray-regrid to the list of xarray related projects (:pull:`8272`). By `Bart Schilperoort `_. Internal Changes ~~~~~~~~~~~~~~~~ - More improvements to support the Python `array API standard `_ by using duck array ops in more places in the codebase. (:pull:`8267`) By `Tom White `_. .. _whats-new.2023.09.0: v2023.09.0 (Sep 26, 2023) ------------------------- This release continues work on the new :py:class:`xarray.Coordinates` object, allows to provide ``preferred_chunks`` when reading from netcdf files, enables :py:func:`xarray.apply_ufunc` to handle missing core dimensions and fixes several bugs. Thanks to the 24 contributors to this release: Alexander Fischer, Amrest Chinkamol, Benoit Bovy, Darsh Ranjan, Deepak Cherian, Gianfranco Costamagna, Gregorio L. Trevisan, Illviljan, Joe Hamman, JR, Justus Magin, Kai Mรผhlbauer, Kian-Meng Ang, Kyle Sunden, Martin Raspaud, Mathias Hauser, Mattia Almansi, Maximilian Roos, Andrรกs Gunyhรณ, Michael Niklas, Richard Kleijn, Riulinchen, Tom Nicholas and Wiktor Kraล›nicki. We welcome the following new contributors to Xarray!: Alexander Fischer, Amrest Chinkamol, Darsh Ranjan, Gianfranco Costamagna, Gregorio L. Trevisan, Kian-Meng Ang, Riulinchen and Wiktor Kraล›nicki. New Features ~~~~~~~~~~~~ - Added the :py:meth:`Coordinates.assign` method that can be used to combine different collections of coordinates prior to assign them to a Dataset or DataArray (:pull:`8102`) at once. By `Benoรฎt Bovy `_. - Provide ``preferred_chunks`` for data read from netcdf files (:issue:`1440`, :pull:`7948`). By `Martin Raspaud `_. - Added ``on_missing_core_dims`` to :py:meth:`apply_ufunc` to allow for copying or dropping a :py:class:`Dataset`'s variables with missing core dimensions (:pull:`8138`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - The :py:class:`Coordinates` constructor now creates a (pandas) index by default for each dimension coordinate. To keep the previous behavior (no index created), pass an empty dictionary to ``indexes``. The constructor now also extracts and add the indexes from another :py:class:`Coordinates` object passed via ``coords`` (:pull:`8107`). By `Benoรฎt Bovy `_. - Static typing of ``xlim`` and ``ylim`` arguments in plotting functions now must be ``tuple[float, float]`` to align with matplotlib requirements. (:issue:`7802`, :pull:`8030`). By `Michael Niklas `_. Deprecations ~~~~~~~~~~~~ - Deprecate passing a :py:class:`pandas.MultiIndex` object directly to the :py:class:`Dataset` and :py:class:`DataArray` constructors as well as to :py:meth:`Dataset.assign` and :py:meth:`Dataset.assign_coords`. A new Xarray :py:class:`Coordinates` object has to be created first using :py:meth:`Coordinates.from_pandas_multiindex` (:pull:`8094`). By `Benoรฎt Bovy `_. Bug fixes ~~~~~~~~~ - Improved static typing of reduction methods (:pull:`6746`). By `Richard Kleijn `_. - Fix bug where empty attrs would generate inconsistent tokens (:issue:`6970`, :pull:`8101`). By `Mattia Almansi `_. - Improved handling of multi-coordinate indexes when updating coordinates, including bug fixes (and improved warnings for deprecated features) for pandas multi-indexes (:pull:`8094`). By `Benoรฎt Bovy `_. - Fixed a bug in :py:func:`merge` with ``compat='minimal'`` where the coordinate names were not updated properly internally (:issue:`7405`, :issue:`7588`, :pull:`8104`). By `Benoรฎt Bovy `_. 
- Fix bug where :py:class:`DataArray` instances on the right-hand side of :py:meth:`DataArray.__setitem__` lose dimension names (:issue:`7030`, :pull:`8067`). By `Darsh Ranjan `_. - Return ``float64`` in presence of ``NaT`` in :py:class:`~core.accessor_dt.DatetimeAccessor` and special case ``NaT`` handling in :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar` (:issue:`7928`, :pull:`8084`). By `Kai Mรผhlbauer `_. - Fix :py:meth:`~computation.rolling.DatasetRolling.construct` with stride on Datasets without indexes. (:issue:`7021`, :pull:`7578`). By `Amrest Chinkamol `_ and `Michael Niklas `_. - Calling plot with kwargs ``col``, ``row`` or ``hue`` no longer squeezes dimensions passed via these arguments (:issue:`7552`, :pull:`8174`). By `Wiktor Kraล›nicki `_. - Fixed a bug where casting from ``float`` to ``int64`` (undefined for ``NaN``) led to varying issues (:issue:`7817`, :issue:`7942`, :issue:`7790`, :issue:`6191`, :issue:`7096`, :issue:`1064`, :pull:`7827`). By `Kai Mรผhlbauer `_. - Fixed a bug where inaccurate ``coordinates`` silently failed to decode variable (:issue:`1809`, :pull:`8195`). By `Kai Mรผhlbauer `_ - ``.rolling_exp`` functions no longer mistakenly lose non-dimensioned coords (:issue:`6528`, :pull:`8114`). By `Maximilian Roos `_. - In the event that user-provided datetime64/timedelta64 units and integer dtype encoding parameters conflict with each other, override the units to preserve an integer dtype for most faithful serialization to disk (:issue:`1064`, :pull:`8201`). By `Kai Mรผhlbauer `_. - Static typing of dunder ops methods (like :py:meth:`DataArray.__eq__`) has been fixed. Remaining issues are upstream problems (:issue:`7780`, :pull:`8204`). By `Michael Niklas `_. - Fix type annotation for ``center`` argument of plotting methods (like :py:meth:`xarray.plot.dataarray_plot.pcolormesh`) (:pull:`8261`). By `Pieter Eendebak `_. Documentation ~~~~~~~~~~~~~ - Make documentation of :py:meth:`DataArray.where` clearer (:issue:`7767`, :pull:`7955`). By `Riulinchen `_. Internal Changes ~~~~~~~~~~~~~~~~ - Many error messages related to invalid dimensions or coordinates now always show the list of valid dims/coords (:pull:`8079`). By `Andrรกs Gunyhรณ `_. - Refactor of encoding and decoding times/timedeltas to preserve nanosecond resolution in arrays that contain missing values (:pull:`7827`). By `Kai Mรผhlbauer `_. - Transition ``.rolling_exp`` functions to use ``.apply_ufunc`` internally rather than ``.reduce``, as the start of a broader effort to move non-reducing functions away from ```.reduce``, (:pull:`8114`). By `Maximilian Roos `_. - Test range of fill_value's in test_interpolate_pd_compat (:issue:`8146`, :pull:`8189`). By `Kai Mรผhlbauer `_. .. _whats-new.2023.08.0: v2023.08.0 (Aug 18, 2023) ------------------------- This release brings changes to minimum dependencies, allows reading of datasets where a dimension name is associated with a multidimensional variable (e.g. finite volume ocean model output), and introduces a new :py:class:`xarray.Coordinates` object. Thanks to the 16 contributors to this release: Anderson Banihirwe, Articoking, Benoit Bovy, Deepak Cherian, Harshitha, Ian Carroll, Joe Hamman, Justus Magin, Peter Hill, Rachel Wegener, Riley Kuttruff, Thomas Nicholas, Tom Nicholas, ilgast, quantsnus, vallirep Announcements ~~~~~~~~~~~~~ The :py:class:`xarray.Variable` class is being refactored out to a new project title 'namedarray'. See the `design doc `_ for more details. 
Reach out to us on this [discussion topic](https://github.com/pydata/xarray/discussions/8080) if you have any thoughts. New Features ~~~~~~~~~~~~ - :py:class:`Coordinates` can now be constructed independently of any Dataset or DataArray (it is also returned by the :py:attr:`Dataset.coords` and :py:attr:`DataArray.coords` properties). ``Coordinates`` objects are useful for passing both coordinate variables and indexes to new Dataset / DataArray objects, e.g., via their constructor or via :py:meth:`Dataset.assign_coords`. We may also wrap coordinate variables in a ``Coordinates`` object in order to skip the automatic creation of (pandas) indexes for dimension coordinates. The :py:class:`Coordinates.from_pandas_multiindex` constructor may be used to create coordinates directly from a :py:class:`pandas.MultiIndex` object (it is preferred over passing it directly as coordinate data, which may be deprecated soon). Like Dataset and DataArray objects, ``Coordinates`` objects may now be used in :py:func:`align` and :py:func:`merge`. (:issue:`6392`, :pull:`7368`). By `Benoรฎt Bovy `_. - Visually group together coordinates with the same indexes in the index section of the text repr (:pull:`7225`). By `Justus Magin `_. - Allow creating Xarray objects where a multidimensional variable shares its name with a dimension. Examples include output from finite volume models like FVCOM. (:issue:`2233`, :pull:`7989`) By `Deepak Cherian `_ and `Benoit Bovy `_. - When outputting :py:class:`Dataset` objects as Zarr via :py:meth:`Dataset.to_zarr`, user can now specify that chunks that will contain no valid data will not be written. Originally, this could be done by specifying ``"write_empty_chunks": True`` in the ``encoding`` parameter; however, this setting would not carry over when appending new data to an existing dataset. (:issue:`8009`) Requires ``zarr>=2.11``. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed (:pull:`8022`): ===================== ========= ======== Package Old New ===================== ========= ======== boto3 1.20 1.24 cftime 1.5 1.6 dask-core 2022.1 2022.7 distributed 2022.1 2022.7 hfnetcdf 0.13 1.0 iris 3.1 3.2 lxml 4.7 4.9 netcdf4 1.5.7 1.6.0 numpy 1.21 1.22 pint 0.18 0.19 pydap 3.2 3.3 rasterio 1.2 1.3 scipy 1.7 1.8 toolz 0.11 0.12 typing_extensions 4.0 4.3 zarr 2.10 2.12 numbagg 0.1 0.2.1 ===================== ========= ======== Documentation ~~~~~~~~~~~~~ - Added page on the internal design of xarray objects. (:pull:`7991`) By `Tom Nicholas `_. - Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`, :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`, :py:meth:`Dataset.drop_vars` (:issue:`6793`, :pull:`7937`) By `Harshitha `_. - Add docstrings for the :py:class:`Index` base class and add some documentation on how to create custom, Xarray-compatible indexes (:pull:`6975`) By `Benoรฎt Bovy `_. - Added a page clarifying the role of Xarray core team members. (:pull:`7999`) By `Tom Nicholas `_. - Fixed broken links in "See also" section of :py:meth:`Dataset.count` (:issue:`8055`, :pull:`8057`) By `Articoking `_. - Extended the glossary by adding terms Aligning, Broadcasting, Merging, Concatenating, Combining, lazy, labeled, serialization, indexing (:issue:`3355`, :pull:`7732`) By `Harshitha `_. Internal Changes ~~~~~~~~~~~~~~~~ - :py:func:`as_variable` now consistently includes the variable name in any exceptions raised. (:pull:`7995`). 
By `Peter Hill `_ - :py:func:`encode_dataset_coordinates` now sorts coordinates automatically assigned to ``coordinates`` attributes during serialization (:issue:`8026`, :pull:`8034`). By `Ian Carroll `_. .. _whats-new.2023.07.0: v2023.07.0 (July 17, 2023) -------------------------- This release brings improvements to the documentation on wrapping numpy-like arrays, improved docstrings, and bug fixes. Deprecations ~~~~~~~~~~~~ - ``hue_style`` is being deprecated for scatter plots. (:issue:`7907`, :pull:`7925`). By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Ensure no forward slashes in variable and dimension names for HDF5-based engines. (:issue:`7943`, :pull:`7953`) By `Kai Mühlbauer `_. Documentation ~~~~~~~~~~~~~ - Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`, :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`, :py:meth:`Dataset.drop_vars` (:issue:`6793`, :pull:`7937`) By `Harshitha `_. - Added page on wrapping chunked numpy-like arrays as alternatives to dask arrays. (:pull:`7951`) By `Tom Nicholas `_. - Expanded the page on wrapping numpy-like "duck" arrays. (:pull:`7911`) By `Tom Nicholas `_. - Added examples to docstrings of :py:meth:`Dataset.isel`, :py:meth:`Dataset.reduce`, :py:meth:`Dataset.argmin`, :py:meth:`Dataset.argmax` (:issue:`6793`, :pull:`7881`) By `Harshitha `_. Internal Changes ~~~~~~~~~~~~~~~~ - Allow chunked non-dask arrays (i.e. Cubed arrays) in groupby operations. (:pull:`7941`) By `Tom Nicholas `_. .. _whats-new.2023.06.0: v2023.06.0 (June 21, 2023) -------------------------- This release adds features to ``curvefit``, improves the performance of concatenation, and fixes various bugs. Thanks to our 13 contributors to this release: Anderson Banihirwe, Deepak Cherian, dependabot[bot], Illviljan, Juniper Tyree, Justus Magin, Martin Fleischmann, Mattia Almansi, mgunyho, Rutger van Haasteren, Thomas Nicholas, Tom Nicholas, Tom White. New Features ~~~~~~~~~~~~ - Added support for multidimensional initial guess and bounds in :py:meth:`DataArray.curvefit` (:issue:`7768`, :pull:`7821`). By `András Gunyhó `_. - Add an ``errors`` option to :py:meth:`Dataset.curvefit` that allows returning NaN for the parameters and covariances of failed fits, rather than failing the whole series of fits (a short sketch is included after the bug fixes below; :issue:`6317`, :pull:`7891`). By `Dominik Stańczak `_ and `András Gunyhó `_. Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ - Deprecate the `cdms2 `_ conversion methods (:pull:`7876`) By `Justus Magin `_. Performance ~~~~~~~~~~~ - Improve concatenation performance (:issue:`7833`, :pull:`7824`). By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Fix bug where weighted ``polyfit`` was changing the original object (:issue:`5644`, :pull:`7900`). By `Mattia Almansi `_. - Don't call ``CachingFileManager.__del__`` on interpreter shutdown (:issue:`7814`, :pull:`7880`). By `Justus Magin `_. - Preserve vlen dtype for empty string arrays (:issue:`7328`, :pull:`7862`). By `Tom White `_ and `Kai Mühlbauer `_. - Ensure dtype of reindex result matches dtype of the original DataArray (:issue:`7299`, :pull:`7917`) By `Anderson Banihirwe `_. - Fix bug where a zero-length zarr ``chunk_store`` was ignored as if it was ``None`` (:pull:`7923`) By `Juniper Tyree `_.
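A minimal sketch of the ``curvefit`` additions mentioned above, with an invented model function and synthetic data (requires scipy; ``errors="ignore"`` assumes xarray at least at this release)::

    import numpy as np
    import xarray as xr

    def exponential(x, a, xc):
        # simple illustrative model: exp((x - xc) / a)
        return np.exp((x - xc) / a)

    x = np.arange(0.0, 10.0, 0.1)
    da = xr.DataArray(exponential(x, 3.0, 0.5), coords={"x": x}, dims="x")

    # errors="ignore" returns NaN parameters/covariances for failed fits
    # instead of raising for the whole series of fits.
    fit = da.curvefit(coords="x", func=exponential, errors="ignore")
    print(fit.curvefit_coefficients)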
Documentation ~~~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ - Minor improvements to support of the python `array api standard `_, internally using the function ``xp.astype()`` instead of the method ``arr.astype()``, as the latter is not in the standard. (:pull:`7847`) By `Tom Nicholas `_. - Xarray now uploads nightly wheels to https://pypi.anaconda.org/scientific-python-nightly-wheels/simple/ (:issue:`7863`, :pull:`7865`). By `Martin Fleischmann `_. - Stop uploading development wheels to TestPyPI (:pull:`7889`) By `Justus Magin `_. - Added an exception catch for ``AttributeError`` along with ``ImportError`` when duck typing the dynamic imports in pycompat.py. This catches some name collisions between packages. (:issue:`7870`, :pull:`7874`) .. _whats-new.2023.05.0: v2023.05.0 (May 18, 2023) ------------------------- This release adds some new methods and operators, updates our deprecation policy for python versions, fixes some bugs with groupby, and introduces experimental support for alternative chunked parallel array computation backends via a new plugin system! **Note:** If you are using a locally-installed development version of xarray then pulling the changes from this release may require you to re-install. This avoids an error where xarray cannot detect dask via the new entrypoints system introduced in :pull:`7019`. See :issue:`7856` for details. Thanks to our 14 contributors: Alan Brammer, crusaderky, David Stansby, dcherian, Deeksha, Deepak Cherian, Illviljan, James McCreight, Joe Hamman, Justus Magin, Kyle Sunden, Max Hollmann, mgunyho, and Tom Nicholas New Features ~~~~~~~~~~~~ - Added new method :py:meth:`DataArray.to_dask_dataframe`, convert a dataarray into a dask dataframe (:issue:`7409`). By `Deeksha `_. - Add support for lshift and rshift binary operators (``<<``, ``>>``) on :py:class:`xr.DataArray` of type :py:class:`int` (:issue:`7727` , :pull:`7741`). By `Alan Brammer `_. - Keyword argument ``data='array'`` to both :py:meth:`xarray.Dataset.to_dict` and :py:meth:`xarray.DataArray.to_dict` will now return data as the underlying array type. Python lists are returned for ``data='list'`` or ``data=True``. Supplying ``data=False`` only returns the schema without data. ``encoding=True`` returns the encoding dictionary for the underlying variable also. (:issue:`1599`, :pull:`7739`) . By `James McCreight `_. Breaking changes ~~~~~~~~~~~~~~~~ - adjust the deprecation policy for python to once again align with NEP-29 (:issue:`7765`, :pull:`7793`) By `Justus Magin `_. Performance ~~~~~~~~~~~ - Optimize ``.dt `` accessor performance with ``CFTimeIndex``. (:pull:`7796`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix ``as_compatible_data`` for masked float arrays, now always creates a copy when mask is present (:issue:`2377`, :pull:`7788`). By `Max Hollmann `_. - Fix groupby binary ops when grouped array is subset relative to other. (:issue:`7797`). By `Deepak Cherian `_. - Fix groupby sum, prod for all-NaN groups with ``flox``. (:issue:`7808`). By `Deepak Cherian `_. Internal Changes ~~~~~~~~~~~~~~~~ - Experimental support for wrapping chunked array libraries other than dask. A new ABC is defined - :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` - which can be subclassed and then registered by alternative chunked array implementations. (:issue:`6807`, :pull:`7019`) By `Tom Nicholas `_. .. 
_whats-new.2023.04.2: v2023.04.2 (April 20, 2023) --------------------------- This is a patch release to fix a bug with binning (:issue:`7766`) Bug fixes ~~~~~~~~~ - Fix binning when ``labels`` is specified. (:issue:`7766`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Added examples to docstrings for :py:meth:`xarray.core.accessor_str.StringAccessor` methods. (:pull:`7669`) . By `Mary Gathoni `_. .. _whats-new.2023.04.1: v2023.04.1 (April 18, 2023) --------------------------- This is a patch release to fix a bug with binning (:issue:`7759`) Bug fixes ~~~~~~~~~ - Fix binning by unsorted arrays. (:issue:`7759`) .. _whats-new.2023.04.0: v2023.04.0 (April 14, 2023) --------------------------- This release includes support for pandas v2, allows refreshing of backend engines in a session, and removes deprecated backends for ``rasterio`` and ``cfgrib``. Thanks to our 19 contributors: Chinemere, Tom Coleman, Deepak Cherian, Harshitha, Illviljan, Jessica Scheick, Joe Hamman, Justus Magin, Kai Mรผhlbauer, Kwonil-Kim, Mary Gathoni, Michael Niklas, Pierre, Scott Henderson, Shreyal Gupta, Spencer Clark, mccloskey, nishtha981, veenstrajelmer We welcome the following new contributors to Xarray!: Mary Gathoni, Harshitha, veenstrajelmer, Chinemere, nishtha981, Shreyal Gupta, Kwonil-Kim, mccloskey. New Features ~~~~~~~~~~~~ - New methods to reset an objects encoding (:py:meth:`Dataset.reset_encoding`, :py:meth:`DataArray.reset_encoding`). (:issue:`7686`, :pull:`7689`). By `Joe Hamman `_. - Allow refreshing backend engines with :py:meth:`xarray.backends.refresh_engines` (:issue:`7478`, :pull:`7523`). By `Michael Niklas `_. - Added ability to save ``DataArray`` objects directly to Zarr using :py:meth:`~xarray.DataArray.to_zarr`. (:issue:`7692`, :pull:`7693`) . By `Joe Hamman `_. Breaking changes ~~~~~~~~~~~~~~~~ - Remove deprecated rasterio backend in favor of rioxarray (:pull:`7392`). By `Scott Henderson `_. Deprecations ~~~~~~~~~~~~ Performance ~~~~~~~~~~~ - Optimize alignment with ``join="exact", copy=False`` by avoiding copies. (:pull:`7736`) By `Deepak Cherian `_. - Avoid unnecessary copies of ``CFTimeIndex``. (:pull:`7735`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`xr.polyval` with non-system standard integer coeffs (:pull:`7619`). By `Shreyal Gupta `_ and `Michael Niklas `_. - Improve error message when trying to open a file which you do not have permission to read (:issue:`6523`, :pull:`7629`). By `Thomas Coleman `_. - Proper plotting when passing :py:class:`~matplotlib.colors.BoundaryNorm` type argument in :py:meth:`DataArray.plot`. (:issue:`4061`, :issue:`7014`,:pull:`7553`) By `Jelmer Veenstra `_. - Ensure the formatting of time encoding reference dates outside the range of nanosecond-precision datetimes remains the same under pandas version 2.0.0 (:issue:`7420`, :pull:`7441`). By `Justus Magin `_ and `Spencer Clark `_. - Various ``dtype`` related fixes needed to support ``pandas>=2.0`` (:pull:`7724`) By `Justus Magin `_. - Preserve boolean dtype within encoding (:issue:`7652`, :pull:`7720`). By `Kai Mรผhlbauer `_ Documentation ~~~~~~~~~~~~~ - Update FAQ page on how do I open format X file as an xarray dataset? (:issue:`1285`, :pull:`7638`) using :py:func:`~xarray.open_dataset` By `Harshitha `_ , `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Don't assume that arrays read from disk will be Numpy arrays. This is a step toward enabling reads from a Zarr store using the `Kvikio `_ or `TensorStore `_ libraries. (:pull:`6874`). By `Deepak Cherian `_. 
- Remove internal support for reading GRIB files through the ``cfgrib`` backend. ``cfgrib`` now uses the external backend interface, so no existing code should break. By `Deepak Cherian `_. - Implement CF coding functions in ``VariableCoders`` (:pull:`7719`). By `Kai Mรผhlbauer `_ - Added a config.yml file with messages for the welcome bot when a Github user creates their first ever issue or pull request or has their first PR merged. (:issue:`7685`, :pull:`7685`) By `Nishtha P `_. - Ensure that only nanosecond-precision :py:class:`pd.Timestamp` objects continue to be used internally under pandas version 2.0.0. This is mainly to ease the transition to this latest version of pandas. It should be relaxed when addressing :issue:`7493`. By `Spencer Clark `_ (:issue:`7707`, :pull:`7731`). .. _whats-new.2023.03.0: v2023.03.0 (March 22, 2023) --------------------------- This release brings many bug fixes, and some new features. The maximum pandas version is pinned to ``<2`` until we can support the new pandas datetime types. Thanks to our 19 contributors: Abel Aoun, Alex Goodman, Deepak Cherian, Illviljan, Jody Klymak, Joe Hamman, Justus Magin, Mary Gathoni, Mathias Hauser, Mattia Almansi, Mick, Oriol Abril-Pla, Patrick Hoefler, Paul OckenfuรŸ, Pierre, Shreyal Gupta, Spencer Clark, Tom Nicholas, Tom Vo New Features ~~~~~~~~~~~~ - Fix :py:meth:`xr.cov` and :py:meth:`xr.corr` now support complex valued arrays (:issue:`7340`, :pull:`7392`). By `Michael Niklas `_. - Allow indexing along unindexed dimensions with dask arrays (:issue:`2511`, :issue:`4276`, :issue:`4663`, :pull:`5873`). By `Abel Aoun `_ and `Deepak Cherian `_. - Support dask arrays in ``first`` and ``last`` reductions. By `Deepak Cherian `_. - Improved performance in ``open_dataset`` for datasets with large object arrays (:issue:`7484`, :pull:`7494`). By `Alex Goodman `_ and `Deepak Cherian `_. Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ - Following pandas, the ``base`` and ``loffset`` parameters of :py:meth:`xr.DataArray.resample` and :py:meth:`xr.Dataset.resample` have been deprecated and will be removed in a future version of xarray. Using the ``origin`` or ``offset`` parameters is recommended as a replacement for using the ``base`` parameter and using time offset arithmetic is recommended as a replacement for using the ``loffset`` parameter (:pull:`8459`). By `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Improve error message when using in :py:meth:`Dataset.drop_vars` to state which variables can't be dropped. (:pull:`7518`) By `Tom Nicholas `_. - Require to explicitly defining optional dimensions such as hue and markersize for scatter plots. (:issue:`7314`, :pull:`7277`). By `Jimmy Westling `_. - Fix matplotlib raising a UserWarning when plotting a scatter plot with an unfilled marker (:issue:`7313`, :pull:`7318`). By `Jimmy Westling `_. - Fix issue with ``max_gap`` in ``interpolate_na``, when applied to multidimensional arrays. (:issue:`7597`, :pull:`7598`). By `Paul OckenfuรŸ `_. - Fix :py:meth:`DataArray.plot.pcolormesh` which now works if one of the coordinates has str dtype (:issue:`6775`, :pull:`7612`). By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ - Clarify language in contributor's guide (:issue:`7495`, :pull:`7595`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Pin pandas to ``<2``. By `Deepak Cherian `_. .. 
_whats-new.2023.02.0: v2023.02.0 (Feb 7, 2023) ------------------------ This release brings a major upgrade to :py:func:`xarray.concat`, many bug fixes, and a bump in supported dependency versions. Thanks to our 11 contributors: Aron Gergely, Deepak Cherian, Illviljan, James Bourbeau, Joe Hamman, Justus Magin, Hauke Schulz, Kai Mรผhlbauer, Ken Mankoff, Spencer Clark, Tom Nicholas. Breaking changes ~~~~~~~~~~~~~~~~ - Support for ``python 3.8`` has been dropped and the minimum versions of some dependencies were changed (:pull:`7461`): ===================== ========= ======== Package Old New ===================== ========= ======== python 3.8 3.9 numpy 1.20 1.21 pandas 1.3 1.4 dask 2021.11 2022.1 distributed 2021.11 2022.1 h5netcdf 0.11 0.13 lxml 4.6 4.7 numba 5.4 5.5 ===================== ========= ======== Deprecations ~~~~~~~~~~~~ - Following pandas, the ``closed`` parameters of :py:func:`cftime_range` and :py:func:`date_range` are deprecated in favor of the ``inclusive`` parameters, and will be removed in a future version of xarray (:issue:`6985`:, :pull:`7373`). By `Spencer Clark `_. Bug fixes ~~~~~~~~~ - :py:func:`xarray.concat` can now concatenate variables present in some datasets but not others (:issue:`508`, :pull:`7400`). By `Kai Mรผhlbauer `_ and `Scott Chamberlin `_. - Handle ``keep_attrs`` option in binary operators of :py:meth:`Dataset` (:issue:`7390`, :pull:`7391`). By `Aron Gergely `_. - Improve error message when using dask in :py:func:`apply_ufunc` with ``output_sizes`` not supplied. (:pull:`7509`) By `Tom Nicholas `_. - :py:func:`xarray.Dataset.to_zarr` now drops variable encodings that have been added by xarray during reading a dataset. (:issue:`7129`, :pull:`7500`). By `Hauke Schulz `_. Documentation ~~~~~~~~~~~~~ - Mention the `flox package `_ in GroupBy documentation and docstrings. By `Deepak Cherian `_. .. _whats-new.2023.01.0: v2023.01.0 (Jan 17, 2023) ------------------------- This release includes a number of bug fixes. Thanks to the 14 contributors to this release: Aron Gergely, Benoit Bovy, Deepak Cherian, Ian Carroll, Illviljan, Joe Hamman, Justus Magin, Mark Harfouche, Matthew Roeschke, Paige Martin, Pierre, Sam Levang, Tom White, stefank0. Breaking changes ~~~~~~~~~~~~~~~~ - :py:meth:`CFTimeIndex.get_loc` has removed the ``method`` and ``tolerance`` keyword arguments. Use ``.get_indexer([key], method=..., tolerance=...)`` instead (:pull:`7361`). By `Matthew Roeschke `_. Bug fixes ~~~~~~~~~ - Avoid in-memory broadcasting when converting to a dask dataframe using ``.to_dask_dataframe.`` (:issue:`6811`, :pull:`7472`). By `Jimmy Westling `_. - Accessing the property ``.nbytes`` of a DataArray, or Variable no longer accidentally triggers loading the variable into memory. - Allow numpy-only objects in :py:func:`where` when ``keep_attrs=True`` (:issue:`7362`, :pull:`7364`). By `Sam Levang `_. - add a ``keep_attrs`` parameter to :py:meth:`Dataset.pad`, :py:meth:`DataArray.pad`, and :py:meth:`Variable.pad` (:pull:`7267`). By `Justus Magin `_. - Fixed performance regression in alignment between indexed and non-indexed objects of the same shape (:pull:`7382`). By `Benoรฎt Bovy `_. - Preserve original dtype on accessing MultiIndex levels (:issue:`7250`, :pull:`7393`). By `Ian Carroll `_. Internal Changes ~~~~~~~~~~~~~~~~ - Add the pre-commit hook ``absolufy-imports`` to convert relative xarray imports to absolute imports (:pull:`7204`, :pull:`7370`). By `Jimmy Westling `_. .. 
_whats-new.2022.12.0: v2022.12.0 (2022 Dec 2) ----------------------- This release includes a number of bug fixes and experimental support for Zarr V3. Thanks to the 16 contributors to this release: Deepak Cherian, Francesco Zanetta, Gregory Lee, Illviljan, Joe Hamman, Justus Magin, Luke Conibear, Mark Harfouche, Mathias Hauser, Mick, Mike Taves, Sam Levang, Spencer Clark, Tom Nicholas, Wei Ji, templiert New Features ~~~~~~~~~~~~ - Enable using ``offset`` and ``origin`` arguments in :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` (:issue:`7266`, :pull:`7284`). By `Spencer Clark `_. - Add experimental support for Zarr's in-progress V3 specification. (:pull:`6475`). By `Gregory Lee `_ and `Joe Hamman `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed (:pull:`7300`): ========================== ========= ======== Package Old New ========================== ========= ======== boto 1.18 1.20 cartopy 0.19 0.20 distributed 2021.09 2021.11 dask 2021.09 2021.11 h5py 3.1 3.6 hdf5 1.10 1.12 matplotlib-base 3.4 3.5 nc-time-axis 1.3 1.4 netcdf4 1.5.3 1.5.7 packaging 20.3 21.3 pint 0.17 0.18 pseudonetcdf 3.1 3.2 typing_extensions 3.10 4.0 ========================== ========= ======== Deprecations ~~~~~~~~~~~~ - The PyNIO backend has been deprecated (:issue:`4491`, :pull:`7301`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fix handling of coordinate attributes in :py:func:`where`. (:issue:`7220`, :pull:`7229`) By `Sam Levang `_. - Import ``nc_time_axis`` when needed (:issue:`7275`, :pull:`7276`). By `Michael Niklas `_. - Fix static typing of :py:meth:`xr.polyval` (:issue:`7312`, :pull:`7315`). By `Michael Niklas `_. - Fix multiple reads on fsspec S3 files by resetting file pointer to 0 when reading file streams (:issue:`6813`, :pull:`7304`). By `David Hoese `_ and `Wei Ji Leong `_. - Fix :py:meth:`Dataset.assign_coords` resetting all dimension coordinates to default (pandas) index (:issue:`7346`, :pull:`7347`). By `Benoรฎt Bovy `_. Documentation ~~~~~~~~~~~~~ - Add example of reading and writing individual groups to a single netCDF file to I/O docs page. (:pull:`7338`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ .. _whats-new.2022.11.0: v2022.11.0 (Nov 4, 2022) ------------------------ This release brings a number of bugfixes and documentation improvements. Both text and HTML reprs now have a new "Indexes" section, which we expect will help with development of new Index objects. This release also features more support for the Python Array API. Many thanks to the 16 contributors to this release: Daniel Goman, Deepak Cherian, Illviljan, Jessica Scheick, Justus Magin, Mark Harfouche, Maximilian Roos, Mick, Patrick Naylor, Pierre, Spencer Clark, Stephan Hoyer, Tom Nicholas, Tom White New Features ~~~~~~~~~~~~ - Add static typing to plot accessors (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. - Display the indexes in a new section of the text and HTML reprs (:pull:`6795`, :pull:`7183`, :pull:`7185`) By `Justus Magin `_ and `Benoรฎt Bovy `_. - Added methods :py:meth:`DataArrayGroupBy.cumprod` and :py:meth:`DatasetGroupBy.cumprod`. (:pull:`5816`) By `Patrick Naylor `_ Breaking changes ~~~~~~~~~~~~~~~~ - ``repr(ds)`` may not show the same result because it doesn't load small, lazy data anymore. Use ``ds.head().load()`` when wanting to see just a sample of the data. (:issue:`6722`, :pull:`7203`). By `Jimmy Westling `_. - Many arguments of plotmethods have been made keyword-only. 
- ``xarray.plot.plot`` module renamed to ``xarray.plot.dataarray_plot`` to prevent shadowing of the ``plot`` method. (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. Deprecations ~~~~~~~~~~~~ - Positional arguments for all plot methods have been deprecated (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. - ``xarray.plot.FacetGrid.axes`` has been renamed to ``xarray.plot.FacetGrid.axs`` because it's not clear if ``axes`` refers to single or multiple ``Axes`` instances. This aligns with ``matplotlib.pyplot.subplots``. (:pull:`7194`) By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Explicitly opening a file multiple times (e.g., after modifying it on disk) now reopens the file from scratch for h5netcdf and scipy netCDF backends, rather than reusing a cached version (:issue:`4240`, :issue:`4862`). By `Stephan Hoyer `_. - Fixed bug where :py:meth:`Dataset.coarsen.construct` would demote non-dimension coordinates to variables. (:pull:`7233`) By `Tom Nicholas `_. - Raise a TypeError when trying to plot empty data (:issue:`7156`, :pull:`7228`). By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ - Improves overall documentation around available backends, including adding docstrings for :py:func:`xarray.backends.list_engines` Add :py:meth:`__str__` to surface the new :py:class:`BackendEntrypoint` ``description`` and ``url`` attributes. (:issue:`6577`, :pull:`7000`) By `Jessica Scheick `_. - Created docstring examples for :py:meth:`DataArray.cumsum`, :py:meth:`DataArray.cumprod`, :py:meth:`Dataset.cumsum`, :py:meth:`Dataset.cumprod`, :py:meth:`DatasetGroupBy.cumsum`, :py:meth:`DataArrayGroupBy.cumsum`. (:issue:`5816`, :pull:`7152`) By `Patrick Naylor `_ - Add example of using :py:meth:`DataArray.coarsen.construct` to User Guide. (:pull:`7192`) By `Tom Nicholas `_. - Rename ``axes`` to ``axs`` in plotting to align with ``matplotlib.pyplot.subplots``. (:pull:`7194`) By `Jimmy Westling `_. - Add documentation of specific BackendEntrypoints (:pull:`7200`). By `Michael Niklas `_. - Add examples to docstring for :py:meth:`DataArray.drop_vars`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`. (:issue:`6793`, :pull:`7123`) By `Daniel Goman `_. Internal Changes ~~~~~~~~~~~~~~~~ - Doctests fail on any warnings (:pull:`7166`) By `Maximilian Roos `_. - Improve import time by lazy loading ``dask.distributed`` (:pull:`7172`). - Explicitly specify ``longdouble=False`` in :py:func:`cftime.date2num` when encoding times to preserve existing behavior and prevent future errors when it is eventually set to ``True`` by default in cftime (:pull:`7171`). By `Spencer Clark `_. - Improved import time by lazily importing backend modules, matplotlib, dask.array and flox. (:issue:`6726`, :pull:`7179`) By `Michael Niklas `_. - Emit a warning under the development version of pandas when we convert non-nanosecond precision datetime or timedelta values to nanosecond precision. This was required in the past, because pandas previously was not compatible with non-nanosecond precision values. However pandas is currently working towards removing this restriction. When things stabilize in pandas we will likely consider relaxing this behavior in xarray as well (:issue:`7175`, :pull:`7201`). By `Spencer Clark `_. .. _whats-new.2022.10.0: v2022.10.0 (Oct 14 2022) ------------------------ This release brings numerous bugfixes, a change in minimum supported versions, and a new scatter plot method for DataArrays. 
Many thanks to 11 contributors to this release: Anderson Banihirwe, Benoit Bovy, Dan Adriaansen, Illviljan, Justus Magin, Lukas Bindreiter, Mick, Patrick Naylor, Spencer Clark, Thomas Nicholas New Features ~~~~~~~~~~~~ - Add scatter plot for datarrays. Scatter plots now also supports 3d plots with the z argument. (:pull:`6778`) By `Jimmy Westling `_. - Include the variable name in the error message when CF decoding fails to allow for easier identification of problematic variables (:issue:`7145`, :pull:`7147`). By `Spencer Clark `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: ========================== ========= ======== Package Old New ========================== ========= ======== cftime 1.4 1.5 distributed 2021.08 2021.09 dask 2021.08 2021.09 iris 2.4 3.1 nc-time-axis 1.2 1.3 numba 0.53 0.54 numpy 1.19 1.20 pandas 1.2 1.3 packaging 20.0 21.0 scipy 1.6 1.7 sparse 0.12 0.13 typing_extensions 3.7 3.10 zarr 2.8 2.10 ========================== ========= ======== Bug fixes ~~~~~~~~~ - Remove nested function from :py:func:`open_mfdataset` to allow Dataset objects to be pickled. (:issue:`7109`, :pull:`7116`) By `Daniel Adriaansen `_. - Support for recursively defined Arrays. Fixes repr and deepcopy. (:issue:`7111`, :pull:`7112`) By `Michael Niklas `_. - Fixed :py:meth:`Dataset.transpose` to raise a more informative error. (:issue:`6502`, :pull:`7120`) By `Patrick Naylor `_ - Fix groupby on a multi-index level coordinate and fix :py:meth:`DataArray.to_index` for multi-index levels (convert to single index). (:issue:`6836`, :pull:`7105`) By `Benoรฎt Bovy `_. - Support for open_dataset backends that return datasets containing multi-indexes (:issue:`7139`, :pull:`7150`) By `Lukas Bindreiter `_. .. _whats-new.2022.09.0: v2022.09.0 (September 30, 2022) ------------------------------- This release brings a large number of bugfixes and documentation improvements, as well as an external interface for setting custom indexes! Many thanks to our 40 contributors: Anderson Banihirwe, Andrew Ronald Friedman, Bane Sullivan, Benoit Bovy, ColemanTom, Deepak Cherian, Dimitri Papadopoulos Orfanos, Emma Marshall, Fabian Hofmann, Francesco Nattino, ghislainp, Graham Inggs, Hauke Schulz, Illviljan, James Bourbeau, Jody Klymak, Julia Signell, Justus Magin, Keewis, Ken Mankoff, Luke Conibear, Mathias Hauser, Max Jones, mgunyho, Michael Delgado, Mick, Mike Taves, Oliver Lopez, Patrick Naylor, Paul Hockett, Pierre Manchon, Ray Bell, Riley Brady, Sam Levang, Spencer Clark, Stefaan Lippens, Tom Nicholas, Tom White, Travis A. O'Brien, and Zachary Moon. New Features ~~~~~~~~~~~~ - Add :py:meth:`Dataset.set_xindex` and :py:meth:`Dataset.drop_indexes` and their DataArray counterpart for setting and dropping pandas or custom indexes given a set of arbitrary coordinates. (:pull:`6971`) By `Benoรฎt Bovy `_ and `Justus Magin `_. - Enable taking the mean of dask-backed :py:class:`cftime.datetime` arrays (:pull:`6556`, :pull:`6940`). By `Deepak Cherian `_ and `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Allow reading netcdf files where the 'units' attribute is a number. (:pull:`7085`) By `Ghislain Picard `_. - Allow decoding of 0 sized datetimes. (:issue:`1329`, :pull:`6882`) By `Deepak Cherian `_. - Make sure DataArray.name is always a string when used as label for plotting. (:issue:`6826`, :pull:`6832`) By `Jimmy Westling `_. - :py:attr:`DataArray.nbytes` now uses the ``nbytes`` property of the underlying array if available. (:pull:`6797`) By `Max Jones `_. 
- Rely on the array backend for string formatting. (:pull:`6823`). By `Jimmy Westling `_. - Fix incompatibility with numpy 1.20. (:issue:`6818`, :pull:`6821`) By `Michael Niklas `_. - Fix side effects on index coordinate metadata after aligning objects. (:issue:`6852`, :pull:`6857`) By `Benoรฎt Bovy `_. - Make FacetGrid.set_titles send kwargs correctly using ``handle.update(kwargs)``. (:issue:`6839`, :pull:`6843`) By `Oliver Lopez `_. - Fix bug where index variables would be changed inplace. (:issue:`6931`, :pull:`6938`) By `Michael Niklas `_. - Allow taking the mean over non-time dimensions of datasets containing dask-backed cftime arrays. (:issue:`5897`, :pull:`6950`) By `Spencer Clark `_. - Harmonize returned multi-indexed indexes when applying ``concat`` along new dimension. (:issue:`6881`, :pull:`6889`) By `Fabian Hofmann `_. - Fix step plots with ``hue`` arg. (:pull:`6944`) By `Andrรกs Gunyhรณ `_. - Avoid use of random numbers in ``test_weighted.test_weighted_operations_nonequal_coords``. (:issue:`6504`, :pull:`6961`) By `Luke Conibear `_. - Fix multiple regression issues with :py:meth:`Dataset.set_index` and :py:meth:`Dataset.reset_index`. (:pull:`6992`) By `Benoรฎt Bovy `_. - Raise a ``UserWarning`` when renaming a coordinate or a dimension creates a non-indexed dimension coordinate, and suggest the user creating an index either with ``swap_dims`` or ``set_index``. (:issue:`6607`, :pull:`6999`) By `Benoรฎt Bovy `_. - Use ``keep_attrs=True`` in grouping and resampling operations by default. (:issue:`7012`) This means :py:attr:`Dataset.attrs` and :py:attr:`DataArray.attrs` are now preserved by default. By `Deepak Cherian `_. - ``Dataset.encoding['source']`` now exists when reading from a Path object. (:issue:`5888`, :pull:`6974`) By `Thomas Coleman `_. - Better dtype consistency for ``rolling.mean()``. (:issue:`7062`, :pull:`7063`) By `Sam Levang `_. - Allow writing NetCDF files including only dimensionless variables using the distributed or multiprocessing scheduler. (:issue:`7013`, :pull:`7040`) By `Francesco Nattino `_. - Fix deepcopy of attrs and encoding of DataArrays and Variables. (:issue:`2835`, :pull:`7089`) By `Michael Niklas `_. - Fix bug where subplot_kwargs were not working when plotting with figsize, size or aspect. (:issue:`7078`, :pull:`7080`) By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ - Update merge docstrings. (:issue:`6935`, :pull:`7033`) By `Zach Moon `_. - Raise a more informative error when trying to open a non-existent zarr store. (:issue:`6484`, :pull:`7060`) By `Sam Levang `_. - Added examples to docstrings for :py:meth:`DataArray.expand_dims`, :py:meth:`DataArray.drop_duplicates`, :py:meth:`DataArray.reset_coords`, :py:meth:`DataArray.equals`, :py:meth:`DataArray.identical`, :py:meth:`DataArray.broadcast_equals`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.dropna`, :py:meth:`DataArray.drop_isel`, :py:meth:`DataArray.drop_sel`, :py:meth:`DataArray.head`, :py:meth:`DataArray.tail`. (:issue:`5816`, :pull:`7088`) By `Patrick Naylor `_. - Add missing docstrings to various array properties. (:pull:`7090`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Added test for DataArray attrs deepcopy recursion/nested attrs. (:issue:`2835`, :pull:`7086`) By `Paul hockett `_. .. 
_whats-new.2022.06.0: v2022.06.0 (July 21, 2022) -------------------------- This release brings a number of bug fixes and improvements, most notably a major internal refactor of the indexing functionality, the use of `flox`_ in ``groupby`` operations, and experimental support for the new Python `Array API standard `_. It also stops testing support for the abandoned PyNIO. Much effort has been made to preserve backwards compatibility as part of the indexing refactor. We are aware of one `unfixed issue `_. Please also see the `whats-new.2022.06.0rc0`_ for a full list of changes. Many thanks to our 18 contributors: Bane Sullivan, Deepak Cherian, Dimitri Papadopoulos Orfanos, Emma Marshall, Hauke Schulz, Illviljan, Julia Signell, Justus Magin, Keewis, Mathias Hauser, Michael Delgado, Mick, Pierre Manchon, Ray Bell, Spencer Clark, Stefaan Lippens, Tom White, Travis A. O'Brien, New Features ~~~~~~~~~~~~ - Add :py:attr:`Dataset.dtypes`, :py:attr:`core.coordinates.DatasetCoordinates.dtypes`, :py:attr:`core.coordinates.DataArrayCoordinates.dtypes` properties: Mapping from variable names to dtypes. (:pull:`6706`) By `Michael Niklas `_. - Initial typing support for :py:meth:`groupby`, :py:meth:`rolling`, :py:meth:`rolling_exp`, :py:meth:`coarsen`, :py:meth:`weighted`, :py:meth:`resample`, (:pull:`6702`) By `Michael Niklas `_. - Experimental support for wrapping any array type that conforms to the python `array api standard `_. (:pull:`6804`) By `Tom White `_. - Allow string formatting of scalar DataArrays. (:pull:`5981`) By `fmaussion `_. Bug fixes ~~~~~~~~~ - :py:meth:`save_mfdataset` now passes ``**kwargs`` on to :py:meth:`Dataset.to_netcdf`, allowing the ``encoding`` and ``unlimited_dims`` options with :py:meth:`save_mfdataset`. (:issue:`6684`) By `Travis A. O'Brien `_. - Fix backend support of pydap versions <3.3.0 (:issue:`6648`, :pull:`6656`). By `Hauke Schulz `_. - :py:meth:`Dataset.where` with ``drop=True`` now behaves correctly with mixed dimensions. (:issue:`6227`, :pull:`6690`) By `Michael Niklas `_. - Accommodate newly raised ``OutOfBoundsTimedelta`` error in the development version of pandas when decoding times outside the range that can be represented with nanosecond-precision values (:issue:`6716`, :pull:`6717`). By `Spencer Clark `_. - :py:meth:`open_dataset` with dask and ``~`` in the path now resolves the home directory instead of raising an error. (:issue:`6707`, :pull:`6710`) By `Michael Niklas `_. - :py:meth:`DataArrayRolling.__iter__` with ``center=True`` now works correctly. (:issue:`6739`, :pull:`6744`) By `Michael Niklas `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``xarray.core.groupby``, ``xarray.core.rolling``, ``xarray.core.rolling_exp``, ``xarray.core.weighted`` and ``xarray.core.resample`` modules are no longer imported by default. (:pull:`6702`) .. _whats-new.2022.06.0rc0: v2022.06.0rc0 (9 June 2022) --------------------------- This pre-release brings a number of bug fixes and improvements, most notably a major internal refactor of the indexing functionality and the use of `flox`_ in ``groupby`` operations. It also stops testing support for the abandoned PyNIO. 
Install it using :: mamba create -n python=3.10 xarray python -m pip install --pre --upgrade --no-deps xarray Many thanks to the 39 contributors: Abel Soares Siqueira, Alex Santana, Anderson Banihirwe, Benoit Bovy, Blair Bonnett, Brewster Malevich, brynjarmorka, Charles Stern, Christian Jauvin, Deepak Cherian, Emma Marshall, Fabien Maussion, Greg Behm, Guelate Seyo, Illviljan, Joe Hamman, Joseph K Aicher, Justus Magin, Kevin Paul, Louis Stenger, Mathias Hauser, Mattia Almansi, Maximilian Roos, Michael Bauer, Michael Delgado, Mick, ngam, Oleh Khoma, Oriol Abril-Pla, Philippe Blain, PLSeuJ, Sam Levang, Spencer Clark, Stan West, Thomas Nicholas, Thomas Vogt, Tom White, Xianxiang Li Known Regressions ~~~~~~~~~~~~~~~~~ - ``reset_coords(drop=True)`` does not create indexes (:issue:`6607`) New Features ~~~~~~~~~~~~ - The ``zarr`` backend is now able to read NCZarr. By `Mattia Almansi `_. - Add a weighted ``quantile`` method to :py:class:`.computation.weighted.DatasetWeighted` and :py:class:`~computation.weighted.DataArrayWeighted` (:pull:`6059`). By `Christian Jauvin `_ and `David Huard `_. - Add a ``create_index=True`` parameter to :py:meth:`Dataset.stack` and :py:meth:`DataArray.stack` so that the creation of multi-indexes is optional (:pull:`5692`). By `Benoรฎt Bovy `_. - Multi-index levels are now accessible through their own, regular coordinates instead of virtual coordinates (:pull:`5692`). By `Benoรฎt Bovy `_. - Add a ``display_values_threshold`` option to control the total number of array elements which trigger summarization rather than full repr in (numpy) array detailed views of the html repr (:pull:`6400`). By `Benoรฎt Bovy `_. - Allow passing chunks in ``kwargs`` form to :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) By `Tom Nicholas `_. - Add :py:meth:`core.groupby.DatasetGroupBy.cumsum` and :py:meth:`core.groupby.DataArrayGroupBy.cumsum`. By `Vladislav Skripniuk `_ and `Deepak Cherian `_. (:pull:`3147`, :pull:`6525`, :issue:`3141`) - Expose ``inline_array`` kwarg from ``dask.array.from_array`` in :py:func:`open_dataset`, :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) - Expose the ``inline_array`` kwarg from :py:func:`dask.array.from_array` in :py:func:`open_dataset`, :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) By `Tom Nicholas `_. - :py:func:`polyval` now supports :py:class:`Dataset` and :py:class:`DataArray` args of any shape, is faster and requires less memory. (:pull:`6548`) By `Michael Niklas `_. - Improved overall typing. - :py:meth:`Dataset.to_dict` and :py:meth:`DataArray.to_dict` may now optionally include encoding attributes. (:pull:`6635`) By `Joe Hamman `_. - Upload development versions to `TestPyPI `_. By `Justus Magin `_. Breaking changes ~~~~~~~~~~~~~~~~ - PyNIO support is now untested. The minimum versions of some dependencies were changed: =============== ===== ==== Package Old New =============== ===== ==== cftime 1.2 1.4 dask 2.30 2021.4 distributed 2.30 2021.4 h5netcdf 0.8 0.11 matplotlib-base 3.3 3.4 numba 0.51 0.53 numpy 1.18 1.19 pandas 1.1 1.2 pint 0.16 0.17 rasterio 1.1 1.2 scipy 1.5 1.6 sparse 0.11 0.12 zarr 2.5 2.8 =============== ===== ==== - The Dataset and DataArray ``rename```` methods do not implicitly add or drop indexes. (:pull:`5692`). By `Benoรฎt Bovy `_. - Many arguments like ``keep_attrs``, ``axis``, and ``skipna`` are now keyword only for all reduction operations like ``.mean``. 
By `Deepak Cherian `_, `Jimmy Westling `_. - Xarray's ufuncs have been removed, now that they can be replaced by numpy's ufuncs in all supported versions of numpy. By `Maximilian Roos `_. - :py:meth:`xr.polyval` now uses the ``coord`` argument directly instead of its index coordinate. (:pull:`6548`) By `Michael Niklas `_. Bug fixes ~~~~~~~~~ - :py:meth:`Dataset.to_zarr` now allows to write all attribute types supported by ``zarr-python``. By `Mattia Almansi `_. - Set ``skipna=None`` for all ``quantile`` methods (e.g. :py:meth:`Dataset.quantile`) and ensure it skips missing values for float dtypes (consistent with other methods). This should not change the behavior (:pull:`6303`). By `Mathias Hauser `_. - Many bugs fixed by the explicit indexes refactor, mainly related to multi-index (virtual) coordinates. See the corresponding pull-request on GitHub for more details. (:pull:`5692`). By `Benoรฎt Bovy `_. - Fixed "unhashable type" error trying to read NetCDF file with variable having its 'units' attribute not ``str`` (e.g. ``numpy.ndarray``) (:issue:`6368`). By `Oleh Khoma `_. - Omit warning about specified dask chunks separating chunks on disk when the underlying array is empty (e.g., because of an empty dimension) (:issue:`6401`). By `Joseph K Aicher `_. - Fixed the poor html repr performance on large multi-indexes (:pull:`6400`). By `Benoรฎt Bovy `_. - Allow fancy indexing of duck dask arrays along multiple dimensions. (:pull:`6414`) By `Justus Magin `_. - In the API for backends, support dimensions that express their preferred chunk sizes as a tuple of integers. (:issue:`6333`, :pull:`6334`) By `Stan West `_. - Fix bug in :py:func:`where` when passing non-xarray objects with ``keep_attrs=True``. (:issue:`6444`, :pull:`6461`) By `Sam Levang `_. - Allow passing both ``other`` and ``drop=True`` arguments to :py:meth:`DataArray.where` and :py:meth:`Dataset.where` (:pull:`6466`, :pull:`6467`). By `Michael Delgado `_. - Ensure dtype encoding attributes are not added or modified on variables that contain datetime-like values prior to being passed to :py:func:`xarray.conventions.decode_cf_variable` (:issue:`6453`, :pull:`6489`). By `Spencer Clark `_. - Dark themes are now properly detected in Furo-themed Sphinx documents (:issue:`6500`, :pull:`6501`). By `Kevin Paul `_. - :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel` with ``drop=True`` works as intended with scalar :py:class:`DataArray` indexers. (:issue:`6554`, :pull:`6579`) By `Michael Niklas `_. - Fixed silent overflow issue when decoding times encoded with 32-bit and below unsigned integer data types (:issue:`6589`, :pull:`6598`). By `Spencer Clark `_. - Fixed ``.chunks`` loading lazy data (:issue:`6538`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Revise the documentation for developers on specifying a backend's preferred chunk sizes. In particular, correct the syntax and replace lists with tuples in the examples. (:issue:`6333`, :pull:`6334`) By `Stan West `_. - Mention that :py:meth:`DataArray.rename` can rename coordinates. (:issue:`5458`, :pull:`6665`) By `Michael Niklas `_. - Added examples to :py:meth:`Dataset.thin` and :py:meth:`DataArray.thin` By `Emma Marshall `_. Performance ~~~~~~~~~~~ - GroupBy binary operations are now vectorized. Previously this involved looping over all groups. (:issue:`5804`, :pull:`6160`) By `Deepak Cherian `_. - Substantially improved GroupBy operations using `flox `_. This is auto-enabled when ``flox`` is installed. Use ``xr.set_options(use_flox=False)`` to use the old algorithm. 
(:issue:`4473`, :issue:`4498`, :issue:`659`, :issue:`2237`, :pull:`271`). By `Deepak Cherian `_, `Anderson Banihirwe `_, `Jimmy Westling `_. Internal Changes ~~~~~~~~~~~~~~~~ - Many internal changes due to the explicit indexes refactor. See the corresponding pull-request on GitHub for more details. (:pull:`5692`). By `Benoรฎt Bovy `_. .. _whats-new.2022.03.0: v2022.03.0 (2 March 2022) ------------------------- This release brings a number of small improvements, as well as a move to `calendar versioning `_ (:issue:`6176`). Many thanks to the 16 contributors to the v2022.02.0 release! Aaron Spring, Alan D. Snow, Anderson Banihirwe, crusaderky, Illviljan, Joe Hamman, Jonas GliรŸ, Lukas Pilz, Martin Bergemann, Mathias Hauser, Maximilian Roos, Romain Caneill, Stan West, Stijn Van Hoey, Tobias Kรถlling, and Tom Nicholas. New Features ~~~~~~~~~~~~ - Enabled multiplying tick offsets by floats. Allows ``float`` ``n`` in :py:meth:`CFTimeIndex.shift` if ``shift_freq`` is between ``Day`` and ``Microsecond``. (:issue:`6134`, :pull:`6135`). By `Aaron Spring `_. - Enable providing more keyword arguments to the ``pydap`` backend when reading OpenDAP datasets (:issue:`6274`). By `Jonas GliรŸ `_. - Allow :py:meth:`DataArray.drop_duplicates` to drop duplicates along multiple dimensions at once, and add :py:meth:`Dataset.drop_duplicates`. (:pull:`6307`) By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - Renamed the ``interpolation`` keyword of all ``quantile`` methods (e.g. :py:meth:`DataArray.quantile`) to ``method`` for consistency with numpy v1.22.0 (:pull:`6108`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ - Variables which are chunked using dask in larger (but aligned) chunks than the target zarr chunk size can now be stored using ``to_zarr()`` (:pull:`6258`) By `Tobias Kรถlling `_. - Multi-file datasets containing encoded :py:class:`cftime.datetime` objects can be read in parallel again (:issue:`6226`, :pull:`6249`, :pull:`6305`). By `Martin Bergemann `_ and `Stan West `_. Documentation ~~~~~~~~~~~~~ - Delete files of datasets saved to disk while building the documentation and enable building on Windows via ``sphinx-build`` (:pull:`6237`). By `Stan West `_. Internal Changes ~~~~~~~~~~~~~~~~ .. _whats-new.0.21.1: v0.21.1 (31 January 2022) ------------------------- This is a bugfix release to resolve (:issue:`6216`, :pull:`6207`). Bug fixes ~~~~~~~~~ - Add ``packaging`` as a dependency to Xarray (:issue:`6216`, :pull:`6207`). By `Sebastian Weigand `_ and `Joe Hamman `_. .. _whats-new.0.21.0: v0.21.0 (27 January 2022) ------------------------- Many thanks to the 20 contributors to the v0.21.0 release! Abel Aoun, Anderson Banihirwe, Ant Gib, Chris Roat, Cindy Chiao, Deepak Cherian, Dominik Staล„czak, Fabian Hofmann, Illviljan, Jody Klymak, Joseph K Aicher, Mark Harfouche, Mathias Hauser, Matthew Roeschke, Maximilian Roos, Michael Delgado, Pascal Bourgault, Pierre, Ray Bell, Romain Caneill, Tim Heap, Tom Nicholas, Zeb Nicholls, joseph nowak, keewis. New Features ~~~~~~~~~~~~ - New top-level function :py:func:`cross`. (:issue:`3279`, :pull:`5365`). By `Jimmy Westling `_. - ``keep_attrs`` support for :py:func:`where` (:issue:`4141`, :issue:`4682`, :pull:`4687`). By `Justus Magin `_. - Enable the limit option for dask array in the following methods :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` (:issue:`6112`) By `Joseph Nowak `_. 
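
For illustration, the multi-dimensional ``drop_duplicates`` entry above can be exercised roughly as follows (the array and coordinate values are invented for this sketch)::

    import xarray as xr

    da = xr.DataArray(
        [[0, 1], [2, 3], [4, 5]],
        dims=("x", "y"),
        coords={"x": [0, 0, 1], "y": [10, 10]},
    )
    # de-duplicate repeated index values along both dimensions in one call
    deduped = da.drop_duplicates(dim=["x", "y"])
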
Breaking changes ~~~~~~~~~~~~~~~~ - Rely on matplotlib's default datetime converters instead of pandas' (:issue:`6102`, :pull:`6109`). By `Jimmy Westling `_. - Improve repr readability when there are a large number of dimensions in datasets or dataarrays by wrapping the text once the maximum display width has been exceeded. (:issue:`5546`, :pull:`5662`) By `Jimmy Westling `_. Deprecations ~~~~~~~~~~~~ - Removed the lock kwarg from the zarr and pydap backends, completing the deprecation cycle started in :issue:`5256`. By `Tom Nicholas `_. - Support for ``python 3.7`` has been dropped. (:pull:`5892`) By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Preserve chunks when creating a :py:class:`DataArray` from another :py:class:`DataArray` (:pull:`5984`). By `Fabian Hofmann `_. - Properly support :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` along chunked dimensions (:issue:`6112`). By `Joseph Nowak `_. - Subclasses of ``byte`` and ``str`` (e.g. ``np.str_`` and ``np.bytes_``) will now serialise to disk rather than raising a ``ValueError: unsupported dtype for netCDF4 variable: object`` as they did previously (:pull:`5264`). By `Zeb Nicholls `_. - Fix applying function with non-xarray arguments using :py:func:`xr.map_blocks`. By `Cindy Chiao `_. - No longer raise an error for an all-nan-but-one argument to :py:meth:`DataArray.interpolate_na` when using ``method='nearest'`` (:issue:`5994`, :pull:`6144`). By `Michael Delgado `_. - `dt.season `_ can now handle NaN and NaT. (:pull:`5876`). By `Pierre Loicq `_. - Determination of zarr chunks handles empty lists for encoding chunks or variable chunks that occurs in certain circumstances (:pull:`5526`). By `Chris Roat `_. Internal Changes ~~~~~~~~~~~~~~~~ - Replace ``distutils.version`` with ``packaging.version`` (:issue:`6092`). By `Mathias Hauser `_. - Removed internal checks for ``pd.Panel`` (:issue:`6145`). By `Matthew Roeschke `_. - Add ``pyupgrade`` pre-commit hook (:pull:`6152`). By `Maximilian Roos `_. .. _whats-new.0.20.2: v0.20.2 (9 December 2021) ------------------------- This is a bugfix release to resolve (:issue:`3391`, :issue:`5715`). It also includes performance improvements in unstacking to a ``sparse`` array and a number of documentation improvements. Many thanks to the 20 contributors: Aaron Spring, Alexandre Poux, Deepak Cherian, Enrico Minack, Fabien Maussion, Giacomo Caria, Gijom, Guillaume Maze, Illviljan, Joe Hamman, Joseph Hardin, Kai Mรผhlbauer, Matt Henderson, Maximilian Roos, Michael Delgado, Robert Gieseke, Sebastian Weigand and Stephan Hoyer. Breaking changes ~~~~~~~~~~~~~~~~ - Use complex nan when interpolating complex values out of bounds by default (instead of real nan) (:pull:`6019`). By `Alexandre Poux `_. Performance ~~~~~~~~~~~ - Significantly faster unstacking to a ``sparse`` array. :pull:`5577` By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - :py:func:`xr.map_blocks` and :py:func:`xr.corr` now work when dask is not installed (:issue:`3391`, :issue:`5715`, :pull:`5731`). By `Gijom `_. - Fix plot.line crash for data of shape ``(1, N)`` in _title_for_slice on format_item (:pull:`5948`). By `Sebastian Weigand `_. - Fix a regression in the removal of duplicate backend entrypoints (:issue:`5944`, :pull:`5959`) By `Kai Mรผhlbauer `_. - Fix an issue that datasets from being saved when time variables with units that ``cftime`` can parse but pandas can not were present (:pull:`6049`). By `Tim Heap `_. 
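
As a minimal sketch of the ``xr.map_blocks`` fix listed under the v0.21.0 bug fixes above, assuming ``dask`` is installed (the data and the extra ``offset`` argument are invented)::

    import xarray as xr

    da = xr.DataArray([1, 2, 3], dims="x").chunk({"x": 1})

    def add_offset(block, offset):
        # plain (non-xarray) positional arguments are forwarded to the function
        return block + offset

    result = xr.map_blocks(add_offset, da, args=[10]).compute()
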
Documentation ~~~~~~~~~~~~~ - Better examples in docstrings for groupby and resampling reductions (:pull:`5871`). By `Deepak Cherian `_, `Maximilian Roos `_, `Jimmy Westling `_ . - Add list-like possibility for tolerance parameter in the reindex functions. By `Antoine Gibek `_, Internal Changes ~~~~~~~~~~~~~~~~ - Use ``importlib`` to replace functionality of ``pkg_resources`` in backend plugins tests. (:pull:`5959`). By `Kai Mรผhlbauer `_. .. _whats-new.0.20.1: v0.20.1 (5 November 2021) ------------------------- This is a bugfix release to fix :issue:`5930`. Bug fixes ~~~~~~~~~ - Fix a regression in the detection of the backend entrypoints (:issue:`5930`, :pull:`5931`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - Significant improvements to :ref:`api`. By `Deepak Cherian `_. .. _whats-new.0.20.0: v0.20.0 (1 November 2021) ------------------------- This release brings improved support for pint arrays, methods for weighted standard deviation, variance, and sum of squares, the option to disable the use of the bottleneck library, significantly improved performance of unstack, as well as many bugfixes and internal changes. Many thanks to the 40 contributors to this release!: Aaron Spring, Akio Taniguchi, Alan D. Snow, arfy slowy, Benoit Bovy, Christian Jauvin, crusaderky, Deepak Cherian, Giacomo Caria, Illviljan, James Bourbeau, Joe Hamman, Joseph K Aicher, Julien Herzen, Kai Mรผhlbauer, keewis, lusewell, Martin K. Scherer, Mathias Hauser, Max Grover, Maxime Liquet, Maximilian Roos, Mike Taves, Nathan Lis, pmav99, Pushkar Kopparla, Ray Bell, Rio McMahon, Scott Staniewicz, Spencer Clark, Stefan Bender, Taher Chegini, Thomas Nicholas, Tomas Chor, Tom Augspurger, Victor Negรฎrneac, Zachary Blackwood, Zachary Moon, and Zeb Nicholls. New Features ~~~~~~~~~~~~ - Add ``std``, ``var``, ``sum_of_squares`` to :py:class:`~computation.weighted.DatasetWeighted` and :py:class:`~computation.weighted.DataArrayWeighted`. By `Christian Jauvin `_. - Added a :py:func:`get_options` method to xarray's root namespace (:issue:`5698`, :pull:`5716`) By `Pushkar Kopparla `_. - Xarray now does a better job rendering variable names that are long LaTeX sequences when plotting (:issue:`5681`, :pull:`5682`). By `Tomas Chor `_. - Add an option (``"use_bottleneck"``) to disable the use of ``bottleneck`` using :py:func:`set_options` (:pull:`5560`) By `Justus Magin `_. - Added ``**kwargs`` argument to :py:meth:`open_rasterio` to access overviews (:issue:`3269`). By `Pushkar Kopparla `_. - Added ``storage_options`` argument to :py:meth:`to_zarr` (:issue:`5601`, :pull:`5615`). By `Ray Bell `_, `Zachary Blackwood `_ and `Nathan Lis `_. - Added calendar utilities :py:func:`DataArray.convert_calendar`, :py:func:`DataArray.interp_calendar`, :py:func:`date_range`, :py:func:`date_range_like` and :py:attr:`DataArray.dt.calendar` (:issue:`5155`, :pull:`5233`). By `Pascal Bourgault `_. - Histogram plots are set with a title displaying the scalar coords if any, similarly to the other plots (:issue:`5791`, :pull:`5792`). By `Maxime Liquet `_. - Slice plots display the coords units in the same way as x/y/colorbar labels (:pull:`5847`). By `Victor Negรฎrneac `_. - Added a new :py:attr:`Dataset.chunksizes`, :py:attr:`DataArray.chunksizes`, and :py:attr:`Variable.chunksizes` property, which will always return a mapping from dimension names to chunking pattern along that dimension, regardless of whether the object is a Dataset, DataArray, or Variable. (:issue:`5846`, :pull:`5900`) By `Tom Nicholas `_. 
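
For example, the new weighted reductions listed above can be used along these lines (the weights are arbitrary illustration values)::

    import xarray as xr

    da = xr.DataArray([1.0, 2.0, 3.0, 4.0], dims="x")
    weights = xr.DataArray([1.0, 1.0, 2.0, 2.0], dims="x")

    weighted = da.weighted(weights)
    std = weighted.std(dim="x")
    var = weighted.var(dim="x")
    ssq = weighted.sum_of_squares(dim="x")
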
Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: =============== ====== ==== Package Old New =============== ====== ==== cftime 1.1 1.2 dask 2.15 2.30 distributed 2.15 2.30 lxml 4.5 4.6 matplotlib-base 3.2 3.3 numba 0.49 0.51 numpy 1.17 1.18 pandas 1.0 1.1 pint 0.15 0.16 scipy 1.4 1.5 seaborn 0.10 0.11 sparse 0.8 0.11 toolz 0.10 0.11 zarr 2.4 2.5 =============== ====== ==== - The ``__repr__`` of a :py:class:`xarray.Dataset`'s ``coords`` and ``data_vars`` ignore ``xarray.set_option(display_max_rows=...)`` and show the full output when called directly as, e.g., ``ds.data_vars`` or ``print(ds.data_vars)`` (:issue:`5545`, :pull:`5580`). By `Stefan Bender `_. Deprecations ~~~~~~~~~~~~ - Deprecate :py:func:`open_rasterio` (:issue:`4697`, :pull:`5808`). By `Alan Snow `_. - Set the default argument for ``roll_coords`` to ``False`` for :py:meth:`DataArray.roll` and :py:meth:`Dataset.roll`. (:pull:`5653`) By `Tom Nicholas `_. - :py:meth:`xarray.open_mfdataset` will now error instead of warn when a value for ``concat_dim`` is passed alongside ``combine='by_coords'``. By `Tom Nicholas `_. Bug fixes ~~~~~~~~~ - Fix ZeroDivisionError from saving dask array with empty dimension (:issue:`5741`). By `Joseph K Aicher `_. - Fixed performance bug where ``cftime`` import attempted within various core operations if ``cftime`` not installed (:pull:`5640`). By `Luke Sewell `_ - Fixed bug when combining named DataArrays using :py:func:`combine_by_coords`. (:pull:`5834`). By `Tom Nicholas `_. - When a custom engine was used in :py:func:`~xarray.open_dataset` the engine wasn't initialized properly, causing missing argument errors or inconsistent method signatures. (:pull:`5684`) By `Jimmy Westling `_. - Numbers are properly formatted in a plot's title (:issue:`5788`, :pull:`5789`). By `Maxime Liquet `_. - Faceted plots will no longer raise a ``pint.UnitStrippedWarning`` when a ``pint.Quantity`` array is plotted, and will correctly display the units of the data in the colorbar (if there is one) (:pull:`5886`). By `Tom Nicholas `_. - With backends, check for path-like objects rather than ``pathlib.Path`` type, use ``os.fspath`` (:pull:`5879`). By `Mike Taves `_. - ``open_mfdataset()`` now accepts a single ``pathlib.Path`` object (:issue:`5881`). By `Panos Mavrogiorgos `_. - Improved performance of :py:meth:`Dataset.unstack` (:pull:`5906`). By `Tom Augspurger `_. Documentation ~~~~~~~~~~~~~ - Users are instructed to try ``use_cftime=True`` if a ``TypeError`` occurs when combining datasets and one of the types involved is a subclass of ``cftime.datetime`` (:pull:`5776`). By `Zeb Nicholls `_. - A clearer error is now raised if a user attempts to assign a Dataset to a single key of another Dataset. (:pull:`5839`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Explicit indexes refactor: avoid ``len(index)`` in ``map_blocks`` (:pull:`5670`). By `Deepak Cherian `_. - Explicit indexes refactor: decouple ``xarray.Index``` from ``xarray.Variable`` (:pull:`5636`). By `Benoit Bovy `_. - Fix ``Mapping`` argument typing to allow mypy to pass on ``str`` keys (:pull:`5690`). By `Maximilian Roos `_. - Annotate many of our tests, and fix some of the resulting typing errors. This will also mean our typing annotations are tested as part of CI. (:pull:`5728`). By `Maximilian Roos `_. - Improve the performance of reprs for large datasets or dataarrays. (:pull:`5661`) By `Jimmy Westling `_. - Use isort's ``float_to_top`` config. (:pull:`5695`). By `Maximilian Roos `_. 
- Remove use of the deprecated ``kind`` argument in :py:meth:`pandas.Index.get_slice_bound` inside :py:class:`xarray.CFTimeIndex` tests (:pull:`5723`). By `Spencer Clark `_. - Refactor ``xarray.core.duck_array_ops`` to no longer special-case dispatching to dask versions of functions when acting on dask arrays, instead relying numpy and dask's adherence to NEP-18 to dispatch automatically. (:pull:`5571`) By `Tom Nicholas `_. - Add an ASV benchmark CI and improve performance of the benchmarks (:pull:`5796`) By `Jimmy Westling `_. - Use ``importlib`` to replace functionality of ``pkg_resources`` such as version setting and loading of resources. (:pull:`5845`). By `Martin K. Scherer `_. .. _whats-new.0.19.0: v0.19.0 (23 July 2021) ---------------------- This release brings improvements to plotting of categorical data, the ability to specify how attributes are combined in xarray operations, a new high-level :py:func:`unify_chunks` function, as well as various deprecations, bug fixes, and minor improvements. Many thanks to the 29 contributors to this release!: Andrew Williams, Augustus, Aureliana Barghini, Benoit Bovy, crusaderky, Deepak Cherian, ellesmith88, Elliott Sales de Andrade, Giacomo Caria, github-actions[bot], Illviljan, Joeperdefloep, joooeey, Julia Kent, Julius Busecke, keewis, Mathias Hauser, Matthias Gรถbel, Mattia Almansi, Maximilian Roos, Peter Andreas Entschev, Ray Bell, Sander, Santiago Soler, Sebastian, Spencer Clark, Stephan Hoyer, Thomas Hirtz, Thomas Nicholas. New Features ~~~~~~~~~~~~ - Allow passing argument ``missing_dims`` to :py:meth:`Variable.transpose` and :py:meth:`Dataset.transpose` (:issue:`5550`, :pull:`5586`) By `Giacomo Caria `_. - Allow passing a dictionary as coords to a :py:class:`DataArray` (:issue:`5527`, reverts :pull:`1539`, which had deprecated this due to python's inconsistent ordering in earlier versions). By `Sander van Rijn `_. - Added :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct` (:issue:`5454`, :pull:`5475`). By `Deepak Cherian `_. - Xarray now uses consolidated metadata by default when writing and reading Zarr stores (:issue:`5251`). By `Stephan Hoyer `_. - New top-level function :py:func:`unify_chunks`. By `Mattia Almansi `_. - Allow assigning values to a subset of a dataset using positional or label-based indexing (:issue:`3015`, :pull:`5362`). By `Matthias Gรถbel `_. - Attempting to reduce a weighted object over missing dimensions now raises an error (:pull:`5362`). By `Mattia Almansi `_. - Add ``.sum`` to :py:meth:`~xarray.DataArray.rolling_exp` and :py:meth:`~xarray.Dataset.rolling_exp` for exponentially weighted rolling sums. These require numbagg 0.2.1; (:pull:`5178`). By `Maximilian Roos `_. - :py:func:`xarray.cov` and :py:func:`xarray.corr` now lazily check for missing values if inputs are dask arrays (:issue:`4804`, :pull:`5284`). By `Andrew Williams `_. - Attempting to ``concat`` list of elements that are not all ``Dataset`` or all ``DataArray`` now raises an error (:issue:`5051`, :pull:`5425`). By `Thomas Hirtz `_. - allow passing a function to ``combine_attrs`` (:pull:`4896`). By `Justus Magin `_. - Allow plotting categorical data (:pull:`5464`). By `Jimmy Westling `_. - Allow removal of the coordinate attribute ``coordinates`` on variables by setting ``.attrs['coordinates']= None`` (:issue:`5510`). By `Elle Smith `_. - Added :py:meth:`DataArray.to_numpy`, :py:meth:`DataArray.as_numpy`, and :py:meth:`Dataset.as_numpy`. (:pull:`5568`). By `Tom Nicholas `_. 
- Units in plot labels are now automatically inferred from wrapped :py:meth:`pint.Quantity` arrays. (:pull:`5561`). By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - The default ``mode`` for :py:meth:`Dataset.to_zarr` when ``region`` is set has changed to the new ``mode="r+"``, which only allows for overriding pre-existing array values. This is a safer default than the prior ``mode="a"``, and allows for higher performance writes (:pull:`5252`). By `Stephan Hoyer `_. - The main parameter to :py:func:`combine_by_coords` is renamed to ``data_objects`` instead of ``datasets`` so anyone calling this method using a named parameter will need to update the name accordingly (:issue:`3248`, :pull:`4696`). By `Augustus Ijams `_. Deprecations ~~~~~~~~~~~~ - Removed the deprecated ``dim`` kwarg to :py:func:`DataArray.integrate` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.rolling` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.coarsen` (:pull:`5630`) - Completed deprecation of passing an ``xarray.DataArray`` to :py:func:`Variable` - will now raise a ``TypeError`` (:pull:`5630`) Bug fixes ~~~~~~~~~ - Fix a minor incompatibility between partial datetime string indexing with a :py:class:`CFTimeIndex` and upcoming pandas version 1.3.0 (:issue:`5356`, :pull:`5359`). By `Spencer Clark `_. - Fix 1-level multi-index incorrectly converted to single index (:issue:`5384`, :pull:`5385`). By `Benoit Bovy `_. - Don't cast a duck array in a coordinate to :py:class:`numpy.ndarray` in :py:meth:`DataArray.differentiate` (:pull:`5408`) By `Justus Magin `_. - Fix the ``repr`` of :py:class:`Variable` objects with ``display_expand_data=True`` (:pull:`5406`) By `Justus Magin `_. - Plotting a pcolormesh with ``xscale="log"`` and/or ``yscale="log"`` works as expected after improving the way the interval breaks are generated (:issue:`5333`). By `Santiago Soler `_ - :py:func:`combine_by_coords` can now handle combining a list of unnamed ``DataArray`` as input (:issue:`3248`, :pull:`4696`). By `Augustus Ijams `_. Internal Changes ~~~~~~~~~~~~~~~~ - Run CI on the first & last python versions supported only; currently 3.7 & 3.9. (:pull:`5433`) By `Maximilian Roos `_. - Publish test results & timings on each PR. (:pull:`5537`) By `Maximilian Roos `_. - Explicit indexes refactor: add a ``xarray.Index.query()`` method in which one may eventually provide a custom implementation of label-based data selection (not ready yet for public use). Also refactor the internal, pandas-specific implementation into ``PandasIndex.query()`` and ``PandasMultiIndex.query()`` (:pull:`5322`). By `Benoit Bovy `_. .. _whats-new.0.18.2: v0.18.2 (19 May 2021) --------------------- This release reverts a regression in xarray's unstacking of dask-backed arrays. .. _whats-new.0.18.1: v0.18.1 (18 May 2021) --------------------- This release is intended as a small patch release to be compatible with the new 2021.5.0 ``dask.distributed`` release. It also includes a new ``drop_duplicates`` method, some documentation improvements, the beginnings of our internal Index refactoring, and some bug fixes. Thank you to all 16 contributors! Anderson Banihirwe, Andrew, Benoit Bovy, Brewster Malevich, Giacomo Caria, Illviljan, James Bourbeau, Keewis, Maximilian Roos, Ravin Kumar, Stephan Hoyer, Thomas Nicholas, Tom Nicholas, Zachary Moon. New Features ~~~~~~~~~~~~ - Implement :py:meth:`DataArray.drop_duplicates` to remove duplicate dimension values (:pull:`5239`). By `Andrew Huang `_. 
- Allow passing ``combine_attrs`` strategy names to the ``keep_attrs`` parameter of :py:func:`apply_ufunc` (:pull:`5041`) By `Justus Magin `_. - :py:meth:`Dataset.interp` now allows interpolation with non-numerical datatypes, such as booleans, instead of dropping them. (:issue:`4761` :pull:`5008`). By `Jimmy Westling `_. - Raise more informative error when decoding time variables with invalid reference dates. (:issue:`5199`, :pull:`5288`). By `Giacomo Caria `_. Bug fixes ~~~~~~~~~ - Opening netCDF files from a path that doesn't end in ``.nc`` without supplying an explicit ``engine`` works again (:issue:`5295`), fixing a bug introduced in 0.18.0. By `Stephan Hoyer `_ Documentation ~~~~~~~~~~~~~ - Clean up and enhance docstrings for the :py:class:`DataArray.plot` and ``Dataset.plot.*`` families of methods (:pull:`5285`). By `Zach Moon `_. - Explanation of deprecation cycles and how to implement them added to contributors guide. (:pull:`5289`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Explicit indexes refactor: add an ``xarray.Index`` base class and ``Dataset.xindexes`` / ``DataArray.xindexes`` properties. Also rename ``PandasIndexAdapter`` to ``PandasIndex``, which now inherits from ``xarray.Index`` (:pull:`5102`). By `Benoit Bovy `_. - Replace ``SortedKeysDict`` with python's ``dict``, given dicts are now ordered. By `Maximilian Roos `_. - Updated the release guide for developers. Now accounts for actions that are automated via github actions. (:pull:`5274`). By `Tom Nicholas `_. .. _whats-new.0.18.0: v0.18.0 (6 May 2021) -------------------- This release brings a few important performance improvements, a wide range of usability upgrades, lots of bug fixes, and some new features. These include a plugin API to add backend engines, a new theme for the documentation, curve fitting methods, and several new plotting functions. Many thanks to the 38 contributors to this release: Aaron Spring, Alessandro Amici, Alex Marandon, Alistair Miles, Ana Paula Krelling, Anderson Banihirwe, Aureliana Barghini, Baudouin Raoult, Benoit Bovy, Blair Bonnett, David Trรฉmouilles, Deepak Cherian, Gabriel Medeiros Abrahรฃo, Giacomo Caria, Hauke Schulz, Illviljan, Mathias Hauser, Matthias Bussonnier, Mattia Almansi, Maximilian Roos, Ray Bell, Richard Kleijn, Ryan Abernathey, Sam Levang, Spencer Clark, Spencer Jones, Tammas Loughran, Tobias Kรถlling, Todd, Tom Nicholas, Tom White, Victor Negรฎrneac, Xianxiang Li, Zeb Nicholls, crusaderky, dschwoerer, johnomotani, keewis New Features ~~~~~~~~~~~~ - apply ``combine_attrs`` on data variables and coordinate variables when concatenating and merging datasets and dataarrays (:pull:`4902`). By `Justus Magin `_. - Add :py:meth:`Dataset.to_pandas` (:pull:`5247`) By `Giacomo Caria `_. - Add :py:meth:`DataArray.plot.surface` which wraps matplotlib's ``plot_surface`` to make surface plots (:issue:`2235` :issue:`5084` :pull:`5101`). By `John Omotani `_. - Allow passing multiple arrays to :py:meth:`Dataset.__setitem__` (:pull:`5216`). By `Giacomo Caria `_. - Add 'cumulative' option to :py:meth:`Dataset.integrate` and :py:meth:`DataArray.integrate` so that result is a cumulative integral, like :py:func:`scipy.integrate.cumulative_trapezoidal` (:pull:`5153`). By `John Omotani `_. - Add ``safe_chunks`` option to :py:meth:`Dataset.to_zarr` which allows overriding checks made to ensure Dask and Zarr chunk compatibility (:issue:`5056`). 
By `Ryan Abernathey `_ - Add :py:meth:`Dataset.query` and :py:meth:`DataArray.query` which enable indexing of datasets and data arrays by evaluating query expressions against the values of the data variables (:pull:`4984`). By `Alistair Miles `_. - Allow passing ``combine_attrs`` to :py:meth:`Dataset.merge` (:pull:`4895`). By `Justus Magin `_. - Support for `dask.graph_manipulation `_ (requires dask >=2021.3) By `Guido Imperiale `_ - Add :py:meth:`Dataset.plot.streamplot` for streamplot plots with :py:class:`Dataset` variables (:pull:`5003`). By `John Omotani `_. - Many of the arguments for the :py:attr:`DataArray.str` methods now support providing an array-like input. In this case, the array provided to the arguments is broadcast against the original array and applied elementwise. - :py:attr:`DataArray.str` now supports ``+``, ``*``, and ``%`` operators. These behave the same as they do for :py:class:`str`, except that they follow array broadcasting rules. - A large number of new :py:attr:`DataArray.str` methods were implemented, :py:meth:`DataArray.str.casefold`, :py:meth:`DataArray.str.cat`, :py:meth:`DataArray.str.extract`, :py:meth:`DataArray.str.extractall`, :py:meth:`DataArray.str.findall`, :py:meth:`DataArray.str.format`, :py:meth:`DataArray.str.get_dummies`, :py:meth:`DataArray.str.islower`, :py:meth:`DataArray.str.join`, :py:meth:`DataArray.str.normalize`, :py:meth:`DataArray.str.partition`, :py:meth:`DataArray.str.rpartition`, :py:meth:`DataArray.str.rsplit`, and :py:meth:`DataArray.str.split`. A number of these methods allow for splitting or joining the strings in an array. (:issue:`4622`) By `Todd Jennings `_ - Thanks to the new pluggable backend infrastructure external packages may now use the ``xarray.backends`` entry point to register additional engines to be used in :py:func:`open_dataset`, see the documentation in :ref:`add_a_backend` (:issue:`4309`, :issue:`4803`, :pull:`4989`, :pull:`4810` and many others). The backend refactor has been sponsored with the "Essential Open Source Software for Science" grant from the `Chan Zuckerberg Initiative `_ and developed by `B-Open `_. By `Aureliana Barghini `_ and `Alessandro Amici `_. - :py:attr:`~core.accessor_dt.DatetimeAccessor.date` added (:issue:`4983`, :pull:`4994`). By `Hauke Schulz `_. - Implement ``__getitem__`` for both :py:class:`~core.groupby.DatasetGroupBy` and :py:class:`~core.groupby.DataArrayGroupBy`, inspired by pandas' :py:meth:`~pandas.core.groupby.GroupBy.get_group`. By `Deepak Cherian `_. - Switch the tutorial functions to use `pooch `_ (which is now a optional dependency) and add :py:func:`tutorial.open_rasterio` as a way to open example rasterio files (:issue:`3986`, :pull:`4102`, :pull:`5074`). By `Justus Magin `_. - Add typing information to unary and binary arithmetic operators operating on :py:class:`Dataset`, :py:class:`DataArray`, :py:class:`Variable`, :py:class:`~core.groupby.DatasetGroupBy` or :py:class:`~core.groupby.DataArrayGroupBy` (:pull:`4904`). By `Richard Kleijn `_. - Add a ``combine_attrs`` parameter to :py:func:`open_mfdataset` (:pull:`4971`). By `Justus Magin `_. - Enable passing arrays with a subset of dimensions to :py:meth:`DataArray.clip` & :py:meth:`Dataset.clip`; these methods now use :py:func:`xarray.apply_ufunc`; (:pull:`5184`). By `Maximilian Roos `_. - Disable the ``cfgrib`` backend if the ``eccodes`` library is not installed (:pull:`5083`). By `Baudouin Raoult `_. - Added :py:meth:`DataArray.curvefit` and :py:meth:`Dataset.curvefit` for general curve fitting applications. 
(:issue:`4300`, :pull:`4849`) By `Sam Levang `_. - Add options to control expand/collapse of sections in display of Dataset and DataArray. The function :py:func:`set_options` now takes keyword arguments ``display_expand_attrs``, ``display_expand_coords``, ``display_expand_data``, ``display_expand_data_vars``, all of which can be one of ``True`` to always expand, ``False`` to always collapse, or ``default`` to expand unless over a pre-defined limit (:pull:`5126`). By `Tom White `_. - Significant speedups in :py:meth:`Dataset.interp` and :py:meth:`DataArray.interp`. (:issue:`4739`, :pull:`4740`). By `Deepak Cherian `_. - Prevent passing ``concat_dim`` to :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified, which should never have been possible (as :py:func:`xarray.combine_by_coords` has no ``concat_dim`` argument to pass to). Also removes unneeded internal reordering of datasets in :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified. Fixes (:issue:`5230`). By `Tom Nicholas `_. - Implement ``__setitem__`` for ``xarray.core.indexing.DaskIndexingAdapter`` if dask version supports item assignment. (:issue:`5171`, :pull:`5174`) By `Tammas Loughran `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: ============ ====== ==== Package Old New ============ ====== ==== boto3 1.12 1.13 cftime 1.0 1.1 dask 2.11 2.15 distributed 2.11 2.15 matplotlib 3.1 3.2 numba 0.48 0.49 ============ ====== ==== - :py:func:`open_dataset` and :py:func:`open_dataarray` now accept only the first argument as positional, all others need to be passed are keyword arguments. This is part of the refactor to support external backends (:issue:`4309`, :pull:`4989`). By `Alessandro Amici `_. - Functions that are identities for 0d data return the unchanged data if axis is empty. This ensures that Datasets where some variables do not have the averaged dimensions are not accidentally changed (:issue:`4885`, :pull:`5207`). By `David Schwรถrer `_. - :py:attr:`DataArray.coarsen` and :py:attr:`Dataset.coarsen` no longer support passing ``keep_attrs`` via its constructor. Pass ``keep_attrs`` via the applied function, i.e. use ``ds.coarsen(...).mean(keep_attrs=False)`` instead of ``ds.coarsen(..., keep_attrs=False).mean()``. Further, coarsen now keeps attributes per default (:pull:`5227`). By `Mathias Hauser `_. - switch the default of the :py:func:`merge` ``combine_attrs`` parameter to ``"override"``. This will keep the current behavior for merging the ``attrs`` of variables but stop dropping the ``attrs`` of the main objects (:pull:`4902`). By `Justus Magin `_. Deprecations ~~~~~~~~~~~~ - Warn when passing ``concat_dim`` to :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified, which should never have been possible (as :py:func:`xarray.combine_by_coords` has no ``concat_dim`` argument to pass to). Also removes unneeded internal reordering of datasets in :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified. Fixes (:issue:`5230`), via (:pull:`5231`, :pull:`5255`). By `Tom Nicholas `_. - The ``lock`` keyword argument to :py:func:`open_dataset` and :py:func:`open_dataarray` is now a backend specific option. It will give a warning if passed to a backend that doesn't support it instead of being silently ignored. From the next version it will raise an error. This is part of the refactor to support external backends (:issue:`5073`). By `Tom Nicholas `_ and `Alessandro Amici `_. 
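
The ``coarsen``/``keep_attrs`` change in the breaking changes above amounts to moving the keyword from the constructor to the applied reduction; a hedged sketch with invented data and attributes::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"a": ("x", np.arange(8))}, attrs={"title": "demo"})

    # previously: ds.coarsen(x=2, keep_attrs=False).mean()
    coarse = ds.coarsen(x=2).mean(keep_attrs=False)
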
Bug fixes ~~~~~~~~~ - Properly support :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill` along chunked dimensions. (:issue:`2699`). By `Deepak Cherian `_. - Fix 2d plot failure for certain combinations of dimensions when ``x`` is 1d and ``y`` is 2d (:issue:`5097`, :pull:`5099`). By `John Omotani `_. - Ensure standard calendar times encoded with large values (i.e. greater than approximately 292 years), can be decoded correctly without silently overflowing (:pull:`5050`). This was a regression in xarray 0.17.0. By `Zeb Nicholls `_. - Added support for ``numpy.bool_`` attributes in roundtrips using ``h5netcdf`` engine with ``invalid_netcdf=True`` [which casts ``bool`` s to ``numpy.bool_``] (:issue:`4981`, :pull:`4986`). By `Victor Negรฎrneac `_. - Don't allow passing ``axis`` to :py:meth:`Dataset.reduce` methods (:issue:`3510`, :pull:`4940`). By `Justus Magin `_. - Decode values as signed if attribute ``_Unsigned = "false"`` (:issue:`4954`) By `Tobias Kรถlling `_. - Keep coords attributes when interpolating when the indexer is not a Variable. (:issue:`4239`, :issue:`4839` :pull:`5031`) By `Jimmy Westling `_. - Ensure standard calendar dates encoded with a calendar attribute with some or all uppercase letters can be decoded or encoded to or from ``np.datetime64[ns]`` dates with or without ``cftime`` installed (:issue:`5093`, :pull:`5180`). By `Spencer Clark `_. - Warn on passing ``keep_attrs`` to ``resample`` and ``rolling_exp`` as they are ignored, pass ``keep_attrs`` to the applied function instead (:pull:`5265`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - New section on :ref:`add_a_backend` in the "Internals" chapter aimed to backend developers (:issue:`4803`, :pull:`4810`). By `Aureliana Barghini `_. - Add :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit` under "See also" in the docstrings of :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit` (:issue:`5016`, :pull:`5020`). By `Aaron Spring `_. - New sphinx theme & rearrangement of the docs (:pull:`4835`). By `Anderson Banihirwe `_. Internal Changes ~~~~~~~~~~~~~~~~ - Enable displaying mypy error codes and ignore only specific error codes using ``# type: ignore[error-code]`` (:pull:`5096`). By `Mathias Hauser `_. - Replace uses of ``raises_regex`` with the more standard ``pytest.raises(Exception, match="foo")``; (:pull:`5188`), (:pull:`5191`). By `Maximilian Roos `_. .. _whats-new.0.17.0: v0.17.0 (24 Feb 2021) --------------------- This release brings a few important performance improvements, a wide range of usability upgrades, lots of bug fixes, and some new features. These include better ``cftime`` support, a new quiver plot, better ``unstack`` performance, more efficient memory use in rolling operations, and some python packaging improvements. We also have a few documentation improvements (and more planned!). 
Many thanks to the 36 contributors to this release: Alessandro Amici, Anderson Banihirwe, Aureliana Barghini, Ayrton Bourn, Benjamin Bean, Blair Bonnett, Chun Ho Chow, DWesl, Daniel Mesejo-Leรณn, Deepak Cherian, Eric Keenan, Illviljan, Jens Hedegaard Nielsen, Jody Klymak, Julien Seguinot, Julius Busecke, Kai Mรผhlbauer, Leif Denby, Martin Durant, Mathias Hauser, Maximilian Roos, Michael Mann, Ray Bell, RichardScottOZ, Spencer Clark, Tim Gates, Tom Nicholas, Yunus Sevinchan, alexamici, aurghs, crusaderky, dcherian, ghislainp, keewis, rhkleijn Breaking changes ~~~~~~~~~~~~~~~~ - xarray no longer supports python 3.6 The minimum version policy was changed to also apply to projects with irregular releases. As a result, the minimum versions of some dependencies have changed: ============ ====== ==== Package Old New ============ ====== ==== Python 3.6 3.7 setuptools 38.4 40.4 numpy 1.15 1.17 pandas 0.25 1.0 dask 2.9 2.11 distributed 2.9 2.11 bottleneck 1.2 1.3 h5netcdf 0.7 0.8 iris 2.2 2.4 netcdf4 1.4 1.5 pseudonetcdf 3.0 3.1 rasterio 1.0 1.1 scipy 1.3 1.4 seaborn 0.9 0.10 zarr 2.3 2.4 ============ ====== ==== (:issue:`4688`, :pull:`4720`, :pull:`4907`, :pull:`4942`) - As a result of :pull:`4684` the default units encoding for datetime-like values (``np.datetime64[ns]`` or ``cftime.datetime``) will now always be set such that ``int64`` values can be used. In the past, no units finer than "seconds" were chosen, which would sometimes mean that ``float64`` values were required, which would lead to inaccurate I/O round-trips. - Variables referred to in attributes like ``bounds`` and ``grid_mapping`` can be set as coordinate variables. These attributes are moved to :py:attr:`DataArray.encoding` from :py:attr:`DataArray.attrs`. This behaviour is controlled by the ``decode_coords`` kwarg to :py:func:`open_dataset` and :py:func:`open_mfdataset`. The full list of decoded attributes is in :ref:`weather-climate` (:pull:`2844`, :issue:`3689`) - As a result of :pull:`4911` the output from calling :py:meth:`DataArray.sum` or :py:meth:`DataArray.prod` on an integer array with ``skipna=True`` and a non-None value for ``min_count`` will now be a float array rather than an integer array. Deprecations ~~~~~~~~~~~~ - ``dim`` argument to :py:meth:`DataArray.integrate` is being deprecated in favour of a ``coord`` argument, for consistency with :py:meth:`Dataset.integrate`. For now using ``dim`` issues a ``FutureWarning``. It will be removed in version 0.19.0 (:pull:`3993`). By `Tom Nicholas `_. - Deprecated ``autoclose`` kwargs from :py:func:`open_dataset` are removed (:pull:`4725`). By `Aureliana Barghini `_. - the return value of :py:meth:`Dataset.update` is being deprecated to make it work more like :py:meth:`dict.update`. It will be removed in version 0.19.0 (:pull:`4932`). By `Justus Magin `_. New Features ~~~~~~~~~~~~ - :py:meth:`~xarray.cftime_range` and :py:meth:`DataArray.resample` now support millisecond (``"L"`` or ``"ms"``) and microsecond (``"U"`` or ``"us"``) frequencies for ``cftime.datetime`` coordinates (:issue:`4097`, :pull:`4758`). By `Spencer Clark `_. - Significantly higher ``unstack`` performance on numpy-backed arrays which contain missing values; 8x faster than previous versions in our benchmark, and now 2x faster than pandas (:pull:`4746`). By `Maximilian Roos `_. - Add :py:meth:`Dataset.plot.quiver` for quiver plots with :py:class:`Dataset` variables. By `Deepak Cherian `_. - Add ``"drop_conflicts"`` to the strategies supported by the ``combine_attrs`` kwarg (:issue:`4749`, :pull:`4827`). 
By `Justus Magin `_. - Allow installing from git archives (:pull:`4897`). By `Justus Magin `_. - :py:class:`~computation.rolling.DataArrayCoarsen` and :py:class:`~computation.rolling.DatasetCoarsen` now implement a ``reduce`` method, enabling coarsening operations with custom reduction functions (:issue:`3741`, :pull:`4939`). By `Spencer Clark `_. - Most rolling operations use significantly less memory. (:issue:`4325`). By `Deepak Cherian `_. - Add :py:meth:`Dataset.drop_isel` and :py:meth:`DataArray.drop_isel` (:issue:`4658`, :pull:`4819`). By `Daniel Mesejo `_. - Xarray now leverages updates as of cftime version 1.4.1, which enable exact I/O roundtripping of ``cftime.datetime`` objects (:pull:`4758`). By `Spencer Clark `_. - :py:func:`open_dataset` and :py:func:`open_mfdataset` now accept ``fsspec`` URLs (including globs for the latter) for ``engine="zarr"``, and so allow reading from many remote and other file systems (:pull:`4461`) By `Martin Durant `_ - :py:meth:`DataArray.swap_dims` & :py:meth:`Dataset.swap_dims` now accept dims in the form of kwargs as well as a dict, like most similar methods. By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - Use specific type checks in ``xarray.core.variable.as_compatible_data`` instead of blanket access to ``values`` attribute (:issue:`2097`) By `Yunus Sevinchan `_. - :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` do not trigger computations anymore if :py:meth:`Dataset.weighted` or :py:meth:`DataArray.weighted` are applied (:issue:`4625`, :pull:`4668`). By `Julius Busecke `_. - :py:func:`merge` with ``combine_attrs='override'`` makes a copy of the attrs (:issue:`4627`). - By default, when possible, xarray will now always use values of type ``int64`` when encoding and decoding ``numpy.datetime64[ns]`` datetimes. This ensures that maximum precision and accuracy are maintained in the round-tripping process (:issue:`4045`, :pull:`4684`). It also enables encoding and decoding standard calendar dates with time units of nanoseconds (:pull:`4400`). By `Spencer Clark `_ and `Mark Harfouche `_. - :py:meth:`DataArray.astype`, :py:meth:`Dataset.astype` and :py:meth:`Variable.astype` support the ``order`` and ``subok`` parameters again. This fixes a regression introduced in version 0.16.1 (:issue:`4644`, :pull:`4683`). By `Richard Kleijn `_ . - Remove dictionary unpacking when using ``.loc`` to avoid collision with ``.sel`` parameters (:pull:`4695`). By `Anderson Banihirwe `_. - Fix the legend created by :py:meth:`Dataset.plot.scatter` (:issue:`4641`, :pull:`4723`). By `Justus Magin `_. - Fix a crash in orthogonal indexing on geographic coordinates with ``engine='cfgrib'`` (:issue:`4733` :pull:`4737`). By `Alessandro Amici `_. - Coordinates with dtype ``str`` or ``bytes`` now retain their dtype on many operations, e.g. ``reindex``, ``align``, ``concat``, ``assign``, previously they were cast to an object dtype (:issue:`2658` and :issue:`4543`). By `Mathias Hauser `_. - Limit number of data rows when printing large datasets. (:issue:`4736`, :pull:`4750`). By `Jimmy Westling `_. - Add ``missing_dims`` parameter to transpose (:issue:`4647`, :pull:`4767`). By `Daniel Mesejo `_. - Resolve intervals before appending other metadata to labels when plotting (:issue:`4322`, :pull:`4794`). By `Justus Magin `_. - Fix regression when decoding a variable with a ``scale_factor`` and ``add_offset`` given as a list of length one (:issue:`4631`). By `Mathias Hauser `_. - Expand user directory paths (e.g. 
``~/``) in :py:func:`open_mfdataset` and :py:meth:`Dataset.to_zarr` (:issue:`4783`, :pull:`4795`). By `Julien Seguinot `_. - Raise DeprecationWarning when trying to typecast a tuple containing a :py:class:`DataArray`. User now prompted to first call ``.data`` on it (:issue:`4483`). By `Chun Ho Chow `_. - Ensure that :py:meth:`Dataset.interp` raises ``ValueError`` when interpolating outside coordinate range and ``bounds_error=True`` (:issue:`4854`, :pull:`4855`). By `Leif Denby `_. - Fix time encoding bug associated with using cftime versions greater than 1.4.0 with xarray (:issue:`4870`, :pull:`4871`). By `Spencer Clark `_. - Stop :py:meth:`DataArray.sum` and :py:meth:`DataArray.prod` computing lazy arrays when called with a ``min_count`` parameter (:issue:`4898`, :pull:`4911`). By `Blair Bonnett `_. - Fix bug preventing the ``min_count`` parameter to :py:meth:`DataArray.sum` and :py:meth:`DataArray.prod` working correctly when calculating over all axes of a float64 array (:issue:`4898`, :pull:`4911`). By `Blair Bonnett `_. - Fix decoding of vlen strings using h5py versions greater than 3.0.0 with h5netcdf backend (:issue:`4570`, :pull:`4893`). By `Kai Mรผhlbauer `_. - Allow converting :py:class:`Dataset` or :py:class:`DataArray` objects with a ``MultiIndex`` and at least one other dimension to a ``pandas`` object (:issue:`3008`, :pull:`4442`). By `ghislainp `_. Documentation ~~~~~~~~~~~~~ - Add information about requirements for accessor classes (:issue:`2788`, :pull:`4657`). By `Justus Magin `_. - Start a list of external I/O integrating with ``xarray`` (:issue:`683`, :pull:`4566`). By `Justus Magin `_. - Add concat examples and improve combining documentation (:issue:`4620`, :pull:`4645`). By `Ray Bell `_ and `Justus Magin `_. - explicitly mention that :py:meth:`Dataset.update` updates inplace (:issue:`2951`, :pull:`4932`). By `Justus Magin `_. - Added docs on vectorized indexing (:pull:`4711`). By `Eric Keenan `_. Internal Changes ~~~~~~~~~~~~~~~~ - Speed up of the continuous integration tests on azure. - Switched to mamba and use matplotlib-base for a faster installation of all dependencies (:pull:`4672`). - Use ``pytest.mark.skip`` instead of ``pytest.mark.xfail`` for some tests that can currently not succeed (:pull:`4685`). - Run the tests in parallel using pytest-xdist (:pull:`4694`). By `Justus Magin `_ and `Mathias Hauser `_. - Use ``pyproject.toml`` instead of the ``setup_requires`` option for ``setuptools`` (:pull:`4897`). By `Justus Magin `_. - Replace all usages of ``assert x.identical(y)`` with ``assert_identical(x, y)`` for clearer error messages (:pull:`4752`). By `Maximilian Roos `_. - Speed up attribute style access (e.g. ``ds.somevar`` instead of ``ds["somevar"]``) and tab completion in IPython (:issue:`4741`, :pull:`4742`). By `Richard Kleijn `_. - Added the ``set_close`` method to ``Dataset`` and ``DataArray`` for backends to specify how to voluntary release all resources. (:pull:`#4809`) By `Alessandro Amici `_. - Update type hints to work with numpy v1.20 (:pull:`4878`). By `Mathias Hauser `_. - Ensure warnings cannot be turned into exceptions in :py:func:`testing.assert_equal` and the other ``assert_*`` functions (:pull:`4864`). By `Mathias Hauser `_. - Performance improvement when constructing DataArrays. Significantly speeds up repr for Datasets with large number of variables. By `Deepak Cherian `_. .. 
_whats-new.0.16.2: v0.16.2 (30 Nov 2020) --------------------- This release brings the ability to write to limited regions of ``zarr`` files, open zarr files with :py:func:`open_dataset` and :py:func:`open_mfdataset`, increased support for propagating ``attrs`` using the ``keep_attrs`` flag, as well as numerous bugfixes and documentation improvements. Many thanks to the 31 contributors who contributed to this release: Aaron Spring, Akio Taniguchi, Aleksandar Jelenak, alexamici, Alexandre Poux, Anderson Banihirwe, Andrew Pauling, Ashwin Vishnu, aurghs, Brian Ward, Caleb, crusaderky, Dan Nowacki, darikg, David Brochart, David Huard, Deepak Cherian, Dion Hรคfner, Gerardo Rivera, Gerrit Holl, Illviljan, inakleinbottle, Jacob Tomlinson, James A. Bednar, jenssss, Joe Hamman, johnomotani, Joris Van den Bossche, Julia Kent, Julius Busecke, Kai Mรผhlbauer, keewis, Keisuke Fujii, Kyle Cranmer, Luke Volpatti, Mathias Hauser, Maximilian Roos, Michaรซl Defferrard, Michal Baumgartner, Nick R. Papior, Pascal Bourgault, Peter Hausamann, PGijsbers, Ray Bell, Romain Martinez, rpgoldman, Russell Manser, Sahid Velji, Samnan Rahee, Sander, Spencer Clark, Stephan Hoyer, Thomas Zilio, Tobias Kรถlling, Tom Augspurger, Wei Ji, Yash Saboo, Zeb Nicholls, Deprecations ~~~~~~~~~~~~ - :py:attr:`~core.accessor_dt.DatetimeAccessor.weekofyear` and :py:attr:`~core.accessor_dt.DatetimeAccessor.week` have been deprecated. Use ``DataArray.dt.isocalendar().week`` instead (:pull:`4534`). By `Mathias Hauser `_. `Maximilian Roos `_, and `Spencer Clark `_. - :py:attr:`DataArray.rolling` and :py:attr:`Dataset.rolling` no longer support passing ``keep_attrs`` via its constructor. Pass ``keep_attrs`` via the applied function, i.e. use ``ds.rolling(...).mean(keep_attrs=False)`` instead of ``ds.rolling(..., keep_attrs=False).mean()`` Rolling operations now keep their attributes per default (:pull:`4510`). By `Mathias Hauser `_. New Features ~~~~~~~~~~~~ - :py:func:`open_dataset` and :py:func:`open_mfdataset` now works with ``engine="zarr"`` (:issue:`3668`, :pull:`4003`, :pull:`4187`). By `Miguel Jimenez `_ and `Wei Ji Leong `_. - Unary & binary operations follow the ``keep_attrs`` flag (:issue:`3490`, :issue:`4065`, :issue:`3433`, :issue:`3595`, :pull:`4195`). By `Deepak Cherian `_. - Added :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar()` that returns a Dataset with year, week, and weekday calculated according to the ISO 8601 calendar. Requires pandas version 1.1.0 or greater (:pull:`4534`). By `Mathias Hauser `_, `Maximilian Roos `_, and `Spencer Clark `_. - :py:meth:`Dataset.to_zarr` now supports a ``region`` keyword for writing to limited regions of existing Zarr stores (:pull:`4035`). See :ref:`io.zarr.appending` for full details. By `Stephan Hoyer `_. - Added typehints in :py:func:`align` to reflect that the same type received in ``objects`` arg will be returned (:pull:`4522`). By `Michal Baumgartner `_. - :py:meth:`Dataset.weighted` and :py:meth:`DataArray.weighted` are now executing value checks lazily if weights are provided as dask arrays (:issue:`4541`, :pull:`4559`). By `Julius Busecke `_. - Added the ``keep_attrs`` keyword to ``rolling_exp.mean()``; it now keeps attributes per default. By `Mathias Hauser `_ (:pull:`4592`). - Added ``freq`` as property to :py:class:`CFTimeIndex` and into the ``CFTimeIndex.repr``. (:issue:`2416`, :pull:`4597`) By `Aaron Spring `_. Bug fixes ~~~~~~~~~ - Fix bug where reference times without padded years (e.g. 
``since 1-1-1``) would lose their units when being passed by ``encode_cf_datetime`` (:issue:`4422`, :pull:`4506`). Such units are ambiguous about which digit represents the years (is it YMD or DMY?). Now, if such formatting is encountered, it is assumed that the first digit is the years, they are padded appropriately (to e.g. ``since 0001-1-1``) and a warning that this assumption is being made is issued. Previously, without ``cftime``, such times would be silently parsed incorrectly (at least based on the CF conventions) e.g. "since 1-1-1" would be parsed (via ``pandas`` and ``dateutil``) to ``since 2001-1-1``. By `Zeb Nicholls `_. - Fix :py:meth:`DataArray.plot.step`. By `Deepak Cherian `_. - Fix bug where reading a scalar value from a NetCDF file opened with the ``h5netcdf`` backend would raise a ``ValueError`` when ``decode_cf=True`` (:issue:`4471`, :pull:`4485`). By `Gerrit Holl `_. - Fix bug where datetime64 times are silently changed to incorrect values if they are outside the valid date range for ns precision when provided in some other units (:issue:`4427`, :pull:`4454`). By `Andrew Pauling `_ - Fix silently overwriting the ``engine`` key when passing :py:func:`open_dataset` a file object to an incompatible netCDF (:issue:`4457`). Now incompatible combinations of files and engines raise an exception instead. By `Alessandro Amici `_. - The ``min_count`` argument to :py:meth:`DataArray.sum()` and :py:meth:`DataArray.prod()` is now ignored when not applicable, i.e. when ``skipna=False`` or when ``skipna=None`` and the dtype does not have a missing value (:issue:`4352`). By `Mathias Hauser `_. - :py:func:`combine_by_coords` now raises an informative error when passing coordinates with differing calendars (:issue:`4495`). By `Mathias Hauser `_. - :py:attr:`DataArray.rolling` and :py:attr:`Dataset.rolling` now also keep the attributes and names of of (wrapped) ``DataArray`` objects, previously only the global attributes were retained (:issue:`4497`, :pull:`4510`). By `Mathias Hauser `_. - Improve performance where reading small slices from huge dimensions was slower than necessary (:pull:`4560`). By `Dion Hรคfner `_. - Fix bug where ``dask_gufunc_kwargs`` was silently changed in :py:func:`apply_ufunc` (:pull:`4576`). By `Kai Mรผhlbauer `_. Documentation ~~~~~~~~~~~~~ - document the API not supported with duck arrays (:pull:`4530`). By `Justus Magin `_. - Mention the possibility to pass functions to :py:meth:`Dataset.where` or :py:meth:`DataArray.where` in the parameter documentation (:issue:`4223`, :pull:`4613`). By `Justus Magin `_. - Update the docstring of :py:class:`DataArray` and :py:class:`Dataset`. (:pull:`4532`); By `Jimmy Westling `_. - Raise a more informative error when :py:meth:`DataArray.to_dataframe` is is called on a scalar, (:issue:`4228`); By `Pieter Gijsbers `_. - Fix grammar and typos in the :ref:`contributing` guide (:pull:`4545`). By `Sahid Velji `_. - Fix grammar and typos in the :doc:`user-guide/io` guide (:pull:`4553`). By `Sahid Velji `_. - Update link to NumPy docstring standard in the :ref:`contributing` guide (:pull:`4558`). By `Sahid Velji `_. - Add docstrings to ``isnull`` and ``notnull``, and fix the displayed signature (:issue:`2760`, :pull:`4618`). By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Optional dependencies can be installed along with xarray by specifying extras as ``pip install "xarray[extra]"`` where ``extra`` can be one of ``io``, ``accel``, ``parallel``, ``viz`` and ``complete``. 
See docs for updated :ref:`installation instructions `. (:issue:`2888`, :pull:`4480`). By `Ashwin Vishnu `_, `Justus Magin `_ and `Mathias Hauser `_. - Removed stray spaces that stem from black removing new lines (:pull:`4504`). By `Mathias Hauser `_. - Ensure tests are not skipped in the ``py38-all-but-dask`` test environment (:issue:`4509`). By `Mathias Hauser `_. - Ignore select numpy warnings around missing values, where xarray handles the values appropriately, (:pull:`4536`); By `Maximilian Roos `_. - Replace the internal use of ``pd.Index.__or__`` and ``pd.Index.__and__`` with ``pd.Index.union`` and ``pd.Index.intersection`` as they will stop working as set operations in the future (:issue:`4565`). By `Mathias Hauser `_. - Add GitHub action for running nightly tests against upstream dependencies (:pull:`4583`). By `Anderson Banihirwe `_. - Ensure all figures are closed properly in plot tests (:pull:`4600`). By `Yash Saboo `_, `Nirupam K N `_ and `Mathias Hauser `_. .. _whats-new.0.16.1: v0.16.1 (2020-09-20) --------------------- This patch release fixes an incompatibility with a recent pandas change, which was causing an issue indexing with a ``datetime64``. It also includes improvements to ``rolling``, ``to_dataframe``, ``cov`` & ``corr`` methods and bug fixes. Our documentation has a number of improvements, including fixing all doctests and confirming their accuracy on every commit. Many thanks to the 36 contributors who contributed to this release: Aaron Spring, Akio Taniguchi, Aleksandar Jelenak, Alexandre Poux, Caleb, Dan Nowacki, Deepak Cherian, Gerardo Rivera, Jacob Tomlinson, James A. Bednar, Joe Hamman, Julia Kent, Kai Mรผhlbauer, Keisuke Fujii, Mathias Hauser, Maximilian Roos, Nick R. Papior, Pascal Bourgault, Peter Hausamann, Romain Martinez, Russell Manser, Samnan Rahee, Sander, Spencer Clark, Stephan Hoyer, Thomas Zilio, Tobias Kรถlling, Tom Augspurger, alexamici, crusaderky, darikg, inakleinbottle, jenssss, johnomotani, keewis, and rpgoldman. Breaking changes ~~~~~~~~~~~~~~~~ - :py:meth:`DataArray.astype` and :py:meth:`Dataset.astype` now preserve attributes. Keep the old behavior by passing ``keep_attrs=False`` (:issue:`2049`, :pull:`4314`). By `Dan Nowacki `_ and `Gabriel Joel Mitchell `_. New Features ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.rolling` and :py:meth:`~xarray.Dataset.rolling` now accept more than 1 dimension. (:pull:`4219`) By `Keisuke Fujii `_. - :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~xarray.Dataset.to_dataframe` now accept a ``dim_order`` parameter allowing to specify the resulting dataframe's dimensions order (:issue:`4331`, :pull:`4333`). By `Thomas Zilio `_. - Support multiple outputs in :py:func:`xarray.apply_ufunc` when using ``dask='parallelized'``. (:issue:`1815`, :pull:`4060`). By `Kai Mรผhlbauer `_. - ``min_count`` can be supplied to reductions such as ``.sum`` when specifying multiple dimension to reduce over; (:pull:`4356`). By `Maximilian Roos `_. - :py:func:`xarray.cov` and :py:func:`xarray.corr` now handle missing values; (:pull:`4351`). By `Maximilian Roos `_. - Add support for parsing datetime strings formatted following the default string representation of cftime objects, i.e. YYYY-MM-DD hh:mm:ss, in partial datetime string indexing, as well as :py:meth:`~xarray.cftime_range` (:issue:`4337`). By `Spencer Clark `_. - Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. 
Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) By `Aaron Spring `_. - Use a wrapped array's ``_repr_inline_`` method to construct the collapsed ``repr`` of :py:class:`DataArray` and :py:class:`Dataset` objects and document the new method in :doc:`internals/index`. (:pull:`4248`). By `Justus Magin `_. - Allow per-variable fill values in most functions. (:pull:`4237`). By `Justus Magin `_. - Expose ``use_cftime`` option in :py:func:`~xarray.open_zarr` (:issue:`2886`, :pull:`3229`) By `Samnan Rahee `_ and `Anderson Banihirwe `_. Bug fixes ~~~~~~~~~ - Fix indexing with datetime64 scalars with pandas 1.1 (:issue:`4283`). By `Stephan Hoyer `_ and `Justus Magin `_. - Variables which are chunked using dask only along some dimensions can be chunked while storing with zarr along previously unchunked dimensions (:pull:`4312`) By `Tobias Kรถlling `_. - Fixed a bug in backend caused by basic installation of Dask (:issue:`4164`, :pull:`4318`) `Sam Morley `_. - Fixed a few bugs with :py:meth:`Dataset.polyfit` when encountering deficient matrix ranks (:issue:`4190`, :pull:`4193`). By `Pascal Bourgault `_. - Fixed inconsistencies between docstring and functionality for :py:meth:`DataArray.str.get` and :py:meth:`DataArray.str.wrap` (:issue:`4334`). By `Mathias Hauser `_. - Fixed overflow issue causing incorrect results in computing means of :py:class:`cftime.datetime` arrays (:issue:`4341`). By `Spencer Clark `_. - Fixed :py:meth:`Dataset.coarsen`, :py:meth:`DataArray.coarsen` dropping attributes on original object (:issue:`4120`, :pull:`4360`). By `Julia Kent `_. - fix the signature of the plot methods. (:pull:`4359`) By `Justus Magin `_. - Fix :py:func:`xarray.apply_ufunc` with ``vectorize=True`` and ``exclude_dims`` (:issue:`3890`). By `Mathias Hauser `_. - Fix ``KeyError`` when doing linear interpolation to an nd ``DataArray`` that contains NaNs (:pull:`4233`). By `Jens Svensmark `_ - Fix incorrect legend labels for :py:meth:`Dataset.plot.scatter` (:issue:`4126`). By `Peter Hausamann `_. - Fix ``dask.optimize`` on ``DataArray`` producing an invalid Dask task graph (:issue:`3698`) By `Tom Augspurger `_ - Fix ``pip install .`` when no ``.git`` directory exists; namely when the xarray source directory has been rsync'ed by PyCharm Professional for a remote deployment over SSH. By `Guido Imperiale `_ - Preserve dimension and coordinate order during :py:func:`xarray.concat` (:issue:`2811`, :issue:`4072`, :pull:`4419`). By `Kai Mรผhlbauer `_. - Avoid relying on :py:class:`set` objects for the ordering of the coordinates (:pull:`4409`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - Update the docstring of :py:meth:`DataArray.copy` to remove incorrect mention of 'dataset' (:issue:`3606`) By `Sander van Rijn `_. - Removed skipna argument from :py:meth:`DataArray.count`, :py:meth:`DataArray.any`, :py:meth:`DataArray.all`. (:issue:`755`) By `Sander van Rijn `_ - Update the contributing guide to use merges instead of rebasing and state that we squash-merge. (:pull:`4355`). By `Justus Magin `_. - Make sure the examples from the docstrings actually work (:pull:`4408`). By `Justus Magin `_. - Updated Vectorized Indexing to a clearer example. By `Maximilian Roos `_ Internal Changes ~~~~~~~~~~~~~~~~ - Fixed all doctests and enabled their running in CI. By `Justus Magin `_. 
- Relaxed the :ref:`mindeps_policy` to support: - all versions of setuptools released in the last 42 months (but no older than 38.4) - all versions of dask and dask.distributed released in the last 12 months (but no older than 2.9) - all versions of other packages released in the last 12 months All are up from 6 months (:issue:`4295`) `Guido Imperiale `_. - Use :py:func:`dask.array.apply_gufunc ` instead of :py:func:`dask.array.blockwise` in :py:func:`xarray.apply_ufunc` when using ``dask='parallelized'``. (:pull:`4060`, :pull:`4391`, :pull:`4392`) By `Kai Mรผhlbauer `_. - Align ``mypy`` versions to ``0.782`` across ``requirements`` and ``.pre-commit-config.yml`` files. (:pull:`4390`) By `Maximilian Roos `_ - Only load resource files when running inside a Jupyter Notebook (:issue:`4294`) By `Guido Imperiale `_ - Silenced most ``numpy`` warnings such as ``Mean of empty slice``. (:pull:`4369`) By `Maximilian Roos `_ - Enable type checking for :py:func:`concat` (:issue:`4238`) By `Mathias Hauser `_. - Updated plot functions for matplotlib version 3.3 and silenced warnings in the plot tests (:pull:`4365`). By `Mathias Hauser `_. - Versions in ``pre-commit.yaml`` are now pinned, to reduce the chances of conflicting versions. (:pull:`4388`) By `Maximilian Roos `_ .. _whats-new.0.16.0: v0.16.0 (2020-07-11) --------------------- This release adds ``xarray.cov`` & ``xarray.corr`` for covariance & correlation respectively; the ``idxmax`` & ``idxmin`` methods, the ``polyfit`` method & ``xarray.polyval`` for fitting polynomials, as well as a number of documentation improvements, other features, and bug fixes. Many thanks to all 44 contributors who contributed to this release: Akio Taniguchi, Andrew Williams, Aurรฉlien Ponte, Benoit Bovy, Dave Cole, David Brochart, Deepak Cherian, Elliott Sales de Andrade, Etienne Combrisson, Hossein Madadi, Huite, Joe Hamman, Kai Mรผhlbauer, Keisuke Fujii, Maik Riechert, Marek Jacob, Mathias Hauser, Matthieu Ancellin, Maximilian Roos, Noah D Brenowitz, Oriol Abril, Pascal Bourgault, Phillip Butcher, Prajjwal Nijhara, Ray Bell, Ryan Abernathey, Ryan May, Spencer Clark, Spencer Hill, Srijan Saurav, Stephan Hoyer, Taher Chegini, Todd, Tom Nicholas, Yohai Bar Sinai, Yunus Sevinchan, arabidopsis, aurghs, clausmichele, dmey, johnomotani, keewis, raphael dussin, risebell Breaking changes ~~~~~~~~~~~~~~~~ - Minimum supported versions for the following packages have changed: ``dask >=2.9``, ``distributed>=2.9``. By `Deepak Cherian `_ - ``groupby`` operations will restore coord dimension order. Pass ``restore_coord_dims=False`` to revert to previous behavior. - :meth:`DataArray.transpose` will now transpose coordinates by default. Pass ``transpose_coords=False`` to revert to previous behaviour. By `Maximilian Roos `_ - Alternate draw styles for :py:meth:`plot.step` must be passed using the ``drawstyle`` (or ``ds``) keyword argument, instead of the ``linestyle`` (or ``ls``) keyword argument, in line with the `upstream change in Matplotlib `_. (:pull:`3274`) By `Elliott Sales de Andrade `_ - The old ``auto_combine`` function has now been removed in favour of the :py:func:`combine_by_coords` and :py:func:`combine_nested` functions. This also means that the default behaviour of :py:func:`open_mfdataset` has changed to use ``combine='by_coords'`` as the default argument value. (:issue:`2616`, :pull:`3926`) By `Tom Nicholas `_. - The ``DataArray`` and ``Variable`` HTML reprs now expand the data section by default (:issue:`4176`) By `Stephan Hoyer `_. 
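
A minimal sketch of migrating away from the removed ``auto_combine`` mentioned
above (the datasets here are illustrative, not taken from the release notes)::

    import xarray as xr

    ds0 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [0, 1]})
    ds1 = xr.Dataset({"a": ("x", [3, 4])}, coords={"x": [2, 3]})

    # combine explicitly by coordinate values ...
    combined = xr.combine_by_coords([ds0, ds1])

    # ... or by the nesting order of the inputs
    combined = xr.combine_nested([ds0, ds1], concat_dim="x")

``open_mfdataset`` now behaves as if ``combine="by_coords"`` had been passed;
supply ``combine="nested"`` together with ``concat_dim`` to keep the old
ordering-based behaviour.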
New Features ~~~~~~~~~~~~ - :py:meth:`DataArray.argmin` and :py:meth:`DataArray.argmax` now support sequences of 'dim' arguments, and if a sequence is passed return a dict (which can be passed to :py:meth:`DataArray.isel` to get the value of the minimum) of the indices for each dimension of the minimum or maximum of a DataArray. (:pull:`3936`) By `John Omotani `_, thanks to `Keisuke Fujii `_ for work in :pull:`1469`. - Added :py:func:`xarray.cov` and :py:func:`xarray.corr` (:issue:`3784`, :pull:`3550`, :pull:`4089`). By `Andrew Williams `_ and `Robin Beer `_. - Implement :py:meth:`DataArray.idxmax`, :py:meth:`DataArray.idxmin`, :py:meth:`Dataset.idxmax`, :py:meth:`Dataset.idxmin`. (:issue:`60`, :pull:`3871`) By `Todd Jennings `_ - Added :py:meth:`DataArray.polyfit` and :py:func:`xarray.polyval` for fitting polynomials. (:issue:`3349`, :pull:`3733`, :pull:`4099`) By `Pascal Bourgault `_. - Added :py:meth:`xarray.infer_freq` for extending frequency inferring to CFTime indexes and data (:pull:`4033`). By `Pascal Bourgault `_. - ``chunks='auto'`` is now supported in the ``chunks`` argument of :py:meth:`Dataset.chunk`. (:issue:`4055`) By `Andrew Williams `_ - Control over attributes of result in :py:func:`merge`, :py:func:`concat`, :py:func:`combine_by_coords` and :py:func:`combine_nested` using combine_attrs keyword argument. (:issue:`3865`, :pull:`3877`) By `John Omotani `_ - ``missing_dims`` argument to :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel` and :py:meth:`Variable.isel` to allow replacing the exception when a dimension passed to ``isel`` is not present with a warning, or just ignore the dimension. (:issue:`3866`, :pull:`3923`) By `John Omotani `_ - Support dask handling for :py:meth:`DataArray.idxmax`, :py:meth:`DataArray.idxmin`, :py:meth:`Dataset.idxmax`, :py:meth:`Dataset.idxmin`. (:pull:`3922`, :pull:`4135`) By `Kai Mรผhlbauer `_ and `Pascal Bourgault `_. - More support for unit aware arrays with pint (:pull:`3643`, :pull:`3975`, :pull:`4163`) By `Justus Magin `_. - Support overriding existing variables in ``to_zarr()`` with ``mode='a'`` even without ``append_dim``, as long as dimension sizes do not change. By `Stephan Hoyer `_. - Allow plotting of boolean arrays. (:pull:`3766`) By `Marek Jacob `_ - Enable using MultiIndex levels as coordinates in 1D and 2D plots (:issue:`3927`). By `Mathias Hauser `_. - A ``days_in_month`` accessor for :py:class:`xarray.CFTimeIndex`, analogous to the ``days_in_month`` accessor for a :py:class:`pandas.DatetimeIndex`, which returns the days in the month each datetime in the index. Now days in month weights for both standard and non-standard calendars can be obtained using the :py:class:`~core.accessor_dt.DatetimeAccessor` (:pull:`3935`). This feature requires cftime version 1.1.0 or greater. By `Spencer Clark `_. - For the netCDF3 backend, added dtype coercions for unsigned integer types. (:issue:`4014`, :pull:`4018`) By `Yunus Sevinchan `_ - :py:meth:`map_blocks` now accepts a ``template`` kwarg. This allows use cases where the result of a computation could not be inferred automatically. By `Deepak Cherian `_ - :py:meth:`map_blocks` can now handle dask-backed xarray objects in ``args``. 
  (:pull:`3818`) By `Deepak Cherian `_
- Add keyword ``decode_timedelta`` to :py:func:`xarray.open_dataset`,
  (:py:func:`xarray.open_dataarray`, :py:func:`xarray.decode_cf`) that allows
  disabling or enabling the decoding of timedeltas independently of time
  decoding (:issue:`1621`)
  `Aureliana Barghini `_

Enhancements
~~~~~~~~~~~~
- Performance improvement of :py:meth:`DataArray.interp` and :py:meth:`Dataset.interp`.
  We now perform independent interpolation sequentially rather than interpolating
  in one large multidimensional space. (:issue:`2223`)
  By `Keisuke Fujii `_.
- :py:meth:`DataArray.interp` now supports interpolation over chunked dimensions (:pull:`4155`).
  By `Alexandre Poux `_.
- Major performance improvement for :py:meth:`Dataset.from_dataframe` when the
  dataframe has a MultiIndex (:pull:`4184`).
  By `Stephan Hoyer `_.
- :py:meth:`DataArray.reset_index` and :py:meth:`Dataset.reset_index` now keep
  coordinate attributes (:pull:`4103`).
  By `Oriol Abril `_.
- Axes kwargs such as ``facecolor`` can now be passed to :py:meth:`DataArray.plot` in ``subplot_kws``.
  This works for both single axes plots and FacetGrid plots.
  By `Raphael Dussin `_.
- Array items with long string reprs are now limited to a reasonable width (:pull:`3900`)
  By `Maximilian Roos `_
- Large arrays whose numpy reprs would have greater than 40 lines are now
  limited to a reasonable length. (:pull:`3905`)
  By `Maximilian Roos `_

Bug fixes
~~~~~~~~~
- Fix errors combining attrs in :py:func:`open_mfdataset` (:issue:`4009`, :pull:`4173`)
  By `John Omotani `_
- If groupby receives a ``DataArray`` with name=None, assign a default name (:issue:`158`)
  By `Phil Butcher `_.
- Support dark mode in VS code (:issue:`4024`)
  By `Keisuke Fujii `_.
- Fix bug when converting multiindexed pandas objects to sparse xarray objects. (:issue:`4019`)
  By `Deepak Cherian `_.
- ``ValueError`` is raised when ``fill_value`` is not a scalar in :py:meth:`full_like`. (:issue:`3977`)
  By `Huite Bootsma `_.
- Fix wrong order in converting a ``pd.Series`` with a MultiIndex to ``DataArray``. (:issue:`3951`, :issue:`4186`)
  By `Keisuke Fujii `_ and `Stephan Hoyer `_.
- Fix renaming of coords when one or more stacked coords is not in sorted order during
  stack+groupby+apply operations. (:issue:`3287`, :pull:`3906`)
  By `Spencer Hill `_
- Fix a regression where deleting a coordinate from a copied :py:class:`DataArray`
  can affect the original :py:class:`DataArray`. (:issue:`3899`, :pull:`3871`)
  By `Todd Jennings `_
- Fix :py:class:`~xarray.plot.FacetGrid` plots with a single contour. (:issue:`3569`, :pull:`3915`).
  By `Deepak Cherian `_
- Use divergent colormap if ``levels`` spans 0. (:issue:`3524`)
  By `Deepak Cherian `_
- Fix :py:class:`~xarray.plot.FacetGrid` when ``vmin == vmax``. (:issue:`3734`)
  By `Deepak Cherian `_
- Fix plotting when ``levels`` is a scalar and ``norm`` is provided. (:issue:`3735`)
  By `Deepak Cherian `_
- Fix bug where plotting line plots with 2D coordinates depended on dimension order. (:issue:`3933`)
  By `Tom Nicholas `_.
- Fix ``RasterioDeprecationWarning`` when using a ``vrt`` in ``open_rasterio``. (:issue:`3964`)
  By `Taher Chegini `_.
- Fix ``AttributeError`` on displaying a :py:class:`Variable` in a notebook context. (:issue:`3972`, :pull:`3973`)
  By `Ian Castleden `_.
- Fix bug causing :py:meth:`DataArray.interpolate_na` to always drop attributes,
  and added ``keep_attrs`` argument. (:issue:`3968`)
  By `Tom Nicholas `_.
- Fix bug in time parsing failing to fall back to cftime.
This was causing time variables with a time unit of ``'msecs'`` to fail to parse. (:pull:`3998`) By `Ryan May `_. - Fix weighted mean when passing boolean weights (:issue:`4074`). By `Mathias Hauser `_. - Fix html repr in untrusted notebooks: fallback to plain text repr. (:pull:`4053`) By `Benoit Bovy `_. - Fix :py:meth:`DataArray.to_unstacked_dataset` for single-dimension variables. (:issue:`4049`) By `Deepak Cherian `_ - Fix :py:func:`open_rasterio` for ``WarpedVRT`` with specified ``src_crs``. (:pull:`4104`) By `Dave Cole `_. Documentation ~~~~~~~~~~~~~ - update the docstring of :py:meth:`DataArray.assign_coords` : clarify how to add a new coordinate to an existing dimension and illustrative example (:issue:`3952`, :pull:`3958`) By `Etienne Combrisson `_. - update the docstring of :py:meth:`Dataset.diff` and :py:meth:`DataArray.diff` so it does document the ``dim`` parameter as required. (:issue:`1040`, :pull:`3909`) By `Justus Magin `_. - Updated :doc:`Calculating Seasonal Averages from Timeseries of Monthly Means ` example notebook to take advantage of the new ``days_in_month`` accessor for :py:class:`xarray.CFTimeIndex` (:pull:`3935`). By `Spencer Clark `_. - Updated the list of current core developers. (:issue:`3892`) By `Tom Nicholas `_. - Add example for multi-dimensional extrapolation and note different behavior of ``kwargs`` in :py:meth:`Dataset.interp` and :py:meth:`DataArray.interp` for 1-d and n-d interpolation (:pull:`3956`). By `Matthias RiรŸe `_. - Apply ``black`` to all the code in the documentation (:pull:`4012`) By `Justus Magin `_. - Narrative documentation now describes :py:meth:`map_blocks`: :ref:`dask.automatic-parallelization`. By `Deepak Cherian `_. - Document ``.plot``, ``.dt``, ``.str`` accessors the way they are called. (:issue:`3625`, :pull:`3988`) By `Justus Magin `_. - Add documentation for the parameters and return values of :py:meth:`DataArray.sel`. By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Raise more informative error messages for chunk size conflicts when writing to zarr files. By `Deepak Cherian `_. - Run the ``isort`` pre-commit hook only on python source files and update the ``flake8`` version. (:issue:`3750`, :pull:`3711`) By `Justus Magin `_. - Add `blackdoc `_ to the list of checkers for development. (:pull:`4177`) By `Justus Magin `_. - Add a CI job that runs the tests with every optional dependency except ``dask``. (:issue:`3794`, :pull:`3919`) By `Justus Magin `_. - Use ``async`` / ``await`` for the asynchronous distributed tests. (:issue:`3987`, :pull:`3989`) By `Justus Magin `_. - Various internal code clean-ups (:pull:`4026`, :pull:`4038`). By `Prajjwal Nijhara `_. .. _whats-new.0.15.1: v0.15.1 (23 Mar 2020) --------------------- This release brings many new features such as :py:meth:`Dataset.weighted` methods for weighted array reductions, a new jupyter repr by default, and the start of units integration with pint. There's also the usual batch of usability improvements, documentation additions, and bug fixes. Breaking changes ~~~~~~~~~~~~~~~~ - Raise an error when assigning to the ``.values`` or ``.data`` attribute of dimension coordinates i.e. ``IndexVariable`` objects. This has been broken since v0.12.0. Please use :py:meth:`DataArray.assign_coords` or :py:meth:`Dataset.assign_coords` instead. (:issue:`3470`, :pull:`3862`) By `Deepak Cherian `_ New Features ~~~~~~~~~~~~ - Weighted array reductions are now supported via the new :py:meth:`DataArray.weighted` and :py:meth:`Dataset.weighted` methods. See :ref:`compute.weighted`. 
(:issue:`422`, :pull:`2922`). By `Mathias Hauser `_. - The new jupyter notebook repr (``Dataset._repr_html_`` and ``DataArray._repr_html_``) (introduced in 0.14.1) is now on by default. To disable, use ``xarray.set_options(display_style="text")``. By `Julia Signell `_. - Added support for :py:class:`pandas.DatetimeIndex`-style rounding of ``cftime.datetime`` objects directly via a :py:class:`CFTimeIndex` or via the :py:class:`~core.accessor_dt.DatetimeAccessor`. By `Spencer Clark `_ - Support new h5netcdf backend keyword ``phony_dims`` (available from h5netcdf v0.8.0 for :py:class:`~xarray.backends.H5NetCDFStore`. By `Kai Mรผhlbauer `_. - Add partial support for unit aware arrays with pint. (:pull:`3706`, :pull:`3611`) By `Justus Magin `_. - :py:meth:`Dataset.groupby` and :py:meth:`DataArray.groupby` now raise a ``TypeError`` on multiple string arguments. Receiving multiple string arguments often means a user is attempting to pass multiple dimensions as separate arguments and should instead pass a single list of dimensions. (:pull:`3802`) By `Maximilian Roos `_ - :py:func:`map_blocks` can now apply functions that add new unindexed dimensions. By `Deepak Cherian `_ - An ellipsis (``...``) is now supported in the ``dims`` argument of :py:meth:`Dataset.stack` and :py:meth:`DataArray.stack`, meaning all unlisted dimensions, similar to its meaning in :py:meth:`DataArray.transpose`. (:pull:`3826`) By `Maximilian Roos `_ - :py:meth:`Dataset.where` and :py:meth:`DataArray.where` accept a lambda as a first argument, which is then called on the input; replicating pandas' behavior. By `Maximilian Roos `_. - ``skipna`` is available in :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile`, :py:meth:`core.groupby.DatasetGroupBy.quantile`, :py:meth:`core.groupby.DataArrayGroupBy.quantile` (:issue:`3843`, :pull:`3844`) By `Aaron Spring `_. - Add a diff summary for ``testing.assert_allclose``. (:issue:`3617`, :pull:`3847`) By `Justus Magin `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`Dataset.interp` when indexing array shares coordinates with the indexed variable (:issue:`3252`). By `David Huard `_. - Fix recombination of groups in :py:meth:`Dataset.groupby` and :py:meth:`DataArray.groupby` when performing an operation that changes the size of the groups along the grouped dimension. By `Eric Jansen `_. - Fix use of multi-index with categorical values (:issue:`3674`). By `Matthieu Ancellin `_. - Fix alignment with ``join="override"`` when some dimensions are unindexed. (:issue:`3681`). By `Deepak Cherian `_. - Fix :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` producing index with name reflecting the previous dimension name instead of the new one (:issue:`3748`, :pull:`3752`). By `Joseph K Aicher `_. - Use ``dask_array_type`` instead of ``dask_array.Array`` for type checking. (:issue:`3779`, :pull:`3787`) By `Justus Magin `_. - :py:func:`concat` can now handle coordinate variables only present in one of the objects to be concatenated when ``coords="different"``. By `Deepak Cherian `_. - xarray now respects the over, under and bad colors if set on a provided colormap. (:issue:`3590`, :pull:`3601`) By `johnomotani `_. - ``coarsen`` and ``rolling`` now respect ``xr.set_options(keep_attrs=True)`` to preserve attributes. :py:meth:`Dataset.coarsen` accepts a keyword argument ``keep_attrs`` to change this setting. (:issue:`3376`, :pull:`3801`) By `Andrew Thomas `_. - Delete associated indexes when deleting coordinate variables. (:issue:`3746`). By `Deepak Cherian `_. 
- Fix :py:meth:`Dataset.to_zarr` when using ``append_dim`` and ``group`` simultaneously. (:issue:`3170`). By `Matthias Meyer `_. - Fix html repr on :py:class:`Dataset` with non-string keys (:pull:`3807`). By `Maximilian Roos `_. Documentation ~~~~~~~~~~~~~ - Fix documentation of :py:class:`DataArray` removing the deprecated mention that when omitted, ``dims`` are inferred from a ``coords``-dict. (:pull:`3821`) By `Sander van Rijn `_. - Improve the :py:func:`where` docstring. By `Maximilian Roos `_ - Update the installation instructions: only explicitly list recommended dependencies (:issue:`3756`). By `Mathias Hauser `_. Internal Changes ~~~~~~~~~~~~~~~~ - Remove the internal ``import_seaborn`` function which handled the deprecation of the ``seaborn.apionly`` entry point (:issue:`3747`). By `Mathias Hauser `_. - Don't test pint integration in combination with datetime objects. (:issue:`3778`, :pull:`3788`) By `Justus Magin `_. - Change test_open_mfdataset_list_attr to only run with dask installed (:issue:`3777`, :pull:`3780`). By `Bruno Pagani `_. - Preserve the ability to index with ``method="nearest"`` with a :py:class:`CFTimeIndex` with pandas versions greater than 1.0.1 (:issue:`3751`). By `Spencer Clark `_. - Greater flexibility and improved test coverage of subtracting various types of objects from a :py:class:`CFTimeIndex`. By `Spencer Clark `_. - Update Azure CI MacOS image, given pending removal. By `Maximilian Roos `_ - Remove xfails for scipy 1.0.1 for tests that append to netCDF files (:pull:`3805`). By `Mathias Hauser `_. - Remove conversion to ``pandas.Panel``, given its removal in pandas in favor of xarray's objects. By `Maximilian Roos `_ .. _whats-new.0.15.0: v0.15.0 (30 Jan 2020) --------------------- This release brings many improvements to xarray's documentation: our examples are now binderized notebooks (`click here `_) and we have new example notebooks from our SciPy 2019 sprint (many thanks to our contributors!). This release also features many API improvements such as a new :py:class:`~core.accessor_dt.TimedeltaAccessor` and support for :py:class:`CFTimeIndex` in :py:meth:`~DataArray.interpolate_na`); as well as many bug fixes. Breaking changes ~~~~~~~~~~~~~~~~ - Bumped minimum tested versions for dependencies: - numpy 1.15 - pandas 0.25 - dask 2.2 - distributed 2.2 - scipy 1.3 - Remove ``compat`` and ``encoding`` kwargs from ``DataArray``, which have been deprecated since 0.12. (:pull:`3650`). Instead, specify the ``encoding`` kwarg when writing to disk or set the :py:attr:`DataArray.encoding` attribute directly. By `Maximilian Roos `_. - :py:func:`xarray.dot`, :py:meth:`DataArray.dot`, and the ``@`` operator now use ``align="inner"`` (except when ``xarray.set_options(arithmetic_join="exact")``; :issue:`3694`) by `Mathias Hauser `_. New Features ~~~~~~~~~~~~ - Implement :py:meth:`DataArray.pad` and :py:meth:`Dataset.pad`. (:issue:`2605`, :pull:`3596`). By `Mark Boer `_. - :py:meth:`DataArray.sel` and :py:meth:`Dataset.sel` now support :py:class:`pandas.CategoricalIndex`. (:issue:`3669`) By `Keisuke Fujii `_. - Support using an existing, opened h5netcdf ``File`` with :py:class:`~xarray.backends.H5NetCDFStore`. This permits creating an :py:class:`~xarray.Dataset` from a h5netcdf ``File`` that has been opened using other means (:issue:`3618`). By `Kai Mรผhlbauer `_. - Implement ``median`` and ``nanmedian`` for dask arrays. This works by rechunking to a single chunk along all reduction axes. (:issue:`2999`). By `Deepak Cherian `_. 
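
  A minimal, self-contained sketch of the new behaviour (requires dask to be
  installed; the shape and chunk size below are arbitrary)::

      import numpy as np
      import xarray as xr

      da = xr.DataArray(np.random.rand(100, 10), dims=("time", "x"))
      # median on a dask-backed array now works; internally the data is
      # rechunked to a single chunk along the reduced dimension
      da.chunk({"time": 25}).median("time")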
- :py:func:`~xarray.concat` now preserves attributes from the first Variable. (:issue:`2575`, :issue:`2060`, :issue:`1614`) By `Deepak Cherian `_. - :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` and ``GroupBy.quantile`` now work with dask Variables. By `Deepak Cherian `_. - Added the ``count`` reduction method to both :py:class:`~computation.rolling.DatasetCoarsen` and :py:class:`~computation.rolling.DataArrayCoarsen` objects. (:pull:`3500`) By `Deepak Cherian `_ - Add ``meta`` kwarg to :py:func:`~xarray.apply_ufunc`; this is passed on to :py:func:`dask.array.blockwise`. (:pull:`3660`) By `Deepak Cherian `_. - Add ``attrs_file`` option in :py:func:`~xarray.open_mfdataset` to choose the source file for global attributes in a multi-file dataset (:issue:`2382`, :pull:`3498`). By `Julien Seguinot `_. - :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` now allow swapping to dimension names that don't exist yet. (:pull:`3636`) By `Justus Magin `_. - Extend :py:class:`~core.accessor_dt.DatetimeAccessor` properties and support ``.dt`` accessor for timedeltas via :py:class:`~core.accessor_dt.TimedeltaAccessor` (:pull:`3612`) By `Anderson Banihirwe `_. - Improvements to interpolating along time axes (:issue:`3641`, :pull:`3631`). By `David Huard `_. - Support :py:class:`CFTimeIndex` in :py:meth:`DataArray.interpolate_na` - define 1970-01-01 as the default offset for the interpolation index for both :py:class:`pandas.DatetimeIndex` and :py:class:`CFTimeIndex`, - use microseconds in the conversion from timedelta objects to floats to avoid overflow errors. Bug fixes ~~~~~~~~~ - Applying a user-defined function that adds new dimensions using :py:func:`apply_ufunc` and ``vectorize=True`` now works with ``dask > 2.0``. (:issue:`3574`, :pull:`3660`). By `Deepak Cherian `_. - Fix :py:meth:`~xarray.combine_by_coords` to allow for combining incomplete hypercubes of Datasets (:issue:`3648`). By `Ian Bolliger `_. - Fix :py:func:`~xarray.combine_by_coords` when combining cftime coordinates which span long time intervals (:issue:`3535`). By `Spencer Clark `_. - Fix plotting with transposed 2D non-dimensional coordinates. (:issue:`3138`, :pull:`3441`) By `Deepak Cherian `_. - :py:meth:`plot.FacetGrid.set_titles` can now replace existing row titles of a :py:class:`~xarray.plot.FacetGrid` plot. In addition :py:class:`~xarray.plot.FacetGrid` gained two new attributes: :py:attr:`~xarray.plot.FacetGrid.col_labels` and :py:attr:`~xarray.plot.FacetGrid.row_labels` contain :py:class:`matplotlib.text.Text` handles for both column and row labels. These can be used to manually change the labels. By `Deepak Cherian `_. - Fix issue with Dask-backed datasets raising a ``KeyError`` on some computations involving :py:func:`map_blocks` (:pull:`3598`). By `Tom Augspurger `_. - Ensure :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` issue the correct error when ``q`` is out of bounds (:issue:`3634`) by `Mathias Hauser `_. - Fix regression in xarray 0.14.1 that prevented encoding times with certain ``dtype``, ``_FillValue``, and ``missing_value`` encodings (:issue:`3624`). By `Spencer Clark `_ - Raise an error when trying to use :py:meth:`Dataset.rename_dims` to rename to an existing name (:issue:`3438`, :pull:`3645`) By `Justus Magin `_. - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename` now check for conflicts with MultiIndex level names. - :py:meth:`Dataset.merge` no longer fails when passed a :py:class:`DataArray` instead of a :py:class:`Dataset`. By `Tom Nicholas `_. 
- Fix a regression in :py:meth:`Dataset.drop`: allow passing any iterable when dropping variables (:issue:`3552`, :pull:`3693`) By `Justus Magin `_. - Fixed errors emitted by ``mypy --strict`` in modules that import xarray. (:issue:`3695`) by `Guido Imperiale `_. - Allow plotting of binned coordinates on the y axis in :py:meth:`plot.line` and :py:meth:`plot.step` plots (:issue:`3571`, :pull:`3685`) by `Julien Seguinot `_. - setuptools is now marked as a dependency of xarray (:pull:`3628`) by `Richard Hรถchenberger `_. Documentation ~~~~~~~~~~~~~ - Switch doc examples to use `nbsphinx `_ and replace ``sphinx_gallery`` scripts with Jupyter notebooks. (:pull:`3105`, :pull:`3106`, :pull:`3121`) By `Ryan Abernathey `_. - Added :doc:`example notebook ` demonstrating use of xarray with Regional Ocean Modeling System (ROMS) ocean hydrodynamic model output. (:pull:`3116`) By `Robert Hetland `_. - Added :doc:`example notebook ` demonstrating the visualization of ERA5 GRIB data. (:pull:`3199`) By `Zach Bruick `_ and `Stephan Siemen `_. - Added examples for :py:meth:`DataArray.quantile`, :py:meth:`Dataset.quantile` and ``GroupBy.quantile``. (:pull:`3576`) By `Justus Magin `_. - Add new :doc:`example notebook ` example notebook demonstrating vectorization of a 1D function using :py:func:`apply_ufunc` , dask and numba. By `Deepak Cherian `_. - Added example for :py:func:`~xarray.map_blocks`. (:pull:`3667`) By `Riley X. Brady `_. Internal Changes ~~~~~~~~~~~~~~~~ - Make sure dask names change when rechunking by different chunk sizes. Conversely, make sure they stay the same when rechunking by the same chunk size. (:issue:`3350`) By `Deepak Cherian `_. - 2x to 5x speed boost (on small arrays) for :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel`, and :py:meth:`DataArray.__getitem__` when indexing by int, slice, list of int, scalar ndarray, or 1-dimensional ndarray. (:pull:`3533`) by `Guido Imperiale `_. - Removed internal method ``Dataset._from_vars_and_coord_names``, which was dominated by ``Dataset._construct_direct``. (:pull:`3565`) By `Maximilian Roos `_. - Replaced versioneer with setuptools-scm. Moved contents of setup.py to setup.cfg. Removed pytest-runner from setup.py, as per deprecation notice on the pytest-runner project. (:pull:`3714`) by `Guido Imperiale `_. - Use of isort is now enforced by CI. (:pull:`3721`) by `Guido Imperiale `_ .. _whats-new.0.14.1: v0.14.1 (19 Nov 2019) --------------------- Breaking changes ~~~~~~~~~~~~~~~~ - Broken compatibility with ``cftime < 1.0.3`` . By `Deepak Cherian `_. .. warning:: cftime version 1.0.4 is broken (`cftime/126 `_); please use version 1.0.4.2 instead. - All leftover support for dates from non-standard calendars through ``netcdftime``, the module included in versions of netCDF4 prior to 1.4 that eventually became the `cftime `_ package, has been removed in favor of relying solely on the standalone ``cftime`` package (:pull:`3450`). By `Spencer Clark `_. New Features ~~~~~~~~~~~~ - Added the ``sparse`` option to :py:meth:`~xarray.DataArray.unstack`, :py:meth:`~xarray.Dataset.unstack`, :py:meth:`~xarray.DataArray.reindex`, :py:meth:`~xarray.Dataset.reindex` (:issue:`3518`). By `Keisuke Fujii `_. - Added the ``fill_value`` option to :py:meth:`DataArray.unstack` and :py:meth:`Dataset.unstack` (:issue:`3518`, :pull:`3541`). By `Keisuke Fujii `_. - Added the ``max_gap`` kwarg to :py:meth:`~xarray.DataArray.interpolate_na` and :py:meth:`~xarray.Dataset.interpolate_na`. This controls the maximum size of the data gap that will be filled by interpolation. 
By `Deepak Cherian `_. - Added :py:meth:`Dataset.drop_sel` & :py:meth:`DataArray.drop_sel` for dropping labels. :py:meth:`Dataset.drop_vars` & :py:meth:`DataArray.drop_vars` have been added for dropping variables (including coordinates). The existing :py:meth:`Dataset.drop` & :py:meth:`DataArray.drop` methods remain as a backward compatible option for dropping either labels or variables, but using the more specific methods is encouraged. (:pull:`3475`) By `Maximilian Roos `_ - Added :py:meth:`Dataset.map` & ``GroupBy.map`` & ``Resample.map`` for mapping / applying a function over each item in the collection, reflecting the widely used and least surprising name for this operation. The existing ``apply`` methods remain for backward compatibility, though using the ``map`` methods is encouraged. (:pull:`3459`) By `Maximilian Roos `_ - :py:meth:`Dataset.transpose` and :py:meth:`DataArray.transpose` now support an ellipsis (``...``) to represent all 'other' dimensions. For example, to move one dimension to the front, use ``.transpose('x', ...)``. (:pull:`3421`) By `Maximilian Roos `_ - Changed ``xr.ALL_DIMS`` to equal python's ``Ellipsis`` (``...``), and changed internal usages to use ``...`` directly. As before, you can use this to instruct a ``groupby`` operation to reduce over all dimensions. While we have no plans to remove ``xr.ALL_DIMS``, we suggest using ``...``. (:pull:`3418`) By `Maximilian Roos `_ - :py:func:`xarray.dot`, and :py:meth:`DataArray.dot` now support the ``dims=...`` option to sum over the union of dimensions of all input arrays (:issue:`3423`) by `Mathias Hauser `_. - Added new ``Dataset._repr_html_`` and ``DataArray._repr_html_`` to improve representation of objects in Jupyter. By default this feature is turned off for now. Enable it with ``xarray.set_options(display_style="html")``. (:pull:`3425`) by `Benoit Bovy `_ and `Julia Signell `_. - Implement `dask deterministic hashing `_ for xarray objects. Note that xarray objects with a dask.array backend already used deterministic hashing in previous releases; this change implements it when whole xarray objects are embedded in a dask graph, e.g. when :py:meth:`DataArray.map_blocks` is invoked. (:issue:`3378`, :pull:`3446`, :pull:`3515`) By `Deepak Cherian `_ and `Guido Imperiale `_. - Add the documented-but-missing :py:meth:`~core.groupby.DatasetGroupBy.quantile`. - xarray now respects the ``DataArray.encoding["coordinates"]`` attribute when writing to disk. See :ref:`io.coordinates` for more. (:issue:`3351`, :pull:`3487`) By `Deepak Cherian `_. - Add the documented-but-missing :py:meth:`~core.groupby.DatasetGroupBy.quantile`. (:issue:`3525`, :pull:`3527`). By `Justus Magin `_. Bug fixes ~~~~~~~~~ - Ensure an index of type ``CFTimeIndex`` is not converted to a ``DatetimeIndex`` when calling :py:meth:`Dataset.rename`, :py:meth:`Dataset.rename_dims` and :py:meth:`Dataset.rename_vars`. By `Mathias Hauser `_. (:issue:`3522`). - Fix a bug in :py:meth:`DataArray.set_index` in case that an existing dimension becomes a level variable of MultiIndex. (:pull:`3520`). By `Keisuke Fujii `_. - Harmonize ``_FillValue``, ``missing_value`` during encoding and decoding steps. (:pull:`3502`) By `Anderson Banihirwe `_. - Fix regression introduced in v0.14.0 that would cause a crash if dask is installed but cloudpickle isn't (:issue:`3401`) by `Rhys Doyle `_ - Fix grouping over variables with NaNs. (:issue:`2383`, :pull:`3406`). By `Deepak Cherian `_. 
- Make alignment and concatenation significantly more efficient by using dask names to compare dask objects prior to comparing values after computation. This change makes it more convenient to carry around large non-dimensional coordinate variables backed by dask arrays. Existing workarounds involving ``reset_coords(drop=True)`` should now be unnecessary in most cases. (:issue:`3068`, :issue:`3311`, :issue:`3454`, :pull:`3453`). By `Deepak Cherian `_. - Add support for cftime>=1.0.4. By `Anderson Banihirwe `_. - Rolling reduction operations no longer compute dask arrays by default. (:issue:`3161`). In addition, the ``allow_lazy`` kwarg to ``reduce`` is deprecated. By `Deepak Cherian `_. - Fix ``GroupBy.reduce`` when reducing over multiple dimensions. (:issue:`3402`). By `Deepak Cherian `_ - Allow appending datetime and bool data variables to zarr stores. (:issue:`3480`). By `Akihiro Matsukawa `_. - Add support for numpy >=1.18 (); bugfix mean() on datetime64 arrays on dask backend (:issue:`3409`, :pull:`3537`). By `Guido Imperiale `_. - Add support for pandas >=0.26 (:issue:`3440`). By `Deepak Cherian `_. - Add support for pseudonetcdf >=3.1 (:pull:`3485`). By `Barron Henderson `_. Documentation ~~~~~~~~~~~~~ - Fix leap year condition in `monthly means example `_. By `Mickaรซl Lalande `_. - Fix the documentation of :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample`, explicitly stating that a datetime-like dimension is required. (:pull:`3400`) By `Justus Magin `_. - Update the :ref:`terminology` page to address multidimensional coordinates. (:pull:`3410`) By `Jon Thielen `_. - Fix the documentation of :py:meth:`Dataset.integrate` and :py:meth:`DataArray.integrate` and add an example to :py:meth:`Dataset.integrate`. (:pull:`3469`) By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Added integration tests against `pint `_. (:pull:`3238`, :pull:`3447`, :pull:`3493`, :pull:`3508`) by `Justus Magin `_. .. note:: At the moment of writing, these tests *as well as the ability to use pint in general* require `a highly experimental version of pint `_ (install with ``pip install git+https://github.com/andrewgsavage/pint.git@refs/pull/6/head)``. Even with it, interaction with non-numpy array libraries, e.g. dask or sparse, is broken. - Use Python 3.6 idioms throughout the codebase. (:pull:`3419`) By `Maximilian Roos `_ - Run basic CI tests on Python 3.8. (:pull:`3477`) By `Maximilian Roos `_ - Enable type checking on default sentinel values (:pull:`3472`) By `Maximilian Roos `_ - Add ``Variable._replace`` for simpler replacing of a subset of attributes (:pull:`3472`) By `Maximilian Roos `_ .. _whats-new.0.14.0: v0.14.0 (14 Oct 2019) --------------------- Breaking changes ~~~~~~~~~~~~~~~~ - This release introduces a rolling policy for minimum dependency versions: :ref:`mindeps_policy`. Several minimum versions have been increased: ============ ================== ==== Package Old New ============ ================== ==== Python 3.5.3 3.6 numpy 1.12 1.14 pandas 0.19.2 0.24 dask 0.16 (tested: 2.4) 1.2 bottleneck 1.1 (tested: 1.2) 1.2 matplotlib 1.5 (tested: 3.1) 3.1 ============ ================== ==== Obsolete patch versions (x.y.Z) are not tested anymore. The oldest supported versions of all optional dependencies are now covered by automated tests (before, only the very latest versions were tested). (:issue:`3222`, :issue:`3293`, :issue:`3340`, :issue:`3346`, :issue:`3358`). By `Guido Imperiale `_. - Dropped the ``drop=False`` optional parameter from :py:meth:`Variable.isel`. 
It was unused and doesn't make sense for a Variable. (:pull:`3375`). By `Guido Imperiale `_. - Remove internal usage of :py:class:`collections.OrderedDict`. After dropping support for Python <=3.5, most uses of ``OrderedDict`` in xarray were no longer necessary. We have removed the internal use of the ``OrderedDict`` in favor of Python's builtin ``dict`` object which is now ordered itself. This change will be most obvious when interacting with the ``attrs`` property on Dataset and DataArray objects. (:issue:`3380`, :pull:`3389`). By `Joe Hamman `_. New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - Added :py:func:`~xarray.map_blocks`, modeled after :py:func:`dask.array.map_blocks`. Also added :py:meth:`Dataset.unify_chunks`, :py:meth:`DataArray.unify_chunks` and :py:meth:`testing.assert_chunks_equal`. (:pull:`3276`). By `Deepak Cherian `_ and `Guido Imperiale `_. Enhancements ~~~~~~~~~~~~ - ``core.groupby.GroupBy`` enhancements. By `Deepak Cherian `_. - Added a repr (:pull:`3344`). Example:: >>> da.groupby("time.season") DataArrayGroupBy, grouped over 'season' 4 groups with labels 'DJF', 'JJA', 'MAM', 'SON' - Added a ``GroupBy.dims`` property that mirrors the dimensions of each group (:issue:`3344`). - Speed up :py:meth:`Dataset.isel` up to 33% and :py:meth:`DataArray.isel` up to 25% for small arrays (:issue:`2799`, :pull:`3375`). By `Guido Imperiale `_. Bug fixes ~~~~~~~~~ - Reintroduce support for :mod:`weakref` (broken in v0.13.0). Support has been reinstated for :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects only. Internal xarray objects remain unaddressable by weakref in order to save memory (:issue:`3317`). By `Guido Imperiale `_. - Line plots with the ``x`` or ``y`` argument set to a 1D non-dimensional coord now plot the correct data for 2D DataArrays (:issue:`3334`). By `Tom Nicholas `_. - Make :py:func:`~xarray.concat` more robust when merging variables present in some datasets but not others (:issue:`508`). By `Deepak Cherian `_. - The default behaviour of reducing across all dimensions for :py:class:`~xarray.core.groupby.DataArrayGroupBy` objects has now been properly removed as was done for :py:class:`~xarray.core.groupby.DatasetGroupBy` in 0.13.0 (:issue:`3337`). Use ``xarray.ALL_DIMS`` if you need to replicate previous behaviour. Also raise nicer error message when no groups are created (:issue:`1764`). By `Deepak Cherian `_. - Fix error in concatenating unlabeled dimensions (:pull:`3362`). By `Deepak Cherian `_. - Warn if the ``dim`` kwarg is passed to rolling operations. This is redundant since a dimension is specified when the :py:class:`~computation.rolling.DatasetRolling` or :py:class:`~computation.rolling.DataArrayRolling` object is created. (:pull:`3362`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Created a glossary of important xarray terms (:issue:`2410`, :pull:`3352`). By `Gregory Gundersen `_. - Created a "How do I..." section (:ref:`howdoi`) for solutions to common questions. (:pull:`3357`). By `Deepak Cherian `_. - Add examples for :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` (:pull:`3331`, :pull:`3331`). By `Justus Magin `_. - Add examples for :py:meth:`align`, :py:meth:`merge`, :py:meth:`combine_by_coords`, :py:meth:`full_like`, :py:meth:`zeros_like`, :py:meth:`ones_like`, :py:meth:`Dataset.pipe`, :py:meth:`Dataset.assign`, :py:meth:`Dataset.reindex`, :py:meth:`Dataset.fillna` (:pull:`3328`). By `Anderson Banihirwe `_. 
- Fixed documentation to clean up an unwanted file created in ``ipython`` example (:pull:`3353`). By `Gregory Gundersen `_. .. _whats-new.0.13.0: v0.13.0 (17 Sep 2019) --------------------- This release includes many exciting changes: wrapping of `NEP18 `_ compliant numpy-like arrays; new :py:meth:`~Dataset.plot.scatter` plotting method that can scatter two ``DataArrays`` in a ``Dataset`` against each other; support for converting pandas DataFrames to xarray objects that wrap ``pydata/sparse``; and more! Breaking changes ~~~~~~~~~~~~~~~~ - This release increases the minimum required Python version from 3.5.0 to 3.5.3 (:issue:`3089`). By `Guido Imperiale `_. - The ``isel_points`` and ``sel_points`` methods are removed, having been deprecated since v0.10.0. These are redundant with the ``isel`` / ``sel`` methods. See :ref:`vectorized_indexing` for the details By `Maximilian Roos `_ - The ``inplace`` kwarg for public methods now raises an error, having been deprecated since v0.11.0. By `Maximilian Roos `_ - :py:func:`~xarray.concat` now requires the ``dim`` argument. Its ``indexers``, ``mode`` and ``concat_over`` kwargs have now been removed. By `Deepak Cherian `_ - Passing a list of colors in ``cmap`` will now raise an error, having been deprecated since v0.6.1. - Most xarray objects now define ``__slots__``. This reduces overall RAM usage by ~22% (not counting the underlying numpy buffers); on CPython 3.7/x64, a trivial DataArray has gone down from 1.9kB to 1.5kB. Caveats: - Pickle streams produced by older versions of xarray can't be loaded using this release, and vice versa. - Any user code that was accessing the ``__dict__`` attribute of xarray objects will break. The best practice to attach custom metadata to xarray objects is to use the ``attrs`` dictionary. - Any user code that defines custom subclasses of xarray classes must now explicitly define ``__slots__`` itself. Subclasses that don't add any attributes must state so by defining ``__slots__ = ()`` right after the class header. Omitting ``__slots__`` will now cause a ``FutureWarning`` to be logged, and will raise an error in a later release. (:issue:`3250`) by `Guido Imperiale `_. - The default dimension for :py:meth:`Dataset.groupby`, :py:meth:`Dataset.resample`, :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample` reductions is now the grouping or resampling dimension. - :py:meth:`DataArray.to_dataset` requires ``name`` to be passed as a kwarg (previously ambiguous positional arguments were deprecated) - Reindexing with variables of a different dimension now raise an error (previously deprecated) - ``xarray.broadcast_array`` is removed (previously deprecated in favor of :py:func:`~xarray.broadcast`) - ``Variable.expand_dims`` is removed (previously deprecated in favor of :py:meth:`Variable.set_dims`) New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - xarray can now wrap around any `NEP18 `_ compliant numpy-like library (important: read notes about ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION`` in the above link). Added explicit test coverage for `sparse `_. (:issue:`3117`, :issue:`3202`). This requires ``sparse>=0.8.0``. By `Nezar Abdennur `_ and `Guido Imperiale `_. - :py:meth:`~Dataset.from_dataframe` and :py:meth:`~DataArray.from_series` now support ``sparse=True`` for converting pandas objects into xarray objects wrapping sparse arrays. This is particularly useful with sparsely populated hierarchical indexes. (:issue:`3206`) By `Stephan Hoyer `_. 
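
  A minimal sketch (requires the optional ``sparse`` package; the series below
  is illustrative)::

      import pandas as pd
      import xarray as xr

      index = pd.MultiIndex.from_product(
          [list("ab"), range(3)], names=["letter", "num"]
      )
      series = pd.Series(range(6), index=index)

      # the resulting DataArray wraps a sparse array instead of densifying
      da = xr.DataArray.from_series(series, sparse=True)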
- The xarray package is now discoverable by mypy (although typing hints coverage is not complete yet). mypy type checking is now enforced by CI. Libraries that depend on xarray and use mypy can now remove from their setup.cfg the lines:: [mypy-xarray] ignore_missing_imports = True (:issue:`2877`, :issue:`3088`, :issue:`3090`, :issue:`3112`, :issue:`3117`, :issue:`3207`) By `Guido Imperiale `_ and `Maximilian Roos `_. - Added :py:meth:`DataArray.broadcast_like` and :py:meth:`Dataset.broadcast_like`. By `Deepak Cherian `_ and `David Mertz `_. - Dataset plotting API for visualizing dependencies between two DataArrays! Currently only :py:meth:`Dataset.plot.scatter` is implemented. By `Yohai Bar Sinai `_ and `Deepak Cherian `_ - Added :py:meth:`DataArray.head`, :py:meth:`DataArray.tail` and :py:meth:`DataArray.thin`; as well as :py:meth:`Dataset.head`, :py:meth:`Dataset.tail` and :py:meth:`Dataset.thin` methods. (:issue:`319`) By `Gerardo Rivera `_. Enhancements ~~~~~~~~~~~~ - Multiple enhancements to :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset`. By `Deepak Cherian `_ - Added ``compat='override'``. When merging, this option picks the variable from the first dataset and skips all comparisons. - Added ``join='override'``. When aligning, this only checks that index sizes are equal among objects and skips checking indexes for equality. - :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset` now support the ``join`` kwarg. It is passed down to :py:func:`~xarray.align`. - :py:func:`~xarray.concat` now calls :py:func:`~xarray.merge` on variables that are not concatenated (i.e. variables without ``concat_dim`` when ``data_vars`` or ``coords`` are ``"minimal"``). :py:func:`~xarray.concat` passes its new ``compat`` kwarg down to :py:func:`~xarray.merge`. (:issue:`2064`) Users can avoid a common bottleneck when using :py:func:`~xarray.open_mfdataset` on a large number of files with variables that are known to be aligned and some of which need not be concatenated. Slow equality comparisons can now be avoided, for e.g.:: data = xr.open_mfdataset(files, concat_dim='time', data_vars='minimal', coords='minimal', compat='override', join='override') - In :py:meth:`~xarray.Dataset.to_zarr`, passing ``mode`` is not mandatory if ``append_dim`` is set, as it will automatically be set to ``'a'`` internally. By `David Brochart `_. - Added the ability to initialize an empty or full DataArray with a single value. (:issue:`277`) By `Gerardo Rivera `_. - :py:func:`~xarray.Dataset.to_netcdf()` now supports the ``invalid_netcdf`` kwarg when used with ``engine="h5netcdf"``. It is passed to ``h5netcdf.File``. By `Ulrich Herter `_. - ``xarray.Dataset.drop`` now supports keyword arguments; dropping index labels by using both ``dim`` and ``labels`` or using a :py:class:`~core.coordinates.DataArrayCoordinates` object are deprecated (:issue:`2910`). By `Gregory Gundersen `_. - Added examples of :py:meth:`Dataset.set_index` and :py:meth:`DataArray.set_index`, as well are more specific error messages when the user passes invalid arguments (:issue:`3176`). By `Gregory Gundersen `_. - :py:meth:`Dataset.filter_by_attrs` now filters the coordinates as well as the variables. By `Spencer Jones `_. Bug fixes ~~~~~~~~~ - Improve "missing dimensions" error message for :py:func:`~xarray.apply_ufunc` (:issue:`2078`). By `Rick Russotto `_. - :py:meth:`~xarray.DataArray.assign_coords` now supports dictionary arguments (:issue:`3231`). By `Gregory Gundersen `_. 
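
  For example (the coordinate values are illustrative)::

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x")
      da = da.assign_coords({"x": [10, 20, 30]})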
- Fix regression introduced in v0.12.2 where ``copy(deep=True)`` would convert unicode indices to dtype=object (:issue:`3094`). By `Guido Imperiale `_. - Improved error handling and documentation for ``.expand_dims()`` read-only view. - Fix tests for big-endian systems (:issue:`3125`). By `Graham Inggs `_. - XFAIL several tests which are expected to fail on ARM systems due to a ``datetime`` issue in NumPy (:issue:`2334`). By `Graham Inggs `_. - Fix KeyError that arises when using .sel method with float values different from coords float type (:issue:`3137`). By `Hasan Ahmad `_. - Fixed bug in ``combine_by_coords()`` causing a ``ValueError`` if the input had an unused dimension with coordinates which were not monotonic (:issue:`3150`). By `Tom Nicholas `_. - Fixed crash when applying ``distributed.Client.compute()`` to a DataArray (:issue:`3171`). By `Guido Imperiale `_. - Better error message when using groupby on an empty DataArray (:issue:`3037`). By `Hasan Ahmad `_. - Fix error that arises when using open_mfdataset on a series of netcdf files having differing values for a variable attribute of type list. (:issue:`3034`) By `Hasan Ahmad `_. - Prevent :py:meth:`~xarray.DataArray.argmax` and :py:meth:`~xarray.DataArray.argmin` from calling dask compute (:issue:`3237`). By `Ulrich Herter `_. - Plots in 2 dimensions (pcolormesh, contour) now allow to specify levels as numpy array (:issue:`3284`). By `Mathias Hauser `_. - Fixed bug in :meth:`DataArray.quantile` failing to keep attributes when ``keep_attrs`` was True (:issue:`3304`). By `David Huard `_. Documentation ~~~~~~~~~~~~~ - Created a `PR checklist `_ as a quick reference for tasks before creating a new PR or pushing new commits. By `Gregory Gundersen `_. - Fixed documentation to clean up unwanted files created in ``ipython`` examples (:issue:`3227`). By `Gregory Gundersen `_. .. _whats-new.0.12.3: v0.12.3 (10 July 2019) ---------------------- New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - New methods :py:meth:`Dataset.to_stacked_array` and :py:meth:`DataArray.to_unstacked_dataset` for reshaping Datasets of variables with different dimensions (:issue:`1317`). This is useful for feeding data from xarray into machine learning models, as described in :ref:`reshape.stacking_different`. By `Noah Brenowitz `_. Enhancements ~~~~~~~~~~~~ - Support for renaming ``Dataset`` variables and dimensions independently with :py:meth:`~Dataset.rename_vars` and :py:meth:`~Dataset.rename_dims` (:issue:`3026`). By `Julia Kent `_. - Add ``scales``, ``offsets``, ``units`` and ``descriptions`` attributes to :py:class:`~xarray.DataArray` returned by :py:func:`~xarray.open_rasterio`. (:issue:`3013`) By `Erle Carrara `_. Bug fixes ~~~~~~~~~ - Resolved deprecation warnings from newer versions of matplotlib and dask. - Compatibility fixes for the upcoming pandas 0.25 and NumPy 1.17 releases. By `Stephan Hoyer `_. - Fix summaries for multiindex coordinates (:issue:`3079`). By `Jonas Hรถrsch `_. - Fix HDF5 error that could arise when reading multiple groups from a file at once (:issue:`2954`). By `Stephan Hoyer `_. .. _whats-new.0.12.2: v0.12.2 (29 June 2019) ---------------------- New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - Two new functions, :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`, allow for combining datasets along any number of dimensions, instead of the one-dimensional list of datasets supported by :py:func:`~xarray.concat`. 
The new ``combine_nested`` will accept the datasets as a nested list-of-lists, and combine by applying a series of concat and merge operations. The new ``combine_by_coords`` instead uses the dimension coordinates of datasets to order them. :py:func:`~xarray.open_mfdataset` can use either ``combine_nested`` or ``combine_by_coords`` to combine datasets along multiple dimensions, by specifying the argument ``combine='nested'`` or ``combine='by_coords'``. The older function ``auto_combine`` has been deprecated, because its functionality has been subsumed by the new functions. To avoid FutureWarnings switch to using ``combine_nested`` or ``combine_by_coords``, (or set the ``combine`` argument in ``open_mfdataset``). (:issue:`2159`) By `Tom Nicholas `_. - :py:meth:`~xarray.DataArray.rolling_exp` and :py:meth:`~xarray.Dataset.rolling_exp` added, similar to pandas' ``pd.DataFrame.ewm`` method. Calling ``.mean`` on the resulting object will return an exponentially weighted moving average. By `Maximilian Roos `_. - New :py:func:`DataArray.str ` for string related manipulations, based on ``pandas.Series.str``. By `0x0L `_. - Added ``strftime`` method to ``.dt`` accessor, making it simpler to hand a datetime ``DataArray`` to other code expecting formatted dates and times. (:issue:`2090`). :py:meth:`~xarray.CFTimeIndex.strftime` is also now available on :py:class:`CFTimeIndex`. By `Alan Brammer `_ and `Ryan May `_. - ``GroupBy.quantile`` is now a method of ``GroupBy`` objects (:issue:`3018`). By `David Huard `_. - Argument and return types are added to most methods on ``DataArray`` and ``Dataset``, allowing static type checking both within xarray and external libraries. Type checking with `mypy `_ is enabled in CI (though not required yet). By `Guido Imperiale `_ and `Maximilian Roos `_. Enhancements to existing functionality ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Add ``keepdims`` argument for reduce operations (:issue:`2170`) By `Scott Wales `_. - Enable ``@`` operator for DataArray. This is equivalent to :py:meth:`DataArray.dot` By `Maximilian Roos `_. - Add ``fill_value`` argument for reindex, align, and merge operations to enable custom fill values. (:issue:`2876`) By `Zach Griffith `_. - :py:meth:`DataArray.transpose` now accepts a keyword argument ``transpose_coords`` which enables transposition of coordinates in the same way as :py:meth:`Dataset.transpose`. :py:meth:`DataArray.groupby` :py:meth:`DataArray.groupby_bins`, and :py:meth:`DataArray.resample` now accept a keyword argument ``restore_coord_dims`` which keeps the order of the dimensions of multi-dimensional coordinates intact (:issue:`1856`). By `Peter Hausamann `_. - Clean up Python 2 compatibility in code (:issue:`2950`) By `Guido Imperiale `_. - Better warning message when supplying invalid objects to ``xr.merge`` (:issue:`2948`). By `Mathias Hauser `_. - Add ``errors`` keyword argument to ``Dataset.drop`` and :py:meth:`Dataset.drop_dims` that allows ignoring errors if a passed label or dimension is not in the dataset (:issue:`2994`). By `Andrew Ross `_. IO related enhancements ~~~~~~~~~~~~~~~~~~~~~~~ - Implement :py:func:`~xarray.load_dataset` and :py:func:`~xarray.load_dataarray` as alternatives to :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_dataarray` to open, load into memory, and close files, returning the Dataset or DataArray. These functions are helpful for avoiding file-lock errors when trying to write to files opened using ``open_dataset()`` or ``open_dataarray()``. (:issue:`2887`) By `Dan Nowacki `_. 
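
  A minimal sketch of the intended pattern (``"data.nc"`` and ``"var"`` are
  placeholder names)::

      import xarray as xr

      ds = xr.load_dataset("data.nc")  # reads the data and closes the file
      ds["var"] = ds["var"] * 2
      ds.to_netcdf("data.nc")  # no lingering file handle, so overwriting is safe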
- It is now possible to extend existing :ref:`io.zarr` datasets, by using ``mode='a'`` and the new ``append_dim`` argument in :py:meth:`~xarray.Dataset.to_zarr`. By `Jendrik Jรถrdening `_, `David Brochart `_, `Ryan Abernathey `_ and `Shikhar Goenka `_. - ``xr.open_zarr`` now accepts manually specified chunks with the ``chunks=`` parameter. ``auto_chunk=True`` is equivalent to ``chunks='auto'`` for backwards compatibility. The ``overwrite_encoded_chunks`` parameter is added to remove the original zarr chunk encoding. By `Lily Wang `_. - netCDF chunksizes are now only dropped when original_shape is different, not when it isn't found. (:issue:`2207`) By `Karel van de Plassche `_. - Character arrays' character dimension name decoding and encoding handled by ``var.encoding['char_dim_name']`` (:issue:`2895`) By `James McCreight `_. - open_rasterio() now supports rasterio.vrt.WarpedVRT with custom transform, width and height (:issue:`2864`). By `Julien Michel `_. Bug fixes ~~~~~~~~~ - Rolling operations on xarray objects containing dask arrays could silently compute the incorrect result or use large amounts of memory (:issue:`2940`). By `Stephan Hoyer `_. - Don't set encoding attributes on bounds variables when writing to netCDF. (:issue:`2921`) By `Deepak Cherian `_. - NetCDF4 output: variables with unlimited dimensions must be chunked (not contiguous) on output. (:issue:`1849`) By `James McCreight `_. - indexing with an empty list creates an object with zero-length axis (:issue:`2882`) By `Mayeul d'Avezac `_. - Return correct count for scalar datetime64 arrays (:issue:`2770`) By `Dan Nowacki `_. - Fixed max, min exception when applied to a multiIndex (:issue:`2923`) By `Ian Castleden `_ - A deep copy deep-copies the coords (:issue:`1463`) By `Martin Pletcher `_. - Increased support for ``missing_value`` (:issue:`2871`) By `Deepak Cherian `_. - Removed usages of ``pytest.config``, which is deprecated (:issue:`2988`) By `Maximilian Roos `_. - Fixed performance issues with cftime installed (:issue:`3000`) By `0x0L `_. - Replace incorrect usages of ``message`` in pytest assertions with ``match`` (:issue:`3011`) By `Maximilian Roos `_. - Add explicit pytest markers, now required by pytest (:issue:`3032`). By `Maximilian Roos `_. - Test suite fixes for newer versions of pytest (:issue:`3011`, :issue:`3032`). By `Maximilian Roos `_ and `Stephan Hoyer `_. .. _whats-new.0.12.1: v0.12.1 (4 April 2019) ---------------------- Enhancements ~~~~~~~~~~~~ - Allow ``expand_dims`` method to support inserting/broadcasting dimensions with size > 1. (:issue:`2710`) By `Martin Pletcher `_. Bug fixes ~~~~~~~~~ - Dataset.copy(deep=True) now creates a deep copy of the attrs (:issue:`2835`). By `Andras Gefferth `_. - Fix incorrect ``indexes`` resulting from various ``Dataset`` operations (e.g., ``swap_dims``, ``isel``, ``reindex``, ``[]``) (:issue:`2842`, :issue:`2856`). By `Stephan Hoyer `_. .. _whats-new.0.12.0: v0.12.0 (15 March 2019) ----------------------- Highlights include: - Removed support for Python 2. This is the first version of xarray that is Python 3 only! - New :py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.DataArray.integrate` methods. See :ref:`compute.coarsen` and :ref:`compute.using_coordinates` for details. - Many improvements to cftime support. See below for details. Deprecations ~~~~~~~~~~~~ - The ``compat`` argument to ``Dataset`` and the ``encoding`` argument to ``DataArray`` are deprecated and will be removed in a future release. (:issue:`1188`) By `Maximilian Roos `_. 
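
  A minimal sketch of the replacement pattern (the variable name and encoding
  key are illustrative)::

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x", name="var")
      da.encoding["dtype"] = "int32"  # set the attribute directly ...
      # ... or specify the encoding when writing to disk
      da.to_netcdf("out.nc", encoding={"var": {"dtype": "int32"}})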
cftime related enhancements ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Resampling of standard and non-standard calendars indexed by :py:class:`~xarray.CFTimeIndex` is now possible. (:issue:`2191`). By `Jwen Fai Low `_ and `Spencer Clark `_. - Taking the mean of arrays of :py:class:`cftime.datetime` objects, and by extension, use of :py:meth:`~xarray.DataArray.coarsen` with :py:class:`cftime.datetime` coordinates is now possible. By `Spencer Clark `_. - Internal plotting now supports ``cftime.datetime`` objects as time series. (:issue:`2164`) By `Julius Busecke `_ and `Spencer Clark `_. - :py:meth:`~xarray.cftime_range` now supports QuarterBegin and QuarterEnd offsets (:issue:`2663`). By `Jwen Fai Low `_ - :py:meth:`~xarray.open_dataset` now accepts a ``use_cftime`` argument, which can be used to require that ``cftime.datetime`` objects are always used, or never used when decoding dates encoded with a standard calendar. This can be used to ensure consistent date types are returned when using :py:meth:`~xarray.open_mfdataset` (:issue:`1263`) and/or to silence serialization warnings raised if dates from a standard calendar are found to be outside the :py:class:`pandas.Timestamp`-valid range (:issue:`2754`). By `Spencer Clark `_. - :py:meth:`pandas.Series.dropna` is now supported for a :py:class:`pandas.Series` indexed by a :py:class:`~xarray.CFTimeIndex` (:issue:`2688`). By `Spencer Clark `_. Other enhancements ~~~~~~~~~~~~~~~~~~ - Added ability to open netcdf4/hdf5 file-like objects with ``open_dataset``. Requires (h5netcdf>0.7 and h5py>2.9.0). (:issue:`2781`) By `Scott Henderson `_ - Add ``data=False`` option to ``to_dict()`` methods. (:issue:`2656`) By `Ryan Abernathey `_ - :py:meth:`DataArray.coarsen` and :py:meth:`Dataset.coarsen` are newly added. See :ref:`compute.coarsen` for details. (:issue:`2525`) By `Keisuke Fujii `_. - Upsampling an array via interpolation with resample is now dask-compatible, as long as the array is not chunked along the resampling dimension. By `Spencer Clark `_. - :py:func:`xarray.testing.assert_equal` and :py:func:`xarray.testing.assert_identical` now provide a more detailed report showing what exactly differs between the two objects (dimensions / coordinates / variables / attributes) (:issue:`1507`). By `Benoit Bovy `_. - Add ``tolerance`` option to ``resample()`` methods ``bfill``, ``pad``, ``nearest``. (:issue:`2695`) By `Hauke Schulz `_. - :py:meth:`DataArray.integrate` and :py:meth:`Dataset.integrate` are newly added. See :ref:`compute.using_coordinates` for the detail. (:issue:`1332`) By `Keisuke Fujii `_. - Added :py:meth:`~xarray.Dataset.drop_dims` (:issue:`1949`). By `Kevin Squire `_. Bug fixes ~~~~~~~~~ - Silenced warnings that appear when using pandas 0.24. By `Stephan Hoyer `_ - Interpolating via resample now internally specifies ``bounds_error=False`` as an argument to ``scipy.interpolate.interp1d``, allowing for interpolation from higher frequencies to lower frequencies. Datapoints outside the bounds of the original time coordinate are now filled with NaN (:issue:`2197`). By `Spencer Clark `_. - Line plots with the ``x`` argument set to a non-dimensional coord now plot the correct data for 1D DataArrays. (:issue:`2725`). By `Tom Nicholas `_. - Subtracting a scalar ``cftime.datetime`` object from a :py:class:`CFTimeIndex` now results in a :py:class:`pandas.TimedeltaIndex` instead of raising a ``TypeError`` (:issue:`2671`). By `Spencer Clark `_. 
- backend_kwargs are no longer ignored when using open_dataset with pynio engine
  (:issue:`2380`)
  By `Jonathan Joyce `_.
- Fix ``open_rasterio`` creating a WKT CRS instead of PROJ.4 with
  ``rasterio`` 1.0.14+ (:issue:`2715`).
  By `David Hoese `_.
- Masking data arrays with :py:meth:`xarray.DataArray.where` now returns an
  array with the name of the original masked array (:issue:`2748` and :issue:`2457`).
  By `Yohai Bar-Sinai `_.
- Fixed error when trying to reduce a DataArray using a function which does not
  require an axis argument. (:issue:`2768`)
  By `Tom Nicholas `_.
- Concatenating a sequence of :py:class:`~xarray.DataArray` with varying names
  sets the name of the output array to ``None``, instead of the name of the
  first input array. If the names are the same it sets the name to that,
  instead of the name of the first DataArray in the list as it did before.
  (:issue:`2775`). By `Tom Nicholas `_.
- Per the `CF conventions section on calendars `_,
  specifying ``'standard'`` as the calendar type in
  :py:meth:`~xarray.cftime_range` now correctly refers to the ``'gregorian'``
  calendar instead of the ``'proleptic_gregorian'`` calendar (:issue:`2761`).

.. _whats-new.0.11.3:

v0.11.3 (26 January 2019)
-------------------------

Bug fixes
~~~~~~~~~

- Saving files with times encoded with reference dates with timezones
  (e.g. '2000-01-01T00:00:00-05:00') no longer raises an error (:issue:`2649`).
  By `Spencer Clark `_.
- Fixed performance regression with ``open_mfdataset`` (:issue:`2662`).
  By `Tom Nicholas `_.
- Fixed supplying an explicit dimension in the ``concat_dim`` argument to
  ``open_mfdataset`` (:issue:`2647`). By `Ben Root `_.

.. _whats-new.0.11.2:

v0.11.2 (2 January 2019)
------------------------

Removes inadvertently introduced setup dependency on pytest-runner
(:issue:`2641`). Otherwise, this release is exactly equivalent to 0.11.1.

.. warning::

  This is the last xarray release that will support Python 2.7. Future releases
  will be Python 3 only, but older versions of xarray will always be available
  for Python 2.7 users. For more details, see:

  - :issue:`Xarray Github issue discussing dropping Python 2 <1829>`
  - `Python 3 Statement `__
  - `Tips on porting to Python 3 `__

.. _whats-new.0.11.1:

v0.11.1 (29 December 2018)
--------------------------

This minor release includes a number of enhancements and bug fixes, and two
(slightly) breaking changes.

Breaking changes
~~~~~~~~~~~~~~~~

- Minimum rasterio version increased from 0.36 to 1.0 (for ``open_rasterio``)
- Time bounds variables are now also decoded according to CF conventions
  (:issue:`2565`). The previous behavior was to decode them only if they had
  specific time attributes, now these attributes are copied automatically from
  the corresponding time coordinate. This might break downstream code that was
  relying on these variables not being decoded.
  By `Fabien Maussion `_.

Enhancements
~~~~~~~~~~~~

- Ability to read and write consolidated metadata in zarr stores (:issue:`2558`).
  By `Ryan Abernathey `_.
- :py:class:`CFTimeIndex` uses slicing for string indexing when possible (like
  :py:class:`pandas.DatetimeIndex`), which avoids unnecessary copies.
  By `Stephan Hoyer `_.
- Enable passing ``rasterio.io.DatasetReader`` or ``rasterio.vrt.WarpedVRT`` to
  ``open_rasterio`` instead of file path string. Allows for in-memory
  reprojection, see (:issue:`2588`).
  By `Scott Henderson `_.
- Like :py:class:`pandas.DatetimeIndex`, :py:class:`CFTimeIndex` now supports "dayofyear" and "dayofweek" accessors (:issue:`2597`). Note this requires a version of cftime greater than 1.0.2. By `Spencer Clark `_. - The option ``'warn_for_unclosed_files'`` (False by default) has been added to allow users to enable a warning when files opened by xarray are deallocated but were not explicitly closed. This is mostly useful for debugging; we recommend enabling it in your test suites if you use xarray for IO. By `Stephan Hoyer `_ - Support Dask ``HighLevelGraphs`` by `Matthew Rocklin `_. - :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` now supports the ``loffset`` kwarg just like pandas. By `Deepak Cherian `_ - Datasets are now guaranteed to have a ``'source'`` encoding, so the source file name is always stored (:issue:`2550`). By `Tom Nicholas `_. - The ``apply`` methods for ``DatasetGroupBy``, ``DataArrayGroupBy``, ``DatasetResample`` and ``DataArrayResample`` now support passing positional arguments to the applied function as a tuple to the ``args`` argument. By `Matti Eskelinen `_. - 0d slices of ndarrays are now obtained directly through indexing, rather than extracting and wrapping a scalar, avoiding unnecessary copying. By `Daniel Wennberg `_. - Added support for ``fill_value`` with :py:meth:`~xarray.DataArray.shift` and :py:meth:`~xarray.Dataset.shift` By `Maximilian Roos `_ Bug fixes ~~~~~~~~~ - Ensure files are automatically closed, if possible, when no longer referenced by a Python variable (:issue:`2560`). By `Stephan Hoyer `_ - Fixed possible race conditions when reading/writing to disk in parallel (:issue:`2595`). By `Stephan Hoyer `_ - Fix h5netcdf saving scalars with filters or chunks (:issue:`2563`). By `Martin Raspaud `_. - Fix parsing of ``_Unsigned`` attribute set by OPENDAP servers. (:issue:`2583`). By `Deepak Cherian `_ - Fix failure in time encoding when exporting to netCDF with versions of pandas less than 0.21.1 (:issue:`2623`). By `Spencer Clark `_. - Fix MultiIndex selection to update label and level (:issue:`2619`). By `Keisuke Fujii `_. .. _whats-new.0.11.0: v0.11.0 (7 November 2018) ------------------------- Breaking changes ~~~~~~~~~~~~~~~~ - Finished deprecations (changed behavior with this release): - ``Dataset.T`` has been removed as a shortcut for :py:meth:`Dataset.transpose`. Call :py:meth:`Dataset.transpose` directly instead. - Iterating over a ``Dataset`` now includes only data variables, not coordinates. Similarly, calling ``len`` and ``bool`` on a ``Dataset`` now includes only data variables. - ``DataArray.__contains__`` (used by Python's ``in`` operator) now checks array data, not coordinates. - The old resample syntax from before xarray 0.10, e.g., ``data.resample('1D', dim='time', how='mean')``, is no longer supported will raise an error in most cases. You need to use the new resample syntax instead, e.g., ``data.resample(time='1D').mean()`` or ``data.resample({'time': '1D'}).mean()``. - New deprecations (behavior will be changed in xarray 0.12): - Reduction of :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample` without dimension argument will change in the next release. Now we warn a FutureWarning. By `Keisuke Fujii `_. - The ``inplace`` kwarg of a number of ``DataArray`` and ``Dataset`` methods is being deprecated and will be removed in the next release. By `Deepak Cherian `_. 
- Refactored storage backends: - Xarray's storage backends now automatically open and close files when necessary, rather than requiring opening a file with ``autoclose=True``. A global least-recently-used cache is used to store open files; the default limit of 128 open files should suffice in most cases, but can be adjusted if necessary with ``xarray.set_options(file_cache_maxsize=...)``. The ``autoclose`` argument to ``open_dataset`` and related functions has been deprecated and is now a no-op. This change, along with an internal refactor of xarray's storage backends, should significantly improve performance when reading and writing netCDF files with Dask, especially when working with many files or using Dask Distributed. By `Stephan Hoyer `_ - Support for non-standard calendars used in climate science: - Xarray will now always use :py:class:`cftime.datetime` objects, rather than by default trying to coerce them into ``np.datetime64[ns]`` objects. A :py:class:`~xarray.CFTimeIndex` will be used for indexing along time coordinates in these cases. - A new method :py:meth:`~xarray.CFTimeIndex.to_datetimeindex` has been added to aid in converting from a :py:class:`~xarray.CFTimeIndex` to a :py:class:`pandas.DatetimeIndex` for the remaining use-cases where using a :py:class:`~xarray.CFTimeIndex` is still a limitation (e.g. for resample or plotting). - Setting the ``enable_cftimeindex`` option is now a no-op and emits a ``FutureWarning``. Enhancements ~~~~~~~~~~~~ - :py:meth:`xarray.DataArray.plot.line` can now accept multidimensional coordinate variables as input. ``hue`` must be a dimension name in this case. (:issue:`2407`) By `Deepak Cherian `_. - Added support for Python 3.7. (:issue:`2271`). By `Joe Hamman `_. - Added support for plotting data with ``pandas.Interval`` coordinates, such as those created by :py:meth:`~xarray.DataArray.groupby_bins` By `Maximilian Maahn `_. - Added :py:meth:`~xarray.CFTimeIndex.shift` for shifting the values of a CFTimeIndex by a specified frequency. (:issue:`2244`). By `Spencer Clark `_. - Added support for using ``cftime.datetime`` coordinates with :py:meth:`~xarray.DataArray.differentiate`, :py:meth:`~xarray.Dataset.differentiate`, :py:meth:`~xarray.DataArray.interp`, and :py:meth:`~xarray.Dataset.interp`. By `Spencer Clark `_ - There is now a global option to either always keep or always discard dataset and dataarray attrs upon operations. The option is set with ``xarray.set_options(keep_attrs=True)``, and the default is to use the old behaviour. By `Tom Nicholas `_. - Added a new backend for the GRIB file format based on ECMWF *cfgrib* python driver and *ecCodes* C-library. (:issue:`2475`) By `Alessandro Amici `_, sponsored by `ECMWF `_. - Resample now supports a dictionary mapping from dimension to frequency as its first argument, e.g., ``data.resample({'time': '1D'}).mean()``. This is consistent with other xarray functions that accept either dictionaries or keyword arguments. By `Stephan Hoyer `_. - The preferred way to access tutorial data is now to load it lazily with :py:meth:`xarray.tutorial.open_dataset`. :py:meth:`xarray.tutorial.load_dataset` calls ``Dataset.load()`` prior to returning (and is now deprecated). This was changed in order to facilitate using tutorial datasets with dask. By `Joe Hamman `_. - ``DataArray`` can now use ``xr.set_option(keep_attrs=True)`` and retain attributes in binary operations, such as (``+, -, * ,/``). Default behaviour is unchanged (*Attributes will be dismissed*). 
By `Michael Blaschek `_ Bug fixes ~~~~~~~~~ - ``FacetGrid`` now properly uses the ``cbar_kwargs`` keyword argument. (:issue:`1504`, :issue:`1717`) By `Deepak Cherian `_. - Addition and subtraction operators used with a CFTimeIndex now preserve the index's type. (:issue:`2244`). By `Spencer Clark `_. - We now properly handle arrays of ``datetime.datetime`` and ``datetime.timedelta`` provided as coordinates. (:issue:`2512`) By `Deepak Cherian `_. - ``xarray.DataArray.roll`` correctly handles multidimensional arrays. (:issue:`2445`) By `Keisuke Fujii `_. - ``xarray.plot()`` now properly accepts a ``norm`` argument and does not override the norm's ``vmin`` and ``vmax``. (:issue:`2381`) By `Deepak Cherian `_. - ``xarray.DataArray.std()`` now correctly accepts ``ddof`` keyword argument. (:issue:`2240`) By `Keisuke Fujii `_. - Restore matplotlib's default of plotting dashed negative contours when a single color is passed to ``DataArray.contour()`` e.g. ``colors='k'``. By `Deepak Cherian `_. - Fix a bug that caused some indexing operations on arrays opened with ``open_rasterio`` to error (:issue:`2454`). By `Stephan Hoyer `_. - Subtracting one CFTimeIndex from another now returns a ``pandas.TimedeltaIndex``, analogous to the behavior for DatetimeIndexes (:issue:`2484`). By `Spencer Clark `_. - Adding a TimedeltaIndex to, or subtracting a TimedeltaIndex from a CFTimeIndex is now allowed (:issue:`2484`). By `Spencer Clark `_. - Avoid use of Dask's deprecated ``get=`` parameter in tests by `Matthew Rocklin `_. - An ``OverflowError`` is now accurately raised and caught during the encoding process if a reference date is used that is so distant that the dates must be encoded using cftime rather than NumPy (:issue:`2272`). By `Spencer Clark `_. - Chunked datasets can now roundtrip to Zarr storage continually with ``to_zarr`` and ``open_zarr`` (:issue:`2300`). By `Lily Wang `_. .. _whats-new.0.10.9: v0.10.9 (21 September 2018) --------------------------- This minor release contains a number of backwards compatible enhancements. Announcements of note: - Xarray is now a NumFOCUS fiscally sponsored project! Read `the announcement `_ for more details. - We have a new :doc:`roadmap` that outlines our future development plans. - ``Dataset.apply`` now properly documents the way ``func`` is called. By `Matti Eskelinen `_. Enhancements ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.differentiate` and :py:meth:`~xarray.Dataset.differentiate` are newly added. (:issue:`1332`) By `Keisuke Fujii `_. - Default colormap for sequential and divergent data can now be set via :py:func:`~xarray.set_options()` (:issue:`2394`) By `Julius Busecke `_. - min_count option is newly supported in :py:meth:`~xarray.DataArray.sum`, :py:meth:`~xarray.DataArray.prod` and :py:meth:`~xarray.Dataset.sum`, and :py:meth:`~xarray.Dataset.prod`. (:issue:`2230`) By `Keisuke Fujii `_. - :py:func:`~plot.plot()` now accepts the kwargs ``xscale, yscale, xlim, ylim, xticks, yticks`` just like pandas. Also ``xincrease=False, yincrease=False`` now use matplotlib's axis inverting methods instead of setting limits. By `Deepak Cherian `_. (:issue:`2224`) - DataArray coordinates and Dataset coordinates and data variables are now displayed as ``a b ... y z`` rather than ``a b c d ...``. (:issue:`1186`) By `Seth P `_. - A new CFTimeIndex-enabled :py:func:`cftime_range` function for use in generating dates from standard or non-standard calendars. By `Spencer Clark `_. 
- When interpolating over a ``datetime64`` axis, you can now provide a datetime
  string instead of a ``datetime64`` object. E.g. ``da.interp(time='1991-02-01')``
  (:issue:`2284`)
  By `Deepak Cherian `_.
- A clear error message is now displayed if a ``set`` or ``dict`` is passed in
  place of an array (:issue:`2331`)
  By `Maximilian Roos `_.
- Applying ``unstack`` to a large DataArray or Dataset is now much faster if the
  MultiIndex has not been modified after stacking the indices. (:issue:`1560`)
  By `Maximilian Maahn `_.
- You can now control whether or not to offset the coordinates when using the
  ``roll`` method and the current behavior, coordinates rolled by default,
  raises a deprecation warning unless explicitly setting the keyword argument.
  (:issue:`1875`)
  By `Andrew Huang `_.
- You can now call ``unstack`` without arguments to unstack every MultiIndex in
  a DataArray or Dataset.
  By `Julia Signell `_.
- Added the ability to pass a data kwarg to ``copy`` to create a new object with
  the same metadata as the original object but using new values.
  By `Julia Signell `_.

Bug fixes
~~~~~~~~~

- ``xarray.plot.imshow()`` correctly uses the ``origin`` argument. (:issue:`2379`)
  By `Deepak Cherian `_.
- Fixed ``DataArray.to_iris()`` failure while creating ``DimCoord`` by falling
  back to creating ``AuxCoord``. Fixed dependency on ``var_name`` attribute
  being set. (:issue:`2201`)
  By `Thomas Voigt `_.
- Fixed a bug in ``zarr`` backend which prevented use with datasets with invalid
  chunk size encoding after reading from an existing store (:issue:`2278`).
  By `Joe Hamman `_.
- Tests can be run in parallel with pytest-xdist.
  By `Tony Tung `_.
- Follow up the renamings in dask; from dask.ghost to dask.overlap.
  By `Keisuke Fujii `_.
- Now raises a ValueError when there is a conflict between dimension names and
  level names of MultiIndex. (:issue:`2299`)
  By `Keisuke Fujii `_.
- Now :py:func:`~xarray.apply_ufunc` raises a ValueError when the size of
  ``input_core_dims`` is inconsistent with the number of arguments.
  (:issue:`2341`)
  By `Keisuke Fujii `_.
- Fixed ``Dataset.filter_by_attrs()`` behavior not matching
  ``netCDF4.Dataset.get_variables_by_attributes()``. When more than one
  ``key=value`` is passed into ``Dataset.filter_by_attrs()`` it will now return
  a Dataset with variables which pass all the filters. (:issue:`2315`)
  By `Andrew Barna `_.

.. _whats-new.0.10.8:

v0.10.8 (18 July 2018)
----------------------

Breaking changes
~~~~~~~~~~~~~~~~

- Xarray no longer supports python 3.4. Additionally, the minimum supported
  versions of the following dependencies have been updated and/or clarified:

  - pandas: 0.18 -> 0.19
  - NumPy: 1.11 -> 1.12
  - Dask: 0.9 -> 0.16
  - Matplotlib: unspecified -> 1.5

  (:issue:`2204`). By `Joe Hamman `_.

Enhancements
~~~~~~~~~~~~

- :py:meth:`~xarray.DataArray.interp_like` and
  :py:meth:`~xarray.Dataset.interp_like` methods are newly added.
  (:issue:`2218`)
  By `Keisuke Fujii `_.
- Added support for curvilinear and unstructured generic grids to
  :py:meth:`~xarray.DataArray.to_cdms2` and
  :py:meth:`~xarray.DataArray.from_cdms2` (:issue:`2262`).
  By `Stephane Raynaud `_.

Bug fixes
~~~~~~~~~

- Fixed a bug in ``zarr`` backend which prevented use with datasets with
  incomplete chunks in multiple dimensions (:issue:`2225`).
  By `Joe Hamman `_.
- Fixed a bug in :py:meth:`~Dataset.to_netcdf` which prevented writing datasets
  when the arrays had different chunk sizes (:issue:`2254`).
  By `Mike Neish `_.
- Fixed masking during the conversion to cdms2 objects by :py:meth:`~xarray.DataArray.to_cdms2` (:issue:`2262`). By `Stephane Raynaud `_. - Fixed a bug in 2D plots which incorrectly raised an error when 2D coordinates weren't monotonic (:issue:`2250`). By `Fabien Maussion `_. - Fixed warning raised in :py:meth:`~Dataset.to_netcdf` due to deprecation of ``effective_get`` in dask (:issue:`2238`). By `Joe Hamman `_. .. _whats-new.0.10.7: v0.10.7 (7 June 2018) --------------------- Enhancements ~~~~~~~~~~~~ - Plot labels now make use of metadata that follow CF conventions (:issue:`2135`). By `Deepak Cherian `_ and `Ryan Abernathey `_. - Line plots now support facetting with ``row`` and ``col`` arguments (:issue:`2107`). By `Yohai Bar Sinai `_. - :py:meth:`~xarray.DataArray.interp` and :py:meth:`~xarray.Dataset.interp` methods are newly added. See :ref:`interp` for the detail. (:issue:`2079`) By `Keisuke Fujii `_. Bug fixes ~~~~~~~~~ - Fixed a bug in ``rasterio`` backend which prevented use with ``distributed``. The ``rasterio`` backend now returns pickleable objects (:issue:`2021`). By `Joe Hamman `_. .. _whats-new.0.10.6: v0.10.6 (31 May 2018) --------------------- The minor release includes a number of bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - New PseudoNetCDF backend for many Atmospheric data formats including GEOS-Chem, CAMx, NOAA arlpacked bit and many others. See ``io.PseudoNetCDF`` for more details. By `Barron Henderson `_. - The :py:class:`Dataset` constructor now aligns :py:class:`DataArray` arguments in ``data_vars`` to indexes set explicitly in ``coords``, where previously an error would be raised. (:issue:`674`) By `Maximilian Roos `_. - :py:meth:`~DataArray.sel`, :py:meth:`~DataArray.isel` & :py:meth:`~DataArray.reindex`, (and their :py:class:`Dataset` counterparts) now support supplying a ``dict`` as a first argument, as an alternative to the existing approach of supplying ``kwargs``. This allows for more robust behavior of dimension names which conflict with other keyword names, or are not strings. By `Maximilian Roos `_. - :py:meth:`~DataArray.rename` now supports supplying ``**kwargs``, as an alternative to the existing approach of supplying a ``dict`` as the first argument. By `Maximilian Roos `_. - :py:meth:`~DataArray.cumsum` and :py:meth:`~DataArray.cumprod` now support aggregation over multiple dimensions at the same time. This is the default behavior when dimensions are not specified (previously this raised an error). By `Stephan Hoyer `_ - :py:meth:`DataArray.dot` and :py:func:`dot` are partly supported with older dask<0.17.4. (related to :issue:`2203`) By `Keisuke Fujii `_. - Xarray now uses `Versioneer `__ to manage its version strings. (:issue:`1300`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fixed a regression in 0.10.4, where explicitly specifying ``dtype='S1'`` or ``dtype=str`` in ``encoding`` with ``to_netcdf()`` raised an error (:issue:`2149`). `Stephan Hoyer `_ - :py:func:`apply_ufunc` now directly validates output variables (:issue:`1931`). By `Stephan Hoyer `_. - Fixed a bug where ``to_netcdf(..., unlimited_dims='bar')`` yielded NetCDF files with spurious 0-length dimensions (i.e. ``b``, ``a``, and ``r``) (:issue:`2134`). By `Joe Hamman `_. - Removed spurious warnings with ``Dataset.update(Dataset)`` (:issue:`2161`) and ``array.equals(array)`` when ``array`` contains ``NaT`` (:issue:`2162`). By `Stephan Hoyer `_. 
- Aggregations with :py:meth:`Dataset.reduce` (including ``mean``, ``sum``, etc) no longer drop unrelated coordinates (:issue:`1470`). Also fixed a bug where non-scalar data-variables that did not include the aggregation dimension were improperly skipped. By `Stephan Hoyer `_ - Fix :meth:`~DataArray.stack` with non-unique coordinates on pandas 0.23 (:issue:`2160`). By `Stephan Hoyer `_ - Selecting data indexed by a length-1 ``CFTimeIndex`` with a slice of strings now behaves as it does when using a length-1 ``DatetimeIndex`` (i.e. it no longer falsely returns an empty array when the slice includes the value in the index) (:issue:`2165`). By `Spencer Clark `_. - Fix ``DataArray.groupby().reduce()`` mutating coordinates on the input array when grouping over dimension coordinates with duplicated entries (:issue:`2153`). By `Stephan Hoyer `_ - Fix ``Dataset.to_netcdf()`` cannot create group with ``engine="h5netcdf"`` (:issue:`2177`). By `Stephan Hoyer `_ .. _whats-new.0.10.4: v0.10.4 (16 May 2018) ---------------------- The minor release includes a number of bug-fixes and backwards compatible enhancements. A highlight is ``CFTimeIndex``, which offers support for non-standard calendars used in climate modeling. Documentation ~~~~~~~~~~~~~ - New FAQ entry, :ref:`ecosystem`. By `Deepak Cherian `_. - :ref:`assigning_values` now includes examples on how to select and assign values to a :py:class:`~xarray.DataArray` with ``.loc``. By `Chiara Lepore `_. Enhancements ~~~~~~~~~~~~ - Add an option for using a ``CFTimeIndex`` for indexing times with non-standard calendars and/or outside the Timestamp-valid range; this index enables a subset of the functionality of a standard ``pandas.DatetimeIndex``. See :ref:`CFTimeIndex` for full details. (:issue:`789`, :issue:`1084`, :issue:`1252`) By `Spencer Clark `_ with help from `Stephan Hoyer `_. - Allow for serialization of ``cftime.datetime`` objects (:issue:`789`, :issue:`1084`, :issue:`2008`, :issue:`1252`) using the standalone ``cftime`` library. By `Spencer Clark `_. - Support writing lists of strings as netCDF attributes (:issue:`2044`). By `Dan Nowacki `_. - :py:meth:`~xarray.Dataset.to_netcdf` with ``engine='h5netcdf'`` now accepts h5py encoding settings ``compression`` and ``compression_opts``, along with the NetCDF4-Python style settings ``gzip=True`` and ``complevel``. This allows using any compression plugin installed in hdf5, e.g. LZF (:issue:`1536`). By `Guido Imperiale `_. - :py:meth:`~xarray.dot` on dask-backed data will now call :func:`dask.array.einsum`. This greatly boosts speed and allows chunking on the core dims. The function now requires dask >= 0.17.3 to work on dask-backed data (:issue:`2074`). By `Guido Imperiale `_. - ``plot.line()`` learned new kwargs: ``xincrease``, ``yincrease`` that change the direction of the respective axes. By `Deepak Cherian `_. - Added the ``parallel`` option to :py:func:`open_mfdataset`. This option uses ``dask.delayed`` to parallelize the open and preprocessing steps within ``open_mfdataset``. This is expected to provide performance improvements when opening many files, particularly when used in conjunction with dask's multiprocessing or distributed schedulers (:issue:`1981`). By `Joe Hamman `_. - New ``compute`` option in :py:meth:`~xarray.Dataset.to_netcdf`, :py:meth:`~xarray.Dataset.to_zarr`, and :py:func:`~xarray.save_mfdataset` to allow for the lazy computation of netCDF and zarr stores. This feature is currently only supported by the netCDF4 and zarr backends. (:issue:`1784`). By `Joe Hamman `_. 
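For illustration, a minimal sketch of the new ``compute`` option described just
above (the file name is arbitrary, and dask plus a netCDF backend are assumed
to be installed):

.. code:: python

    import numpy as np
    import xarray as xr

    # a small dask-backed dataset
    ds = xr.Dataset({"a": (("x", "y"), np.random.rand(4, 3))}).chunk({"x": 2})

    # with compute=False the store is set up but the data is not written yet;
    # a dask.delayed object is returned instead
    delayed = ds.to_netcdf("lazy_example.nc", compute=False)

    # trigger the actual write later, e.g. on a scheduler of your choice
    delayed.compute()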
Bug fixes ~~~~~~~~~ - ``ValueError`` is raised when coordinates with the wrong size are assigned to a :py:class:`DataArray`. (:issue:`2112`) By `Keisuke Fujii `_. - Fixed a bug in :py:meth:`~xarray.DataArray.rolling` with bottleneck. Also, fixed a bug in rolling an integer dask array. (:issue:`2113`) By `Keisuke Fujii `_. - Fixed a bug where ``keep_attrs=True`` flag was neglected if :py:func:`apply_ufunc` was used with :py:class:`Variable`. (:issue:`2114`) By `Keisuke Fujii `_. - When assigning a :py:class:`DataArray` to :py:class:`Dataset`, any conflicted non-dimensional coordinates of the DataArray are now dropped. (:issue:`2068`) By `Keisuke Fujii `_. - Better error handling in ``open_mfdataset`` (:issue:`2077`). By `Stephan Hoyer `_. - ``plot.line()`` does not call ``autofmt_xdate()`` anymore. Instead it changes the rotation and horizontal alignment of labels without removing the x-axes of any other subplots in the figure (if any). By `Deepak Cherian `_. - Colorbar limits are now determined by excluding ยฑInfs too. By `Deepak Cherian `_. By `Joe Hamman `_. - Fixed ``to_iris`` to maintain lazy dask array after conversion (:issue:`2046`). By `Alex Hilson `_ and `Stephan Hoyer `_. .. _whats-new.0.10.3: v0.10.3 (13 April 2018) ------------------------ The minor release includes a number of bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.isin` and :py:meth:`~xarray.Dataset.isin` methods, which test each value in the array for whether it is contained in the supplied list, returning a bool array. See :ref:`selecting values with isin` for full details. Similar to the ``np.isin`` function. By `Maximilian Roos `_. - Some speed improvement to construct :py:class:`~xarray.computation.rolling.DataArrayRolling` object (:issue:`1993`) By `Keisuke Fujii `_. - Handle variables with different values for ``missing_value`` and ``_FillValue`` by masking values for both attributes; previously this resulted in a ``ValueError``. (:issue:`2016`) By `Ryan May `_. Bug fixes ~~~~~~~~~ - Fixed ``decode_cf`` function to operate lazily on dask arrays (:issue:`1372`). By `Ryan Abernathey `_. - Fixed labeled indexing with slice bounds given by xarray objects with datetime64 or timedelta64 dtypes (:issue:`1240`). By `Stephan Hoyer `_. - Attempting to convert an xarray.Dataset into a numpy array now raises an informative error message. By `Stephan Hoyer `_. - Fixed a bug in decode_cf_datetime where ``int32`` arrays weren't parsed correctly (:issue:`2002`). By `Fabien Maussion `_. - When calling ``xr.auto_combine()`` or ``xr.open_mfdataset()`` with a ``concat_dim``, the resulting dataset will have that one-element dimension (it was silently dropped, previously) (:issue:`1988`). By `Ben Root `_. .. _whats-new.0.10.2: v0.10.2 (13 March 2018) ----------------------- The minor release includes a number of bug-fixes and enhancements, along with one possibly **backwards incompatible change**. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The addition of ``__array_ufunc__`` for xarray objects (see below) means that NumPy `ufunc methods`_ (e.g., ``np.add.reduce``) that previously worked on ``xarray.DataArray`` objects by converting them into NumPy arrays will now raise ``NotImplementedError`` instead. In all cases, the work-around is simple: convert your objects explicitly into NumPy arrays before calling the ufunc (e.g., with ``.values``). .. 
_ufunc methods: https://numpy.org/doc/stable/reference/ufuncs.html#methods

Enhancements
~~~~~~~~~~~~

- Added :py:func:`~xarray.dot`, equivalent to :py:func:`numpy.einsum`. Also,
  :py:func:`~xarray.DataArray.dot` now supports ``dims`` option, which specifies
  the dimensions to sum over. (:issue:`1951`)
  By `Keisuke Fujii `_.
- Support for writing xarray datasets to netCDF files (netcdf4 backend only)
  when using the `dask.distributed `_ scheduler (:issue:`1464`).
  By `Joe Hamman `_.
- Support lazy vectorized-indexing. After this change, flexible indexing such as
  orthogonal/vectorized indexing becomes possible for all the backend arrays.
  Lazy ``transpose`` is now also supported. (:issue:`1897`)
  By `Keisuke Fujii `_.
- Implemented NumPy's ``__array_ufunc__`` protocol for all xarray objects
  (:issue:`1617`). This enables using NumPy ufuncs directly on
  ``xarray.Dataset`` objects with recent versions of NumPy (v1.13 and newer):

  .. code:: python

      ds = xr.Dataset({"a": 1})
      np.sin(ds)

  This obviates the need for the ``xarray.ufuncs`` module, which will be
  deprecated in the future when xarray drops support for older versions of
  NumPy. By `Stephan Hoyer `_.
- Improve :py:func:`~xarray.DataArray.rolling` logic.
  :py:func:`~xarray.computation.rolling.DataArrayRolling` object now supports
  :py:func:`~xarray.computation.rolling.DataArrayRolling.construct` method that
  returns a view of the DataArray / Dataset object with the rolling-window
  dimension added to the last axis. This enables more flexible operation, such
  as strided rolling, windowed rolling, ND-rolling, short-time FFT and
  convolution. (:issue:`1831`, :issue:`1142`, :issue:`819`)
  By `Keisuke Fujii `_.
- :py:func:`~plot.line()` learned to make plots with data on x-axis if so
  specified. (:issue:`575`)
  By `Deepak Cherian `_.

Bug fixes
~~~~~~~~~

- Raise an informative error message when using ``apply_ufunc`` with numpy
  v1.11 (:issue:`1956`).
  By `Stephan Hoyer `_.
- Fix the precision drop after indexing datetime64 arrays (:issue:`1932`).
  By `Keisuke Fujii `_.
- Silenced irrelevant warnings issued by ``open_rasterio`` (:issue:`1964`).
  By `Stephan Hoyer `_.
- Fix kwarg ``colors`` clashing with auto-inferred ``cmap`` (:issue:`1461`)
  By `Deepak Cherian `_.
- Fix :py:func:`~xarray.plot.imshow` error when passed an RGB array with size
  one in a spatial dimension.
  By `Zac Hatfield-Dodds `_.

.. _whats-new.0.10.1:

v0.10.1 (25 February 2018)
--------------------------

The minor release includes a number of bug-fixes and backwards compatible
enhancements.

Documentation
~~~~~~~~~~~~~

- Added a new guide on :ref:`contributing` (:issue:`640`)
  By `Joe Hamman `_.
- Added apply_ufunc example to :ref:`/examples/weather-data.ipynb#Toy-weather-data`
  (:issue:`1844`).
  By `Liam Brannigan `_.
- New entry ``Why don't aggregations return Python scalars?`` in the :ref:`faq`
  (:issue:`1726`).
  By `0x0L `_.

Enhancements
~~~~~~~~~~~~

**New functions and methods**:

- Added :py:meth:`DataArray.to_iris` and :py:meth:`DataArray.from_iris` for
  converting data arrays to and from Iris_ Cubes with the same data and
  coordinates (:issue:`621` and :issue:`37`).
  By `Neil Parley `_ and `Duncan Watson-Parris `_.
- Experimental support for using `Zarr`_ as storage layer for xarray
  (:issue:`1223`).
  By `Ryan Abernathey `_ and `Joe Hamman `_.
- New :py:meth:`~xarray.DataArray.rank` on arrays and datasets. Requires
  bottleneck (:issue:`1731`).
  By `0x0L `_.
- ``.dt`` accessor can now ceil, floor and round timestamps to specified
  frequency.
  By `Deepak Cherian `_.
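For illustration, a minimal sketch of the ``.dt`` rounding accessors described
in the entry above (the variable name and frequencies are only examples):

.. code:: python

    import pandas as pd
    import xarray as xr

    times = xr.DataArray(
        pd.date_range("2000-01-01 01:23", periods=3, freq="7h"), dims="time"
    )

    times.dt.floor("D")  # floor each timestamp to the start of its day
    times.dt.ceil("h")  # round each timestamp up to the next full hour
    times.dt.round("30min")  # round to the nearest 30 minutes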
**Plotting enhancements**:

- :func:`xarray.plot.imshow` now handles RGB and RGBA images. Saturation can be
  adjusted with ``vmin`` and ``vmax``, or with ``robust=True``.
  By `Zac Hatfield-Dodds `_.
- :py:func:`~plot.contourf()` learned to contour 2D variables that have both a
  1D coordinate (e.g. time) and a 2D coordinate (e.g. depth as a function of
  time) (:issue:`1737`).
  By `Deepak Cherian `_.
- :py:func:`~plot.plot()` rotates x-axis ticks if x-axis is time.
  By `Deepak Cherian `_.
- :py:func:`~plot.line()` can draw multiple lines if provided with a 2D
  variable.
  By `Deepak Cherian `_.

**Other enhancements**:

- Reduce methods such as :py:func:`DataArray.sum()` now handle object-type
  arrays.

  .. code:: python

      da = xr.DataArray(np.array([True, False, np.nan], dtype=object), dims="x")
      da.sum()

  (:issue:`1866`)
  By `Keisuke Fujii `_.
- Reduce methods such as :py:func:`DataArray.sum()` now accept ``dtype``
  arguments. (:issue:`1838`)
  By `Keisuke Fujii `_.
- Added nodatavals attribute to DataArray when using
  :py:func:`~xarray.open_rasterio`. (:issue:`1736`).
  By `Alan Snow `_.
- Use ``pandas.Grouper`` class in xarray resample methods rather than the
  deprecated ``pandas.TimeGrouper`` class (:issue:`1766`).
  By `Joe Hamman `_.
- Experimental support for parsing ENVI metadata to coordinates and attributes
  in :py:func:`xarray.open_rasterio`.
  By `Matti Eskelinen `_.
- Reduce memory usage when decoding a variable with a scale_factor, by
  converting 8-bit and 16-bit integers to float32 instead of float64
  (:pull:`1840`), and keeping float16 and float32 as float32 (:issue:`1842`).
  Correspondingly, encoded variables may also be saved with a smaller dtype.
  By `Zac Hatfield-Dodds `_.
- Speed of reindexing/alignment with dask array is orders of magnitude faster
  when inserting missing values (:issue:`1847`).
  By `Stephan Hoyer `_.
- Fix ``axis`` keyword ignored when applying ``np.squeeze`` to ``DataArray``
  (:issue:`1487`).
  By `Florian Pinault `_.
- ``netcdf4-python`` has moved its time handling in the ``netcdftime`` module to
  a standalone package (`netcdftime`_). As such, xarray now considers
  `netcdftime`_ an optional dependency. One benefit of this change is that it
  allows for encoding/decoding of datetimes with non-standard calendars without
  the ``netcdf4-python`` dependency (:issue:`1084`).
  By `Joe Hamman `_.

.. _Zarr: http://zarr.readthedocs.io/
.. _Iris: http://scitools-iris.readthedocs.io
.. _netcdftime: https://unidata.github.io/netcdftime

Bug fixes
~~~~~~~~~

- Rolling aggregation with ``center=True`` option now gives the same result as
  pandas, including the last element (:issue:`1046`).
  By `Keisuke Fujii `_.
- Support indexing with a 0d-np.ndarray (:issue:`1921`).
  By `Keisuke Fujii `_.
- Added warning in api.py of a netCDF4 bug that occurs when the filepath has 88
  characters (:issue:`1745`).
  By `Liam Brannigan `_.
- Fixed encoding of multi-dimensional coordinates in
  :py:meth:`~Dataset.to_netcdf` (:issue:`1763`).
  By `Mike Neish `_.
- Fixed chunking with non-file-based rasterio datasets (:issue:`1816`) and
  refactored rasterio test suite.
  By `Ryan Abernathey `_.
- Bug fix in open_dataset(engine='pydap') (:issue:`1775`)
  By `Keisuke Fujii `_.
- Bug fix in vectorized assignment (:issue:`1743`, :issue:`1744`).
Now item assignment to :py:meth:`DataArray.__setitem__` checks coordinates of target, destination and keys. If there are any conflict among these coordinates, ``IndexError`` will be raised. By `Keisuke Fujii `_. - Properly point ``DataArray.__dask_scheduler__`` to ``dask.threaded.get``. By `Matthew Rocklin `_. - Bug fixes in :py:meth:`DataArray.plot.imshow`: all-NaN arrays and arrays with size one in some dimension can now be plotted, which is good for exploring satellite imagery (:issue:`1780`). By `Zac Hatfield-Dodds `_. - Fixed ``UnboundLocalError`` when opening netCDF file (:issue:`1781`). By `Stephan Hoyer `_. - The ``variables``, ``attrs``, and ``dimensions`` properties have been deprecated as part of a bug fix addressing an issue where backends were unintentionally loading the datastores data and attributes repeatedly during writes (:issue:`1798`). By `Joe Hamman `_. - Compatibility fixes to plotting module for NumPy 1.14 and pandas 0.22 (:issue:`1813`). By `Joe Hamman `_. - Bug fix in encoding coordinates with ``{'_FillValue': None}`` in netCDF metadata (:issue:`1865`). By `Chris Roth `_. - Fix indexing with lists for arrays loaded from netCDF files with ``engine='h5netcdf`` (:issue:`1864`). By `Stephan Hoyer `_. - Corrected a bug with incorrect coordinates for non-georeferenced geotiff files (:issue:`1686`). Internally, we now use the rasterio coordinate transform tool instead of doing the computations ourselves. A ``parse_coordinates`` kwarg has been added to :py:func:`~open_rasterio` (set to ``True`` per default). By `Fabien Maussion `_. - The colors of discrete colormaps are now the same regardless if ``seaborn`` is installed or not (:issue:`1896`). By `Fabien Maussion `_. - Fixed dtype promotion rules in :py:func:`where` and :py:func:`concat` to match pandas (:issue:`1847`). A combination of strings/numbers or unicode/bytes now promote to object dtype, instead of strings or unicode. By `Stephan Hoyer `_. - Fixed bug where :py:meth:`~xarray.DataArray.isnull` was loading data stored as dask arrays (:issue:`1937`). By `Joe Hamman `_. .. _whats-new.0.10.0: v0.10.0 (20 November 2017) -------------------------- This is a major release that includes bug fixes, new features and a few backwards incompatible changes. Highlights include: - Indexing now supports broadcasting over dimensions, similar to NumPy's vectorized indexing (but better!). - :py:meth:`~DataArray.resample` has a new groupby-like API like pandas. - :py:func:`~xarray.apply_ufunc` facilitates wrapping and parallelizing functions written for NumPy arrays. - Performance improvements, particularly for dask and :py:func:`open_mfdataset`. Breaking changes ~~~~~~~~~~~~~~~~ - xarray now supports a form of vectorized indexing with broadcasting, where the result of indexing depends on dimensions of indexers, e.g., ``array.sel(x=ind)`` with ``ind.dims == ('y',)``. Alignment between coordinates on indexed and indexing objects is also now enforced. Due to these changes, existing uses of xarray objects to index other xarray objects will break in some cases. The new indexing API is much more powerful, supporting outer, diagonal and vectorized indexing in a single interface. The ``isel_points`` and ``sel_points`` methods are deprecated, since they are now redundant with the ``isel`` / ``sel`` methods. See :ref:`vectorized_indexing` for the details (:issue:`1444`, :issue:`1436`). By `Keisuke Fujii `_ and `Stephan Hoyer `_. 
- A new resampling interface to match pandas' groupby-like API was added to
  :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample`
  (:issue:`1272`). :ref:`Timeseries resampling ` is fully supported for data
  with arbitrary dimensions as is both downsampling and upsampling (including
  linear, quadratic, cubic, and spline interpolation).

  Old syntax:

  .. jupyter-input::

      ds.resample("24H", dim="time", how="max")

  New syntax:

  .. jupyter-input::

      ds.resample(time="24H").max()

  Note that both versions are currently supported, but using the old syntax will
  produce a warning encouraging users to adopt the new syntax.
  By `Daniel Rothenberg `_.
- Calling ``repr()`` or printing xarray objects at the command line or in a
  Jupyter Notebook will no longer automatically compute dask variables or load
  data on arrays lazily loaded from disk (:issue:`1522`).
  By `Guido Imperiale `_.
- Supplying ``coords`` as a dictionary to the ``DataArray`` constructor without
  also supplying an explicit ``dims`` argument is no longer supported. This
  behavior was deprecated in version 0.9 but will now raise an error
  (:issue:`727`).
- Several existing features have been deprecated and will change to new
  behavior in xarray v0.11. If you use any of them with xarray v0.10, you
  should see a ``FutureWarning`` that describes how to update your code:

  - ``Dataset.T`` has been deprecated as an alias for ``Dataset.transpose()``
    (:issue:`1232`). In the next major version of xarray, it will provide
    shortcut lookup for variables or attributes with name ``'T'``.
  - ``DataArray.__contains__`` (e.g., ``key in data_array``) currently checks
    for membership in ``DataArray.coords``. In the next major version of
    xarray, it will check membership in the array data found in
    ``DataArray.values`` instead (:issue:`1267`).
  - Direct iteration over and counting a ``Dataset`` (e.g., ``[k for k in ds]``,
    ``ds.keys()``, ``ds.values()``, ``len(ds)`` and ``if ds``) currently
    includes all variables, both data and coordinates. For improved usability
    and consistency with pandas, in the next major version of xarray these will
    change to only include data variables (:issue:`884`). Use ``ds.variables``,
    ``ds.data_vars`` or ``ds.coords`` as alternatives.

- Changes to minimum versions of dependencies:

  - Old numpy < 1.11 and pandas < 0.18 are no longer supported (:issue:`1512`).
    By `Keisuke Fujii `_.
  - The minimum supported version of bottleneck has increased to 1.1
    (:issue:`1279`).
    By `Joe Hamman `_.

Enhancements
~~~~~~~~~~~~

**New functions/methods**

- New helper function :py:func:`~xarray.apply_ufunc` for wrapping functions
  written to work on NumPy arrays to support labels on xarray objects
  (:issue:`770`). ``apply_ufunc`` also supports automatic parallelization for
  many functions with dask. See :ref:`compute.wrapping-custom` and
  :ref:`dask.automatic-parallelization` for details (and a short illustrative
  sketch after this list).
  By `Stephan Hoyer `_.
- Added new method :py:meth:`Dataset.to_dask_dataframe`, which converts a
  dataset into a dask dataframe. This allows lazy loading of data from a
  dataset containing dask arrays (:issue:`1462`).
  By `James Munroe `_.
- New function :py:func:`~xarray.where` for conditionally switching between
  values in xarray objects, like :py:func:`numpy.where`:

  .. jupyter-input::

      import xarray as xr

      arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y"))

      xr.where(arr % 2, "even", "odd")

  .. jupyter-output::

      array([['even', 'odd', 'even'],
             ['odd', 'even', 'odd']], dtype='<U4')

- Added :py:func:`~xarray.show_versions` function to aid in debugging
  (:issue:`1485`).
  By `Joe Hamman `_.
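As the illustrative sketch referenced above, here is a minimal, hypothetical
example of wrapping a plain NumPy function with ``apply_ufunc`` (the function
and variable names are invented for illustration):

.. code:: python

    import numpy as np
    import xarray as xr


    def magnitude(a, b):
        # a plain NumPy function that knows nothing about xarray labels
        return np.sqrt(a**2 + b**2)


    u = xr.DataArray(np.array([1.0, 2.0, 3.0]), dims="x")
    v = xr.DataArray(np.array([3.0, 2.0, 1.0]), dims="x")

    # apply_ufunc broadcasts by dimension name and re-attaches the labels
    xr.apply_ufunc(magnitude, u, v)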
**Performance improvements** - :py:func:`~xarray.concat` was computing variables that aren't in memory (e.g. dask-based) multiple times; :py:func:`~xarray.open_mfdataset` was loading them multiple times from disk. Now, both functions will instead load them at most once and, if they do, store them in memory in the concatenated array/dataset (:issue:`1521`). By `Guido Imperiale `_. - Speed-up (x 100) of ``xarray.conventions.decode_cf_datetime``. By `Christian Chwala `_. **IO related improvements** - Unicode strings (``str`` on Python 3) are now round-tripped successfully even when written as character arrays (e.g., as netCDF3 files or when using ``engine='scipy'``) (:issue:`1638`). This is controlled by the ``_Encoding`` attribute convention, which is also understood directly by the netCDF4-Python interface. See :ref:`io.string-encoding` for full details. By `Stephan Hoyer `_. - Support for ``data_vars`` and ``coords`` keywords from :py:func:`~xarray.concat` added to :py:func:`~xarray.open_mfdataset` (:issue:`438`). Using these keyword arguments can significantly reduce memory usage and increase speed. By `Oleksandr Huziy `_. - Support for :py:class:`pathlib.Path` objects added to :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_mfdataset`, ``xarray.to_netcdf``, and :py:func:`~xarray.save_mfdataset` (:issue:`799`): .. jupyter-input:: from pathlib import Path # In Python 2, use pathlib2! data_dir = Path("data/") one_file = data_dir / "dta_for_month_01.nc" xr.open_dataset(one_file) By `Willi Rath `_. - You can now explicitly disable any default ``_FillValue`` (``NaN`` for floating point values) by passing the encoding ``{'_FillValue': None}`` (:issue:`1598`). By `Stephan Hoyer `_. - More attributes available in :py:attr:`~xarray.Dataset.attrs` dictionary when raster files are opened with :py:func:`~xarray.open_rasterio`. By `Greg Brener `_. - Support for NetCDF files using an ``_Unsigned`` attribute to indicate that a a signed integer data type should be interpreted as unsigned bytes (:issue:`1444`). By `Eric Bruning `_. - Support using an existing, opened netCDF4 ``Dataset`` with :py:class:`~xarray.backends.NetCDF4DataStore`. This permits creating an :py:class:`~xarray.Dataset` from a netCDF4 ``Dataset`` that has been opened using other means (:issue:`1459`). By `Ryan May `_. - Changed :py:class:`~xarray.backends.PydapDataStore` to take a Pydap dataset. This permits opening Opendap datasets that require authentication, by instantiating a Pydap dataset with a session object. Also added :py:meth:`xarray.backends.PydapDataStore.open` which takes a url and session object (:issue:`1068`). By `Philip Graae `_. - Support reading and writing unlimited dimensions with h5netcdf (:issue:`1636`). By `Joe Hamman `_. **Other improvements** - Added ``_ipython_key_completions_`` to xarray objects, to enable autocompletion for dictionary-like access in IPython, e.g., ``ds['tem`` + tab -> ``ds['temperature']`` (:issue:`1628`). By `Keisuke Fujii `_. - Support passing keyword arguments to ``load``, ``compute``, and ``persist`` methods. Any keyword arguments supplied to these methods are passed on to the corresponding dask function (:issue:`1523`). By `Joe Hamman `_. - Encoding attributes are now preserved when xarray objects are concatenated. The encoding is copied from the first object (:issue:`1297`). By `Joe Hamman `_ and `Gerrit Holl `_. - Support applying rolling window operations using bottleneck's moving window functions on data stored as dask arrays (:issue:`1279`). By `Joe Hamman `_. 
- Experimental support for the Dask collection interface (:issue:`1674`). By `Matthew Rocklin `_. Bug fixes ~~~~~~~~~ - Suppress ``RuntimeWarning`` issued by ``numpy`` for "invalid value comparisons" (e.g. ``NaN``). Xarray now behaves similarly to pandas in its treatment of binary and unary operations on objects with NaNs (:issue:`1657`). By `Joe Hamman `_. - Unsigned int support for reduce methods with ``skipna=True`` (:issue:`1562`). By `Keisuke Fujii `_. - Fixes to ensure xarray works properly with pandas 0.21: - Fix :py:meth:`~xarray.DataArray.isnull` method (:issue:`1549`). - :py:meth:`~xarray.DataArray.to_series` and :py:meth:`~xarray.Dataset.to_dataframe` should not return a ``pandas.MultiIndex`` for 1D data (:issue:`1548`). - Fix plotting with datetime64 axis labels (:issue:`1661`). By `Stephan Hoyer `_. - :py:func:`~xarray.open_rasterio` method now shifts the rasterio coordinates so that they are centered in each pixel (:issue:`1468`). By `Greg Brener `_. - :py:meth:`~xarray.Dataset.rename` method now doesn't throw errors if some ``Variable`` is renamed to the same name as another ``Variable`` as long as that other ``Variable`` is also renamed (:issue:`1477`). This method now does throw when two ``Variables`` would end up with the same name after the rename (since one of them would get overwritten in this case). By `Prakhar Goel `_. - Fix :py:func:`xarray.testing.assert_allclose` to actually use ``atol`` and ``rtol`` arguments when called on ``DataArray`` objects (:issue:`1488`). By `Stephan Hoyer `_. - xarray ``quantile`` methods now properly raise a ``TypeError`` when applied to objects with data stored as ``dask`` arrays (:issue:`1529`). By `Joe Hamman `_. - Fix positional indexing to allow the use of unsigned integers (:issue:`1405`). By `Joe Hamman `_ and `Gerrit Holl `_. - Creating a :py:class:`Dataset` now raises ``MergeError`` if a coordinate shares a name with a dimension but is comprised of arbitrary dimensions (:issue:`1120`). By `Joe Hamman `_. - :py:func:`~xarray.open_rasterio` method now skips rasterio's ``crs`` attribute if its value is ``None`` (:issue:`1520`). By `Leevi Annala `_. - Fix :py:func:`xarray.DataArray.to_netcdf` to return bytes when no path is provided (:issue:`1410`). By `Joe Hamman `_. - Fix :py:func:`xarray.save_mfdataset` to properly raise an informative error when objects other than ``Dataset`` are provided (:issue:`1555`). By `Joe Hamman `_. - :py:func:`xarray.Dataset.copy` would not preserve the encoding property (:issue:`1586`). By `Guido Imperiale `_. - :py:func:`xarray.concat` would eagerly load dask variables into memory if the first argument was a numpy variable (:issue:`1588`). By `Guido Imperiale `_. - Fix bug in :py:meth:`~xarray.Dataset.to_netcdf` when writing in append mode (:issue:`1215`). By `Joe Hamman `_. - Fix ``netCDF4`` backend to properly roundtrip the ``shuffle`` encoding option (:issue:`1606`). By `Joe Hamman `_. - Fix bug when using ``pytest`` class decorators to skipping certain unittests. The previous behavior unintentionally causing additional tests to be skipped (:issue:`1531`). By `Joe Hamman `_. - Fix pynio backend for upcoming release of pynio with Python 3 support (:issue:`1611`). By `Ben Hillman `_. - Fix ``seaborn`` import warning for Seaborn versions 0.8 and newer when the ``apionly`` module was deprecated. (:issue:`1633`). By `Joe Hamman `_. - Fix COMPAT: MultiIndex checking is fragile (:issue:`1833`). By `Florian Pinault `_. - Fix ``rasterio`` backend for Rasterio versions 1.0alpha10 and newer. (:issue:`1641`). 
By `Chris Holden `_. Bug fixes after rc1 ~~~~~~~~~~~~~~~~~~~ - Suppress warning in IPython autocompletion, related to the deprecation of ``.T`` attributes (:issue:`1675`). By `Keisuke Fujii `_. - Fix a bug in lazily-indexing netCDF array. (:issue:`1688`) By `Keisuke Fujii `_. - (Internal bug) MemoryCachedArray now supports the orthogonal indexing. Also made some internal cleanups around array wrappers (:issue:`1429`). By `Keisuke Fujii `_. - (Internal bug) MemoryCachedArray now always wraps ``np.ndarray`` by ``NumpyIndexingAdapter``. (:issue:`1694`) By `Keisuke Fujii `_. - Fix importing xarray when running Python with ``-OO`` (:issue:`1706`). By `Stephan Hoyer `_. - Saving a netCDF file with a coordinates with a spaces in its names now raises an appropriate warning (:issue:`1689`). By `Stephan Hoyer `_. - Fix two bugs that were preventing dask arrays from being specified as coordinates in the DataArray constructor (:issue:`1684`). By `Joe Hamman `_. - Fixed ``apply_ufunc`` with ``dask='parallelized'`` for scalar arguments (:issue:`1697`). By `Stephan Hoyer `_. - Fix "Chunksize cannot exceed dimension size" error when writing netCDF4 files loaded from disk (:issue:`1225`). By `Stephan Hoyer `_. - Validate the shape of coordinates with names matching dimensions in the DataArray constructor (:issue:`1709`). By `Stephan Hoyer `_. - Raise ``NotImplementedError`` when attempting to save a MultiIndex to a netCDF file (:issue:`1547`). By `Stephan Hoyer `_. - Remove netCDF dependency from rasterio backend tests. By `Matti Eskelinen `_ Bug fixes after rc2 ~~~~~~~~~~~~~~~~~~~ - Fixed unexpected behavior in ``Dataset.set_index()`` and ``DataArray.set_index()`` introduced by pandas 0.21.0. Setting a new index with a single variable resulted in 1-level ``pandas.MultiIndex`` instead of a simple ``pandas.Index`` (:issue:`1722`). By `Benoit Bovy `_. - Fixed unexpected memory loading of backend arrays after ``print``. (:issue:`1720`). By `Keisuke Fujii `_. .. _whats-new.0.9.6: v0.9.6 (8 June 2017) -------------------- This release includes a number of backwards compatible enhancements and bug fixes. Enhancements ~~~~~~~~~~~~ - New :py:meth:`~xarray.Dataset.sortby` method to ``Dataset`` and ``DataArray`` that enable sorting along dimensions (:issue:`967`). See :ref:`the docs ` for examples. By `Chun-Wei Yuan `_ and `Kyle Heuton `_. - Add ``.dt`` accessor to DataArrays for computing datetime-like properties for the values they contain, similar to ``pandas.Series`` (:issue:`358`). By `Daniel Rothenberg `_. - Renamed internal dask arrays created by ``open_dataset`` to match new dask conventions (:issue:`1343`). By `Ryan Abernathey `_. - :py:meth:`~xarray.as_variable` is now part of the public API (:issue:`1303`). By `Benoit Bovy `_. - :py:func:`~xarray.align` now supports ``join='exact'``, which raises an error instead of aligning when indexes to be aligned are not equal. By `Stephan Hoyer `_. - New function :py:func:`~xarray.open_rasterio` for opening raster files with the `rasterio `_ library. See :ref:`the docs ` for details. By `Joe Hamman `_, `Nic Wayand `_ and `Fabien Maussion `_ Bug fixes ~~~~~~~~~ - Fix error from repeated indexing of datasets loaded from disk (:issue:`1374`). By `Stephan Hoyer `_. - Fix a bug where ``.isel_points`` wrongly assigns unselected coordinate to ``data_vars``. By `Keisuke Fujii `_. - Tutorial datasets are now checked against a reference MD5 sum to confirm successful download (:issue:`1392`). By `Matthew Gidden `_. 
- ``DataArray.chunk()`` now accepts dask specific kwargs like
  ``Dataset.chunk()`` does.
  By `Fabien Maussion `_.
- Support for ``engine='pydap'`` with recent releases of Pydap (3.2.2+),
  including on Python 3 (:issue:`1174`).

Documentation
~~~~~~~~~~~~~

- A new `gallery `_ allows adding interactive examples to the
  documentation.
  By `Fabien Maussion `_.

Testing
~~~~~~~

- Fix test suite failure caused by changes to ``pandas.cut`` function
  (:issue:`1386`).
  By `Ryan Abernathey `_.
- Enhanced test suite by use of ``@network`` decorator, which is controlled via
  ``--run-network-tests`` command line argument to ``py.test`` (:issue:`1393`).
  By `Matthew Gidden `_.

.. _whats-new.0.9.5:

v0.9.5 (17 April, 2017)
-----------------------

Remove an inadvertently introduced print statement.

.. _whats-new.0.9.3:

v0.9.3 (16 April, 2017)
-----------------------

This minor release includes bug-fixes and backwards compatible enhancements.

Enhancements
~~~~~~~~~~~~

- New :py:meth:`~xarray.DataArray.persist` method to Datasets and DataArrays to
  enable persisting data in distributed memory when using Dask (:issue:`1344`).
  By `Matthew Rocklin `_.
- New :py:meth:`~xarray.DataArray.expand_dims` method for ``DataArray`` and
  ``Dataset`` (:issue:`1326`).
  By `Keisuke Fujii `_.

Bug fixes
~~~~~~~~~

- Fix ``.where()`` with ``drop=True`` when arguments do not have indexes
  (:issue:`1350`). This bug, introduced in v0.9, resulted in xarray producing
  incorrect results in some cases.
  By `Stephan Hoyer `_.
- Fixed writing to file-like objects with :py:meth:`~xarray.Dataset.to_netcdf`
  (:issue:`1320`).
  `Stephan Hoyer `_.
- Fixed explicitly setting ``engine='scipy'`` with ``to_netcdf`` when not
  providing a path (:issue:`1321`).
  `Stephan Hoyer `_.
- Fixed ``open_dataarray`` not properly passing its parameters to
  ``open_dataset`` (:issue:`1359`).
  `Stephan Hoyer `_.
- Ensure test suite works when run from an installed version of xarray
  (:issue:`1336`). Use ``@pytest.mark.slow`` instead of a custom flag to mark
  slow tests.
  By `Stephan Hoyer `_

.. _whats-new.0.9.2:

v0.9.2 (2 April 2017)
---------------------

The minor release includes bug-fixes and backwards compatible enhancements.

Enhancements
~~~~~~~~~~~~

- ``.rolling()`` on Dataset is now supported (:issue:`859`).
  By `Keisuke Fujii `_.
- When bottleneck version 1.1 or later is installed, use bottleneck for rolling
  ``var``, ``argmin``, ``argmax``, and ``rank`` computations. Also, rolling
  median now accepts a ``min_periods`` argument (:issue:`1276`).
  By `Joe Hamman `_.
- When ``.plot()`` is called on a 2D DataArray and only one dimension is
  specified with ``x=`` or ``y=``, the other dimension is now guessed
  (:issue:`1291`).
  By `Vincent Noel `_.
- Added new method :py:meth:`~Dataset.assign_attrs` to ``DataArray`` and
  ``Dataset``, a chained-method compatible implementation of the
  ``dict.update`` method on attrs (:issue:`1281`).
  By `Henry S. Harrison `_.
- Added new ``autoclose=True`` argument to :py:func:`~xarray.open_mfdataset` to
  explicitly close opened files when not in use to prevent occurrence of an OS
  Error related to too many open files (:issue:`1198`). Note, the default is
  ``autoclose=False``, which is consistent with previous xarray behavior.
  By `Phillip J. Wolfram `_.
- The ``repr()`` of ``Dataset`` and ``DataArray`` attributes uses a similar
  format to coordinates and variables, with vertically aligned entries
  truncated to fit on a single line (:issue:`1319`).
Hopefully this will stop people writing ``data.attrs = {}`` and discarding metadata in notebooks for the sake of cleaner output. The full metadata is still available as ``data.attrs``. By `Zac Hatfield-Dodds `_. - Enhanced tests suite by use of ``@slow`` and ``@flaky`` decorators, which are controlled via ``--run-flaky`` and ``--skip-slow`` command line arguments to ``py.test`` (:issue:`1336`). By `Stephan Hoyer `_ and `Phillip J. Wolfram `_. - New aggregation on rolling objects :py:meth:`~computation.rolling.DataArrayRolling.count` which providing a rolling count of valid values (:issue:`1138`). Bug fixes ~~~~~~~~~ - Rolling operations now keep preserve original dimension order (:issue:`1125`). By `Keisuke Fujii `_. - Fixed ``sel`` with ``method='nearest'`` on Python 2.7 and 64-bit Windows (:issue:`1140`). `Stephan Hoyer `_. - Fixed ``where`` with ``drop='True'`` for empty masks (:issue:`1341`). By `Stephan Hoyer `_ and `Phillip J. Wolfram `_. .. _whats-new.0.9.1: v0.9.1 (30 January 2017) ------------------------ Renamed the "Unindexed dimensions" section in the ``Dataset`` and ``DataArray`` repr (added in v0.9.0) to "Dimensions without coordinates" (:issue:`1199`). .. _whats-new.0.9.0: v0.9.0 (25 January 2017) ------------------------ This major release includes five months worth of enhancements and bug fixes from 24 contributors, including some significant changes that are not fully backwards compatible. Highlights include: - Coordinates are now *optional* in the xarray data model, even for dimensions. - Changes to caching, lazy loading and pickling to improve xarray's experience for parallel computing. - Improvements for accessing and manipulating ``pandas.MultiIndex`` levels. - Many new methods and functions, including :py:meth:`~DataArray.quantile`, :py:meth:`~DataArray.cumsum`, :py:meth:`~DataArray.cumprod` :py:attr:`~DataArray.combine_first` :py:meth:`~DataArray.set_index`, :py:meth:`~DataArray.reset_index`, :py:meth:`~DataArray.reorder_levels`, :py:func:`~xarray.full_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.ones_like` :py:func:`~xarray.open_dataarray`, :py:meth:`~DataArray.compute`, :py:meth:`Dataset.info`, :py:func:`testing.assert_equal`, :py:func:`testing.assert_identical`, and :py:func:`testing.assert_allclose`. Breaking changes ~~~~~~~~~~~~~~~~ - Index coordinates for each dimensions are now optional, and no longer created by default :issue:`1017`. You can identify such dimensions without coordinates by their appearance in list of "Dimensions without coordinates" in the ``Dataset`` or ``DataArray`` repr: .. jupyter-input:: xr.Dataset({"foo": (("x", "y"), [[1, 2]])}) .. jupyter-output:: Dimensions: (x: 1, y: 2) Dimensions without coordinates: x, y Data variables: foo (x, y) int64 1 2 This has a number of implications: - :py:func:`~align` and :py:meth:`~Dataset.reindex` can now error, if dimensions labels are missing and dimensions have different sizes. - Because pandas does not support missing indexes, methods such as ``to_dataframe``/``from_dataframe`` and ``stack``/``unstack`` no longer roundtrip faithfully on all inputs. Use :py:meth:`~Dataset.reset_index` to remove undesired indexes. - ``Dataset.__delitem__`` and :py:meth:`~Dataset.drop` no longer delete/drop variables that have dimensions matching a deleted/dropped variable. - ``DataArray.coords.__delitem__`` is now allowed on variables matching dimension names. - ``.sel`` and ``.loc`` now handle indexing along a dimension without coordinate labels by doing integer based indexing. 
See :ref:`indexing.missing_coordinates` for an example. - :py:attr:`~Dataset.indexes` is no longer guaranteed to include all dimensions names as keys. The new method :py:meth:`~Dataset.get_index` has been added to get an index for a dimension guaranteed, falling back to produce a default ``RangeIndex`` if necessary. - The default behavior of ``merge`` is now ``compat='no_conflicts'``, so some merges will now succeed in cases that previously raised ``xarray.MergeError``. Set ``compat='broadcast_equals'`` to restore the previous default. See :ref:`combining.no_conflicts` for more details. - Reading :py:attr:`~DataArray.values` no longer always caches values in a NumPy array :issue:`1128`. Caching of ``.values`` on variables read from netCDF files on disk is still the default when :py:func:`open_dataset` is called with ``cache=True``. By `Guido Imperiale `_ and `Stephan Hoyer `_. - Pickling a ``Dataset`` or ``DataArray`` linked to a file on disk no longer caches its values into memory before pickling (:issue:`1128`). Instead, pickle stores file paths and restores objects by reopening file references. This enables preliminary, experimental use of xarray for opening files with `dask.distributed `_. By `Stephan Hoyer `_. - Coordinates used to index a dimension are now loaded eagerly into :py:class:`pandas.Index` objects, instead of loading the values lazily. By `Guido Imperiale `_. - Automatic levels for 2d plots are now guaranteed to land on ``vmin`` and ``vmax`` when these kwargs are explicitly provided (:issue:`1191`). The automated level selection logic also slightly changed. By `Fabien Maussion `_. - ``DataArray.rename()`` behavior changed to strictly change the ``DataArray.name`` if called with string argument, or strictly change coordinate names if called with dict-like argument. By `Markus Gonser `_. - By default ``to_netcdf()`` add a ``_FillValue = NaN`` attributes to float types. By `Frederic Laliberte `_. - ``repr`` on ``DataArray`` objects uses an shortened display for NumPy array data that is less likely to overflow onto multiple pages (:issue:`1207`). By `Stephan Hoyer `_. - xarray no longer supports python 3.3, versions of dask prior to v0.9.0, or versions of bottleneck prior to v1.0. Deprecations ~~~~~~~~~~~~ - Renamed the ``Coordinate`` class from xarray's low level API to :py:class:`~xarray.IndexVariable`. ``Variable.to_variable`` and ``Variable.to_coord`` have been renamed to :py:meth:`~xarray.Variable.to_base_variable` and :py:meth:`~xarray.Variable.to_index_variable`. - Deprecated supplying ``coords`` as a dictionary to the ``DataArray`` constructor without also supplying an explicit ``dims`` argument. The old behavior encouraged relying on the iteration order of dictionaries, which is a bad practice (:issue:`727`). - Removed a number of methods deprecated since v0.7.0 or earlier: ``load_data``, ``vars``, ``drop_vars``, ``dump``, ``dumps`` and the ``variables`` keyword argument to ``Dataset``. - Removed the dummy module that enabled ``import xray``. Enhancements ~~~~~~~~~~~~ - Added new method :py:meth:`~DataArray.combine_first` to ``DataArray`` and ``Dataset``, based on the pandas method of the same name (see :ref:`combine`). By `Chun-Wei Yuan `_. - Added the ability to change default automatic alignment (arithmetic_join="inner") for binary operations via :py:func:`~xarray.set_options()` (see :ref:`math automatic alignment`). By `Chun-Wei Yuan `_. - Add checking of ``attr`` names and values when saving to netCDF, raising useful error messages if they are invalid. 
(:issue:`911`). By `Robin Wilson `_. - Added ability to save ``DataArray`` objects directly to netCDF files using :py:meth:`~xarray.DataArray.to_netcdf`, and to load directly from netCDF files using :py:func:`~xarray.open_dataarray` (:issue:`915`). These remove the need to convert a ``DataArray`` to a ``Dataset`` before saving as a netCDF file, and deals with names to ensure a perfect 'roundtrip' capability. By `Robin Wilson `_. - Multi-index levels are now accessible as "virtual" coordinate variables, e.g., ``ds['time']`` can pull out the ``'time'`` level of a multi-index (see :ref:`coordinates`). ``sel`` also accepts providing multi-index levels as keyword arguments, e.g., ``ds.sel(time='2000-01')`` (see :ref:`multi-level indexing`). By `Benoit Bovy `_. - Added ``set_index``, ``reset_index`` and ``reorder_levels`` methods to easily create and manipulate (multi-)indexes (see :ref:`reshape.set_index`). By `Benoit Bovy `_. - Added the ``compat`` option ``'no_conflicts'`` to ``merge``, allowing the combination of xarray objects with disjoint (:issue:`742`) or overlapping (:issue:`835`) coordinates as long as all present data agrees. By `Johnnie Gray `_. See :ref:`combining.no_conflicts` for more details. - It is now possible to set ``concat_dim=None`` explicitly in :py:func:`~xarray.open_mfdataset` to disable inferring a dimension along which to concatenate. By `Stephan Hoyer `_. - Added methods :py:meth:`DataArray.compute`, :py:meth:`Dataset.compute`, and :py:meth:`Variable.compute` as a non-mutating alternative to :py:meth:`~DataArray.load`. By `Guido Imperiale `_. - Adds DataArray and Dataset methods :py:meth:`~xarray.DataArray.cumsum` and :py:meth:`~xarray.DataArray.cumprod`. By `Phillip J. Wolfram `_. - New properties :py:attr:`Dataset.sizes` and :py:attr:`DataArray.sizes` for providing consistent access to dimension length on both ``Dataset`` and ``DataArray`` (:issue:`921`). By `Stephan Hoyer `_. - New keyword argument ``drop=True`` for :py:meth:`~DataArray.sel`, :py:meth:`~DataArray.isel` and :py:meth:`~DataArray.squeeze` for dropping scalar coordinates that arise from indexing. ``DataArray`` (:issue:`242`). By `Stephan Hoyer `_. - New top-level functions :py:func:`~xarray.full_like`, :py:func:`~xarray.zeros_like`, and :py:func:`~xarray.ones_like` By `Guido Imperiale `_. - Overriding a preexisting attribute with :py:func:`~xarray.register_dataset_accessor` or :py:func:`~xarray.register_dataarray_accessor` now issues a warning instead of raising an error (:issue:`1082`). By `Stephan Hoyer `_. - Options for axes sharing between subplots are exposed to :py:class:`~xarray.plot.FacetGrid` and :py:func:`~xarray.plot.plot`, so axes sharing can be disabled for polar plots. By `Bas Hoonhout `_. - New utility functions :py:func:`~xarray.testing.assert_equal`, :py:func:`~xarray.testing.assert_identical`, and :py:func:`~xarray.testing.assert_allclose` for asserting relationships between xarray objects, designed for use in a pytest test suite. - ``figsize``, ``size`` and ``aspect`` plot arguments are now supported for all plots (:issue:`897`). See :ref:`plotting.figsize` for more details. By `Stephan Hoyer `_ and `Fabien Maussion `_. - New :py:meth:`~Dataset.info` method to summarize ``Dataset`` variables and attributes. The method prints to a buffer (e.g. ``stdout``) with output similar to what the command line utility ``ncdump -h`` produces (:issue:`1150`). By `Joe Hamman `_. 
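As a quick illustration of the new ``info`` method, here is a minimal sketch using the present-day ``xarray`` namespace (the dataset contents are made up for the example):

.. code:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {"temperature": (("x", "y"), np.zeros((2, 3)), {"units": "K"})},
        attrs={"title": "example dataset"},
    )
    # prints an ``ncdump -h``-style summary of dimensions, variables and attributes
    ds.info()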
- Added the ability write unlimited netCDF dimensions with the ``scipy`` and ``netcdf4`` backends via the new ``xray.Dataset.encoding`` attribute or via the ``unlimited_dims`` argument to ``xray.Dataset.to_netcdf``. By `Joe Hamman `_. - New :py:meth:`~DataArray.quantile` method to calculate quantiles from DataArray objects (:issue:`1187`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - ``groupby_bins`` now restores empty bins by default (:issue:`1019`). By `Ryan Abernathey `_. - Fix issues for dates outside the valid range of pandas timestamps (:issue:`975`). By `Mathias Hauser `_. - Unstacking produced flipped array after stacking decreasing coordinate values (:issue:`980`). By `Stephan Hoyer `_. - Setting ``dtype`` via the ``encoding`` parameter of ``to_netcdf`` failed if the encoded dtype was the same as the dtype of the original array (:issue:`873`). By `Stephan Hoyer `_. - Fix issues with variables where both attributes ``_FillValue`` and ``missing_value`` are set to ``NaN`` (:issue:`997`). By `Marco Zรผhlke `_. - ``.where()`` and ``.fillna()`` now preserve attributes (:issue:`1009`). By `Fabien Maussion `_. - Applying :py:func:`broadcast()` to an xarray object based on the dask backend won't accidentally convert the array from dask to numpy anymore (:issue:`978`). By `Guido Imperiale `_. - ``Dataset.concat()`` now preserves variables order (:issue:`1027`). By `Fabien Maussion `_. - Fixed an issue with pcolormesh (:issue:`781`). A new ``infer_intervals`` keyword gives control on whether the cell intervals should be computed or not. By `Fabien Maussion `_. - Grouping over an dimension with non-unique values with ``groupby`` gives correct groups. By `Stephan Hoyer `_. - Fixed accessing coordinate variables with non-string names from ``.coords``. By `Stephan Hoyer `_. - :py:meth:`~xarray.DataArray.rename` now simultaneously renames the array and any coordinate with the same name, when supplied via a :py:class:`dict` (:issue:`1116`). By `Yves Delley `_. - Fixed sub-optimal performance in certain operations with object arrays (:issue:`1121`). By `Yves Delley `_. - Fix ``.groupby(group)`` when ``group`` has datetime dtype (:issue:`1132`). By `Jonas Sรธlvsteen `_. - Fixed a bug with facetgrid (the ``norm`` keyword was ignored, :issue:`1159`). By `Fabien Maussion `_. - Resolved a concurrency bug that could cause Python to crash when simultaneously reading and writing netCDF4 files with dask (:issue:`1172`). By `Stephan Hoyer `_. - Fix to make ``.copy()`` actually copy dask arrays, which will be relevant for future releases of dask in which dask arrays will be mutable (:issue:`1180`). By `Stephan Hoyer `_. - Fix opening NetCDF files with multi-dimensional time variables (:issue:`1229`). By `Stephan Hoyer `_. Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - ``xarray.Dataset.isel_points`` and ``xarray.Dataset.sel_points`` now use vectorised indexing in numpy and dask (:issue:`1161`), which can result in several orders of magnitude speedup. By `Jonathan Chambers `_. .. _whats-new.0.8.2: v0.8.2 (18 August 2016) ----------------------- This release includes a number of bug fixes and minor enhancements. Breaking changes ~~~~~~~~~~~~~~~~ - :py:func:`~xarray.broadcast` and :py:func:`~xarray.concat` now auto-align inputs, using ``join=outer``. Previously, these functions raised ``ValueError`` for non-aligned inputs. By `Guido Imperiale `_. Enhancements ~~~~~~~~~~~~ - New documentation on :ref:`panel transition`. By `Maximilian Roos `_. 
- New ``Dataset`` and ``DataArray`` methods :py:meth:`~xarray.Dataset.to_dict` and :py:meth:`~xarray.Dataset.from_dict` to allow easy conversion between dictionaries and xarray objects (:issue:`432`). See :ref:`dictionary IO` for more details. By `Julia Signell `_. - Added ``exclude`` and ``indexes`` optional parameters to :py:func:`~xarray.align`, and ``exclude`` optional parameter to :py:func:`~xarray.broadcast`. By `Guido Imperiale `_. - Better error message when assigning variables without dimensions (:issue:`971`). By `Stephan Hoyer `_. - Better error message when reindex/align fails due to duplicate index values (:issue:`956`). By `Stephan Hoyer `_. Bug fixes ~~~~~~~~~ - Ensure xarray works with h5netcdf v0.3.0 for arrays with ``dtype=str`` (:issue:`953`). By `Stephan Hoyer `_. - ``Dataset.__dir__()`` (i.e. the method python calls to get autocomplete options) failed if one of the dataset's keys was not a string (:issue:`852`). By `Maximilian Roos `_. - ``Dataset`` constructor can now take arbitrary objects as values (:issue:`647`). By `Maximilian Roos `_. - Clarified ``copy`` argument for :py:meth:`~xarray.DataArray.reindex` and :py:func:`~xarray.align`, which now consistently always return new xarray objects (:issue:`927`). - Fix ``open_mfdataset`` with ``engine='pynio'`` (:issue:`936`). By `Stephan Hoyer `_. - ``groupby_bins`` sorted bin labels as strings (:issue:`952`). By `Stephan Hoyer `_. - Fix bug introduced by v0.8.0 that broke assignment to datasets when both the left and right side have the same non-unique index values (:issue:`956`). .. _whats-new.0.8.1: v0.8.1 (5 August 2016) ---------------------- Bug fixes ~~~~~~~~~ - Fix bug in v0.8.0 that broke assignment to Datasets with non-unique indexes (:issue:`943`). By `Stephan Hoyer `_. .. _whats-new.0.8.0: v0.8.0 (2 August 2016) ---------------------- This release includes four months of new features and bug fixes, including several breaking changes. .. _v0.8.0.breaking: Breaking changes ~~~~~~~~~~~~~~~~ - Dropped support for Python 2.6 (:issue:`855`). - Indexing on multi-index now drop levels, which is consistent with pandas. It also changes the name of the dimension / coordinate when the multi-index is reduced to a single index (:issue:`802`). - Contour plots no longer add a colorbar per default (:issue:`866`). Filled contour plots are unchanged. - ``DataArray.values`` and ``.data`` now always returns an NumPy array-like object, even for 0-dimensional arrays with object dtype (:issue:`867`). Previously, ``.values`` returned native Python objects in such cases. To convert the values of scalar arrays to Python objects, use the ``.item()`` method. Enhancements ~~~~~~~~~~~~ - Groupby operations now support grouping over multidimensional variables. A new method called :py:meth:`~xarray.Dataset.groupby_bins` has also been added to allow users to specify bins for grouping. The new features are described in :ref:`groupby.multidim` and :ref:`/examples/multidimensional-coords.ipynb`. By `Ryan Abernathey `_. - DataArray and Dataset method :py:meth:`where` now supports a ``drop=True`` option that clips coordinate elements that are fully masked. By `Phillip J. Wolfram `_. - New top level :py:func:`merge` function allows for combining variables from any number of ``Dataset`` and/or ``DataArray`` variables. See :ref:`merge` for more details. By `Stephan Hoyer `_. 
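For example, a minimal sketch of combining two datasets with the top-level ``merge`` function, written against the present-day ``xarray`` namespace (the variable names are illustrative only):

.. code:: python

    import xarray as xr

    ds1 = xr.Dataset({"a": ("x", [1, 2, 3])})
    ds2 = xr.Dataset({"b": ("x", [10, 20, 30])})
    # returns a single Dataset containing both "a" and "b"
    merged = xr.merge([ds1, ds2])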
- :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` now support the ``keep_attrs=False`` option that determines whether variable and dataset attributes are retained in the resampled object. By `Jeremy McGibbon `_. - Better multi-index support in :py:meth:`DataArray.sel`, :py:meth:`DataArray.loc`, :py:meth:`Dataset.sel` and :py:meth:`Dataset.loc`, which now behave more closely to pandas and which also accept dictionaries for indexing based on given level names and labels (see :ref:`multi-level indexing`). By `Benoit Bovy `_. - New (experimental) decorators :py:func:`~xarray.register_dataset_accessor` and :py:func:`~xarray.register_dataarray_accessor` for registering custom xarray extensions without subclassing. They are described in the new documentation page on :ref:`internals`. By `Stephan Hoyer `_. - Round trip boolean datatypes. Previously, writing boolean datatypes to netCDF formats would raise an error since netCDF does not have a ``bool`` datatype. This feature reads/writes a ``dtype`` attribute to boolean variables in netCDF files. By `Joe Hamman `_. - 2D plotting methods now have two new keywords (``cbar_ax`` and ``cbar_kwargs``), allowing more control on the colorbar (:issue:`872`). By `Fabien Maussion `_. - New Dataset method :py:meth:`Dataset.filter_by_attrs`, akin to ``netCDF4.Dataset.get_variables_by_attributes``, to easily filter data variables using its attributes. `Filipe Fernandes `_. Bug fixes ~~~~~~~~~ - Attributes were being retained by default for some resampling operations when they should not. With the ``keep_attrs=False`` option, they will no longer be retained by default. This may be backwards-incompatible with some scripts, but the attributes may be kept by adding the ``keep_attrs=True`` option. By `Jeremy McGibbon `_. - Concatenating xarray objects along an axis with a MultiIndex or PeriodIndex preserves the nature of the index (:issue:`875`). By `Stephan Hoyer `_. - Fixed bug in arithmetic operations on DataArray objects whose dimensions are numpy structured arrays or recarrays :issue:`861`, :issue:`837`. By `Maciek Swat `_. - ``decode_cf_timedelta`` now accepts arrays with ``ndim`` >1 (:issue:`842`). This fixes issue :issue:`665`. `Filipe Fernandes `_. - Fix a bug where ``xarray.ufuncs`` that take two arguments would incorrectly use to numpy functions instead of dask.array functions (:issue:`876`). By `Stephan Hoyer `_. - Support for pickling functions from ``xarray.ufuncs`` (:issue:`901`). By `Stephan Hoyer `_. - ``Variable.copy(deep=True)`` no longer converts MultiIndex into a base Index (:issue:`769`). By `Benoit Bovy `_. - Fixes for groupby on dimensions with a multi-index (:issue:`867`). By `Stephan Hoyer `_. - Fix printing datasets with unicode attributes on Python 2 (:issue:`892`). By `Stephan Hoyer `_. - Fixed incorrect test for dask version (:issue:`891`). By `Stephan Hoyer `_. - Fixed ``dim`` argument for ``isel_points``/``sel_points`` when a ``pandas.Index`` is passed. By `Stephan Hoyer `_. - :py:func:`~xarray.plot.contour` now plots the correct number of contours (:issue:`866`). By `Fabien Maussion `_. .. _whats-new.0.7.2: v0.7.2 (13 March 2016) ---------------------- This release includes two new, entirely backwards compatible features and several bug fixes. Enhancements ~~~~~~~~~~~~ - New DataArray method :py:meth:`DataArray.dot` for calculating the dot product of two DataArrays along shared dimensions. By `Dean Pospisil `_. 
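A minimal sketch of ``dot`` against the present-day API (the array contents are arbitrary):

.. code:: python

    import numpy as np
    import xarray as xr

    a = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))
    b = xr.DataArray(np.arange(3), dims="y")
    # sums the products over the shared dimension "y", leaving dims ("x",)
    a.dot(b)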
- Rolling window operations on DataArray objects are now supported via a new :py:meth:`DataArray.rolling` method. For example: .. jupyter-input:: import xarray as xr import numpy as np arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y")) arr .. jupyter-output:: array([[ 0. , 0.5, 1. , 1.5, 2. ], [ 2.5, 3. , 3.5, 4. , 4.5], [ 5. , 5.5, 6. , 6.5, 7. ]]) Coordinates: * x (x) int64 0 1 2 * y (y) int64 0 1 2 3 4 .. jupyter-input:: arr.rolling(y=3, min_periods=2).mean() .. jupyter-output:: array([[ nan, 0.25, 0.5 , 1. , 1.5 ], [ nan, 2.75, 3. , 3.5 , 4. ], [ nan, 5.25, 5.5 , 6. , 6.5 ]]) Coordinates: * x (x) int64 0 1 2 * y (y) int64 0 1 2 3 4 See :ref:`compute.rolling` for more details. By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fixed an issue where plots using pcolormesh and Cartopy axes were being distorted by the inference of the axis interval breaks. This change chooses not to modify the coordinate variables when the axes have the attribute ``projection``, allowing Cartopy to handle the extent of pcolormesh plots (:issue:`781`). By `Joe Hamman `_. - 2D plots now better handle additional coordinates which are not ``DataArray`` dimensions (:issue:`788`). By `Fabien Maussion `_. .. _whats-new.0.7.1: v0.7.1 (16 February 2016) ------------------------- This is a bug fix release that includes two small, backwards compatible enhancements. We recommend that all users upgrade. Enhancements ~~~~~~~~~~~~ - Numerical operations now return empty objects on no overlapping labels rather than raising ``ValueError`` (:issue:`739`). - :py:class:`~pandas.Series` is now supported as valid input to the ``Dataset`` constructor (:issue:`740`). Bug fixes ~~~~~~~~~ - Restore checks for shape consistency between data and coordinates in the DataArray constructor (:issue:`758`). - Single dimension variables no longer transpose as part of a broader ``.transpose``. This behavior was causing ``pandas.PeriodIndex`` dimensions to lose their type (:issue:`749`) - :py:class:`~xarray.Dataset` labels remain as their native type on ``.to_dataset``. Previously they were coerced to strings (:issue:`745`) - Fixed a bug where replacing a ``DataArray`` index coordinate would improperly align the coordinate (:issue:`725`). - ``DataArray.reindex_like`` now maintains the dtype of complex numbers when reindexing leads to NaN values (:issue:`738`). - ``Dataset.rename`` and ``DataArray.rename`` support the old and new names being the same (:issue:`724`). - Fix :py:meth:`~xarray.Dataset.from_dataframe` for DataFrames with Categorical column and a MultiIndex index (:issue:`737`). - Fixes to ensure xarray works properly after the upcoming pandas v0.18 and NumPy v1.11 releases. Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Edward Richards - Maximilian Roos - Rafael Guedes - Spencer Hill - Stephan Hoyer .. _whats-new.0.7.0: v0.7.0 (21 January 2016) ------------------------ This major release includes redesign of :py:class:`~xarray.DataArray` internals, as well as new methods for reshaping, rolling and shifting data. It includes preliminary support for :py:class:`pandas.MultiIndex`, as well as a number of other features and bug fixes, several of which offer improved compatibility with pandas. New name ~~~~~~~~ The project formerly known as "xray" is now "xarray", pronounced "x-array"! This avoids a namespace conflict with the entire field of x-ray science. 
Renaming our project seemed like the right thing to do, especially because some scientists who work with actual x-rays are interested in using this project in their work. Thanks for your understanding and patience in this transition. You can now find our documentation and code repository at new URLs: - https://docs.xarray.dev - https://github.com/pydata/xarray/ To ease the transition, we have simultaneously released v0.7.0 of both ``xray`` and ``xarray`` on the Python Package Index. These packages are identical. For now, ``import xray`` still works, except it issues a deprecation warning. This will be the last xray release. Going forward, we recommend switching your import statements to ``import xarray as xr``. .. _v0.7.0.breaking: Breaking changes ~~~~~~~~~~~~~~~~ - The internal data model used by ``xray.DataArray`` has been rewritten to fix several outstanding issues (:issue:`367`, :issue:`634`, `this stackoverflow report`_). Internally, ``DataArray`` is now implemented in terms of ``._variable`` and ``._coords`` attributes instead of holding variables in a ``Dataset`` object. This refactor ensures that if a DataArray has the same name as one of its coordinates, the array and the coordinate no longer share the same data. In practice, this means that creating a DataArray with the same ``name`` as one of its dimensions no longer automatically uses that array to label the corresponding coordinate. You will now need to provide coordinate labels explicitly. Here's the old behavior: .. jupyter-input:: xray.DataArray([4, 5, 6], dims="x", name="x") .. jupyter-output:: array([4, 5, 6]) Coordinates: * x (x) int64 4 5 6 and the new behavior (compare the values of the ``x`` coordinate): .. jupyter-input:: xray.DataArray([4, 5, 6], dims="x", name="x") .. jupyter-output:: array([4, 5, 6]) Coordinates: * x (x) int64 0 1 2 - It is no longer possible to convert a DataArray to a Dataset with ``xray.DataArray.to_dataset`` if it is unnamed. This will now raise ``ValueError``. If the array is unnamed, you need to supply the ``name`` argument. .. _this stackoverflow report: http://stackoverflow.com/questions/33158558/python-xray-extract-first-and-last-time-value-within-each-month-of-a-timeseries Enhancements ~~~~~~~~~~~~ - Basic support for :py:class:`~pandas.MultiIndex` coordinates on xray objects, including indexing, :py:meth:`~DataArray.stack` and :py:meth:`~DataArray.unstack`: .. jupyter-input:: df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]}) s = df.set_index(["x", "y"])["foo"] arr = xray.DataArray(s, dims="z") arr .. jupyter-output:: array([0, 1, 2]) Coordinates: * z (z) object ('a', 0) ('b', 0) ('b', 1) .. jupyter-input:: arr.indexes["z"] .. jupyter-output:: MultiIndex(levels=[[u'a', u'b'], [0, 1]], labels=[[0, 1, 1], [0, 0, 1]], names=[u'x', u'y']) .. jupyter-input:: arr.unstack("z") .. jupyter-output:: array([[ 0., nan], [ 1., 2.]]) Coordinates: * x (x) object 'a' 'b' * y (y) int64 0 1 .. jupyter-input:: arr.unstack("z").stack(z=("x", "y")) .. jupyter-output:: array([ 0., nan, 1., 2.]) Coordinates: * z (z) object ('a', 0) ('a', 1) ('b', 0) ('b', 1) See :ref:`reshape.stack` for more details. .. warning:: xray's MultiIndex support is still experimental, and we have a long to- do list of desired additions (:issue:`719`), including better display of multi-index levels when printing a ``Dataset``, and support for saving datasets with a MultiIndex to a netCDF file. User contributions in this area would be greatly appreciated. 
- Support for reading GRIB, HDF4 and other file formats via PyNIO_. - Better error message when a variable is supplied with the same name as one of its dimensions. - Plotting: more control on colormap parameters (:issue:`642`). ``vmin`` and ``vmax`` will not be silently ignored anymore. Setting ``center=False`` prevents automatic selection of a divergent colormap. - New ``xray.Dataset.shift`` and ``xray.Dataset.roll`` methods for shifting/rotating datasets or arrays along a dimension: .. code:: python array = xray.DataArray([5, 6, 7, 8], dims="x") array.shift(x=2) array.roll(x=2) Notice that ``shift`` moves data independently of coordinates, but ``roll`` moves both data and coordinates. - Assigning a ``pandas`` object directly as a ``Dataset`` variable is now permitted. Its index names correspond to the ``dims`` of the ``Dataset``, and its data is aligned. - Passing a :py:class:`pandas.DataFrame` or ``pandas.Panel`` to a Dataset constructor is now permitted. - New function ``xray.broadcast`` for explicitly broadcasting ``DataArray`` and ``Dataset`` objects against each other. For example: .. code:: python a = xray.DataArray([1, 2, 3], dims="x") b = xray.DataArray([5, 6], dims="y") a b a2, b2 = xray.broadcast(a, b) a2 b2 .. _PyNIO: https://www.pyngl.ucar.edu/Nio.shtml Bug fixes ~~~~~~~~~ - Fixes for several issues found on ``DataArray`` objects with the same name as one of their coordinates (see :ref:`v0.7.0.breaking` for more details). - ``DataArray.to_masked_array`` always returns masked array with mask being an array (not a scalar value) (:issue:`684`) - Allows for (imperfect) repr of Coords when underlying index is PeriodIndex (:issue:`645`). - Fixes for several issues found on ``DataArray`` objects with the same name as one of their coordinates (see :ref:`v0.7.0.breaking` for more details). - Attempting to assign a ``Dataset`` or ``DataArray`` variable/attribute using attribute-style syntax (e.g., ``ds.foo = 42``) now raises an error rather than silently failing (:issue:`656`, :issue:`714`). - You can now pass pandas objects with non-numpy dtypes (e.g., ``categorical`` or ``datetime64`` with a timezone) into xray without an error (:issue:`716`). Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Antony Lee - Fabien Maussion - Joe Hamman - Maximilian Roos - Stephan Hoyer - Takeshi Kanmae - femtotrader v0.6.1 (21 October 2015) ------------------------ This release contains a number of bug and compatibility fixes, as well as enhancements to plotting, indexing and writing files to disk. Note that the minimum required version of dask for use with xray is now version 0.6. API Changes ~~~~~~~~~~~ - The handling of colormaps and discrete color lists for 2D plots in ``xray.DataArray.plot`` was changed to provide more compatibility with matplotlib's ``contour`` and ``contourf`` functions (:issue:`538`). Now discrete lists of colors should be specified using ``colors`` keyword, rather than ``cmap``. Enhancements ~~~~~~~~~~~~ - Faceted plotting through ``xray.plot.FacetGrid`` and the ``xray.plot.plot`` method. See :ref:`plotting.faceting` for more details and examples. - ``xray.Dataset.sel`` and ``xray.Dataset.reindex`` now support the ``tolerance`` argument for controlling nearest-neighbor selection (:issue:`629`): .. jupyter-input:: array = xray.DataArray([1, 2, 3], dims="x") array.reindex(x=[0.9, 1.5], method="nearest", tolerance=0.2) .. jupyter-output:: array([ 2., nan]) Coordinates: * x (x) float64 0.9 1.5 This feature requires pandas v0.17 or newer. 
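The same keyword also applies to label-based selection; a minimal sketch with the present-day ``xarray`` namespace (labels and values chosen arbitrarily):

.. code:: python

    import xarray as xr

    array = xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
    # nearest-neighbour lookup, but only if a label lies within 0.2 of the target
    array.sel(x=1.1, method="nearest", tolerance=0.2)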
- New ``encoding`` argument in ``xray.Dataset.to_netcdf`` for writing netCDF files with compression, as described in the new documentation section on :ref:`io.netcdf.writing_encoded`. - Add ``xray.Dataset.real`` and ``xray.Dataset.imag`` attributes to Dataset and DataArray (:issue:`553`). - More informative error message with ``xray.Dataset.from_dataframe`` if the frame has duplicate columns. - xray now uses deterministic names for dask arrays it creates or opens from disk. This allows xray users to take advantage of dask's nascent support for caching intermediate computation results. See :issue:`555` for an example. Bug fixes ~~~~~~~~~ - Forwards compatibility with the latest pandas release (v0.17.0). We were using some internal pandas routines for datetime conversion, which unfortunately have now changed upstream (:issue:`569`). - Aggregation functions now correctly skip ``NaN`` for data for ``complex128`` dtype (:issue:`554`). - Fixed indexing 0d arrays with unicode dtype (:issue:`568`). - ``xray.DataArray.name`` and Dataset keys must be a string or None to be written to netCDF (:issue:`533`). - ``xray.DataArray.where`` now uses dask instead of numpy if either the array or ``other`` is a dask array. Previously, if ``other`` was a numpy array the method was evaluated eagerly. - Global attributes are now handled more consistently when loading remote datasets using ``engine='pydap'`` (:issue:`574`). - It is now possible to assign to the ``.data`` attribute of DataArray objects. - ``coordinates`` attribute is now kept in the encoding dictionary after decoding (:issue:`610`). - Compatibility with numpy 1.10 (:issue:`617`). Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Ryan Abernathey - Pete Cable - Clark Fitzgerald - Joe Hamman - Stephan Hoyer - Scott Sinclair v0.6.0 (21 August 2015) ----------------------- This release includes numerous bug fixes and enhancements. Highlights include the introduction of a plotting module and the new Dataset and DataArray methods ``xray.Dataset.isel_points``, ``xray.Dataset.sel_points``, ``xray.Dataset.where`` and ``xray.Dataset.diff``. There are no breaking changes from v0.5.2. Enhancements ~~~~~~~~~~~~ - Plotting methods have been implemented on DataArray objects ``xray.DataArray.plot`` through integration with matplotlib (:issue:`185`). For an introduction, see :ref:`plotting`. - Variables in netCDF files with multiple missing values are now decoded as NaN after issuing a warning if open_dataset is called with mask_and_scale=True. - We clarified our rules for when the result from an xray operation is a copy vs. a view (see :ref:`copies_vs_views` for more details). - Dataset variables are now written to netCDF files in order of appearance when using the netcdf4 backend (:issue:`479`). - Added ``xray.Dataset.isel_points`` and ``xray.Dataset.sel_points`` to support pointwise indexing of Datasets and DataArrays (:issue:`475`). .. jupyter-input:: da = xray.DataArray( ...: np.arange(56).reshape((7, 8)), ...: coords={"x": list("abcdefg"), "y": 10 * np.arange(8)}, ...: dims=["x", "y"], ...: ) da .. jupyter-output:: array([[ 0, 1, 2, 3, 4, 5, 6, 7], [ 8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29, 30, 31], [32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47], [48, 49, 50, 51, 52, 53, 54, 55]]) Coordinates: * y (y) int64 0 10 20 30 40 50 60 70 * x (x) |S1 'a' 'b' 'c' 'd' 'e' 'f' 'g' .. 
jupyter-input:: # we can index by position along each dimension da.isel_points(x=[0, 1, 6], y=[0, 1, 0], dim="points") .. jupyter-output:: array([ 0, 9, 48]) Coordinates: y (points) int64 0 10 0 x (points) |S1 'a' 'b' 'g' * points (points) int64 0 1 2 .. jupyter-input:: # or equivalently by label da.sel_points(x=["a", "b", "g"], y=[0, 10, 0], dim="points") .. jupyter-output:: array([ 0, 9, 48]) Coordinates: y (points) int64 0 10 0 x (points) |S1 'a' 'b' 'g' * points (points) int64 0 1 2 - New ``xray.Dataset.where`` method for masking xray objects according to some criteria. This works particularly well with multi-dimensional data: .. code:: python ds = xray.Dataset(coords={"x": range(100), "y": range(100)}) ds["distance"] = np.sqrt(ds.x**2 + ds.y**2) ds.distance.where(ds.distance < 100).plot() - Added new methods ``xray.DataArray.diff`` and ``xray.Dataset.diff`` for finite difference calculations along a given axis. - New ``xray.DataArray.to_masked_array`` convenience method for returning a numpy.ma.MaskedArray. .. code:: python da = xray.DataArray(np.random.random_sample(size=(5, 4))) da.where(da < 0.5) da.where(da < 0.5).to_masked_array(copy=True) - Added new flag "drop_variables" to ``xray.open_dataset`` for excluding variables from being parsed. This may be useful to drop variables with problems or inconsistent values. Bug fixes ~~~~~~~~~ - Fixed aggregation functions (e.g., sum and mean) on big-endian arrays when bottleneck is installed (:issue:`489`). - Dataset aggregation functions dropped variables with unsigned integer dtype (:issue:`505`). - ``.any()`` and ``.all()`` were not lazy when used on xray objects containing dask arrays. - Fixed an error when attempting to saving datetime64 variables to netCDF files when the first element is ``NaT`` (:issue:`528`). - Fix pickle on DataArray objects (:issue:`515`). - Fixed unnecessary coercion of float64 to float32 when using netcdf3 and netcdf4_classic formats (:issue:`526`). v0.5.2 (16 July 2015) --------------------- This release contains bug fixes, several additional options for opening and saving netCDF files, and a backwards incompatible rewrite of the advanced options for ``xray.concat``. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The optional arguments ``concat_over`` and ``mode`` in ``xray.concat`` have been removed and replaced by ``data_vars`` and ``coords``. The new arguments are both more easily understood and more robustly implemented, and allowed us to fix a bug where ``concat`` accidentally loaded data into memory. If you set values for these optional arguments manually, you will need to update your code. The default behavior should be unchanged. Enhancements ~~~~~~~~~~~~ - ``xray.open_mfdataset`` now supports a ``preprocess`` argument for preprocessing datasets prior to concatenaton. This is useful if datasets cannot be otherwise merged automatically, e.g., if the original datasets have conflicting index coordinates (:issue:`443`). - ``xray.open_dataset`` and ``xray.open_mfdataset`` now use a global thread lock by default for reading from netCDF files with dask. This avoids possible segmentation faults for reading from netCDF4 files when HDF5 is not configured properly for concurrent access (:issue:`444`). - Added support for serializing arrays of complex numbers with ``engine='h5netcdf'``. - The new ``xray.save_mfdataset`` function allows for saving multiple datasets to disk simultaneously. This is useful when processing large datasets with dask.array. 
For example, to save a dataset too big to fit into memory to one file per year, we could write: .. jupyter-input:: years, datasets = zip(*ds.groupby("time.year")) paths = ["%s.nc" % y for y in years] xray.save_mfdataset(datasets, paths) Bug fixes ~~~~~~~~~ - Fixed ``min``, ``max``, ``argmin`` and ``argmax`` for arrays with string or unicode types (:issue:`453`). - ``xray.open_dataset`` and ``xray.open_mfdataset`` support supplying chunks as a single integer. - Fixed a bug in serializing scalar datetime variable to netCDF. - Fixed a bug that could occur in serialization of 0-dimensional integer arrays. - Fixed a bug where concatenating DataArrays was not always lazy (:issue:`464`). - When reading datasets with h5netcdf, bytes attributes are decoded to strings. This allows conventions decoding to work properly on Python 3 (:issue:`451`). v0.5.1 (15 June 2015) --------------------- This minor release fixes a few bugs and an inconsistency with pandas. It also adds the ``pipe`` method, copied from pandas. Enhancements ~~~~~~~~~~~~ - Added ``xray.Dataset.pipe``, replicating the `new pandas method`_ in version 0.16.2. See :ref:`transforming datasets` for more details. - ``xray.Dataset.assign`` and ``xray.Dataset.assign_coords`` now assign new variables in sorted (alphabetical) order, mirroring the behavior in pandas. Previously, the order was arbitrary. .. _new pandas method: http://pandas.pydata.org/pandas-docs/version/0.16.2/whatsnew.html#pipe Bug fixes ~~~~~~~~~ - ``xray.concat`` fails in an edge case involving identical coordinate variables (:issue:`425`) - We now decode variables loaded from netCDF3 files with the scipy engine using native endianness (:issue:`416`). This resolves an issue when aggregating these arrays with bottleneck installed. v0.5 (1 June 2015) ------------------ Highlights ~~~~~~~~~~ The headline feature in this release is experimental support for out-of-core computing (data that doesn't fit into memory) with :doc:`user-guide/dask`. This includes a new top-level function ``xray.open_mfdataset`` that makes it easy to open a collection of netCDF (using dask) as a single ``xray.Dataset`` object. For more on dask, read the `blog post introducing xray + dask`_ and the new documentation section :doc:`user-guide/dask`. .. _blog post introducing xray + dask: https://www.anaconda.com/blog/developer-blog/xray-dask-out-core-labeled-arrays-python/ Dask makes it possible to harness parallelism and manipulate gigantic datasets with xray. It is currently an optional dependency, but it may become required in the future. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The logic used for choosing which variables are concatenated with ``xray.concat`` has changed. Previously, by default any variables which were equal across a dimension were not concatenated. This lead to some surprising behavior, where the behavior of groupby and concat operations could depend on runtime values (:issue:`268`). For example: .. jupyter-input:: ds = xray.Dataset({"x": 0}) xray.concat([ds, ds], dim="y") .. jupyter-output:: Dimensions: () Coordinates: *empty* Data variables: x int64 0 Now, the default always concatenates data variables: .. code:: python In [1]: ds = xray.Dataset({"x": 0}) In [2]: xray.concat([ds, ds], dim="y") Out[2]: Size: 16B Dimensions: (y: 2) Dimensions without coordinates: y Data variables: x (y) int64 16B 0 0 .. code:: python xray.concat([ds, ds], dim="y") To obtain the old behavior, supply the argument ``concat_over=[]``. 
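For reference, a minimal sketch of the concatenation behaviour described above, written against the present-day API, where the ``data_vars`` keyword (introduced in v0.5.2 as the replacement for ``concat_over``) controls which variables are concatenated:

.. code:: python

    import xarray as xr

    ds = xr.Dataset({"x": 0})
    # default: data variables are concatenated along the new dimension
    xr.concat([ds, ds], dim="y")
    # with data_vars="minimal", variables lacking the concat dimension are
    # checked for equality and kept as-is, similar to the old behaviour
    xr.concat([ds, ds], dim="y", data_vars="minimal")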
Enhancements ~~~~~~~~~~~~ - New ``xray.Dataset.to_dataarray`` and enhanced ``xray.DataArray.to_dataset`` methods make it easy to switch back and forth between arrays and datasets: .. code:: python ds = xray.Dataset( {"a": 1, "b": ("x", [1, 2, 3])}, coords={"c": 42}, attrs={"Conventions": "None"}, ) ds.to_dataarray() ds.to_dataarray().to_dataset(dim="variable") - New ``xray.Dataset.fillna`` method to fill missing values, modeled off the pandas method of the same name: .. code:: python array = xray.DataArray([np.nan, 1, np.nan, 3], dims="x") array.fillna(0) ``fillna`` works on both ``Dataset`` and ``DataArray`` objects, and uses index based alignment and broadcasting like standard binary operations. It also can be applied by group, as illustrated in :ref:`/examples/weather-data.ipynb#Fill-missing-values-with-climatology`. - New ``xray.Dataset.assign`` and ``xray.Dataset.assign_coords`` methods patterned off the new :py:meth:`DataFrame.assign ` method in pandas: .. code:: python ds = xray.Dataset({"y": ("x", [1, 2, 3])}) ds.assign(z=lambda ds: ds.y**2) ds.assign_coords(z=("x", ["a", "b", "c"])) These methods return a new Dataset (or DataArray) with updated data or coordinate variables. - ``xray.Dataset.sel`` now supports the ``method`` parameter, which works like the parameter of the same name on ``xray.Dataset.reindex``. It provides a simple interface for doing nearest-neighbor interpolation: .. use verbatim because I can't seem to install pandas 0.16.1 on RTD :( .. jupyter-input:: ds.sel(x=1.1, method="nearest") .. jupyter-output:: Dimensions: () Coordinates: x int64 1 Data variables: y int64 2 .. jupyter-input:: ds.sel(x=[1.1, 2.1], method="pad") .. jupyter-output:: Dimensions: (x: 2) Coordinates: * x (x) int64 1 2 Data variables: y (x) int64 2 3 See :ref:`nearest neighbor lookups` for more details. - You can now control the underlying backend used for accessing remote datasets (via OPeNDAP) by specifying ``engine='netcdf4'`` or ``engine='pydap'``. - xray now provides experimental support for reading and writing netCDF4 files directly via `h5py`_ with the `h5netcdf`_ package, avoiding the netCDF4-Python package. You will need to install h5netcdf and specify ``engine='h5netcdf'`` to try this feature. - Accessing data from remote datasets now has retrying logic (with exponential backoff) that should make it robust to occasional bad responses from DAP servers. - You can control the width of the Dataset repr with ``xray.set_options``. It can be used either as a context manager, in which case the default is restored outside the context: .. code:: python ds = xray.Dataset({"x": np.arange(1000)}) with xray.set_options(display_width=40): print(ds) Or to set a global option: .. jupyter-input:: xray.set_options(display_width=80) The default value for the ``display_width`` option is 80. .. _h5py: http://www.h5py.org/ .. _h5netcdf: https://github.com/shoyer/h5netcdf Deprecations ~~~~~~~~~~~~ - The method ``load_data()`` has been renamed to the more succinct ``xray.Dataset.load``. v0.4.1 (18 March 2015) ---------------------- The release contains bug fixes and several new features. All changes should be fully backwards compatible. Enhancements ~~~~~~~~~~~~ - New documentation sections on :ref:`time-series` and :ref:`combining multiple files`. - ``xray.Dataset.resample`` lets you resample a dataset or data array to a new temporal resolution. The syntax is the `same as pandas`_, except you need to supply the time dimension explicitly: .. 
code:: python time = pd.date_range("2000-01-01", freq="6H", periods=10) array = xray.DataArray(np.arange(10), [("time", time)]) array.resample("1D", dim="time") You can specify how to do the resampling with the ``how`` argument and other options such as ``closed`` and ``label`` let you control labeling: .. code:: python array.resample("1D", dim="time", how="sum", label="right") If the desired temporal resolution is higher than the original data (upsampling), xray will insert missing values: .. code:: python array.resample("3H", "time") - ``first`` and ``last`` methods on groupby objects let you take the first or last examples from each group along the grouped axis: .. code:: python array.groupby("time.day").first() These methods combine well with ``resample``: .. code:: python array.resample("1D", dim="time", how="first") - ``xray.Dataset.swap_dims`` allows for easily swapping one dimension out for another: .. code:: python ds = xray.Dataset({"x": range(3), "y": ("x", list("abc"))}) ds.swap_dims({"x": "y"}) This was possible in earlier versions of xray, but required some contortions. - ``xray.open_dataset`` and ``xray.Dataset.to_netcdf`` now accept an ``engine`` argument to explicitly select which underlying library (netcdf4 or scipy) is used for reading/writing a netCDF file. .. _same as pandas: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling Bug fixes ~~~~~~~~~ - Fixed a bug where data netCDF variables read from disk with ``engine='scipy'`` could still be associated with the file on disk, even after closing the file (:issue:`341`). This manifested itself in warnings about mmapped arrays and segmentation faults (if the data was accessed). - Silenced spurious warnings about all-NaN slices when using nan-aware aggregation methods (:issue:`344`). - Dataset aggregations with ``keep_attrs=True`` now preserve attributes on data variables, not just the dataset itself. - Tests for xray now pass when run on Windows (:issue:`360`). - Fixed a regression in v0.4 where saving to netCDF could fail with the error ``ValueError: could not automatically determine time units``. v0.4 (2 March, 2015) -------------------- This is one of the biggest releases yet for xray: it includes some major changes that may break existing code, along with the usual collection of minor enhancements and bug fixes. On the plus side, this release includes all hitherto planned breaking changes, so the upgrade path for xray should be smoother going forward. Breaking changes ~~~~~~~~~~~~~~~~ - We now automatically align index labels in arithmetic, dataset construction, merging and updating. This means the need for manually invoking methods like ``xray.align`` and ``xray.Dataset.reindex_like`` should be vastly reduced. :ref:`For arithmetic`, we align based on the **intersection** of labels: .. code:: python lhs = xray.DataArray([1, 2, 3], [("x", [0, 1, 2])]) rhs = xray.DataArray([2, 3, 4], [("x", [1, 2, 3])]) lhs + rhs :ref:`For dataset construction and merging`, we align based on the **union** of labels: .. code:: python xray.Dataset({"foo": lhs, "bar": rhs}) :ref:`For update and __setitem__`, we align based on the **original** object: .. code:: python lhs.coords["rhs"] = rhs lhs - Aggregations like ``mean`` or ``median`` now skip missing values by default: .. code:: python xray.DataArray([1, 2, np.nan, 3]).mean() You can turn this behavior off by supplying the keyword argument ``skipna=False``. 
These operations are lightning fast thanks to integration with bottleneck_, which is a new optional dependency for xray (numpy is used if bottleneck is not installed). - Scalar coordinates no longer conflict with constant arrays with the same value (e.g., in arithmetic, merging datasets and concat), even if they have different shape (:issue:`243`). For example, the coordinate ``c`` here persists through arithmetic, even though it has different shapes on each DataArray: .. code:: python a = xray.DataArray([1, 2], coords={"c": 0}, dims="x") b = xray.DataArray([1, 2], coords={"c": ("x", [0, 0])}, dims="x") (a + b).coords This functionality can be controlled through the ``compat`` option, which has also been added to the ``xray.Dataset`` constructor. - Datetime shortcuts such as ``'time.month'`` now return a ``DataArray`` with the name ``'month'``, not ``'time.month'`` (:issue:`345`). This makes it easier to index the resulting arrays when they are used with ``groupby``: .. code:: python time = xray.DataArray( pd.date_range("2000-01-01", periods=365), dims="time", name="time" ) counts = time.groupby("time.month").count() counts.sel(month=2) Previously, you would need to use something like ``counts.sel(**{'time.month': 2}})``, which is much more awkward. - The ``season`` datetime shortcut now returns an array of string labels such ``'DJF'``: .. code-block:: ipython In[92]: ds = xray.Dataset({"t": pd.date_range("2000-01-01", periods=12, freq="M")}) In[93]: ds["t.season"] Out[93]: array(['DJF', 'DJF', 'MAM', ..., 'SON', 'SON', 'DJF'], dtype='`_. - Use functions that return generic ndarrays with DataArray.groupby.apply and Dataset.apply (:issue:`327` and :issue:`329`). Thanks Jeff Gerard! - Consolidated the functionality of ``dumps`` (writing a dataset to a netCDF3 bytestring) into ``xray.Dataset.to_netcdf`` (:issue:`333`). - ``xray.Dataset.to_netcdf`` now supports writing to groups in netCDF4 files (:issue:`333`). It also finally has a full docstring -- you should read it! - ``xray.open_dataset`` and ``xray.Dataset.to_netcdf`` now work on netCDF3 files when netcdf4-python is not installed as long as scipy is available (:issue:`333`). - The new ``xray.Dataset.drop`` and ``xray.DataArray.drop`` methods makes it easy to drop explicitly listed variables or index labels: .. code:: python # drop variables ds = xray.Dataset({"x": 0, "y": 1}) ds.drop("x") # drop index labels arr = xray.DataArray([1, 2, 3], coords=[("x", list("abc"))]) arr.drop(["a", "c"], dim="x") - ``xray.Dataset.broadcast_equals`` has been added to correspond to the new ``compat`` option. - Long attributes are now truncated at 500 characters when printing a dataset (:issue:`338`). This should make things more convenient for working with datasets interactively. - Added a new documentation example, :ref:`/examples/monthly-means.ipynb`. Thanks Joe Hamman! Bug fixes ~~~~~~~~~ - Several bug fixes related to decoding time units from netCDF files (:issue:`316`, :issue:`330`). Thanks Stefan Pfenninger! - xray no longer requires ``decode_coords=False`` when reading datasets with unparsable coordinate attributes (:issue:`308`). - Fixed ``DataArray.loc`` indexing with ``...`` (:issue:`318`). - Fixed an edge case that resulting in an error when reindexing multi-dimensional variables (:issue:`315`). - Slicing with negative step sizes (:issue:`312`). - Invalid conversion of string arrays to numeric dtype (:issue:`305`). - Fixed ``repr()`` on dataset objects with non-standard dates (:issue:`347`). 
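Tying back to the ``compat``/``broadcast_equals`` machinery noted above, a minimal sketch with the present-day namespace (toy data):

.. code:: python

    import xarray as xr

    a = xr.Dataset({"c": 0})
    b = xr.Dataset({"c": ("x", [0, 0])})
    a.equals(b)            # False: the variables have different shapes
    a.broadcast_equals(b)  # True: equal after broadcasting against each other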
Deprecations ~~~~~~~~~~~~ - ``dump`` and ``dumps`` have been deprecated in favor of ``xray.Dataset.to_netcdf``. - ``drop_vars`` has been deprecated in favor of ``xray.Dataset.drop``. Future plans ~~~~~~~~~~~~ The biggest feature I'm excited about working toward in the immediate future is supporting out-of-core operations in xray using Dask_, a part of the Blaze_ project. For a preview of using Dask with weather data, read `this blog post`_ by Matthew Rocklin. See :issue:`328` for more details. .. _Dask: https://dask.org .. _Blaze: https://blaze.pydata.org .. _this blog post: https://matthewrocklin.com/blog/work/2015/02/13/Towards-OOC-Slicing-and-Stacking v0.3.2 (23 December, 2014) -------------------------- This release focused on bug-fixes, speedups and resolving some niggling inconsistencies. There are a few cases where the behavior of xray differs from the previous version. However, I expect that in almost all cases your code will continue to run unmodified. .. warning:: xray now requires pandas v0.15.0 or later. This was necessary for supporting TimedeltaIndex without too many painful hacks. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Arrays of :py:class:`datetime.datetime` objects are now automatically cast to ``datetime64[ns]`` arrays when stored in an xray object, using machinery borrowed from pandas: .. code:: python from datetime import datetime xray.Dataset({"t": [datetime(2000, 1, 1)]}) - xray now has support (including serialization to netCDF) for :py:class:`~pandas.TimedeltaIndex`. :py:class:`datetime.timedelta` objects are thus accordingly cast to ``timedelta64[ns]`` objects when appropriate. - Masked arrays are now properly coerced to use ``NaN`` as a sentinel value (:issue:`259`). Enhancements ~~~~~~~~~~~~ - Due to popular demand, we have added experimental attribute style access as a shortcut for dataset variables, coordinates and attributes: .. code:: python ds = xray.Dataset({"tmin": ([], 25, {"units": "celsius"})}) ds.tmin.units Tab-completion for these variables should work in editors such as IPython. However, setting variables or attributes in this fashion is not yet supported because there are some unresolved ambiguities (:issue:`300`). - You can now use a dictionary for indexing with labeled dimensions. This provides a safe way to do assignment with labeled dimensions: .. code:: python array = xray.DataArray(np.zeros(5), dims=["x"]) array[dict(x=slice(3))] = 1 array - Non-index coordinates can now be faithfully written to and restored from netCDF files. This is done according to CF conventions when possible by using the ``coordinates`` attribute on a data variable. When not possible, xray defines a global ``coordinates`` attribute. - Preliminary support for converting ``xray.DataArray`` objects to and from CDAT_ ``cdms2`` variables. - We sped up any operation that involves creating a new Dataset or DataArray (e.g., indexing, aggregation, arithmetic) by a factor of 30 to 50%. The full speed up requires cyordereddict_ to be installed. .. _CDAT: http://uvcdat.llnl.gov/ .. 
_cyordereddict: https://github.com/shoyer/cyordereddict Bug fixes ~~~~~~~~~ - Fix for ``to_dataframe()`` with 0d string/object coordinates (:issue:`287`) - Fix for ``to_netcdf`` with 0d string variable (:issue:`284`) - Fix writing datetime64 arrays to netcdf if NaT is present (:issue:`270`) - Fix align silently upcasts data arrays when NaNs are inserted (:issue:`264`) Future plans ~~~~~~~~~~~~ - I am contemplating switching to the terms "coordinate variables" and "data variables" instead of the (currently used) "coordinates" and "variables", following their use in `CF Conventions`_ (:issue:`293`). This would mostly have implications for the documentation, but I would also change the ``Dataset`` attribute ``vars`` to ``data``. - I no longer certain that automatic label alignment for arithmetic would be a good idea for xray -- it is a feature from pandas that I have not missed (:issue:`186`). - The main API breakage that I *do* anticipate in the next release is finally making all aggregation operations skip missing values by default (:issue:`130`). I'm pretty sick of writing ``ds.reduce(np.nanmean, 'time')``. - The next version of xray (0.4) will remove deprecated features and aliases whose use currently raises a warning. If you have opinions about any of these anticipated changes, I would love to hear them -- please add a note to any of the referenced GitHub issues. .. _CF Conventions: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.6/build/cf-conventions.html v0.3.1 (22 October, 2014) ------------------------- This is mostly a bug-fix release to make xray compatible with the latest release of pandas (v0.15). We added several features to better support working with missing values and exporting xray objects to pandas. We also reorganized the internal API for serializing and deserializing datasets, but this change should be almost entirely transparent to users. Other than breaking the experimental DataStore API, there should be no backwards incompatible changes. New features ~~~~~~~~~~~~ - Added ``xray.Dataset.count`` and ``xray.Dataset.dropna`` methods, copied from pandas, for working with missing values (:issue:`247`, :issue:`58`). - Added ``xray.DataArray.to_pandas`` for converting a data array into the pandas object with the same dimensionality (1D to Series, 2D to DataFrame, etc.) (:issue:`255`). - Support for reading gzipped netCDF3 files (:issue:`239`). - Reduced memory usage when writing netCDF files (:issue:`251`). - 'missing_value' is now supported as an alias for the '_FillValue' attribute on netCDF variables (:issue:`245`). - Trivial indexes, equivalent to ``range(n)`` where ``n`` is the length of the dimension, are no longer written to disk (:issue:`245`). Bug fixes ~~~~~~~~~ - Compatibility fixes for pandas v0.15 (:issue:`262`). - Fixes for display and indexing of ``NaT`` (not-a-time) (:issue:`238`, :issue:`240`) - Fix slicing by label was an argument is a data array (:issue:`250`). - Test data is now shipped with the source distribution (:issue:`253`). - Ensure order does not matter when doing arithmetic with scalar data arrays (:issue:`254`). - Order of dimensions preserved with ``DataArray.to_dataframe`` (:issue:`260`). v0.3 (21 September 2014) ------------------------ New features ~~~~~~~~~~~~ - **Revamped coordinates**: "coordinates" now refer to all arrays that are not used to index a dimension. Coordinates are intended to allow for keeping track of arrays of metadata that describe the grid on which the points in "variable" arrays lie. 
They are preserved (when unambiguous) even though mathematical operations. - **Dataset math** ``xray.Dataset`` objects now support all arithmetic operations directly. Dataset-array operations map across all dataset variables; dataset-dataset operations act on each pair of variables with the same name. - **GroupBy math**: This provides a convenient shortcut for normalizing by the average value of a group. - The dataset ``__repr__`` method has been entirely overhauled; dataset objects now show their values when printed. - You can now index a dataset with a list of variables to return a new dataset: ``ds[['foo', 'bar']]``. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``Dataset.__eq__`` and ``Dataset.__ne__`` are now element-wise operations instead of comparing all values to obtain a single boolean. Use the method ``xray.Dataset.equals`` instead. Deprecations ~~~~~~~~~~~~ - ``Dataset.noncoords`` is deprecated: use ``Dataset.vars`` instead. - ``Dataset.select_vars`` deprecated: index a ``Dataset`` with a list of variable names instead. - ``DataArray.select_vars`` and ``DataArray.drop_vars`` deprecated: use ``xray.DataArray.reset_coords`` instead. v0.2 (14 August 2014) --------------------- This is major release that includes some new features and quite a few bug fixes. Here are the highlights: - There is now a direct constructor for ``DataArray`` objects, which makes it possible to create a DataArray without using a Dataset. This is highlighted in the refreshed ``tutorial``. - You can perform aggregation operations like ``mean`` directly on ``xray.Dataset`` objects, thanks to Joe Hamman. These aggregation methods also worked on grouped datasets. - xray now works on Python 2.6, thanks to Anna Kuznetsova. - A number of methods and attributes were given more sensible (usually shorter) names: ``labeled`` -> ``sel``, ``indexed`` -> ``isel``, ``select`` -> ``select_vars``, ``unselect`` -> ``drop_vars``, ``dimensions`` -> ``dims``, ``coordinates`` -> ``coords``, ``attributes`` -> ``attrs``. - New ``xray.Dataset.load_data`` and ``xray.Dataset.close`` methods for datasets facilitate lower level of control of data loaded from disk. v0.1.1 (20 May 2014) -------------------- xray 0.1.1 is a bug-fix release that includes changes that should be almost entirely backwards compatible with v0.1: - Python 3 support (:issue:`53`) - Required numpy version relaxed to 1.7 (:issue:`129`) - Return numpy.datetime64 arrays for non-standard calendars (:issue:`126`) - Support for opening datasets associated with NetCDF4 groups (:issue:`127`) - Bug-fixes for concatenating datetime arrays (:issue:`134`) Special thanks to new contributors Thomas Kluyver, Joe Hamman and Alistair Miles. v0.1 (2 May 2014) ----------------- Initial release. 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/��������������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0015150�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/ANYTREE_LICENSE�����������������������������������������������������������0000664�0000000�0000000�00000026135�15114646760�0017353�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/DASK_LICENSE��������������������������������������������������������������0000664�0000000�0000000�00000002715�15114646760�0016764�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Copyright (c) 2014-2018, Anaconda, Inc. and contributors All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of Anaconda nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ���������������������������������������������������xarray-2025.12.0/licenses/ICOMOON_LICENSE�����������������������������������������������������������0000664�0000000�0000000�00000044340�15114646760�0017345�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Attribution 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. 
Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. f. 
Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. h. Licensor means the individual(s) or entity(ies) granting rights under this Public License. i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. j. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part; and b. produce, reproduce, and Share Adapted Material. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. 
UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. 
Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the โ€œLicensor.โ€ The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/NUMPY_LICENSE�������������������������������������������������������������0000664�0000000�0000000�00000003007�15114646760�0017145�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Copyright (c) 2005-2011, NumPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the NumPy Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/PANDAS_LICENSE������������������������������������������������������������0000664�0000000�0000000�00000003216�15114646760�0017205�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������pandas license ============== Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team All rights reserved. Copyright (c) 2008-2011 AQR Capital Management, LLC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/licenses/PYTHON_LICENSE������������������������������������������������������������0000664�0000000�0000000�00000030731�15114646760�0017262�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������A. HISTORY OF THE SOFTWARE ========================== Python was created in the early 1990s by Guido van Rossum at Stichting Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands as a successor of a language called ABC. Guido remains Python's principal author, although it includes many contributions from others. In 1995, Guido continued his work on Python at the Corporation for National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) in Reston, Virginia where he released several versions of the software. In May 2000, Guido and the Python core development team moved to BeOpen.com to form the BeOpen PythonLabs team. In October of the same year, the PythonLabs team moved to Digital Creations (now Zope Corporation, see http://www.zope.com). In 2001, the Python Software Foundation (PSF, see http://www.python.org/psf/) was formed, a non-profit organization created specifically to own Python-related Intellectual Property. Zope Corporation is a sponsoring member of the PSF. All Python releases are Open Source (see http://www.opensource.org for the Open Source Definition). Historically, most, but not all, Python releases have also been GPL-compatible; the table below summarizes the various releases. Release Derived Year Owner GPL- from compatible? (1) 0.9.0 thru 1.2 1991-1995 CWI yes 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes 1.6 1.5.2 2000 CNRI no 2.0 1.6 2000 BeOpen.com no 1.6.1 1.6 2001 CNRI yes (2) 2.1 2.0+1.6.1 2001 PSF no 2.0.1 2.0+1.6.1 2001 PSF yes 2.1.1 2.1+2.0.1 2001 PSF yes 2.1.2 2.1.1 2002 PSF yes 2.1.3 2.1.2 2002 PSF yes 2.2 and above 2.1.1 2001-now PSF yes Footnotes: (1) GPL-compatible doesn't mean that we're distributing Python under the GPL. All Python licenses, unlike the GPL, let you distribute a modified version without making your changes open source. The GPL-compatible licenses make it possible to combine Python with other software that is released under the GPL; the others don't. (2) According to Richard Stallman, 1.6.1 is not GPL-compatible, because its license has a choice of law clause. According to CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 is "not incompatible" with the GPL. Thanks to the many outside volunteers who have worked under Guido's direction to make these releases possible. B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON =============================================================== PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- 1. 
This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 ------------------------------------------- BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or Organization ("Licensee") accessing and otherwise using this software in source or binary form and its associated documentation ("the Software"). 2. Subject to the terms and conditions of this BeOpen Python License Agreement, BeOpen hereby grants Licensee a non-exclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use the Software alone or in any derivative version, provided, however, that the BeOpen Python License is retained in the Software, alone or in any derivative version prepared by Licensee. 3. BeOpen is making the Software available to Licensee on an "AS IS" basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 5. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 6. This License Agreement shall be governed by and interpreted in all respects by the law of the State of California, excluding conflict of law provisions. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between BeOpen and Licensee. This License Agreement does not grant permission to use BeOpen trademarks or trade names in a trademark sense to endorse or promote products or services of Licensee, or any third party. As an exception, the "BeOpen Python" logos available at http://www.pythonlabs.com/logos.html may be used according to the permissions granted on that web page. 7. By copying, installing or otherwise using the software, Licensee agrees to be bound by the terms and conditions of this License Agreement. CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 --------------------------------------- 1. This LICENSE AGREEMENT is between the Corporation for National Research Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191 ("CNRI"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 1.6.1 software in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, CNRI hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python 1.6.1 alone or in any derivative version, provided, however, that CNRI's License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement. This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013. This Agreement may also be obtained from a proxy server on the Internet using the following URL: http://hdl.handle.net/1895.22/1013". 3. In the event Licensee prepares a derivative work that is based on or incorporates Python 1.6.1 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python 1.6.1. 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. This License Agreement shall be governed by the federal intellectual property law of the United States, including without limitation the federal copyright law, and, to the extent such U.S. federal law does not apply, by the law of the Commonwealth of Virginia, excluding Virginia's conflict of law provisions. Notwithstanding the foregoing, with regard to derivative works based on Python 1.6.1 that incorporate non-separable material that was previously distributed under the GNU General Public License (GPL), the law of the Commonwealth of Virginia shall govern this License Agreement only as to issues arising under or with respect to Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between CNRI and Licensee. This License Agreement does not grant permission to use CNRI trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By clicking on the "ACCEPT" button where indicated, or by copying, installing or otherwise using Python 1.6.1, Licensee agrees to be bound by the terms and conditions of this License Agreement. ACCEPT CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 -------------------------------------------------- Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Stichting Mathematisch Centrum or CWI not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
���������������������������������������xarray-2025.12.0/licenses/SCIKIT_LEARN_LICENSE������������������������������������������������������0000664�0000000�0000000�00000002773�15114646760�0020155�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������BSD 3-Clause License Copyright (c) 2007-2021 The scikit-learn developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE �����xarray-2025.12.0/licenses/SEABORN_LICENSE�����������������������������������������������������������0000664�0000000�0000000�00000002732�15114646760�0017332�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Copyright (c) 2012-2013, Michael L. Waskom All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the {organization} nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ��������������������������������������xarray-2025.12.0/pixi.toml��������������������������������������������������������������������������0000664�0000000�0000000�00000020403�15114646760�0015210�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[workspace] preview = ["pixi-build"] channels = ["conda-forge", "nodefaults"] platforms = ["win-64", "linux-64", "osx-arm64"] [package] name = "xarray" version = "dynamic" # dynamic versioning needs better support in pixi https://github.com/prefix-dev/pixi/issues/2923#issuecomment-2598460666 . Putting `version = "dynamic"` here for now until pixi recommends something else. [package.build] backend = { name = "pixi-build-python", version = "==0.4.0" } [package.host-dependencies] setuptools = "*" setuptools_scm = "*" [package.run-dependencies] python = "*" numpy = "*" pandas = "*" packaging = "24.1.*" git = "*" # needed for dynamic versioning [dependencies] xarray = { path = "." } [target.linux-64.dependencies] pydap-server = "*" [feature.py311.dependencies] python = "3.11.*" [feature.py312.dependencies] python = "3.12.*" [feature.py313.dependencies] python = "3.13.*" [feature.backends.dependencies] # files h5netcdf = "*" h5py = "*" hdf5 = "*" netcdf4 = "*" zarr = "*" rasterio = "*" # opendap pydap = "*" lxml = "*" # Optional dep of pydap # s3 boto3 = "*" fsspec = "*" aiobotocore = "*" [feature.numba.dependencies] numba = "*" numbagg = "*" [feature.dask.dependencies] dask = "*" distributed = "*" [feature.accel.dependencies] flox = "*" bottleneck = "*" numexpr = "*" pyarrow = "*" opt_einsum = "*" [feature.viz.dependencies] cartopy = "*" matplotlib-base = "*" nc-time-axis = "*" seaborn = "*" [feature.extras.dependencies] # array sparse = "*" # algorithms scipy = "*" toolz = "*" # tutorial pooch = "*" # other cftime = "*" pint = "*" iris = "*" [feature.extras.pypi-dependencies] # array jax = "*" # no way to get cpu-only jaxlib from conda if gpu is present [feature.minimal.dependencies] # minimal versions python = "3.11.*" numpy = "1.26.*" pandas = "2.2.*" [feature.minimum-scipy.dependencies] scipy = "1.13.*" [feature.min-versions.dependencies] # minimal versions for all dependencies # Note that when you update min-supported versions, you should: # - Update the min version lower-bound in the corresponding feature(s) where applicable # - Update this section to pin to the min version array-api-strict = "1.1.*" # dependency for testing the array api compat boto3 = "1.34.*" bottleneck = "1.4.*" cartopy = "0.23.*" cftime = "1.6.*" dask-core = "2024.6.*" distributed = "2024.6.*" flox = "0.9.*" h5netcdf = "1.3.*" # h5py and hdf5 tend to cause conflicts # for e.g. 
hdf5 1.12 conflicts with h5py=3.1 # prioritize bumping other packages instead h5py = "3.11.*" hdf5 = "1.14.*" iris = "3.9.*" lxml = "5.1.*" # Optional dep of pydap matplotlib-base = "3.8.*" nc-time-axis = "1.4.*" # netcdf follows a 1.major.minor[.patch] convention # (see https://github.com/Unidata/netcdf4-python/issues/1090) netcdf4 = "1.6.*" numba = "0.60.*" numbagg = "0.8.*" packaging = "24.1.*" pint = "0.24.*" pydap = "3.5.*" rasterio = "1.3.*" seaborn = "0.13.*" sparse = "0.15.*" toolz = "0.12.*" zarr = "2.18.*" [feature.nightly.dependencies] python = "*" [feature.nightly.pypi-options.dependency-overrides] numpy = { version = "*", index = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" } scipy = { version = "*", index = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" } matplotlib = { version = "*", index = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" } pandas = { version = "*", index = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" } pyarrow = { version = "*", index = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" } dask = { git = "https://github.com/dask/dask" } distributed = { git = "https://github.com/dask/distributed" } zarr = { git = "https://github.com/zarr-developers/zarr-python" } numcodecs = { git = "https://github.com/zarr-developers/numcodecs" } cftime = { git = "https://github.com/Unidata/cftime" } # packaging = { git = "https://github.com/pypa/packaging"} #? Pixi warns if this is enabled pint = { git = "https://github.com/hgrecco/pint" } bottleneck = { git = "https://github.com/pydata/bottleneck" } fsspec = { git = "https://github.com/intake/filesystem_spec" } nc-time-axis = { git = "https://github.com/SciTools/nc-time-axis" } flox = { git = "https://github.com/xarray-contrib/flox" } h5netcdf = { git = "https://github.com/h5netcdf/h5netcdf" } opt_einsum = { git = "https://github.com/dgasmith/opt_einsum" } # sparse = { git = "https://github.com/pydata/sparse"} [feature.nightly.pypi-dependencies] xarray = { path = ".", editable = true } numpy = "*" pandas = "*" matplotlib = "*" scipy = "*" pyarrow = "*" dask = "*" distributed = "*" zarr = "*" numcodecs = "*" cftime = "*" packaging = "*" pint = "*" bottleneck = "*" fsspec = "*" nc-time-axis = "*" flox = "*" # h5netcdf = "*" # h5py = "*" opt_einsum = "*" netcdf4 = "*" scitools-iris = "*" pydap = "*" cartopy = "*" seaborn = "*" [feature.test.dependencies] array-api-strict = "<2.4" pytest = "*" pytest-asyncio = "*" pytest-cov = "*" pytest-env = "*" pytest-mypy-plugins = "*" pytest-reportlog = "*" pytest-timeout = "*" pytest-xdist = "*" pytz = "*" hypothesis = "*" coveralls = "*" [feature.test.tasks] test = { cmd = "pytest" } [feature.doc.dependencies] kerchunk = "*" ipykernel = "*" ipywidgets = "*" # silence nbsphinx warning ipython = "*" jupyter_client = "*" jupyter_sphinx = "*" nbsphinx = "*" ncdata = "*" pydata-sphinx-theme = "*" pyproj = "*" rich = "*" # for Zarr tree() setuptools = "*" sphinx-autosummary-accessors = "*" sphinx-copybutton = "*" sphinx-design = "*" sphinx-inline-tabs = "*" sphinx-llms-txt = "*" sphinx = ">=6,<8" sphinxcontrib-mermaid = "*" sphinxcontrib-srclinks = "*" sphinx-remove-toctrees = "*" sphinxext-opengraph = "*" sphinxext-rediraffe = "*" [feature.doc.pypi-dependencies] cfgrib = "*" # pypi dep because of https://github.com/prefix-dev/pixi/issues/3032#issuecomment-3302638043 [feature.doc.tasks] doc = { cmd = "make html", cwd = "doc" } doc-clean = { cmd = "make clean && make html", cwd = "doc" } 
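# --------------------------------------------------------------------------
# Usage note (illustrative addition, not part of the configuration itself):
# the tasks defined in this file ("test", "doc", "doc-clean", "mypy", ...) are
# meant to be run through the pixi CLI inside one of the named environments
# declared in the [environments] table below. Assuming standard pixi CLI
# usage (the exact flag spelling is an assumption, not defined by this file):
#
#   pixi run --environment test-py313 test   # run the test suite
#   pixi run --environment doc doc           # build the documentation
#   pixi run --environment typing mypy       # run the static type checks
# --------------------------------------------------------------------------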
[feature.typing.dependencies] mypy = "==1.18.1" pyright = "*" hypothesis = "*" lxml = "*" pandas-stubs = "<=2.2.3.241126" # https://github.com/pydata/xarray/issues/10110 types-colorama = "*" types-docutils = "*" types-psutil = "*" types-Pygments = "*" types-python-dateutil = "*" types-pytz = "*" types-PyYAML = "*" types-requests = "*" types-setuptools = "*" types-openpyxl = "*" typing_extensions = "*" pip = "*" [feature.typing.pypi-dependencies] types-defusedxml = "*" types-pexpect = "*" [feature.typing.tasks] mypy = "mypy --install-types --non-interactive --cobertura-xml-report mypy_report" [feature.pre-commit.dependencies] pre-commit = "*" [feature.pre-commit.tasks] pre-commit = { cmd = "pre-commit" } [feature.release.dependencies] gitpython = "*" cytoolz = "*" [feature.release.tasks] release-contributors = "python ci/release_contributors.py" [environments] # Testing # test-just-xarray = { features = ["test"] } # https://github.com/pydata/xarray/pull/10888/files#r2511336147 test-py313-no-numba = { features = [ "py313", "test", "backends", "accel", "dask", "viz", "extras", ] } test-py313-no-dask = { features = [ "py312", "test", "backends", "accel", "numba", "viz", "extras", ] } test-py313 = { features = [ "py313", "test", "backends", "accel", "numba", "dask", "viz", "extras", ] } test-nightly = { features = [ "py313", "nightly", "test", # "typing", ], no-default-feature = true } test-py311 = { features = [ "py311", "test", "backends", "accel", "numba", "dask", "viz", "extras", ] } test-py311-with-typing = { features = [ "py311", "test", "backends", "accel", "numba", "dask", "viz", "extras", "typing", ] } test-py313-with-typing = { features = [ "py313", "test", "backends", "accel", "numba", "dask", "viz", "extras", "typing", ] } test-py311-bare-minimum = { features = ["test", "minimal"] } test-py311-bare-min-and-scipy = { features = [ "test", "minimal", "minimum-scipy", ] } test-py311-min-versions = { features = [ "test", "minimal", "minimum-scipy", "min-versions", ] } # Extra typing = { features = ["typing"] } doc = { features = [ "doc", "backends", "test", "accel", "viz", "extras", ] } pre-commit = { features = ["pre-commit"], no-default-feature = true } release = { features = ["release"], no-default-feature = true } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/properties/������������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0015537�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/properties/README.md���������������������������������������������������������������0000664�0000000�0000000�00000001741�15114646760�0017021�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Property-based 
tests using Hypothesis This directory contains property-based tests using a library called [Hypothesis](https://github.com/HypothesisWorks/hypothesis-python). The property tests for xarray are a work in progress - more are always welcome. They are stored in a separate directory because they tend to run more examples and thus take longer, and so that local development can run a test suite without needing to `pip install hypothesis`. ## Hang on, "property-based" tests? Instead of making assertions about operations on a particular piece of data, you use Hypothesis to describe a _kind_ of data, then make assertions that should hold for _any_ example of this kind. For example: "given a 2d ndarray of dtype uint8 `arr`, `xr.DataArray(arr).plot.imshow()` never raises an exception". Hypothesis will then try many random examples, and report a minimised failing input for each error it finds. [See the docs for more info.](https://hypothesis.readthedocs.io/en/master/) �������������������������������xarray-2025.12.0/properties/__init__.py�������������������������������������������������������������0000664�0000000�0000000�00000000000�15114646760�0017636�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/properties/conftest.py�������������������������������������������������������������0000664�0000000�0000000�00000001375�15114646760�0017744�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import pytest def pytest_addoption(parser): parser.addoption( "--run-slow-hypothesis", action="store_true", default=False, help="run slow hypothesis tests", ) def pytest_collection_modifyitems(config, items): if config.getoption("--run-slow-hypothesis"): return skip_slow_hyp = pytest.mark.skip(reason="need --run-slow-hypothesis option to run") for item in items: if "slow_hypothesis" in item.keywords: item.add_marker(skip_slow_hyp) try: from hypothesis import settings except ImportError: pass else: # Run for a while - arrays are a bigger search space than usual settings.register_profile("ci", deadline=None, print_blob=True) settings.load_profile("ci") �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/properties/test_encode_decode.py���������������������������������������������������0000664�0000000�0000000�00000002726�15114646760�0021717�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Property-based tests for encoding/decoding methods. These ones pass, just as you'd hope! 
""" import warnings import pytest pytest.importorskip("hypothesis") # isort: split import hypothesis.extra.numpy as npst import numpy as np from hypothesis import given import xarray as xr from xarray.coding.times import _parse_iso8601 from xarray.testing.strategies import datetimes, variables @pytest.mark.slow @given(original=variables()) def test_CFMask_coder_roundtrip(original) -> None: coder = xr.coding.variables.CFMaskCoder() roundtripped = coder.decode(coder.encode(original)) xr.testing.assert_identical(original, roundtripped) @pytest.mark.xfail @pytest.mark.slow @given(var=variables(dtype=npst.floating_dtypes())) def test_CFMask_coder_decode(var) -> None: var[0] = -99 var.attrs["_FillValue"] = -99 coder = xr.coding.variables.CFMaskCoder() decoded = coder.decode(var) assert np.isnan(decoded[0]) @pytest.mark.slow @given(original=variables()) def test_CFScaleOffset_coder_roundtrip(original) -> None: coder = xr.coding.variables.CFScaleOffsetCoder() roundtripped = coder.decode(coder.encode(original)) xr.testing.assert_identical(original, roundtripped) @given(dt=datetimes()) def test_iso8601_decode(dt): iso = dt.isoformat() with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*") parsed, _ = _parse_iso8601(type(dt), iso) assert dt == parsed ������������������������������������������xarray-2025.12.0/properties/test_index_manipulation.py����������������������������������������������0000664�0000000�0000000�00000023441�15114646760�0023043�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import itertools import warnings import numpy as np import pytest import xarray as xr from xarray import Dataset from xarray.testing import _assert_internal_invariants pytest.importorskip("hypothesis") pytestmark = pytest.mark.slow_hypothesis import hypothesis.extra.numpy as npst import hypothesis.strategies as st from hypothesis import note, settings from hypothesis.stateful import ( RuleBasedStateMachine, initialize, invariant, precondition, rule, ) import xarray.testing.strategies as xrst # Strategy for generating names - uniqueness is enforced by the state machine NAME_STRATEGY = xrst.names() DIM_NAME = xrst.dimension_names(name_strategy=NAME_STRATEGY, min_dims=1, max_dims=1) index_variables = st.builds( xr.Variable, data=npst.arrays( dtype=xrst.pandas_index_dtypes(), shape=npst.array_shapes(min_dims=1, max_dims=1), elements=dict(allow_nan=False, allow_infinity=False, allow_subnormal=False), unique=True, ), dims=DIM_NAME, attrs=xrst.attrs(), ) def add_dim_coord_and_data_var(ds, var): (name,) = var.dims # dim coord ds[name] = var # non-dim coord of same size; this allows renaming ds[name + "_"] = var class DatasetStateMachine(RuleBasedStateMachine): # Can't use bundles because we'd need pre-conditions on consumes(bundle) # indexed_dims = Bundle("indexed_dims") # multi_indexed_dims = Bundle("multi_indexed_dims") def __init__(self): super().__init__() self.dataset = Dataset() self.check_default_indexes = True # We track these separately as lists so we can guarantee order of iteration over them. 
# Order of iteration over Dataset.dims is not guaranteed self.indexed_dims = [] self.multi_indexed_dims = [] # Track all used names to ensure uniqueness (avoids flaky Hypothesis tests) self.used_names: set[str] = set() def _draw_unique_name(self, data) -> str: """Draw a name that hasn't been used yet in this test case.""" name = data.draw(NAME_STRATEGY.filter(lambda x: x not in self.used_names)) self.used_names.add(name) return name def _draw_unique_var(self, data) -> xr.Variable: """Draw an index variable with a unique dimension name.""" var = data.draw(index_variables) # Replace with a guaranteed unique name new_name = self._draw_unique_name(data) return xr.Variable(dims=(new_name,), data=var.data, attrs=var.attrs) @initialize(data=st.data()) def init_ds(self, data): """Initialize the Dataset so that at least one rule will always fire.""" var = self._draw_unique_var(data) (name,) = var.dims note(f"initializing with dimension coordinate {name}") add_dim_coord_and_data_var(self.dataset, var) self.indexed_dims.append(name) # TODO: stacking with a timedelta64 index and unstacking converts it to object @rule(data=st.data()) def add_dim_coord(self, data): var = self._draw_unique_var(data) (name,) = var.dims note(f"adding dimension coordinate {name}") add_dim_coord_and_data_var(self.dataset, var) self.indexed_dims.append(name) @rule(data=st.data()) def assign_coords(self, data): var = self._draw_unique_var(data) (name,) = var.dims note(f"assign_coords: {name}") self.dataset = self.dataset.assign_coords({name: var}) self.indexed_dims.append(name) @property def has_indexed_dims(self) -> bool: return bool(self.indexed_dims + self.multi_indexed_dims) @rule(data=st.data()) @precondition(lambda self: self.has_indexed_dims) def reset_index(self, data): dim = data.draw(st.sampled_from(self.indexed_dims + self.multi_indexed_dims)) self.check_default_indexes = False note(f"> resetting {dim}") self.dataset = self.dataset.reset_index(dim) if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @rule(data=st.data(), create_index=st.booleans()) @precondition(lambda self: bool(self.indexed_dims)) def stack(self, data, create_index): newname = self._draw_unique_name(data) oldnames = data.draw( st.lists( st.sampled_from(self.indexed_dims), min_size=1, max_size=3 if create_index else None, unique=True, ) ) note(f"> stacking {oldnames} as {newname}") self.dataset = self.dataset.stack( {newname: oldnames}, create_index=create_index ) if create_index: self.multi_indexed_dims += [newname] # if create_index is False, then we just drop these for dim in oldnames: del self.indexed_dims[self.indexed_dims.index(dim)] @rule(data=st.data()) @precondition(lambda self: bool(self.multi_indexed_dims)) def unstack(self, data): # TODO: add None dim = data.draw(st.sampled_from(self.multi_indexed_dims)) note(f"> unstacking {dim}") if dim is not None: pd_index = self.dataset.xindexes[dim].index self.dataset = self.dataset.unstack(dim) del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] if dim is not None: self.indexed_dims.extend(pd_index.names) else: # TODO: fix this pass @rule(data=st.data()) @precondition(lambda self: bool(self.dataset.variables)) def rename_vars(self, data): newname = self._draw_unique_name(data) dim = data.draw(st.sampled_from(sorted(self.dataset.variables))) # benbovy: "skip the default indexes invariant test when the name of an # existing dimension coordinate is passed as 
input kwarg or dict key # to .rename_vars()." self.check_default_indexes = False note(f"> renaming {dim} to {newname}") self.dataset = self.dataset.rename_vars({dim: newname}) if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @precondition(lambda self: bool(self.dataset.dims)) @rule(data=st.data()) def drop_dims(self, data): dims = data.draw( st.lists( st.sampled_from(sorted(self.dataset.dims)), min_size=1, unique=True, ) ) note(f"> drop_dims: {dims}") # TODO: dropping a multi-index dimension raises a DeprecationWarning with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) self.dataset = self.dataset.drop_dims(dims) for dim in dims: if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @precondition(lambda self: bool(self.indexed_dims)) @rule(data=st.data()) def drop_indexes(self, data): self.check_default_indexes = False dims = data.draw( st.lists(st.sampled_from(self.indexed_dims), min_size=1, unique=True) ) note(f"> drop_indexes: {dims}") self.dataset = self.dataset.drop_indexes(dims) for dim in dims: if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @property def swappable_dims(self): ds = self.dataset options = [] for dim in self.indexed_dims: choices = [ name for name, var in ds._variables.items() if var.dims == (dim,) # TODO: Avoid swapping a dimension to itself and name != dim ] options.extend( (a, b) for a, b in itertools.zip_longest((dim,), choices, fillvalue=dim) ) return options @rule(data=st.data()) # TODO: swap_dims is basically all broken if a multiindex is present # TODO: Avoid swapping from Index to a MultiIndex level # TODO: Avoid swapping from MultiIndex to a level of the same MultiIndex # TODO: Avoid swapping when a MultiIndex is present @precondition(lambda self: not bool(self.multi_indexed_dims)) @precondition(lambda self: bool(self.swappable_dims)) def swap_dims(self, data): ds = self.dataset options = self.swappable_dims dim, to = data.draw(st.sampled_from(options)) note( f"> swapping {dim} to {to}, found swappable dims: {options}, all_dims: {tuple(self.dataset.dims)}" ) self.dataset = ds.swap_dims({dim: to}) del self.indexed_dims[self.indexed_dims.index(dim)] self.indexed_dims += [to] @invariant() def assert_invariants(self): # note(f"> ===\n\n {self.dataset!r} \n===\n\n") _assert_internal_invariants(self.dataset, self.check_default_indexes) DatasetStateMachine.TestCase.settings = settings(max_examples=300, deadline=None) DatasetTest = DatasetStateMachine.TestCase @pytest.mark.skip(reason="failure detected by hypothesis") def test_unstack_object(): ds = xr.Dataset() ds["0"] = np.array(["", "\x000"], dtype=object) ds.stack({"1": ["0"]}).unstack() @pytest.mark.skip(reason="failure detected by hypothesis") def test_unstack_timedelta_index(): ds = xr.Dataset() ds["0"] = np.array([0, 1, 2, 3], dtype="timedelta64[ns]") ds.stack({"1": ["0"]}).unstack() 
�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/properties/test_pandas_roundtrip.py������������������������������������������������0000664�0000000�0000000�00000013742�15114646760�0022533�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Property-based tests for roundtripping between xarray and pandas objects. """ from functools import partial from typing import cast import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core.dataset import Dataset pytest.importorskip("hypothesis") import hypothesis.extra.numpy as npst # isort:skip import hypothesis.extra.pandas as pdst # isort:skip import hypothesis.strategies as st # isort:skip from hypothesis import given # isort:skip from xarray.tests import has_pyarrow numeric_dtypes = st.one_of( npst.unsigned_integer_dtypes(endianness="="), npst.integer_dtypes(endianness="="), npst.floating_dtypes(endianness="="), ) numeric_series = numeric_dtypes.flatmap(lambda dt: pdst.series(dtype=dt)) @st.composite def dataframe_strategy(draw): tz = draw(st.timezones()) dtype = pd.DatetimeTZDtype(unit="ns", tz=tz) datetimes = st.datetimes( min_value=pd.Timestamp("1677-09-21T00:12:43.145224193"), max_value=pd.Timestamp("2262-04-11T23:47:16.854775807"), timezones=st.just(tz), ) df = pdst.data_frames( [ pdst.column("datetime_col", elements=datetimes), pdst.column("other_col", elements=st.integers()), ], index=pdst.range_indexes(min_size=1, max_size=10), ) return draw(df).astype({"datetime_col": dtype}) an_array = npst.arrays( dtype=numeric_dtypes, shape=npst.array_shapes(max_dims=2), # can only convert 1D/2D to pandas ) @st.composite def datasets_1d_vars(draw) -> xr.Dataset: """Generate datasets with only 1D variables Suitable for converting to pandas dataframes. """ # Generate an index for the dataset idx = draw(pdst.indexes(dtype="u8", min_size=0, max_size=100)) # Generate 1-3 variables, 1D with the same length as the index vars_strategy = st.dictionaries( keys=st.text(), values=npst.arrays(dtype=numeric_dtypes, shape=len(idx)).map( partial(xr.Variable, ("rows",)) ), min_size=1, max_size=3, ) return xr.Dataset(draw(vars_strategy), coords={"rows": idx}) @given(st.data(), an_array) def test_roundtrip_dataarray(data, arr) -> None: names = data.draw( st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map( tuple ) ) coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape, strict=True)} original = xr.DataArray(arr, dims=names, coords=coords) roundtripped = xr.DataArray(original.to_pandas()) xr.testing.assert_identical(original, roundtripped) @given(datasets_1d_vars()) def test_roundtrip_dataset(dataset: Dataset) -> None: df = dataset.to_dataframe() assert isinstance(df, pd.DataFrame) roundtripped = xr.Dataset.from_dataframe(df) xr.testing.assert_identical(dataset, roundtripped) @given(numeric_series, st.text()) def test_roundtrip_pandas_series(ser, ix_name) -> None: # Need to name the index, otherwise Xarray calls it 'dim_0'. 
ser.index.name = ix_name arr = xr.DataArray(ser) roundtripped = arr.to_pandas() pd.testing.assert_series_equal(ser, roundtripped) # type: ignore[arg-type] xr.testing.assert_identical(arr, roundtripped.to_xarray()) # Dataframes with columns of all the same dtype - for roundtrip to DataArray numeric_homogeneous_dataframe = numeric_dtypes.flatmap( lambda dt: pdst.data_frames(columns=pdst.columns(["a", "b", "c"], dtype=dt)) ) @pytest.mark.xfail @given(numeric_homogeneous_dataframe) def test_roundtrip_pandas_dataframe(df) -> None: # Need to name the indexes, otherwise Xarray names them 'dim_0', 'dim_1'. df.index.name = "rows" df.columns.name = "cols" arr = xr.DataArray(df) roundtripped = arr.to_pandas() pd.testing.assert_frame_equal(df, cast(pd.DataFrame, roundtripped)) xr.testing.assert_identical(arr, roundtripped.to_xarray()) @given(df=dataframe_strategy()) def test_roundtrip_pandas_dataframe_datetime(df) -> None: # Need to name the indexes, otherwise Xarray names them 'dim_0', 'dim_1'. df.index.name = "rows" df.columns.name = "cols" dataset = xr.Dataset.from_dataframe(df) roundtripped = dataset.to_dataframe() roundtripped.columns.name = "cols" # why? pd.testing.assert_frame_equal(df, roundtripped) xr.testing.assert_identical(dataset, roundtripped.to_xarray()) @pytest.mark.parametrize( "extension_array", [ pd.Categorical(["a", "b", "c"]), pd.array(["a", "b", "c"], dtype="string"), pd.arrays.IntervalArray( [pd.Interval(0, 1), pd.Interval(1, 5), pd.Interval(2, 6)] ), pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(["1h", "2h", "3h"])), # type: ignore[attr-defined] pd.arrays.DatetimeArray._from_sequence( # type: ignore[attr-defined] pd.DatetimeIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D") ), np.array([1, 2, 3], dtype="int64"), ] + ([pd.array([1, 2, 3], dtype="int64[pyarrow]")] if has_pyarrow else []), ids=["cat", "string", "interval", "timedelta", "datetime", "numpy"] + (["pyarrow"] if has_pyarrow else []), ) @pytest.mark.parametrize("is_index", [True, False]) def test_roundtrip_1d_pandas_extension_array(extension_array, is_index) -> None: df = pd.DataFrame({"arr": extension_array}) if is_index: df = df.set_index("arr") arr = xr.Dataset.from_dataframe(df)["arr"] roundtripped = arr.to_pandas() df_arr_to_test = df.index if is_index else df["arr"] assert (df_arr_to_test == roundtripped).all() # `NumpyExtensionArray` types are not roundtripped, including `StringArray` which subtypes. 
if isinstance( extension_array, pd.arrays.NumpyExtensionArray | pd.arrays.ArrowStringArray ): # type: ignore[attr-defined] assert isinstance(arr.data, np.ndarray) else: assert ( df_arr_to_test.dtype == (roundtripped.index if is_index else roundtripped).dtype ) xr.testing.assert_identical(arr, roundtripped.to_xarray()) ������������������������������xarray-2025.12.0/properties/test_properties.py������������������������������������������������������0000664�0000000�0000000�00000003721�15114646760�0021347�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import itertools import pytest pytest.importorskip("hypothesis") import hypothesis.strategies as st from hypothesis import given, note import xarray as xr import xarray.testing.strategies as xrst from xarray.groupers import find_independent_seasons, season_to_month_tuple @given(attrs=xrst.simple_attrs) def test_assert_identical(attrs): v = xr.Variable(dims=(), data=0, attrs=attrs) xr.testing.assert_identical(v, v.copy(deep=True)) ds = xr.Dataset(attrs=attrs) xr.testing.assert_identical(ds, ds.copy(deep=True)) @given( roll=st.integers(min_value=0, max_value=12), breaks=st.lists( st.integers(min_value=0, max_value=11), min_size=1, max_size=12, unique=True ), ) def test_property_season_month_tuple(roll, breaks): chars = list("JFMAMJJASOND") months = tuple(range(1, 13)) rolled_chars = chars[roll:] + chars[:roll] rolled_months = months[roll:] + months[:roll] breaks = sorted(breaks) if breaks[0] != 0: breaks = [0] + breaks if breaks[-1] != 12: breaks = breaks + [12] seasons = tuple( "".join(rolled_chars[start:stop]) for start, stop in itertools.pairwise(breaks) ) actual = season_to_month_tuple(seasons) expected = tuple( rolled_months[start:stop] for start, stop in itertools.pairwise(breaks) ) assert expected == actual @given(data=st.data(), nmonths=st.integers(min_value=1, max_value=11)) def test_property_find_independent_seasons(data, nmonths): chars = "JFMAMJJASOND" # if stride > nmonths, then we can't infer season order stride = data.draw(st.integers(min_value=1, max_value=nmonths)) chars = chars + chars[:nmonths] seasons = [list(chars[i : i + nmonths]) for i in range(0, 12, stride)] note(seasons) groups = find_independent_seasons(seasons) for group in groups: inds = tuple(itertools.chain(*group.inds)) assert len(inds) == len(set(inds)) assert len(group.codes) == len(set(group.codes)) �����������������������������������������������xarray-2025.12.0/pyproject.toml���������������������������������������������������������������������0000664�0000000�0000000�00000030551�15114646760�0016263�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[project] authors = [{ name = "xarray Developers", email = "xarray@googlegroups.com" }] classifiers = [ "Development Status :: 5 - Production/Stable", "Operating System :: OS Independent", "Intended Audience :: Science/Research", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming 
Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering", ] description = "N-D labeled arrays and datasets in Python" dynamic = ["version"] license = "Apache-2.0" name = "xarray" readme = "README.md" requires-python = ">=3.11" dependencies = ["numpy>=1.26", "packaging>=24.1", "pandas>=2.2"] # We don't encode minimum requirements here (though if we can write a script to # generate the text from `min_deps_check.py`, that's welcome...). We do add # `numba>=0.54` here because of https://github.com/astral-sh/uv/issues/7881; # note that it's not a direct dependency of xarray. [project.optional-dependencies] accel = [ "scipy>=1.13", "bottleneck", "numbagg>=0.8", "numba>=0.62", # numba 0.62 added support for numpy 2.3 "flox>=0.9", "opt_einsum", ] complete = ["xarray[accel,etc,io,parallel,viz]"] io = [ "netCDF4>=1.6.0", "h5netcdf", "pydap", "scipy>=1.13", "zarr>=2.18", "fsspec", "cftime", "pooch", ] etc = ["sparse>=0.15"] parallel = ["dask[complete]"] viz = ["cartopy>=0.23", "matplotlib>=3.8", "nc-time-axis", "seaborn"] types = [ "pandas-stubs", "scipy-stubs", "types-PyYAML", "types-Pygments", "types-colorama", "types-decorator", "types-defusedxml", "types-docutils", "types-networkx", "types-pexpect", "types-psutil", "types-pycurl", "types-openpyxl", "types-python-dateutil", "types-pytz", "types-requests", "types-setuptools", ] [dependency-groups] dev = [ "hypothesis", "jinja2", "mypy==1.18.1", "pre-commit", "pytest", "pytest-cov", "pytest-env", "pytest-mypy-plugins", "pytest-timeout", "pytest-xdist", "pytest-asyncio", "ruff>=0.8.0", "sphinx", "sphinx_autosummary_accessors", "xarray[complete,types]", ] [project.urls] Documentation = "https://docs.xarray.dev" SciPy2015-talk = "https://www.youtube.com/watch?v=X0pAhJgySxk" homepage = "https://xarray.dev/" issue-tracker = "https://github.com/pydata/xarray/issues" source-code = "https://github.com/pydata/xarray" [project.entry-points."xarray.chunkmanagers"] dask = "xarray.namedarray.daskmanager:DaskManager" [build-system] build-backend = "setuptools.build_meta" requires = ["setuptools>=77.0.3", "setuptools-scm>=8"] [tool.setuptools.packages.find] include = ["xarray*"] [tool.setuptools_scm] fallback_version = "9999" [tool.coverage.run] omit = [ "*/xarray/tests/*", "*/xarray/compat/dask_array_compat.py", "*/xarray/compat/npcompat.py", "*/xarray/compat/pdcompat.py", "*/xarray/namedarray/pycompat.py", "*/xarray/core/types.py", ] source = ["xarray"] [tool.coverage.report] exclude_lines = ["pragma: no cover", "if TYPE_CHECKING"] [tool.mypy] enable_error_code = ["ignore-without-code", "redundant-self", "redundant-expr"] exclude = ['build', 'xarray/util/generate_.*\.py'] files = "xarray" show_error_context = true warn_redundant_casts = true warn_unused_configs = true warn_unused_ignores = true # Much of the numerical computing stack doesn't have type annotations yet. 
[[tool.mypy.overrides]] ignore_missing_imports = true module = [ "affine.*", "bottleneck.*", "cartopy.*", "cf_units.*", "cfgrib.*", "cftime.*", "cloudpickle.*", "cubed.*", "cupy.*", "fsspec.*", "h5netcdf.*", "h5py.*", "iris.*", "mpl_toolkits.*", "nc_time_axis.*", "netCDF4.*", "netcdftime.*", "numcodecs.*", "opt_einsum.*", "pint.*", "pooch.*", "pyarrow.*", "pydap.*", "scipy.*", "seaborn.*", "setuptools", "sparse.*", "toolz.*", "zarr.*", "numpy.exceptions.*", # remove once support for `numpy<2.0` has been dropped "array_api_strict.*", ] # Gradually we want to add more modules to this list, ratcheting up our total # coverage. Once a module is here, functions are checked by mypy regardless of # whether they have type annotations. It would be especially useful to have test # files listed here, because without them being checked, we don't have a great # way of testing our annotations. [[tool.mypy.overrides]] check_untyped_defs = true module = [ "xarray.core.accessor_dt", "xarray.core.accessor_str", "xarray.structure.alignment", "xarray.computation.*", "xarray.indexes.*", "xarray.tests.*", ] # Use strict = true whenever namedarray has become standalone. In the meantime # don't forget to add all new files related to namedarray here: # ref: https://mypy.readthedocs.io/en/stable/existing_code.html#introduce-stricter-options [[tool.mypy.overrides]] # Start off with these warn_unused_ignores = true # Getting these passing should be easy strict_concatenate = true strict_equality = true # Strongly recommend enabling this one as soon as you can check_untyped_defs = true # These shouldn't be too much additional work, but may be tricky to # get passing if you use a lot of untyped libraries disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_decorators = true # These next few are various gradations of forcing use of type annotations disallow_incomplete_defs = true disallow_untyped_calls = true disallow_untyped_defs = true # This one isn't too hard to get passing, but return on investment is lower no_implicit_reexport = true # This one can be tricky to get passing if you use a lot of untyped libraries warn_return_any = true module = ["xarray.namedarray.*", "xarray.tests.test_namedarray"] # We disable pyright here for now, since including it means that all errors show # up in devs' VS Code, which then makes it more difficult to work with actual # errors. It overrides local VS Code settings so isn't escapable. 
# [tool.pyright] # defineConstant = {DEBUG = true} # # Enabling this means that developers who have disabled the warning locally โ€” # # because not all dependencies are installable โ€” are overridden # # reportMissingImports = true # reportMissingTypeStubs = false [tool.ruff] extend-exclude = ["doc", "_typed_ops.pyi"] [tool.ruff.lint] extend-select = [ "YTT", # flake8-2020 "B", # flake8-bugbear "C4", # flake8-comprehensions "ISC", # flake8-implicit-str-concat "PIE", # flake8-pie "TID", # flake8-tidy-imports (absolute imports) "PYI", # flake8-pyi "SIM", # flake8-simplify "FLY", # flynt "I", # isort "PERF", # Perflint "W", # pycodestyle warnings "PGH", # pygrep-hooks "PLC", # Pylint Convention "PLE", # Pylint Errors "PLR", # Pylint Refactor "PLW", # Pylint Warnings "UP", # pyupgrade "FURB", # refurb "RUF", ] extend-safe-fixes = [ "TID252", # absolute imports ] ignore = [ "C40", # unnecessary generator, comprehension, or literal "PIE790", # unnecessary pass statement "PYI019", # use `Self` instead of custom TypeVar "PYI041", # use `float` instead of `int | float` "SIM102", # use a single `if` statement instead of nested `if` statements "SIM108", # use ternary operator instead of `if`-`else`-block "SIM117", # use a single `with` statement instead of nested `with` statements "SIM118", # use `key in dict` instead of `key in dict.keys()` "SIM300", # yoda condition detected "PERF203", # try-except within a loop incurs performance overhead "E402", # module level import not at top of file "E731", # do not assign a lambda expression, use a def "PLC0415", # `import` should be at the top-level of a file "PLC0206", # extracting value from dictionary without calling `.items()` "PLR091", # too many arguments / branches / statements "PLR2004", # magic value used in comparison "PLW0603", # using the global statement to update is discouraged "PLW0642", # reassigned `self` variable in instance method "PLW1641", # object does not implement `__hash__` method "PLW2901", # `for` loop variable overwritten by assignment target "UP007", # use X | Y for type annotations "FURB105", # unnecessary empty string passed to `print` "RUF001", # string contains ambiguous unicode character "RUF002", # docstring contains ambiguous acute accent unicode character "RUF003", # comment contains ambiguous no-break space unicode character "RUF005", # consider unpacking operator instead of concatenation "RUF012", # mutable class attributes ] [tool.ruff.lint.per-file-ignores] # don't enforce absolute imports "asv_bench/**" = ["TID252"] # comparison with itself in tests "xarray/tests/**" = ["PLR0124"] # looks like ruff bugs "xarray/core/_typed_ops.py" = ["PYI034"] "xarray/namedarray/_typing.py" = ["PYI018", "PYI046"] [tool.ruff.lint.isort] known-first-party = ["xarray"] [tool.ruff.lint.flake8-tidy-imports] # Disallow all relative imports. ban-relative-imports = "all" [tool.ruff.lint.flake8-tidy-imports.banned-api] "pandas.api.types.is_extension_array_dtype".msg = "Use xarray.core.utils.is_allowed_extension_array{_dtype} instead. Only use the banend API if the incoming data has already been sanitized by xarray" [tool.pytest.ini_options] addopts = [ "--strict-config", "--strict-markers", "--mypy-only-local-stub", "--mypy-pyproject-toml-file=pyproject.toml", ] # We want to forbid warnings from within xarray in our tests โ€” instead we should # fix our own code, or mark the test itself as expecting a warning. 
So this: # - Converts any warning from xarray into an error # - Allows some warnings ("default") which the test suite currently raises, # since it wasn't practical to fix them all before merging this config. The # warnings are reported in CI (since it uses `default`, not `ignore`). # # Over time, we can remove these rules allowing warnings. A valued contribution # is removing a line, seeing what breaks, and then fixing the library code or # tests so that it doesn't raise warnings. # # There are some instance where we'll want to add to these rules: # - While we only raise errors on warnings from within xarray, a dependency can # raise a warning with a stacklevel such that it's interpreted to be raised # from xarray and this will mistakenly convert it to an error. If that # happens, please feel free to add a rule switching it to `default` here, and # disabling the error. # - If these settings get in the way of making progress, it's also acceptable to # temporarily add additional `default` rules. # - But we should only add `ignore` rules if we're confident that we'll never # need to address a warning. filterwarnings = [ "error:::xarray.*", # Zarr 2 V3 implementation "default:Zarr-Python is not in alignment with the final V3 specification", # TODO: this is raised for vlen-utf8, consolidated metadata, U1 dtype "default:is currently not part .* the Zarr version 3 specification.", # Zarr V3 data type specifications warnings - very repetitive "ignore:The data type .* does not have a Zarr V3 specification", "ignore:Consolidated metadata is currently not part", # TODO: remove once we know how to deal with a changed signature in protocols "default:::xarray.tests.test_strategies", ] log_cli_level = "INFO" markers = [ "flaky: flaky tests", "mypy: type annotation tests", "network: tests requiring a network connection", "slow: slow tests", "slow_hypothesis: slow hypothesis tests", ] minversion = "7" python_files = ["test_*.py"] testpaths = ["xarray/tests", "properties"] [tool.aliases] test = "pytest" [tool.repo-review] ignore = [ "PP308", # This option creates a large amount of log lines. 
] [tool.typos] [tool.typos.default] extend-ignore-identifiers-re = [ # Variable names "nd_.*", ".*_nd", "ba_.*", ".*_ba", "ser_.*", ".*_ser", # Function/class names "NDArray.*", ".*NDArray.*", ] [tool.typos.default.extend-words] # NumPy function names arange = "arange" ond = "ond" aso = "aso" # Technical terms nd = "nd" nin = "nin" nclusive = "nclusive" # part of "inclusive" in error messages # Variable names ba = "ba" ser = "ser" fo = "fo" iy = "iy" vart = "vart" ede = "ede" # Organization/Institution names Stichting = "Stichting" Mathematisch = "Mathematisch" # People's names Soler = "Soler" Bruning = "Bruning" Tung = "Tung" Claus = "Claus" Celles = "Celles" slowy = "slowy" Commun = "Commun" # Tests Ome = "Ome" SUR = "SUR" Tio = "Tio" Ono = "Ono" abl = "abl" # Technical terms splitted = "splitted" childs = "childs" cutted = "cutted" LOCA = "LOCA" SLEP = "SLEP" [tool.typos.type.jupyter] extend-ignore-re = ["\"id\": \".*\""] �������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/setup.py���������������������������������������������������������������������������0000775�0000000�0000000�00000000150�15114646760�0015054�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from setuptools import setup setup(use_scm_version={"fallback_version": "9999"}) ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/����������������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0014651�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/__init__.py�����������������������������������������������������������������0000664�0000000�0000000�00000007360�15114646760�0016770�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from importlib.metadata import version as _version from xarray import coders, groupers, indexes, testing, tutorial, ufuncs from xarray.backends.api import ( load_dataarray, load_dataset, load_datatree, open_dataarray, open_dataset, open_datatree, open_groups, open_mfdataset, ) from xarray.backends.writers import save_mfdataset from xarray.backends.zarr import open_zarr from xarray.coding.cftime_offsets import 
cftime_range, date_range, date_range_like from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.frequencies import infer_freq from xarray.computation.apply_ufunc import ( apply_ufunc, ) from xarray.computation.computation import ( corr, cov, cross, dot, polyval, where, ) from xarray.conventions import SerializationWarning, decode_cf from xarray.core.common import ALL_DIMS, full_like, ones_like, zeros_like from xarray.core.coordinates import Coordinates, CoordinateValidationError from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.datatree_mapping import map_over_datasets from xarray.core.extensions import ( register_dataarray_accessor, register_dataset_accessor, register_datatree_accessor, ) from xarray.core.indexes import Index from xarray.core.indexing import IndexSelResult from xarray.core.options import get_options, set_options from xarray.core.parallel import map_blocks from xarray.core.treenode import ( InvalidTreeError, NotFoundInTreeError, TreeIsomorphismError, group_subtrees, ) from xarray.core.variable import IndexVariable, Variable, as_variable from xarray.namedarray.core import NamedArray from xarray.structure.alignment import AlignmentError, align, broadcast from xarray.structure.chunks import unify_chunks from xarray.structure.combine import combine_by_coords, combine_nested from xarray.structure.concat import concat from xarray.structure.merge import Context, MergeError, merge from xarray.util.print_versions import show_versions try: __version__ = _version("xarray") except Exception: # Local copy or not installed with setuptools. # Disable minimum version checks on downstream libraries. __version__ = "9999" # A hardcoded __all__ variable is necessary to appease # `mypy --strict` running in projects that import xarray. 
__all__ = ( # noqa: RUF022 # Sub-packages "coders", "groupers", "indexes", "testing", "tutorial", "ufuncs", # Top-level functions "align", "apply_ufunc", "as_variable", "broadcast", "cftime_range", "combine_by_coords", "combine_nested", "concat", "corr", "cov", "cross", "date_range", "date_range_like", "decode_cf", "dot", "full_like", "get_options", "group_subtrees", "infer_freq", "load_dataarray", "load_dataset", "load_datatree", "map_blocks", "map_over_datasets", "merge", "ones_like", "open_dataarray", "open_dataset", "open_datatree", "open_groups", "open_mfdataset", "open_zarr", "polyval", "register_dataarray_accessor", "register_dataset_accessor", "register_datatree_accessor", "save_mfdataset", "set_options", "show_versions", "unify_chunks", "where", "zeros_like", # Classes "CFTimeIndex", "Context", "Coordinates", "DataArray", "DataTree", "Dataset", "Index", "IndexSelResult", "IndexVariable", "NamedArray", "Variable", # Exceptions "AlignmentError", "CoordinateValidationError", "InvalidTreeError", "MergeError", "NotFoundInTreeError", "SerializationWarning", "TreeIsomorphismError", # Constants "ALL_DIMS", "__version__", ) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/�������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0016423�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/__init__.py��������������������������������������������������������0000664�0000000�0000000�00000002673�15114646760�0020544�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Backend objects for saving and loading data DataStores provide a uniform interface for saving and loading data in different formats. They should not be used directly, but rather through Dataset objects. 
""" from xarray.backends.common import AbstractDataStore, BackendArray, BackendEntrypoint from xarray.backends.file_manager import ( CachingFileManager, DummyFileManager, FileManager, ) from xarray.backends.h5netcdf_ import H5netcdfBackendEntrypoint, H5NetCDFStore from xarray.backends.memory import InMemoryDataStore from xarray.backends.netCDF4_ import NetCDF4BackendEntrypoint, NetCDF4DataStore from xarray.backends.plugins import list_engines, refresh_engines from xarray.backends.pydap_ import PydapBackendEntrypoint, PydapDataStore from xarray.backends.scipy_ import ScipyBackendEntrypoint, ScipyDataStore from xarray.backends.store import StoreBackendEntrypoint from xarray.backends.zarr import ZarrBackendEntrypoint, ZarrStore __all__ = [ "AbstractDataStore", "BackendArray", "BackendEntrypoint", "CachingFileManager", "DummyFileManager", "FileManager", "H5NetCDFStore", "H5netcdfBackendEntrypoint", "InMemoryDataStore", "NetCDF4BackendEntrypoint", "NetCDF4DataStore", "PydapBackendEntrypoint", "PydapDataStore", "ScipyBackendEntrypoint", "ScipyDataStore", "StoreBackendEntrypoint", "ZarrBackendEntrypoint", "ZarrStore", "list_engines", "refresh_engines", ] ���������������������������������������������������������������������xarray-2025.12.0/xarray/backends/api.py�������������������������������������������������������������0000664�0000000�0000000�00000221016�15114646760�0017550�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import os from collections.abc import ( Callable, Iterable, Mapping, Sequence, ) from functools import partial from typing import ( TYPE_CHECKING, Any, Literal, TypeVar, Union, cast, ) from xarray.backends import plugins from xarray.backends.common import ( T_PathFileOrDataStore, _find_absolute_paths, _normalize_path, ) from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.core import dtypes, indexing from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.indexes import Index from xarray.core.treenode import group_subtrees from xarray.core.types import ReadBuffer from xarray.core.utils import emit_user_level_warning, is_remote_uri from xarray.namedarray.daskmanager import DaskManager from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.namedarray.utils import _get_chunk from xarray.structure.chunks import _maybe_chunk from xarray.structure.combine import ( _infer_concat_order_from_positions, _nested_combine, combine_by_coords, ) from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: try: from dask.delayed import Delayed except ImportError: Delayed = None # type: ignore[assignment, misc] from xarray.backends.common import BackendEntrypoint from xarray.core.types import ( CombineAttrsOptions, CompatOptions, ErrorOptionsWithWarn, JoinOptions, NestedSequence, T_Chunks, ) T_NetcdfEngine = Literal["netcdf4", "scipy", "h5netcdf"] T_Engine = Union[ T_NetcdfEngine, Literal["pydap", "zarr"], # noqa: PYI051 type[BackendEntrypoint], str, # no nice typing support for custom backends 
None, ] T_NetcdfTypes = Literal[ "NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", "NETCDF3_CLASSIC" ] DATAARRAY_NAME = "__xarray_dataarray_name__" DATAARRAY_VARIABLE = "__xarray_dataarray_variable__" def _resolve_decoders_kwargs(decode_cf, open_backend_dataset_parameters, **decoders): for d in list(decoders): if decode_cf is False and d in open_backend_dataset_parameters: decoders[d] = False if decoders[d] is None: decoders.pop(d) return decoders def _get_mtime(filename_or_obj): # if passed an actual file path, augment the token with # the file modification time mtime = None try: path = os.fspath(filename_or_obj) except TypeError: path = None if path and not is_remote_uri(path): mtime = os.path.getmtime(os.path.expanduser(filename_or_obj)) return mtime def _protect_dataset_variables_inplace(dataset: Dataset, cache: bool) -> None: for name, variable in dataset.variables.items(): if name not in dataset._indexes: # no need to protect IndexVariable objects data: indexing.ExplicitlyIndexedNDArrayMixin data = indexing.CopyOnWriteArray(variable._data) if cache: data = indexing.MemoryCachedArray(data) variable.data = data def _protect_datatree_variables_inplace(tree: DataTree, cache: bool) -> None: for node in tree.subtree: _protect_dataset_variables_inplace(node.dataset, cache) def _finalize_store(writes, store): """Finalize this store by explicitly syncing and closing""" del writes # ensure writing is done first store.close() def delayed_close_after_writes(writes, store): import dask return dask.delayed(_finalize_store)(writes, store) def _multi_file_closer(closers): for closer in closers: closer() def load_dataset(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> Dataset: """Open, load into memory, and close a Dataset from a file or file-like object. This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs from `open_dataset` in that it loads the Dataset into memory, closes the file, and returns the Dataset. In contrast, `open_dataset` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_dataset`. See that documentation for further details. Returns ------- dataset : Dataset The newly created Dataset. See Also -------- open_dataset """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_dataset(filename_or_obj, **kwargs) as ds: return ds.load() def load_dataarray(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataArray: """Open, load into memory, and close a DataArray from a file or file-like object containing a single data variable. This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs from `open_dataarray` in that it loads the Dataset into memory, closes the file, and returns the Dataset. In contrast, `open_dataarray` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_dataarray`. See that documentation for further details. Returns ------- datarray : DataArray The newly created DataArray. See Also -------- open_dataarray """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_dataarray(filename_or_obj, **kwargs) as da: return da.load() def load_datatree(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataTree: """Open, load into memory, and close a DataTree from a file or file-like object. This is a thin wrapper around :py:meth:`~xarray.open_datatree`. It differs from `open_datatree` in that it loads the DataTree into memory, closes the file, and returns the DataTree. 
In contrast, `open_datatree` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_datatree`. See that documentation for further details. Returns ------- datatree : DataTree The newly created DataTree. See Also -------- open_datatree """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_datatree(filename_or_obj, **kwargs) as dt: return dt.load() def _chunk_ds( backend_ds, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, **extra_tokens, ): chunkmanager = guess_chunkmanager(chunked_array_type) # TODO refactor to move this dask-specific logic inside the DaskManager class if isinstance(chunkmanager, DaskManager): from dask.base import tokenize mtime = _get_mtime(filename_or_obj) token = tokenize(filename_or_obj, mtime, engine, chunks, **extra_tokens) name_prefix = "open_dataset-" else: # not used token = (None,) name_prefix = None variables = {} for name, var in backend_ds.variables.items(): if var._in_memory: variables[name] = var continue var_chunks = _get_chunk( var._data, chunks, chunkmanager, preferred_chunks=var.encoding.get("preferred_chunks", {}), dims=var.dims, ) variables[name] = _maybe_chunk( name, var, var_chunks, overwrite_encoded_chunks=overwrite_encoded_chunks, name_prefix=name_prefix, token=token, inline_array=inline_array, chunked_array_type=chunkmanager, from_array_kwargs=from_array_kwargs.copy(), ) return backend_ds._replace(variables) def _maybe_create_default_indexes(ds): to_index = { name: coord.variable for name, coord in ds.coords.items() if coord.dims == (name,) and name not in ds.xindexes } return ds.assign_coords(Coordinates(to_index)) def _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, create_default_indexes, **extra_tokens, ): if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}: raise ValueError( f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}." ) _protect_dataset_variables_inplace(backend_ds, cache) if create_default_indexes: ds = _maybe_create_default_indexes(backend_ds) else: ds = backend_ds if chunks is not None: ds = _chunk_ds( ds, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, **extra_tokens, ) ds.set_close(backend_ds._close) # Ensure source filename always stored in dataset object if "source" not in ds.encoding: path = getattr(filename_or_obj, "path", filename_or_obj) if isinstance(path, str | os.PathLike): ds.encoding["source"] = _normalize_path(path) return ds def _datatree_from_backend_datatree( backend_tree, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, create_default_indexes, **extra_tokens, ): if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}: raise ValueError( f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}." 
) _protect_datatree_variables_inplace(backend_tree, cache) if create_default_indexes: tree = backend_tree.map_over_datasets(_maybe_create_default_indexes) else: tree = backend_tree if chunks is not None: tree = DataTree.from_dict( { path: _chunk_ds( node.dataset, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, node=path, **extra_tokens, ) for path, [node] in group_subtrees(tree) }, name=tree.name, ) if create_default_indexes or chunks is not None: for path, [node] in group_subtrees(backend_tree): tree[path].set_close(node._close) # Ensure source filename always stored in dataset object if "source" not in tree.encoding: path = getattr(filename_or_obj, "path", filename_or_obj) if isinstance(path, str | os.PathLike): tree.encoding["source"] = _normalize_path(path) return tree def open_dataset( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: ( bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None ) = None, decode_timedelta: ( bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None ) = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> Dataset: """Open and decode a dataset from a file or file-like object. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf (netCDF4). engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays or when large arrays are sliced before computation. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. 
cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool, CFTimedeltaCoder, or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. 
This keyword may not be supported by all the backends. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by "netcdf4", "h5netcdf", "zarr". - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". See engine open function for kwargs accepted by each specific engine. Returns ------- dataset : Dataset The newly created dataset. Notes ----- ``open_dataset`` opens the file with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. 
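Examples
--------
A minimal, illustrative sketch; ``"example.nc"`` is a placeholder for a local
netCDF file and is not shipped with xarray, so adapt the path (and the keyword
arguments) to your own data:

>>> ds = xr.open_dataset("example.nc")  # doctest: +SKIP

Open lazily with dask, using the backend's preferred chunk sizes:

>>> ds = xr.open_dataset("example.nc", chunks={})  # doctest: +SKIP

Skip time decoding and drop a problematic variable:

>>> ds = xr.open_dataset(
...     "example.nc", decode_times=False, drop_variables=["bad_var"]
... )  # doctest: +SKIP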
See Also -------- open_mfdataset """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_ds = backend.open_dataset( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) ds = _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) return ds def open_dataarray( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | None = None, decode_times: ( bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None ) = None, decode_timedelta: bool | CFTimedeltaCoder | None = None, use_cftime: bool | None = None, concat_characters: bool | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> DataArray: """Open a DataArray from a file or file-like object containing a single data variable. This is designed to read netCDF files with only one data variable. If multiple variables are present then a ValueError is raised. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf (netCDF4). engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. 
- ``chunks={}`` loads the data with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. This keyword may not be supported by all the backends. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. This keyword may not be supported by all the backends. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. 
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by "netcdf4", "h5netcdf", "zarr". - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". See engine open function for kwargs accepted by each specific engine. Notes ----- This is designed to be fully compatible with `DataArray.to_netcdf`. Saving using `DataArray.to_netcdf` and then loading with this function will produce an identical result. All parameters are passed directly to `xarray.open_dataset`. See that documentation for further details. See also -------- open_dataset """ dataset = open_dataset( filename_or_obj, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine=engine, chunks=chunks, cache=cache, drop_variables=drop_variables, create_default_indexes=create_default_indexes, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, backend_kwargs=backend_kwargs, use_cftime=use_cftime, decode_timedelta=decode_timedelta, **kwargs, ) if len(dataset.data_vars) != 1: if len(dataset.data_vars) == 0: msg = "Given file dataset contains no data variables." else: msg = ( "Given file dataset contains more than one data " "variable. 
Please read with xarray.open_dataset and " "then select the variable you want." ) raise ValueError(msg) else: (data_array,) = dataset.data_vars.values() data_array.set_close(dataset._close) # Reset names if they were changed during saving # to ensure that we can 'roundtrip' perfectly if DATAARRAY_NAME in dataset.attrs: data_array.name = dataset.attrs[DATAARRAY_NAME] del dataset.attrs[DATAARRAY_NAME] if data_array.name == DATAARRAY_VARIABLE: data_array.name = None return data_array def open_datatree( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: ( bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None ) = None, decode_timedelta: ( bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None ) = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> DataTree: """ Open and decode a DataTree from a file or file-like object, creating one tree node for each group in the file. Parameters ---------- filename_or_obj : str, Path, file-like, bytes or DataStore Strings and Path objects are interpreted as a path to a netCDF file or Zarr store. Bytes and memoryview objects are interpreted as file contents. engine : {"netcdf4", "h5netcdf", "zarr", None}, \ installed backend or xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, by default preferring "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. 
mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. 
drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the group in the given file to open as the root group as a str. - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". See engine open function for kwargs accepted by each specific engine. Returns ------- tree : DataTree The newly created datatree. Notes ----- ``open_datatree`` opens the file with read-only access. When you modify values of a DataTree, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. 
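Examples
--------
A minimal, illustrative sketch; ``"groups.nc"`` and the group name
``"child_group"`` are hypothetical and stand in for a netCDF or Zarr store
containing nested groups:

>>> dt = xr.open_datatree("groups.nc")  # doctest: +SKIP
>>> ds_child = dt["child_group"].dataset  # doctest: +SKIP

Passing ``chunks={}`` would instead open every group lazily with dask.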
See Also -------- xarray.open_groups xarray.open_dataset """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj, must_support_groups=True) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_tree = backend.open_datatree( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) tree = _datatree_from_backend_datatree( backend_tree, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) return tree def open_groups( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: ( bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None ) = None, decode_timedelta: ( bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None ) = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> dict[str, Dataset]: """ Open and decode a file or file-like object, creating a dictionary containing one xarray Dataset for each group in the file. Useful for an HDF file ("netcdf4" or "h5netcdf") containing many groups that are not alignable with their parents and cannot be opened directly with ``open_datatree``. It is encouraged to use this function to inspect your data, then make the necessary changes to make the structure coercible to a `DataTree` object before calling `DataTree.from_dict()` and proceeding with your analysis. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or Zarr store. Bytes and memoryview objects are interpreted as file contents. engine : {"netcdf4", "h5netcdf", "zarr", None}, \ installed backend or xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, by default preferring "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask.
This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. 
concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the group in the given file to open as the root group as a str. - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". See engine open function for kwargs accepted by each specific engine. Returns ------- groups : dict of str to xarray.Dataset The groups as Dataset objects Notes ----- ``open_groups`` opens the file with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. 
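Examples
--------
A minimal, illustrative sketch; ``"groups.h5"`` is a hypothetical HDF5/netCDF4
file whose groups are not alignable with their parents:

>>> groups = xr.open_groups("groups.h5", engine="h5netcdf")  # doctest: +SKIP
>>> list(groups)  # doctest: +SKIP

After reorganising the datasets so that they align, the dictionary can be
turned into a tree:

>>> dt = xr.DataTree.from_dict(groups)  # doctest: +SKIP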
See Also -------- xarray.open_datatree xarray.open_dataset xarray.DataTree.from_dict """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj, must_support_groups=True) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=(), mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_groups = backend.open_groups_as_dict( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) groups = { name: _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) for name, backend_ds in backend_groups.items() } return groups _FLike = TypeVar("_FLike", bound=Union[str, ReadBuffer]) def _remove_path( paths: NestedSequence[_FLike], paths_to_remove: set[_FLike] ) -> NestedSequence[_FLike]: # Initialize an empty list to store the result result: list[Union[_FLike, NestedSequence[_FLike]]] = [] for item in paths: if isinstance(item, list): # If the current item is a list, recursively call remove_elements on it nested_result = _remove_path(item, paths_to_remove) if nested_result: # Only add non-empty lists to avoid adding empty lists result.append(nested_result) elif item not in paths_to_remove: # Add the item to the result if it is not in the set of elements to remove result.append(item) return result def open_mfdataset( paths: ( str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer] ), chunks: T_Chunks = None, concat_dim: ( str | DataArray | Index | Sequence[str] | Sequence[DataArray] | Sequence[Index] | None ) = None, compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, preprocess: Callable[[Dataset], Dataset] | None = None, engine: T_Engine = None, data_vars: ( Literal["all", "minimal", "different"] | None | list[str] | CombineKwargDefault ) = _DATA_VARS_DEFAULT, coords=_COORDS_DEFAULT, combine: Literal["by_coords", "nested"] = "by_coords", parallel: bool = False, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, attrs_file: str | os.PathLike | None = None, combine_attrs: CombineAttrsOptions = "override", errors: ErrorOptionsWithWarn = "raise", **kwargs, ) -> Dataset: """Open multiple files as a single dataset. If combine='by_coords' then the function ``combine_by_coords`` is used to combine the datasets into one before returning the result, and if combine='nested' then ``combine_nested`` is used. The filepaths must be structured according to which combining function is used, the details of which are given in the documentation for ``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'`` will be used. Requires dask to be installed. See documentation for details on dask [1]_. Global attributes from the ``attrs_file`` are used for the combined dataset. Parameters ---------- paths : str or nested sequence of paths Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of files to open. Paths can be given as strings or as pathlib Paths. 
If concatenation along more than one dimension is desired, then ``paths`` must be a nested list-of-lists (see ``combine_nested`` for details). (A string glob will be expanded to a 1-dimensional list.) chunks : int, dict, 'auto' or None, optional Dictionary with keys given by dimension names and values given by chunk sizes. In general, these should divide the dimensions of each dataset. If int, chunk each dimension by ``chunks``. By default, chunks will be chosen to match the chunks on disk. This may impact performance: please see the full documentation for more details [2]_. This argument is evaluated on a per-file basis, so chunk sizes that span multiple files will be ignored. concat_dim : str, DataArray, Index or a Sequence of these or None, optional Dimensions to concatenate files along. You only need to provide this argument if ``combine='nested'``, and if any of the dimensions along which you want to concatenate is not a dimension in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a particular dimension. Default is None, which for a 1D list of filepaths is equivalent to opening the files separately and then merging them with ``xarray.merge``. combine : {"by_coords", "nested"}, optional Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to combine all the data. Default is to use ``xarray.combine_by_coords``. compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts when merging: * "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. * "equals": all values and dimensions must be the same. * "identical": all values, dimensions and attributes must be the same. * "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. * "override": skip comparing and pick variable from first dataset preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in ``ds.encoding["source"]``. engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. data_vars : {"minimal", "different", "all"} or list of str, default: "all" These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the "minimal" data variables. 
coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, in addition the "minimal" coordinates. parallel : bool, default: False If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer" String indicating how to combine differing indexes (excluding concat_dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. attrs_file : str or path-like, optional Path of the file used to read global attributes from. By default global attributes are read from the first file provided, with wildcard matches sorted by filename. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. errors : {"raise", "warn", "ignore"}, default: "raise" String indicating how to handle errors in opening dataset. - "raise": invalid dataset will raise an exception. - "warn": a warning will be issued for each invalid dataset. - "ignore": invalid dataset will be ignored. **kwargs : optional Additional arguments passed on to :py:func:`xarray.open_dataset`. For an overview of some of the possible options, see the documentation of :py:func:`xarray.open_dataset` Returns ------- xarray.Dataset Notes ----- ``open_mfdataset`` opens files with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- combine_by_coords combine_nested open_dataset Examples -------- A user might want to pass additional arguments into ``preprocess`` when applying some operation to many individual files that are being opened. One route to do this is through the use of ``functools.partial``. 
>>> from functools import partial >>> def _preprocess(x, lon_bnds, lat_bnds): ... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds)) ... >>> lon_bnds, lat_bnds = (-110, -105), (40, 45) >>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds) >>> ds = xr.open_mfdataset( ... "file_*.nc", concat_dim="time", preprocess=partial_func ... ) # doctest: +SKIP It is also possible to use any argument to ``open_dataset`` together with ``open_mfdataset``, such as for example ``drop_variables``: >>> ds = xr.open_mfdataset( ... "file.nc", drop_variables=["varname_1", "varname_2"] # any list of vars ... ) # doctest: +SKIP References ---------- .. [1] https://docs.xarray.dev/en/stable/dask.html .. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance """ paths = _find_absolute_paths(paths, engine=engine, **kwargs) if not paths: raise OSError("no files to open") paths1d: list[str | ReadBuffer] if combine == "nested": if isinstance(concat_dim, str | DataArray) or concat_dim is None: concat_dim = [concat_dim] # type: ignore[assignment] # This creates a flat list which is easier to iterate over, whilst # encoding the originally-supplied structure as "ids". # The "ids" are not used at all if combine='by_coords`. combined_ids_paths = _infer_concat_order_from_positions(paths) ids, paths1d = ( list(combined_ids_paths.keys()), list(combined_ids_paths.values()), ) elif concat_dim is not None: raise ValueError( "When combine='by_coords', passing a value for `concat_dim` has no " "effect. To manually combine along a specific dimension you should " "instead specify combine='nested' along with a value for `concat_dim`.", ) else: paths1d = paths # type: ignore[assignment] open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs) if parallel: import dask # wrap the open_dataset, getattr, and preprocess with delayed open_ = dask.delayed(open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) else: open_ = open_dataset getattr_ = getattr if errors not in ("raise", "warn", "ignore"): raise ValueError( f"'errors' must be 'raise', 'warn' or 'ignore', got '{errors}'" ) datasets = [] invalid_paths = set() for p in paths1d: try: ds = open_(p, **open_kwargs) datasets.append(ds) except Exception as e: if errors == "raise": raise elif errors == "warn": emit_user_level_warning(f"Could not open {p} due to {e}. 
Ignoring.") # remove invalid paths invalid_paths.add(p) if invalid_paths: paths = _remove_path(paths, invalid_paths) if combine == "nested": # Create new ids and paths based on removed items combined_ids_paths = _infer_concat_order_from_positions(paths) ids = list(combined_ids_paths.keys()) closers = [getattr_(ds, "_close") for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] if parallel: # calling compute here will return the datasets/file_objs lists, # the underlying datasets will still be stored as dask arrays datasets, closers = dask.compute(datasets, closers) # Combine all datasets, closing them in case of a ValueError try: if combine == "nested": # Combined nested list by successive concat and merge operations # along each dimension, using structure given by "ids" combined = _nested_combine( datasets, concat_dims=concat_dim, compat=compat, data_vars=data_vars, coords=coords, ids=ids, join=join, combine_attrs=combine_attrs, fill_value=dtypes.NA, ) elif combine == "by_coords": # Redo ordering from coordinates, ignoring how they were ordered # previously combined = combine_by_coords( datasets, compat=compat, data_vars=data_vars, coords=coords, join=join, combine_attrs=combine_attrs, ) else: raise ValueError( f"{combine} is an invalid option for the keyword argument ``combine``" ) except ValueError: for ds in datasets: ds.close() raise combined.set_close(partial(_multi_file_closer, closers)) # read global attributes from the attrs_file or from the first dataset if attrs_file is not None: if isinstance(attrs_file, os.PathLike): attrs_file = cast(str, os.fspath(attrs_file)) combined.attrs = datasets[paths1d.index(attrs_file)].attrs return combined ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/chunks.py����������������������������������������������������������0000664�0000000�0000000�00000026223�15114646760�0020275�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import numpy as np from xarray.core.datatree import Variable def align_nd_chunks( nd_v_chunks: tuple[tuple[int, ...], ...], nd_backend_chunks: tuple[tuple[int, ...], ...], ) -> tuple[tuple[int, ...], ...]: if len(nd_backend_chunks) != len(nd_v_chunks): raise ValueError( "The number of dimensions on the backend and the variable must be the same." ) nd_aligned_chunks: list[tuple[int, ...]] = [] for backend_chunks, v_chunks in zip(nd_backend_chunks, nd_v_chunks, strict=True): # Validate that they have the same number of elements if sum(backend_chunks) != sum(v_chunks): raise ValueError( "The number of elements in the backend does not " "match the number of elements in the variable. " "This inconsistency should never occur at this stage." 
) # Validate if the backend_chunks satisfy the condition that all the values # excluding the borders are equal if len(set(backend_chunks[1:-1])) > 1: raise ValueError( f"This function currently supports aligning chunks " f"only when backend chunks are of uniform size, excluding borders. " f"If you encounter this error, please report itโ€”this scenario should never occur " f"unless there is an internal misuse. " f"Backend chunks: {backend_chunks}" ) # The algorithm assumes that there are always two borders on the # Backend and the Array if not, the result is going to be the same # as the input, and there is nothing to optimize if len(backend_chunks) == 1: nd_aligned_chunks.append(backend_chunks) continue if len(v_chunks) == 1: nd_aligned_chunks.append(v_chunks) continue # Size of the chunk on the backend fixed_chunk = max(backend_chunks) # The ideal size of the chunks is the maximum of the two; this would avoid # that we use more memory than expected max_chunk = max(fixed_chunk, *v_chunks) # The algorithm assumes that the chunks on this array are aligned except the last one # because it can be considered a partial one aligned_chunks: list[int] = [] # For simplicity of the algorithm, let's transform the Array chunks in such a way that # we remove the partial chunks. To achieve this, we add artificial data to the borders t_v_chunks = list(v_chunks) t_v_chunks[0] += fixed_chunk - backend_chunks[0] t_v_chunks[-1] += fixed_chunk - backend_chunks[-1] # The unfilled_size is the amount of space that has not been filled on the last # processed chunk; this is equivalent to the amount of data that would need to be # added to a partial Zarr chunk to fill it up to the fixed_chunk size unfilled_size = 0 for v_chunk in t_v_chunks: # Ideally, we should try to preserve the original Dask chunks, but this is only # possible if the last processed chunk was aligned (unfilled_size == 0) ideal_chunk = v_chunk if unfilled_size: # If that scenario is not possible, the best option is to merge the chunks ideal_chunk = v_chunk + aligned_chunks[-1] while ideal_chunk: if not unfilled_size: # If the previous chunk is filled, let's add a new chunk # of size 0 that will be used on the merging step to simplify the algorithm aligned_chunks.append(0) if ideal_chunk > max_chunk: # If the ideal_chunk is bigger than the max_chunk, # we need to increase the last chunk as much as possible # but keeping it aligned, and then add a new chunk max_increase = max_chunk - aligned_chunks[-1] max_increase = ( max_increase - (max_increase - unfilled_size) % fixed_chunk ) aligned_chunks[-1] += max_increase else: # Perfect scenario where the chunks can be merged without any split. 
aligned_chunks[-1] = ideal_chunk ideal_chunk -= aligned_chunks[-1] unfilled_size = ( fixed_chunk - aligned_chunks[-1] % fixed_chunk ) % fixed_chunk # Now we have to remove the artificial data added to the borders for order in [-1, 1]: border_size = fixed_chunk - backend_chunks[::order][0] aligned_chunks = aligned_chunks[::order] aligned_chunks[0] -= border_size t_v_chunks = t_v_chunks[::order] t_v_chunks[0] -= border_size if ( len(aligned_chunks) >= 2 and aligned_chunks[0] + aligned_chunks[1] <= max_chunk and aligned_chunks[0] != t_v_chunks[0] ): # The artificial data added to the border can introduce inefficient chunks # on the borders, for that reason, we will check if we can merge them or not # Example: # backend_chunks = [6, 6, 1] # v_chunks = [6, 7] # t_v_chunks = [6, 12] # The ideal output should preserve the same v_chunks, but the previous loop # is going to produce aligned_chunks = [6, 6, 6] # And after removing the artificial data, we will end up with aligned_chunks = [6, 6, 1] # which is not ideal and can be merged into a single chunk aligned_chunks[1] += aligned_chunks[0] aligned_chunks = aligned_chunks[1:] t_v_chunks = t_v_chunks[::order] aligned_chunks = aligned_chunks[::order] nd_aligned_chunks.append(tuple(aligned_chunks)) return tuple(nd_aligned_chunks) def build_grid_chunks( size: int, chunk_size: int, region: slice | None = None, ) -> tuple[int, ...]: if region is None: region = slice(0, size) region_start = region.start or 0 # Generate the zarr chunks inside the region of this dim chunks_on_region = [chunk_size - (region_start % chunk_size)] if chunks_on_region[0] >= size: # This is useful for the scenarios where the chunk_size are bigger # than the variable chunks, which can happens when the user specifies # the enc_chunks manually. return (size,) chunks_on_region.extend([chunk_size] * ((size - chunks_on_region[0]) // chunk_size)) if (size - chunks_on_region[0]) % chunk_size != 0: chunks_on_region.append((size - chunks_on_region[0]) % chunk_size) return tuple(chunks_on_region) def grid_rechunk( v: Variable, enc_chunks: tuple[int, ...], region: tuple[slice, ...], ) -> Variable: nd_v_chunks = v.chunks if not nd_v_chunks: return v nd_grid_chunks = tuple( build_grid_chunks( v_size, region=interval, chunk_size=chunk_size, ) for v_size, chunk_size, interval in zip( v.shape, enc_chunks, region, strict=True ) ) nd_aligned_chunks = align_nd_chunks( nd_v_chunks=nd_v_chunks, nd_backend_chunks=nd_grid_chunks, ) v = v.chunk(dict(zip(v.dims, nd_aligned_chunks, strict=True))) return v def validate_grid_chunks_alignment( nd_v_chunks: tuple[tuple[int, ...], ...] | None, enc_chunks: tuple[int, ...], backend_shape: tuple[int, ...], region: tuple[slice, ...], allow_partial_chunks: bool, name: str, ): if nd_v_chunks is None: return base_error = ( "Specified Zarr chunks encoding['chunks']={enc_chunks!r} for " "variable named {name!r} would overlap multiple Dask chunks. " "Please check the Dask chunks at position {v_chunk_pos} and " "{v_chunk_pos_next}, on axis {axis}, they are overlapped " "on the same Zarr chunk in the region {region}. " "Writing this array in parallel with Dask could lead to corrupted data. " "To resolve this issue, consider one of the following options: " "- Rechunk the array using `chunk()`. " "- Modify or delete `encoding['chunks']`. " "- Set `safe_chunks=False`. " "- Enable automatic chunks alignment with `align_chunks=True`." 
) for axis, chunk_size, v_chunks, interval, size in zip( range(len(enc_chunks)), enc_chunks, nd_v_chunks, region, backend_shape, strict=True, ): for i, chunk in enumerate(v_chunks[1:-1]): if chunk % chunk_size: raise ValueError( base_error.format( v_chunk_pos=i + 1, v_chunk_pos_next=i + 2, v_chunk_size=chunk, axis=axis, name=name, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) ) interval_start = interval.start or 0 if len(v_chunks) > 1: # The first border size is the amount of data that needs to be updated on the # first chunk taking into account the region slice. first_border_size = chunk_size if allow_partial_chunks: first_border_size = chunk_size - interval_start % chunk_size if (v_chunks[0] - first_border_size) % chunk_size: raise ValueError( base_error.format( v_chunk_pos=0, v_chunk_pos_next=0, v_chunk_size=v_chunks[0], axis=axis, name=name, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) ) if not allow_partial_chunks: region_stop = interval.stop or size error_on_last_chunk = base_error.format( v_chunk_pos=len(v_chunks) - 1, v_chunk_pos_next=len(v_chunks) - 1, v_chunk_size=v_chunks[-1], axis=axis, name=name, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) if interval_start % chunk_size: # The last chunk which can also be the only one is a partial chunk # if it is not aligned at the beginning raise ValueError(error_on_last_chunk) if np.ceil(region_stop / chunk_size) == np.ceil(size / chunk_size): # If the region is covering the last chunk then check # if the reminder with the default chunk size # is equal to the size of the last chunk if v_chunks[-1] % chunk_size != size % chunk_size: raise ValueError(error_on_last_chunk) elif v_chunks[-1] % chunk_size: raise ValueError(error_on_last_chunk) �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/common.py����������������������������������������������������������0000664�0000000�0000000�00000066373�15114646760�0020304�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import logging import os import time import traceback from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from dataclasses import dataclass from glob import glob from typing import ( TYPE_CHECKING, Any, ClassVar, Self, TypeVar, Union, overload, ) import numpy as np import pandas as pd from xarray.coding import strings, variables from xarray.coding.variables import SerializationWarning from xarray.conventions import cf_encoder from xarray.core import indexing from xarray.core.datatree import DataTree, Variable from xarray.core.types import ReadBuffer from xarray.core.utils import ( FrozenDict, NdimSizeLenMixin, attempt_import, emit_user_level_warning, is_remote_uri, ) from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.namedarray.utils import 
is_duck_dask_array if TYPE_CHECKING: from xarray.core.dataset import Dataset from xarray.core.types import NestedSequence T_Name = Union[Hashable, None] # Create a logger object, but don't add any handlers. Leave that to user code. logger = logging.getLogger(__name__) NONE_VAR_NAME = "__values__" T = TypeVar("T") @overload def _normalize_path(path: os.PathLike) -> str: ... @overload def _normalize_path(path: str) -> str: ... @overload def _normalize_path(path: T) -> T: ... def _normalize_path(path: os.PathLike | str | T) -> str | T: """ Normalize pathlikes to string. Parameters ---------- path : Path to file. Examples -------- >>> from pathlib import Path >>> directory = Path(xr.backends.common.__file__).parent >>> paths_path = Path(directory).joinpath("comm*n.py") >>> paths_str = xr.backends.common._normalize_path(paths_path) >>> print([type(p) for p in (paths_str,)]) [] """ if isinstance(path, os.PathLike): path = os.fspath(path) if isinstance(path, str) and not is_remote_uri(path): path = os.path.abspath(os.path.expanduser(path)) return path # type: ignore[return-value] @overload def _find_absolute_paths( paths: str | os.PathLike | Sequence[str | os.PathLike], **kwargs, ) -> list[str]: ... @overload def _find_absolute_paths( paths: ReadBuffer | Sequence[ReadBuffer], **kwargs, ) -> list[ReadBuffer]: ... @overload def _find_absolute_paths( paths: NestedSequence[str | os.PathLike], **kwargs ) -> NestedSequence[str]: ... @overload def _find_absolute_paths( paths: NestedSequence[ReadBuffer], **kwargs ) -> NestedSequence[ReadBuffer]: ... @overload def _find_absolute_paths( paths: str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer], **kwargs, ) -> NestedSequence[str | ReadBuffer]: ... def _find_absolute_paths( paths: str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer], **kwargs, ) -> NestedSequence[str | ReadBuffer]: """ Find absolute paths from the pattern. Parameters ---------- paths : Path(s) to file(s). Can include wildcards like * . **kwargs : Extra kwargs. Mainly for fsspec. Examples -------- >>> from pathlib import Path >>> directory = Path(xr.backends.common.__file__).parent >>> paths = str(Path(directory).joinpath("comm*n.py")) # Find common with wildcard >>> paths = xr.backends.common._find_absolute_paths(paths) >>> [Path(p).name for p in paths] ['common.py'] """ if isinstance(paths, str): if is_remote_uri(paths) and kwargs.get("engine") == "zarr": if TYPE_CHECKING: import fsspec else: fsspec = attempt_import("fsspec") fs, _, _ = fsspec.core.get_fs_token_paths( paths, mode="rb", storage_options=kwargs.get("backend_kwargs", {}).get( "storage_options", {} ), expand=False, ) tmp_paths = fs.glob(fs._strip_protocol(paths)) # finds directories return [fs.get_mapper(path) for path in tmp_paths] elif is_remote_uri(paths): raise ValueError( "cannot do wild-card matching for paths that are remote URLs " f"unless engine='zarr' is specified. Got paths: {paths}. " "Instead, supply paths as an explicit list of strings." 
) else: return sorted(glob(_normalize_path(paths))) elif isinstance(paths, os.PathLike): return [_normalize_path(paths)] elif isinstance(paths, ReadBuffer): return [paths] def _normalize_path_list( lpaths: NestedSequence[str | os.PathLike | ReadBuffer], ) -> NestedSequence[str | ReadBuffer]: paths = [] for p in lpaths: if isinstance(p, str | os.PathLike): paths.append(_normalize_path(p)) elif isinstance(p, list): paths.append(_normalize_path_list(p)) # type: ignore[arg-type] else: paths.append(p) # type: ignore[arg-type] return paths return _normalize_path_list(paths) @dataclass class BytesIOProxy: """Proxy object for a write that a memoryview.""" getvalue: Callable[[], memoryview] | None = None def getbuffer(self) -> memoryview: """Get the value of this write as bytes or memory.""" if self.getvalue is None: raise ValueError("must set getvalue before fetching value") return self.getvalue() def _open_remote_file(file, mode, storage_options=None): import fsspec fs, _, paths = fsspec.get_fs_token_paths( file, mode=mode, storage_options=storage_options ) return fs.open(paths[0], mode=mode) def _encode_variable_name(name): if name is None: name = NONE_VAR_NAME return name def _decode_variable_name(name): if name == NONE_VAR_NAME: name = None return name def _iter_nc_groups(root, parent="/"): from xarray.core.treenode import NodePath parent = NodePath(parent) yield str(parent) for path, group in root.groups.items(): gpath = parent / path yield from _iter_nc_groups(group, parent=gpath) def find_root_and_group(ds): """Find the root and group name of a netCDF4/h5netcdf dataset.""" hierarchy = () while ds.parent is not None: hierarchy = (ds.name.split("/")[-1],) + hierarchy ds = ds.parent group = "/" + "/".join(hierarchy) return ds, group def collect_ancestor_dimensions(group) -> dict[str, int]: """Returns dimensions defined in parent groups. If dimensions are defined in multiple ancestors, use the size of the closest ancestor. """ dims = {} while (group := group.parent) is not None: for k, v in group.dimensions.items(): if k not in dims: dims[k] = len(v) return dims def datatree_from_dict_with_io_cleanup(groups_dict: Mapping[str, Dataset]) -> DataTree: """DataTree.from_dict with file clean-up.""" try: tree = DataTree.from_dict(groups_dict) except Exception: for ds in groups_dict.values(): ds.close() raise for path, ds in groups_dict.items(): tree[path].set_close(ds._close) return tree def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500): """ Robustly index an array, using retry logic with exponential backoff if any of the errors ``catch`` are raised. The initial_delay is measured in ms. With the default settings, the maximum delay will be in the range of 32-64 seconds. """ assert max_retries >= 0 for n in range(max_retries + 1): try: return array[key] except catch: if n == max_retries: raise base_delay = initial_delay * 2**n next_delay = base_delay + np.random.randint(base_delay) msg = ( f"getitem failed, waiting {next_delay} ms before trying again " f"({max_retries - n} tries remaining). 
Full traceback: {traceback.format_exc()}" ) logger.debug(msg) time.sleep(1e-3 * next_delay) class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed): __slots__ = () async def async_getitem(self, key: indexing.ExplicitIndexer) -> np.typing.ArrayLike: raise NotImplementedError("Backend does not support asynchronous loading") def get_duck_array(self, dtype: np.typing.DTypeLike | None = None): key = indexing.BasicIndexer((slice(None),) * self.ndim) return self[key] # type: ignore[index] async def async_get_duck_array(self, dtype: np.typing.DTypeLike | None = None): key = indexing.BasicIndexer((slice(None),) * self.ndim) return await self.async_getitem(key) class AbstractDataStore: __slots__ = () def get_child_store(self, group: str) -> Self: # pragma: no cover """Get a store corresponding to the indicated child group.""" raise NotImplementedError() def get_dimensions(self): # pragma: no cover raise NotImplementedError() def get_parent_dimensions(self): # pragma: no cover return {} def get_attrs(self): # pragma: no cover raise NotImplementedError() def get_variables(self): # pragma: no cover raise NotImplementedError() def get_encoding(self): return {} def load(self): """ This loads the variables and attributes simultaneously. A centralized loading function makes it easier to create data stores that do automatic encoding/decoding. For example:: class SuffixAppendingDataStore(AbstractDataStore): def load(self): variables, attributes = AbstractDataStore.load(self) variables = {"%s_suffix" % k: v for k, v in variables.items()} attributes = {"%s_suffix" % k: v for k, v in attributes.items()} return variables, attributes This function will be called anytime variables or attributes are requested, so care should be taken to make sure its fast. """ variables = FrozenDict( (_decode_variable_name(k), v) for k, v in self.get_variables().items() ) attributes = FrozenDict(self.get_attrs()) return variables, attributes def close(self): pass def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.close() T_PathFileOrDataStore = ( str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore ) class ArrayWriter: __slots__ = ("lock", "regions", "sources", "targets") def __init__(self, lock=None): self.sources = [] self.targets = [] self.regions = [] self.lock = lock def add(self, source, target, region=None): if is_chunked_array(source): self.sources.append(source) self.targets.append(target) self.regions.append(region) elif region: target[region] = source else: target[...] 
= source def sync(self, compute=True, chunkmanager_store_kwargs=None): if self.sources: chunkmanager = get_chunked_array_type(*self.sources) # TODO: consider wrapping targets with dask.delayed, if this makes # for any discernible difference in performance, e.g., # targets = [dask.delayed(t) for t in self.targets] if chunkmanager_store_kwargs is None: chunkmanager_store_kwargs = {} delayed_store = chunkmanager.store( self.sources, self.targets, lock=self.lock, compute=compute, flush=True, regions=self.regions, **chunkmanager_store_kwargs, ) self.sources = [] self.targets = [] self.regions = [] return delayed_store class AbstractWritableDataStore(AbstractDataStore): __slots__ = () def encode(self, variables, attributes): """ Encode the variables and attributes in this store Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs Returns ------- variables : dict-like attributes : dict-like """ encoded_variables = {} for k, v in variables.items(): try: encoded_variables[k] = self.encode_variable(v) except Exception as e: e.add_note(f"Raised while encoding variable {k!r} with value {v!r}") raise encoded_attributes = {} for k, v in attributes.items(): try: encoded_attributes[k] = self.encode_attribute(v) except Exception as e: e.add_note(f"Raised while encoding attribute {k!r} with value {v!r}") raise return encoded_variables, encoded_attributes def encode_variable(self, v, name=None): """encode one variable""" return v def encode_attribute(self, a): """encode one attribute""" return a def prepare_variable(self, name, variable, check_encoding, unlimited_dims): raise NotImplementedError() def set_dimension(self, dim, length, is_unlimited): # pragma: no cover raise NotImplementedError() def set_attribute(self, k, v): # pragma: no cover raise NotImplementedError() def set_variable(self, k, v): # pragma: no cover raise NotImplementedError() def store_dataset(self, dataset): """ in stores, variables are all variables AND coordinates in xarray.Dataset variables are variables NOT coordinates, so here we pass the whole dataset in instead of doing dataset.variables """ self.store(dataset, dataset.attrs) def store( self, variables, attributes, check_encoding_set=frozenset(), writer=None, unlimited_dims=None, ): """ Top level method for putting data on this store, this method: - encodes variables/attributes - sets dimensions - sets variables Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if writer is None: writer = ArrayWriter() variables, attributes = self.encode(variables, attributes) self.set_attributes(attributes) self.set_dimensions(variables, unlimited_dims=unlimited_dims) self.set_variables( variables, check_encoding_set, writer, unlimited_dims=unlimited_dims ) def set_attributes(self, attributes): """ This provides a centralized method to set the dataset attributes on the data store. 
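        Each key/value pair is written with the backend-specific ``set_attribute``
        method implemented by subclasses.
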
Parameters ---------- attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs """ for k, v in attributes.items(): self.set_attribute(k, v) def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None): """ This provides a centralized method to set the variables on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ for vn, v in variables.items(): name = _encode_variable_name(vn) check = vn in check_encoding_set target, source = self.prepare_variable( name, v, check, unlimited_dims=unlimited_dims ) writer.add(source, target) def set_dimensions(self, variables, unlimited_dims=None): """ This provides a centralized method to set the dimensions on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if unlimited_dims is None: unlimited_dims = set() parent_dims = self.get_parent_dimensions() existing_dims = self.get_dimensions() dims = {} for v in unlimited_dims: # put unlimited_dims first dims[v] = None for v in variables.values(): dims |= v.sizes for dim, length in dims.items(): if dim in existing_dims and length != existing_dims[dim]: raise ValueError( "Unable to update size for existing dimension" f"{dim!r} ({length} != {existing_dims[dim]})" ) elif dim not in existing_dims and length != parent_dims.get(dim): is_unlimited = dim in unlimited_dims self.set_dimension(dim, length, is_unlimited) def sync(self): """Write all buffered data to disk.""" raise NotImplementedError() def _infer_dtype(array, name=None): """Given an object array with no missing values, infer its dtype from all elements.""" if array.dtype.kind != "O": raise TypeError("infer_type must be called on a dtype=object array") if array.size == 0: return np.dtype(float) native_dtypes = set(np.vectorize(type, otypes=[object])(array.ravel())) if len(native_dtypes) > 1 and native_dtypes != {bytes, str}: native_dtype_names = ", ".join(x.__name__ for x in native_dtypes) raise ValueError( f"unable to infer dtype on variable {name!r}; object array " f"contains mixed native types: {native_dtype_names}" ) element = array[(0,) * array.ndim] # We use the base types to avoid subclasses of bytes and str (which might # not play nice with e.g. hdf5 datatypes), such as those from numpy if isinstance(element, bytes): return strings.create_vlen_dtype(bytes) elif isinstance(element, str): return strings.create_vlen_dtype(str) dtype = np.array(element).dtype if dtype.kind != "O": return dtype raise ValueError( f"unable to infer dtype on variable {name!r}; xarray " "cannot serialize arbitrary Python objects" ) def _copy_with_dtype(data, dtype: np.typing.DTypeLike | None): """Create a copy of an array with the given dtype. We use this instead of np.array() to ensure that custom object dtypes end up on the resulting array. """ result = np.empty(data.shape, dtype) result[...] 
= data return result def ensure_dtype_not_object(var: Variable, name: T_Name = None) -> Variable: if var.dtype.kind == "O": dims, data, attrs, encoding = variables.unpack_for_encoding(var) # leave vlen dtypes unchanged if strings.check_vlen_dtype(data.dtype) is not None: return var if is_duck_dask_array(data): emit_user_level_warning( f"variable {name} has data in the form of a dask array with " "dtype=object, which means it is being loaded into memory " "to determine a data type that can be safely stored on disk. " "To avoid this, coerce this variable to a fixed-size dtype " "with astype() before saving it.", category=SerializationWarning, ) data = data.compute() missing = pd.isnull(data) if missing.any(): # nb. this will fail for dask.array data non_missing_values = data[~missing] inferred_dtype = _infer_dtype(non_missing_values, name) # There is no safe bit-pattern for NA in typical binary string # formats, we so can't set a fill_value. Unfortunately, this means # we can't distinguish between missing values and empty strings. fill_value: bytes | str if strings.is_bytes_dtype(inferred_dtype): fill_value = b"" elif strings.is_unicode_dtype(inferred_dtype): fill_value = "" else: # insist on using float for numeric values if not np.issubdtype(inferred_dtype, np.floating): inferred_dtype = np.dtype(float) fill_value = inferred_dtype.type(np.nan) data = _copy_with_dtype(data, dtype=inferred_dtype) data[missing] = fill_value else: data = _copy_with_dtype(data, dtype=_infer_dtype(data, name)) assert data.dtype.kind != "O" or data.dtype.metadata var = Variable(dims, data, attrs, encoding, fastpath=True) return var class WritableCFDataStore(AbstractWritableDataStore): __slots__ = () def encode(self, variables, attributes): # All NetCDF files get CF encoded by default, without this attempting # to write times, for example, would fail. variables, attributes = cf_encoder(variables, attributes) variables = { k: ensure_dtype_not_object(v, name=k) for k, v in variables.items() } return super().encode(variables, attributes) class BackendEntrypoint: """ ``BackendEntrypoint`` is a class container and it is the main interface for the backend plugins, see :ref:`RST backend_entrypoint`. It shall implement: - ``open_dataset`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~xarray.Dataset`. It shall take in input at least ``filename_or_obj`` argument and ``drop_variables`` keyword argument. For more details see :ref:`RST open_dataset`. - ``guess_can_open`` method: it shall return ``True`` if the backend is able to open ``filename_or_obj``, ``False`` otherwise. The implementation of this method is not mandatory. - ``open_datatree`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~datatree.DataTree`. It shall take in input at least ``filename_or_obj`` argument. The implementation of this method is not mandatory. For more details see . Attributes ---------- open_dataset_parameters : tuple, default: None A list of ``open_dataset`` method parameters. The setting of this attribute is not mandatory. description : str, default: "" A short string describing the engine. The setting of this attribute is not mandatory. url : str, default: "" A string with the URL to the backend's documentation. The setting of this attribute is not mandatory. supports_groups : bool, default: False Whether the backend supports opening groups (via open_datatree and open_groups_as_dict) or not. 
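
    Examples
    --------
    A minimal, purely illustrative entrypoint (all names below are hypothetical)
    might look like::

        class MyBackendEntrypoint(BackendEntrypoint):
            description = "Open hypothetical .my files in Xarray"

            def open_dataset(self, filename_or_obj, *, drop_variables=None):
                # read filename_or_obj and build an xarray.Dataset here
                ...

            def guess_can_open(self, filename_or_obj):
                return str(filename_or_obj).endswith(".my")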
""" open_dataset_parameters: ClassVar[tuple | None] = None description: ClassVar[str] = "" url: ClassVar[str] = "" supports_groups: ClassVar[bool] = False def __repr__(self) -> str: txt = f"<{type(self).__name__}>" if self.description: txt += f"\n {self.description}" if self.url: txt += f"\n Learn more at {self.url}" return txt def open_dataset( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> Dataset: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ raise NotImplementedError() def guess_can_open( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> bool: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ return False def open_datatree( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> DataTree: """ Backend open_datatree method used by Xarray in :py:func:`~xarray.open_datatree`. If implemented, set the class variable supports_groups to True. """ raise NotImplementedError() def open_groups_as_dict( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> dict[str, Dataset]: """ Opens a dictionary mapping from group names to Datasets. Called by :py:func:`~xarray.open_groups`. This function exists to provide a universal way to open all groups in a file, before applying any additional consistency checks or requirements necessary to create a `DataTree` object (typically done using :py:meth:`~xarray.DataTree.from_dict`). If implemented, set the class variable supports_groups to True. """ raise NotImplementedError() # mapping of engine name to (module name, BackendEntrypoint Class) BACKEND_ENTRYPOINTS: dict[str, tuple[str | None, type[BackendEntrypoint]]] = {} def _is_likely_dap_url(url: str) -> bool: """ Determines if a URL is likely an OPeNDAP (DAP) endpoint based on known protocols, server software path patterns, and file extensions. Parameters ---------- url : str Returns ------- True if the URL matches common DAP patterns, False otherwise. 
""" if not url: return False url_lower = url.lower() # For remote URIs, check for DAP server software path patterns if is_remote_uri(url_lower): dap_path_patterns = ( "/dodsc/", # THREDDS Data Server (TDS) DAP endpoint (case-insensitive) "/dods/", # GrADS Data Server (GDS) DAP endpoint "/opendap/", # Generic OPeNDAP/Hyrax server "/erddap/", # ERDDAP data server "/dap2/", # Explicit DAP2 version in path "/dap4/", # Explicit DAP4 version in path "/dap/", ) return any(pattern in url_lower for pattern in dap_path_patterns) return False ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/file_manager.py����������������������������������������������������0000664�0000000�0000000�00000041066�15114646760�0021415�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import atexit import threading import uuid import warnings from collections.abc import Callable, Hashable, Iterator, Mapping, MutableMapping from contextlib import AbstractContextManager, contextmanager from typing import Any, Generic, Literal, TypeVar, cast from xarray.backends.locks import acquire from xarray.backends.lru_cache import LRUCache from xarray.core import utils from xarray.core.options import OPTIONS from xarray.core.types import Closable, Lock # Global cache for storing open files. FILE_CACHE: LRUCache[Any, Closable] = LRUCache( maxsize=OPTIONS["file_cache_maxsize"], on_evict=lambda k, v: v.close() ) assert FILE_CACHE.maxsize, "file cache must be at least size one" T_File = TypeVar("T_File", bound=Closable) REF_COUNTS: dict[Any, int] = {} _OMIT_MODE = utils.ReprObject("") class FileManager(Generic[T_File]): """Manager for acquiring and closing a file object. Use FileManager subclasses (CachingFileManager in particular) on backend storage classes to automatically handle issues related to keeping track of many open files and transferring them between multiple processes. """ def acquire(self, needs_lock: bool = True) -> T_File: """Acquire the file object from this manager.""" raise NotImplementedError() def acquire_context( self, needs_lock: bool = True ) -> AbstractContextManager[T_File]: """Context manager for acquiring a file. Yields a file object. The context manager unwinds any actions taken as part of acquisition (i.e., removes it from any cache) if an exception is raised from the context. It *does not* automatically close the file. """ raise NotImplementedError() def close(self, needs_lock: bool = True) -> None: """Close the file object associated with this manager, if needed.""" raise NotImplementedError() class CachingFileManager(FileManager[T_File]): """Wrapper for automatically opening and closing file objects. Unlike files, CachingFileManager objects can be safely pickled and passed between processes. They should be explicitly closed to release resources, but a per-process least-recently-used cache for open files ensures that you can safely create arbitrarily large numbers of FileManager objects. 
Don't directly close files acquired from a FileManager. Instead, call FileManager.close(), which ensures that closed files are removed from the cache as well. Example usage:: manager = FileManager(open, "example.txt", mode="w") f = manager.acquire() f.write(...) manager.close() # ensures file is closed Note that as long as previous files are still cached, acquiring a file multiple times from the same FileManager is essentially free:: f1 = manager.acquire() f2 = manager.acquire() assert f1 is f2 """ def __init__( self, opener: Callable[..., T_File], *args: Any, mode: Any = _OMIT_MODE, kwargs: Mapping[str, Any] | None = None, lock: Lock | None | Literal[False] = None, cache: MutableMapping[Any, T_File] | None = None, manager_id: Hashable | None = None, ref_counts: dict[Any, int] | None = None, ): """Initialize a CachingFileManager. The cache, manager_id and ref_counts arguments exist solely to facilitate dependency injection, and should only be set for tests. Parameters ---------- opener : callable Function that when called like ``opener(*args, **kwargs)`` returns an open file object. The file object must implement a ``close()`` method. *args Positional arguments for opener. A ``mode`` argument should be provided as a keyword argument (see below). All arguments must be hashable. mode : optional If provided, passed as a keyword argument to ``opener`` along with ``**kwargs``. ``mode='w' `` has special treatment: after the first call it is replaced by ``mode='a'`` in all subsequent function to avoid overriding the newly created file. kwargs : dict, optional Keyword arguments for opener, excluding ``mode``. All values must be hashable. lock : duck-compatible threading.Lock, optional Lock to use when modifying the cache inside acquire() and close(). By default, uses a new threading.Lock() object. If set, this object should be pickleable. cache : MutableMapping, optional Mapping to use as a cache for open files. By default, uses xarray's global LRU file cache. Because ``cache`` typically points to a global variable and contains non-picklable file objects, an unpickled FileManager objects will be restored with the default cache. manager_id : hashable, optional Identifier for this CachingFileManager. ref_counts : dict, optional Optional dict to use for keeping track the number of references to the same file. """ self._opener = opener self._args = args self._mode = mode self._kwargs = {} if kwargs is None else dict(kwargs) if lock is None or lock is False: self._use_default_lock = True self._lock: Lock = threading.Lock() else: self._use_default_lock = False self._lock = lock # cache[self._key] stores the file associated with this object. if cache is None: cache = cast(MutableMapping[Any, T_File], FILE_CACHE) self._cache: MutableMapping[Any, T_File] = cache if manager_id is None: # Each call to CachingFileManager should separately open files. manager_id = str(uuid.uuid4()) self._manager_id = manager_id self._key = self._make_key() # ref_counts[self._key] stores the number of CachingFileManager objects # in memory referencing this same file. We use this to know if we can # close a file when the manager is deallocated. 
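        # For example, pickling and unpickling a manager yields two objects that
        # share the same cache key; the underlying file is closed only once the
        # last of them has been closed or garbage collected.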
if ref_counts is None: ref_counts = REF_COUNTS self._ref_counter = _RefCounter(ref_counts) self._ref_counter.increment(self._key) def _make_key(self) -> _HashedSequence: """Make a key for caching files in the LRU cache.""" value = ( self._opener, self._args, "a" if self._mode == "w" else self._mode, tuple(sorted(self._kwargs.items())), self._manager_id, ) return _HashedSequence(value) @contextmanager def _optional_lock(self, needs_lock: bool): """Context manager for optionally acquiring a lock.""" if needs_lock: with self._lock: yield else: yield def acquire(self, needs_lock: bool = True) -> T_File: """Acquire a file object from the manager. A new file is only opened if it has expired from the least-recently-used cache. This method uses a lock, which ensures that it is thread-safe. You can safely acquire a file in multiple threads at the same time, as long as the underlying file object is thread-safe. Returns ------- file-like An open file object, as returned by ``opener(*args, **kwargs)``. """ file, _ = self._acquire_with_cache_info(needs_lock) return file @contextmanager def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]: """Context manager for acquiring a file.""" file, cached = self._acquire_with_cache_info(needs_lock) try: yield file except Exception: if not cached: self.close(needs_lock) raise def _acquire_with_cache_info(self, needs_lock: bool = True) -> tuple[T_File, bool]: """Acquire a file, returning the file and whether it was cached.""" with self._optional_lock(needs_lock): try: file = self._cache[self._key] except KeyError: kwargs = self._kwargs if self._mode is not _OMIT_MODE: kwargs = kwargs.copy() kwargs["mode"] = self._mode file = self._opener(*self._args, **kwargs) if self._mode == "w": # ensure file doesn't get overridden when opened again self._mode = "a" self._cache[self._key] = file return file, False else: return file, True def close(self, needs_lock: bool = True) -> None: """Explicitly close any associated file object (if necessary).""" # TODO: remove needs_lock if/when we have a reentrant lock in # dask.distributed: https://github.com/dask/dask/issues/3832 with self._optional_lock(needs_lock): default = None file = self._cache.pop(self._key, default) if file is not None: file.close() def __del__(self) -> None: # If we're the only CachingFileManger referencing a unclosed file, # remove it from the cache upon garbage collection. # # We keep track of our own reference count because we don't want to # close files if another identical file manager needs it. This can # happen if a CachingFileManager is pickled and unpickled without # closing the original file. ref_count = self._ref_counter.decrement(self._key) if not ref_count and self._key in self._cache: if acquire(self._lock, blocking=False): # Only close files if we can do so immediately. try: self.close(needs_lock=False) finally: self._lock.release() if OPTIONS["warn_for_unclosed_files"]: warnings.warn( f"deallocating {self}, but file is not already closed. " "This may indicate a bug.", RuntimeWarning, stacklevel=2, ) def __getstate__(self): """State for pickling.""" # cache is intentionally omitted: we don't want to try to serialize # these global objects. 
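        # The default lock is likewise dropped here and recreated as a fresh
        # threading.Lock when the manager is unpickled via __setstate__.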
lock = None if self._use_default_lock else self._lock return ( self._opener, self._args, self._mode, self._kwargs, lock, self._manager_id, ) def __setstate__(self, state) -> None: """Restore from a pickle.""" opener, args, mode, kwargs, lock, manager_id = state self.__init__( # type: ignore[misc] opener, *args, mode=mode, kwargs=kwargs, lock=lock, manager_id=manager_id ) def __repr__(self) -> str: args_string = ", ".join(map(repr, self._args)) if self._mode is not _OMIT_MODE: args_string += f", mode={self._mode!r}" return ( f"{type(self).__name__}({self._opener!r}, {args_string}, " f"kwargs={self._kwargs}, manager_id={self._manager_id!r})" ) class _RefCounter: """Class for keeping track of reference counts.""" def __init__(self, counts): self._counts = counts self._lock = threading.Lock() def increment(self, name): with self._lock: count = self._counts[name] = self._counts.get(name, 0) + 1 return count def decrement(self, name): with self._lock: count = self._counts[name] - 1 if count: self._counts[name] = count else: del self._counts[name] return count class _HashedSequence(list): """Speedup repeated look-ups by caching hash values. Based on what Python uses internally in functools.lru_cache. Python doesn't perform this optimization automatically: https://bugs.python.org/issue1462796 """ def __init__(self, tuple_value): self[:] = tuple_value self.hashvalue = hash(tuple_value) def __hash__(self) -> int: # type: ignore[override] return self.hashvalue def _get_none() -> None: return None class PickleableFileManager(FileManager[T_File]): """File manager that supports pickling by reopening a file object. Use PickleableFileManager for wrapping file-like objects that do not natively support pickling (e.g., netCDF4.Dataset and h5netcdf.File) in cases where a global cache is not desirable (e.g., for netCDF files opened from bytes in memory, or from existing file objects). """ def __init__( self, opener: Callable[..., T_File], *args: Any, mode: Any = _OMIT_MODE, kwargs: Mapping[str, Any] | None = None, ): kwargs = {} if kwargs is None else dict(kwargs) self._opener = opener self._args = args self._mode = "a" if mode == "w" else mode self._kwargs = kwargs # Note: No need for locking with PickleableFileManager, because all # opening of files happens in the constructor. if mode != _OMIT_MODE: kwargs = kwargs | {"mode": mode} self._file: T_File | None = opener(*args, **kwargs) @property def _closed(self) -> bool: # If opener() raised an error in the constructor, _file may not be set return getattr(self, "_file", None) is None def _get_unclosed_file(self) -> T_File: if self._closed: raise RuntimeError("file is closed") file = self._file assert file is not None return file def acquire(self, needs_lock: bool = True) -> T_File: del needs_lock # unused return self._get_unclosed_file() @contextmanager def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]: del needs_lock # unused yield self._get_unclosed_file() def close(self, needs_lock: bool = True) -> None: del needs_lock # unused if not self._closed: file = self._get_unclosed_file() file.close() self._file = None # Remove all references to opener arguments, so they can be garbage # collected. self._args = () self._mode = _OMIT_MODE self._kwargs = {} def __del__(self) -> None: if not self._closed: self.close() if OPTIONS["warn_for_unclosed_files"]: warnings.warn( f"deallocating {self}, but file is not already closed. 
" "This may indicate a bug.", RuntimeWarning, stacklevel=2, ) def __getstate__(self): # file is intentionally omitted: we want to open it again opener = _get_none if self._closed else self._opener return (opener, self._args, self._mode, self._kwargs) def __setstate__(self, state) -> None: opener, args, mode, kwargs = state self.__init__(opener, *args, mode=mode, kwargs=kwargs) # type: ignore[misc] def __repr__(self) -> str: if self._closed: return f"" args_string = ", ".join(map(repr, self._args)) if self._mode is not _OMIT_MODE: args_string += f", mode={self._mode!r}" kwargs = ( self._kwargs | {"memory": utils.ReprObject("...")} if "memory" in self._kwargs else self._kwargs ) return f"{type(self).__name__}({self._opener!r}, {args_string}, {kwargs=})" @atexit.register def _remove_del_methods(): # We don't need to close unclosed files at program exit, and may not be able # to, because Python is cleaning up imports / globals. del CachingFileManager.__del__ del PickleableFileManager.__del__ class DummyFileManager(FileManager[T_File]): """FileManager that simply wraps an open file in the FileManager interface.""" def __init__(self, value: T_File, *, close: Callable[[], None] | None = None): if close is None: close = value.close self._value = value self._close = close def acquire(self, needs_lock: bool = True) -> T_File: del needs_lock # unused return self._value @contextmanager def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]: del needs_lock # unused yield self._value def close(self, needs_lock: bool = True) -> None: del needs_lock # unused self._close() ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/h5netcdf_.py�������������������������������������������������������0000664�0000000�0000000�00000054224�15114646760�0020643�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import io import os from collections.abc import Iterable from typing import TYPE_CHECKING, Any, Self import numpy as np from packaging.version import Version from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendEntrypoint, BytesIOProxy, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, _open_remote_file, collect_ancestor_dimensions, datatree_from_dict_with_io_cleanup, find_root_and_group, ) from xarray.backends.file_manager import ( CachingFileManager, DummyFileManager, FileManager, PickleableFileManager, ) from xarray.backends.locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock from xarray.backends.netcdf3 import encode_nc3_attr_value, encode_nc3_variable from xarray.backends.netCDF4_ import ( BaseNetCDF4Array, _build_and_get_enum, _encode_nc4_variable, _ensure_no_forward_slash_in_name, _extract_nc4_variable_encoding, _get_datatype, _nc4_require_group, ) from xarray.backends.store 
import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( FrozenDict, emit_user_level_warning, is_remote_uri, read_magic_number_from_file, try_read_magic_number_from_file_or_path, ) from xarray.core.variable import Variable if TYPE_CHECKING: import h5netcdf from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ReadBuffer class H5NetCDFArrayWrapper(BaseNetCDF4Array): def get_array(self, needs_lock=True): ds = self.datastore._acquire(needs_lock) return ds.variables[self.variable_name] def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem ) def _getitem(self, key): with self.datastore.lock: array = self.get_array(needs_lock=False) return array[key] def _read_attributes(h5netcdf_var): # GH451 # to ensure conventions decoding works properly on Python 3, decode all # bytes attributes to strings attrs = {} for k, v in h5netcdf_var.attrs.items(): if k not in ["_FillValue", "missing_value"] and isinstance(v, bytes): try: v = v.decode("utf-8") except UnicodeDecodeError: emit_user_level_warning( f"'utf-8' codec can't decode bytes for attribute " f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, " f"returning bytes undecoded.", UnicodeWarning, ) attrs[k] = v return attrs _extract_h5nc_encoding = functools.partial( _extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend="h5netcdf", unlimited_dims=None, ) def _h5netcdf_create_group(dataset, name): return dataset.create_group(name) class H5NetCDFStore(WritableCFDataStore): """Store for reading and writing data via h5netcdf""" __slots__ = ( "_filename", "_group", "_manager", "_mode", "autoclose", "format", "is_remote", "lock", ) def __init__( self, manager: FileManager | h5netcdf.File | h5netcdf.Group, group=None, mode=None, format="NETCDF4", lock=HDF5_LOCK, autoclose=False, ): import h5netcdf if isinstance(manager, h5netcdf.File | h5netcdf.Group): if group is None: root, group = find_root_and_group(manager) else: if type(manager) is not h5netcdf.File: raise ValueError( "must supply a h5netcdf.File if the group argument is provided" ) root = manager manager = DummyFileManager(root) self._manager = manager self._group = group self._mode = mode self.format = format or "NETCDF4" # todo: utilizing find_root_and_group seems a bit clunky # making filename available on h5netcdf.Group seems better self._filename = find_root_and_group(self.ds)[0].filename self.is_remote = is_remote_uri(self._filename) self.lock = ensure_lock(lock) self.autoclose = autoclose def get_child_store(self, group: str) -> Self: if self.format == "NETCDF4_CLASSIC": raise ValueError("Cannot create sub-groups in `NETCDF4_CLASSIC` format.") if self._group is not None: group = os.path.join(self._group, group) return type(self)( self._manager, group=group, mode=self._mode, lock=self.lock, autoclose=self.autoclose, ) @classmethod def open( cls, filename, mode="r", format="NETCDF4", group=None, lock=None, autoclose=False, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, storage_options: dict[str, Any] | None = None, ): import h5netcdf if isinstance(filename, str) and is_remote_uri(filename) and driver is None: mode_ = "rb" if mode == "r" else mode filename = _open_remote_file( filename, mode=mode_, storage_options=storage_options ) if isinstance(filename, BytesIOProxy): source = filename filename = 
io.BytesIO() source.getvalue = filename.getbuffer if isinstance(filename, io.IOBase) and mode == "r": magic_number = read_magic_number_from_file(filename) if not magic_number.startswith(b"\211HDF\r\n\032\n"): raise ValueError( f"{magic_number!r} is not the signature of a valid netCDF4 file" ) if format is None: format = "NETCDF4" if format not in ["NETCDF4", "NETCDF4_CLASSIC"]: raise ValueError(f"invalid format for h5netcdf backend: {format}") kwargs = { "invalid_netcdf": invalid_netcdf, "decode_vlen_strings": decode_vlen_strings, "driver": driver, } if driver_kwds is not None: kwargs.update(driver_kwds) if phony_dims is not None: kwargs["phony_dims"] = phony_dims if Version(h5netcdf.__version__) > Version("1.6.4"): kwargs["format"] = format elif format == "NETCDF4_CLASSIC": raise ValueError( "h5netcdf >= 1.7.0 is required to save output in NETCDF4_CLASSIC format." ) if lock is None: if mode == "r": lock = HDF5_LOCK else: lock = combine_locks([HDF5_LOCK, get_write_lock(filename)]) manager_cls = ( CachingFileManager if isinstance(filename, str) and not is_remote_uri(filename) else PickleableFileManager ) manager = manager_cls(h5netcdf.File, filename, mode=mode, kwargs=kwargs) return cls( manager, group=group, format=format, mode=mode, lock=lock, autoclose=autoclose, ) def _acquire(self, needs_lock=True): with self._manager.acquire_context(needs_lock) as root: ds = _nc4_require_group( root, self._group, self._mode, create_group=_h5netcdf_create_group ) return ds @property def ds(self): return self._acquire() def open_store_variable(self, name, var): import h5netcdf.core import h5py dimensions = var.dimensions data = indexing.LazilyIndexedArray(H5NetCDFArrayWrapper(name, self)) attrs = _read_attributes(var) # netCDF4 specific encoding encoding = { "chunksizes": var.chunks, "fletcher32": var.fletcher32, "shuffle": var.shuffle, } if var.chunks: encoding["preferred_chunks"] = dict( zip(var.dimensions, var.chunks, strict=True) ) # Convert h5py-style compression options to NetCDF4-Python # style, if possible if var.compression == "gzip": encoding["zlib"] = True encoding["complevel"] = var.compression_opts elif var.compression is not None: encoding["compression"] = var.compression encoding["compression_opts"] = var.compression_opts # save source so __repr__ can detect if it's local or not encoding["source"] = self._filename encoding["original_shape"] = data.shape vlen_dtype = h5py.check_dtype(vlen=var.dtype) if vlen_dtype is str: encoding["dtype"] = str elif vlen_dtype is not None: # pragma: no cover # xarray doesn't support writing arbitrary vlen dtypes yet. 
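            # in that case encoding["dtype"] is simply left unset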
pass # just check if datatype is available and create dtype # this check can be removed if h5netcdf >= 1.4.0 for any environment elif (datatype := getattr(var, "datatype", None)) and isinstance( datatype, h5netcdf.core.EnumType ): encoding["dtype"] = np.dtype( data.dtype, metadata={ "enum": datatype.enum_dict, "enum_name": datatype.name, }, ) else: encoding["dtype"] = var.dtype return Variable(dimensions, data, attrs, encoding) def get_variables(self): return FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return FrozenDict(_read_attributes(self.ds)) def get_dimensions(self): return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items()) def get_parent_dimensions(self): return FrozenDict(collect_ancestor_dimensions(self.ds)) def get_encoding(self): return { "unlimited_dims": { k for k, v in self.ds.dimensions.items() if v.isunlimited() } } def set_dimension(self, name, length, is_unlimited=False): _ensure_no_forward_slash_in_name(name) if is_unlimited: self.ds.dimensions[name] = None self.ds.resize_dimension(name, length) else: self.ds.dimensions[name] = length def set_attribute(self, key, value): if self.format == "NETCDF4_CLASSIC": value = encode_nc3_attr_value(value) self.ds.attrs[key] = value def encode_variable(self, variable, name=None): if self.format == "NETCDF4_CLASSIC": return encode_nc3_variable(variable, name=name) else: return _encode_nc4_variable(variable, name=name) def prepare_variable( self, name, variable, check_encoding=False, unlimited_dims=None ): import h5py _ensure_no_forward_slash_in_name(name) attrs = variable.attrs.copy() dtype = _get_datatype( variable, nc_format=self.format, raise_on_invalid_encoding=check_encoding ) fillvalue = attrs.pop("_FillValue", None) if dtype is str: dtype = h5py.special_dtype(vlen=str) # check enum metadata and use h5netcdf.core.EnumType if ( hasattr(self.ds, "enumtypes") and (meta := np.dtype(dtype).metadata) and (e_name := meta.get("enum_name")) and (e_dict := meta.get("enum")) ): dtype = _build_and_get_enum(self, name, dtype, e_name, e_dict) encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding) kwargs = {} # Convert from NetCDF4-Python style compression settings to h5py style # If both styles are used together, h5py takes precedence # If set_encoding=True, raise ValueError in case of mismatch if encoding.pop("zlib", False): if check_encoding and encoding.get("compression") not in (None, "gzip"): raise ValueError("'zlib' and 'compression' encodings mismatch") encoding.setdefault("compression", "gzip") if ( check_encoding and "complevel" in encoding and "compression_opts" in encoding and encoding["complevel"] != encoding["compression_opts"] ): raise ValueError("'complevel' and 'compression_opts' encodings mismatch") complevel = encoding.pop("complevel", 0) if complevel != 0: encoding.setdefault("compression_opts", complevel) encoding["chunks"] = encoding.pop("chunksizes", None) # Do not apply compression, filters or chunking to scalars. 
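        # scalar (0-d) variables have an empty shape, so they skip this block entirely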
if variable.shape: for key in [ "compression", "compression_opts", "shuffle", "chunks", "fletcher32", ]: if key in encoding: kwargs[key] = encoding[key] if name not in self.ds: nc4_var = self.ds.create_variable( name, dtype=dtype, dimensions=variable.dims, fillvalue=fillvalue, **kwargs, ) else: nc4_var = self.ds[name] for k, v in attrs.items(): if self.format == "NETCDF4_CLASSIC": v = encode_nc3_attr_value(v) nc4_var.attrs[k] = v target = H5NetCDFArrayWrapper(name, self) return target, variable.data def sync(self): self.ds.sync() def close(self, **kwargs): self._manager.close(**kwargs) def _check_phony_dims(phony_dims): emit_phony_dims_warning = False if phony_dims is None: emit_phony_dims_warning = True phony_dims = "access" return emit_phony_dims_warning, phony_dims def _emit_phony_dims_warning(): emit_user_level_warning( "The 'phony_dims' kwarg now defaults to 'access'. " "Previously 'phony_dims=None' would raise an error. " "For full netcdf equivalence please use phony_dims='sort'.", UserWarning, ) def _normalize_filename_or_obj( filename_or_obj: T_PathFileOrDataStore, ) -> str | ReadBuffer | AbstractDataStore: if isinstance(filename_or_obj, bytes | memoryview): return io.BytesIO(filename_or_obj) else: return _normalize_path(filename_or_obj) class H5netcdfBackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the h5netcdf package. It can open ".nc", ".nc4", ".cdf" files but will only be selected as the default if the "netcdf4" engine is not available. Additionally it can open valid HDF5 files, see https://h5netcdf.org/#invalid-netcdf-files for more info. It will not be detected as valid backend for such files, so make sure to specify ``engine="h5netcdf"`` in ``open_dataset``. For more information about the underlying library, visit: https://h5netcdf.org See Also -------- backends.H5NetCDFStore backends.NetCDF4BackendEntrypoint backends.ScipyBackendEntrypoint """ description = ( "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using h5netcdf in Xarray" ) url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.H5netcdfBackendEntrypoint.html" supports_groups = True def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: from xarray.core.utils import is_remote_uri filename_or_obj = _normalize_filename_or_obj(filename_or_obj) # Try to read magic number for local files only is_remote = isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj) if not is_remote: magic_number = try_read_magic_number_from_file_or_path(filename_or_obj) if magic_number is not None: return magic_number.startswith(b"\211HDF\r\n\032\n") if isinstance(filename_or_obj, str | os.PathLike): _, ext = os.path.splitext(filename_or_obj) return ext in {".nc", ".nc4", ".cdf"} return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format="NETCDF4", group=None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, storage_options: dict[str, Any] | None = None, ) -> Dataset: # Keep this message for some versions # remove and set phony_dims="access" above emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims) filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = H5NetCDFStore.open( filename_or_obj, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, 
decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, storage_options=storage_options, ) store_entrypoint = StoreBackendEntrypoint() ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) # only warn if phony_dims exist in file # remove together with the above check # after some versions if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning: _emit_phony_dims_warning() return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format="NETCDF4", group: str | None = None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, **kwargs, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, **kwargs, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format="NETCDF4", group: str | None = None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, **kwargs, ) -> dict[str, Dataset]: from xarray.backends.common import _iter_nc_groups from xarray.core.treenode import NodePath from xarray.core.utils import close_on_error # Keep this message for some versions # remove and set phony_dims="access" above emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims) filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = H5NetCDFStore.open( filename_or_obj, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, ) # Check for a group and make it a parent if it exists if group: parent = NodePath("/") / NodePath(group) else: parent = NodePath("/") manager = store._manager groups_dict = {} for path_group in _iter_nc_groups(store.ds, parent=parent): group_store = H5NetCDFStore(manager, group=path_group, **kwargs) store_entrypoint = StoreBackendEntrypoint() with close_on_error(group_store): group_ds = store_entrypoint.open_dataset( group_store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds # only warn if phony_dims exist in file # remove together with the above check # after some versions if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning: _emit_phony_dims_warning() 
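        # groups_dict maps group names (the root "/" and nested paths) to their
        # decoded Datasets; open_datatree assembles it into a DataTree via
        # DataTree.from_dict (see datatree_from_dict_with_io_cleanup).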
return groups_dict BACKEND_ENTRYPOINTS["h5netcdf"] = ("h5netcdf", H5netcdfBackendEntrypoint) ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/locks.py�����������������������������������������������������������0000664�0000000�0000000�00000017254�15114646760�0020121�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import multiprocessing import threading import uuid import weakref from collections.abc import Callable, Hashable, MutableMapping, Sequence from typing import Any, ClassVar, Literal from weakref import WeakValueDictionary from xarray.core.types import Lock # SerializableLock is adapted from Dask: # https://github.com/dask/dask/blob/74e898f0ec712e8317ba86cc3b9d18b6b9922be0/dask/utils.py#L1160-L1224 # Used under the terms of Dask's license, see licenses/DASK_LICENSE. class SerializableLock(Lock): """A Serializable per-process Lock This wraps a normal ``threading.Lock`` object and satisfies the same interface. However, this lock can also be serialized and sent to different processes. It will not block concurrent operations between processes (for this you should look at ``dask.multiprocessing.Lock`` or ``locket.lock_file`` but will consistently deserialize into the same lock. So if we make a lock in one process:: lock = SerializableLock() And then send it over to another process multiple times:: bytes = pickle.dumps(lock) a = pickle.loads(bytes) b = pickle.loads(bytes) Then the deserialized objects will operate as though they were the same lock, and collide as appropriate. This is useful for consistently protecting resources on a per-process level. The creation of locks is itself not threadsafe. """ _locks: ClassVar[WeakValueDictionary[Hashable, threading.Lock]] = ( WeakValueDictionary() ) token: Hashable lock: threading.Lock def __init__(self, token: Hashable | None = None): self.token = token or str(uuid.uuid4()) if self.token in SerializableLock._locks: self.lock = SerializableLock._locks[self.token] else: self.lock = threading.Lock() SerializableLock._locks[self.token] = self.lock def acquire(self, *args, **kwargs): return self.lock.acquire(*args, **kwargs) def release(self, *args, **kwargs): return self.lock.release(*args, **kwargs) def __enter__(self): self.lock.__enter__() def __exit__(self, *args): self.lock.__exit__(*args) def locked(self): return self.lock.locked() def __getstate__(self): return self.token def __setstate__(self, token): self.__init__(token) def __str__(self): return f"<{self.__class__.__name__}: {self.token}>" __repr__ = __str__ # Locks used by multiple backends. # Neither HDF5 nor the netCDF-C library are thread-safe. 
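# Illustrative sketch (not part of the library): within a single process,
# pickled copies of a SerializableLock resolve back to the same underlying
# threading.Lock (while the original is still alive), so deserialized copies
# collide as expected; they do not synchronize across processes::
#
#     import pickle
#     lock = SerializableLock()
#     clone = pickle.loads(pickle.dumps(lock))
#     assert clone.token == lock.token
#     assert clone.lock is lock.lock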
HDF5_LOCK = SerializableLock() NETCDFC_LOCK = SerializableLock() _FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary() def _get_threaded_lock(key: str) -> threading.Lock: try: lock = _FILE_LOCKS[key] except KeyError: lock = _FILE_LOCKS[key] = threading.Lock() return lock def _get_multiprocessing_lock(key: str) -> Lock: # TODO: make use of the key -- maybe use locket.py? # https://github.com/mwilliamson/locket.py del key # unused return multiprocessing.Lock() def _get_lock_maker(scheduler: str | None = None) -> Callable[..., Lock]: """Returns an appropriate function for creating resource locks. Parameters ---------- scheduler : str or None Dask scheduler being used. See Also -------- dask.utils.get_scheduler_lock """ if scheduler is None or scheduler == "threaded": return _get_threaded_lock elif scheduler == "multiprocessing": return _get_multiprocessing_lock elif scheduler == "distributed": # Lazy import distributed since it is can add a significant # amount of time to import from dask.distributed import Lock as DistributedLock return DistributedLock else: raise KeyError(scheduler) def get_dask_scheduler(get=None, collection=None) -> str | None: """Determine the dask scheduler that is being used. None is returned if no dask scheduler is active. See Also -------- dask.base.get_scheduler """ try: # Fix for bug caused by dask installation that doesn't involve the toolz library # Issue: 4164 import dask from dask.base import get_scheduler actual_get = get_scheduler(get, collection) except ImportError: return None try: from dask.distributed import Client if isinstance(actual_get.__self__, Client): return "distributed" except (ImportError, AttributeError): pass try: # As of dask=2.6, dask.multiprocessing requires cloudpickle to be installed # Dependency removed in https://github.com/dask/dask/pull/5511 if actual_get is dask.multiprocessing.get: return "multiprocessing" except AttributeError: pass return "threaded" def get_write_lock(key: str) -> Lock: """Get a scheduler appropriate lock for writing to the given resource. Parameters ---------- key : str Name of the resource for which to acquire a lock. Typically a filename. Returns ------- Lock object that can be used like a threading.Lock object. """ scheduler = get_dask_scheduler() lock_maker = _get_lock_maker(scheduler) return lock_maker(key) def acquire(lock, blocking=True): """Acquire a lock, possibly in a non-blocking fashion. Includes backwards compatibility hacks for old versions of Python, dask and dask-distributed. """ if blocking: # no arguments needed return lock.acquire() else: # "blocking" keyword argument not supported for: # - threading.Lock on Python 2. # - dask.SerializableLock with dask v1.0.0 or earlier. # - multiprocessing.Lock calls the argument "block" instead. # - dask.distributed.Lock uses the blocking argument as the first one return lock.acquire(blocking) class CombinedLock(Lock): """A combination of multiple locks. Like a locked door, a CombinedLock is locked if any of its constituent locks are locked. 
""" def __init__(self, locks: Sequence[Lock]): self.locks = tuple(set(locks)) # remove duplicates def acquire(self, blocking=True): return all(acquire(lock, blocking=blocking) for lock in self.locks) def release(self): for lock in self.locks: lock.release() def __enter__(self): for lock in self.locks: lock.__enter__() def __exit__(self, *args): for lock in self.locks: lock.__exit__(*args) def locked(self): return any(lock.locked for lock in self.locks) def __repr__(self): return f"CombinedLock({list(self.locks)!r})" class DummyLock(Lock): """DummyLock provides the lock API without any actual locking.""" def acquire(self, blocking=True): pass def release(self): pass def __enter__(self): pass def __exit__(self, *args): pass def locked(self): return False def combine_locks(locks: Sequence[Lock]) -> Lock: """Combine a sequence of locks into a single lock.""" all_locks: list[Lock] = [] for lock in locks: if isinstance(lock, CombinedLock): all_locks.extend(lock.locks) elif lock is not None: all_locks.append(lock) num_locks = len(all_locks) if num_locks > 1: return CombinedLock(all_locks) elif num_locks == 1: return all_locks[0] else: return DummyLock() def ensure_lock(lock: Lock | None | Literal[False]) -> Lock: """Ensure that the given object is a lock.""" if lock is None or lock is False: return DummyLock() return lock ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/lru_cache.py�������������������������������������������������������0000664�0000000�0000000�00000007115�15114646760�0020726�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import threading from collections import OrderedDict from collections.abc import Callable, Iterator, MutableMapping from typing import Any, TypeVar K = TypeVar("K") V = TypeVar("V") class LRUCache(MutableMapping[K, V]): """Thread-safe LRUCache based on an OrderedDict. All dict operations (__getitem__, __setitem__, __contains__) update the priority of the relevant key and take O(1) time. The dict is iterated over in order from the oldest to newest key, which means that a complete pass over the dict should not affect the order of any entries. When a new item is set and the maximum size of the cache is exceeded, the oldest item is dropped and called with ``on_evict(key, value)``. The ``maxsize`` property can be used to view or adjust the capacity of the cache, e.g., ``cache.maxsize = new_size``. """ _cache: OrderedDict[K, V] _maxsize: int _lock: threading.RLock _on_evict: Callable[[K, V], Any] | None __slots__ = ("_cache", "_lock", "_maxsize", "_on_evict") def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] | None = None): """ Parameters ---------- maxsize : int Integer maximum number of items to hold in the cache. on_evict : callable, optional Function to call like ``on_evict(key, value)`` when items are evicted. 
""" if not isinstance(maxsize, int): raise TypeError("maxsize must be an integer") if maxsize < 0: raise ValueError("maxsize must be non-negative") self._maxsize = maxsize self._cache = OrderedDict() self._lock = threading.RLock() self._on_evict = on_evict def __getitem__(self, key: K) -> V: # record recent use of the key by moving it to the front of the list with self._lock: value = self._cache[key] self._cache.move_to_end(key) return value def _enforce_size_limit(self, capacity: int) -> None: """Shrink the cache if necessary, evicting the oldest items.""" while len(self._cache) > capacity: key, value = self._cache.popitem(last=False) if self._on_evict is not None: self._on_evict(key, value) def __setitem__(self, key: K, value: V) -> None: with self._lock: if key in self._cache: # insert the new value at the end del self._cache[key] self._cache[key] = value elif self._maxsize: # make room if necessary self._enforce_size_limit(self._maxsize - 1) self._cache[key] = value elif self._on_evict is not None: # not saving, immediately evict self._on_evict(key, value) def __delitem__(self, key: K) -> None: del self._cache[key] def __iter__(self) -> Iterator[K]: # create a list, so accessing the cache during iteration cannot change # the iteration order return iter(list(self._cache)) def __len__(self) -> int: return len(self._cache) @property def maxsize(self) -> int: """Maximum number of items can be held in the cache.""" return self._maxsize @maxsize.setter def maxsize(self, size: int) -> None: """Resize the cache, evicting the oldest items if necessary.""" if size < 0: raise ValueError("maxsize must be non-negative") with self._lock: self._enforce_size_limit(size) self._maxsize = size ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/memory.py����������������������������������������������������������0000664�0000000�0000000�00000002765�15114646760�0020317�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import copy import numpy as np from xarray.backends.common import AbstractWritableDataStore from xarray.core import indexing from xarray.core.variable import Variable class InMemoryDataStore(AbstractWritableDataStore): """ Stores dimensions, variables and attributes in ordered dictionaries, making this store fast compared to stores which save to disk. This store exists purely for internal testing purposes. 
""" def __init__(self, variables=None, attributes=None): self._variables = {} if variables is None else variables self._attributes = {} if attributes is None else attributes def get_attrs(self): return self._attributes def get_variables(self): res = {} for k, v in self._variables.items(): v = v.copy(deep=True) res[k] = v v._data = indexing.LazilyIndexedArray(v._data) return res def get_dimensions(self): return {d: s for v in self._variables.values() for d, s in v.dims.items()} def prepare_variable(self, k, v, *args, **kwargs): new_var = Variable(v.dims, np.empty_like(v), v.attrs) self._variables[k] = new_var return new_var, v.data def set_attribute(self, k, v): # copy to imitate writing to disk. self._attributes[k] = copy.deepcopy(v) def set_dimension(self, dim, length, unlimited_dims=None): # in this model, dimensions are accounted for in the variables pass �����������xarray-2025.12.0/xarray/backends/netCDF4_.py��������������������������������������������������������0000664�0000000�0000000�00000072712�15114646760�0020334�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import operator import os from collections.abc import Iterable from contextlib import suppress from dataclasses import dataclass from io import IOBase from typing import TYPE_CHECKING, Any, Self import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendArray, BackendEntrypoint, BytesIOProxy, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, collect_ancestor_dimensions, datatree_from_dict_with_io_cleanup, find_root_and_group, robust_getitem, ) from xarray.backends.file_manager import ( CachingFileManager, DummyFileManager, PickleableFileManager, ) from xarray.backends.locks import ( HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock, get_write_lock, ) from xarray.backends.netcdf3 import encode_nc3_attr_value, encode_nc3_variable from xarray.backends.store import StoreBackendEntrypoint from xarray.coding.strings import ( CharacterArrayCoder, EncodedStringCoder, create_vlen_dtype, is_unicode_dtype, ) from xarray.coding.variables import pop_to from xarray.core import indexing from xarray.core.utils import ( FrozenDict, close_on_error, is_remote_uri, strip_uri_params, try_read_magic_number_from_path, ) from xarray.core.variable import Variable if TYPE_CHECKING: import netCDF4 from h5netcdf.core import EnumType as h5EnumType from netCDF4 import EnumType as ncEnumType from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree # This lookup table maps from dtype.byteorder to a readable endian # string used by netCDF4. 
_endian_lookup = {"=": "native", ">": "big", "<": "little", "|": "native"} NETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK]) class BaseNetCDF4Array(BackendArray): __slots__ = ("datastore", "dtype", "shape", "variable_name") def __init__(self, variable_name, datastore): self.datastore = datastore self.variable_name = variable_name array = self.get_array() self.shape = array.shape dtype = array.dtype if dtype is str: # use object dtype (with additional vlen string metadata) because that's # the only way in numpy to represent variable length strings and to # check vlen string dtype in further steps # it also prevents automatic string concatenation via # conventions.decode_cf_variable dtype = create_vlen_dtype(str) self.dtype = dtype def __setitem__(self, key, value): with self.datastore.lock: data = self.get_array(needs_lock=False) data[key] = value if self.datastore.autoclose: self.datastore.close(needs_lock=False) def get_array(self, needs_lock=True): raise NotImplementedError("Virtual Method") class NetCDF4ArrayWrapper(BaseNetCDF4Array): __slots__ = () def get_array(self, needs_lock=True): ds = self.datastore._acquire(needs_lock) variable = ds.variables[self.variable_name] variable.set_auto_maskandscale(False) # only added in netCDF4-python v1.2.8 with suppress(AttributeError): variable.set_auto_chartostring(False) return variable def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER, self._getitem ) def _getitem(self, key): if self.datastore.is_remote: # pragma: no cover getitem = functools.partial(robust_getitem, catch=RuntimeError) else: getitem = operator.getitem try: with self.datastore.lock: original_array = self.get_array(needs_lock=False) array = getitem(original_array, key) except IndexError as err: # Catch IndexError in netCDF4 and return a more informative # error message. This is most often called when an unsorted # indexer is used before the data is loaded from disk. msg = ( "The indexing operation you are attempting to perform " "is not valid on netCDF4.Variable object. Try loading " "your data into memory first by calling .load()." ) raise IndexError(msg) from err return array def _encode_nc4_variable(var, name=None): for coder in [ EncodedStringCoder(allows_unicode=True), CharacterArrayCoder(), ]: var = coder.encode(var, name=name) return var def _check_encoding_dtype_is_vlen_string(dtype): if dtype is not str: raise AssertionError( # pragma: no cover f"unexpected dtype encoding {dtype!r}. This shouldn't happen: please " "file a bug report at github.com/pydata/xarray" ) def _get_datatype( var, nc_format="NETCDF4", raise_on_invalid_encoding=False ) -> np.dtype: if nc_format == "NETCDF4": return _nc4_dtype(var) if "dtype" in var.encoding: encoded_dtype = var.encoding["dtype"] _check_encoding_dtype_is_vlen_string(encoded_dtype) if raise_on_invalid_encoding: raise ValueError( "encoding dtype=str for vlen strings is only supported " "with format='NETCDF4'." 
) return var.dtype def _nc4_dtype(var): if "dtype" in var.encoding: dtype = var.encoding.pop("dtype") _check_encoding_dtype_is_vlen_string(dtype) elif is_unicode_dtype(var.dtype): dtype = str elif var.dtype.kind in ["i", "u", "f", "c", "S"]: dtype = var.dtype else: raise ValueError(f"unsupported dtype for netCDF4 variable: {var.dtype}") return dtype def _netcdf4_create_group(dataset, name): return dataset.createGroup(name) def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group): if group in {None, "", "/"}: # use the root group return ds else: # make sure it's a string if not isinstance(group, str): raise ValueError("group must be a string or None") # support path-like syntax path = group.strip("/").split("/") for key in path: try: ds = ds.groups[key] except KeyError as e: if mode != "r": ds = create_group(ds, key) else: # wrap error to provide slightly more helpful message raise OSError(f"group not found: {key}", e) from e return ds def _ensure_no_forward_slash_in_name(name): if "/" in name: raise ValueError( f"Forward slashes '/' are not allowed in variable and dimension names (got {name!r}). " "Forward slashes are used as hierarchy-separators for " "HDF5-based files ('netcdf4'/'h5netcdf')." ) def _ensure_fill_value_valid(data, attributes): # work around for netCDF4/scipy issue where _FillValue has the wrong type: # https://github.com/Unidata/netcdf4-python/issues/271 if data.dtype.kind == "S" and "_FillValue" in attributes: attributes["_FillValue"] = np.bytes_(attributes["_FillValue"]) def _force_native_endianness(var): # possible values for byteorder are: # = native # < little-endian # > big-endian # | not applicable # Below we check if the data type is not native or NA if var.dtype.byteorder not in ["=", "|"]: # if endianness is specified explicitly, convert to the native type data = var.data.astype(var.dtype.newbyteorder("=")) var = Variable(var.dims, data, var.attrs, var.encoding) # if endian exists, remove it from the encoding. var.encoding.pop("endian", None) # check to see if encoding has a value for endian its 'native' if var.encoding.get("endian", "native") != "native": raise NotImplementedError( "Attempt to write non-native endian type, " "this is not supported by the netCDF4 " "python library." ) return var def _extract_nc4_variable_encoding( variable: Variable, raise_on_invalid=False, lsd_okay=True, h5py_okay=False, backend="netCDF4", unlimited_dims=None, ) -> dict[str, Any]: if unlimited_dims is None: unlimited_dims = () encoding = variable.encoding.copy() safe_to_drop = {"source", "original_shape"} valid_encodings = { "zlib", "complevel", "fletcher32", "contiguous", "chunksizes", "shuffle", "_FillValue", "dtype", "compression", "significant_digits", "quantize_mode", "blosc_shuffle", "szip_coding", "szip_pixels_per_block", "endian", } if lsd_okay: valid_encodings.add("least_significant_digit") if h5py_okay: valid_encodings.add("compression_opts") if not raise_on_invalid and encoding.get("chunksizes") is not None: # It's possible to get encoded chunksizes larger than a dimension size # if the original file had an unlimited dimension. This is problematic # if the new file no longer has an unlimited dimension. 
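# Illustrative example: a variable originally stored along an unlimited
# "time" dimension may carry chunksizes=(512,) in its encoding; if it is
# re-written with a fixed time dimension of length 100, that stale chunk
# specification is dropped below instead of being passed through to the
# netCDF library.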
chunksizes = encoding["chunksizes"] chunks_too_big = any( c > d and dim not in unlimited_dims for c, d, dim in zip( chunksizes, variable.shape, variable.dims, strict=False ) ) has_original_shape = "original_shape" in encoding changed_shape = ( has_original_shape and encoding.get("original_shape") != variable.shape ) if chunks_too_big or changed_shape: del encoding["chunksizes"] var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims) if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding.keys(): del encoding["contiguous"] for k in safe_to_drop: if k in encoding: del encoding[k] if raise_on_invalid: invalid = [k for k in encoding if k not in valid_encodings] if invalid: raise ValueError( f"unexpected encoding parameters for {backend!r} backend: {invalid!r}. Valid " f"encodings are: {valid_encodings!r}" ) else: for k in list(encoding): if k not in valid_encodings: del encoding[k] return encoding def _is_list_of_strings(value) -> bool: arr = np.asarray(value) return arr.dtype.kind in ["U", "S"] and arr.size > 1 def _build_and_get_enum( store, var_name: str, dtype: np.dtype, enum_name: str, enum_dict: dict[str, int] ) -> ncEnumType | h5EnumType: """ Add or get the netCDF4 Enum based on the dtype in encoding. The return type should be ``netCDF4.EnumType``, but we avoid importing netCDF4 globally for performances. """ if enum_name not in store.ds.enumtypes: create_func = ( store.ds.createEnumType if isinstance(store, NetCDF4DataStore) else store.ds.create_enumtype ) return create_func( dtype, enum_name, enum_dict, ) datatype = store.ds.enumtypes[enum_name] if datatype.enum_dict != enum_dict: error_msg = ( f"Cannot save variable `{var_name}` because an enum" f" `{enum_name}` already exists in the Dataset but has" " a different definition. To fix this error, make sure" " all variables have a uniquely named enum in their" " `encoding['dtype'].metadata` or, if they should share" " the same enum type, make sure the enums are identical." ) raise ValueError(error_msg) return datatype @dataclass class _Thunk: """Pickleable equivalent of `lambda: value`.""" value: Any def __call__(self): return self.value @dataclass class _CloseWithCopy: """Wrapper around netCDF4's esoteric interface for writing in-memory data.""" proxy: BytesIOProxy nc4_dataset: netCDF4.Dataset def __call__(self): value = self.nc4_dataset.close() self.proxy.getvalue = _Thunk(value) class NetCDF4DataStore(WritableCFDataStore): """Store for reading and writing data via the Python-NetCDF4 library. This store supports NetCDF3, NetCDF4 and OpenDAP datasets. 
""" __slots__ = ( "_filename", "_group", "_manager", "_mode", "autoclose", "format", "is_remote", "lock", ) def __init__( self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False ): import netCDF4 if isinstance(manager, netCDF4.Dataset): if group is None: root, group = find_root_and_group(manager) else: if type(manager) is not netCDF4.Dataset: raise ValueError( "must supply a root netCDF4.Dataset if the group " "argument is provided" ) root = manager manager = DummyFileManager(root) self._manager = manager self._group = group self._mode = mode self.format = self.ds.data_model self._filename = self.ds.filepath() self.is_remote = is_remote_uri(self._filename) self.lock = ensure_lock(lock) self.autoclose = autoclose def get_child_store(self, group: str) -> Self: if self._group is not None: group = os.path.join(self._group, group) return type(self)( self._manager, group=group, mode=self._mode, lock=self.lock, autoclose=self.autoclose, ) @classmethod def open( cls, filename, mode="r", format="NETCDF4", group=None, clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, lock_maker=None, autoclose=False, ): import netCDF4 if isinstance(filename, os.PathLike): filename = os.fspath(filename) if isinstance(filename, IOBase): raise TypeError( f"file objects are not supported by the netCDF4 backend: {filename}" ) if not isinstance(filename, str | bytes | memoryview | BytesIOProxy): raise TypeError(f"invalid filename for netCDF4 backend: {filename}") if format is None: format = "NETCDF4" if lock is None: if mode == "r": if isinstance(filename, str) and is_remote_uri(filename): lock = NETCDFC_LOCK else: lock = NETCDF4_PYTHON_LOCK else: if format is None or format.startswith("NETCDF4"): lock = NETCDF4_PYTHON_LOCK else: lock = NETCDFC_LOCK if isinstance(filename, str): lock = combine_locks([lock, get_write_lock(filename)]) kwargs = dict( clobber=clobber, diskless=diskless, persist=persist, format=format, ) if auto_complex is not None: kwargs["auto_complex"] = auto_complex if isinstance(filename, BytesIOProxy): assert mode == "w" # Size hint used for creating netCDF3 files. Per the documentation # for nc__create(), the special value NC_SIZEHINT_DEFAULT (which is # the value 0), lets the netcdf library choose a suitable initial # size. 
memory = 0 kwargs["diskless"] = False nc4_dataset = netCDF4.Dataset( "", mode=mode, memory=memory, **kwargs ) close = _CloseWithCopy(filename, nc4_dataset) manager = DummyFileManager(nc4_dataset, close=close) elif isinstance(filename, bytes | memoryview): assert mode == "r" kwargs["memory"] = filename manager = PickleableFileManager( netCDF4.Dataset, "", mode=mode, kwargs=kwargs ) else: manager = CachingFileManager( netCDF4.Dataset, filename, mode=mode, kwargs=kwargs ) return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose) def _acquire(self, needs_lock=True): with self._manager.acquire_context(needs_lock) as root: ds = _nc4_require_group(root, self._group, self._mode) return ds @property def ds(self): return self._acquire() def open_store_variable(self, name: str, var): import netCDF4 dimensions = var.dimensions attributes = {k: var.getncattr(k) for k in var.ncattrs()} data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(name, self)) encoding: dict[str, Any] = {} if isinstance(var.datatype, netCDF4.EnumType): encoding["dtype"] = np.dtype( data.dtype, metadata={ "enum": var.datatype.enum_dict, "enum_name": var.datatype.name, }, ) else: encoding["dtype"] = var.dtype _ensure_fill_value_valid(data, attributes) # netCDF4 specific encoding; save _FillValue for later filters = var.filters() if filters is not None: encoding.update(filters) chunking = var.chunking() if chunking is not None: if chunking == "contiguous": encoding["contiguous"] = True encoding["chunksizes"] = None else: encoding["contiguous"] = False encoding["chunksizes"] = tuple(chunking) encoding["preferred_chunks"] = dict( zip(var.dimensions, chunking, strict=True) ) # TODO: figure out how to round-trip "endian-ness" without raising # warnings from netCDF4 # encoding['endian'] = var.endian() pop_to(attributes, encoding, "least_significant_digit") # save source so __repr__ can detect if it's local or not encoding["source"] = self._filename encoding["original_shape"] = data.shape return Variable(dimensions, data, attributes, encoding) def get_variables(self): return FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs()) def get_dimensions(self): return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items()) def get_parent_dimensions(self): return FrozenDict(collect_ancestor_dimensions(self.ds)) def get_encoding(self): return { "unlimited_dims": { k for k, v in self.ds.dimensions.items() if v.isunlimited() } } def set_dimension(self, name, length, is_unlimited=False): _ensure_no_forward_slash_in_name(name) dim_length = length if not is_unlimited else None self.ds.createDimension(name, size=dim_length) def set_attribute(self, key, value): if self.format != "NETCDF4": value = encode_nc3_attr_value(value) if _is_list_of_strings(value): # encode as NC_STRING if attr is list of strings self.ds.setncattr_string(key, value) else: self.ds.setncattr(key, value) def encode_variable(self, variable, name=None): variable = _force_native_endianness(variable) if self.format == "NETCDF4": variable = _encode_nc4_variable(variable, name=name) else: variable = encode_nc3_variable(variable, name=name) return variable def prepare_variable( self, name, variable: Variable, check_encoding=False, unlimited_dims=None ): _ensure_no_forward_slash_in_name(name) attrs = variable.attrs.copy() fill_value = attrs.pop("_FillValue", None) datatype: np.dtype | ncEnumType | h5EnumType datatype = _get_datatype( 
variable, self.format, raise_on_invalid_encoding=check_encoding ) # check enum metadata and use netCDF4.EnumType if ( (meta := np.dtype(datatype).metadata) and (e_name := meta.get("enum_name")) and (e_dict := meta.get("enum")) ): datatype = _build_and_get_enum(self, name, datatype, e_name, e_dict) encoding = _extract_nc4_variable_encoding( variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims ) if name in self.ds.variables: nc4_var = self.ds.variables[name] else: default_args = dict( varname=name, datatype=datatype, dimensions=variable.dims, zlib=False, complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None, endian="native", least_significant_digit=None, fill_value=fill_value, ) default_args.update(encoding) default_args.pop("_FillValue", None) nc4_var = self.ds.createVariable(**default_args) nc4_var.setncatts(attrs) target = NetCDF4ArrayWrapper(name, self) return target, variable.data def sync(self): self.ds.sync() def close(self, **kwargs): self._manager.close(**kwargs) class NetCDF4BackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the netCDF4 package. It can open ".nc", ".nc4", ".cdf" files and will be chosen as default for these files. Additionally it can open valid HDF5 files, see https://h5netcdf.org/#invalid-netcdf-files for more info. It will not be detected as valid backend for such files, so make sure to specify ``engine="netcdf4"`` in ``open_dataset``. For more information about the underlying library, visit: https://unidata.github.io/netcdf4-python See Also -------- backends.NetCDF4DataStore backends.H5netcdfBackendEntrypoint backends.ScipyBackendEntrypoint """ description = ( "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using netCDF4 in Xarray" ) url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.NetCDF4BackendEntrypoint.html" supports_groups = True def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: # Helper to check if magic number is netCDF or HDF5 def _is_netcdf_magic(magic: bytes) -> bool: return magic.startswith((b"CDF", b"\211HDF\r\n\032\n")) # Helper to check if extension is netCDF def _has_netcdf_ext(path: str | os.PathLike, is_remote: bool = False) -> bool: path = str(path).rstrip("/") # For remote URIs, strip query parameters and fragments if is_remote: path = strip_uri_params(path) _, ext = os.path.splitext(path) return ext in {".nc", ".nc4", ".cdf"} if isinstance(filename_or_obj, str): if is_remote_uri(filename_or_obj): # For remote URIs, check extension (accounting for query params/fragments) # Remote netcdf-c can handle both regular URLs and DAP URLs if _has_netcdf_ext(filename_or_obj, is_remote=True): return True elif "zarr" in filename_or_obj.lower(): return False # return true for non-zarr URLs so we don't have a breaking change for people relying on this # netcdf backend guessing true for all remote sources. 
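# Illustrative outcomes of this remote-URL branch (hypothetical URLs):
#   "https://host/data.nc"     -> True  (recognized netCDF extension)
#   "https://host/store.zarr"  -> False (zarr-looking URL)
#   "https://host/other/path"  -> True  (backwards-compatible fallback)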
# TODO: emit a warning here about deprecation of this behavior # https://github.com/pydata/xarray/pull/10931 return True if isinstance(filename_or_obj, str | os.PathLike): # For local paths, check magic number first, then extension magic_number = try_read_magic_number_from_path(filename_or_obj) if magic_number is not None: return _is_netcdf_magic(magic_number) # No magic number available, fallback to extension return _has_netcdf_ext(filename_or_obj) if isinstance(filename_or_obj, bytes | memoryview): return _is_netcdf_magic(bytes(filename_or_obj[:8])) return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, mode="r", format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, ) -> Dataset: filename_or_obj = _normalize_path(filename_or_obj) store = NetCDF4DataStore.open( filename_or_obj, mode=mode, format=format, group=group, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, **kwargs, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, format=format, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, **kwargs, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, **kwargs, ) -> dict[str, Dataset]: from xarray.backends.common import _iter_nc_groups from xarray.core.treenode import NodePath filename_or_obj = _normalize_path(filename_or_obj) store = NetCDF4DataStore.open( filename_or_obj, group=group, format=format, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, ) # Check for a group and make it a parent if it exists if group: parent = NodePath("/") / NodePath(group) else: parent = NodePath("/") manager = store._manager groups_dict = {} for path_group in _iter_nc_groups(store.ds, parent=parent): group_store = NetCDF4DataStore(manager, group=path_group, **kwargs) store_entrypoint = StoreBackendEntrypoint() with 
close_on_error(group_store): group_ds = store_entrypoint.open_dataset( group_store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict BACKEND_ENTRYPOINTS["netcdf4"] = ("netCDF4", NetCDF4BackendEntrypoint) ������������������������������������������������������xarray-2025.12.0/xarray/backends/netcdf3.py���������������������������������������������������������0000664�0000000�0000000�00000013140�15114646760�0020322�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import unicodedata import numpy as np from xarray import coding from xarray.core.variable import Variable # Special characters that are permitted in netCDF names except in the # 0th position of the string _specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~' # The following are reserved names in CDL and may not be used as names of # variables, dimension, attributes _reserved_names = { "byte", "char", "short", "ushort", "int", "uint", "int64", "uint64", "float", "real", "double", "bool", "string", } # These data-types aren't supported by netCDF3, so they are automatically # coerced instead as indicated by the "coerce_nc3_dtype" function _nc3_dtype_coercions = { "int64": "int32", "uint64": "int32", "uint32": "int32", "uint16": "int16", "uint8": "int8", "bool": "int8", } # encode all strings as UTF-8 STRING_ENCODING = "utf-8" COERCION_VALUE_ERROR = ( "could not safely cast array from {dtype} to {new_dtype}. While it is not " "always the case, a common reason for this is that xarray has deemed it " "safest to encode np.datetime64[ns] or np.timedelta64[ns] values with " "int64 values representing units of 'nanoseconds'. This is either due to " "the fact that the times are known to require nanosecond precision for an " "accurate round trip, or that the times are unknown prior to writing due " "to being contained in a chunked array. Ways to work around this are " "either to use a backend that supports writing int64 values, or to " "manually specify the encoding['units'] and encoding['dtype'] (e.g. " "'seconds since 1970-01-01' and np.dtype('int32')) on the time " "variable(s) such that the times can be serialized in a netCDF3 file " "(note that depending on the situation, however, this latter option may " "result in an inaccurate round trip)." ) def coerce_nc3_dtype(arr): """Coerce an array to a data type that can be stored in a netCDF-3 file This function performs the dtype conversions as specified by the ``_nc3_dtype_coercions`` mapping: int64 -> int32 uint64 -> int32 uint32 -> int32 uint16 -> int16 uint8 -> int8 bool -> int8 Data is checked for equality, or equivalence (non-NaN values) using the ``(cast_array == original_array).all()``. """ dtype = str(arr.dtype) if dtype in _nc3_dtype_coercions: new_dtype = _nc3_dtype_coercions[dtype] # TODO: raise a warning whenever casting the data-type instead? 
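# Illustrative examples of the equality check below:
#   np.array([1, 2], dtype="int64")   -> safely coerced to int32
#   np.array([2**40], dtype="int64")  -> the int32 cast changes the value,
#                                        so ValueError is raised instead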
cast_arr = arr.astype(new_dtype) if not (cast_arr == arr).all(): raise ValueError( COERCION_VALUE_ERROR.format(dtype=dtype, new_dtype=new_dtype) ) arr = cast_arr return arr def encode_nc3_attr_value(value): if isinstance(value, bytes): pass elif isinstance(value, str): value = value.encode(STRING_ENCODING) else: value = coerce_nc3_dtype(np.atleast_1d(value)) if value.ndim > 1: raise ValueError("netCDF attributes must be 1-dimensional") return value def encode_nc3_attrs(attrs): return {k: encode_nc3_attr_value(v) for k, v in attrs.items()} def _maybe_prepare_times(var): # checks for integer-based time-like and # replaces np.iinfo(np.int64).min with _FillValue or np.nan # this keeps backwards compatibility data = var.data if data.dtype.kind in "iu": units = var.attrs.get("units", None) if units is not None and coding.variables._is_time_like(units): mask = data == np.iinfo(np.int64).min if mask.any(): data = np.where(mask, var.attrs.get("_FillValue", np.nan), data) return data def encode_nc3_variable(var, name=None): for coder in [ coding.strings.EncodedStringCoder(allows_unicode=False), coding.strings.CharacterArrayCoder(), ]: var = coder.encode(var, name=name) data = _maybe_prepare_times(var) data = coerce_nc3_dtype(data) attrs = encode_nc3_attrs(var.attrs) return Variable(var.dims, data, attrs, var.encoding) def _isalnumMUTF8(c): """Return True if the given UTF-8 encoded character is alphanumeric or multibyte. Input is not checked! """ return c.isalnum() or (len(c.encode("utf-8")) > 1) def is_valid_nc3_name(s): """Test whether an object can be validly converted to a netCDF-3 dimension, variable or attribute name Earlier versions of the netCDF C-library reference implementation enforced a more restricted set of characters in creating new names, but permitted reading names containing arbitrary bytes. This specification extends the permitted characters in names to include multi-byte UTF-8 encoded Unicode and additional printing characters from the US-ASCII alphabet. The first character of a name must be alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for special names with meaning to implementations, such as the "_FillValue" attribute). Subsequent characters may also include printing special characters, except for '/' which is not allowed in names. Names that have trailing space characters are also not permitted. 
""" if not isinstance(s, str): return False num_bytes = len(s.encode("utf-8")) return ( (unicodedata.normalize("NFC", s) == s) and (s not in _reserved_names) and (num_bytes >= 0) and ("/" not in s) and (s[-1] != " ") and (_isalnumMUTF8(s[0]) or (s[0] == "_")) and all(_isalnumMUTF8(c) or c in _specialchars for c in s) ) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/plugins.py���������������������������������������������������������0000664�0000000�0000000�00000021571�15114646760�0020464�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import inspect import itertools import warnings from collections.abc import Callable from importlib.metadata import entry_points from typing import TYPE_CHECKING, Any from xarray.backends.common import BACKEND_ENTRYPOINTS, BackendEntrypoint from xarray.core.options import OPTIONS from xarray.core.utils import module_available if TYPE_CHECKING: import os from importlib.metadata import EntryPoint, EntryPoints from xarray.backends.common import AbstractDataStore from xarray.core.types import ReadBuffer def remove_duplicates(entrypoints: EntryPoints) -> list[EntryPoint]: # sort and group entrypoints by name entrypoints_sorted = sorted(entrypoints, key=lambda ep: ep.name) entrypoints_grouped = itertools.groupby(entrypoints_sorted, key=lambda ep: ep.name) # check if there are multiple entrypoints for the same name unique_entrypoints = [] for name, _matches in entrypoints_grouped: # remove equal entrypoints matches = list(set(_matches)) unique_entrypoints.append(matches[0]) matches_len = len(matches) if matches_len > 1: all_module_names = [e.value.split(":")[0] for e in matches] selected_module_name = all_module_names[0] warnings.warn( f"Found {matches_len} entrypoints for the engine name {name}:" f"\n {all_module_names}.\n " f"The entrypoint {selected_module_name} will be used.", RuntimeWarning, stacklevel=2, ) return unique_entrypoints def detect_parameters(open_dataset: Callable) -> tuple[str, ...]: signature = inspect.signature(open_dataset) parameters = signature.parameters parameters_list = [] for name, param in parameters.items(): if param.kind in ( inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL, ): raise TypeError( f"All the parameters in {open_dataset!r} signature should be explicit. 
" "*args and **kwargs is not supported" ) if name != "self": parameters_list.append(name) return tuple(parameters_list) def backends_dict_from_pkg( entrypoints: list[EntryPoint], ) -> dict[str, type[BackendEntrypoint]]: backend_entrypoints = {} for entrypoint in entrypoints: name = entrypoint.name try: backend = entrypoint.load() backend_entrypoints[name] = backend except Exception as ex: warnings.warn( f"Engine {name!r} loading failed:\n{ex}", RuntimeWarning, stacklevel=2 ) return backend_entrypoints def set_missing_parameters( backend_entrypoints: dict[str, type[BackendEntrypoint]], ) -> None: for backend in backend_entrypoints.values(): if backend.open_dataset_parameters is None: open_dataset = backend.open_dataset backend.open_dataset_parameters = detect_parameters(open_dataset) def sort_backends( backend_entrypoints: dict[str, type[BackendEntrypoint]], ) -> dict[str, type[BackendEntrypoint]]: ordered_backends_entrypoints: dict[str, type[BackendEntrypoint]] = {} for be_name in OPTIONS["netcdf_engine_order"]: if backend_entrypoints.get(be_name) is not None: ordered_backends_entrypoints[be_name] = backend_entrypoints.pop(be_name) ordered_backends_entrypoints.update( {name: backend_entrypoints[name] for name in sorted(backend_entrypoints)} ) return ordered_backends_entrypoints def build_engines(entrypoints: EntryPoints) -> dict[str, BackendEntrypoint]: backend_entrypoints: dict[str, type[BackendEntrypoint]] = {} for backend_name, (module_name, backend) in BACKEND_ENTRYPOINTS.items(): if module_name is None or module_available(module_name): backend_entrypoints[backend_name] = backend entrypoints_unique = remove_duplicates(entrypoints) external_backend_entrypoints = backends_dict_from_pkg(entrypoints_unique) backend_entrypoints.update(external_backend_entrypoints) backend_entrypoints = sort_backends(backend_entrypoints) set_missing_parameters(backend_entrypoints) return {name: backend() for name, backend in backend_entrypoints.items()} @functools.lru_cache(maxsize=1) def list_engines() -> dict[str, BackendEntrypoint]: """ Return a dictionary of available engines and their BackendEntrypoint objects. Returns ------- dictionary Notes ----- This function lives in the backends namespace (``engs=xr.backends.list_engines()``). If available, more information is available about each backend via ``engs["eng_name"]``. 
""" entrypoints = entry_points(group="xarray.backends") return build_engines(entrypoints) def refresh_engines() -> None: """Refreshes the backend engines based on installed packages.""" list_engines.cache_clear() def guess_engine( store_spec: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, must_support_groups: bool = False, ) -> str | type[BackendEntrypoint]: engines = list_engines() for engine, backend in engines.items(): if must_support_groups and not backend.supports_groups: continue try: if backend.guess_can_open(store_spec): return engine except PermissionError: raise except Exception: warnings.warn( f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2 ) compatible_engines = [] for engine, (_, backend_cls) in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if must_support_groups and not backend.supports_groups: continue if backend.guess_can_open(store_spec): compatible_engines.append(engine) except Exception: warnings.warn( f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2 ) installed_engines = [k for k in engines if k != "store"] if not compatible_engines: if installed_engines: error_msg = ( "did not find a match in any of xarray's currently installed IO " f"backends {installed_engines}. Consider explicitly selecting one of the " "installed engines via the ``engine`` parameter, or installing " "additional IO dependencies, see:\n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n" "https://docs.xarray.dev/en/stable/user-guide/io.html" ) elif must_support_groups: error_msg = ( "xarray is unable to open this file because it has no currently " "installed IO backends that support reading groups (e.g., h5netcdf " "or netCDF4-python). Xarray's read/write support requires " "installing optional IO dependencies, see:\n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n" "https://docs.xarray.dev/en/stable/user-guide/io" ) else: error_msg = ( "xarray is unable to open this file because it has no currently " "installed IO backends. Xarray's read/write support requires " "installing optional IO dependencies, see:\n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n" "https://docs.xarray.dev/en/stable/user-guide/io" ) else: error_msg = ( "found the following matches with the input file in xarray's IO " f"backends: {compatible_engines}. But their dependencies may not be installed, see:\n" "https://docs.xarray.dev/en/stable/user-guide/io.html \n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html" ) raise ValueError(error_msg) def get_backend(engine: str | type[BackendEntrypoint]) -> BackendEntrypoint: """Select open_dataset method based on current engine.""" if isinstance(engine, str): engines = list_engines() if engine not in engines: raise ValueError( f"unrecognized engine '{engine}' must be one of your download engines: {list(engines)}. 
" "To install additional dependencies, see:\n" "https://docs.xarray.dev/en/stable/user-guide/io.html \n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html" ) backend = engines[engine] elif issubclass(engine, BackendEntrypoint): backend = engine() else: raise TypeError( "engine must be a string or a subclass of " f"xarray.backends.BackendEntrypoint: {engine}" ) return backend ���������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/pydap_.py����������������������������������������������������������0000664�0000000�0000000�00000033454�15114646760�0020262�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import os from collections.abc import Iterable from typing import TYPE_CHECKING, Any import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractDataStore, BackendArray, BackendEntrypoint, T_PathFileOrDataStore, _is_likely_dap_url, _normalize_path, datatree_from_dict_with_io_cleanup, robust_getitem, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( Frozen, FrozenDict, close_on_error, ) from xarray.core.variable import Variable from xarray.namedarray.pycompat import integer_types if TYPE_CHECKING: import os from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ReadBuffer class PydapArrayWrapper(BackendArray): def __init__(self, array, checksums=True): self.array = array @property def shape(self) -> tuple[int, ...]: return self.array.shape @property def dtype(self): return self.array.dtype def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.BASIC, self._getitem ) def _getitem(self, key): result = robust_getitem(self.array, key, catch=ValueError) result = np.asarray(result.data) axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types)) if result.ndim + len(axis) != self.array.ndim and axis: result = np.squeeze(result, axis) return result def get_group(ds, group): if group in {None, "", "/"}: # use the root group return ds else: try: return ds[group] except KeyError as e: # wrap error to provide slightly more helpful message raise KeyError(f"group not found: {group}", e) from e class PydapDataStore(AbstractDataStore): """Store for accessing OpenDAP datasets with pydap. This store provides an alternative way to access OpenDAP datasets that may be useful if the netCDF4 library is not available. """ def __init__( self, dataset, group=None, session=None, protocol=None, checksums=True, ): """ Parameters ---------- ds : pydap DatasetType group: str or None (default None) The group to open. If None, the root group is opened. 
""" self.dataset = dataset self.group = group self._protocol = protocol self._checksums = checksums # true by default @classmethod def open( cls, url, group=None, application=None, session=None, output_grid=None, timeout=None, verify=None, user_charset=None, checksums=True, ): from pydap.client import open_url from pydap.net import DEFAULT_TIMEOUT if output_grid is not None: # output_grid is no longer passed to pydap.client.open_url from xarray.core.utils import emit_user_level_warning emit_user_level_warning( "`output_grid` is deprecated and will be removed in a future version" " of xarray. Will be set to `None`, the new default. ", DeprecationWarning, ) output_grid = False # new default behavior kwargs = { "url": url, "application": application, "session": session, "output_grid": output_grid or False, "timeout": timeout or DEFAULT_TIMEOUT, "verify": verify or True, "user_charset": user_charset, } if isinstance(url, str): # check uit begins with an acceptable scheme dataset = open_url(**kwargs) elif hasattr(url, "ds"): # pydap dataset dataset = url.ds args = {"dataset": dataset, "checksums": checksums} if group: args["group"] = group if url.startswith(("http", "dap2")): args["protocol"] = "dap2" elif url.startswith("dap4"): args["protocol"] = "dap4" return cls(**args) def open_store_variable(self, var): if hasattr(var, "dims"): dimensions = [ dim.split("/")[-1] if dim.startswith("/") else dim for dim in var.dims ] else: # GridType does not have a dims attribute - instead get `dimensions` # see https://github.com/pydap/pydap/issues/485 dimensions = var.dimensions if ( self._protocol == "dap4" and var.name in dimensions and hasattr(var, "dataset") # only True for pydap>3.5.5 ): var.dataset.enable_batch_mode() data_array = self._get_data_array(var) data = indexing.LazilyIndexedArray(data_array) var.dataset.disable_batch_mode() else: # all non-dimension variables data = indexing.LazilyIndexedArray(PydapArrayWrapper(var)) return Variable(dimensions, data, var.attributes) def get_variables(self): # get first all variables arrays, excluding any container type like, # `Groups`, `Sequence` or `Structure` types try: _vars = list(self.ds.variables()) _vars += list(self.ds.grids()) # dap2 objects except AttributeError: from pydap.model import GroupType _vars = [ var for var in self.ds.keys() # check the key is not a BaseType or GridType if not isinstance(self.ds[var], GroupType) ] return FrozenDict((k, self.open_store_variable(self.ds[k])) for k in _vars) def get_attrs(self): """Remove any opendap specific attributes""" opendap_attrs = ( "configuration", "build_dmrpp", "bes", "libdap", "invocation", "dimensions", "path", "Maps", ) attrs = dict(self.ds.attributes) list(map(attrs.pop, opendap_attrs, [None] * len(opendap_attrs))) return Frozen(attrs) def get_dimensions(self): return Frozen(sorted(self.ds.dimensions)) @property def ds(self): return get_group(self.dataset, self.group) def _get_data_array(self, var): """gets dimension data all at once, storing the numpy arrays within a cached dictionary """ from pydap.client import get_batch_data if not var._is_data_loaded(): # data has not been deserialized yet # runs only once per store/hierarchy get_batch_data(var, checksums=self._checksums) return self.dataset[var.id].data class PydapBackendEntrypoint(BackendEntrypoint): """ Backend for steaming datasets over the internet using the Data Access Protocol, also known as DODS or OPeNDAP based on the pydap package. This backend is selected by default for urls. 
For more information about the underlying library, visit: https://pydap.github.io/pydap/en/intro.html See Also -------- backends.PydapDataStore """ description = "Open remote datasets via OPeNDAP using pydap in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.PydapBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: if not isinstance(filename_or_obj, str): return False return _is_likely_dap_url(filename_or_obj) def open_dataset( self, filename_or_obj: ( str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore ), *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, application=None, session=None, output_grid=None, timeout=None, verify=None, user_charset=None, checksums=True, ) -> Dataset: store = PydapDataStore.open( url=filename_or_obj, group=group, application=application, session=session, output_grid=output_grid, timeout=timeout, verify=verify, user_charset=user_charset, checksums=checksums, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, application=None, session=None, timeout=None, verify=None, user_charset=None, checksums=True, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, application=application, session=session, timeout=timeout, verify=verify, user_charset=user_charset, checksums=checksums, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, application=None, session=None, timeout=None, verify=None, user_charset=None, checksums=True, ) -> dict[str, Dataset]: from xarray.core.treenode import NodePath filename_or_obj = _normalize_path(filename_or_obj) store = PydapDataStore.open( url=filename_or_obj, application=application, session=session, timeout=timeout, verify=verify, user_charset=user_charset, checksums=checksums, ) # Check for a group and make it a parent if it exists if group: parent = str(NodePath("/") / NodePath(group)) else: parent = str(NodePath("/")) groups_dict = {} group_names = [parent] # construct fully qualified path to group try: # this works for pydap >= 3.5.1 Groups = store.ds[parent].groups() except AttributeError: # THIS IS ONLY NEEDED FOR `pydap == 3.5.0` # `pydap>= 3.5.1` has a new method `groups()` # that returns a dict of group names and their paths def group_fqn(store, path=None, g_fqn=None) -> dict[str, str]: """To be removed for pydap > 3.5.0. 
Derives the fully qualifying name of a Group.""" from pydap.model import GroupType if not path: path = "/" # parent if not g_fqn: g_fqn = {} groups = [ store[key].id for key in store.keys() if isinstance(store[key], GroupType) ] for g in groups: g_fqn.update({g: path}) subgroups = [ var for var in store[g] if isinstance(store[g][var], GroupType) ] if len(subgroups) > 0: npath = path + g g_fqn = group_fqn(store[g], npath, g_fqn) return g_fqn Groups = group_fqn(store.ds) group_names += [ str(NodePath(path_to_group) / NodePath(group)) for group, path_to_group in Groups.items() ] for path_group in group_names: # get a group from the store store.group = path_group store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): group_ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict BACKEND_ENTRYPOINTS["pydap"] = ("pydap", PydapBackendEntrypoint) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/scipy_.py����������������������������������������������������������0000664�0000000�0000000�00000033253�15114646760�0020271�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import gzip import io import os from collections.abc import Iterable from typing import TYPE_CHECKING, Any import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendArray, BackendEntrypoint, BytesIOProxy, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, ) from xarray.backends.file_manager import CachingFileManager, DummyFileManager from xarray.backends.locks import ensure_lock, get_write_lock from xarray.backends.netcdf3 import ( encode_nc3_attr_value, encode_nc3_variable, is_valid_nc3_name, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( Frozen, FrozenDict, close_on_error, module_available, try_read_magic_number_from_file_or_path, ) from xarray.core.variable import Variable try: from scipy.io import netcdf_file as netcdf_file_base except ImportError: netcdf_file_base = object # type: ignore[assignment,misc,unused-ignore] # scipy is optional if TYPE_CHECKING: import scipy.io from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset from xarray.core.types import ReadBuffer HAS_NUMPY_2_0 = module_available("numpy", minversion="2.0.0.dev0") def _decode_string(s): if isinstance(s, bytes): return s.decode("utf-8", "replace") return s def _decode_attrs(d): # don't decode _FillValue from bytes -> unicode, because we want to ensure # that its type matches the data exactly return {k: v if k == "_FillValue" else _decode_string(v) for (k, v) in d.items()} class ScipyArrayWrapper(BackendArray): def 
__init__(self, variable_name, datastore): self.datastore = datastore self.variable_name = variable_name array = self.get_variable().data self.shape = array.shape self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize)) def get_variable(self, needs_lock=True): ds = self.datastore._manager.acquire(needs_lock) return ds.variables[self.variable_name] def _getitem(self, key): with self.datastore.lock: data = self.get_variable(needs_lock=False).data return data[key] def __getitem__(self, key): data = indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem ) # Copy data if the source file is mmapped. This makes things consistent # with the netCDF4 library by ensuring we can safely read arrays even # after closing associated files. copy = self.datastore.ds.use_mmap # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 and copy is False else copy return np.array(data, dtype=self.dtype, copy=copy) def __setitem__(self, key, value): with self.datastore.lock: data = self.get_variable(needs_lock=False) try: data[key] = value except TypeError: if key is Ellipsis: # workaround for GH: scipy/scipy#6880 data[:] = value else: raise # TODO: Make the scipy import lazy again after upstreaming these fixes. class flush_only_netcdf_file(netcdf_file_base): # scipy.io.netcdf_file.close() incorrectly closes file objects that # were passed in as constructor arguments: # https://github.com/scipy/scipy/issues/13905 # Instead of closing such files, only call flush(), which is # equivalent as long as the netcdf_file object is not mmapped. # This suffices to keep BytesIO objects open long enough to read # their contents from to_netcdf(), but underlying files still get # closed when the netcdf_file is garbage collected (via __del__), # and will need to be fixed upstream in scipy. def close(self): if hasattr(self, "fp") and not self.fp.closed: self.flush() self.fp.seek(0) # allow file to be read again def __del__(self): # Remove the __del__ method, which in scipy is aliased to close(). # These files need to be closed explicitly by xarray. pass def _open_scipy_netcdf(filename, mode, mmap, version, flush_only=False): import scipy.io netcdf_file = flush_only_netcdf_file if flush_only else scipy.io.netcdf_file # if the string ends with .gz, then gunzip and open as netcdf file if isinstance(filename, str) and filename.endswith(".gz"): try: return netcdf_file( gzip.open(filename), mode=mode, mmap=mmap, version=version ) except TypeError as e: # TODO: gzipped loading only works with NetCDF3 files. errmsg = e.args[0] if "is not a valid NetCDF 3 file" in errmsg: raise ValueError( "gzipped file loading only supports NetCDF 3 files." ) from e else: raise try: return netcdf_file(filename, mode=mode, mmap=mmap, version=version) except TypeError as e: # netcdf3 message is obscure in this case errmsg = e.args[0] if "is not a valid NetCDF 3 file" in errmsg: msg = """ If this is a NetCDF4 file, you may need to install the netcdf4 library, e.g., $ pip install netcdf4 """ errmsg += msg raise TypeError(errmsg) from e else: raise class ScipyDataStore(WritableCFDataStore): """Store for reading and writing data via scipy.io.netcdf_file. This store has the advantage of being able to be initialized with a StringIO object, allow for serialization without writing to disk. It only supports the NetCDF3 file-format. 
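
    For example (an illustrative sketch; ``example.nc`` is a hypothetical
    local NetCDF3 file)::

        store = ScipyDataStore("example.nc", mode="r")
        variables = store.get_variables()
        store.close()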
""" def __init__( self, filename_or_obj, mode="r", format=None, group=None, mmap=None, lock=None ): if group is not None: raise ValueError("cannot save to a group with the scipy.io.netcdf backend") if format is None or format == "NETCDF3_64BIT": version = 2 elif format == "NETCDF3_CLASSIC": version = 1 else: raise ValueError(f"invalid format for scipy.io.netcdf backend: {format!r}") if lock is None and mode != "r" and isinstance(filename_or_obj, str): lock = get_write_lock(filename_or_obj) self.lock = ensure_lock(lock) if isinstance(filename_or_obj, BytesIOProxy): source = filename_or_obj filename_or_obj = io.BytesIO() source.getvalue = filename_or_obj.getbuffer if isinstance(filename_or_obj, str): # path manager = CachingFileManager( _open_scipy_netcdf, filename_or_obj, mode=mode, lock=lock, kwargs=dict(mmap=mmap, version=version), ) elif hasattr(filename_or_obj, "seek"): # file object # Note: checking for .seek matches the check for file objects # in scipy.io.netcdf_file scipy_dataset = _open_scipy_netcdf( filename_or_obj, mode=mode, mmap=mmap, version=version, flush_only=True, ) assert not scipy_dataset.use_mmap # no mmap for file objects manager = DummyFileManager(scipy_dataset) else: raise ValueError( f"cannot open {filename_or_obj=} with scipy.io.netcdf_file" ) self._manager = manager @property def ds(self) -> scipy.io.netcdf_file: return self._manager.acquire() def open_store_variable(self, name, var): return Variable( var.dimensions, indexing.LazilyIndexedArray(ScipyArrayWrapper(name, self)), _decode_attrs(var._attributes), ) def get_variables(self): return FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return Frozen(_decode_attrs(self.ds._attributes)) def get_dimensions(self): return Frozen(self.ds.dimensions) def get_encoding(self): return { "unlimited_dims": {k for k, v in self.ds.dimensions.items() if v is None} } def set_dimension(self, name, length, is_unlimited=False): if name in self.ds.dimensions: raise ValueError( f"{type(self).__name__} does not support modifying dimensions" ) dim_length = length if not is_unlimited else None self.ds.createDimension(name, dim_length) def _validate_attr_key(self, key): if not is_valid_nc3_name(key): raise ValueError("Not a valid attribute name") def set_attribute(self, key, value): self._validate_attr_key(key) value = encode_nc3_attr_value(value) setattr(self.ds, key, value) def encode_variable(self, variable, name=None): variable = encode_nc3_variable(variable, name=name) return variable def prepare_variable( self, name, variable, check_encoding=False, unlimited_dims=None ): if ( check_encoding and variable.encoding and variable.encoding != {"_FillValue": None} ): raise ValueError( f"unexpected encoding for scipy backend: {list(variable.encoding)}" ) data = variable.data # nb. this still creates a numpy array in all memory, even though we # don't write the data yet; scipy.io.netcdf does not support incremental # writes. 
if name not in self.ds.variables: self.ds.createVariable(name, data.dtype, variable.dims) scipy_var = self.ds.variables[name] for k, v in variable.attrs.items(): self._validate_attr_key(k) setattr(scipy_var, k, v) target = ScipyArrayWrapper(name, self) return target, data def sync(self): self.ds.sync() def close(self): self._manager.close() def _normalize_filename_or_obj( filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> str | ReadBuffer | AbstractDataStore: if isinstance(filename_or_obj, bytes | memoryview): return io.BytesIO(filename_or_obj) else: return _normalize_path(filename_or_obj) class ScipyBackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the scipy package. It can open ".nc", ".cdf", and "nc..gz" files but will only be selected as the default if the "netcdf4" and "h5netcdf" engines are not available. It has the advantage that is is a lightweight engine that has no system requirements (unlike netcdf4 and h5netcdf). Additionally it can open gzip compressed (".gz") files. For more information about the underlying library, visit: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.html See Also -------- backends.ScipyDataStore backends.NetCDF4BackendEntrypoint backends.H5netcdfBackendEntrypoint """ description = "Open netCDF files (.nc, .cdf and .nc.gz) using scipy in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ScipyBackendEntrypoint.html" def guess_can_open( self, filename_or_obj: T_PathFileOrDataStore, ) -> bool: from xarray.core.utils import is_remote_uri filename_or_obj = _normalize_filename_or_obj(filename_or_obj) # scipy can only handle local files - check this before trying to read magic number if isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj): return False magic_number = try_read_magic_number_from_file_or_path(filename_or_obj) if magic_number is not None and magic_number.startswith(b"\x1f\x8b"): with gzip.open(filename_or_obj) as f: # type: ignore[arg-type] magic_number = try_read_magic_number_from_file_or_path(f) if magic_number is not None: return magic_number.startswith(b"CDF") if isinstance(filename_or_obj, str | os.PathLike): from pathlib import Path suffix = "".join(Path(filename_or_obj).suffixes) return suffix in {".nc", ".cdf", ".nc.gz"} return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, mode="r", format=None, group=None, mmap=None, lock=None, ) -> Dataset: filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = ScipyDataStore( filename_or_obj, mode=mode, format=format, group=group, mmap=mmap, lock=lock ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds BACKEND_ENTRYPOINTS["scipy"] = ("scipy", ScipyBackendEntrypoint) 
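

# A minimal usage sketch of this backend through the public xarray API
# (illustrative only; "example3.nc" is a hypothetical local NetCDF3 file):
#
#   import xarray as xr
#
#   ds = xr.open_dataset("example3.nc", engine="scipy")
#   ds.to_netcdf("copy.nc", engine="scipy", format="NETCDF3_64BIT")
#   ds.close()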
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/store.py�����������������������������������������������������������0000664�0000000�0000000�00000004453�15114646760�0020137�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Iterable from typing import TYPE_CHECKING from xarray import conventions from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractDataStore, BackendEntrypoint, T_PathFileOrDataStore, ) from xarray.core.coordinates import Coordinates from xarray.core.dataset import Dataset if TYPE_CHECKING: pass class StoreBackendEntrypoint(BackendEntrypoint): description = "Open AbstractDataStore instances in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.StoreBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: return isinstance(filename_or_obj, AbstractDataStore) def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, set_indexes: bool = True, use_cftime=None, decode_timedelta=None, ) -> Dataset: assert isinstance(filename_or_obj, AbstractDataStore) vars, attrs = filename_or_obj.load() encoding = filename_or_obj.get_encoding() vars, attrs, coord_names = conventions.decode_cf_variables( vars, attrs, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) # split data and coordinate variables (promote dimension coordinates) data_vars = {} coord_vars = {} for name, var in vars.items(): if name in coord_names or var.dims == (name,): coord_vars[name] = var else: data_vars[name] = var # explicit Coordinates object with no index passed coords = Coordinates(coord_vars, indexes={}) ds = Dataset(data_vars, coords=coords, attrs=attrs) ds.set_close(filename_or_obj.close) ds.encoding = encoding return ds BACKEND_ENTRYPOINTS["store"] = (None, StoreBackendEntrypoint) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/writers.py���������������������������������������������������������0000664�0000000�0000000�00000103242�15114646760�0020476�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import importlib import io 
import os from collections.abc import Callable, Hashable, Iterable, Mapping, MutableMapping from io import IOBase from itertools import starmap from numbers import Number from os import PathLike from typing import TYPE_CHECKING, Any, Literal, get_args, overload import numpy as np from xarray import backends, conventions from xarray.backends.api import ( _normalize_path, delayed_close_after_writes, ) from xarray.backends.common import AbstractWritableDataStore, ArrayWriter, BytesIOProxy from xarray.backends.locks import get_dask_scheduler from xarray.backends.store import AbstractDataStore from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.options import OPTIONS from xarray.core.types import NetcdfWriteModes, ZarrWriteModes from xarray.core.utils import emit_user_level_warning if TYPE_CHECKING: from dask.delayed import Delayed from xarray.backends import ZarrStore from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.core.types import ZarrStoreLike T_DataTreeNetcdfEngine = Literal["netcdf4", "h5netcdf", "pydap"] T_DataTreeNetcdfTypes = Literal["NETCDF4"] WRITEABLE_STORES: dict[T_NetcdfEngine, Callable] = { "netcdf4": backends.NetCDF4DataStore.open, "scipy": backends.ScipyDataStore, "h5netcdf": backends.H5NetCDFStore.open, } def get_writable_netcdf_store( target, engine: T_NetcdfEngine, *, format: T_NetcdfTypes | None, mode: NetcdfWriteModes, autoclose: bool, invalid_netcdf: bool, auto_complex: bool | None, ) -> AbstractWritableDataStore: """Create a store for writing to a netCDF file.""" try: store_open = WRITEABLE_STORES[engine] except KeyError as err: raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err if format is not None: format = format.upper() # type: ignore[assignment] kwargs = dict(autoclose=True) if autoclose else {} if invalid_netcdf: if engine == "h5netcdf": kwargs["invalid_netcdf"] = invalid_netcdf else: raise ValueError( f"unrecognized option 'invalid_netcdf' for engine {engine}" ) if auto_complex is not None: kwargs["auto_complex"] = auto_complex return store_open(target, mode=mode, format=format, **kwargs) def _validate_dataset_names(dataset: Dataset) -> None: """DataArray.name and Dataset keys must be a string or None""" def check_name(name: Hashable): if isinstance(name, str): if not name: raise ValueError( f"Invalid name {name!r} for DataArray or Dataset key: " "string must be length 1 or greater for " "serialization to netCDF or zarr files" ) elif name is not None: raise TypeError( f"Invalid name {name!r} for DataArray or Dataset key: " "must be either a string or None for serialization to netCDF " "or zarr files" ) for k in dataset.variables: check_name(k) def _validate_attrs(dataset, engine, invalid_netcdf=False): """`attrs` must have a string key and a value which is either: a number, a string, an ndarray, a list/tuple of numbers/strings, or a numpy.bool_. Notes ----- A numpy.bool_ is only allowed when using the h5netcdf engine with `invalid_netcdf=True`. 
""" valid_types = (str, Number, np.ndarray, np.number, list, tuple, bytes) if invalid_netcdf and engine == "h5netcdf": valid_types += (np.bool_,) def check_attr(name, value, valid_types): if isinstance(name, str): if not name: raise ValueError( f"Invalid name for attr {name!r}: string must be " "length 1 or greater for serialization to " "netCDF files" ) else: raise TypeError( f"Invalid name for attr: {name!r} must be a string for " "serialization to netCDF files" ) if not isinstance(value, valid_types): raise TypeError( f"Invalid value for attr {name!r}: {value!r}. For serialization to " "netCDF files, its value must be of one of the following types: " f"{', '.join([vtype.__name__ for vtype in valid_types])}" ) if isinstance(value, bytes) and engine == "h5netcdf": try: value.decode("utf-8") except UnicodeDecodeError as e: raise ValueError( f"Invalid value provided for attribute '{name!r}': {value!r}. " "Only binary data derived from UTF-8 encoded strings is allowed " f"for the '{engine}' engine. Consider using the 'netcdf4' engine." ) from e if b"\x00" in value: raise ValueError( f"Invalid value provided for attribute '{name!r}': {value!r}. " f"Null characters are not permitted for the '{engine}' engine. " "Consider using the 'netcdf4' engine." ) # Check attrs on the dataset itself for k, v in dataset.attrs.items(): check_attr(k, v, valid_types) # Check attrs on each variable within the dataset for variable in dataset.variables.values(): for k, v in variable.attrs.items(): check_attr(k, v, valid_types) def get_default_netcdf_write_engine( path_or_file: str | IOBase | None, format: T_NetcdfTypes | None, ) -> Literal["netcdf4", "h5netcdf", "scipy"]: """Return the default netCDF library to use for writing a netCDF file.""" module_names = { "netcdf4": "netCDF4", "scipy": "scipy", "h5netcdf": "h5netcdf", } candidates = list(OPTIONS["netcdf_engine_order"]) if format is not None: format = format.upper() # type: ignore[assignment] if format not in { "NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", "NETCDF3_CLASSIC", }: raise ValueError(f"unexpected {format=}") # TODO: allow format='NETCDF4_CLASSIC' to default to using h5netcdf, # when the oldest supported version of h5netcdf supports it: # https://github.com/h5netcdf/h5netcdf/pull/283 if format != "NETCDF4": candidates.remove("h5netcdf") if format not in {"NETCDF3_64BIT", "NETCDF3_CLASSIC"}: candidates.remove("scipy") nczarr_mode = isinstance(path_or_file, str) and path_or_file.endswith( "#mode=nczarr" ) if nczarr_mode: candidates[:] = ["netcdf4"] if isinstance(path_or_file, IOBase): candidates.remove("netcdf4") for engine in candidates: module_name = module_names[engine] if importlib.util.find_spec(module_name) is not None: return engine if nczarr_mode: format_str = " in NCZarr format" else: format_str = f" with {format=}" if format is not None else "" libraries = ", ".join(module_names[c] for c in candidates) raise ValueError( f"cannot write NetCDF files{format_str} because none of the suitable " f"backend libraries ({libraries}) are installed" ) def _sanitize_unlimited_dims(dataset, unlimited_dims): msg_origin = "unlimited_dims-kwarg" if unlimited_dims is None: unlimited_dims = dataset.encoding.get("unlimited_dims", None) msg_origin = "dataset.encoding" if unlimited_dims is not None: if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable): unlimited_dims = [unlimited_dims] else: unlimited_dims = list(unlimited_dims) dataset_dims = set(dataset.dims) unlimited_dims = set(unlimited_dims) if undeclared_dims := (unlimited_dims 
- dataset_dims): msg = ( f"Unlimited dimension(s) {undeclared_dims!r} declared in {msg_origin!r}, " f"but not part of current dataset dimensions. " f"Consider removing {undeclared_dims!r} from {msg_origin!r}." ) if msg_origin == "unlimited_dims-kwarg": raise ValueError(msg) else: emit_user_level_warning(msg) return unlimited_dims # multifile=True returns writer and datastore @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, *, multifile: Literal[True], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore]: ... # path=None writes to bytes or memoryview, depending on store @overload def to_netcdf( dataset: Dataset, path_or_file: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... # default return None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... 
# if multifile cannot be evaluated at type check time # we may get back either writer and datastore or Delayed or None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ... # Any @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase | None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None: ... def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None: """This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset. 
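
    For example (an illustrative sketch; ``ds`` is an existing ``Dataset``)::

        to_netcdf(ds, "out.nc")  # writes to disk and returns None
        buf = to_netcdf(ds)  # no target given: returns an in-memory memoryview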
""" if encoding is None: encoding = {} normalized_path = _normalize_path(path_or_file) if engine is None: engine = get_default_netcdf_write_engine(normalized_path, format) # validate Dataset keys, DataArray names, and attr keys/values _validate_dataset_names(dataset) _validate_attrs(dataset, engine, invalid_netcdf) # sanitize unlimited_dims unlimited_dims = _sanitize_unlimited_dims(dataset, unlimited_dims) autoclose = _get_netcdf_autoclose(dataset, engine) if normalized_path is None: if not compute: raise NotImplementedError( "to_netcdf() with compute=False is not yet implemented when " "returning a memoryview" ) target = BytesIOProxy() else: target = normalized_path # type: ignore[assignment] store = get_writable_netcdf_store( target, engine, mode=mode, format=format, autoclose=autoclose, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) if group is not None: store = store.get_child_store(group) writer = ArrayWriter() # TODO: figure out how to refactor this logic (here and in save_mfdataset) # to avoid this mess of conditionals try: # TODO: allow this work (setting up the file for writing array data) # to be parallelized with dask dump_to_store( dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims ) if autoclose: store.close() if multifile: return writer, store writes = writer.sync(compute=compute) finally: if not multifile and not autoclose: # type: ignore[redundant-expr,unused-ignore] if compute: store.close() else: store.sync() if path_or_file is None: assert isinstance(target, BytesIOProxy) # created in this function return target.getbuffer() if not compute: return delayed_close_after_writes(writes, store) return None def dump_to_store( dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None ): """Store dataset contents to a backends.*DataStore object.""" if writer is None: writer = ArrayWriter() if encoding is None: encoding = {} variables, attrs = conventions.encode_dataset_coordinates(dataset) check_encoding = set() for k, enc in encoding.items(): # no need to shallow copy the variable again; that already happened # in encode_dataset_coordinates variables[k].encoding = enc check_encoding.add(k) if encoder: variables, attrs = encoder(variables, attrs) store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims) def save_mfdataset( datasets, paths, mode="w", format=None, groups=None, engine=None, compute=True, **kwargs, ): """Write multiple datasets to disk as netCDF files simultaneously. This function is intended for use with datasets consisting of dask.array objects, in which case it can write the multiple datasets to disk simultaneously using a shared thread pool. When not using dask, it is no different than calling ``to_netcdf`` repeatedly. Parameters ---------- datasets : list of Dataset List of datasets to save. paths : list of str or list of path-like objects List of paths to which to save each corresponding dataset. mode : {"w", "a"}, optional Write ("w") or append ("a") mode. If mode="w", any existing file at these locations will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. 
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). groups : list of str, optional Paths to the netCDF4 group in each corresponding file to which to save datasets (only works for format="NETCDF4"). The groups will be created if necessary. engine : {"netcdf4", "h5netcdf", "scipy"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). compute : bool If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. **kwargs : dict, optional Additional arguments are passed along to ``to_netcdf``. Examples -------- Save a dataset into one netCDF per year of data: >>> ds = xr.Dataset( ... {"a": ("time", np.linspace(0, 1, 48))}, ... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)}, ... ) >>> ds Size: 768B Dimensions: (time: 48) Coordinates: * time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31 Data variables: a (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0 >>> years, datasets = zip(*ds.groupby("time.year")) >>> paths = [f"{y}.nc" for y in years] >>> xr.save_mfdataset(datasets, paths) """ if mode == "w" and len(set(paths)) < len(paths): raise ValueError( "cannot use mode='w' when writing multiple datasets to the same path" ) for obj in datasets: if not isinstance(obj, Dataset): raise TypeError( "save_mfdataset only supports writing Dataset " f"objects, received type {type(obj)}" ) if groups is None: groups = [None] * len(datasets) if len({len(datasets), len(paths), len(groups)}) > 1: raise ValueError( "must supply lists of the same length for the " "datasets, paths and groups arguments to " "save_mfdataset" ) writers, stores = zip( *[ to_netcdf( ds, path, mode, format, group, engine, compute=compute, multifile=True, **kwargs, ) for ds, path, group in zip(datasets, paths, groups, strict=True) ], strict=True, ) try: writes = [w.sync(compute=compute) for w in writers] finally: for store in stores: if compute: store.close() else: store.sync() if not compute: import dask return dask.delayed( list(starmap(delayed_close_after_writes, zip(writes, stores, strict=True))) ) def get_writable_zarr_store( store: ZarrStoreLike | None = None, *, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, ) -> backends.ZarrStore: """Create a store for writing to Zarr.""" from xarray.backends.zarr import 
_choose_default_mode, _get_mappers kwargs, mapper, chunk_mapper = _get_mappers( storage_options=storage_options, store=store, chunk_store=chunk_store ) mode = _choose_default_mode(mode=mode, append_dim=append_dim, region=region) if mode == "r+": already_consolidated = consolidated consolidate_on_close = False else: already_consolidated = False consolidate_on_close = consolidated or consolidated is None return backends.ZarrStore.open_group( store=mapper, mode=mode, synchronizer=synchronizer, group=group, consolidated=already_consolidated, consolidate_on_close=consolidate_on_close, chunk_store=chunk_mapper, append_dim=append_dim, write_region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, zarr_version=zarr_version, zarr_format=zarr_format, write_empty=write_empty_chunks, **kwargs, ) # compute=True returns ZarrStore @overload def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> backends.ZarrStore: ... # compute=False returns dask.Delayed @overload def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> backends.ZarrStore | Delayed: """This function creates an appropriate datastore for writing a dataset to a zarr ztore See `Dataset.to_zarr` for full API docs. 
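
    For example (an illustrative sketch; ``ds`` and ``ds2`` are existing
    ``Dataset`` objects and "out.zarr" is a hypothetical store path)::

        to_zarr(ds, "out.zarr", mode="w")  # create or overwrite the store
        to_zarr(ds2, "out.zarr", append_dim="time")  # append along "time"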
""" # validate Dataset keys, DataArray names _validate_dataset_names(dataset) # Load empty arrays to avoid bug saving zero length dimensions (Issue #5741) # TODO: delete when min dask>=2023.12.1 # https://github.com/dask/dask/pull/10506 for v in dataset.variables.values(): if v.size == 0: v.load() if encoding is None: encoding = {} zstore = get_writable_zarr_store( store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, ) dataset = zstore._validate_and_autodetect_region(dataset) zstore._validate_encoding(encoding) writer = ArrayWriter() # TODO: figure out how to properly handle unlimited_dims try: dump_to_store(dataset, zstore, writer, encoding=encoding) writes = writer.sync( compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs ) finally: if compute: zstore.close() if not compute: return delayed_close_after_writes(writes, zstore) return zstore def _datatree_to_netcdf( dt: DataTree, filepath: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", encoding: Mapping[str, Any] | None = None, unlimited_dims: Mapping | None = None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None | memoryview | Delayed: """Implementation of `DataTree.to_netcdf`.""" if format not in [None, *get_args(T_DataTreeNetcdfTypes)]: raise ValueError("DataTree.to_netcdf only supports the NETCDF4 format") if engine not in [None, *get_args(T_DataTreeNetcdfEngine)]: raise ValueError( "DataTree.to_netcdf only supports the netcdf4 and h5netcdf engines" ) normalized_path = _normalize_path(filepath) if engine is None: engine = get_default_netcdf_write_engine( path_or_file=normalized_path, format="NETCDF4", # required for supporting groups ) # type: ignore[assignment] if group is not None: raise NotImplementedError( "specifying a root group for the tree has not been implemented" ) if encoding is None: encoding = {} # In the future, we may want to expand this check to insure all the provided encoding # options are valid. For now, this simply checks that all provided encoding keys are # groups in the datatree. 
if set(encoding) - set(dt.groups): raise ValueError( f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}" ) if normalized_path is None: if not compute: raise NotImplementedError( "to_netcdf() with compute=False is not yet implemented when " "returning a memoryview" ) target = BytesIOProxy() else: target = normalized_path # type: ignore[assignment] if unlimited_dims is None: unlimited_dims = {} scheduler = get_dask_scheduler() have_chunks = any( v.chunks is not None for node in dt.subtree for v in node.variables.values() ) autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"] root_store = get_writable_netcdf_store( target, engine, # type: ignore[arg-type] mode=mode, format=format, autoclose=autoclose, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) writer = ArrayWriter() # TODO: allow this work (setting up the file for writing array data) # to be parallelized with dask try: for node in dt.subtree: at_root = node is dt dataset = node.to_dataset(inherit=write_inherited_coords or at_root) node_store = ( root_store if at_root else root_store.get_child_store(node.path) ) dump_to_store( dataset, node_store, writer, encoding=encoding.get(node.path), unlimited_dims=unlimited_dims.get(node.path), ) if autoclose: root_store.close() writes = writer.sync(compute=compute) finally: if compute: root_store.close() else: root_store.sync() if filepath is None: assert isinstance(target, BytesIOProxy) # created in this function return target.getbuffer() if not compute: return delayed_close_after_writes(writes, root_store) return None def _datatree_to_zarr( dt: DataTree, store: ZarrStoreLike, mode: ZarrWriteModes = "w-", encoding: Mapping[str, Any] | None = None, synchronizer=None, group: str | None = None, write_inherited_coords: bool = False, *, chunk_store: MutableMapping | str | PathLike | None = None, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Implementation of `DataTree.to_zarr`.""" if group is not None: raise NotImplementedError( "specifying a root group for the tree has not been implemented" ) if append_dim is not None: raise NotImplementedError( "specifying ``append_dim`` with ``DataTree.to_zarr`` has not been implemented" ) if encoding is None: encoding = {} # In the future, we may want to expand this check to insure all the provided encoding # options are valid. For now, this simply checks that all provided encoding keys are # groups in the datatree. 
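    # Illustrative example (hypothetical group/variable names): an encoding such as
    # {"/group1": {"temperature": {"chunks": (100,)}}} passes this check because
    # "/group1" is a group of ``dt``; the inner dict is later handed to
    # ``dump_to_store`` as the per-variable encoding for that group.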
if set(encoding) - set(dt.groups): raise ValueError( f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}" ) root_store = get_writable_zarr_store( store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, ) writer = ArrayWriter() try: for rel_path, node in dt.subtree_with_keys: at_root = node is dt dataset = node.to_dataset(inherit=write_inherited_coords or at_root) # Use a relative path for group, because absolute paths are broken # with consolidated metadata in zarr 3.1.2 and earlier: # https://github.com/zarr-developers/zarr-python/pull/3428 node_store = root_store if at_root else root_store.get_child_store(rel_path) dataset = node_store._validate_and_autodetect_region(dataset) node_store._validate_encoding(encoding) dump_to_store( dataset, node_store, writer, encoding=encoding.get(node.path), ) writes = writer.sync( compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs ) finally: if compute: root_store.close() if not compute: return delayed_close_after_writes(writes, root_store) return root_store def _get_netcdf_autoclose(dataset: Dataset, engine: T_NetcdfEngine) -> bool: """Should we close files after each write operations?""" scheduler = get_dask_scheduler() have_chunks = any(v.chunks is not None for v in dataset.variables.values()) autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"] if autoclose and engine == "scipy": raise NotImplementedError( f"Writing netCDF files with the {engine} backend " f"is not currently supported with dask's {scheduler} scheduler" ) return autoclose ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/backends/zarr.py������������������������������������������������������������0000664�0000000�0000000�00000216432�15114646760�0017763�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import base64 import json import os import struct from collections.abc import Hashable, Iterable, Mapping from typing import TYPE_CHECKING, Any, Literal, Self, cast import numpy as np import pandas as pd from xarray import coding, conventions from xarray.backends.chunks import grid_rechunk, validate_grid_chunks_alignment from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractWritableDataStore, BackendArray, BackendEntrypoint, T_PathFileOrDataStore, _encode_variable_name, _normalize_path, datatree_from_dict_with_io_cleanup, ensure_dtype_not_object, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.treenode import NodePath from xarray.core.types import ZarrWriteModes from xarray.core.utils import ( FrozenDict, HiddenKeyDict, attempt_import, 
close_on_error, emit_user_level_warning, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.namedarray.pycompat import integer_types from xarray.namedarray.utils import module_available if TYPE_CHECKING: from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ZarrArray, ZarrGroup def _get_mappers(*, storage_options, store, chunk_store): # expand str and path-like arguments store = _normalize_path(store) chunk_store = _normalize_path(chunk_store) kwargs = {} if storage_options is None: mapper = store chunk_mapper = chunk_store else: if not isinstance(store, str): raise ValueError( f"store must be a string to use storage_options. Got {type(store)}" ) if _zarr_v3(): kwargs["storage_options"] = storage_options mapper = store chunk_mapper = chunk_store else: from fsspec import get_mapper mapper = get_mapper(store, **storage_options) if chunk_store is not None: chunk_mapper = get_mapper(chunk_store, **storage_options) else: chunk_mapper = chunk_store return kwargs, mapper, chunk_mapper def _choose_default_mode( *, mode: ZarrWriteModes | None, append_dim: Hashable | None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None, ) -> ZarrWriteModes: if mode is None: if append_dim is not None: mode = "a" elif region is not None: mode = "r+" else: mode = "w-" if mode not in ["a", "a-"] and append_dim is not None: raise ValueError("cannot set append_dim unless mode='a' or mode=None") if mode not in ["a", "a-", "r+"] and region is not None: raise ValueError( "cannot set region unless mode='a', mode='a-', mode='r+' or mode=None" ) if mode not in ["w", "w-", "a", "a-", "r+"]: raise ValueError( "The only supported options for mode are 'w', " f"'w-', 'a', 'a-', and 'r+', but mode={mode!r}" ) return mode def _zarr_v3() -> bool: return module_available("zarr", minversion="3") # need some special secret attributes to tell us the dimensions DIMENSION_KEY = "_ARRAY_DIMENSIONS" ZarrFormat = Literal[2, 3] class FillValueCoder: """Handle custom logic to safely encode and decode fill values in Zarr. Possibly redundant with logic in xarray/coding/variables.py but needs to be isolated from NetCDF-specific logic. """ @classmethod def encode(cls, value: int | float | str | bytes, dtype: np.dtype[Any]) -> Any: if dtype.kind in "S": # byte string, this implies that 'value' must also be `bytes` dtype. assert isinstance(value, bytes) return base64.standard_b64encode(value).decode() elif dtype.kind in "b": # boolean return bool(value) elif dtype.kind in "iu": # todo: do we want to check for decimals? return int(value) elif dtype.kind in "f": return base64.standard_b64encode(struct.pack(" list scalar array -> scalar other -> other (no change) """ if isinstance(value, np.ndarray): encoded = value.tolist() elif isinstance(value, np.generic): encoded = value.item() else: encoded = value return encoded def has_zarr_async_index() -> bool: try: import zarr return hasattr(zarr.AsyncArray, "oindex") except (ImportError, AttributeError): return False class ZarrArrayWrapper(BackendArray): __slots__ = ("_array", "dtype", "shape") def __init__(self, zarr_array): # some callers attempt to evaluate an array if an `array` property exists on the object. # we prefix with _ to avoid this inference. # TODO type hint this? 
self._array = zarr_array self.shape = self._array.shape # preserve vlen string object dtype (GH 7328) if ( not _zarr_v3() and self._array.filters is not None and any(filt.codec_id == "vlen-utf8" for filt in self._array.filters) ): dtype = coding.strings.create_vlen_dtype(str) else: dtype = self._array.dtype self.dtype = dtype def get_array(self): return self._array def _oindex(self, key): return self._array.oindex[key] def _vindex(self, key): return self._array.vindex[key] def _getitem(self, key): return self._array[key] async def _async_getitem(self, key): if not _zarr_v3(): raise NotImplementedError( "For lazy basic async indexing with zarr, zarr-python=>v3.0.0 is required" ) async_array = self._array._async_array return await async_array.getitem(key) async def _async_oindex(self, key): if not has_zarr_async_index(): raise NotImplementedError( "For lazy orthogonal async indexing with zarr, zarr-python=>v3.1.2 is required" ) async_array = self._array._async_array return await async_array.oindex.getitem(key) async def _async_vindex(self, key): if not has_zarr_async_index(): raise NotImplementedError( "For lazy vectorized async indexing with zarr, zarr-python=>v3.1.2 is required" ) async_array = self._array._async_array return await async_array.vindex.getitem(key) def __getitem__(self, key): array = self._array if isinstance(key, indexing.BasicIndexer): method = self._getitem elif isinstance(key, indexing.VectorizedIndexer): method = self._vindex elif isinstance(key, indexing.OuterIndexer): method = self._oindex return indexing.explicit_indexing_adapter( key, array.shape, indexing.IndexingSupport.VECTORIZED, method ) # if self.ndim == 0: # could possibly have a work-around for 0d data here async def async_getitem(self, key): array = self._array if isinstance(key, indexing.BasicIndexer): method = self._async_getitem elif isinstance(key, indexing.VectorizedIndexer): method = self._async_vindex elif isinstance(key, indexing.OuterIndexer): method = self._async_oindex return await indexing.async_explicit_indexing_adapter( key, array.shape, indexing.IndexingSupport.VECTORIZED, method ) def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name): """ Given encoding chunks (possibly None or []) and variable chunks (possibly None or []). """ # zarr chunk spec: # chunks : int or tuple of ints, optional # Chunk shape. If not provided, will be guessed from shape and dtype. # if there are no chunks in encoding and the variable data is a numpy # array, then we let zarr use its own heuristics to pick the chunks if not var_chunks and not enc_chunks: return None # if there are no chunks in encoding but there are dask chunks, we try to # use the same chunks in zarr # However, zarr chunks needs to be uniform for each array # https://zarr-specs.readthedocs.io/en/latest/v2/v2.0.html#chunks # while dask chunks can be variable sized # https://dask.pydata.org/en/latest/array-design.html#chunks if var_chunks and not enc_chunks: if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks): raise ValueError( "Zarr requires uniform chunk sizes except for final chunk. " f"Variable named {name!r} has incompatible dask chunks: {var_chunks!r}. " "Consider rechunking using `chunk()`." ) if any((chunks[0] < chunks[-1]) for chunks in var_chunks): raise ValueError( "Final chunk of Zarr array must be the same size or smaller " f"than the first. Variable named {name!r} has incompatible Dask chunks {var_chunks!r}." "Consider either rechunking using `chunk()` or instead deleting " "or modifying `encoding['chunks']`." 
) # return the first chunk for each dimension return tuple(chunk[0] for chunk in var_chunks) # From here on, we are dealing with user-specified chunks in encoding # zarr allows chunks to be an integer, in which case it uses the same chunk # size on each dimension. # Here we re-implement this expansion ourselves. That makes the logic of # checking chunk compatibility easier if isinstance(enc_chunks, integer_types): enc_chunks_tuple = ndim * (enc_chunks,) else: enc_chunks_tuple = tuple(enc_chunks) if len(enc_chunks_tuple) != ndim: # throw away encoding chunks, start over return _determine_zarr_chunks( None, var_chunks, ndim, name, ) for x in enc_chunks_tuple: if not isinstance(x, int): raise TypeError( "zarr chunk sizes specified in `encoding['chunks']` " "must be an int or a tuple of ints. " f"Instead found encoding['chunks']={enc_chunks_tuple!r} " f"for variable named {name!r}." ) # if there are chunks in encoding and the variable data is a numpy array, # we use the specified chunks if not var_chunks: return enc_chunks_tuple return enc_chunks_tuple def _get_zarr_dims_and_attrs(zarr_obj, dimension_key, try_nczarr): # Zarr V3 explicitly stores the dimension names in the metadata try: # if this exists, we are looking at a Zarr V3 array # convert None to empty tuple dimensions = zarr_obj.metadata.dimension_names or () except AttributeError: # continue to old code path pass else: attributes = dict(zarr_obj.attrs) if len(zarr_obj.shape) != len(dimensions): raise KeyError( "Zarr object is missing the `dimension_names` metadata which is " "required for xarray to determine variable dimensions." ) return dimensions, attributes # Zarr arrays do not have dimensions. To get around this problem, we add # an attribute that specifies the dimension. We have to hide this attribute # when we send the attributes to the user. # zarr_obj can be either a zarr group or zarr array try: # Xarray-Zarr dimensions = zarr_obj.attrs[dimension_key] except KeyError as e: if not try_nczarr: raise KeyError( f"Zarr object is missing the attribute `{dimension_key}`, which is " "required for xarray to determine variable dimensions." ) from e # NCZarr defines dimensions through metadata in .zarray zarray_path = os.path.join(zarr_obj.path, ".zarray") if _zarr_v3(): import asyncio zarray_str = asyncio.run(zarr_obj.store.get(zarray_path)).to_bytes() else: zarray_str = zarr_obj.store.get(zarray_path) zarray = json.loads(zarray_str) try: # NCZarr uses Fully Qualified Names dimensions = [ os.path.basename(dim) for dim in zarray["_NCZARR_ARRAY"]["dimrefs"] ] except KeyError as e: raise KeyError( f"Zarr object is missing the attribute `{dimension_key}` and the NCZarr metadata, " "which are required for xarray to determine variable dimensions." 
) from e nc_attrs = [attr for attr in zarr_obj.attrs if attr.lower().startswith("_nc")] attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key] + nc_attrs) return dimensions, attributes def extract_zarr_variable_encoding( variable, raise_on_invalid=False, name=None, *, zarr_format: ZarrFormat, ): """ Extract zarr encoding dictionary from xarray Variable Parameters ---------- variable : Variable raise_on_invalid : bool, optional name: str | Hashable, optional zarr_format: Literal[2,3] Returns ------- encoding : dict Zarr encoding for `variable` """ encoding = variable.encoding.copy() safe_to_drop = {"source", "original_shape", "preferred_chunks"} valid_encodings = { "chunks", "shards", "compressor", # TODO: delete when min zarr >=3 "compressors", "filters", "serializer", "cache_metadata", "write_empty_chunks", "chunk_key_encoding", } if zarr_format == 3: valid_encodings.add("fill_value") for k in safe_to_drop: if k in encoding: del encoding[k] if raise_on_invalid: invalid = [k for k in encoding if k not in valid_encodings] if "fill_value" in invalid and zarr_format == 2: msg = " Use `_FillValue` to set the Zarr array `fill_value`" else: msg = "" if invalid: raise ValueError( f"unexpected encoding parameters for zarr backend: {invalid!r}." + msg ) else: for k in list(encoding): if k not in valid_encodings: del encoding[k] chunks = _determine_zarr_chunks( enc_chunks=encoding.get("chunks"), var_chunks=variable.chunks, ndim=variable.ndim, name=name, ) if _zarr_v3() and chunks is None: chunks = "auto" encoding["chunks"] = chunks return encoding # Function below is copied from conventions.encode_cf_variable. # The only change is to raise an error for object dtypes. def encode_zarr_variable(var, needs_copy=True, name=None): """ Converts an Variable into an Variable which follows some of the CF conventions: - Nans are masked using _FillValue (or the deprecated missing_value) - Rescaling via: scale_factor and add_offset - datetimes are converted to the CF 'units since time' format - dtype encodings are enforced. Parameters ---------- var : Variable A variable holding un-encoded data. Returns ------- out : Variable A variable which has been encoded as described above. """ var = conventions.encode_cf_variable(var, name=name) var = ensure_dtype_not_object(var, name=name) # zarr allows unicode, but not variable-length strings, so it's both # simpler and more compact to always encode as UTF-8 explicitly. # TODO: allow toggling this explicitly via dtype in encoding. # TODO: revisit this now that Zarr _does_ allow variable-length strings coder = coding.strings.EncodedStringCoder(allows_unicode=True) var = coder.encode(var, name=name) var = coding.strings.ensure_fixed_length_bytes(var) return var def _validate_datatypes_for_zarr_append(vname, existing_var, new_var): """If variable exists in the store, confirm dtype of the data to append is compatible with existing dtype. """ if ( np.issubdtype(new_var.dtype, np.number) or np.issubdtype(new_var.dtype, np.datetime64) or np.issubdtype(new_var.dtype, np.bool_) or new_var.dtype == object or (new_var.dtype.kind in ("S", "U") and existing_var.dtype == object) ): # We can skip dtype equality checks under two conditions: (1) if the var to append is # new to the dataset, because in this case there is no existing var to compare it to; # or (2) if var to append's dtype is known to be easy-to-append, because in this case # we can be confident appending won't cause problems. 
Examples of dtypes which are not # easy-to-append include length-specified strings of type `|S*` or ` Self: zarr_group = self.zarr_group.require_group(group) return type(self)( zarr_group=zarr_group, mode=self._mode, consolidate_on_close=self._consolidate_on_close, append_dim=self._append_dim, write_region=self._write_region, safe_chunks=self._safe_chunks, write_empty=self._write_empty, close_store_on_close=self._close_store_on_close, use_zarr_fill_value_as_mask=self._use_zarr_fill_value_as_mask, align_chunks=self._align_chunks, cache_members=self._cache_members, ) @property def members(self) -> dict[str, ZarrArray | ZarrGroup]: """ Model the arrays and groups contained in self.zarr_group as a dict. If `self._cache_members` is true, the dict is cached. Otherwise, it is retrieved from storage. """ if not self._cache_members: return self._fetch_members() else: return self._members def _fetch_members(self) -> dict[str, ZarrArray | ZarrGroup]: """ Get the arrays and groups defined in the zarr group modelled by this Store """ import zarr if zarr.__version__ >= "3": return dict(self.zarr_group.members()) else: return dict(self.zarr_group.items()) def array_keys(self) -> tuple[str, ...]: from zarr import Array as ZarrArray return tuple( key for (key, node) in self.members.items() if isinstance(node, ZarrArray) ) def arrays(self) -> tuple[tuple[str, ZarrArray], ...]: from zarr import Array as ZarrArray return tuple( (key, node) for (key, node) in self.members.items() if isinstance(node, ZarrArray) ) @property def ds(self): # TODO: consider deprecating this in favor of zarr_group return self.zarr_group def open_store_variable(self, name): zarr_array = self.members[name] data = indexing.LazilyIndexedArray(ZarrArrayWrapper(zarr_array)) try_nczarr = self._mode == "r" dimensions, attributes = _get_zarr_dims_and_attrs( zarr_array, DIMENSION_KEY, try_nczarr ) attributes = dict(attributes) encoding = { "chunks": zarr_array.chunks, "preferred_chunks": dict(zip(dimensions, zarr_array.chunks, strict=True)), } if _zarr_v3(): encoding.update( { "compressors": zarr_array.compressors, "filters": zarr_array.filters, "shards": zarr_array.shards, } ) if self.zarr_group.metadata.zarr_format == 3: encoding.update({"serializer": zarr_array.serializer}) else: encoding.update( { "compressor": zarr_array.compressor, "filters": zarr_array.filters, } ) if self._use_zarr_fill_value_as_mask: # Setting this attribute triggers CF decoding for missing values # by interpreting Zarr's fill_value to mean the same as netCDF's _FillValue if zarr_array.fill_value is not None: attributes["_FillValue"] = zarr_array.fill_value elif "_FillValue" in attributes: attributes["_FillValue"] = FillValueCoder.decode( attributes["_FillValue"], zarr_array.dtype ) return Variable(dimensions, data, attributes, encoding) def get_variables(self): return FrozenDict((k, self.open_store_variable(k)) for k in self.array_keys()) def get_attrs(self): return { k: v for k, v in self.zarr_group.attrs.asdict().items() if not k.lower().startswith("_nc") } def get_dimensions(self): try_nczarr = self._mode == "r" dimensions = {} for _k, v in self.arrays(): dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr) for d, s in zip(dim_names, v.shape, strict=True): if d in dimensions and dimensions[d] != s: raise ValueError( f"found conflicting lengths for dimension {d} " f"({s} != {dimensions[d]})" ) dimensions[d] = s return dimensions def set_dimensions(self, variables, unlimited_dims=None): if unlimited_dims is not None: raise NotImplementedError( 
"Zarr backend doesn't know how to handle unlimited dimensions" ) def set_attributes(self, attributes): _put_attrs(self.zarr_group, attributes) def encode_variable(self, variable, name=None): variable = encode_zarr_variable(variable, name=name) return variable def encode_attribute(self, a): return encode_zarr_attr_value(a) def store( self, variables, attributes, check_encoding_set=frozenset(), writer=None, unlimited_dims=None, ): """ Top level method for putting data on this store, this method: - encodes variables/attributes - sets dimensions - sets variables Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. dimension on which the zarray will be appended only needed in append mode """ if TYPE_CHECKING: import zarr else: zarr = attempt_import("zarr") if self._mode == "w": # always overwrite, so we don't care about existing names, # and consistency of encoding new_variable_names = set(variables) existing_keys = {} existing_variable_names = {} else: existing_keys = self.array_keys() existing_variable_names = { vn for vn in variables if _encode_variable_name(vn) in existing_keys } new_variable_names = set(variables) - existing_variable_names if self._mode == "r+" and ( new_names := [k for k in variables if k not in existing_keys] ): raise ValueError( f"dataset contains non-pre-existing variables {new_names!r}, " "which is not allowed in ``xarray.Dataset.to_zarr()`` with " "``mode='r+'``. To allow writing new variables, set ``mode='a'``." ) if self._append_dim is not None and self._append_dim not in existing_keys: # For dimensions without coordinate values, we must parse # the _ARRAY_DIMENSIONS attribute on *all* arrays to check if it # is a valid existing dimension name. # TODO: This `get_dimensions` method also does shape checking # which isn't strictly necessary for our check. existing_dims = self.get_dimensions() if self._append_dim not in existing_dims: raise ValueError( f"append_dim={self._append_dim!r} does not match any existing " f"dataset dimensions {existing_dims}" ) variables_encoded, attributes = self.encode( {vn: variables[vn] for vn in new_variable_names}, attributes ) if existing_variable_names: # We make sure that values to be appended are encoded *exactly* # as the current values in the store. # To do so, we decode variables directly to access the proper encoding, # without going via xarray.Dataset to avoid needing to load # index variables into memory. existing_vars, _, _ = conventions.decode_cf_variables( variables={ k: self.open_store_variable(name=k) for k in existing_variable_names }, # attributes = {} since we don't care about parsing the global # "coordinates" attribute attributes={}, ) # Modified variables must use the same encoding as the store. 
vars_with_encoding = {} for vn in existing_variable_names: _validate_datatypes_for_zarr_append( vn, existing_vars[vn], variables[vn] ) vars_with_encoding[vn] = variables[vn].copy(deep=False) vars_with_encoding[vn].encoding = existing_vars[vn].encoding vars_with_encoding, _ = self.encode(vars_with_encoding, {}) variables_encoded.update(vars_with_encoding) for var_name in existing_variable_names: variables_encoded[var_name] = _validate_and_transpose_existing_dims( var_name, variables_encoded[var_name], existing_vars[var_name], self._write_region, self._append_dim, ) if self._mode not in ["r", "r+"]: self.set_attributes(attributes) self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims) # if we are appending to an append_dim, only write either # - new variables not already present, OR # - variables with the append_dim in their dimensions # We do NOT overwrite other variables. if self._mode == "a-" and self._append_dim is not None: variables_to_set = { k: v for k, v in variables_encoded.items() if (k not in existing_variable_names) or (self._append_dim in v.dims) } else: variables_to_set = variables_encoded self.set_variables( variables_to_set, check_encoding_set, writer, unlimited_dims=unlimited_dims ) if self._consolidate_on_close: kwargs = {} if _zarr_v3(): kwargs["zarr_format"] = self.zarr_group.metadata.zarr_format zarr.consolidate_metadata(self.zarr_group.store, **kwargs) def _open_existing_array(self, *, name) -> ZarrArray: import zarr from zarr import Array as ZarrArray # TODO: if mode="a", consider overriding the existing variable # metadata. This would need some case work properly with region # and append_dim. if self._write_empty is not None: # Write to zarr_group.chunk_store instead of zarr_group.store # See https://github.com/pydata/xarray/pull/8326#discussion_r1365311316 for a longer explanation # The open_consolidated() enforces a mode of r or r+ # (and to_zarr with region provided enforces a read mode of r+), # and this function makes sure the resulting Group has a store of type ConsolidatedMetadataStore # and a 'normal Store subtype for chunk_store. # The exact type depends on if a local path was used, or a URL of some sort, # but the point is that it's not a read-only ConsolidatedMetadataStore. # It is safe to write chunk data to the chunk_store because no metadata would be changed by # to_zarr with the region parameter: # - Because the write mode is enforced to be r+, no new variables can be added to the store # (this is also checked and enforced in xarray.backends.api.py::to_zarr()). # - Existing variables already have their attrs included in the consolidated metadata file. # - The size of dimensions can not be expanded, that would require a call using `append_dim` # which is mutually exclusive with `region` empty: dict[str, bool] | dict[str, dict[str, bool]] if _zarr_v3(): empty = dict(config={"write_empty_chunks": self._write_empty}) else: empty = dict(write_empty_chunks=self._write_empty) zarr_array = zarr.open( store=( self.zarr_group.store if _zarr_v3() else self.zarr_group.chunk_store ), # TODO: see if zarr should normalize these strings. 
path="/".join([self.zarr_group.name.rstrip("/"), name]).lstrip("/"), **empty, ) else: zarr_array = self.zarr_group[name] return cast(ZarrArray, zarr_array) def _create_new_array( self, *, name, shape, dtype, fill_value, encoding, attrs ) -> ZarrArray: if coding.strings.check_vlen_dtype(dtype) is str: dtype = str if self._write_empty is not None: if ( "write_empty_chunks" in encoding and encoding["write_empty_chunks"] != self._write_empty ): raise ValueError( 'Differing "write_empty_chunks" values in encoding and parameters' f'Got {encoding["write_empty_chunks"] = } and {self._write_empty = }' ) else: encoding["write_empty_chunks"] = self._write_empty if _zarr_v3(): # zarr v3 deprecated origin and write_empty_chunks # instead preferring to pass them via the config argument encoding["config"] = {} for c in ("write_empty_chunks", "order"): if c in encoding: encoding["config"][c] = encoding.pop(c) zarr_array = self.zarr_group.create( name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding, ) zarr_array = _put_attrs(zarr_array, attrs) return zarr_array def set_variables( self, variables: dict[str, Variable], check_encoding_set, writer, unlimited_dims=None, ): """ This provides a centralized method to set the variables on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ existing_keys = self.array_keys() is_zarr_v3_format = _zarr_v3() and self.zarr_group.metadata.zarr_format == 3 for vn, v in variables.items(): name = _encode_variable_name(vn) attrs = v.attrs.copy() dims = v.dims dtype = v.dtype shape = v.shape if self._use_zarr_fill_value_as_mask: fill_value = attrs.pop("_FillValue", None) else: fill_value = v.encoding.pop("fill_value", None) if fill_value is None and v.dtype.kind == "f": # For floating point data, Xarray defaults to a fill_value # of NaN (unlike Zarr, which uses zero): # https://github.com/pydata/xarray/issues/10646 fill_value = np.nan if "_FillValue" in attrs: # replace with encoded fill value fv = attrs.pop("_FillValue") if fv is not None: attrs["_FillValue"] = FillValueCoder.encode(fv, dtype) # _FillValue is never a valid encoding for Zarr # TODO: refactor this logic so we don't need to check this here if "_FillValue" in v.encoding: if v.encoding.get("_FillValue") is not None: raise ValueError("Zarr does not support _FillValue in encoding.") else: del v.encoding["_FillValue"] zarr_shape = None write_region = self._write_region if self._write_region is not None else {} write_region = {dim: write_region.get(dim, slice(None)) for dim in dims} if self._mode != "w" and name in existing_keys: # existing variable zarr_array = self._open_existing_array(name=name) if self._append_dim is not None and self._append_dim in dims: # resize existing variable append_axis = dims.index(self._append_dim) assert write_region[self._append_dim] == slice(None) write_region[self._append_dim] = slice( zarr_array.shape[append_axis], None ) new_shape = ( zarr_array.shape[:append_axis] + (zarr_array.shape[append_axis] + v.shape[append_axis],) + zarr_array.shape[append_axis + 1 :] ) zarr_array.resize(new_shape) zarr_shape = zarr_array.shape region = tuple(write_region[dim] for dim in dims) # We need to do this for both new and existing variables to ensure we're not # writing to a partial chunk, even though we don't use 
the `encoding` value # when writing to an existing variable. See # https://github.com/pydata/xarray/issues/8371 for details. # Note: Ideally there should be two functions, one for validating the chunks and # another one for extracting the encoding. encoding = extract_zarr_variable_encoding( v, raise_on_invalid=vn in check_encoding_set, name=vn, zarr_format=3 if is_zarr_v3_format else 2, ) if self._align_chunks and isinstance(encoding["chunks"], tuple): v = grid_rechunk( v=v, enc_chunks=encoding["chunks"], region=region, ) if self._safe_chunks and isinstance(encoding["chunks"], tuple): # the hard case # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk # this avoids the need to get involved in zarr synchronization / locking # From zarr docs: # "If each worker in a parallel computation is writing to a # separate region of the array, and if region boundaries are perfectly aligned # with chunk boundaries, then no synchronization is required." # TODO: incorporate synchronizer to allow writes from multiple dask # threads shape = zarr_shape or v.shape validate_grid_chunks_alignment( nd_v_chunks=v.chunks, enc_chunks=encoding["chunks"], region=region, allow_partial_chunks=self._mode != "r+", name=name, backend_shape=shape, ) if self._mode == "w" or name not in existing_keys: # new variable encoded_attrs = {k: self.encode_attribute(v) for k, v in attrs.items()} # the magic for storing the hidden dimension data if is_zarr_v3_format: encoding["dimension_names"] = dims else: encoded_attrs[DIMENSION_KEY] = dims encoding["overwrite"] = self._mode == "w" zarr_array = self._create_new_array( name=name, dtype=dtype, shape=shape, fill_value=fill_value, encoding=encoding, attrs=encoded_attrs, ) writer.add(v.data, zarr_array, region) def sync(self) -> None: pass def close(self) -> None: if self._close_store_on_close: self.zarr_group.store.close() def _auto_detect_regions(self, ds, region): for dim, val in region.items(): if val != "auto": continue if dim not in ds._variables: # unindexed dimension region[dim] = slice(0, ds.sizes[dim]) continue variable = conventions.decode_cf_variable( dim, self.open_store_variable(dim).compute() ) assert variable.dims == (dim,) index = pd.Index(variable.data) idxs = index.get_indexer(ds[dim].data) if (idxs == -1).any(): raise KeyError( f"Not all values of coordinate '{dim}' in the new array were" " found in the original store. Writing to a zarr region slice" " requires that no dimensions or metadata are changed by the write." ) if (np.diff(idxs) != 1).any(): raise ValueError( f"The auto-detected region of coordinate '{dim}' for writing new data" " to the original store had non-contiguous indices. Writing to a zarr" " region slice requires that the new data constitute a contiguous subset" " of the original store." ) region[dim] = slice(idxs[0], idxs[-1] + 1) return region def _validate_and_autodetect_region(self, ds: Dataset) -> Dataset: if self._write_region is None: return ds region = self._write_region if region == "auto": region = dict.fromkeys(ds.dims, "auto") if not isinstance(region, dict): raise TypeError(f"``region`` must be a dict, got {type(region)}") if any(v == "auto" for v in region.values()): if self._mode not in ["r+", "a"]: raise ValueError( f"``mode`` must be 'r+' or 'a' when using ``region='auto'``, got {self._mode!r}" ) region = self._auto_detect_regions(ds, region) # validate before attempting to auto-detect since the auto-detection # should always return a valid slice. 
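# Hedged example of the call pattern being validated here (store path, dimension name
# and slice bounds are hypothetical):
#   ds_subset.to_zarr("store.zarr", region="auto")
# which, after auto-detection, behaves like an explicit
#   ds_subset.to_zarr("store.zarr", region={"time": slice(100, 200)})
# provided the subset's coordinates form a contiguous block of the on-disk coordinates.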
for k, v in region.items(): if k not in ds.dims: raise ValueError( f"all keys in ``region`` are not in Dataset dimensions, got " f"{list(region)} and {list(ds.dims)}" ) if not isinstance(v, slice): raise TypeError( "all values in ``region`` must be slice objects, got " f"region={region}" ) if v.step not in {1, None}: raise ValueError( "step on all slices in ``region`` must be 1 or None, got " f"region={region}" ) non_matching_vars = [ k for k, v in ds.variables.items() if not set(region).intersection(v.dims) ] if region and non_matching_vars: raise ValueError( f"when setting `region` explicitly in to_zarr(), all " f"variables in the dataset to write must have at least " f"one dimension in common with the region's dimensions " f"{list(region.keys())}, but that is not " f"the case for some variables here. To drop these variables " f"from this dataset before exporting to zarr, write: " f".drop_vars({non_matching_vars!r})" ) if self._append_dim is not None and self._append_dim in region: raise ValueError( f"cannot list the same dimension in both ``append_dim`` and " f"``region`` with to_zarr(), got {self._append_dim} in both" ) self._write_region = region # can't modify indexes with region writes return ds.drop_vars(ds.indexes) def _validate_encoding(self, encoding) -> None: if encoding and self._mode in ["a", "a-", "r+"]: existing_var_names = self.array_keys() for var_name in existing_var_names: if var_name in encoding: raise ValueError( f"variable {var_name!r} already exists, but encoding was provided" ) def open_zarr( store, group=None, synchronizer=None, chunks="auto", decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=None, overwrite_encoded_chunks=False, chunk_store=None, storage_options=None, decode_timedelta=None, use_cftime=None, zarr_version=None, zarr_format=None, use_zarr_fill_value_as_mask=None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, create_default_indexes=True, **kwargs, ): """Load and decode a dataset from a Zarr store. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute or must have NCZarr format. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, optional Group path. (a.k.a. `path` in zarr terminology.) chunks : int, dict, 'auto' or None, default: 'auto' If provided, used to load the data into dask arrays. - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. See dask chunking for more details. 
overwrite_encoded_chunks : bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. By default (`consolidate=None`), attempts to read consolidated metadata, falling back to read non-consolidated metadata if that fails. When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. chunk_store : MutableMapping, optional A separate Zarr store only for chunk data. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. use_zarr_fill_value_as_mask : bool, optional If True, use the zarr Array ``fill_value`` to mask the data, the same as done for NetCDF data with ``_FillValue`` or ``missing_value`` attributes. 
If False, the ``fill_value`` is ignored and the data are not masked. If None, this defaults to True for ``zarr_version=2`` and False for ``zarr_version=3``. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the ``ChunkManagerEntrypoint.from_array`` method used to create chunked arrays, via whichever chunk manager is specified through the ``chunked_array_type`` kwarg. Defaults to ``{'manager': 'dask'}``, meaning additional kwargs will be passed eventually to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset open_mfdataset References ---------- https://zarr.readthedocs.io/ """ from xarray.backends.api import open_dataset if from_array_kwargs is None: from_array_kwargs = {} if chunks == "auto": try: guess_chunkmanager( chunked_array_type ) # attempt to import that parallel backend chunks = {} except (ValueError, ImportError): chunks = None if kwargs: raise TypeError( "open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys()) ) backend_kwargs = { "synchronizer": synchronizer, "consolidated": consolidated, "overwrite_encoded_chunks": overwrite_encoded_chunks, "chunk_store": chunk_store, "storage_options": storage_options, "zarr_version": zarr_version, "zarr_format": zarr_format, } ds = open_dataset( filename_or_obj=store, group=group, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine="zarr", chunks=chunks, drop_variables=drop_variables, create_default_indexes=create_default_indexes, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, backend_kwargs=backend_kwargs, decode_timedelta=decode_timedelta, use_cftime=use_cftime, zarr_version=zarr_version, use_zarr_fill_value_as_mask=use_zarr_fill_value_as_mask, ) return ds class ZarrBackendEntrypoint(BackendEntrypoint): """ Backend for ".zarr" files based on the zarr package. For more information about the underlying library, visit: https://zarr.readthedocs.io/en/stable See Also -------- backends.ZarrStore """ description = "Open zarr files (.zarr) using zarr in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ZarrBackendEntrypoint.html" supports_groups = True def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: if isinstance(filename_or_obj, str | os.PathLike): # allow a trailing slash to account for an autocomplete # adding it. 
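# e.g. "data.zarr" and "data.zarr/" both match, while "data.nc" or a non-path store
# object falls through and returns False below.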
_, ext = os.path.splitext(str(filename_or_obj).rstrip("/")) return ext in [".zarr"] return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, store=None, engine=None, use_zarr_fill_value_as_mask=None, cache_members: bool = True, ) -> Dataset: filename_or_obj = _normalize_path(filename_or_obj) if not store: store = ZarrStore.open_group( filename_or_obj, group=group, mode=mode, synchronizer=synchronizer, consolidated=consolidated, consolidate_on_close=False, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, use_zarr_fill_value_as_mask=None, zarr_format=zarr_format, cache_members=cache_members, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, ) -> DataTree: filename_or_obj = _normalize_path(filename_or_obj) groups_dict = self.open_groups_as_dict( filename_or_obj=filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, mode=mode, synchronizer=synchronizer, consolidated=consolidated, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, ) -> dict[str, Dataset]: filename_or_obj = _normalize_path(filename_or_obj) # Check for a group and make it a parent if it exists if group: parent = str(NodePath("/") / NodePath(group)) else: parent = str(NodePath("/")) stores = ZarrStore.open_store( filename_or_obj, group=parent, mode=mode, synchronizer=synchronizer, consolidated=consolidated, consolidate_on_close=False, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, ) groups_dict = {} for path_group, store in stores.items(): store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): group_ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, 
decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict def _iter_zarr_groups(root: ZarrGroup, parent: str = "/") -> Iterable[str]: parent_nodepath = NodePath(parent) yield str(parent_nodepath) for path, group in root.groups(): gpath = parent_nodepath / path yield from _iter_zarr_groups(group, parent=str(gpath)) def _get_open_params( store, mode, synchronizer, group, consolidated, consolidate_on_close, chunk_store, storage_options, zarr_version, use_zarr_fill_value_as_mask, zarr_format, ): if TYPE_CHECKING: import zarr else: zarr = attempt_import("zarr") # zarr doesn't support pathlib.Path objects yet. zarr-python#601 if isinstance(store, os.PathLike): store = os.fspath(store) open_kwargs = dict( # mode='a-' is a handcrafted xarray specialty mode="a" if mode == "a-" else mode, synchronizer=synchronizer, path=group, ) open_kwargs["storage_options"] = storage_options zarr_format = _handle_zarr_version_or_format( zarr_version=zarr_version, zarr_format=zarr_format ) if _zarr_v3(): open_kwargs["zarr_format"] = zarr_format else: open_kwargs["zarr_version"] = zarr_format if chunk_store is not None: open_kwargs["chunk_store"] = chunk_store if consolidated is None: consolidated = False if _zarr_v3(): # TODO: replace AssertionError after https://github.com/zarr-developers/zarr-python/issues/2821 is resolved missing_exc = AssertionError else: missing_exc = zarr.errors.GroupNotFoundError if _zarr_v3(): # zarr 3.0.8 and earlier did not support this property - it was effectively assumed true if not getattr(store, "supports_consolidated_metadata", True): consolidated = consolidate_on_close = False if consolidated in [None, True]: # open the root of the store, in case there is metadata consolidated there group = open_kwargs.pop("path") if consolidated: # TODO: an option to pass the metadata_key keyword zarr_root_group = zarr.open_consolidated(store, **open_kwargs) elif consolidated is None: # same but with more error handling in case no consolidated metadata found try: zarr_root_group = zarr.open_consolidated(store, **open_kwargs) except (ValueError, KeyError): # ValueError in zarr-python 3.x, KeyError in 2.x. try: zarr_root_group = zarr.open_group(store, **open_kwargs) emit_user_level_warning( "Failed to open Zarr store with consolidated metadata, " "but successfully read with non-consolidated metadata. " "This is typically much slower for opening a dataset. " "To silence this warning, consider:\n" "1. Consolidating metadata in this existing store with " "zarr.consolidate_metadata().\n" "2. Explicitly setting consolidated=False, to avoid trying " "to read consolidate metadata, or\n" "3. 
Explicitly setting consolidated=True, to raise an " "error in this case instead of falling back to try " "reading non-consolidated metadata.", RuntimeWarning, ) except missing_exc as err: raise FileNotFoundError( f"No such file or directory: '{store}'" ) from err # but the user should still receive a DataTree whose root is the group they asked for if group and group != "/": zarr_group = zarr_root_group[group.removeprefix("/")] else: zarr_group = zarr_root_group else: if _zarr_v3(): # we have determined that we don't want to use consolidated metadata # so we set that to False to avoid trying to read it open_kwargs["use_consolidated"] = False zarr_group = zarr.open_group(store, **open_kwargs) close_store_on_close = zarr_group.store is not store # we use this to determine how to handle fill_value is_zarr_v3_format = _zarr_v3() and zarr_group.metadata.zarr_format == 3 if use_zarr_fill_value_as_mask is None: if is_zarr_v3_format: # for new data, we use a better default use_zarr_fill_value_as_mask = False else: # this was the default for v2 and should apply to most existing Zarr data use_zarr_fill_value_as_mask = True return ( zarr_group, consolidate_on_close, close_store_on_close, use_zarr_fill_value_as_mask, ) def _handle_zarr_version_or_format( *, zarr_version: ZarrFormat | None, zarr_format: ZarrFormat | None ) -> ZarrFormat | None: """handle the deprecated zarr_version kwarg and return zarr_format""" if ( zarr_format is not None and zarr_version is not None and zarr_format != zarr_version ): raise ValueError( f"zarr_format {zarr_format} does not match zarr_version {zarr_version}, please only set one" ) if zarr_version is not None: emit_user_level_warning( "zarr_version is deprecated, use zarr_format", FutureWarning ) return zarr_version return zarr_format BACKEND_ENTRYPOINTS["zarr"] = ("zarr", ZarrBackendEntrypoint)

xarray-2025.12.0/xarray/coders.py

""" This module provides coder objects that encapsulate the "encoding/decoding" process.
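
A minimal usage sketch (the file name is hypothetical; exact keyword support depends on
the installed xarray version)::

    import xarray as xr
    from xarray.coders import CFDatetimeCoder

    ds = xr.open_dataset("data.nc", decode_times=CFDatetimeCoder(use_cftime=True))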
""" from xarray.coding.times import CFDatetimeCoder, CFTimedeltaCoder __all__ = ["CFDatetimeCoder", "CFTimedeltaCoder"] �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/���������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0016114�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/__init__.py����������������������������������������������������������0000664�0000000�0000000�00000000000�15114646760�0020213�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/calendar_ops.py������������������������������������������������������0000664�0000000�0000000�00000037424�15114646760�0021132�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import numpy as np import pandas as pd from xarray.coding.cftime_offsets import date_range_like, get_date_type from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.times import ( _should_cftime_be_used, convert_times, ) from xarray.core.common import ( _contains_datetime_like_objects, full_like, is_np_datetime_like, ) try: import cftime except ImportError: cftime = None _CALENDARS_WITHOUT_YEAR_ZERO = [ "gregorian", "proleptic_gregorian", "julian", "standard", ] def convert_calendar( obj, calendar, dim="time", align_on=None, missing=None, use_cftime=None, ): """Transform a time-indexed Dataset or DataArray to one that uses another calendar. This function only converts the individual timestamps; it does not modify any data except in dropping invalid/surplus dates, or inserting values for missing dates. If the source and target calendars are both from a standard type, only the type of the time array is modified. When converting to a calendar with a leap year from to a calendar without a leap year, the 29th of February will be removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving the `360_day` calendar, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters ---------- obj : DataArray or Dataset Input DataArray or Dataset with a time coordinate of a valid dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime`). 
calendar : str The target calendar name. dim : str Name of the time coordinate in the input DataArray or Dataset. align_on : {None, 'date', 'year', 'random'} Must be specified when either the source or target is a `"360_day"` calendar; ignored otherwise. See Notes. missing : any, optional By default, i.e. if the value is None, this method will simply attempt to convert the dates in the source calendar to the same dates in the target calendar, and drop any of those that are not possible to represent. If a value is provided, a new time coordinate will be created in the target calendar with the same frequency as the original time coordinate; for any dates that are not present in the source, the data will be filled with this value. Note that using this mode requires that the source data have an inferable frequency; for more information see :py:func:`xarray.infer_freq`. For certain frequency, source, and target calendar combinations, this could result in many missing values, see notes. use_cftime : bool, optional Whether to use cftime objects in the output, only used if `calendar` is one of {"proleptic_gregorian", "gregorian" or "standard"}. If True, the new time axis uses cftime objects. If None (default), it uses :py:class:`numpy.datetime64` values if the date range permits it, and :py:class:`cftime.datetime` objects if not. If False, it uses :py:class:`numpy.datetime64` or fails. Returns ------- Copy of source with the time coordinate converted to the target calendar. If `missing` was None (default), invalid dates in the new calendar are dropped, but missing dates are not inserted. If `missing` was given, the new data is reindexed to have a time axis with the same frequency as the source, but in the new calendar; any missing datapoints are filled with `missing`. Notes ----- Passing a value to `missing` is only usable if the source's time coordinate as an inferable frequencies (see :py:func:`~xarray.infer_freq`) and is only appropriate if the target coordinate, generated from this frequency, has dates equivalent to the source. It is usually **not** appropriate to use this mode with: - Period-end frequencies: 'A', 'Y', 'Q' or 'M', in opposition to 'AS' 'YS', 'QS' and 'MS' - Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1` or 'mH' where 24 % m != 0). If one of the source or target calendars is `"360_day"`, `align_on` must be specified and two options are offered. "year" The dates are translated according to their relative position in the year, ignoring their original month and day information, meaning that the missing/surplus days are added/removed at regular intervals. From a `360_day` to a standard calendar, the output will be missing the following dates (day of year in parentheses): To a leap year: January 31st (31), March 31st (91), June 1st (153), July 31st (213), September 31st (275) and November 30th (335). To a non-leap year: February 6th (36), April 19th (109), July 2nd (183), September 12th (255), November 25th (329). From a standard calendar to a `"360_day"`, the following dates in the source array will be dropped: From a leap year: January 31st (31), April 1st (92), June 1st (153), August 1st (214), September 31st (275), December 1st (336) From a non-leap year: February 6th (37), April 20th (110), July 2nd (183), September 13th (256), November 25th (329) This option is best used on daily and subdaily data. "date" The month/day information is conserved and invalid dates are dropped from the output. 
This means that when converting from a `"360_day"` to a standard calendar, all 31sts (Jan, March, May, July, August, October and December) will be missing as there is no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. "random" Similar to "year", each day of year of the source is mapped to another day of year of the target. However, instead of having always the same missing days according the source and target years, here 5 days are chosen randomly, one for each fifth of the year. However, February 29th is always missing when converting to a leap year, or its value is dropped when converting from a leap year. This is similar to the method used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1). This option is best used on daily data. """ from xarray.core.dataarray import DataArray time = obj[dim] if not _contains_datetime_like_objects(time.variable): raise ValueError(f"Coordinate {dim} must contain datetime objects.") use_cftime = _should_cftime_be_used(time, calendar, use_cftime) source_calendar = time.dt.calendar # Do nothing if request calendar is the same as the source # AND source is np XOR use_cftime if source_calendar == calendar and is_np_datetime_like(time.dtype) ^ use_cftime: return obj if (time.dt.year == 0).any() and calendar in _CALENDARS_WITHOUT_YEAR_ZERO: raise ValueError( f"Source time coordinate contains dates with year 0, which is not supported by target calendar {calendar}." ) if (source_calendar == "360_day" or calendar == "360_day") and align_on is None: raise ValueError( "Argument `align_on` must be specified with either 'date' or " "'year' when converting to or from a '360_day' calendar." ) if source_calendar != "360_day" and calendar != "360_day": align_on = "date" out = obj.copy() if align_on in ["year", "random"]: # Special case for conversion involving 360_day calendar if align_on == "year": # Instead of translating dates directly, this tries to keep the position within a year similar. new_doy = _interpolate_day_of_year(time, target_calendar=calendar) elif align_on == "random": # The 5 days to remove are randomly chosen, one for each of the five 72-days periods of the year. new_doy = time.groupby(f"{dim}.year").map( _random_day_of_year, target_calendar=calendar, use_cftime=use_cftime ) # Convert the source datetimes, but override the day of year with our new day of years. out[dim] = DataArray( [ _convert_to_new_calendar_with_new_day_of_year( date, newdoy, calendar, use_cftime ) for date, newdoy in zip(time.variable._data.array, new_doy, strict=True) ], dims=(dim,), name=dim, ) # Remove duplicate timestamps, happens when reducing the number of days out = out.isel({dim: np.unique(out[dim], return_index=True)[1]}) elif align_on == "date": new_times = convert_times( time.data, get_date_type(calendar, use_cftime=use_cftime), raise_on_invalid=False, ) out[dim] = new_times # Remove NaN that where put on invalid dates in target calendar out = out.sel({dim: out[dim].notnull()}) if use_cftime: # Reassign times to ensure time index of output is a CFTimeIndex # (previously it was an Index due to the presence of NaN values). # Note this is not needed in the case that the output time index is # a DatetimeIndex, since DatetimeIndexes can handle NaN values. 
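# For context, a hedged usage sketch of convert_calendar as a whole (dataset names are
# hypothetical):
#   ds_noleap = convert_calendar(ds, "noleap")                         # Feb 29th dropped
#   ds_back = convert_calendar(ds_noleap, "standard", missing=np.nan)  # Feb 29th refilled
# The second call requires an inferable frequency on the time coordinate, as described
# in the docstring above.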
out[dim] = CFTimeIndex(out[dim].data) if missing is not None: time_target = date_range_like(time, calendar=calendar, use_cftime=use_cftime) out = out.reindex({dim: time_target}, fill_value=missing) # Copy attrs but remove `calendar` if still present. out[dim].attrs.update(time.attrs) out[dim].attrs.pop("calendar", None) return out def _is_leap_year(years, calendar): func = np.vectorize(cftime.is_leap_year) return func(years, calendar=calendar) def _days_in_year(years, calendar): """The number of days in the year according to given calendar.""" if calendar == "360_day": return full_like(years, 360) return _is_leap_year(years, calendar).astype(int) + 365 def _interpolate_day_of_year(times, target_calendar): """Returns the nearest day in the target calendar of the corresponding "decimal year" in the source calendar.""" source_calendar = times.dt.calendar return np.round( _days_in_year(times.dt.year, target_calendar) * times.dt.dayofyear / _days_in_year(times.dt.year, source_calendar) ).astype(int) def _random_day_of_year(time, target_calendar, use_cftime): """Return a day of year in the new calendar. Removes Feb 29th and five other days chosen randomly within five sections of 72 days. """ year = time.dt.year[0] source_calendar = time.dt.calendar new_doy = np.arange(360) + 1 rm_idx = np.random.default_rng().integers(0, 72, 5) + 72 * np.arange(5) if source_calendar == "360_day": for idx in rm_idx: new_doy[idx + 1 :] = new_doy[idx + 1 :] + 1 if _days_in_year(year, target_calendar) == 366: new_doy[new_doy >= 60] = new_doy[new_doy >= 60] + 1 elif target_calendar == "360_day": new_doy = np.insert(new_doy, rm_idx - np.arange(5), -1) if _days_in_year(year, source_calendar) == 366: new_doy = np.insert(new_doy, 60, -1) return new_doy[time.dt.dayofyear - 1] def _convert_to_new_calendar_with_new_day_of_year( date, day_of_year, calendar, use_cftime ): """Convert a datetime object to another calendar with a new day of year. Redefines the day of year (and thus ignores the month and day information from the source datetime). Nanosecond information is lost as cftime.datetime doesn't support it. """ new_date = cftime.num2date( day_of_year - 1, f"days since {date.year}-01-01", calendar=calendar if use_cftime else "standard", ) try: return get_date_type(calendar, use_cftime)( date.year, new_date.month, new_date.day, date.hour, date.minute, date.second, date.microsecond, ) except ValueError: return np.nan def _decimal_year_cftime(time, year, days_in_year, *, date_class): year_start = date_class(year, 1, 1) delta = np.timedelta64(time - year_start, "ns") days_in_year = np.timedelta64(days_in_year, "D") return year + delta / days_in_year def _decimal_year_numpy(time, year, days_in_year, *, dtype): time = np.asarray(time).astype(dtype) year_start = np.datetime64(int(year) - 1970, "Y").astype(dtype) delta = time - year_start days_in_year = np.timedelta64(days_in_year, "D") return year + delta / days_in_year def _decimal_year(times): """Convert a datetime DataArray to decimal years according to its calendar. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. Ex: '2000-03-01 12:00' is 2000.1653 in a standard calendar, 2000.16301 in a "noleap" or 2000.16806 in a "360_day". 
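Working through the standard-calendar case above: 2000-03-01 12:00 falls 31 + 29 + 0.5 = 60.5 days after 2000-01-01 00:00 and 2000 has 366 days, so the decimal year is 2000 + 60.5/366 ≈ 2000.1653 (similarly 59.5/365 ≈ 0.16301 for "noleap" and 60.5/360 ≈ 0.16806 for "360_day").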
""" if times.dtype == "O": function = _decimal_year_cftime kwargs = {"date_class": get_date_type(times.dt.calendar, True)} else: function = _decimal_year_numpy kwargs = {"dtype": times.dtype} from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( function, times, times.dt.year, times.dt.days_in_year, kwargs=kwargs, vectorize=True, dask="parallelized", output_dtypes=[np.float64], ) def interp_calendar(source, target, dim="time"): """Interpolates a DataArray or Dataset indexed by a time coordinate to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal year equivalent then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- source: DataArray or Dataset The source data to interpolate; must have a time coordinate of a valid dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime` objects) target: DataArray, DatetimeIndex, or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : str The time coordinate name. Return ------ DataArray or Dataset The source interpolated on the decimal years of target, """ from xarray.core.dataarray import DataArray if isinstance(target, pd.DatetimeIndex | CFTimeIndex): target = DataArray(target, dims=(dim,), name=dim) if not _contains_datetime_like_objects( source[dim].variable ) or not _contains_datetime_like_objects(target.variable): raise ValueError( f"Both 'source.{dim}' and 'target' must contain datetime objects." ) target_calendar = target.dt.calendar if ( source[dim].time.dt.year == 0 ).any() and target_calendar in _CALENDARS_WITHOUT_YEAR_ZERO: raise ValueError( f"Source time coordinate contains dates with year 0, which is not supported by target calendar {target_calendar}." ) out = source.copy() out[dim] = _decimal_year(source[dim]) target_idx = _decimal_year(target) out = out.interp(**{dim: target_idx}) out[dim] = target return out ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/cftime_offsets.py����������������������������������������������������0000664�0000000�0000000�00000172653�15114646760�0021504�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Time offset classes for use with cftime.datetime objects""" # The offset classes and mechanisms for generating time ranges defined in # this module were copied/adapted from those defined in pandas. See in # particular the objects and methods defined in pandas.tseries.offsets # and pandas.core.indexes.datetimes. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. 
# Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import re import warnings from collections.abc import Mapping from datetime import datetime, timedelta from functools import partial from typing import TYPE_CHECKING, ClassVar, Literal, TypeVar, get_args import numpy as np import pandas as pd from packaging.version import Version from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.times import ( _is_standard_calendar, _parse_iso8601, _should_cftime_be_used, convert_time_or_go_back, format_cftime_datetime, ) from xarray.compat.pdcompat import ( count_not_none, default_precision_timestamp, ) from xarray.core.common import _contains_datetime_like_objects, is_np_datetime_like from xarray.core.types import InclusiveOptions from xarray.core.utils import attempt_import, emit_user_level_warning if TYPE_CHECKING: from xarray.core.types import ( PDDatetimeUnitOptions, Self, TypeAlias, ) DayOption: TypeAlias = Literal["start", "end"] T_FreqStr = TypeVar("T_FreqStr", str, None) def get_date_type(calendar, use_cftime=True): """Return the cftime date type for a given calendar name.""" if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if _is_standard_calendar(calendar) and not use_cftime: return default_precision_timestamp calendars = { "noleap": cftime.DatetimeNoLeap, "360_day": cftime.Datetime360Day, "365_day": cftime.DatetimeNoLeap, "366_day": cftime.DatetimeAllLeap, "gregorian": cftime.DatetimeGregorian, "proleptic_gregorian": cftime.DatetimeProlepticGregorian, "julian": cftime.DatetimeJulian, "all_leap": cftime.DatetimeAllLeap, "standard": cftime.DatetimeGregorian, } return calendars[calendar] class BaseCFTimeOffset: _freq: ClassVar[str | None] = None _day_option: ClassVar[DayOption | None] = None n: int def __init__(self, n: int = 1) -> None: if not isinstance(n, int): raise TypeError( "The provided multiple 'n' must be an integer. " f"Instead a value of type {type(n)!r} was provided." 
) self.n = n def rule_code(self) -> str | None: return self._freq def __eq__(self, other: object) -> bool: if not isinstance(other, BaseCFTimeOffset): return NotImplemented return self.n == other.n and self.rule_code() == other.rule_code() def __ne__(self, other: object) -> bool: return not self == other def __add__(self, other): return self.__apply__(other) def __sub__(self, other): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract a cftime.datetime from a time offset.") elif type(other) is type(self): return type(self)(self.n - other.n) else: return NotImplemented def __mul__(self, other: int) -> Self: if not isinstance(other, int): return NotImplemented return type(self)(n=other * self.n) def __neg__(self) -> Self: return self * -1 def __rmul__(self, other): return self.__mul__(other) def __radd__(self, other): return self.__add__(other) def __rsub__(self, other): if isinstance(other, BaseCFTimeOffset) and type(self) is not type(other): raise TypeError("Cannot subtract cftime offsets of differing types") return -self + other def __apply__(self, other): return NotImplemented def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" test_date = (self + date) - self return date == test_date def rollforward(self, date): if self.onOffset(date): return date else: return date + type(self)() def rollback(self, date): if self.onOffset(date): return date else: return date - type(self)() def __str__(self): return f"<{type(self).__name__}: n={self.n}>" def __repr__(self): return str(self) def _get_offset_day(self, other): # subclass must implement `_day_option`; calling from the base class # will raise NotImplementedError. return _get_day_of_month(other, self._day_option) class Tick(BaseCFTimeOffset): # analogous https://github.com/pandas-dev/pandas/blob/ccb25ab1d24c4fb9691270706a59c8d319750870/pandas/_libs/tslibs/offsets.pyx#L806 def _next_higher_resolution(self) -> Tick: self_type = type(self) if self_type is Day: return Hour(self.n * 24) if self_type is Hour: return Minute(self.n * 60) if self_type is Minute: return Second(self.n * 60) if self_type is Second: return Millisecond(self.n * 1000) if self_type is Millisecond: return Microsecond(self.n * 1000) raise ValueError("Could not convert to integer offset at any resolution") def __mul__(self, other: int | float) -> Tick: if not isinstance(other, int | float): return NotImplemented if isinstance(other, float): n = other * self.n # If the new `n` is an integer, we can represent it using the # same BaseCFTimeOffset subclass as self, otherwise we need to move up # to a higher-resolution subclass if np.isclose(n % 1, 0): return type(self)(int(n)) new_self = self._next_higher_resolution() return new_self * other return type(self)(n=other * self.n) def as_timedelta(self) -> timedelta: """All Tick subclasses must implement an as_timedelta method.""" raise NotImplementedError def _get_day_of_month(other, day_option: DayOption) -> int: """Find the day in `other`'s month that satisfies a BaseCFTimeOffset's onOffset policy, as described by the `day_option` argument. 
Parameters ---------- other : cftime.datetime day_option : 'start', 'end' 'start': returns 1 'end': returns last day of the month Returns ------- day_of_month : int """ if day_option == "start": return 1 elif day_option == "end": return other.daysinmonth elif day_option is None: # Note: unlike `_shift_month`, _get_day_of_month does not # allow day_option = None raise NotImplementedError() raise ValueError(day_option) def _adjust_n_months(other_day, n, reference_day): """Adjust the number of times a monthly offset is applied based on the day of a given date, and the reference day provided. """ if n > 0 and other_day < reference_day: n = n - 1 elif n <= 0 and other_day > reference_day: n = n + 1 return n def _adjust_n_years(other, n, month, reference_day): """Adjust the number of times an annual offset is applied based on another date, and the reference day provided""" if n > 0: if other.month < month or (other.month == month and other.day < reference_day): n -= 1 elif other.month > month or (other.month == month and other.day > reference_day): n += 1 return n def _shift_month(date, months, day_option: DayOption = "start"): """Shift the date to a month start or end a given number of months away.""" _ = attempt_import("cftime") has_year_zero = date.has_year_zero year = date.year + (date.month + months) // 12 month = (date.month + months) % 12 if month == 0: month = 12 year -= 1 if not has_year_zero: if date.year < 0 <= year: year += 1 elif year <= 0 < date.year: year -= 1 # Silence warnings associated with generating dates with years < 1. with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") if day_option == "start": day = 1 elif day_option == "end": reference = type(date)(year, month, 1, has_year_zero=has_year_zero) day = reference.daysinmonth else: raise ValueError(day_option) return date.replace(year=year, month=month, day=day) def roll_qtrday( other, n: int, month: int, day_option: DayOption, modby: int = 3 ) -> int: """Possibly increment or decrement the number of periods to shift based on rollforward/rollbackward conventions. Parameters ---------- other : cftime.datetime n : number of periods to increment, before adjusting for rolling month : int reference month giving the first month of the year day_option : 'start', 'end' The convention to use in finding the day in a given month against which to compare for rollforward/rollbackward decisions. modby : int 3 for quarters, 12 for years Returns ------- n : int number of periods to increment See Also -------- _get_day_of_month : Find the day in a month provided an offset. """ months_since = other.month % modby - month % modby if n > 0: if months_since < 0 or ( months_since == 0 and other.day < _get_day_of_month(other, day_option) ): # pretend to roll back if on same month but # before compare_day n -= 1 elif months_since > 0 or ( months_since == 0 and other.day > _get_day_of_month(other, day_option) ): # make sure to roll forward, so negate n += 1 return n def _validate_month(month: int | None, default_month: int) -> int: result_month = default_month if month is None else month if not isinstance(result_month, int): raise TypeError( "'self.month' must be an integer value between 1 " "and 12. Instead, it was set to a value of " f"{result_month!r}" ) elif not (1 <= result_month <= 12): raise ValueError( "'self.month' must be an integer value between 1 " "and 12. 
Instead, it was set to a value of " f"{result_month!r}" ) return result_month class MonthBegin(BaseCFTimeOffset): _freq = "MS" def __apply__(self, other): n = _adjust_n_months(other.day, self.n, 1) return _shift_month(other, n, "start") def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == 1 class MonthEnd(BaseCFTimeOffset): _freq = "ME" def __apply__(self, other): n = _adjust_n_months(other.day, self.n, other.daysinmonth) return _shift_month(other, n, "end") def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == date.daysinmonth _MONTH_ABBREVIATIONS = { 1: "JAN", 2: "FEB", 3: "MAR", 4: "APR", 5: "MAY", 6: "JUN", 7: "JUL", 8: "AUG", 9: "SEP", 10: "OCT", 11: "NOV", 12: "DEC", } class QuarterOffset(BaseCFTimeOffset): """Quarter representation copied off of pandas/tseries/offsets.py""" _default_month: ClassVar[int] month: int def __init__(self, n: int = 1, month: int | None = None) -> None: BaseCFTimeOffset.__init__(self, n) self.month = _validate_month(month, self._default_month) def __apply__(self, other): # months_since: find the calendar quarter containing other.month, # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep]. # Then find the month in that quarter containing an onOffset date for # self. `months_since` is the number of months to shift other.month # to get to this on-offset month. months_since = other.month % 3 - self.month % 3 qtrs = roll_qtrday( other, self.n, self.month, day_option=self._day_option, modby=3 ) months = qtrs * 3 - months_since return _shift_month(other, months, self._day_option) def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" mod_month = (date.month - self.month) % 3 return mod_month == 0 and date.day == self._get_offset_day(date) def __sub__(self, other: Self) -> Self: if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract cftime.datetime from offset.") if type(other) is type(self) and other.month == self.month: return type(self)(self.n - other.n, month=self.month) return NotImplemented def __mul__(self, other): if isinstance(other, float): return NotImplemented return type(self)(n=other * self.n, month=self.month) def rule_code(self) -> str: return f"{self._freq}-{_MONTH_ABBREVIATIONS[self.month]}" def __str__(self): return f"<{type(self).__name__}: n={self.n}, month={self.month}>" class QuarterBegin(QuarterOffset): # When converting a string to an offset, pandas converts # 'QS' to a QuarterBegin offset starting in the month of # January. When creating a QuarterBegin offset directly # from the constructor, however, the default month is March. # We follow that behavior here. _default_month = 3 _freq = "QS" _day_option = "start" def rollforward(self, date): """Roll date forward to nearest start of quarter""" if self.onOffset(date): return date else: return date + QuarterBegin(month=self.month) def rollback(self, date): """Roll date backward to nearest start of quarter""" if self.onOffset(date): return date else: return date - QuarterBegin(month=self.month) class QuarterEnd(QuarterOffset): # When converting a string to an offset, pandas converts # 'Q' to a QuarterEnd offset starting in the month of # December. 
When creating a QuarterEnd offset directly # from the constructor, however, the default month is March. # We follow that behavior here. _default_month = 3 _freq = "QE" _day_option = "end" def rollforward(self, date): """Roll date forward to nearest end of quarter""" if self.onOffset(date): return date else: return date + QuarterEnd(month=self.month) def rollback(self, date): """Roll date backward to nearest end of quarter""" if self.onOffset(date): return date else: return date - QuarterEnd(month=self.month) class YearOffset(BaseCFTimeOffset): _default_month: ClassVar[int] month: int def __init__(self, n: int = 1, month: int | None = None) -> None: BaseCFTimeOffset.__init__(self, n) self.month = _validate_month(month, self._default_month) def __apply__(self, other): reference_day = _get_day_of_month(other, self._day_option) years = _adjust_n_years(other, self.n, self.month, reference_day) months = years * 12 + (self.month - other.month) return _shift_month(other, months, self._day_option) def __sub__(self, other): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract cftime.datetime from offset.") elif type(other) is type(self) and other.month == self.month: return type(self)(self.n - other.n, month=self.month) else: return NotImplemented def __mul__(self, other): if isinstance(other, float): return NotImplemented return type(self)(n=other * self.n, month=self.month) def rule_code(self) -> str: return f"{self._freq}-{_MONTH_ABBREVIATIONS[self.month]}" def __str__(self) -> str: return f"<{type(self).__name__}: n={self.n}, month={self.month}>" class YearBegin(YearOffset): _freq = "YS" _day_option = "start" _default_month = 1 def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == 1 and date.month == self.month def rollforward(self, date): """Roll date forward to nearest start of year""" if self.onOffset(date): return date else: return date + YearBegin(month=self.month) def rollback(self, date): """Roll date backward to nearest start of year""" if self.onOffset(date): return date else: return date - YearBegin(month=self.month) class YearEnd(YearOffset): _freq = "YE" _day_option = "end" _default_month = 12 def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == date.daysinmonth and date.month == self.month def rollforward(self, date): """Roll date forward to nearest end of year""" if self.onOffset(date): return date else: return date + YearEnd(month=self.month) def rollback(self, date): """Roll date backward to nearest end of year""" if self.onOffset(date): return date else: return date - YearEnd(month=self.month) class Day(BaseCFTimeOffset): """Day offset following definition in pandas/_libs/tslibs/offsets.pyx""" _freq = "D" def __apply__(self, other): if isinstance(other, Day): return Day(self.n + other.n) else: return other + timedelta(days=self.n) def onOffset(self, date) -> bool: return True class Hour(Tick): _freq = "h" def as_timedelta(self) -> timedelta: return timedelta(hours=self.n) def __apply__(self, other): return other + self.as_timedelta() class Minute(Tick): _freq = "min" def as_timedelta(self) -> timedelta: return timedelta(minutes=self.n) def __apply__(self, other): return other + self.as_timedelta() class Second(Tick): _freq = "s" def as_timedelta(self) -> 
timedelta: return timedelta(seconds=self.n) def __apply__(self, other): return other + self.as_timedelta() class Millisecond(Tick): _freq = "ms" def as_timedelta(self) -> timedelta: return timedelta(milliseconds=self.n) def __apply__(self, other): return other + self.as_timedelta() class Microsecond(Tick): _freq = "us" def as_timedelta(self) -> timedelta: return timedelta(microseconds=self.n) def __apply__(self, other): return other + self.as_timedelta() def _generate_anchored_offsets( base_freq: str, offset: type[YearOffset | QuarterOffset] ) -> dict[str, type[BaseCFTimeOffset]]: offsets: dict[str, type[BaseCFTimeOffset]] = {} for month, abbreviation in _MONTH_ABBREVIATIONS.items(): anchored_freq = f"{base_freq}-{abbreviation}" offsets[anchored_freq] = partial(offset, month=month) # type: ignore[assignment] return offsets _FREQUENCIES: Mapping[str, type[BaseCFTimeOffset]] = { "A": YearEnd, "AS": YearBegin, "Y": YearEnd, "YE": YearEnd, "YS": YearBegin, "Q": partial(QuarterEnd, month=12), # type: ignore[dict-item] "QE": partial(QuarterEnd, month=12), # type: ignore[dict-item] "QS": partial(QuarterBegin, month=1), # type: ignore[dict-item] "M": MonthEnd, "ME": MonthEnd, "MS": MonthBegin, "D": Day, "H": Hour, "h": Hour, "T": Minute, "min": Minute, "S": Second, "s": Second, "L": Millisecond, "ms": Millisecond, "U": Microsecond, "us": Microsecond, **_generate_anchored_offsets("AS", YearBegin), **_generate_anchored_offsets("A", YearEnd), **_generate_anchored_offsets("YS", YearBegin), **_generate_anchored_offsets("Y", YearEnd), **_generate_anchored_offsets("YE", YearEnd), **_generate_anchored_offsets("QS", QuarterBegin), **_generate_anchored_offsets("Q", QuarterEnd), **_generate_anchored_offsets("QE", QuarterEnd), } _FREQUENCY_CONDITION = "|".join(_FREQUENCIES.keys()) _PATTERN = rf"^((?P[+-]?\d+)|())(?P({_FREQUENCY_CONDITION}))$" # pandas defines these offsets as "Tick" objects, which for instance have # distinct behavior from daily or longer frequencies in resample. CFTIME_TICKS = (Hour, Minute, Second) def _generate_anchored_deprecated_frequencies( deprecated: str, recommended: str ) -> dict[str, str]: pairs = {} for abbreviation in _MONTH_ABBREVIATIONS.values(): anchored_deprecated = f"{deprecated}-{abbreviation}" anchored_recommended = f"{recommended}-{abbreviation}" pairs[anchored_deprecated] = anchored_recommended return pairs _DEPRECATED_FREQUENCIES: dict[str, str] = { "A": "YE", "Y": "YE", "AS": "YS", "Q": "QE", "M": "ME", "H": "h", "T": "min", "S": "s", "L": "ms", "U": "us", **_generate_anchored_deprecated_frequencies("A", "YE"), **_generate_anchored_deprecated_frequencies("Y", "YE"), **_generate_anchored_deprecated_frequencies("AS", "YS"), **_generate_anchored_deprecated_frequencies("Q", "QE"), } _DEPRECATION_MESSAGE = ( "{deprecated_freq!r} is deprecated and will be removed in a future " "version. Please use {recommended_freq!r} instead of " "{deprecated_freq!r}." 
) def _emit_freq_deprecation_warning(deprecated_freq): recommended_freq = _DEPRECATED_FREQUENCIES[deprecated_freq] message = _DEPRECATION_MESSAGE.format( deprecated_freq=deprecated_freq, recommended_freq=recommended_freq ) emit_user_level_warning(message, FutureWarning) def to_offset( freq: BaseCFTimeOffset | str | timedelta | pd.Timedelta | pd.DateOffset, warn: bool = True, ) -> BaseCFTimeOffset: """Convert a frequency string to the appropriate subclass of BaseCFTimeOffset.""" if isinstance(freq, BaseCFTimeOffset): return freq if isinstance(freq, timedelta | pd.Timedelta): return delta_to_tick(freq) if isinstance(freq, pd.DateOffset): freq = _legacy_to_new_freq(freq.freqstr) match = re.match(_PATTERN, freq) if match is None: raise ValueError("Invalid frequency string provided") freq_data = match.groupdict() freq = freq_data["freq"] if warn and freq in _DEPRECATED_FREQUENCIES: _emit_freq_deprecation_warning(freq) multiples = freq_data["multiple"] multiples = 1 if multiples is None else int(multiples) return _FREQUENCIES[freq](n=multiples) def delta_to_tick(delta: timedelta | pd.Timedelta) -> Tick: """Adapted from pandas.tslib.delta_to_tick""" if isinstance(delta, pd.Timedelta) and delta.nanoseconds != 0: # pandas.Timedelta has nanoseconds, but these are not supported raise ValueError( "Unable to convert 'pandas.Timedelta' object with non-zero " "nanoseconds to 'CFTimeOffset' object" ) if delta.microseconds == 0: seconds = delta.days * 86400 + delta.seconds if seconds % 3600 == 0: return Hour(n=seconds // 3600) elif seconds % 60 == 0: return Minute(n=seconds // 60) else: return Second(n=seconds) # Regardless of the days and seconds this will always be a Millisecond # or Microsecond object elif delta.microseconds % 1_000 == 0: return Millisecond(n=delta.microseconds // 1_000) else: return Microsecond(n=delta.microseconds) def to_cftime_datetime(date_str_or_date, calendar=None): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(date_str_or_date, str): if calendar is None: raise ValueError( "If converting a string to a cftime.datetime object, " "a calendar type must be provided" ) date, _ = _parse_iso8601(get_date_type(calendar), date_str_or_date) return date elif isinstance(date_str_or_date, cftime.datetime): return date_str_or_date elif isinstance(date_str_or_date, datetime | pd.Timestamp): return cftime.DatetimeProlepticGregorian(*date_str_or_date.timetuple()) else: raise TypeError( "date_str_or_date must be a string or a " "subclass of cftime.datetime. Instead got " f"{date_str_or_date!r}." 
) def normalize_date(date): """Round datetime down to midnight.""" return date.replace(hour=0, minute=0, second=0, microsecond=0) def _get_normalized_cfdate(date, calendar, normalize): """convert to cf datetime and round down to midnight if normalize.""" if date is None: return date cf_date = to_cftime_datetime(date, calendar) return normalize_date(cf_date) if normalize else cf_date def _generate_linear_date_range(start, end, periods): """Generate an equally-spaced sequence of cftime.datetime objects between and including two dates (whose length equals the number of periods).""" if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") total_seconds = (end - start).total_seconds() values = np.linspace(0.0, total_seconds, periods, endpoint=True) units = f"seconds since {format_cftime_datetime(start)}" calendar = start.calendar return cftime.num2date( values, units=units, calendar=calendar, only_use_cftime_datetimes=True ) def _generate_linear_date_range_with_freq(start, end, periods, freq): """Generate a regular range of cftime.datetime objects with a given frequency. Adapted from pandas.tseries.offsets.generate_range (now at pandas.core.arrays.datetimes._generate_range). Parameters ---------- start : cftime.datetime, or None Start of range end : cftime.datetime, or None End of range periods : int, or None Number of elements in the sequence freq: str Step size between cftime.datetime objects. Not None. Returns ------- A generator object of cftime.datetime objects """ offset = to_offset(freq) if start: # From pandas GH 56147 / 56832 to account for negative direction and # range bounds if offset.n >= 0: start = offset.rollforward(start) else: start = offset.rollback(start) if periods is None and end < start and offset.n >= 0: end = None periods = 0 if end is None: end = start + (periods - 1) * offset if start is None: start = end - (periods - 1) * offset current = start if offset.n >= 0: while current <= end: yield current next_date = current + offset if next_date <= current: raise ValueError(f"Offset {offset} did not increment date") current = next_date else: while current >= end: yield current next_date = current + offset if next_date >= current: raise ValueError(f"Offset {offset} did not decrement date") current = next_date def cftime_range( start=None, end=None, periods=None, freq=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", calendar="standard", ) -> CFTimeIndex: """Return a fixed frequency CFTimeIndex. .. deprecated:: 2025.02.0 Use :py:func:`~xarray.date_range` with ``use_cftime=True`` instead. Parameters ---------- start : str or cftime.datetime, optional Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. .. versionadded:: 2023.02.0 calendar : str, default: "standard" Calendar type for the datetimes. Returns ------- CFTimeIndex Notes ----- This function is an analog of ``pandas.date_range`` for use in generating sequences of ``cftime.datetime`` objects. It supports most of the features of ``pandas.date_range`` (e.g. 
specifying how the index is ``closed`` on either side, or whether or not to ``normalize`` the start and end bounds); however, there are some notable exceptions: - You cannot specify a ``tz`` (time zone) argument. - Start or end dates specified as partial-datetime strings must use the `ISO-8601 format `_. - It supports many, but not all, frequencies supported by ``pandas.date_range``. For example it does not currently support any of the business-related or semi-monthly frequencies. - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as these can easily be written in terms of the finest common resolution, e.g. '61min'. Valid simple frequency strings for use with ``cftime``-calendars include any multiples of the following. +--------+--------------------------+ | Alias | Description | +========+==========================+ | YE | Year-end frequency | +--------+--------------------------+ | YS | Year-start frequency | +--------+--------------------------+ | QE | Quarter-end frequency | +--------+--------------------------+ | QS | Quarter-start frequency | +--------+--------------------------+ | ME | Month-end frequency | +--------+--------------------------+ | MS | Month-start frequency | +--------+--------------------------+ | D | Day frequency | +--------+--------------------------+ | h | Hour frequency | +--------+--------------------------+ | min | Minute frequency | +--------+--------------------------+ | s | Second frequency | +--------+--------------------------+ | ms | Millisecond frequency | +--------+--------------------------+ | us | Microsecond frequency | +--------+--------------------------+ Any multiples of the following anchored offsets are also supported. +------------+--------------------------------------------------------------------+ | Alias | Description | +============+====================================================================+ | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Y(E,S)-NOV | Annual frequency, 
anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ Finally, the following calendar aliases are supported. +--------------------------------+---------------------------------------+ | Alias | Date type | +================================+=======================================+ | standard, gregorian | ``cftime.DatetimeGregorian`` | +--------------------------------+---------------------------------------+ | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` | +--------------------------------+---------------------------------------+ | noleap, 365_day | ``cftime.DatetimeNoLeap`` | +--------------------------------+---------------------------------------+ | all_leap, 366_day | ``cftime.DatetimeAllLeap`` | +--------------------------------+---------------------------------------+ | 360_day | ``cftime.Datetime360Day`` | +--------------------------------+---------------------------------------+ | julian | ``cftime.DatetimeJulian`` | +--------------------------------+---------------------------------------+ Examples -------- This function returns a ``CFTimeIndex``, populated with ``cftime.datetime`` objects associated with the specified calendar type, e.g. >>> xr.date_range( ... start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ... 
) CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00, 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00], dtype='object', length=6, calendar='noleap', freq='2MS') As in the standard pandas function, three of the ``start``, ``end``, ``periods``, or ``freq`` arguments must be specified at a given time, with the other set to ``None``. See the `pandas documentation `_ for more examples of the behavior of ``date_range`` with each of the parameters. See Also -------- pandas.date_range """ emit_user_level_warning( "cftime_range() is deprecated, please use xarray.date_range(..., use_cftime=True) instead.", DeprecationWarning, ) return date_range( start=start, end=end, periods=periods, freq=freq, normalize=normalize, name=name, inclusive=inclusive, calendar=calendar, use_cftime=True, ) def _cftime_range( start=None, end=None, periods=None, freq=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", calendar="standard", ) -> CFTimeIndex: """Return a fixed frequency CFTimeIndex. Parameters ---------- start : str or cftime.datetime, optional Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. calendar : str, default: "standard" Calendar type for the datetimes. Returns ------- CFTimeIndex Notes ----- see cftime_range """ if freq is None and any(arg is None for arg in [periods, start, end]): freq = "D" # Adapted from pandas.core.indexes.datetimes._generate_range. if count_not_none(start, end, periods, freq) != 3: raise ValueError( "Exactly three of 'start', 'end', 'periods', or 'freq' must be " "specified to generate a date range. Note that 'freq' defaults to " "'D' in the event that any of 'start', 'end', or 'periods' are " "None." ) start = _get_normalized_cfdate(start, calendar, normalize) end = _get_normalized_cfdate(end, calendar, normalize) if freq is None: dates = _generate_linear_date_range(start, end, periods) else: dates = np.array( list(_generate_linear_date_range_with_freq(start, end, periods, freq)) ) if not TYPE_CHECKING and inclusive not in get_args(InclusiveOptions): raise ValueError( f"Argument `inclusive` must be either 'both', 'neither', " f"'left', or 'right'. Got {inclusive}." ) if len(dates) and inclusive != "both": if inclusive != "left" and dates[0] == start: dates = dates[1:] if inclusive != "right" and dates[-1] == end: dates = dates[:-1] return CFTimeIndex(dates, name=name) def date_range( start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", unit: PDDatetimeUnitOptions = "ns", calendar="standard", use_cftime=None, ): """Return a fixed frequency datetime index. The type (:py:class:`xarray.CFTimeIndex` or :py:class:`pandas.DatetimeIndex`) of the returned index depends on the requested calendar and on `use_cftime`. Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : int, optional Number of periods to generate. 
freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive. Only valid with pandas DatetimeIndex. normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default: "both" Include boundaries; whether to set each bound as closed or open. .. versionadded:: 2023.02.0 unit : {"s", "ms", "us", "ns"}, default "ns" Specify the desired resolution of the result. .. versionadded:: 2024.12.0 calendar : str, default: "standard" Calendar type for the datetimes. use_cftime : boolean, optional If True, always return a CFTimeIndex. If False, return a pd.DatetimeIndex if possible or raise a ValueError. If None (default), return a pd.DatetimeIndex if possible, otherwise return a CFTimeIndex. Overridden to False if `tz` is not None. Returns ------- CFTimeIndex or pd.DatetimeIndex Notes ----- When ``use_cftime=True``, or a calendar other than "standard", "gregorian", or "proleptic_gregorian" is provided, this function is an analog of ``pandas.date_range`` for use in generating sequences of ``cftime.datetime`` objects. It supports most of the features of ``pandas.date_range`` (e.g. specifying how the index is ``closed`` on either side, or whether or not to ``normalize`` the start and end bounds); however, there are some notable exceptions: - You cannot specify a ``tz`` (time zone) argument. - Start or end dates specified as partial-datetime strings must use the `ISO-8601 format `_. - It supports many, but not all, frequencies supported by ``pandas.date_range``. For example it does not currently support any of the business-related or semi-monthly frequencies. - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as these can easily be written in terms of the finest common resolution, e.g. '61min'. Valid simple frequency strings for use with ``cftime``-calendars include any multiples of the following. +--------+--------------------------+ | Alias | Description | +========+==========================+ | YE | Year-end frequency | +--------+--------------------------+ | YS | Year-start frequency | +--------+--------------------------+ | QE | Quarter-end frequency | +--------+--------------------------+ | QS | Quarter-start frequency | +--------+--------------------------+ | ME | Month-end frequency | +--------+--------------------------+ | MS | Month-start frequency | +--------+--------------------------+ | D | Day frequency | +--------+--------------------------+ | h | Hour frequency | +--------+--------------------------+ | min | Minute frequency | +--------+--------------------------+ | s | Second frequency | +--------+--------------------------+ | ms | Millisecond frequency | +--------+--------------------------+ | us | Microsecond frequency | +--------+--------------------------+ Any multiples of the following anchored offsets are also supported. 
+------------+--------------------------------------------------------------------+ | Alias | Description | +============+====================================================================+ | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September | 
+------------+--------------------------------------------------------------------+ | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ Finally, the following calendar aliases are supported. +--------------------------------+---------------------------------------+----------------------------+ | Alias | Date type | Available use_cftime=False | +================================+=======================================+============================+ | standard, gregorian | ``cftime.DatetimeGregorian`` | True | +--------------------------------+---------------------------------------+----------------------------+ | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` | True | +--------------------------------+---------------------------------------+----------------------------+ | noleap, 365_day | ``cftime.DatetimeNoLeap`` | False | +--------------------------------+---------------------------------------+----------------------------+ | all_leap, 366_day | ``cftime.DatetimeAllLeap`` | False | +--------------------------------+---------------------------------------+----------------------------+ | 360_day | ``cftime.Datetime360Day`` | False | +--------------------------------+---------------------------------------+----------------------------+ | julian | ``cftime.DatetimeJulian`` | False | +--------------------------------+---------------------------------------+----------------------------+ As in the standard pandas function, exactly three of ``start``, ``end``, ``periods``, or ``freq`` are required to generate a date range. Note that ``freq`` defaults to ``"D"`` in the event that any of ``start``, ``end``, or ``periods`` are set to ``None``. See :py:func:`pandas.date_range`. for more examples of the behavior of ``date_range`` with each of the parameters. Examples -------- This function returns a ``CFTimeIndex``, populated with ``cftime.datetime`` objects associated with the specified calendar type, e.g. >>> xr.date_range( ... start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ... ) CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00, 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00], dtype='object', length=6, calendar='noleap', freq='2MS') See also -------- pandas.date_range cftime_range date_range_like """ if tz is not None: use_cftime = False if _is_standard_calendar(calendar) and use_cftime is not True: try: return pd.date_range( # type: ignore[call-overload,unused-ignore] start=start, end=end, periods=periods, # TODO remove translation once requiring pandas >= 2.2 freq=_new_to_legacy_freq(freq), tz=tz, normalize=normalize, name=name, inclusive=inclusive, unit=unit, ) except pd.errors.OutOfBoundsDatetime as err: if use_cftime is False: raise ValueError( "Date range is invalid for pandas DatetimeIndex, try using `use_cftime=True`." ) from err elif use_cftime is False: raise ValueError( f"Invalid calendar {calendar} for pandas DatetimeIndex, try using `use_cftime=True`." 
) return _cftime_range( start=start, end=end, periods=periods, freq=freq, normalize=normalize, name=name, inclusive=inclusive, calendar=calendar, ) def _new_to_legacy_freq(freq): # xarray will now always return "ME" and "QE" for MonthEnd and QuarterEnd # frequencies, but older versions of pandas do not support these as # frequency strings. Until xarray's minimum pandas version is 2.2 or above, # we add logic to continue using the deprecated "M" and "Q" frequency # strings in these circumstances. # NOTE: other conversions ("h" -> "H", ..., "ns" -> "N") not required # TODO: remove once requiring pandas >= 2.2 if not freq or Version(pd.__version__) >= Version("2.2"): return freq try: freq_as_offset = to_offset(freq) except ValueError: # freq may be valid in pandas but not in xarray return freq if isinstance(freq_as_offset, MonthEnd) and "ME" in freq: freq = freq.replace("ME", "M") elif isinstance(freq_as_offset, QuarterEnd) and "QE" in freq: freq = freq.replace("QE", "Q") elif isinstance(freq_as_offset, YearBegin) and "YS" in freq: freq = freq.replace("YS", "AS") elif isinstance(freq_as_offset, YearEnd): # testing for "Y" is required as this was valid in xarray 2023.11 - 2024.01 if "Y-" in freq: # Check for and replace "Y-" instead of just "Y" to prevent # corrupting anchored offsets that contain "Y" in the month # abbreviation, e.g. "Y-MAY" -> "A-MAY". freq = freq.replace("Y-", "A-") elif "YE-" in freq: freq = freq.replace("YE-", "A-") elif "A-" not in freq and freq.endswith("Y"): freq = freq.replace("Y", "A") elif freq.endswith("YE"): freq = freq.replace("YE", "A") return freq def _legacy_to_new_freq(freq: T_FreqStr) -> T_FreqStr: # to avoid internal deprecation warnings when freq is determined using pandas < 2.2 # TODO: remove once requiring pandas >= 2.2 if not freq or Version(pd.__version__) >= Version("2.2"): return freq try: freq_as_offset = to_offset(freq, warn=False) except ValueError: # freq may be valid in pandas but not in xarray return freq if isinstance(freq_as_offset, MonthEnd) and "ME" not in freq: freq = freq.replace("M", "ME") elif isinstance(freq_as_offset, QuarterEnd) and "QE" not in freq: freq = freq.replace("Q", "QE") elif isinstance(freq_as_offset, YearBegin) and "YS" not in freq: freq = freq.replace("AS", "YS") elif isinstance(freq_as_offset, YearEnd): if "A-" in freq: # Check for and replace "A-" instead of just "A" to prevent # corrupting anchored offsets that contain "Y" in the month # abbreviation, e.g. "A-MAY" -> "YE-MAY". freq = freq.replace("A-", "YE-") elif "Y-" in freq: freq = freq.replace("Y-", "YE-") elif freq.endswith("A"): # the "A-MAY" case is already handled above freq = freq.replace("A", "YE") elif "YE" not in freq and freq.endswith("Y"): # the "Y-MAY" case is already handled above freq = freq.replace("Y", "YE") elif isinstance(freq_as_offset, Hour): freq = freq.replace("H", "h") elif isinstance(freq_as_offset, Minute): freq = freq.replace("T", "min") elif isinstance(freq_as_offset, Second): freq = freq.replace("S", "s") elif isinstance(freq_as_offset, Millisecond): freq = freq.replace("L", "ms") elif isinstance(freq_as_offset, Microsecond): freq = freq.replace("U", "us") return freq def date_range_like(source, calendar, use_cftime=None): """Generate a datetime array with the same frequency, start and end as another one, but in a different calendar. Parameters ---------- source : DataArray, CFTimeIndex, or pd.DatetimeIndex 1D datetime array calendar : str New calendar name. 
use_cftime : bool, optional If True, the output uses :py:class:`cftime.datetime` objects. If None (default), :py:class:`numpy.datetime64` values are used if possible. If False, :py:class:`numpy.datetime64` values are used or an error is raised. Returns ------- DataArray 1D datetime coordinate with the same start, end and frequency as the source, but in the new calendar. The start date is assumed to exist in the target calendar. If the end date doesn't exist, the code tries 1 and 2 calendar days before. There is a special case when the source time series is daily or coarser and the end of the input range is on the last day of the month. Then the output range will also end on the last day of the month in the new calendar. """ from xarray.coding.frequencies import infer_freq from xarray.core.dataarray import DataArray if not isinstance(source, pd.DatetimeIndex | CFTimeIndex) and ( (isinstance(source, DataArray) and (source.ndim != 1)) or not _contains_datetime_like_objects(source.variable) ): raise ValueError( "'source' must be a 1D array of datetime objects for inferring its range." ) freq = infer_freq(source) if freq is None: raise ValueError( "`date_range_like` was unable to generate a range as the source frequency was not inferable." ) # TODO remove once requiring pandas >= 2.2 freq = _legacy_to_new_freq(freq) use_cftime = _should_cftime_be_used(source, calendar, use_cftime) source_start = source.values.min() source_end = source.values.max() freq_as_offset = to_offset(freq) if freq_as_offset.n < 0: source_start, source_end = source_end, source_start if is_np_datetime_like(source.dtype): # We want to use datetime fields (datetime64 object don't have them) source_calendar = "standard" source_start = default_precision_timestamp(source_start) source_end = default_precision_timestamp(source_end) elif isinstance(source, CFTimeIndex): source_calendar = source.calendar else: # DataArray source_calendar = source.dt.calendar if calendar == source_calendar and is_np_datetime_like(source.dtype) ^ use_cftime: return source date_type = get_date_type(calendar, use_cftime) start = convert_time_or_go_back(source_start, date_type) end = convert_time_or_go_back(source_end, date_type) # For the cases where the source ends on the end of the month, we expect the same in the new calendar. if source_end.day == source_end.daysinmonth and isinstance( freq_as_offset, YearEnd | QuarterEnd | MonthEnd | Day ): end = end.replace(day=end.daysinmonth) return date_range( start=start.isoformat(), end=end.isoformat(), freq=freq, calendar=calendar, ) �������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/cftimeindex.py�������������������������������������������������������0000664�0000000�0000000�00000074051�15114646760�0020774�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""DatetimeIndex analog for cftime.datetime objects""" # The pandas.Index subclass defined here was copied and adapted for # use with cftime.datetime objects based on the source code defining # pandas.DatetimeIndex. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. 
# Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import math from datetime import timedelta from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd from packaging.version import Version from xarray.coding.times import ( _STANDARD_CALENDARS, _parse_iso8601, cftime_to_nptime, infer_calendar_name, ) from xarray.core.common import _contains_cftime_datetimes from xarray.core.options import OPTIONS from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import attempt_import, emit_user_level_warning, is_scalar if TYPE_CHECKING: from xarray.coding.cftime_offsets import BaseCFTimeOffset from xarray.core.types import Self # constants for cftimeindex.repr CFTIME_REPR_LENGTH = 19 ITEMS_IN_REPR_MAX_ELSE_ELLIPSIS = 100 REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END = 10 OUT_OF_BOUNDS_TIMEDELTA_ERRORS: tuple[type[Exception], ...] try: OUT_OF_BOUNDS_TIMEDELTA_ERRORS = (pd.errors.OutOfBoundsTimedelta, OverflowError) except AttributeError: OUT_OF_BOUNDS_TIMEDELTA_ERRORS = (OverflowError,) def _parsed_string_to_bounds(date_type, resolution, parsed): """Generalization of pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds for use with non-standard calendars and cftime.datetime objects. 
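    For example (an illustrative case, not an exhaustive specification): with a
    ``cftime.DatetimeNoLeap`` date type and ``resolution="month"``, a parsed
    date in February 2000 yields the bounds 2000-02-01 00:00:00 and
    2000-02-28 23:59:59.999999, i.e. the start of the month through one
    microsecond before the start of the next month.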
""" if resolution == "year": return ( date_type(parsed.year, 1, 1), date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1), ) elif resolution == "month": if parsed.month == 12: end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1) else: end = date_type(parsed.year, parsed.month + 1, 1) - timedelta( microseconds=1 ) return date_type(parsed.year, parsed.month, 1), end elif resolution == "day": start = date_type(parsed.year, parsed.month, parsed.day) return start, start + timedelta(days=1, microseconds=-1) elif resolution == "hour": start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour) return start, start + timedelta(hours=1, microseconds=-1) elif resolution == "minute": start = date_type( parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute ) return start, start + timedelta(minutes=1, microseconds=-1) elif resolution == "second": start = date_type( parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute, parsed.second, ) return start, start + timedelta(seconds=1, microseconds=-1) else: raise KeyError def get_date_field(datetimes, field): """Adapted from pandas.tslib.get_date_field""" return np.array([getattr(date, field) for date in datetimes], dtype=np.int64) def _field_accessor(name, docstring=None, min_cftime_version="0.0"): """Adapted from pandas.tseries.index._field_accessor""" def f(self, min_cftime_version=min_cftime_version): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if Version(cftime.__version__) >= Version(min_cftime_version): return get_date_field(self._data, name) else: raise ImportError( f"The {name:!r} accessor requires a minimum " f"version of cftime of {min_cftime_version}. Found an " f"installed version of {cftime.__version__}." ) f.__name__ = name f.__doc__ = docstring return property(f) def get_date_type(self): if self._data.size: return type(self._data[0]) else: return None def assert_all_valid_date_type(data): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if len(data) > 0: sample = data[0] date_type = type(sample) if not isinstance(sample, cftime.datetime): raise TypeError( "CFTimeIndex requires cftime.datetime " f"objects. Got object of {date_type}." ) if not all(isinstance(value, date_type) for value in data): raise TypeError( "CFTimeIndex requires using datetime " f"objects of all the same type. Got\n{data}." 
) def format_row(times, indent=0, separator=", ", row_end=",\n"): """Format a single row from format_times.""" return indent * " " + separator.join(map(str, times)) + row_end def format_times( index, max_width, offset, separator=", ", first_row_offset=0, intermediate_row_end=",\n", last_row_end="", ): """Format values of cftimeindex as pd.Index.""" n_per_row = max(max_width // (CFTIME_REPR_LENGTH + len(separator)), 1) n_rows = math.ceil(len(index) / n_per_row) representation = "" for row in range(n_rows): indent = first_row_offset if row == 0 else offset row_end = last_row_end if row == n_rows - 1 else intermediate_row_end times_for_row = index[row * n_per_row : (row + 1) * n_per_row] representation += format_row( times_for_row, indent=indent, separator=separator, row_end=row_end ) return representation def format_attrs(index, separator=", "): """Format attributes of CFTimeIndex for __repr__.""" attrs = { "dtype": f"'{index.dtype}'", "length": f"{len(index)}", "calendar": f"{index.calendar!r}", "freq": f"{index.freq!r}", } attrs_str = [f"{k}={v}" for k, v in attrs.items()] attrs_str = f"{separator}".join(attrs_str) return attrs_str class CFTimeIndex(pd.Index): """Custom Index for working with CF calendars and dates All elements of a CFTimeIndex must be cftime.datetime objects. Parameters ---------- data : array or CFTimeIndex Sequence of cftime.datetime objects to use in index name : str, default: None Name of the resulting index See Also -------- date_range """ _data: np.ndarray year = _field_accessor("year", "The year of the datetime") month = _field_accessor("month", "The month of the datetime") day = _field_accessor("day", "The days of the datetime") hour = _field_accessor("hour", "The hours of the datetime") minute = _field_accessor("minute", "The minutes of the datetime") second = _field_accessor("second", "The seconds of the datetime") microsecond = _field_accessor("microsecond", "The microseconds of the datetime") dayofyear = _field_accessor( "dayofyr", "The ordinal day of year of the datetime", "1.0.2.1" ) dayofweek = _field_accessor("dayofwk", "The day of week of the datetime", "1.0.2.1") days_in_month = _field_accessor( "daysinmonth", "The number of days in the month of the datetime", "1.1.0.0" ) date_type = property(get_date_type) def __new__(cls, data, name=None, **kwargs): assert_all_valid_date_type(data) if name is None and hasattr(data, "name"): name = data.name result = object.__new__(cls) result._data = np.array(data, dtype="O") result.name = name result._cache = {} return result def __repr__(self): """ Return a string representation for this object. 
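        Mirrors ``pandas.Index.__repr__``: indexes with at most 100 elements
        are shown in full, longer ones are abbreviated to their first and
        last 10 elements with an ellipsis, and the dtype, length, calendar
        and freq attributes are appended.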
""" klass_name = type(self).__name__ display_width = OPTIONS["display_width"] offset = len(klass_name) + 2 if len(self) <= ITEMS_IN_REPR_MAX_ELSE_ELLIPSIS: datastr = format_times( self.values, display_width, offset=offset, first_row_offset=0 ) else: front_str = format_times( self.values[:REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END], display_width, offset=offset, first_row_offset=0, last_row_end=",", ) end_str = format_times( self.values[-REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END:], display_width, offset=offset, first_row_offset=offset, ) datastr = "\n".join([front_str, f"{' ' * offset}...", end_str]) attrs_str = format_attrs(self) # oneliner only if smaller than display_width full_repr_str = f"{klass_name}([{datastr}], {attrs_str})" if len(full_repr_str) > display_width: # if attrs_str too long, one per line if len(attrs_str) >= display_width - offset: attrs_str = attrs_str.replace(",", f",\n{' ' * (offset - 2)}") full_repr_str = ( f"{klass_name}([{datastr}],\n{' ' * (offset - 1)}{attrs_str})" ) return full_repr_str def _partial_date_slice(self, resolution, parsed): """Adapted from pandas.tseries.index.DatetimeIndex._partial_date_slice Note that when using a CFTimeIndex, if a partial-date selection returns a single element, it will never be converted to a scalar coordinate; this is in slight contrast to the behavior when using a DatetimeIndex, which sometimes will return a DataArray with a scalar coordinate depending on the resolution of the datetimes used in defining the index. For example: >>> from cftime import DatetimeNoLeap >>> da = xr.DataArray( ... [1, 2], ... coords=[[DatetimeNoLeap(2001, 1, 1), DatetimeNoLeap(2001, 2, 1)]], ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") Size: 8B array([1]) Coordinates: * time (time) object 8B 2001-01-01 00:00:00 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]], ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") Size: 8B array(1) Coordinates: time datetime64[ns] 8B 2001-01-01 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]], ... dims=["time"], ... 
) >>> da.sel(time="2001-01-01") Size: 8B array([1]) Coordinates: * time (time) datetime64[ns] 8B 2001-01-01T01:00:00 """ start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed) times = self._data if self.is_monotonic_increasing: if len(times) and ( (start < times[0] and end < times[0]) or (start > times[-1] and end > times[-1]) ): # we are out of range raise KeyError # a monotonic (sorted) series can be sliced left = times.searchsorted(start, side="left") right = times.searchsorted(end, side="right") return slice(left, right) lhs_mask = times >= start rhs_mask = times <= end return np.flatnonzero(lhs_mask & rhs_mask) def _get_string_slice(self, key): """Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice""" parsed, resolution = _parse_iso8601(self.date_type, key) try: loc = self._partial_date_slice(resolution, parsed) except KeyError as err: raise KeyError(key) from err return loc def _get_nearest_indexer(self, target, limit, tolerance): """Adapted from pandas.Index._get_nearest_indexer""" left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = abs(self.values[left_indexer] - target.values) right_distances = abs(self.values[right_indexer] - target.values) if self.is_monotonic_increasing: condition = (left_distances < right_distances) | (right_indexer == -1) else: condition = (left_distances <= right_distances) | (right_indexer == -1) indexer = np.where(condition, left_indexer, right_indexer) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance(self, target, indexer, tolerance): """Adapted from pandas.Index._filter_indexer_tolerance""" if isinstance(target, pd.Index): distance = abs(self.values[indexer] - target.values) else: distance = abs(self.values[indexer] - target) indexer = np.where(distance <= tolerance, indexer, -1) return indexer def get_loc(self, key): """Adapted from pandas.tseries.index.DatetimeIndex.get_loc""" if isinstance(key, str): return self._get_string_slice(key) else: return super().get_loc(key) def _maybe_cast_slice_bound(self, label, side): """Adapted from pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound """ if not isinstance(label, str): return label parsed, resolution = _parse_iso8601(self.date_type, label) start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed) if self.is_monotonic_decreasing and len(self) > 1: return end if side == "left" else start return start if side == "left" else end # TODO: Add ability to use integer range outside of iloc? # e.g. series[1:5]. 
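    # Sketch (for illustration only; the names below are not defined in this
    # module) of the string-based indexing the methods above enable:
    #
    #     index = xr.date_range("2000-01-01", periods=12, freq="MS", use_cftime=True)
    #     da = xr.DataArray(range(12), coords=[index], dims=["time"])
    #     da.sel(time="2000-03")                    # get_loc -> _get_string_slice
    #     da.sel(time=slice("2000-03", "2000-06"))  # _maybe_cast_slice_bound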
def get_value(self, series, key): """Adapted from pandas.tseries.index.DatetimeIndex.get_value""" if np.asarray(key).dtype == np.dtype(bool): return series.iloc[key] elif isinstance(key, slice): return series.iloc[self.slice_indexer(key.start, key.stop, key.step)] else: return series.iloc[self.get_loc(key)] def __contains__(self, key: Any) -> bool: """Adapted from pandas.tseries.base.DatetimeIndexOpsMixin.__contains__""" try: result = self.get_loc(key) return ( is_scalar(result) or isinstance(result, slice) or (isinstance(result, np.ndarray) and result.size > 0) ) except (KeyError, TypeError, ValueError): return False def contains(self, key: Any) -> bool: """Needed for .loc based partial-string indexing""" return self.__contains__(key) def shift( # type: ignore[override,unused-ignore] self, periods: int | float, freq: str | timedelta | BaseCFTimeOffset | None = None, ) -> Self: """Shift the CFTimeIndex a multiple of the given frequency. See the documentation for :py:func:`~xarray.date_range` for a complete listing of valid frequency strings. Parameters ---------- periods : int, float if freq of days or below Periods to shift by freq : str, datetime.timedelta or BaseCFTimeOffset A frequency string or datetime.timedelta object to shift by Returns ------- CFTimeIndex See Also -------- pandas.DatetimeIndex.shift Examples -------- >>> index = xr.date_range("2000", periods=1, freq="ME", use_cftime=True) >>> index CFTimeIndex([2000-01-31 00:00:00], dtype='object', length=1, calendar='standard', freq=None) >>> index.shift(1, "ME") CFTimeIndex([2000-02-29 00:00:00], dtype='object', length=1, calendar='standard', freq=None) >>> index.shift(1.5, "24h") CFTimeIndex([2000-02-01 12:00:00], dtype='object', length=1, calendar='standard', freq=None) """ from xarray.coding.cftime_offsets import BaseCFTimeOffset if freq is None: # None type is required to be compatible with base pd.Index class raise TypeError( f"`freq` argument cannot be None for {type(self).__name__}.shift" ) if isinstance(freq, timedelta): return self + periods * freq if isinstance(freq, str | BaseCFTimeOffset): from xarray.coding.cftime_offsets import to_offset return self + periods * to_offset(freq) raise TypeError( f"'freq' must be of type str or datetime.timedelta, got {type(freq)}." ) def __add__(self, other) -> Self: if isinstance(other, pd.TimedeltaIndex): other = other.to_pytimedelta() return type(self)(np.array(self) + other) def __radd__(self, other) -> Self: if isinstance(other, pd.TimedeltaIndex): other = other.to_pytimedelta() return type(self)(other + np.array(self)) def __sub__(self, other): if _contains_datetime_timedeltas(other): return type(self)(np.array(self) - other) if isinstance(other, pd.TimedeltaIndex): return type(self)(np.array(self) - other.to_pytimedelta()) if _contains_cftime_datetimes(np.array(other)): try: return pd.TimedeltaIndex(np.array(self) - np.array(other)) except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err: raise ValueError( "The time difference exceeds the range of values " "that can be expressed at the nanosecond resolution." ) from err return NotImplemented def __rsub__(self, other): try: return pd.TimedeltaIndex(other - np.array(self)) except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err: raise ValueError( "The time difference exceeds the range of values " "that can be expressed at the nanosecond resolution." ) from err def to_datetimeindex( self, unsafe: bool = False, time_unit: PDDatetimeUnitOptions | None = None ) -> pd.DatetimeIndex: """If possible, convert this index to a pandas.DatetimeIndex. 
Parameters ---------- unsafe : bool Flag to turn off calendar mismatch warnings (default ``False``). time_unit : str Time resolution of resulting DatetimeIndex. Can be one of `"s"`, ``"ms"``, ``"us"``, or ``"ns"`` (default ``"ns"``). Returns ------- pandas.DatetimeIndex Raises ------ ValueError If the CFTimeIndex contains dates that are not possible in the standard calendar or outside the range representable by the specified ``time_unit``. Warns ----- RuntimeWarning If converting from a non-standard calendar, or a Gregorian calendar with dates prior to the reform (1582-10-15). Warnings -------- Note that for non-proleptic Gregorian calendars, this will change the calendar type of the index. In that case the result of this method should be used with caution. Examples -------- >>> times = xr.date_range( ... "2000", periods=2, calendar="gregorian", use_cftime=True ... ) >>> times CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object', length=2, calendar='standard', freq=None) >>> times.to_datetimeindex(time_unit="ns") DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None) """ if not self._data.size: return pd.DatetimeIndex([]) if time_unit is None: emit_user_level_warning( "In a future version of xarray to_datetimeindex will default " "to returning a 'us'-resolution DatetimeIndex instead of a " "'ns'-resolution DatetimeIndex. This warning can be silenced " "by explicitly passing the `time_unit` keyword argument.", FutureWarning, ) time_unit = "ns" nptimes = cftime_to_nptime(self, time_unit=time_unit) calendar = infer_calendar_name(self) if calendar not in _STANDARD_CALENDARS and not unsafe: emit_user_level_warning( "Converting a CFTimeIndex with dates from a non-standard " f"calendar, {calendar!r}, to a pandas.DatetimeIndex, which " "uses dates from the standard calendar. This may lead to " "subtle errors in operations that depend on the length of " "time between dates.", RuntimeWarning, ) if calendar == "standard" and not unsafe: reform_date = self.date_type(1582, 10, 15) if self.min() < reform_date: emit_user_level_warning( "Converting a CFTimeIndex with dates from a Gregorian " "calendar that fall before the reform date of 1582-10-15 " "to a pandas.DatetimeIndex. During this time period the " "Gregorian calendar and the proleptic Gregorian calendar " "of the DatetimeIndex do not exactly align. This warning " "can be silenced by setting unsafe=True.", RuntimeWarning, ) return pd.DatetimeIndex(nptimes) def strftime(self, date_format): """ Return an Index of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc `__ Parameters ---------- date_format : str Date format string (e.g. "%Y-%m-%d") Returns ------- pandas.Index Index of formatted strings Examples -------- >>> rng = xr.date_range( ... start="2000", ... periods=5, ... freq="2MS", ... calendar="noleap", ... use_cftime=True, ... 
) >>> rng.strftime("%B %d, %Y, %r") Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM', 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM', 'September 01, 2000, 12:00:00 AM'], dtype='object') """ return pd.Index([date.strftime(date_format) for date in self._data]) @property def asi8(self): """Convert to integers with units of microseconds since 1970-01-01.""" from xarray.core.resample_cftime import exact_cftime_datetime_difference if not self._data.size: return np.array([], dtype=np.int64) epoch = self.date_type(1970, 1, 1) return np.array( [ _total_microseconds(exact_cftime_datetime_difference(epoch, date)) for date in self.values ], dtype=np.int64, ) @property def calendar(self): """The calendar used by the datetimes in the index.""" if not self._data.size: return None return infer_calendar_name(self) @property def freq(self): """The frequency used by the dates in the index.""" from xarray.coding.frequencies import infer_freq # min 3 elemtents required to determine freq if self._data.size < 3: return None return infer_freq(self) def _round_via_method(self, freq, method): """Round dates using a specified method.""" from xarray.coding.cftime_offsets import CFTIME_TICKS, Day, to_offset if not self._data.size: return CFTimeIndex(np.array(self)) offset = to_offset(freq) if isinstance(offset, Day): # Following pandas, "In the 'round' context, Day unambiguously # means 24h, not calendar-day" offset_as_timedelta = timedelta(days=offset.n) elif isinstance(offset, CFTIME_TICKS): offset_as_timedelta = offset.as_timedelta() else: raise ValueError(f"{offset} is a non-fixed frequency") unit = _total_microseconds(offset_as_timedelta) values = self.asi8 rounded = method(values, unit) return _cftimeindex_from_i8(rounded, self.date_type, self.name) def floor(self, freq): """Round dates down to fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _floor_int) def ceil(self, freq): """Round dates up to fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _ceil_int) def round(self, freq): """Round dates to a fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _round_to_nearest_half_even) @property def is_leap_year(self): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") func = np.vectorize(cftime.is_leap_year) return func(self.year, calendar=self.calendar) def _parse_array_of_cftime_strings(strings, date_type): """Create a numpy array from an array of strings. For use in generating dates from strings for use with interp. Assumes the array is either 0-dimensional or 1-dimensional. 
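    For example, ``np.array(["2000-01-01", "2000-02-01"])`` combined with
    ``cftime.DatetimeNoLeap`` yields an array of two ``DatetimeNoLeap``
    objects with the same shape as the input.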
Parameters ---------- strings : array of strings Strings to convert to dates date_type : cftime.datetime type Calendar type to use for dates Returns ------- np.array """ return np.array([_parse_iso8601(date_type, s)[0] for s in strings.ravel()]).reshape( strings.shape ) def _contains_datetime_timedeltas(array): """Check if an input array contains datetime.timedelta objects.""" array = np.atleast_1d(array) return isinstance(array[0], timedelta) def _cftimeindex_from_i8(values, date_type, name): """Construct a CFTimeIndex from an array of integers. Parameters ---------- values : np.array Integers representing microseconds since 1970-01-01. date_type : cftime.datetime Type of date for the index. name : str Name of the index. Returns ------- CFTimeIndex """ epoch = date_type(1970, 1, 1) dates = np.array([epoch + timedelta(microseconds=int(value)) for value in values]) return CFTimeIndex(dates, name=name) def _total_microseconds(delta): """Compute the total number of microseconds of a datetime.timedelta. Parameters ---------- delta : datetime.timedelta Input timedelta. Returns ------- int """ return delta / timedelta(microseconds=1) def _floor_int(values, unit): """Copied from pandas.""" return values - np.remainder(values, unit) def _ceil_int(values, unit): """Copied from pandas.""" return values + np.remainder(-values, unit) def _round_to_nearest_half_even(values, unit): """Copied from pandas.""" if unit % 2: return _ceil_int(values - unit // 2, unit) quotient, remainder = np.divmod(values, unit) mask = np.logical_or( remainder > (unit // 2), np.logical_and(remainder == (unit // 2), quotient % 2) ) quotient[mask] += 1 return quotient * unit ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/common.py������������������������������������������������������������0000664�0000000�0000000�00000011733�15114646760�0017763�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Callable, Hashable, MutableMapping from typing import TYPE_CHECKING, Any, Union import numpy as np from xarray.core import indexing from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict] T_Name = Union[Hashable, None] class SerializationWarning(RuntimeWarning): """Warnings about encoding/decoding issues in serialization.""" class VariableCoder: """Base class for encoding and decoding transformations on variables. We use coders for transforming variables between xarray's data model and a format suitable for serialization. For example, coders apply CF conventions for how data should be represented in netCDF files. 
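    Concrete subclasses in this package include, for example, the string
    coders ``EncodedStringCoder`` and ``CharacterArrayCoder`` in
    ``xarray.coding.strings`` and the time coders in ``xarray.coding.times``.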
Subclasses should implement encode() and decode(), which should satisfy the identity ``coder.decode(coder.encode(variable)) == variable``. If any options are necessary, they should be implemented as arguments to the __init__ method. The optional name argument to encode() and decode() exists solely for the sake of better error messages, and should correspond to the name of variables in the underlying store. """ def encode(self, variable: Variable, name: T_Name = None) -> Variable: """Convert an encoded variable to a decoded variable""" raise NotImplementedError() def decode(self, variable: Variable, name: T_Name = None) -> Variable: """Convert a decoded variable to an encoded variable""" raise NotImplementedError() class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin): """Lazily computed array holding values of elemwise-function. Do not construct this object directly: call lazy_elemwise_func instead. Values are computed upon indexing or coercion to a NumPy array. """ def __init__(self, array, func: Callable, dtype: np.typing.DTypeLike | None): assert not is_chunked_array(array) self.array = indexing.as_indexable(array) self.func = func self._dtype = dtype @property def dtype(self) -> np.dtype: return np.dtype(self._dtype) def transpose(self, order): # For elementwise functions, we can compose transpose and function application return type(self)(self.array.transpose(order), self.func, self.dtype) def _oindex_get(self, key): return type(self)(self.array.oindex[key], self.func, self.dtype) def _vindex_get(self, key): return type(self)(self.array.vindex[key], self.func, self.dtype) def __getitem__(self, key): return type(self)(self.array[key], self.func, self.dtype) def get_duck_array(self): return self.func(self.array.get_duck_array()) async def async_get_duck_array(self): return self.func(await self.array.async_get_duck_array()) def __repr__(self) -> str: return f"{type(self).__name__}({self.array!r}, func={self.func!r}, dtype={self.dtype!r})" def lazy_elemwise_func(array, func: Callable, dtype: np.typing.DTypeLike | None): """Lazily apply an element-wise function to an array. Parameters ---------- array : any valid value of Variable._data func : callable Function to apply to indexed slices of an array. For use with dask, this should be a pickle-able object. dtype : coercible to np.dtype Dtype for the result of this function. Returns ------- Either a dask.array.Array or _ElementwiseFunctionArray. """ if is_chunked_array(array): chunkmanager = get_chunked_array_type(array) return chunkmanager.map_blocks(func, array, dtype=dtype) # type: ignore[arg-type] else: return _ElementwiseFunctionArray(array, func, dtype) def safe_setitem(dest, key: Hashable, value, name: T_Name = None): if key in dest: var_str = f" on variable {name!r}" if name else "" raise ValueError( f"failed to prevent overwriting existing key {key} in attrs{var_str}. " "This is probably an encoding field used by xarray to describe " "how a variable is serialized. To proceed, remove this key from " "the variable's attributes manually." ) dest[key] = value def pop_to( source: MutableMapping, dest: MutableMapping, key: Hashable, name: T_Name = None ) -> Any: """ A convenience function which pops a key k from source to dest. None values are not passed on. If k already exists in dest an error is raised. 
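    For example, during decoding an ``"_Encoding"`` attribute can be moved
    into a variable's encoding with ``pop_to(attrs, encoding, "_Encoding")``.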
""" value = source.pop(key, None) if value is not None: safe_setitem(dest, key, value, name=name) return value def unpack_for_encoding(var: Variable) -> T_VarTuple: return var.dims, var.data, var.attrs.copy(), var.encoding.copy() def unpack_for_decoding(var: Variable) -> T_VarTuple: return var.dims, var._data, var.attrs.copy(), var.encoding.copy() �������������������������������������xarray-2025.12.0/xarray/coding/frequencies.py�������������������������������������������������������0000664�0000000�0000000�00000022244�15114646760�0021003�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""FrequencyInferer analog for cftime.datetime objects""" # The infer_freq method and the _CFTimeFrequencyInferer # subclass defined here were copied and adapted for # use with cftime.datetime objects based on the source code in # pandas.tseries.Frequencies._FrequencyInferer # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import numpy as np import pandas as pd from xarray.coding.cftime_offsets import _MONTH_ABBREVIATIONS, _legacy_to_new_freq from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.common import _contains_datetime_like_objects from xarray.core.dtypes import _is_numpy_subdtype _ONE_MICRO = 1 _ONE_MILLI = _ONE_MICRO * 1000 _ONE_SECOND = _ONE_MILLI * 1000 _ONE_MINUTE = 60 * _ONE_SECOND _ONE_HOUR = 60 * _ONE_MINUTE _ONE_DAY = 24 * _ONE_HOUR def infer_freq(index): """ Infer the most likely frequency given the input index. 
Parameters ---------- index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`. If passed a Series or a DataArray will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values or the index is not 1D. """ from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if isinstance(index, DataArray | pd.Series): if index.ndim != 1: raise ValueError("'index' must be 1D") elif not _contains_datetime_like_objects(Variable("dim", index)): raise ValueError("'index' must contain datetime-like objects") dtype = np.asarray(index).dtype if _is_numpy_subdtype(dtype, "datetime64"): index = pd.DatetimeIndex(index.values) elif _is_numpy_subdtype(dtype, "timedelta64"): index = pd.TimedeltaIndex(index.values) else: index = CFTimeIndex(index.values) if isinstance(index, CFTimeIndex): inferer = _CFTimeFrequencyInferer(index) return inferer.get_freq() return _legacy_to_new_freq(pd.infer_freq(index)) class _CFTimeFrequencyInferer: # (pd.tseries.frequencies._FrequencyInferer): def __init__(self, index): self.index = index self.values = index.asi8 if len(index) < 3: raise ValueError("Need at least 3 dates to infer frequency") self.is_monotonic = ( self.index.is_monotonic_decreasing or self.index.is_monotonic_increasing ) self._deltas = None self._year_deltas = None self._month_deltas = None def get_freq(self): """Find the appropriate frequency string to describe the inferred frequency of self.index Adapted from `pandas.tsseries.frequencies._FrequencyInferer.get_freq` for CFTimeIndexes. Returns ------- str or None """ if not self.is_monotonic or not self.index.is_unique: return None delta = self.deltas[0] # Smallest delta if _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() # There is no possible intraday frequency with a non-unique delta # Different from pandas: we don't need to manage DST and business offsets in cftime elif len(self.deltas) != 1: return None if _is_multiple(delta, _ONE_HOUR): return _maybe_add_count("h", delta / _ONE_HOUR) elif _is_multiple(delta, _ONE_MINUTE): return _maybe_add_count("min", delta / _ONE_MINUTE) elif _is_multiple(delta, _ONE_SECOND): return _maybe_add_count("s", delta / _ONE_SECOND) elif _is_multiple(delta, _ONE_MILLI): return _maybe_add_count("ms", delta / _ONE_MILLI) else: return _maybe_add_count("us", delta / _ONE_MICRO) def _infer_daily_rule(self): annual_rule = self._get_annual_rule() if annual_rule: nyears = self.year_deltas[0] month = _MONTH_ABBREVIATIONS[self.index[0].month] alias = f"{annual_rule}-{month}" return _maybe_add_count(alias, nyears) quartely_rule = self._get_quartely_rule() if quartely_rule: nquarters = self.month_deltas[0] / 3 mod_dict = {0: 12, 2: 11, 1: 10} month = _MONTH_ABBREVIATIONS[mod_dict[self.index[0].month % 3]] alias = f"{quartely_rule}-{month}" return _maybe_add_count(alias, nquarters) monthly_rule = self._get_monthly_rule() if monthly_rule: return _maybe_add_count(monthly_rule, self.month_deltas[0]) if len(self.deltas) == 1: # Daily as there is no "Weekly" offsets with CFTime days = self.deltas[0] / _ONE_DAY return _maybe_add_count("D", days) # CFTime has no business freq and no "week of month" (WOM) return None def _get_annual_rule(self): if len(self.year_deltas) > 1: return None if len(np.unique(self.index.month)) > 1: return None return {"cs": "YS", "ce": 
"YE"}.get(month_anchor_check(self.index)) def _get_quartely_rule(self): if len(self.month_deltas) > 1: return None if self.month_deltas[0] % 3 != 0: return None return {"cs": "QS", "ce": "QE"}.get(month_anchor_check(self.index)) def _get_monthly_rule(self): if len(self.month_deltas) > 1: return None return {"cs": "MS", "ce": "ME"}.get(month_anchor_check(self.index)) @property def deltas(self): """Sorted unique timedeltas as microseconds.""" if self._deltas is None: self._deltas = _unique_deltas(self.values) return self._deltas @property def year_deltas(self): """Sorted unique year deltas.""" if self._year_deltas is None: self._year_deltas = _unique_deltas(self.index.year) return self._year_deltas @property def month_deltas(self): """Sorted unique month deltas.""" if self._month_deltas is None: self._month_deltas = _unique_deltas(self.index.year * 12 + self.index.month) return self._month_deltas def _unique_deltas(arr): """Sorted unique deltas of numpy array""" return np.sort(np.unique(np.diff(arr))) def _is_multiple(us, mult: int): """Whether us is a multiple of mult""" return us % mult == 0 def _maybe_add_count(base: str, count: float): """If count is greater than 1, add it to the base offset string""" if count != 1: assert count == int(count) count = int(count) return f"{count}{base}" else: return base def month_anchor_check(dates): """Return the monthly offset string. Return "cs" if all dates are the first days of the month, "ce" if all dates are the last day of the month, None otherwise. Replicated pandas._libs.tslibs.resolution.month_position_check but without business offset handling. """ calendar_end = True calendar_start = True for date in dates: if calendar_start: calendar_start &= date.day == 1 if calendar_end: cal = date.day == date.daysinmonth calendar_end &= cal elif not calendar_start: break if calendar_end: return "ce" elif calendar_start: return "cs" else: return None ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/strings.py�����������������������������������������������������������0000664�0000000�0000000�00000024641�15114646760�0020166�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Coders for strings.""" from __future__ import annotations import re from functools import partial import numpy as np from xarray.coding.variables import ( VariableCoder, lazy_elemwise_func, pop_to, safe_setitem, unpack_for_decoding, unpack_for_encoding, ) from xarray.core import indexing from xarray.core.utils import emit_user_level_warning, module_available from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array HAS_NUMPY_2_0 = module_available("numpy", minversion="2.0.0.dev0") def create_vlen_dtype(element_type): if element_type not in (str, bytes): raise TypeError(f"unsupported type for vlen_dtype: {element_type!r}") # based on h5py.special_dtype return 
np.dtype("O", metadata={"element_type": element_type}) def check_vlen_dtype(dtype): if dtype.kind != "O" or dtype.metadata is None: return None else: # check xarray (element_type) as well as h5py (vlen) return dtype.metadata.get("element_type", dtype.metadata.get("vlen")) def is_unicode_dtype(dtype): return dtype.kind == "U" or check_vlen_dtype(dtype) is str def is_bytes_dtype(dtype): return dtype.kind == "S" or check_vlen_dtype(dtype) is bytes class EncodedStringCoder(VariableCoder): """Transforms between unicode strings and fixed-width UTF-8 bytes.""" def __init__(self, allows_unicode=True): self.allows_unicode = allows_unicode def encode(self, variable: Variable, name=None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) contains_unicode = is_unicode_dtype(data.dtype) encode_as_char = encoding.get("dtype") == "S1" if encode_as_char: del encoding["dtype"] # no longer relevant if contains_unicode and (encode_as_char or not self.allows_unicode): if "_FillValue" in attrs: raise NotImplementedError( f"variable {name!r} has a _FillValue specified, but " "_FillValue is not yet supported on unicode strings: " "https://github.com/pydata/xarray/issues/1647" ) string_encoding = encoding.pop("_Encoding", "utf-8") safe_setitem(attrs, "_Encoding", string_encoding, name=name) # TODO: figure out how to handle this in a lazy way with dask data = encode_string_array(data, string_encoding) return Variable(dims, data, attrs, encoding) else: variable.encoding = encoding return variable def decode(self, variable: Variable, name=None) -> Variable: dims, data, attrs, encoding = unpack_for_decoding(variable) if "_Encoding" in attrs: string_encoding = pop_to(attrs, encoding, "_Encoding") func = partial(decode_bytes_array, encoding=string_encoding) data = lazy_elemwise_func(data, func, np.dtype(object)) return Variable(dims, data, attrs, encoding) def decode_bytes_array(bytes_array, encoding="utf-8"): # This is faster than using np.char.decode() or np.vectorize() bytes_array = np.asarray(bytes_array) decoded = [x.decode(encoding) for x in bytes_array.ravel()] return np.array(decoded, dtype=object).reshape(bytes_array.shape) def encode_string_array(string_array, encoding="utf-8"): string_array = np.asarray(string_array) encoded = [x.encode(encoding) for x in string_array.ravel()] return np.array(encoded, dtype=bytes).reshape(string_array.shape) def ensure_fixed_length_bytes(var: Variable) -> Variable: """Ensure that a variable with vlen bytes is converted to fixed width.""" if check_vlen_dtype(var.dtype) is bytes: dims, data, attrs, encoding = unpack_for_encoding(var) # TODO: figure out how to handle this with dask data = np.asarray(data, dtype=np.bytes_) return Variable(dims, data, attrs, encoding) else: return var def validate_char_dim_name(strlen, encoding, name) -> str: """Check character array dimension naming and size and return it.""" if (char_dim_name := encoding.pop("char_dim_name", None)) is not None: # 1 - extract all characters up to last number sequence # 2 - extract last number sequence match = re.search(r"^(.*?)(\d+)(?!.*\d)", char_dim_name) if match: new_dim_name = match.group(1) if int(match.group(2)) != strlen: emit_user_level_warning( f"String dimension naming mismatch on variable {name!r}. {char_dim_name!r} provided by encoding, but data has length of '{strlen}'. Using '{new_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n" "To silence this warning either remove 'char_dim_name' from encoding or provide a fitting name." 
) char_dim_name = f"{new_dim_name}{strlen}" elif ( original_shape := encoding.get("original_shape", [-1])[-1] ) != -1 and original_shape != strlen: emit_user_level_warning( f"String dimension length mismatch on variable {name!r}. '{original_shape}' provided by encoding, but data has length of '{strlen}'. Using '{char_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n" f"To silence this warning remove 'original_shape' from encoding." ) char_dim_name = f"{char_dim_name}{strlen}" else: char_dim_name = f"string{strlen}" return char_dim_name class CharacterArrayCoder(VariableCoder): """Transforms between arrays containing bytes and character arrays.""" def encode(self, variable, name=None): variable = ensure_fixed_length_bytes(variable) dims, data, attrs, encoding = unpack_for_encoding(variable) if data.dtype.kind == "S" and encoding.get("dtype") is not str: data = bytes_to_char(data) char_dim_name = validate_char_dim_name(data.shape[-1], encoding, name) dims = dims + (char_dim_name,) return Variable(dims, data, attrs, encoding) def decode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_decoding(variable) if data.dtype == "S1" and dims: encoding["char_dim_name"] = dims[-1] dims = dims[:-1] data = char_to_bytes(data) return Variable(dims, data, attrs, encoding) def bytes_to_char(arr): """Convert numpy/dask arrays from fixed width bytes to characters.""" if arr.dtype.kind != "S": raise ValueError("argument must have a fixed-width bytes dtype") if is_chunked_array(arr): chunkmanager = get_chunked_array_type(arr) return chunkmanager.map_blocks( _numpy_bytes_to_char, arr, dtype="S1", chunks=arr.chunks + ((arr.dtype.itemsize,)), new_axis=[arr.ndim], ) return _numpy_bytes_to_char(arr) def _numpy_bytes_to_char(arr): """Like netCDF4.stringtochar, but faster and more flexible.""" # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 else False # ensure the array is contiguous arr = np.array(arr, copy=copy, order="C", dtype=np.bytes_) return arr.reshape(arr.shape + (1,)).view("S1") def char_to_bytes(arr): """Convert numpy/dask arrays from characters to fixed width bytes.""" if arr.dtype != "S1": raise ValueError("argument must have dtype='S1'") if not arr.ndim: # no dimension to concatenate along return arr size = arr.shape[-1] if not size: # can't make an S0 dtype return np.zeros(arr.shape[:-1], dtype=np.bytes_) if is_chunked_array(arr): chunkmanager = get_chunked_array_type(arr) if len(arr.chunks[-1]) > 1: raise ValueError( "cannot stacked dask character array with " f"multiple chunks in the last dimension: {arr}" ) dtype = np.dtype("S" + str(arr.shape[-1])) return chunkmanager.map_blocks( _numpy_char_to_bytes, arr, dtype=dtype, chunks=arr.chunks[:-1], drop_axis=[arr.ndim - 1], ) else: return StackedBytesArray(arr) def _numpy_char_to_bytes(arr): """Like netCDF4.chartostring, but faster and more flexible.""" # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 else False # based on: https://stackoverflow.com/a/10984878/809705 arr = np.array(arr, copy=copy, order="C") dtype = "S" + str(arr.shape[-1]) return arr.view(dtype).reshape(arr.shape[:-1]) class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin): """Wrapper around array-like objects to create a new indexable object where values, when accessed, are 
automatically stacked along the last dimension. >>> indexer = indexing.BasicIndexer((slice(None),)) >>> np.array(StackedBytesArray(np.array(["a", "b", "c"], dtype="S1"))[indexer]) array(b'abc', dtype='|S3') """ def __init__(self, array): """ Parameters ---------- array : array-like Original array of values to wrap. """ if array.dtype != "S1": raise ValueError( "can only use StackedBytesArray if argument has dtype='S1'" ) self.array = indexing.as_indexable(array) @property def dtype(self): return np.dtype("S" + str(self.array.shape[-1])) @property def shape(self) -> tuple[int, ...]: return self.array.shape[:-1] def __repr__(self): return f"{type(self).__name__}({self.array!r})" def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def __getitem__(self, key): # require slicing the last dimension completely key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim)) if key.tuple[-1] != slice(None): raise IndexError("too many indices") return type(self)(self.array[key]) def get_duck_array(self): return _numpy_char_to_bytes(self.array.get_duck_array()) �����������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/coding/times.py�������������������������������������������������������������0000664�0000000�0000000�00000167425�15114646760�0017626�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import contextlib import re import warnings from collections.abc import Callable, Hashable from datetime import datetime, timedelta from functools import partial from typing import TYPE_CHECKING, Union, cast import numpy as np import pandas as pd from pandas.errors import OutOfBoundsDatetime, OutOfBoundsTimedelta from xarray.coding.common import ( SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to, safe_setitem, unpack_for_decoding, unpack_for_encoding, ) from xarray.compat.pdcompat import default_precision_timestamp, timestamp_as_unit from xarray.core import indexing from xarray.core.common import contains_cftime_datetimes, is_np_datetime_like from xarray.core.duck_array_ops import array_all, asarray, ravel, reshape from xarray.core.formatting import first_n_items, format_timestamp, last_item from xarray.core.utils import attempt_import, emit_user_level_warning from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import T_ChunkedArray, get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array, to_numpy from xarray.namedarray.utils import is_duck_dask_array try: import cftime except ImportError: cftime = None from xarray.core.types import ( CFCalendar, CFTimeDatetime, NPDatetimeUnitOptions, PDDatetimeUnitOptions, T_DuckArray, ) T_Name = Union[Hashable, None] # standard calendars recognized by cftime _STANDARD_CALENDARS = {"standard", "gregorian", "proleptic_gregorian"} _NS_PER_TIME_DELTA = { "ns": 1, "us": int(1e3), "ms": int(1e6), "s": int(1e9), "m": int(1e9) * 60, "h": int(1e9) * 60 * 60, "D": int(1e9) * 60 * 60 * 24, } _US_PER_TIME_DELTA = { "microseconds": 1, "milliseconds": 1_000, "seconds": 1_000_000, "minutes": 60 * 1_000_000, "hours": 60 * 60 * 
1_000_000, "days": 24 * 60 * 60 * 1_000_000, } _NETCDF_TIME_UNITS_CFTIME = [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", ] _NETCDF_TIME_UNITS_NUMPY = _NETCDF_TIME_UNITS_CFTIME + ["nanoseconds"] TIME_UNITS = frozenset( [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ] ) _INVALID_LITERAL_TIMEDELTA64_ENCODING_KEYS = [ "add_offset", "scale_factor", ] _ORDERED_PANDAS_TIME_RESOLUTIONS: list[PDDatetimeUnitOptions] = ["s", "ms", "us", "ns"] def _is_standard_calendar(calendar: str) -> bool: return calendar.lower() in _STANDARD_CALENDARS def _is_numpy_compatible_time_range(times): if is_np_datetime_like(times.dtype): return True # times array contains cftime objects times = np.asarray(times) tmin = times.min() tmax = times.max() try: # before relaxing the nanosecond constrained # this raised OutOfBoundsDatetime for # times < 1678 and times > 2262 # this isn't the case anymore for other resolutions like "s" # now, we raise for dates before 1582-10-15 _check_date_is_after_shift(tmin, "standard") _check_date_is_after_shift(tmax, "standard") convert_time_or_go_back(tmin, pd.Timestamp) convert_time_or_go_back(tmax, pd.Timestamp) except pd.errors.OutOfBoundsDatetime: return False except ValueError as err: if err.args[0] == "year 0 is out of range": return False raise else: return True def _netcdf_to_numpy_timeunit(units: str) -> NPDatetimeUnitOptions: units = units.lower() if not units.endswith("s"): units = f"{units}s" return cast( NPDatetimeUnitOptions, { "nanoseconds": "ns", "microseconds": "us", "milliseconds": "ms", "seconds": "s", "minutes": "m", "hours": "h", "days": "D", }[units], ) def _numpy_to_netcdf_timeunit(units: NPDatetimeUnitOptions) -> str: return { "ns": "nanoseconds", "us": "microseconds", "ms": "milliseconds", "s": "seconds", "m": "minutes", "h": "hours", "D": "days", }[units] def _numpy_dtype_to_netcdf_timeunit(dtype: np.dtype) -> str: unit, _ = np.datetime_data(dtype) unit = cast(NPDatetimeUnitOptions, unit) return _numpy_to_netcdf_timeunit(unit) def _ensure_padded_year(ref_date: str) -> str: # Reference dates without a padded year (e.g. since 1-1-1 or since 2-3-4) # are ambiguous (is it YMD or DMY?). This can lead to some very odd # behaviour e.g. pandas (via dateutil) passes '1-1-1 00:00:0.0' as # '2001-01-01 00:00:00' (because it assumes a) DMY and b) that year 1 is # shorthand for 2001 (like 02 would be shorthand for year 2002)). # Here we ensure that there is always a four-digit year, with the # assumption being that year comes first if we get something ambiguous. matches_year = re.match(r".*\d{4}.*", ref_date) if matches_year: # all good, return return ref_date # No four-digit strings, assume the first digits are the year and pad # appropriately matches_start_digits = re.match(r"(\d+)(.*)", ref_date) if not matches_start_digits: raise ValueError(f"invalid reference date for time units: {ref_date}") ref_year, everything_else = (s for s in matches_start_digits.groups()) ref_date_padded = f"{int(ref_year):04d}{everything_else}" warning_msg = ( f"Ambiguous reference date string: {ref_date}. The first value is " "assumed to be the year hence will be padded with zeros to remove " f"the ambiguity (the padded reference date string is: {ref_date_padded}). " "To remove this message, remove the ambiguity by padding your reference " "date strings with zeros." 
) warnings.warn(warning_msg, SerializationWarning, stacklevel=2) return ref_date_padded def _unpack_netcdf_time_units(units: str) -> tuple[str, str]: # CF datetime units follow the format: "UNIT since DATE" # this parses out the unit and date allowing for extraneous # whitespace. It also ensures that the year is padded with zeros # so it will be correctly understood by pandas (via dateutil). matches = re.match(r"(.+) since (.+)", units) if not matches: raise ValueError(f"invalid time units: {units}") delta_units, ref_date = (s.strip() for s in matches.groups()) ref_date = _ensure_padded_year(ref_date) return delta_units, ref_date def named(name: str, pattern: str) -> str: return "(?P<" + name + ">" + pattern + ")" def optional(x: str) -> str: return "(?:" + x + ")?" def trailing_optional(xs: list[str]) -> str: if not xs: return "" return xs[0] + optional(trailing_optional(xs[1:])) def build_pattern( date_sep: str = r"\-", datetime_sep: str = r"T", time_sep: str = r"\:", micro_sep: str = r".", ) -> str: pieces = [ (None, "year", r"[+-]?\d{4,5}"), (date_sep, "month", r"\d{2}"), (date_sep, "day", r"\d{2}"), (datetime_sep, "hour", r"\d{2}"), (time_sep, "minute", r"\d{2}"), (time_sep, "second", r"\d{2}"), (micro_sep, "microsecond", r"\d{1,6}"), ] pattern_list = [] for sep, name, sub_pattern in pieces: pattern_list.append((sep or "") + named(name, sub_pattern)) # TODO: allow timezone offsets? return "^" + trailing_optional(pattern_list) + "$" _BASIC_PATTERN = build_pattern(date_sep="", time_sep="") _EXTENDED_PATTERN = build_pattern() _CFTIME_PATTERN = build_pattern(datetime_sep=" ") _PATTERNS = [_BASIC_PATTERN, _EXTENDED_PATTERN, _CFTIME_PATTERN] def parse_iso8601_like(datetime_string: str) -> dict[str, str | None]: for pattern in _PATTERNS: match = re.match(pattern, datetime_string) if match: return match.groupdict() raise ValueError( f"no ISO-8601 or cftime-string-like match for string: {datetime_string}" ) def _parse_iso8601(date_type, timestr): default = date_type(1, 1, 1) result = parse_iso8601_like(timestr) replace = {} for attr in ["year", "month", "day", "hour", "minute", "second", "microsecond"]: value = result.get(attr, None) if value is not None: resolution = attr if attr == "microsecond": if len(value) <= 3: resolution = "millisecond" # convert match string into valid microsecond value value = 10 ** (6 - len(value)) * int(value) replace[attr] = int(value) return default.replace(**replace), resolution def _maybe_strip_tz_from_timestamp(date: pd.Timestamp) -> pd.Timestamp: # If the ref_date Timestamp is timezone-aware, convert to UTC and # make it timezone-naive (GH 2649). if date.tz is not None: return date.tz_convert("UTC").tz_convert(None) return date def _cast_timestamp_to_coarsest_resolution(timestamp: pd.Timestamp) -> pd.Timestamp: # Cast timestamp to the coarsest resolution that can be used without # changing its value. If provided a string, the pandas.Timestamp # constructor used to automatically infer this from the resolution of the # string, but this behavior was changed in pandas-dev/pandas#62801. This # function allows us to approximately restore the old behavior in a way # that is perhaps more consistent with how we infer the resolution of the # data values themselves. 
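    # For example, pd.Timestamp("2000-01-01") round-trips unchanged through
    # second resolution and is therefore returned with unit "s", while a
    # timestamp with a fractional second such as "2000-01-01 00:00:00.5" is
    # only coarsened to "ms".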
for unit in _ORDERED_PANDAS_TIME_RESOLUTIONS: coarsest_timestamp = timestamp.as_unit(unit) if coarsest_timestamp == timestamp: return coarsest_timestamp return timestamp def _unpack_time_unit_and_ref_date( units: str, ) -> tuple[NPDatetimeUnitOptions, pd.Timestamp]: # same us _unpack_netcdf_time_units but finalizes ref_date for # processing in encode_cf_datetime time_unit, _ref_date = _unpack_netcdf_time_units(units) time_unit = _netcdf_to_numpy_timeunit(time_unit) ref_date = pd.Timestamp(_ref_date) ref_date = _cast_timestamp_to_coarsest_resolution(ref_date) ref_date = _maybe_strip_tz_from_timestamp(ref_date) return time_unit, ref_date def _unpack_time_units_and_ref_date_cftime(units: str, calendar: str): # same as _unpack_netcdf_time_units but finalizes ref_date for # processing in encode_cf_datetime time_units, ref_date = _unpack_netcdf_time_units(units) ref_date = cftime.num2date( 0, units=f"microseconds since {ref_date}", calendar=calendar, only_use_cftime_datetimes=True, ) return time_units, ref_date def _decode_cf_datetime_dtype( data, units: str, calendar: str | None, use_cftime: bool | None, time_unit: PDDatetimeUnitOptions = "ns", ) -> np.dtype: # Verify that at least the first and last date can be decoded # successfully. Otherwise, tracebacks end up swallowed by # Dataset.__repr__ when users try to view their lazily decoded array. values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data)) example_value = np.concatenate( [to_numpy(first_n_items(values, 1)), to_numpy(last_item(values))] ) try: result = decode_cf_datetime( example_value, units, calendar, use_cftime, time_unit ) except Exception as err: calendar_msg = ( "the default calendar" if calendar is None else f"calendar {calendar!r}" ) msg = ( f"unable to decode time units {units!r} with {calendar_msg!r}. Try " "opening your dataset with decode_times=False or installing cftime " "if it is not installed." ) raise ValueError(msg) from err else: dtype = getattr(result, "dtype", np.dtype("object")) return dtype def _decode_datetime_with_cftime( num_dates: np.ndarray, units: str, calendar: str ) -> np.ndarray: if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if num_dates.size > 0: return np.asarray( cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True) ) else: return np.array([], dtype=object) def _check_date_for_units_since_refdate( date, unit: NPDatetimeUnitOptions, ref_date: pd.Timestamp ) -> pd.Timestamp: # check for out-of-bounds floats and raise if date > np.iinfo("int64").max or date < np.iinfo("int64").min: raise OutOfBoundsTimedelta( f"Value {date} can't be represented as Datetime/Timedelta." ) delta = date * np.timedelta64(1, unit) if not np.isnan(delta): # this will raise on dtype overflow for integer dtypes if date.dtype.kind in "u" and not np.int64(delta) == date: raise OutOfBoundsTimedelta( "DType overflow in Datetime/Timedelta calculation." 
) # this will raise on overflow if ref_date + delta # can't be represented in the current ref_date resolution return timestamp_as_unit(ref_date + delta, ref_date.unit) else: # if date is exactly NaT (np.iinfo("int64").min) return NaT # to make follow-up checks work return pd.Timestamp("NaT") def _check_timedelta_range(value, data_unit, time_unit): if value > np.iinfo("int64").max or value < np.iinfo("int64").min: OutOfBoundsTimedelta(f"Value {value} can't be represented as Timedelta.") # on windows multiplying nan leads to RuntimeWarning with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "invalid value encountered in multiply", RuntimeWarning ) delta = value * np.timedelta64(1, data_unit) if not np.isnan(delta): # this will raise on dtype overflow for integer dtypes if value.dtype.kind in "u" and not np.int64(delta) == value: raise OutOfBoundsTimedelta( "DType overflow in Datetime/Timedelta calculation." ) # this will raise on overflow if delta cannot be represented with the # resolutions supported by pandas. pd.to_timedelta(delta) def _align_reference_date_and_unit( ref_date: pd.Timestamp, unit: NPDatetimeUnitOptions ) -> pd.Timestamp: # align to the highest needed resolution of ref_date or unit if np.timedelta64(1, ref_date.unit) > np.timedelta64(1, unit): # this will raise accordingly # if data can't be represented in the higher resolution return timestamp_as_unit(ref_date, cast(PDDatetimeUnitOptions, unit)) return ref_date def _check_date_is_after_shift( date: pd.Timestamp | datetime | CFTimeDatetime, calendar: str ) -> None: # if we have gregorian/standard we need to raise # if we are outside the well-defined date range # proleptic_gregorian and standard/gregorian are only equivalent # if reference date and date range is >= 1582-10-15 if calendar != "proleptic_gregorian" and date < type(date)(1582, 10, 15): raise OutOfBoundsDatetime( f"Dates before 1582-10-15 cannot be decoded " f"with pandas using {calendar!r} calendar: {date}" ) def _check_higher_resolution( flat_num_dates: np.ndarray, time_unit: PDDatetimeUnitOptions, ) -> tuple[np.ndarray, PDDatetimeUnitOptions]: """Iterate until fitting resolution found.""" index = _ORDERED_PANDAS_TIME_RESOLUTIONS.index(time_unit) new_units = _ORDERED_PANDAS_TIME_RESOLUTIONS[index:] for new_time_unit in new_units: if not ((np.unique(flat_num_dates % 1) > 0).any() and new_time_unit != "ns"): break flat_num_dates *= 1000 return flat_num_dates, new_time_unit def _decode_datetime_with_pandas( flat_num_dates: np.ndarray, units: str, calendar: str, time_resolution: PDDatetimeUnitOptions = "ns", ) -> np.ndarray: if not _is_standard_calendar(calendar): raise OutOfBoundsDatetime( f"Cannot decode times from a non-standard calendar, {calendar!r}, using " "pandas." ) # Work around pandas.to_timedelta issue with dtypes smaller than int64 and # NumPy 2.0 by casting all int and uint data to int64 and uint64, # respectively. See https://github.com/pandas-dev/pandas/issues/56996 for # more details. 
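    # For example, an int16 array of time offsets is cast to int64 and a
    # uint8 array to uint64 before any timedelta arithmetic is attempted.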
if flat_num_dates.dtype.kind == "i": flat_num_dates = flat_num_dates.astype(np.int64) elif flat_num_dates.dtype.kind == "u": flat_num_dates = flat_num_dates.astype(np.uint64) try: time_unit, ref_date = _unpack_time_unit_and_ref_date(units) ref_date = _align_reference_date_and_unit(ref_date, time_unit) # here the highest wanted resolution is set ref_date = _align_reference_date_and_unit(ref_date, time_resolution) except ValueError as err: # ValueError is raised by pd.Timestamp for non-ISO timestamp # strings, in which case we fall back to using cftime raise OutOfBoundsDatetime from err _check_date_is_after_shift(ref_date, calendar) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning) if flat_num_dates.size > 0: # avoid size 0 datetimes GH1329 _check_date_for_units_since_refdate( flat_num_dates.min(), time_unit, ref_date ) _check_date_for_units_since_refdate( flat_num_dates.max(), time_unit, ref_date ) # To avoid integer overflow when converting to nanosecond units for integer # dtypes smaller than np.int64 cast all integer and unsigned integer dtype # arrays to np.int64 (GH 2002, GH 6589). Note this is safe even in the case # of np.uint64 values, because any np.uint64 value that would lead to # overflow when converting to np.int64 would not be representable with a # timedelta64 value, and therefore would raise an error in the lines above. if flat_num_dates.dtype.kind in "iu": flat_num_dates = flat_num_dates.astype(np.int64) elif flat_num_dates.dtype.kind in "f": flat_num_dates = flat_num_dates.astype(np.float64) timedeltas = _numbers_to_timedelta( flat_num_dates, time_unit, ref_date.unit, "datetimes" ) # add timedeltas to ref_date return ref_date + timedeltas def decode_cf_datetime( num_dates, units: str, calendar: str | None = None, use_cftime: bool | None = None, time_unit: PDDatetimeUnitOptions = "ns", ) -> np.ndarray: """Given an array of numeric dates in netCDF format, convert it into a numpy array of date time objects. For standard (Gregorian) calendars, this function uses vectorized operations, which makes it much faster than cftime.num2date. In such a case, the returned array will be of type np.datetime64. Note that time unit in `units` must not be smaller than microseconds and not larger than days. See Also -------- cftime.num2date """ num_dates = to_numpy(num_dates) flat_num_dates = ravel(num_dates) if calendar is None: calendar = "standard" if use_cftime is None: try: dates = _decode_datetime_with_pandas( flat_num_dates, units, calendar, time_unit ) except (KeyError, OutOfBoundsDatetime, OutOfBoundsTimedelta, OverflowError): dates = _decode_datetime_with_cftime( flat_num_dates.astype(float), units, calendar ) # retrieve cftype dates_min = dates[np.nanargmin(num_dates)] dates_max = dates[np.nanargmax(num_dates)] cftype = type(dates_min) # create first day of gregorian calendar in current cf calendar type border = cftype(1582, 10, 15) # "ns" borders # between ['1677-09-21T00:12:43.145224193', '2262-04-11T23:47:16.854775807'] lower = cftype(1677, 9, 21, 0, 12, 43, 145224) upper = cftype(2262, 4, 11, 23, 47, 16, 854775) if dates_min < border: if _is_standard_calendar(calendar): emit_user_level_warning( "Unable to decode time axis into full " "numpy.datetime64 objects, continuing using " "cftime.datetime objects instead, reason: dates prior " "reform date (1582-10-15). 
To silence this warning specify " "'use_cftime=True'.", SerializationWarning, ) elif time_unit == "ns" and (dates_min < lower or dates_max > upper): emit_user_level_warning( "Unable to decode time axis into full " "numpy.datetime64[ns] objects, continuing using " "cftime.datetime objects instead, reason: dates out " "of range. To silence this warning use a coarser resolution " "'time_unit' or specify 'use_cftime=True'.", SerializationWarning, ) elif _is_standard_calendar(calendar): dates = cftime_to_nptime(dates, time_unit=time_unit) elif use_cftime: dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar) else: dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar, time_unit) return reshape(dates, num_dates.shape) def to_datetime_unboxed(value, **kwargs): result = pd.to_datetime(value, **kwargs).to_numpy() assert np.issubdtype(result.dtype, "datetime64") return result def _numbers_to_timedelta( flat_num: np.ndarray, time_unit: NPDatetimeUnitOptions, ref_unit: PDDatetimeUnitOptions, datatype: str, target_unit: PDDatetimeUnitOptions | None = None, ) -> np.ndarray: """Transform numbers to np.timedelta64.""" # keep NaT/nan mask if flat_num.dtype.kind == "f": nan = np.asarray(np.isnan(flat_num)) elif flat_num.dtype.kind == "i": nan = np.asarray(flat_num == np.iinfo(np.int64).min) elif flat_num.dtype.kind == "u": nan = np.broadcast_to(np.asarray(False), flat_num.shape) # in case we need to change the unit, we fix the numbers here # this should be safe, as errors would have been raised above ns_time_unit = _NS_PER_TIME_DELTA[time_unit] ns_ref_date_unit = _NS_PER_TIME_DELTA[ref_unit] if ns_time_unit > ns_ref_date_unit: flat_num = np.asarray(flat_num * np.int64(ns_time_unit / ns_ref_date_unit)) time_unit = ref_unit # estimate fitting resolution for floating point values # this iterates until all floats are fractionless or time_unit == "ns" if flat_num.dtype.kind == "f" and time_unit != "ns": flat_num, new_time_unit = _check_higher_resolution( flat_num, cast(PDDatetimeUnitOptions, time_unit) ) if time_unit != new_time_unit: if target_unit is None or np.timedelta64(1, target_unit) > np.timedelta64( 1, new_time_unit ): if datatype == "datetimes": kwarg = "decode_times" coder = "CFDatetimeCoder" else: kwarg = "decode_timedelta" coder = "CFTimedeltaCoder" formatted_kwarg = f"{kwarg}={coder}(time_unit={new_time_unit!r})" message = ( f"Can't decode floating point {datatype} to {time_unit!r} " f"without precision loss; decoding to {new_time_unit!r} " f"instead. To silence this warning pass {formatted_kwarg} " f"to your opening function." ) emit_user_level_warning(message, SerializationWarning) time_unit = new_time_unit # Cast input ordinals to integers and properly handle NaN/NaT # to prevent casting NaN to int with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) flat_num = flat_num.astype(np.int64) if nan.any(): flat_num[nan] = np.iinfo(np.int64).min # cast to wanted type return flat_num.astype(f"timedelta64[{time_unit}]") def decode_cf_timedelta( num_timedeltas, units: str, time_unit: PDDatetimeUnitOptions = "ns" ) -> np.ndarray: """Given an array of numeric timedeltas in netCDF format, convert it into a numpy timedelta64 ["s", "ms", "us", "ns"] array. 
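
    Examples
    --------
    A small illustrative sketch, assuming plain integer input; the reprs
    shown here follow NumPy's default array formatting:

    >>> import numpy as np
    >>> decode_cf_timedelta(np.array([1, 2]), "hours")
    array([3600000000000, 7200000000000], dtype='timedelta64[ns]')
    >>> decode_cf_timedelta(np.array([1, 2]), "hours", time_unit="s")
    array([3600, 7200], dtype='timedelta64[s]')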
""" num_timedeltas = to_numpy(num_timedeltas) unit = _netcdf_to_numpy_timeunit(units) # special case empty arrays is_empty_array = num_timedeltas.size == 0 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice encountered", RuntimeWarning) if not is_empty_array: _check_timedelta_range(np.nanmin(num_timedeltas), unit, time_unit) _check_timedelta_range(np.nanmax(num_timedeltas), unit, time_unit) timedeltas = _numbers_to_timedelta( num_timedeltas, unit, "s", "timedeltas", target_unit=time_unit ) pd_timedeltas = pd.to_timedelta(ravel(timedeltas)) if not is_empty_array and np.isnat(timedeltas).all(): empirical_unit = time_unit else: empirical_unit = pd_timedeltas.unit if is_empty_array or np.timedelta64(1, time_unit) > np.timedelta64( 1, empirical_unit ): time_unit = empirical_unit if time_unit not in {"s", "ms", "us", "ns"}: raise ValueError( f"time_unit must be one of 's', 'ms', 'us', or 'ns'. Got: {time_unit}" ) result = pd_timedeltas.as_unit(time_unit).to_numpy() return reshape(result, num_timedeltas.shape) def _unit_timedelta_cftime(units: str) -> timedelta: return timedelta(microseconds=_US_PER_TIME_DELTA[units]) def _unit_timedelta_numpy(units: str) -> np.timedelta64: numpy_units = _netcdf_to_numpy_timeunit(units) return np.timedelta64(1, numpy_units) def _infer_time_units_from_diff(unique_timedeltas) -> str: # todo: check, if this function works correctly wrt np.timedelta64 unit_timedelta: Callable[[str], timedelta] | Callable[[str], np.timedelta64] zero_timedelta: timedelta | np.timedelta64 unique_timedeltas = asarray(unique_timedeltas) if unique_timedeltas.dtype == np.dtype("O"): time_units = _NETCDF_TIME_UNITS_CFTIME unit_timedelta = _unit_timedelta_cftime zero_timedelta = timedelta(microseconds=0) else: time_units = _NETCDF_TIME_UNITS_NUMPY unit_timedelta = _unit_timedelta_numpy zero_timedelta = np.timedelta64(0, "ns") for time_unit in time_units: if array_all(unique_timedeltas % unit_timedelta(time_unit) == zero_timedelta): return time_unit return "seconds" def _time_units_to_timedelta(units: str) -> timedelta: return timedelta(microseconds=_US_PER_TIME_DELTA[units]) def infer_calendar_name(dates) -> CFCalendar: """Given an array of datetimes, infer the CF calendar name""" if is_np_datetime_like(dates.dtype): return "proleptic_gregorian" elif dates.dtype == np.dtype("O") and dates.size > 0: # Logic copied from core.common.contains_cftime_datetimes. if cftime is not None: sample = np.asarray(dates).flat[0] if is_duck_dask_array(sample): sample = sample.compute() if isinstance(sample, np.ndarray): sample = sample.item() if isinstance(sample, cftime.datetime): return sample.calendar # Error raise if dtype is neither datetime or "O", if cftime is not importable, and if element of 'O' dtype is not cftime. 
raise ValueError("Array does not contain datetime objects.") def infer_datetime_units(dates) -> str: """Given an array of datetimes, returns a CF compatible time-unit string of the form "{time_unit} since {date[0]}", where `time_unit` is 'days', 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all unique time deltas in `dates`) """ dates = ravel(np.asarray(dates)) if np.issubdtype(np.asarray(dates).dtype, "datetime64"): dates = to_datetime_unboxed(dates) dates = dates[pd.notnull(dates)] reference_date = dates[0] if len(dates) > 0 else "1970-01-01" reference_date = pd.Timestamp(reference_date) else: reference_date = dates[0] if len(dates) > 0 else "1970-01-01" reference_date = format_cftime_datetime(reference_date) unique_timedeltas = np.unique(np.diff(dates)) units = _infer_time_units_from_diff(unique_timedeltas) return f"{units} since {reference_date}" def format_cftime_datetime(date) -> str: """Converts a cftime.datetime object to a string with the format: YYYY-MM-DD HH:MM:SS.UUUUUU """ return f"{date.year:04d}-{date.month:02d}-{date.day:02d} {date.hour:02d}:{date.minute:02d}:{date.second:02d}.{date.microsecond:06d}" def infer_timedelta_units(deltas) -> str: """Given an array of timedeltas, returns a CF compatible time-unit from {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly divide all unique time deltas in `deltas`) """ deltas = ravel(deltas) unique_timedeltas = np.unique(deltas[pd.notnull(deltas)]) return _infer_time_units_from_diff(unique_timedeltas) def cftime_to_nptime( times, raise_on_invalid: bool = True, time_unit: PDDatetimeUnitOptions = "ns" ) -> np.ndarray: """Given an array of cftime.datetime objects, return an array of numpy.datetime64 objects of the same size If raise_on_invalid is True (default), invalid dates trigger a ValueError. Otherwise, the invalid element is replaced by np.NaT.""" times = np.asarray(times) new = [] dt: np.datetime64 for _i, t in np.ndenumerate(times): try: # We expect either "us" resolution or "s" resolution depending on # whether 'microseconds' are defined for the input or not. dt = ( pd.Timestamp(np.datetime64(t.isoformat())).as_unit(time_unit).to_numpy() ) except ValueError as e: if raise_on_invalid: raise ValueError( f"Cannot convert date {t} to a date in the " f"standard calendar. Reason: {e}." ) from e else: dt = np.datetime64("NaT") new.append(dt) return np.asarray(new).reshape(times.shape) def convert_times(times, date_type, raise_on_invalid: bool = True) -> np.ndarray: """Given an array of datetimes, return the same dates in another cftime or numpy date type. Useful to convert between calendars in numpy and cftime or between cftime calendars. If raise_on_valid is True (default), invalid dates trigger a ValueError. Otherwise, the invalid element is replaced by np.nan for cftime types and np.NaT for np.datetime64. """ if date_type in (pd.Timestamp, np.datetime64) and not is_np_datetime_like( times.dtype ): return cftime_to_nptime(times, raise_on_invalid=raise_on_invalid) if is_np_datetime_like(times.dtype): # Convert datetime64 objects to Timestamps since those have year, month, day, etc. attributes times = pd.DatetimeIndex(times) new = np.empty(times.shape, dtype="O") for i, t in enumerate(times): try: dt = date_type( t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond ) except ValueError as e: if raise_on_invalid: raise ValueError( f"Cannot convert date {t} to a date in the " f"{date_type(2000, 1, 1).calendar} calendar. Reason: {e}." 
) from e else: dt = np.nan new[i] = dt return new def convert_time_or_go_back(date, date_type): """Convert a single date to a new date_type (cftime.datetime or pd.Timestamp). If the new date is invalid, it goes back a day and tries again. If it is still invalid, goes back a second day. This is meant to convert end-of-month dates into a new calendar. """ if date_type == pd.Timestamp: date_type = default_precision_timestamp try: return date_type( date.year, date.month, date.day, date.hour, date.minute, date.second, date.microsecond, ) except OutOfBoundsDatetime: raise except ValueError: # Day is invalid, happens at the end of months, try again the day before try: return date_type( date.year, date.month, date.day - 1, date.hour, date.minute, date.second, date.microsecond, ) except ValueError: # Still invalid, happens for 360_day to non-leap february. Try again 2 days before date. return date_type( date.year, date.month, date.day - 2, date.hour, date.minute, date.second, date.microsecond, ) def _should_cftime_be_used( source, target_calendar: str, use_cftime: bool | None ) -> bool: """Return whether conversion of the source to the target calendar should result in a cftime-backed array. Source is a 1D datetime array, target_cal a string (calendar name) and use_cftime is a boolean or None. If use_cftime is None, this returns True if the source's range and target calendar are convertible to np.datetime64 objects. """ # Arguments Checks for target if use_cftime is not True: if _is_standard_calendar(target_calendar): if _is_numpy_compatible_time_range(source): # Conversion is possible with pandas, force False if it was None return False elif use_cftime is False: raise ValueError( "Source time range is not valid for numpy datetimes. Try using `use_cftime=True`." ) elif use_cftime is False: raise ValueError( f"Calendar '{target_calendar}' is only valid with cftime. Try using `use_cftime=True`." ) return True def _cleanup_netcdf_time_units(units: str) -> str: time_units, ref_date = _unpack_netcdf_time_units(units) time_units = time_units.lower() if not time_units.endswith("s"): time_units = f"{time_units}s" # don't worry about reifying the units if they're out of bounds or # formatted badly with contextlib.suppress(OutOfBoundsDatetime, ValueError): units = f"{time_units} since {format_timestamp(ref_date)}" return units def _encode_datetime_with_cftime(dates, units: str, calendar: str) -> np.ndarray: """Fallback method for encoding dates using cftime. This method is more flexible than xarray's parsing using datetime64[ns] arrays but also slower because it loops over each element. """ if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") dates = np.asarray(dates) original_shape = dates.shape if np.issubdtype(dates.dtype, np.datetime64): # numpy's broken datetime conversion only works for us precision dates = dates.astype("M8[us]").astype(datetime) dates = np.atleast_1d(dates) # Find all the None position none_position = dates == None # noqa: E711 filtered_dates = dates[~none_position] # Since netCDF files do not support storing float128 values, we ensure # that float64 values are used by setting longdouble=False in num2date. # This try except logic can be removed when xarray's minimum version of # cftime is at least 1.6.2. 
try: encoded_nums = cftime.date2num( filtered_dates, units, calendar, longdouble=False ) except TypeError: encoded_nums = cftime.date2num(filtered_dates, units, calendar) if filtered_dates.size == none_position.size: return encoded_nums.reshape(original_shape) # Create a full matrix of NaN # And fill the num dates in the not NaN or None position result = np.full(dates.shape, np.nan) result[np.nonzero(~none_position)] = encoded_nums return result.reshape(original_shape) def cast_to_int_if_safe(num) -> np.ndarray: int_num = np.asarray(num, dtype=np.int64) if array_all(num == int_num): num = int_num return num def _division(deltas, delta, floor): if floor: # calculate int64 floor division # to preserve integer dtype if possible (GH 4045, GH7817). num = deltas // delta.astype(np.int64) num = num.astype(np.int64, copy=False) else: num = deltas / delta return num def encode_cf_datetime( dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_DuckArray, str, str]: """Given an array of datetime objects, returns the tuple `(num, units, calendar)` suitable for a CF compliant time variable. Unlike `date2num`, this function can handle datetime64 arrays. See Also -------- cftime.date2num """ dates = asarray(dates) if is_chunked_array(dates): return _lazily_encode_cf_datetime(dates, units, calendar, dtype) else: return _eagerly_encode_cf_datetime(dates, units, calendar, dtype) def _infer_needed_units_numpy(ref_date, data_units): needed_units, data_ref_date = _unpack_time_unit_and_ref_date(data_units) needed_units = _numpy_to_netcdf_timeunit(needed_units) ref_delta = abs(data_ref_date - ref_date).to_timedelta64() data_delta = _unit_timedelta_numpy(needed_units) if (ref_delta % data_delta) > np.timedelta64(0, "ns"): needed_units = _infer_time_units_from_diff(ref_delta) return needed_units def _infer_needed_units_cftime(ref_date, data_units, calendar): needed_units, data_ref_date = _unpack_time_units_and_ref_date_cftime( data_units, calendar ) ref_delta = abs(data_ref_date - ref_date) data_delta = _time_units_to_timedelta(needed_units) if (ref_delta % data_delta) > timedelta(seconds=0): needed_units = _infer_time_units_from_diff(ref_delta) return needed_units def _eagerly_encode_cf_datetime( dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, allow_units_modification: bool = True, ) -> tuple[T_DuckArray, str, str]: dates = asarray(dates) data_units = infer_datetime_units(dates) if units is None: units = data_units else: units = _cleanup_netcdf_time_units(units) if calendar is None: calendar = infer_calendar_name(dates) raise_incompatible_units_error = False raise_gregorian_proleptic_gregorian_mismatch_error = False try: if not _is_standard_calendar(calendar) or dates.dtype.kind == "O": # parse with cftime instead raise OutOfBoundsDatetime assert np.issubdtype(dates.dtype, "datetime64") if ( calendar in ["standard", "gregorian"] and dates.size > 0 and np.nanmin(dates).astype("=M8[us]").astype(datetime) < datetime(1582, 10, 15) ): raise_gregorian_proleptic_gregorian_mismatch_error = True time_unit, ref_date = _unpack_time_unit_and_ref_date(units) # calendar equivalence only for days after the reform _check_date_is_after_shift(ref_date, calendar) time_delta = np.timedelta64(1, time_unit) # Wrap the dates in a DatetimeIndex to do the subtraction to ensure # an OverflowError is raised if the ref_date is too far away from # dates to be encoded (GH 2272). 
# DatetimeIndex will convert to units of ["s", "ms", "us", "ns"] dates_as_index = pd.DatetimeIndex(ravel(dates)) time_deltas = dates_as_index - ref_date # retrieve needed units to faithfully encode to int64 needed_units = _infer_needed_units_numpy(ref_date, data_units) needed_time_delta = _unit_timedelta_numpy(needed_units) floor_division = np.issubdtype(dtype, np.integer) or dtype is None if time_delta > needed_time_delta: floor_division = False if dtype is None: emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Resolution of {needed_units!r} needed. Serializing times to floating point instead. " f"Set encoding['dtype'] to integer dtype to serialize to int64. " f"Set encoding['dtype'] to floating point dtype to silence this warning." ) elif np.issubdtype(dtype, np.integer) and allow_units_modification: new_units = f"{needed_units} since {format_timestamp(ref_date)}" emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Serializing with units {new_units!r} instead. " f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {new_units!r} to silence this warning ." ) units = new_units time_delta = needed_time_delta floor_division = True elif np.issubdtype(dtype, np.integer) and not allow_units_modification: new_units = f"{needed_units} since {format_timestamp(ref_date)}" raise_incompatible_units_error = True # get resolution of TimedeltaIndex and align time_delta # todo: check, if this works in any case num = _division( time_deltas, time_delta.astype(f"=m8[{time_deltas.unit}]"), floor_division ) num = reshape(num.values, dates.shape) except (OutOfBoundsDatetime, OverflowError, ValueError): time_units, ref_date = _unpack_time_units_and_ref_date_cftime(units, calendar) time_delta_cftime = _time_units_to_timedelta(time_units) needed_units = _infer_needed_units_cftime(ref_date, data_units, calendar) needed_time_delta_cftime = _time_units_to_timedelta(needed_units) if ( np.issubdtype(dtype, np.integer) and time_delta_cftime > needed_time_delta_cftime ): new_units = f"{needed_units} since {format_cftime_datetime(ref_date)}" if allow_units_modification: emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Serializing with units {new_units!r} instead. " f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {new_units!r} to silence this warning ." ) units = new_units else: raise_incompatible_units_error = True num = _encode_datetime_with_cftime(dates, units, calendar) # do it now only for cftime-based flow # we already covered for this in pandas-based flow num = cast_to_int_if_safe(num) if raise_incompatible_units_error: raise ValueError( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Consider setting encoding['dtype'] to a floating point dtype to serialize with " f"units {units!r}. Consider setting encoding['units'] to {new_units!r} to " f"serialize with an integer dtype." ) if raise_gregorian_proleptic_gregorian_mismatch_error: raise ValueError( f"Unable to encode np.datetime64 values with {calendar} " f"calendar, because some or all values are prior to the reform " f"date of 1582-10-15. To encode these times, set " f"encoding['calendar'] to 'proleptic_gregorian' instead, which " f"is the true calendar that np.datetime64 values use. 
The " f"'standard' or 'gregorian' calendar is only equivalent to the " f"'proleptic_gregorian' calendar after the reform date." ) return num, units, calendar def _encode_cf_datetime_within_map_blocks( dates: T_DuckArray, # type: ignore[misc] units: str, calendar: str, dtype: np.dtype, ) -> T_DuckArray: num, *_ = _eagerly_encode_cf_datetime( dates, units, calendar, dtype, allow_units_modification=False ) return num def _lazily_encode_cf_datetime( dates: T_ChunkedArray, units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_ChunkedArray, str, str]: if calendar is None: # This will only trigger minor compute if dates is an object dtype array. calendar = infer_calendar_name(dates) if units is None and dtype is None: if dates.dtype == "O": units = "microseconds since 1970-01-01" dtype = np.dtype("int64") else: netcdf_unit = _numpy_dtype_to_netcdf_timeunit(dates.dtype) units = f"{netcdf_unit} since 1970-01-01" dtype = np.dtype("int64") if units is None or dtype is None: raise ValueError( f"When encoding chunked arrays of datetime values, both the units " f"and dtype must be prescribed or both must be unprescribed. " f"Prescribing only one or the other is not currently supported. " f"Got a units encoding of {units} and a dtype encoding of {dtype}." ) chunkmanager = get_chunked_array_type(dates) num = chunkmanager.map_blocks( _encode_cf_datetime_within_map_blocks, dates, units, calendar, dtype, dtype=dtype, ) return num, units, calendar def encode_cf_timedelta( timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_DuckArray, str]: timedeltas = asarray(timedeltas) if is_chunked_array(timedeltas): return _lazily_encode_cf_timedelta(timedeltas, units, dtype) else: return _eagerly_encode_cf_timedelta(timedeltas, units, dtype) def _eagerly_encode_cf_timedelta( timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, allow_units_modification: bool = True, ) -> tuple[T_DuckArray, str]: data_units = infer_timedelta_units(timedeltas) if units is None: units = data_units # units take precedence in the case of zero-size array if timedeltas.size == 0: data_units = units time_delta = _unit_timedelta_numpy(units) time_deltas = pd.TimedeltaIndex(ravel(timedeltas)) # get resolution of TimedeltaIndex and align time_delta deltas_unit = time_deltas.unit time_delta = time_delta.astype(f"=m8[{deltas_unit}]") # retrieve needed units to faithfully encode to int64 needed_units = data_units if data_units != units: needed_units = _infer_time_units_from_diff(np.unique(time_deltas.dropna())) # needed time delta to encode faithfully to int64 needed_time_delta = _unit_timedelta_numpy(needed_units) floor_division = np.issubdtype(dtype, np.integer) or dtype is None if time_delta > needed_time_delta: floor_division = False if dtype is None: emit_user_level_warning( f"Timedeltas can't be serialized faithfully to int64 with requested units {units!r}. " f"Resolution of {needed_units!r} needed. Serializing timeseries to floating point instead. " f"Set encoding['dtype'] to integer dtype to serialize to int64. " f"Set encoding['dtype'] to floating point dtype to silence this warning." ) elif np.issubdtype(dtype, np.integer) and allow_units_modification: emit_user_level_warning( f"Timedeltas can't be serialized faithfully with requested units {units!r}. " f"Serializing with units {needed_units!r} instead. 
" f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {needed_units!r} to silence this warning ." ) units = needed_units time_delta = needed_time_delta time_delta = time_delta.astype(f"=m8[{deltas_unit}]") floor_division = True elif np.issubdtype(dtype, np.integer) and not allow_units_modification: raise ValueError( f"Timedeltas can't be serialized faithfully to int64 with requested units {units!r}. " f"Consider setting encoding['dtype'] to a floating point dtype to serialize with " f"units {units!r}. Consider setting encoding['units'] to {needed_units!r} to " f"serialize with an integer dtype." ) num = _division(time_deltas, time_delta, floor_division) num = reshape(num.values, timedeltas.shape) return num, units def _encode_cf_timedelta_within_map_blocks( timedeltas: T_DuckArray, # type: ignore[misc] units: str, dtype: np.dtype, ) -> T_DuckArray: num, _ = _eagerly_encode_cf_timedelta( timedeltas, units, dtype, allow_units_modification=False ) return num def _lazily_encode_cf_timedelta( timedeltas: T_ChunkedArray, units: str | None = None, dtype: np.dtype | None = None ) -> tuple[T_ChunkedArray, str]: if units is None and dtype is None: units = _numpy_dtype_to_netcdf_timeunit(timedeltas.dtype) dtype = np.dtype("int64") if units is None or dtype is None: raise ValueError( f"When encoding chunked arrays of timedelta values, both the " f"units and dtype must be prescribed or both must be " f"unprescribed. Prescribing only one or the other is not " f"currently supported. Got a units encoding of {units} and a " f"dtype encoding of {dtype}." ) chunkmanager = get_chunked_array_type(timedeltas) num = chunkmanager.map_blocks( _encode_cf_timedelta_within_map_blocks, timedeltas, units, dtype, dtype=dtype, ) return num, units class CFDatetimeCoder(VariableCoder): """Coder for CF Datetime coding. Parameters ---------- use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64`` objects. If False, always decode times to ``np.datetime64`` objects; if this is not possible raise an error. May not be supported by all the backends. time_unit : PDDatetimeUnitOptions Target resolution when decoding dates. Defaults to "ns". 
""" def __init__( self, use_cftime: bool | None = None, time_unit: PDDatetimeUnitOptions = "ns", ) -> None: self.use_cftime = use_cftime self.time_unit = time_unit def encode(self, variable: Variable, name: T_Name = None) -> Variable: if np.issubdtype(variable.dtype, np.datetime64) or contains_cftime_datetimes( variable ): dims, data, attrs, encoding = unpack_for_encoding(variable) units = encoding.pop("units", None) calendar = encoding.pop("calendar", None) dtype = encoding.get("dtype", None) # in the case of packed data we need to encode into # float first, the correct dtype will be established # via CFScaleOffsetCoder/CFMaskCoder if "add_offset" in encoding or "scale_factor" in encoding: dtype = data.dtype if data.dtype.kind == "f" else "float64" (data, units, calendar) = encode_cf_datetime(data, units, calendar, dtype) safe_setitem(attrs, "units", units, name=name) safe_setitem(attrs, "calendar", calendar, name=name) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: units = variable.attrs.get("units", None) if isinstance(units, str) and "since" in units: dims, data, attrs, encoding = unpack_for_decoding(variable) units = pop_to(attrs, encoding, "units") calendar = pop_to(attrs, encoding, "calendar") dtype = _decode_cf_datetime_dtype( data, units, calendar, self.use_cftime, self.time_unit ) transform = partial( decode_cf_datetime, units=units, calendar=calendar, use_cftime=self.use_cftime, time_unit=self.time_unit, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def has_timedelta64_encoding_dtype(attrs_or_encoding: dict) -> bool: dtype = attrs_or_encoding.get("dtype") return isinstance(dtype, str) and dtype.startswith("timedelta64") def resolve_time_unit_from_attrs_dtype( attrs_dtype: str, name: T_Name ) -> PDDatetimeUnitOptions: dtype = np.dtype(attrs_dtype) resolution, _ = np.datetime_data(dtype) resolution = cast(NPDatetimeUnitOptions, resolution) time_unit: PDDatetimeUnitOptions if np.timedelta64(1, resolution) > np.timedelta64(1, "s"): time_unit = "s" message = ( f"Following pandas, xarray only supports decoding to timedelta64 " f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded " f"values for variable {name!r} have a resolution of " f"{resolution!r}. Attempting to decode to a resolution of 's'. " f"Note, depending on the encoded values, this may lead to an " f"OverflowError. Additionally, data will not be identically round " f"tripped; xarray will choose an encoding dtype of " f"'timedelta64[s]' when re-encoding." ) emit_user_level_warning(message) elif np.timedelta64(1, resolution) < np.timedelta64(1, "ns"): time_unit = "ns" message = ( f"Following pandas, xarray only supports decoding to timedelta64 " f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded " f"values for variable {name!r} have a resolution of " f"{resolution!r}. Attempting to decode to a resolution of 'ns'. " f"Note, depending on the encoded values, this may lead to loss of " f"precision. Additionally, data will not be identically round " f"tripped; xarray will choose an encoding dtype of " f"'timedelta64[ns]' when re-encoding." ) emit_user_level_warning(message) else: time_unit = cast(PDDatetimeUnitOptions, resolution) return time_unit class CFTimedeltaCoder(VariableCoder): """Coder for CF Timedelta coding. 
Parameters ---------- time_unit : PDDatetimeUnitOptions Target resolution when decoding timedeltas via units. Defaults to "ns". When decoding via dtype, the resolution is specified in the dtype attribute, so this parameter is ignored. decode_via_units : bool Whether to decode timedeltas based on the presence of a timedelta-like units attribute, e.g. "seconds". Defaults to True, but in the future will default to False. decode_via_dtype : bool Whether to decode timedeltas based on the presence of an np.timedelta64 dtype attribute, e.g. "timedelta64[s]". Defaults to True. """ def __init__( self, time_unit: PDDatetimeUnitOptions | None = None, decode_via_units: bool = True, decode_via_dtype: bool = True, ) -> None: self.time_unit = time_unit self.decode_via_units = decode_via_units self.decode_via_dtype = decode_via_dtype self._emit_decode_timedelta_future_warning = False def encode(self, variable: Variable, name: T_Name = None) -> Variable: if np.issubdtype(variable.dtype, np.timedelta64): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = encoding.get("dtype", None) units = encoding.pop("units", None) # in the case of packed data we need to encode into # float first, the correct dtype will be established # via CFScaleOffsetCoder/CFMaskCoder if "add_offset" in encoding or "scale_factor" in encoding: dtype = data.dtype if data.dtype.kind == "f" else "float64" resolution, _ = np.datetime_data(variable.dtype) attrs_dtype = f"timedelta64[{resolution}]" safe_setitem(attrs, "dtype", attrs_dtype, name=name) data, units = encode_cf_timedelta(data, units, dtype) safe_setitem(attrs, "units", units, name=name) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: units = variable.attrs.get("units", None) has_timedelta_units = isinstance(units, str) and units in TIME_UNITS has_timedelta_dtype = has_timedelta64_encoding_dtype(variable.attrs) is_dtype_decodable = has_timedelta_units and has_timedelta_dtype is_units_decodable = has_timedelta_units if (is_dtype_decodable and self.decode_via_dtype) or ( is_units_decodable and self.decode_via_units ): dims, data, attrs, encoding = unpack_for_decoding(variable) units = pop_to(attrs, encoding, "units") if is_dtype_decodable: attrs_dtype = attrs.pop("dtype") if self.time_unit is None: time_unit = resolve_time_unit_from_attrs_dtype(attrs_dtype, name) else: time_unit = self.time_unit else: if self._emit_decode_timedelta_future_warning: var_string = f"the variable {name!r}" if name else "" emit_user_level_warning( "In a future version, xarray will not decode " f"{var_string} into a timedelta64 dtype based on the " "presence of a timedelta-like 'units' attribute by " "default. Instead it will rely on the presence of a " "timedelta64 'dtype' attribute, which is now xarray's " "default way of encoding timedelta64 values.\n" "To continue decoding into a timedelta64 dtype, either " "set `decode_timedelta=True` when opening this " "dataset, or add the attribute " "`dtype='timedelta64[ns]'` to this variable on disk.\n" "To opt-in to future behavior, set " "`decode_timedelta=False`.", FutureWarning, ) if self.time_unit is None: time_unit = "ns" else: time_unit = self.time_unit # Handle edge case that decode_via_dtype=False and # decode_via_units=True, and timedeltas were encoded with a # dtype attribute. We need to remove the dtype attribute # to prevent an error during round tripping. 
            if has_timedelta_dtype:
                attrs.pop("dtype")
            dtype = np.dtype(f"timedelta64[{time_unit}]")
            transform = partial(decode_cf_timedelta, units=units, time_unit=time_unit)
            data = lazy_elemwise_func(data, transform, dtype=dtype)
            return Variable(dims, data, attrs, encoding, fastpath=True)
        else:
            return variable


xarray-2025.12.0/xarray/coding/variables.py

"""Coders for individual Variable objects."""

from __future__ import annotations

import warnings
from collections.abc import Hashable, MutableMapping
from functools import partial
from typing import TYPE_CHECKING, Any, Union

import numpy as np
import pandas as pd

from xarray.coding.common import (
    SerializationWarning,
    VariableCoder,
    lazy_elemwise_func,
    pop_to,
    safe_setitem,
    unpack_for_decoding,
    unpack_for_encoding,
)
from xarray.coding.times import CFDatetimeCoder, CFTimedeltaCoder
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.types import Self
from xarray.core.variable import Variable

if TYPE_CHECKING:
    T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict]
    T_Name = Union[Hashable, None]


class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
    """Decode arrays on the fly from non-native to native endianness

    This is useful for decoding arrays from netCDF3 files (which are all big
    endian) into native endianness, so they can be used with Cython functions,
    such as those found in bottleneck and pandas.

    >>> x = np.arange(5, dtype=">i2")

    >>> x.dtype
    dtype('>i2')

    >>> NativeEndiannessArray(x).dtype
    dtype('int16')

    >>> indexer = indexing.BasicIndexer((slice(None),))
    >>> NativeEndiannessArray(x)[indexer].dtype
    dtype('int16')
    """

    __slots__ = ("array",)

    def __init__(self, array) -> None:
        self.array = indexing.as_indexable(array)

    @property
    def dtype(self) -> np.dtype:
        return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))

    def _oindex_get(self, key):
        return type(self)(self.array.oindex[key])

    def _vindex_get(self, key):
        return type(self)(self.array.vindex[key])

    def __getitem__(self, key) -> Self:
        return type(self)(self.array[key])

    def get_duck_array(self):
        return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype)

    def transpose(self, order):
        return type(self)(self.array.transpose(order))


class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
    """Decode arrays on the fly from integer to boolean datatype

    This is useful for decoding boolean arrays from integer typed netCDF
    variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype="i1") >>> x.dtype dtype('int8') >>> BoolTypeArray(x).dtype dtype('bool') >>> indexer = indexing.BasicIndexer((slice(None),)) >>> BoolTypeArray(x)[indexer].dtype dtype('bool') """ __slots__ = ("array",) def __init__(self, array) -> None: self.array = indexing.as_indexable(array) @property def dtype(self) -> np.dtype: return np.dtype("bool") def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def __getitem__(self, key) -> Self: return type(self)(self.array[key]) def get_duck_array(self): return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype) def transpose(self, order): return type(self)(self.array.transpose(order)) def _apply_mask( data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: np.typing.DTypeLike | None, ) -> np.ndarray: """Mask all matching values in a NumPy arrays.""" data = np.asarray(data, dtype=dtype) condition = False for fv in encoded_fill_values: condition |= data == fv return np.where(condition, decoded_fill_value, data) def _is_time_like(units): # test for time-like # return "datetime" for datetime-like # return "timedelta" for timedelta-like if units is None: return False time_strings = [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ] units = str(units) # to prevent detecting units like `days accumulated` as time-like # special casing for datetime-units and timedelta-units (GH-8269) if "since" in units: from xarray.coding.times import _unpack_netcdf_time_units try: _unpack_netcdf_time_units(units) except ValueError: return False return "datetime" else: return "timedelta" if any(tstr == units for tstr in time_strings) else False def _check_fill_values(attrs, name, dtype): """Check _FillValue and missing_value if available. Return dictionary with raw fill values and set with encoded fill values. Issue SerializationWarning if appropriate. 
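
    Examples
    --------
    A small illustrative sketch for a single integer fill value; the variable
    name "t" is arbitrary:

    >>> import numpy as np
    >>> attrs = {"_FillValue": -9999}
    >>> raw, encoded = _check_fill_values(attrs, "t", np.dtype("int16"))
    >>> raw
    {'_FillValue': -9999}
    >>> -9999 in encoded
    True
    >>> attrs  # the fill-value attribute has been moved out of attrs
    {}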
""" raw_fill_dict = {} for attr in ("missing_value", "_FillValue"): pop_to(attrs, raw_fill_dict, attr, name=name) encoded_fill_values = set() for k in list(raw_fill_dict): v = raw_fill_dict[k] kfill = {fv for fv in np.ravel(v) if not pd.isnull(fv)} if not kfill and np.issubdtype(dtype, np.integer): warnings.warn( f"variable {name!r} has non-conforming {k!r} " f"{v!r} defined, dropping {k!r} entirely.", SerializationWarning, stacklevel=3, ) del raw_fill_dict[k] else: encoded_fill_values |= kfill if len(encoded_fill_values) > 1: warnings.warn( f"variable {name!r} has multiple fill values " f"{encoded_fill_values} defined, decoding all values to NaN.", SerializationWarning, stacklevel=3, ) return raw_fill_dict, encoded_fill_values def _convert_unsigned_fill_value( name: T_Name, data: Any, unsigned: str, raw_fill_value: Any, encoded_fill_values: set, ) -> Any: if data.dtype.kind == "i": if unsigned == "true": unsigned_dtype = np.dtype(f"u{data.dtype.itemsize}") transform = partial(np.asarray, dtype=unsigned_dtype) if raw_fill_value is not None: new_fill = np.array(raw_fill_value, dtype=data.dtype) encoded_fill_values.remove(raw_fill_value) # use view here to prevent OverflowError encoded_fill_values.add(new_fill.view(unsigned_dtype).item()) data = lazy_elemwise_func(data, transform, unsigned_dtype) elif data.dtype.kind == "u": if unsigned == "false": signed_dtype = np.dtype(f"i{data.dtype.itemsize}") transform = partial(np.asarray, dtype=signed_dtype) data = lazy_elemwise_func(data, transform, signed_dtype) if raw_fill_value is not None: new_fill = signed_dtype.type(raw_fill_value) encoded_fill_values.remove(raw_fill_value) encoded_fill_values.add(new_fill) else: warnings.warn( f"variable {name!r} has _Unsigned attribute but is not " "of integer type. Ignoring attribute.", SerializationWarning, stacklevel=3, ) return data def _encode_unsigned_fill_value( name: T_Name, fill_value: Any, encoded_dtype: np.dtype, ) -> Any: try: if hasattr(fill_value, "item"): # if numpy type, convert to python native integer to determine overflow # otherwise numpy unsigned ints will silently cast to the signed counterpart fill_value = fill_value.item() # passes if provided fill value fits in encoded on-disk type new_fill = encoded_dtype.type(fill_value) except OverflowError: encoded_kind_str = "signed" if encoded_dtype.kind == "i" else "unsigned" warnings.warn( f"variable {name!r} will be stored as {encoded_kind_str} integers " f"but _FillValue attribute can't be represented as a " f"{encoded_kind_str} integer.", SerializationWarning, stacklevel=3, ) # user probably provided the fill as the in-memory dtype, # convert to on-disk type to match CF standard orig_kind = "u" if encoded_dtype.kind == "i" else "i" orig_dtype = np.dtype(f"{orig_kind}{encoded_dtype.itemsize}") # use view here to prevent OverflowError new_fill = np.array(fill_value, dtype=orig_dtype).view(encoded_dtype).item() return new_fill class CFMaskCoder(VariableCoder): """Mask or unmask fill values according to CF conventions.""" def __init__( self, decode_times: bool | CFDatetimeCoder = False, decode_timedelta: bool | CFTimedeltaCoder = False, ) -> None: self.decode_times = decode_times self.decode_timedelta = decode_timedelta def encode(self, variable: Variable, name: T_Name = None): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = np.dtype(encoding.get("dtype", data.dtype)) # from netCDF best practices # https://docs.unidata.ucar.edu/nug/current/best_practices.html#bp_Unsigned-Data # "_Unsigned = "true" to indicate that # integer 
data should be treated as unsigned" has_unsigned = encoding.get("_Unsigned") is not None fv = encoding.get("_FillValue") mv = encoding.get("missing_value") fill_value = None fv_exists = fv is not None mv_exists = mv is not None if not fv_exists and not mv_exists: return variable if fv_exists and mv_exists and not duck_array_ops.allclose_or_equiv(fv, mv): raise ValueError( f"Variable {name!r} has conflicting _FillValue ({fv}) and missing_value ({mv}). Cannot encode data." ) if fv_exists: # Ensure _FillValue is cast to same dtype as data's # but not for packed data if has_unsigned: encoding["_FillValue"] = _encode_unsigned_fill_value(name, fv, dtype) elif "add_offset" not in encoding and "scale_factor" not in encoding: encoding["_FillValue"] = dtype.type(fv) else: encoding["_FillValue"] = fv fill_value = pop_to(encoding, attrs, "_FillValue", name=name) if mv_exists: # try to use _FillValue, if it exists to align both values # or use missing_value and ensure it's cast to same dtype as data's # but not for packed data encoding["missing_value"] = attrs.get( "_FillValue", ( _encode_unsigned_fill_value(name, mv, dtype) if has_unsigned else ( dtype.type(mv) if "add_offset" not in encoding and "scale_factor" not in encoding else mv ) ), ) fill_value = pop_to(encoding, attrs, "missing_value", name=name) # apply fillna if fill_value is not None and not pd.isnull(fill_value): # special case DateTime to properly handle NaT if _is_time_like(attrs.get("units")): if data.dtype.kind in "iu": data = duck_array_ops.where( data != np.iinfo(np.int64).min, data, fill_value ) else: # if we have float data (data was packed prior masking) # we just fillna data = duck_array_ops.fillna(data, fill_value) # but if the fill_value is of integer type # we need to round and cast if np.array(fill_value).dtype.kind in "iu": data = duck_array_ops.astype( duck_array_ops.around(data), type(fill_value) ) else: data = duck_array_ops.fillna(data, fill_value) if fill_value is not None and has_unsigned: pop_to(encoding, attrs, "_Unsigned") # XXX: Is this actually needed? Doesn't the backend handle this? 
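            # (Illustration: casting a negative float such as -1.0 directly to an
            # unsigned dtype is undefined behaviour in C/NumPy; going through the
            # matching signed integer first, e.g. -1.0 -> int8(-1) -> uint8(255),
            # gives a well-defined result.)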
# two-stage casting to prevent undefined cast from float to unsigned int # first float -> int with corresponding itemsize # second int -> int/uint to final itemsize signed_dtype = np.dtype(f"i{data.itemsize}") data = duck_array_ops.astype( duck_array_ops.astype( duck_array_ops.around(data), signed_dtype, copy=False ), dtype, copy=False, ) attrs["_FillValue"] = fill_value return Variable(dims, data, attrs, encoding, fastpath=True) def decode(self, variable: Variable, name: T_Name = None): raw_fill_dict, encoded_fill_values = _check_fill_values( variable.attrs, name, variable.dtype ) if "_Unsigned" not in variable.attrs and not raw_fill_dict: return variable dims, data, attrs, encoding = unpack_for_decoding(variable) # Even if _Unsigned is used, retain on-disk _FillValue for attr, value in raw_fill_dict.items(): safe_setitem(encoding, attr, value, name=name) if "_Unsigned" in attrs: unsigned = pop_to(attrs, encoding, "_Unsigned") data = _convert_unsigned_fill_value( name, data, unsigned, raw_fill_dict.get("_FillValue"), encoded_fill_values, ) if encoded_fill_values: dtype: np.typing.DTypeLike decoded_fill_value: Any # in case of packed data we have to decode into float # in any case if "scale_factor" in attrs or "add_offset" in attrs: dtype, decoded_fill_value = ( _choose_float_dtype(data.dtype, attrs), np.nan, ) else: # in case of no-packing special case DateTime/Timedelta to properly # handle NaT, we need to check if time-like will be decoded # or not in further processing is_time_like = _is_time_like(attrs.get("units")) if ( (is_time_like == "datetime" and self.decode_times) or (is_time_like == "timedelta" and self.decode_timedelta) ) and data.dtype.kind in "iu": dtype = np.int64 decoded_fill_value = np.iinfo(np.int64).min else: dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype) transform = partial( _apply_mask, encoded_fill_values=encoded_fill_values, decoded_fill_value=decoded_fill_value, dtype=dtype, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) def _scale_offset_decoding( data, scale_factor, add_offset, dtype: np.typing.DTypeLike | None ): data = data.astype(dtype=dtype, copy=True) if scale_factor is not None: data *= scale_factor if add_offset is not None: data += add_offset return data def _choose_float_dtype( dtype: np.dtype, mapping: MutableMapping ) -> type[np.floating[Any]]: """Return a float dtype that can losslessly represent `dtype` values.""" # check scale/offset first to derive wanted float dtype # see https://github.com/pydata/xarray/issues/5597#issuecomment-879561954 scale_factor = mapping.get("scale_factor") add_offset = mapping.get("add_offset") if scale_factor is not None or add_offset is not None: # get the type from scale_factor/add_offset to determine # the needed floating point type if scale_factor is not None: scale_type = np.dtype(type(scale_factor)) if add_offset is not None: offset_type = np.dtype(type(add_offset)) # CF conforming, both scale_factor and add-offset are given and # of same floating point type (float32/64) if ( add_offset is not None and scale_factor is not None and offset_type == scale_type and scale_type in [np.float32, np.float64] ): # in case of int32 -> we need upcast to float64 # due to precision issues if dtype.itemsize == 4 and np.issubdtype(dtype, np.integer): return np.float64 return scale_type.type # Not CF conforming and add_offset given: # A scale factor is entirely safe (vanishing into the mantissa), # but a large integer offset could lead to loss of 
precision. # Sensitivity analysis can be tricky, so we just use a float64 # if there's any offset at all - better unoptimised than wrong! if add_offset is not None: return np.float64 # return dtype depending on given scale_factor return scale_type.type # If no scale_factor or add_offset is given, use some general rules. # Keep float32 as-is. Upcast half-precision to single-precision, # because float16 is "intended for storage but not computation" if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating): return np.float32 # float32 can exactly represent all integers up to 24 bits if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer): return np.float32 # For all other types and circumstances, we just use float64. # Todo: with nc-complex from netcdf4-python >= 1.7.0 this is available # (safe because eg. complex numbers are not supported in NetCDF) return np.float64 class CFScaleOffsetCoder(VariableCoder): """Scale and offset variables according to CF conventions. Follows the formula: decode_values = encoded_values * scale_factor + add_offset """ def __init__( self, decode_times: bool | CFDatetimeCoder = False, decode_timedelta: bool | CFTimedeltaCoder = False, ) -> None: self.decode_times = decode_times self.decode_timedelta = decode_timedelta def encode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) if "scale_factor" in encoding or "add_offset" in encoding: # if we have a _FillValue/masked_value we do not want to cast now # but leave that to CFMaskCoder dtype = data.dtype if "_FillValue" not in encoding and "missing_value" not in encoding: dtype = _choose_float_dtype(data.dtype, encoding) # but still we need a copy prevent changing original data data = duck_array_ops.astype(data, dtype=dtype, copy=True) if "add_offset" in encoding: data -= pop_to(encoding, attrs, "add_offset", name=name) if "scale_factor" in encoding: data /= pop_to(encoding, attrs, "scale_factor", name=name) return Variable(dims, data, attrs, encoding, fastpath=True) def decode(self, variable: Variable, name: T_Name = None) -> Variable: _attrs = variable.attrs if "scale_factor" in _attrs or "add_offset" in _attrs: dims, data, attrs, encoding = unpack_for_decoding(variable) scale_factor = pop_to(attrs, encoding, "scale_factor", name=name) add_offset = pop_to(attrs, encoding, "add_offset", name=name) if duck_array_ops.ndim(scale_factor) > 0: scale_factor = np.asarray(scale_factor).item() if duck_array_ops.ndim(add_offset) > 0: add_offset = np.asarray(add_offset).item() # if we have a _FillValue/masked_value in encoding we already have the wanted # floating point dtype here (via CFMaskCoder), so no check is necessary # only check in other cases and for time-like dtype = data.dtype is_time_like = _is_time_like(attrs.get("units")) if ( ("_FillValue" not in encoding and "missing_value" not in encoding) or (is_time_like == "datetime" and self.decode_times) or (is_time_like == "timedelta" and self.decode_timedelta) ): dtype = _choose_float_dtype(dtype, encoding) transform = partial( _scale_offset_decoding, scale_factor=scale_factor, add_offset=add_offset, dtype=dtype, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class DefaultFillvalueCoder(VariableCoder): """Encode default _FillValue if needed.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) # make NaN the fill value for float 
types if ( "_FillValue" not in attrs and "_FillValue" not in encoding and np.issubdtype(variable.dtype, np.floating) ): attrs["_FillValue"] = variable.dtype.type(np.nan) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: raise NotImplementedError() class BooleanCoder(VariableCoder): """Code boolean values.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if ( (variable.dtype == bool) and ("dtype" not in variable.encoding) and ("dtype" not in variable.attrs) ): dims, data, attrs, encoding = unpack_for_encoding(variable) attrs["dtype"] = "bool" data = duck_array_ops.astype(data, dtype="i1", copy=True) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.attrs.get("dtype", False) == "bool": dims, data, attrs, encoding = unpack_for_decoding(variable) # overwrite (!) dtype in encoding, and remove from attrs # needed for correct subsequent encoding encoding["dtype"] = attrs.pop("dtype") data = BoolTypeArray(data) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class EndianCoder(VariableCoder): """Decode Endianness to native.""" def encode(self): raise NotImplementedError() def decode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_decoding(variable) if not data.dtype.isnative: data = NativeEndiannessArray(data) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class NonStringCoder(VariableCoder): """Encode NonString variables if dtypes differ.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if "dtype" in variable.encoding and variable.encoding["dtype"] not in ( "S1", str, ): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = np.dtype(encoding.pop("dtype")) if dtype != variable.dtype: if np.issubdtype(dtype, np.integer): if ( np.issubdtype(variable.dtype, np.floating) and "_FillValue" not in variable.attrs and "missing_value" not in variable.attrs ): warnings.warn( f"saving variable {name} with floating " "point data as an integer dtype without " "any _FillValue to use for NaNs", SerializationWarning, stacklevel=10, ) data = duck_array_ops.round(data) data = duck_array_ops.astype(data, dtype=dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self): raise NotImplementedError() class ObjectVLenStringCoder(VariableCoder): def encode(self): raise NotImplementedError def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.dtype.kind == "O" and variable.encoding.get("dtype", False) is str: variable = variable.astype(variable.encoding["dtype"]) return variable else: return variable class Numpy2StringDTypeCoder(VariableCoder): # Convert Numpy 2 StringDType arrays to object arrays for backwards compatibility # TODO: remove this if / when we decide to allow StringDType arrays in Xarray def encode(self): raise NotImplementedError def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.dtype.kind == "T": return variable.astype(object) else: return variable class NativeEnumCoder(VariableCoder): """Encode Enum into variable dtype metadata.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if ( "dtype" in variable.encoding and np.dtype(variable.encoding["dtype"]).metadata and "enum" in 
variable.encoding["dtype"].metadata ): dims, data, attrs, encoding = unpack_for_encoding(variable) data = data.astype(dtype=variable.encoding.pop("dtype")) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: raise NotImplementedError() �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/���������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0016134�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/__init__.py����������������������������������������������������������0000664�0000000�0000000�00000000000�15114646760�0020233�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/array_api_compat.py��������������������������������������������������0000664�0000000�0000000�00000004767�15114646760�0022036�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import numpy as np from xarray.namedarray.pycompat import array_type def is_weak_scalar_type(t): return isinstance(t, bool | int | float | complex | str | bytes) def _future_array_api_result_type(*arrays_and_dtypes, xp): # fallback implementation for `xp.result_type` with python scalars. 
Can be removed once a # version of the Array API that includes https://github.com/data-apis/array-api/issues/805 # can be required strongly_dtyped = [t for t in arrays_and_dtypes if not is_weak_scalar_type(t)] weakly_dtyped = [t for t in arrays_and_dtypes if is_weak_scalar_type(t)] if not strongly_dtyped: strongly_dtyped = [ xp.asarray(x) if not isinstance(x, type) else x for x in weakly_dtyped ] weakly_dtyped = [] dtype = xp.result_type(*strongly_dtyped) if not weakly_dtyped: return dtype possible_dtypes = { complex: "complex64", float: "float32", int: "int8", bool: "bool", str: "str", bytes: "bytes", } dtypes = [possible_dtypes.get(type(x), "object") for x in weakly_dtyped] return xp.result_type(dtype, *dtypes) def result_type(*arrays_and_dtypes, xp) -> np.dtype: if xp is np or any( isinstance(getattr(t, "dtype", t), np.dtype) for t in arrays_and_dtypes ): return xp.result_type(*arrays_and_dtypes) else: return _future_array_api_result_type(*arrays_and_dtypes, xp=xp) def get_array_namespace(*values): def _get_single_namespace(x): if hasattr(x, "__array_namespace__"): return x.__array_namespace__() elif isinstance(x, array_type("cupy")): # cupy is fully compliant from xarray's perspective, but will not expose # __array_namespace__ until at least v14. Special case it for now import cupy as cp return cp else: return np namespaces = {_get_single_namespace(t) for t in values} non_numpy = namespaces - {np} if len(non_numpy) > 1: names = [module.__name__ for module in non_numpy] raise TypeError(f"Mixed array types {names} are not supported.") elif non_numpy: [xp] = non_numpy else: xp = np return xp def to_like_array(array, like): # Mostly for cupy compatibility, because cupy binary ops require all cupy arrays xp = get_array_namespace(like) if xp is not np: return xp.asarray(array) # avoid casting things like pint quantities to numpy arrays return array ���������xarray-2025.12.0/xarray/compat/dask_array_compat.py�������������������������������������������������0000664�0000000�0000000�00000002032�15114646760�0022166�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from typing import Any from xarray.namedarray.utils import module_available def reshape_blockwise( x: Any, shape: int | tuple[int, ...], chunks: tuple[tuple[int, ...], ...] 
| None = None, ): if module_available("dask", "2024.08.2"): from dask.array import reshape_blockwise return reshape_blockwise(x, shape=shape, chunks=chunks) else: return x.reshape(shape) def sliding_window_view( x, window_shape, axis=None, *, automatic_rechunk=True, **kwargs ): # Backcompat for handling `automatic_rechunk`, delete when dask>=2024.11.0 # Note that subok, writeable are unsupported by dask, so we ignore those in kwargs from dask.array.lib.stride_tricks import sliding_window_view if module_available("dask", "2024.11.0"): return sliding_window_view( x, window_shape=window_shape, axis=axis, automatic_rechunk=automatic_rechunk ) else: # automatic_rechunk is not supported return sliding_window_view(x, window_shape=window_shape, axis=axis) ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/dask_array_ops.py����������������������������������������������������0000664�0000000�0000000�00000011022�15114646760�0021503�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import math from xarray.compat.dask_array_compat import reshape_blockwise from xarray.core import dtypes, nputils def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1): """Wrapper to apply bottleneck moving window funcs on dask arrays""" dtype, _ = dtypes.maybe_promote(a.dtype) return a.data.map_overlap( moving_func, depth={axis: (window - 1, 0)}, axis=axis, dtype=dtype, window=window, min_count=min_count, ) def least_squares(lhs, rhs, rcond=None, skipna=False): import dask.array as da # The trick here is that the core dimension is axis 0. # All other dimensions need to be reshaped down to one axis for `lstsq` # (which only accepts 2D input) # and this needs to be undone after running `lstsq` # The order of values in the reshaped axes is irrelevant. # There are big gains to be had by simply reshaping the blocks on a blockwise # basis, and then undoing that transform. # We use a specific `reshape_blockwise` method in dask for this optimization if rhs.ndim > 2: out_shape = rhs.shape reshape_chunks = rhs.chunks rhs = reshape_blockwise(rhs, (rhs.shape[0], math.prod(rhs.shape[1:]))) else: out_shape = None lhs_da = da.from_array(lhs, chunks=(rhs.chunks[0], lhs.shape[1])) if skipna: added_dim = rhs.ndim == 1 if added_dim: rhs = rhs.reshape(rhs.shape[0], 1) results = da.apply_along_axis( nputils._nanpolyfit_1d, 0, rhs, lhs_da, dtype=float, shape=(lhs.shape[1] + 1,), rcond=rcond, ) coeffs = results[:-1, ...] residuals = results[-1, ...] 
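# Illustrative sketch: the comment in ``least_squares`` above describes
# collapsing every non-core dimension onto a single trailing axis so that
# ``lstsq`` (which only accepts 2-D input) can be used, then undoing the
# reshape afterwards.  A NumPy-only version of that reshape-solve-reshape
# pattern, with made-up shapes:
import numpy as np

rng = np.random.default_rng(0)
lhs = rng.standard_normal((10, 3))  # (N, order) design matrix
rhs = rng.standard_normal((10, 4, 5))  # (N, y, x) stack of targets

out_shape = rhs.shape
rhs_2d = rhs.reshape(out_shape[0], -1)  # (N, y*x)

coeffs_2d, _, _, _ = np.linalg.lstsq(lhs, rhs_2d, rcond=None)  # (order, y*x)
coeffs = coeffs_2d.reshape(lhs.shape[1], *out_shape[1:])  # (order, y, x)

assert coeffs.shape == (3, 4, 5)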
if added_dim: coeffs = coeffs.reshape(coeffs.shape[0]) residuals = residuals.reshape(residuals.shape[0]) else: # Residuals here are (1, 1) but should be (K,) as rhs is (N, K) # See issue dask/dask#6516 coeffs, residuals, _, _ = da.linalg.lstsq(lhs_da, rhs) if out_shape is not None: coeffs = reshape_blockwise( coeffs, shape=(coeffs.shape[0], *out_shape[1:]), chunks=((coeffs.shape[0],), *reshape_chunks[1:]), ) residuals = reshape_blockwise( residuals, shape=out_shape[1:], chunks=reshape_chunks[1:] ) return coeffs, residuals def _fill_with_last_one(a, b): import numpy as np # cumreduction apply the push func over all the blocks first so, # the only missing part is filling the missing values using the # last data of the previous chunk return np.where(np.isnan(b), a, b) def _dtype_push(a, axis, dtype=None): from xarray.core.duck_array_ops import _push # Not sure why the blelloch algorithm force to receive a dtype return _push(a, axis=axis) def push(array, n, axis, method="blelloch"): """ Dask-aware bottleneck.push """ import dask.array as da import numpy as np from xarray.core.duck_array_ops import _push from xarray.core.nputils import nanlast if n is not None and all(n <= size for size in array.chunks[axis]): return array.map_overlap(_push, depth={axis: (n, 0)}, n=n, axis=axis) # TODO: Replace all this function # once https://github.com/pydata/xarray/issues/9229 being implemented pushed_array = da.reductions.cumreduction( func=_dtype_push, binop=_fill_with_last_one, ident=np.nan, x=array, axis=axis, dtype=array.dtype, method=method, preop=nanlast, ) if n is not None and 0 < n < array.shape[axis] - 1: # The idea is to calculate a cumulative sum of a bitmask # created from the isnan method, but every time a False is found the sum # must be restarted, and the final result indicates the amount of contiguous # nan values found in the original array on every position nan_bitmask = da.isnan(array, dtype=int) cumsum_nan = nan_bitmask.cumsum(axis=axis, method=method) valid_positions = da.where(nan_bitmask == 0, cumsum_nan, np.nan) valid_positions = push(valid_positions, None, axis, method=method) # All the NaNs at the beginning are converted to 0 valid_positions = da.nan_to_num(valid_positions) valid_positions = cumsum_nan - valid_positions valid_positions = valid_positions <= n pushed_array = da.where(valid_positions, pushed_array, np.nan) return pushed_array ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/npcompat.py����������������������������������������������������������0000664�0000000�0000000�00000006314�15114646760�0020333�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Copyright (c) 2005-2011, NumPy Developers. # All rights reserved. 
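# Illustrative sketch: the tail of ``push`` above limits a forward fill to at
# most ``n`` consecutive NaNs by counting, at each position, how many NaNs
# have been seen since the last valid value.  A 1-D NumPy-only version of
# that counting idea (the real implementation does the same thing blockwise
# with dask primitives):
import numpy as np

def nan_run_length(a):
    isnan = np.isnan(a).astype(int)  # 1 where NaN, 0 where valid
    csum = np.cumsum(isnan)
    # index of the most recent valid position (leading NaNs fall back to 0)
    last_valid_idx = np.maximum.accumulate(
        np.where(isnan == 0, np.arange(a.size), 0)
    )
    # cumulative count at that valid position, used as a baseline
    baseline = np.nan_to_num(np.where(isnan == 0, csum, np.nan)[last_valid_idx])
    return csum - baseline  # contiguous NaN count at each position

a = np.array([np.nan, 1.0, np.nan, np.nan, np.nan, 2.0])
print(nan_run_length(a))  # -> [1. 0. 1. 2. 3. 0.]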
# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the NumPy Developers nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations from typing import Any try: # requires numpy>=2.0 from numpy import isdtype # type: ignore[attr-defined,unused-ignore] HAS_STRING_DTYPE = True except ImportError: import numpy as np from numpy.typing import DTypeLike kind_mapping = { "bool": np.bool_, "signed integer": np.signedinteger, "unsigned integer": np.unsignedinteger, "integral": np.integer, "real floating": np.floating, "complex floating": np.complexfloating, "numeric": np.number, } def isdtype( # type: ignore[misc] dtype: np.dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...] 
) -> bool: kinds = kind if isinstance(kind, tuple) else (kind,) str_kinds = {k for k in kinds if isinstance(k, str)} type_kinds = {k.type for k in kinds if isinstance(k, np.dtype)} if unknown_kind_types := set(kinds) - str_kinds - type_kinds: raise TypeError( f"kind must be str, np.dtype or a tuple of these, got {unknown_kind_types}" ) if unknown_kinds := {k for k in str_kinds if k not in kind_mapping}: raise ValueError( f"unknown kind: {unknown_kinds}, must be a np.dtype or one of {list(kind_mapping)}" ) # verified the dtypes already, no need to check again translated_kinds = {kind_mapping[k] for k in str_kinds} | type_kinds if isinstance(dtype, np.generic): return isinstance(dtype, translated_kinds) else: return any(np.issubdtype(dtype, k) for k in translated_kinds) HAS_STRING_DTYPE = False ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/pdcompat.py����������������������������������������������������������0000664�0000000�0000000�00000006622�15114646760�0020323�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# For reference, here is a copy of the pandas copyright notice: # BSD 3-Clause License # Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2011-2025, Open source contributors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
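# Illustrative sketch: whether the native NumPy >= 2 ``np.isdtype`` or the
# fallback defined above in npcompat ends up being used, the calls below
# behave the same way -- string kinds name groups of dtypes, and several
# kinds may be passed as a tuple.  (Assumes the xarray source tree shown
# here is importable.)
import numpy as np

from xarray.compat.npcompat import isdtype

assert isdtype(np.dtype("int32"), "integral")
assert isdtype(np.dtype("float64"), ("real floating", "complex floating"))
assert not isdtype(np.dtype("bool"), "numeric")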
from __future__ import annotations from enum import Enum from typing import Literal import pandas as pd from xarray.core.types import PDDatetimeUnitOptions def count_not_none(*args) -> int: """Compute the number of non-None arguments. Copied from pandas.core.common.count_not_none (not part of the public API) """ return sum(arg is not None for arg in args) class _NoDefault(Enum): """Used by pandas to specify a default value for a deprecated argument. Copied from pandas._libs.lib._NoDefault. See also: - pandas-dev/pandas#30788 - pandas-dev/pandas#40684 - pandas-dev/pandas#40715 - pandas-dev/pandas#47045 """ no_default = "NO_DEFAULT" def __repr__(self) -> str: return "" no_default = ( _NoDefault.no_default ) # Sentinel indicating the default value following pandas NoDefault = Literal[_NoDefault.no_default] # For typing following pandas def timestamp_as_unit(date: pd.Timestamp, unit: PDDatetimeUnitOptions) -> pd.Timestamp: """Convert the underlying int64 representation to the given unit. Compatibility function for pandas issue where "as_unit" is not defined for pandas.Timestamp in pandas versions < 2.2. Can be removed minimum pandas version is >= 2.2. """ if hasattr(date, "as_unit"): date = date.as_unit(unit) elif hasattr(date, "_as_unit"): date = date._as_unit(unit) return date def default_precision_timestamp(*args, **kwargs) -> pd.Timestamp: """Return a Timestamp object with the default precision. Xarray default is "ns". """ dt = pd.Timestamp(*args, **kwargs) if dt.unit != "ns": dt = timestamp_as_unit(dt, "ns") return dt ��������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/compat/toolzcompat.py�������������������������������������������������������0000664�0000000�0000000�00000004416�15114646760�0021066�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# This file contains functions copied from the toolz library in accordance # with its license. The original copyright notice is duplicated below. # Copyright (c) 2013 Matthew Rocklin # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # a. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # b. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # c. Neither the name of toolz nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
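# Illustrative sketch: ``default_precision_timestamp`` above normalises
# whatever resolution pandas picks for a Timestamp to xarray's default
# nanosecond unit (recent pandas often infers a coarser unit, e.g. seconds,
# for plain date strings).  Assumes the xarray source tree shown here is
# importable.
import pandas as pd

from xarray.compat.pdcompat import default_precision_timestamp

ts = default_precision_timestamp("2000-01-01")
assert ts.unit == "ns"
assert ts == pd.Timestamp("2000-01-01")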
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. def sliding_window(n, seq): """A sequence of overlapping subsequences >>> list(sliding_window(2, [1, 2, 3, 4])) [(1, 2), (2, 3), (3, 4)] This function creates a sliding window suitable for transformations like sliding means / smoothing >>> mean = lambda seq: float(sum(seq)) / len(seq) >>> list(map(mean, sliding_window(2, [1, 2, 3, 4]))) [1.5, 2.5, 3.5] """ import collections import itertools return zip( *( collections.deque(itertools.islice(it, i), 0) or it for i, it in enumerate(itertools.tee(seq, n)) ), strict=False, ) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/����������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0017213�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/__init__.py�����������������������������������������������������0000664�0000000�0000000�00000000000�15114646760�0021312�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/apply_ufunc.py��������������������������������������������������0000664�0000000�0000000�00000134610�15114646760�0022117�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Functions for applying functions that act on arrays to xarray's labeled data. 
""" from __future__ import annotations import functools import itertools import operator import warnings from collections import Counter from collections.abc import ( Callable, Hashable, Iterable, Iterator, Mapping, Sequence, ) from collections.abc import ( Set as AbstractSet, ) from typing import TYPE_CHECKING, Any, Literal import numpy as np from xarray.core import duck_array_ops, utils from xarray.core.formatting import limit_lines from xarray.core.indexes import Index, filter_indexes_from_coords from xarray.core.options import _get_keep_attrs from xarray.core.utils import is_dict_like, result_name from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import deep_align from xarray.structure.merge import merge_attrs, merge_coordinates_without_align if TYPE_CHECKING: from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import CombineAttrsOptions, JoinOptions MissingCoreDimOptions = Literal["raise", "copy", "drop"] _NO_FILL_VALUE = utils.ReprObject("") _JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"}) def _first_of_type(args, kind): """Return either first object of type 'kind' or raise if not found.""" for arg in args: if isinstance(arg, kind): return arg raise ValueError("This should be unreachable.") def _all_of_type(args, kind): """Return all objects of type 'kind'""" return [arg for arg in args if isinstance(arg, kind)] class _UFuncSignature: """Core dimensions signature for a given function. Based on the signature provided by generalized ufuncs in NumPy. Attributes ---------- input_core_dims : tuple[tuple, ...] Core dimension names on each input variable. output_core_dims : tuple[tuple, ...] Core dimension names on each output variable. 
""" __slots__ = ( "_all_core_dims", "_all_input_core_dims", "_all_output_core_dims", "input_core_dims", "output_core_dims", ) def __init__(self, input_core_dims, output_core_dims=((),)): self.input_core_dims = tuple(tuple(a) for a in input_core_dims) self.output_core_dims = tuple(tuple(a) for a in output_core_dims) self._all_input_core_dims = None self._all_output_core_dims = None self._all_core_dims = None @property def all_input_core_dims(self): if self._all_input_core_dims is None: self._all_input_core_dims = frozenset( dim for dims in self.input_core_dims for dim in dims ) return self._all_input_core_dims @property def all_output_core_dims(self): if self._all_output_core_dims is None: self._all_output_core_dims = frozenset( dim for dims in self.output_core_dims for dim in dims ) return self._all_output_core_dims @property def all_core_dims(self): if self._all_core_dims is None: self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims return self._all_core_dims @property def dims_map(self): return { core_dim: f"dim{n}" for n, core_dim in enumerate(sorted(self.all_core_dims)) } @property def num_inputs(self): return len(self.input_core_dims) @property def num_outputs(self): return len(self.output_core_dims) def __eq__(self, other): try: return ( self.input_core_dims == other.input_core_dims and self.output_core_dims == other.output_core_dims ) except AttributeError: return False def __ne__(self, other): return not self == other def __repr__(self): return f"{type(self).__name__}({list(self.input_core_dims)!r}, {list(self.output_core_dims)!r})" def __str__(self): comma_separated = ",".join lhs = comma_separated( f"({comma_separated(dims)})" for dims in self.input_core_dims ) rhs = comma_separated( f"({comma_separated(dims)})" for dims in self.output_core_dims ) return f"{lhs}->{rhs}" def to_gufunc_string(self, exclude_dims=frozenset()): """Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers. Also creates unique names for input_core_dims contained in exclude_dims. """ input_core_dims = [ [self.dims_map[dim] for dim in core_dims] for core_dims in self.input_core_dims ] output_core_dims = [ [self.dims_map[dim] for dim in core_dims] for core_dims in self.output_core_dims ] # enumerate input_core_dims contained in exclude_dims to make them unique if exclude_dims: exclude_dims = [self.dims_map[dim] for dim in exclude_dims] counter: Counter = Counter() def _enumerate(dim): if dim in exclude_dims: n = counter[dim] counter.update([dim]) dim = f"{dim}_{n}" return dim input_core_dims = [ [_enumerate(dim) for dim in arg] for arg in input_core_dims ] alt_signature = type(self)(input_core_dims, output_core_dims) return str(alt_signature) def _get_coords_list(args: Iterable[Any]) -> list[Coordinates]: coords_list = [] for arg in args: try: coords = arg.coords except AttributeError: pass # skip this argument else: coords_list.append(coords) return coords_list def build_output_coords_and_indexes( args: Iterable[Any], signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset(), combine_attrs: CombineAttrsOptions = "override", ) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]: """Build output coordinates and indexes for an operation. Parameters ---------- args : Iterable List of raw operation arguments. Any valid types for xarray operations are OK, e.g., scalars, Variable, DataArray, Dataset. signature : _UfuncSignature Core dimensions signature for the operation. 
exclude_dims : set, optional Dimensions excluded from the operation. Coordinates along these dimensions are dropped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "drop" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- Dictionaries of Variable and Index objects with merged coordinates. """ coords_list = _get_coords_list(args) if len(coords_list) == 1 and not exclude_dims: # we can skip the expensive merge (unpacked_coords,) = coords_list merged_vars = dict(unpacked_coords.variables) merged_indexes = dict(unpacked_coords.xindexes) else: merged_vars, merged_indexes = merge_coordinates_without_align( coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs ) output_coords = [] output_indexes = [] for output_dims in signature.output_core_dims: dropped_dims = signature.all_input_core_dims - set(output_dims) if dropped_dims: filtered_coords = { k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims) } filtered_indexes = filter_indexes_from_coords( merged_indexes, set(filtered_coords) ) else: filtered_coords = merged_vars filtered_indexes = merged_indexes output_coords.append(filtered_coords) output_indexes.append(filtered_indexes) return output_coords, output_indexes def apply_dataarray_vfunc( func, *args, signature: _UFuncSignature, join: JoinOptions = "inner", exclude_dims=frozenset(), keep_attrs="override", ) -> tuple[DataArray, ...] | DataArray: """Apply a variable level function over DataArray, Variable and/or ndarray objects. """ from xarray.core.dataarray import DataArray if len(args) > 1: args = tuple( deep_align( args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False, ) ) objs = _all_of_type(args, DataArray) if keep_attrs == "drop": name = result_name(args) else: first_obj = _first_of_type(args, DataArray) name = first_obj.name result_coords, result_indexes = build_output_coords_and_indexes( args, signature, exclude_dims, combine_attrs=keep_attrs ) data_vars = [getattr(a, "variable", a) for a in args] result_var = func(*data_vars) out: tuple[DataArray, ...] 
| DataArray if signature.num_outputs > 1: out = tuple( DataArray( variable, coords=coords, indexes=indexes, name=name, fastpath=True ) for variable, coords, indexes in zip( result_var, result_coords, result_indexes, strict=True ) ) else: (coords,) = result_coords (indexes,) = result_indexes out = DataArray( result_var, coords=coords, indexes=indexes, name=name, fastpath=True ) attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs) if isinstance(out, tuple): for da in out: da.attrs = attrs else: out.attrs = attrs return out def ordered_set_union(all_keys: list[Iterable]) -> Iterable: return {key: None for keys in all_keys for key in keys}.keys() def ordered_set_intersection(all_keys: list[Iterable]) -> Iterable: intersection = set(all_keys[0]) for keys in all_keys[1:]: intersection.intersection_update(keys) return [key for key in all_keys[0] if key in intersection] def assert_and_return_exact_match(all_keys): first_keys = all_keys[0] for keys in all_keys[1:]: if keys != first_keys: raise ValueError( "exact match required for all data variable names, " f"but {list(keys)} != {list(first_keys)}: {set(keys) ^ set(first_keys)} are not in both." ) return first_keys _JOINERS: dict[str, Callable] = { "inner": ordered_set_intersection, "outer": ordered_set_union, "left": operator.itemgetter(0), "right": operator.itemgetter(-1), "exact": assert_and_return_exact_match, } def join_dict_keys(objects: Iterable[Mapping | Any], how: str = "inner") -> Iterable: joiner = _JOINERS[how] all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")] return joiner(all_keys) def collect_dict_values( objects: Iterable[Mapping | Any], keys: Iterable, fill_value: object = None ) -> list[list]: return [ [obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects] for key in keys ] def _as_variables_or_variable(arg) -> Variable | tuple[Variable]: try: return arg.variables except AttributeError: try: return arg.variable except AttributeError: return arg def _unpack_dict_tuples( result_vars: Mapping[Any, tuple[Variable, ...]], num_outputs: int ) -> tuple[dict[Hashable, Variable], ...]: out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs)) for name, values in result_vars.items(): for value, results_dict in zip(values, out, strict=True): results_dict[name] = value return out def _check_core_dims(signature, variable_args, name): """ Check if an arg has all the core dims required by the signature. Slightly awkward design, of returning the error message. But we want to give a detailed error message, which requires inspecting the variable in the inner loop. """ missing = [] for i, (core_dims, variable_arg) in enumerate( zip(signature.input_core_dims, variable_args, strict=True) ): # Check whether all the dims are on the variable. Note that we need the # `hasattr` to check for a dims property, to protect against the case where # a numpy array is passed in. if hasattr(variable_arg, "dims") and set(core_dims) - set(variable_arg.dims): missing += [[i, variable_arg, core_dims]] if missing: message = "" for i, variable_arg, core_dims in missing: message += f"Missing core dims {set(core_dims) - set(variable_arg.dims)} from arg number {i + 1} on a variable named `{name}`:\n{variable_arg}\n\n" message += "Either add the core dimension, or if passing a dataset alternatively pass `on_missing_core_dim` as `copy` or `drop`. 
" return message return True def apply_dict_of_variables_vfunc( func, *args, signature: _UFuncSignature, join="inner", fill_value=None, on_missing_core_dim: MissingCoreDimOptions = "raise", ): """Apply a variable level function over dicts of DataArray, DataArray, Variable and ndarray objects. """ args = tuple(_as_variables_or_variable(arg) for arg in args) names = join_dict_keys(args, how=join) grouped_by_name = collect_dict_values(args, names, fill_value) result_vars = {} for name, variable_args in zip(names, grouped_by_name, strict=True): core_dim_present = _check_core_dims(signature, variable_args, name) if core_dim_present is True: result_vars[name] = func(*variable_args) elif on_missing_core_dim == "raise": raise ValueError(core_dim_present) elif on_missing_core_dim == "copy": result_vars[name] = variable_args[0] elif on_missing_core_dim == "drop": pass else: raise ValueError( f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}" ) if signature.num_outputs > 1: return _unpack_dict_tuples(result_vars, signature.num_outputs) else: return result_vars def _fast_dataset( variables: dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable], indexes: dict[Hashable, Index], ) -> Dataset: """Create a dataset as quickly as possible. Beware: the `variables` dict is modified INPLACE. """ from xarray.core.dataset import Dataset variables.update(coord_variables) coord_names = set(coord_variables) return Dataset._construct_direct(variables, coord_names, indexes=indexes) def apply_dataset_vfunc( func, *args, signature: _UFuncSignature, join="inner", dataset_join="exact", fill_value=_NO_FILL_VALUE, exclude_dims=frozenset(), keep_attrs="override", on_missing_core_dim: MissingCoreDimOptions = "raise", ) -> Dataset | tuple[Dataset, ...]: """Apply a variable level function over Dataset, dict of DataArray, DataArray, Variable and/or ndarray objects. """ from xarray.core.dataset import Dataset if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE: raise TypeError( "to apply an operation to datasets with different " "data variables with apply_ufunc, you must supply the " "dataset_fill_value argument." ) objs = _all_of_type(args, Dataset) if len(args) > 1: args = tuple( deep_align( args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False, ) ) list_of_coords, list_of_indexes = build_output_coords_and_indexes( args, signature, exclude_dims, combine_attrs=keep_attrs ) args = tuple(getattr(arg, "data_vars", arg) for arg in args) result_vars = apply_dict_of_variables_vfunc( func, *args, signature=signature, join=dataset_join, fill_value=fill_value, on_missing_core_dim=on_missing_core_dim, ) out: Dataset | tuple[Dataset, ...] 
if signature.num_outputs > 1: out = tuple( itertools.starmap( _fast_dataset, zip(result_vars, list_of_coords, list_of_indexes, strict=True), ) ) else: (coord_vars,) = list_of_coords (indexes,) = list_of_indexes out = _fast_dataset(result_vars, coord_vars, indexes=indexes) attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs) if isinstance(out, tuple): for ds in out: ds.attrs = attrs else: out.attrs = attrs return out def _iter_over_selections(obj, dim, values): """Iterate over selections of an xarray object in the provided order.""" from xarray.core.groupby import _dummy_copy dummy = None for value in values: try: obj_sel = obj.sel(**{dim: value}) except (KeyError, IndexError): if dummy is None: dummy = _dummy_copy(obj) obj_sel = dummy yield obj_sel def apply_groupby_func(func, *args): """Apply a dataset or datarray level function over GroupBy, Dataset, DataArray, Variable and/or ndarray objects. """ from xarray.core.groupby import GroupBy, peek_at groupbys = [arg for arg in args if isinstance(arg, GroupBy)] assert groupbys, "must have at least one groupby to iterate over" first_groupby = groupbys[0] (grouper,) = first_groupby.groupers if any(not grouper.group.equals(gb.groupers[0].group) for gb in groupbys[1:]): # type: ignore[union-attr] raise ValueError( "apply_ufunc can only perform operations over " "multiple GroupBy objects at once if they are all " "grouped the same way" ) grouped_dim = grouper.name unique_values = grouper.unique_coord.values iterators = [] for arg in args: iterator: Iterator[Any] if isinstance(arg, GroupBy): iterator = (value for _, value in arg) elif hasattr(arg, "dims") and grouped_dim in arg.dims: if isinstance(arg, Variable): raise ValueError( "groupby operations cannot be performed with " "xarray.Variable objects that share a dimension with " "the grouped dimension" ) iterator = _iter_over_selections(arg, grouped_dim, unique_values) else: iterator = itertools.repeat(arg) iterators.append(iterator) applied: Iterator = itertools.starmap(func, zip(*iterators, strict=False)) applied_example, applied = peek_at(applied) combine = first_groupby._combine # type: ignore[attr-defined] if isinstance(applied_example, tuple): combined = tuple(combine(output) for output in zip(*applied, strict=True)) else: combined = combine(applied) return combined def unified_dim_sizes( variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset() ) -> dict[Hashable, int]: dim_sizes: dict[Hashable, int] = {} for var in variables: if len(set(var.dims)) < len(var.dims): raise ValueError( "broadcasting cannot handle duplicate " f"dimensions on a variable: {list(var.dims)}" ) for dim, size in zip(var.dims, var.shape, strict=True): if dim not in exclude_dims: if dim not in dim_sizes: dim_sizes[dim] = size elif dim_sizes[dim] != size: raise ValueError( "operands cannot be broadcast together " "with mismatched lengths for dimension " f"{dim}: {dim_sizes[dim]} vs {size}" ) return dim_sizes SLICE_NONE = slice(None) def broadcast_compat_data( variable: Variable, broadcast_dims: tuple[Hashable, ...], core_dims: tuple[Hashable, ...], ) -> Any: data = variable.data old_dims = variable.dims new_dims = broadcast_dims + core_dims if new_dims == old_dims: # optimize for the typical case return data set_old_dims = set(old_dims) set_new_dims = set(new_dims) unexpected_dims = [d for d in old_dims if d not in set_new_dims] if unexpected_dims: raise ValueError( "operand to apply_ufunc encountered unexpected " f"dimensions {unexpected_dims!r} on an input variable: these are core " 
"dimensions on other input or output variables" ) # for consistency with numpy, keep broadcast dimensions to the left old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims) reordered_dims = old_broadcast_dims + core_dims if reordered_dims != old_dims: order = tuple(old_dims.index(d) for d in reordered_dims) data = duck_array_ops.transpose(data, order) if new_dims != reordered_dims: key_parts: list[slice | None] = [] for dim in new_dims: if dim in set_old_dims: key_parts.append(SLICE_NONE) elif key_parts: # no need to insert new axes at the beginning that are already # handled by broadcasting key_parts.append(np.newaxis) data = data[tuple(key_parts)] return data def _vectorize(func, signature, output_dtypes, exclude_dims): if signature.all_core_dims: func = np.vectorize( func, otypes=output_dtypes, signature=signature.to_gufunc_string(exclude_dims), ) else: func = np.vectorize(func, otypes=output_dtypes) return func def apply_variable_ufunc( func, *args, signature: _UFuncSignature, exclude_dims=frozenset(), dask="forbidden", output_dtypes=None, vectorize=False, keep_attrs="override", dask_gufunc_kwargs=None, ) -> Variable | tuple[Variable, ...]: """Apply an ndarray level function over Variable and/or ndarray objects.""" from xarray.core.formatting import short_array_repr from xarray.core.variable import as_compatible_data dim_sizes = unified_dim_sizes( (a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims ) broadcast_dims = tuple( dim for dim in dim_sizes if dim not in signature.all_core_dims ) output_dims = [broadcast_dims + out for out in signature.output_core_dims] input_data = [ ( broadcast_compat_data(arg, broadcast_dims, core_dims) if isinstance(arg, Variable) else arg ) for arg, core_dims in zip(args, signature.input_core_dims, strict=True) ] if any(is_chunked_array(array) for array in input_data): if dask == "forbidden": raise ValueError( "apply_ufunc encountered a chunked array on an " "argument, but handling for chunked arrays has not " "been enabled. Either set the ``dask`` argument " "or load your data into memory first with " "``.load()`` or ``.compute()``" ) elif dask == "parallelized": chunkmanager = get_chunked_array_type(*input_data) numpy_func = func if dask_gufunc_kwargs is None: dask_gufunc_kwargs = {} else: dask_gufunc_kwargs = dask_gufunc_kwargs.copy() allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None) if allow_rechunk is None: for n, (data, core_dims) in enumerate( zip(input_data, signature.input_core_dims, strict=True) ): if is_chunked_array(data): # core dimensions cannot span multiple chunks for axis, dim in enumerate(core_dims, start=-len(core_dims)): if len(data.chunks[axis]) != 1: raise ValueError( f"dimension {dim} on {n}th function argument to " "apply_ufunc with dask='parallelized' consists of " "multiple chunks, but is also a core dimension. To " "fix, either rechunk into a single array chunk along " f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or " "pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` " "but beware that this may significantly increase memory usage." 
) dask_gufunc_kwargs["allow_rechunk"] = True output_sizes = dask_gufunc_kwargs.pop("output_sizes", {}) if output_sizes: output_sizes_renamed = {} for key, value in output_sizes.items(): if key not in signature.all_output_core_dims: raise ValueError( f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims" ) output_sizes_renamed[signature.dims_map[key]] = value dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed for key in signature.all_output_core_dims: if ( key not in signature.all_input_core_dims or key in exclude_dims ) and key not in output_sizes: raise ValueError( f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'" ) def func(*arrays): res = chunkmanager.apply_gufunc( numpy_func, signature.to_gufunc_string(exclude_dims), *arrays, vectorize=vectorize, output_dtypes=output_dtypes, **dask_gufunc_kwargs, ) return res elif dask == "allowed": pass else: raise ValueError( f"unknown setting for chunked array handling in apply_ufunc: {dask}" ) elif vectorize: func = _vectorize( func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims ) result_data = func(*input_data) if signature.num_outputs == 1: result_data = (result_data,) elif ( not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs ): raise ValueError( f"applied function does not have the number of " f"outputs specified in the ufunc signature. " f"Received a {type(result_data)} with {len(result_data)} elements. " f"Expected a tuple of {signature.num_outputs} elements:\n\n" f"{limit_lines(repr(result_data), limit=10)}" ) objs = _all_of_type(args, Variable) attrs = merge_attrs( [obj.attrs for obj in objs], combine_attrs=keep_attrs, ) output: list[Variable] = [] for dims, data in zip(output_dims, result_data, strict=True): data = as_compatible_data(data) if data.ndim != len(dims): raise ValueError( "applied function returned data with an unexpected " f"number of dimensions. Received {data.ndim} dimension(s) but " f"expected {len(dims)} dimensions with names {dims!r}, from:\n\n" f"{short_array_repr(data)}" ) var = Variable(dims, data, fastpath=True) for dim, new_size in var.sizes.items(): if dim in dim_sizes and new_size != dim_sizes[dim]: raise ValueError( f"size of dimension '{dim}' on inputs was unexpectedly " f"changed by applied function from {dim_sizes[dim]} to {new_size}. Only " "dimensions specified in ``exclude_dims`` with " "xarray.apply_ufunc are allowed to change size. " "The data returned was:\n\n" f"{short_array_repr(data)}" ) var.attrs = attrs output.append(var) if signature.num_outputs == 1: return output[0] else: return tuple(output) def apply_array_ufunc(func, *args, dask="forbidden"): """Apply an ndarray level function over ndarray objects.""" if any(is_chunked_array(arg) for arg in args): if dask == "forbidden": raise ValueError( "apply_ufunc encountered a dask array on an " "argument, but handling for dask arrays has not " "been enabled. 
Either set the ``dask`` argument " "or load your data into memory first with " "``.load()`` or ``.compute()``" ) elif dask == "parallelized": raise ValueError( "cannot use dask='parallelized' for apply_ufunc " "unless at least one input is an xarray object" ) elif dask == "allowed": pass else: raise ValueError(f"unknown setting for dask array handling: {dask}") return func(*args) def apply_ufunc( func: Callable, *args: Any, input_core_dims: Sequence[Sequence] | None = None, output_core_dims: Sequence[Sequence] | None = ((),), exclude_dims: AbstractSet = frozenset(), vectorize: bool = False, join: JoinOptions = "exact", dataset_join: str = "exact", dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool | str | None = None, kwargs: Mapping | None = None, dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden", output_dtypes: Sequence | None = None, output_sizes: Mapping[Any, int] | None = None, meta: Any = None, dask_gufunc_kwargs: dict[str, Any] | None = None, on_missing_core_dim: MissingCoreDimOptions = "raise", ) -> Any: """Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \ numpy.ndarray, dask.array.Array or scalar Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : sequence of sequence, optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : list of tuple, optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. 
vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining variables of Dataset objects with mismatched data variables. - 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional - 'drop' or False: empty attrs on returned xarray object. - 'identical': all attrs must be the same on every object. - 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value. - 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped. - 'override' or True: skip comparing and copy attrs from the first object to the result. kwargs : dict, optional Optional keyword arguments passed directly on to call ``func``. dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden" How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. Prefer this option if ``func`` natively supports dask arrays. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output arguments are supported. Only use this option if ``func`` does not natively support dask arrays (e.g. converts them to numpy arrays). dask_gufunc_kwargs : dict, optional Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk`` and ``meta``. output_dtypes : list of dtype, optional Optional list of output dtypes. Only used if ``dask='parallelized'`` or ``vectorize=True``. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs`` parameter. It will be removed as direct parameter in a future version. meta : optional Size-0 object representing the type of array wrapped by dask array. Passed on to :py:func:`dask.array.apply_gufunc`. 
``meta`` should be given in the ``dask_gufunc_kwargs`` parameter . It will be removed as direct parameter a future version. on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise" How to handle missing core dimensions on input variables. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Notes ----- This function is designed for the more common case where ``func`` can work on numpy arrays. If ``func`` needs to manipulate a whole xarray object subset to each block it is possible to use :py:func:`xarray.map_blocks`. Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x**2 + y**2) ... return xr.apply_ufunc(func, a, b) ... You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) Size: 24B array([1.41421356, 2.82842712, 4.24264069]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(3, 4) np.float64(5.0) >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) Size: 24B array([1., 2., 3.]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension: >>> def mean(obj, dim): ... # note: apply always moves core dimensions to the end ... return apply_ufunc( ... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1} ... ) ... Inner product over a specific dimension (like :py:func:`dot`): >>> def _inner(x, y): ... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) ... return result[..., 0, 0] ... >>> def inner_product(a, b, dim): ... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) ... Stack objects along a new dimension (like :py:func:`concat`): >>> def stack(objects, dim, new_coord): ... # note: this version does not stack coordinates ... func = lambda *x: np.stack(x, axis=-1) ... result = apply_ufunc( ... func, ... *objects, ... output_core_dims=[[dim]], ... join="outer", ... dataset_fill_value=np.nan ... ) ... result[dim] = new_coord ... return result ... If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors: >>> import scipy.stats >>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"): ... return apply_ufunc( ... scipy.stats.wasserstein_distance, ... first_samples, ... second_samples, ... input_core_dims=[[dim], [dim]], ... vectorize=True, ... ) ... Most of NumPy's builtin functions already broadcast their inputs appropriately for use in ``apply_ufunc``. You may find helper functions such as :py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`. 
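Listing a core dimension in ``exclude_dims`` skips alignment along it and allows its size to differ between inputs and outputs; a common use is interpolation onto a new grid. A sketch of that pattern (the ``x_new`` dimension name is illustrative): >>> def interp_to_new_grid(da, new_x): ... # np.interp(points, xp, fp); vectorize loops over any extra dimensions ... return apply_ufunc( ... np.interp, ... new_x, ... da["x"], ... da, ... input_core_dims=[["x_new"], ["x"], ["x"]], ... output_core_dims=[["x_new"]], ... exclude_dims={"x"}, ... vectorize=True, ... ) ...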
See Also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize dask.array.apply_gufunc xarray.map_blocks Notes ----- :ref:`dask.automatic-parallelization` User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`. :doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc` Advanced Tutorial on applying numpy function using :py:func:`apply_ufunc` References ---------- .. [1] https://numpy.org/doc/stable/reference/ufuncs.html .. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html """ from xarray.core.dataarray import DataArray from xarray.core.groupby import GroupBy from xarray.core.variable import Variable if input_core_dims is None: input_core_dims = ((),) * (len(args)) elif len(input_core_dims) != len(args): raise ValueError( f"input_core_dims must be None or a tuple with the length same to " f"the number of arguments. " f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, " f" but number of args is {len(args)}." ) if kwargs is None: kwargs = {} signature = _UFuncSignature(input_core_dims, output_core_dims) if exclude_dims: if not isinstance(exclude_dims, set): raise TypeError( f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead." ) if not exclude_dims <= signature.all_core_dims: raise ValueError( f"each dimension in `exclude_dims` must also be a " f"core dimension in the function signature. " f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension" ) # handle dask_gufunc_kwargs if dask == "parallelized": if dask_gufunc_kwargs is None: dask_gufunc_kwargs = {} else: dask_gufunc_kwargs = dask_gufunc_kwargs.copy() # todo: remove warnings after deprecation cycle if meta is not None: warnings.warn( "``meta`` should be given in the ``dask_gufunc_kwargs`` parameter." " It will be removed as direct parameter in a future version.", FutureWarning, stacklevel=2, ) dask_gufunc_kwargs.setdefault("meta", meta) if output_sizes is not None: warnings.warn( "``output_sizes`` should be given in the ``dask_gufunc_kwargs`` " "parameter. 
It will be removed as direct parameter in a future " "version.", FutureWarning, stacklevel=2, ) dask_gufunc_kwargs.setdefault("output_sizes", output_sizes) if kwargs: if "where" in kwargs and isinstance(kwargs["where"], DataArray): kwargs["where"] = kwargs["where"].data # type:ignore[index] func = functools.partial(func, **kwargs) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if isinstance(keep_attrs, bool): keep_attrs = "override" if keep_attrs else "drop" variables_vfunc = functools.partial( apply_variable_ufunc, func, signature=signature, exclude_dims=exclude_dims, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, dask_gufunc_kwargs=dask_gufunc_kwargs, ) # feed groupby-apply_ufunc through apply_groupby_func if any(isinstance(a, GroupBy) for a in args): this_apply = functools.partial( apply_ufunc, func, input_core_dims=input_core_dims, output_core_dims=output_core_dims, exclude_dims=exclude_dims, join=join, dataset_join=dataset_join, dataset_fill_value=dataset_fill_value, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, dask_gufunc_kwargs=dask_gufunc_kwargs, ) return apply_groupby_func(this_apply, *args) # feed datasets apply_variable_ufunc through apply_dataset_vfunc elif any(is_dict_like(a) for a in args): return apply_dataset_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, dataset_join=dataset_join, fill_value=dataset_fill_value, keep_attrs=keep_attrs, on_missing_core_dim=on_missing_core_dim, ) # feed DataArray apply_variable_ufunc through apply_dataarray_vfunc elif any(isinstance(a, DataArray) for a in args): return apply_dataarray_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, keep_attrs=keep_attrs, ) # feed Variables directly through apply_variable_ufunc elif any(isinstance(a, Variable) for a in args): return variables_vfunc(*args) else: # feed anything else through apply_array_ufunc return apply_array_ufunc(func, *args, dask=dask) ������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/arithmetic.py���������������������������������������������������0000664�0000000�0000000�00000010351�15114646760�0021716�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Base classes implementing arithmetic for xarray objects.""" from __future__ import annotations import numbers import numpy as np from xarray.computation.ops import IncludeNumpySameMethods, IncludeReduceMethods # _typed_ops.py is a generated file from xarray.core._typed_ops import ( DataArrayGroupByOpsMixin, DataArrayOpsMixin, DatasetGroupByOpsMixin, DatasetOpsMixin, VariableOpsMixin, ) from xarray.core.common import ImplementsArrayReduce, ImplementsDatasetReduce from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.namedarray.utils import is_duck_array class SupportsArithmetic: """Base class for xarray types that support arithmetic. Used by Dataset, DataArray, Variable and GroupBy. """ __slots__ = () # TODO: implement special methods for arithmetic here rather than injecting # them in xarray/computation/ops.py. 
Ideally, do so by inheriting from # numpy.lib.mixins.NDArrayOperatorsMixin. # TODO: allow extending this with some sort of registration system _HANDLED_TYPES = ( np.generic, numbers.Number, bytes, str, ) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): from xarray.computation.apply_ufunc import apply_ufunc # See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin. out = kwargs.get("out", ()) for x in inputs + out: if not is_duck_array(x) and not isinstance( x, self._HANDLED_TYPES + (SupportsArithmetic,) ): return NotImplemented if ufunc.signature is not None: raise NotImplementedError( f"{ufunc} not supported: xarray objects do not directly implement " "generalized ufuncs. Instead, use xarray.apply_ufunc or " "explicitly convert to xarray objects to NumPy arrays " "(e.g., with `.values`)." ) if method != "__call__": # TODO: support other methods, e.g., reduce and accumulate. raise NotImplementedError( f"{method} method for ufunc {ufunc} is not implemented on xarray objects, " "which currently only support the __call__ method. As an " "alternative, consider explicitly converting xarray objects " "to NumPy arrays (e.g., with `.values`)." ) if any(isinstance(o, SupportsArithmetic) for o in out): # TODO: implement this with logic like _inplace_binary_op. This # will be necessary to use NDArrayOperatorsMixin. raise NotImplementedError( "xarray objects are not yet supported in the `out` argument " "for ufuncs. As an alternative, consider explicitly " "converting xarray objects to NumPy arrays (e.g., with " "`.values`)." ) join = dataset_join = OPTIONS["arithmetic_join"] return apply_ufunc( ufunc, *inputs, input_core_dims=((),) * ufunc.nin, output_core_dims=((),) * ufunc.nout, join=join, dataset_join=dataset_join, dataset_fill_value=np.nan, kwargs=kwargs, dask="allowed", keep_attrs=_get_keep_attrs(default=True), ) class VariableArithmetic( ImplementsArrayReduce, IncludeNumpySameMethods, SupportsArithmetic, VariableOpsMixin, ): __slots__ = () # prioritize our operations over those of numpy.ndarray (priority=0) __array_priority__ = 50 class DatasetArithmetic( ImplementsDatasetReduce, SupportsArithmetic, DatasetOpsMixin, ): __slots__ = () __array_priority__ = 50 class DataArrayArithmetic( ImplementsArrayReduce, IncludeNumpySameMethods, SupportsArithmetic, DataArrayOpsMixin, ): __slots__ = () # priority must be higher than Variable to properly work with binary ufuncs __array_priority__ = 60 class DataArrayGroupbyArithmetic( SupportsArithmetic, DataArrayGroupByOpsMixin, ): __slots__ = () class DatasetGroupbyArithmetic( SupportsArithmetic, DatasetGroupByOpsMixin, ): __slots__ = () class CoarsenArithmetic(IncludeReduceMethods): __slots__ = () ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/computation.py��������������������������������������������������0000664�0000000�0000000�00000075111�15114646760�0022134�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Functions for applying functions that act on arrays to 
xarray's labeled data. NOTE: This module is currently large and contains various computational functionality. The long-term plan is to break it down into more focused submodules. """ from __future__ import annotations import functools from collections import Counter from collections.abc import ( Callable, Hashable, ) from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np from xarray.compat.array_api_compat import to_like_array from xarray.core import dtypes, duck_array_ops, utils from xarray.core.common import zeros_like from xarray.core.duck_array_ops import datetime_to_numeric from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import Dims, T_DataArray from xarray.core.utils import ( is_scalar, parse_dims_as_set, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align from xarray.util.deprecation_helpers import deprecate_dims if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset MissingCoreDimOptions = Literal["raise", "copy", "drop"] _NO_FILL_VALUE = utils.ReprObject("") _JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"}) def cov( da_a: T_DataArray, da_b: T_DataArray, dim: Dims = None, ddof: int = 1, weights: T_DataArray | None = None, ) -> T_DataArray: """ Compute covariance between two DataArray objects along a shared dimension. Parameters ---------- da_a : DataArray Array to compute. da_b : DataArray Array to compute. dim : str, iterable of hashable, "..." or None, optional The dimension along which the covariance will be computed ddof : int, default: 1 If ddof=1, covariance is normalized by N-1, giving an unbiased estimate, else normalization is by N. weights : DataArray, optional Array of weights. Returns ------- covariance : DataArray See Also -------- pandas.Series.cov : corresponding pandas function xarray.corr : respective function to calculate correlation Examples -------- >>> from xarray import DataArray >>> da_a = DataArray( ... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_a Size: 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_b Size: 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: * space (space) >> xr.cov(da_a, da_b) Size: 8B array(-3.53055556) >>> xr.cov(da_a, da_b, dim="time") Size: 24B array([ 0.2 , -0.5 , 1.69333333]) Coordinates: * space (space) >> weights = DataArray( ... [4, 2, 1], ... dims=("space"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ], ... ) >>> weights Size: 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.cov(da_a, da_b, dim="space", weights=weights) Size: 24B array([-4.69346939, -4.49632653, -3.37959184]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]): raise TypeError( "Only xr.DataArray is supported." f"Given {[type(arr) for arr in [da_a, da_b]]}." 
) if weights is not None and not isinstance(weights, DataArray): raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.") return _cov_corr(da_a, da_b, weights=weights, dim=dim, ddof=ddof, method="cov") def corr( da_a: T_DataArray, da_b: T_DataArray, dim: Dims = None, weights: T_DataArray | None = None, ) -> T_DataArray: """ Compute the Pearson correlation coefficient between two DataArray objects along a shared dimension. Parameters ---------- da_a : DataArray Array to compute. da_b : DataArray Array to compute. dim : str, iterable of hashable, "..." or None, optional The dimension along which the correlation will be computed weights : DataArray, optional Array of weights. Returns ------- correlation: DataArray See Also -------- pandas.Series.corr : corresponding pandas function xarray.cov : underlying covariance function Examples -------- >>> from xarray import DataArray >>> da_a = DataArray( ... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_a Size: 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_b Size: 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: * space (space) >> xr.corr(da_a, da_b) Size: 8B array(-0.57087777) >>> xr.corr(da_a, da_b, dim="time") Size: 24B array([ 1., -1., 1.]) Coordinates: * space (space) >> weights = DataArray( ... [4, 2, 1], ... dims=("space"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ], ... ) >>> weights Size: 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.corr(da_a, da_b, dim="space", weights=weights) Size: 24B array([-0.50240504, -0.83215028, -0.99057446]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]): raise TypeError( "Only xr.DataArray is supported." f"Given {[type(arr) for arr in [da_a, da_b]]}." ) if weights is not None and not isinstance(weights, DataArray): raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.") return _cov_corr(da_a, da_b, weights=weights, dim=dim, method="corr") def _cov_corr( da_a: T_DataArray, da_b: T_DataArray, weights: T_DataArray | None = None, dim: Dims = None, ddof: int = 0, method: Literal["cov", "corr"] | None = None, ) -> T_DataArray: """ Internal method for xr.cov() and xr.corr() so only have to sanitize the input arrays once and we don't repeat code. """ # 1. Broadcast the two arrays da_a, da_b = align(da_a, da_b, join="inner", copy=False) # 2. Ignore the nans valid_values = da_a.notnull() & da_b.notnull() da_a = da_a.where(valid_values) da_b = da_b.where(valid_values) # 3. Detrend along the given dim if weights is not None: demeaned_da_a = da_a - da_a.weighted(weights).mean(dim=dim) demeaned_da_b = da_b - da_b.weighted(weights).mean(dim=dim) else: demeaned_da_a = da_a - da_a.mean(dim=dim) demeaned_da_b = da_b - da_b.mean(dim=dim) # 4. Compute covariance along the given dim # N.B. `skipna=True` is required or auto-covariance is computed incorrectly. E.g. 
# Try xr.cov(da,da) for da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]) if weights is not None: cov = ( (demeaned_da_a.conj() * demeaned_da_b) .weighted(weights) .mean(dim=dim, skipna=True) ) else: cov = (demeaned_da_a.conj() * demeaned_da_b).mean(dim=dim, skipna=True) if method == "cov": # Adjust covariance for degrees of freedom valid_count = valid_values.sum(dim) adjust = valid_count / (valid_count - ddof) # I think the cast is required because of `T_DataArray` + `T_Xarray` (would be # the same with `T_DatasetOrArray`) # https://github.com/pydata/xarray/pull/8384#issuecomment-1784228026 return cast(T_DataArray, cov * adjust) else: # Compute std and corr if weights is not None: da_a_std = da_a.weighted(weights).std(dim=dim) da_b_std = da_b.weighted(weights).std(dim=dim) else: da_a_std = da_a.std(dim=dim) da_b_std = da_b.std(dim=dim) corr = cov / (da_a_std * da_b_std) return cast(T_DataArray, corr) def cross( a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable ) -> DataArray | Variable: """ Compute the cross product of two (arrays of) vectors. The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. The vectors in `a` and `b` are defined by the values along the dimension `dim` and can have sizes 1, 2 or 3. Where the size of either `a` or `b` is 1 or 2, the remaining components of the input vector is assumed to be zero and the cross product calculated accordingly. In cases where both input vectors have dimension 2, the z-component of the cross product is returned. Parameters ---------- a, b : DataArray or Variable Components of the first and second vector(s). dim : hashable The dimension along which the cross product will be computed. Must be available in both vectors. Examples -------- Vector cross-product with 3 dimensions: >>> a = xr.DataArray([1, 2, 3]) >>> b = xr.DataArray([4, 5, 6]) >>> xr.cross(a, b, dim="dim_0") Size: 24B array([-3, 6, -3]) Dimensions without coordinates: dim_0 Vector cross-product with 3 dimensions but zeros at the last axis yields the same results as with 2 dimensions: >>> a = xr.DataArray([1, 2, 0]) >>> b = xr.DataArray([4, 5, 0]) >>> xr.cross(a, b, dim="dim_0") Size: 24B array([ 0, 0, -3]) Dimensions without coordinates: dim_0 Multiple vector cross-products. Note that the direction of the cross product vector is defined by the right-hand rule: >>> a = xr.DataArray( ... [[1, 2, 3], [4, 5, 6]], ... dims=("time", "cartesian"), ... coords=dict( ... time=(["time"], [0, 1]), ... cartesian=(["cartesian"], ["x", "y", "z"]), ... ), ... ) >>> b = xr.DataArray( ... [[4, 5, 6], [1, 2, 3]], ... dims=("time", "cartesian"), ... coords=dict( ... time=(["time"], [0, 1]), ... cartesian=(["cartesian"], ["x", "y", "z"]), ... ), ... ) >>> xr.cross(a, b, dim="cartesian") Size: 48B array([[-3, 6, -3], [ 3, -6, 3]]) Coordinates: * time (time) int64 16B 0 1 * cartesian (cartesian) >> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3]))) >>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6]))) >>> c = xr.cross( ... ds_a.to_dataarray("cartesian"), ... ds_b.to_dataarray("cartesian"), ... dim="cartesian", ... 
) >>> c.to_dataset(dim="cartesian") Size: 24B Dimensions: (dim_0: 1) Dimensions without coordinates: dim_0 Data variables: x (dim_0) int64 8B -3 y (dim_0) int64 8B 6 z (dim_0) int64 8B -3 See Also -------- numpy.cross : Corresponding numpy function """ if dim not in a.dims: raise ValueError(f"Dimension {dim!r} not on a") elif dim not in b.dims: raise ValueError(f"Dimension {dim!r} not on b") if not 1 <= a.sizes[dim] <= 3: raise ValueError( f"The size of {dim!r} on a must be 1, 2, or 3 to be " f"compatible with a cross product but is {a.sizes[dim]}" ) elif not 1 <= b.sizes[dim] <= 3: raise ValueError( f"The size of {dim!r} on b must be 1, 2, or 3 to be " f"compatible with a cross product but is {b.sizes[dim]}" ) all_dims = list(dict.fromkeys(a.dims + b.dims)) if a.sizes[dim] != b.sizes[dim]: # Arrays have different sizes. Append zeros where the smaller # array is missing a value, zeros will not affect np.cross: if ( not isinstance(a, Variable) # Only used to make mypy happy. and dim in getattr(a, "coords", {}) and not isinstance(b, Variable) # Only used to make mypy happy. and dim in getattr(b, "coords", {}) ): # If the arrays have coords we know which indexes to fill # with zeros: a, b = align( a, b, fill_value=0, join="outer", exclude=set(all_dims) - {dim}, ) elif min(a.sizes[dim], b.sizes[dim]) == 2: # If the array doesn't have coords we can only infer # that it has composite values if the size is at least 2. # Once padded, rechunk the padded array because apply_ufunc # requires core dimensions not to be chunked: if a.sizes[dim] < b.sizes[dim]: a = a.pad({dim: (0, 1)}, constant_values=0) # TODO: Should pad or apply_ufunc handle correct chunking? a = a.chunk({dim: -1}) if is_chunked_array(a.data) else a else: b = b.pad({dim: (0, 1)}, constant_values=0) # TODO: Should pad or apply_ufunc handle correct chunking? b = b.chunk({dim: -1}) if is_chunked_array(b.data) else b else: raise ValueError( f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:" " dimensions without coordinates must have have a length of 2 or 3" ) from xarray.computation.apply_ufunc import apply_ufunc c = apply_ufunc( duck_array_ops.cross, a, b, input_core_dims=[[dim], [dim]], output_core_dims=[[dim] if a.sizes[dim] == 3 else []], dask="parallelized", output_dtypes=[np.result_type(a, b)], ) c = c.transpose(*all_dims, missing_dims="ignore") return c @deprecate_dims def dot( *arrays, dim: Dims = None, **kwargs: Any, ): """Generalized dot product for xarray objects. Like ``np.einsum``, but provides a simpler interface based on array dimension names. Parameters ---------- *arrays : DataArray or Variable Arrays to compute. dim : str, iterable of hashable, "..." or None, optional Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. If not specified, then all the common dimensions are summed over. **kwargs : dict Additional keyword arguments passed to ``numpy.einsum`` or ``dask.array.einsum`` Returns ------- DataArray See Also -------- numpy.einsum dask.array.einsum opt_einsum.contract Notes ----- We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``, which is passed through to ``np.einsum``, and works for most array backends. 
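    For instance (a sketch only; ``dot_optimized`` is a hypothetical wrapper,
    not part of xarray), the flag is simply forwarded through ``**kwargs`` to
    the einsum backend:

    >>> def dot_optimized(*arrays, dim=None):
    ...     return xr.dot(*arrays, dim=dim, optimize=True)
    ...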
Examples -------- >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"]) >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"]) >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"]) >>> da_a Size: 48B array([[0, 1], [2, 3], [4, 5]]) Dimensions without coordinates: a, b >>> da_b Size: 96B array([[[ 0, 1], [ 2, 3]], [[ 4, 5], [ 6, 7]], [[ 8, 9], [10, 11]]]) Dimensions without coordinates: a, b, c >>> da_c Size: 48B array([[0, 1, 2], [3, 4, 5]]) Dimensions without coordinates: c, d >>> xr.dot(da_a, da_b, dim=["a", "b"]) Size: 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=["a"]) Size: 32B array([[40, 46], [70, 79]]) Dimensions without coordinates: b, c >>> xr.dot(da_a, da_b, da_c, dim=["b", "c"]) Size: 72B array([[ 9, 14, 19], [ 93, 150, 207], [273, 446, 619]]) Dimensions without coordinates: a, d >>> xr.dot(da_a, da_b) Size: 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=...) Size: 8B array(235) """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, Variable | DataArray) for arr in arrays): raise TypeError( "Only xr.DataArray and xr.Variable are supported." f"Given {[type(arr) for arr in arrays]}." ) if len(arrays) == 0: raise TypeError("At least one array should be given.") common_dims: set[Hashable] = set.intersection(*(set(arr.dims) for arr in arrays)) all_dims = [] for arr in arrays: all_dims += [d for d in arr.dims if d not in all_dims] einsum_axes = "abcdefghijklmnopqrstuvwxyz" dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)} dot_dims: set[Hashable] if dim is None: # find dimensions that occur more than once dim_counts: Counter = Counter() for arr in arrays: dim_counts.update(arr.dims) dot_dims = {d for d, c in dim_counts.items() if c > 1} else: dot_dims = parse_dims_as_set(dim, all_dims=set(all_dims)) # dimensions to be parallelized broadcast_dims = common_dims - dot_dims input_core_dims = [ [d for d in arr.dims if d not in broadcast_dims] for arr in arrays ] output_core_dims = [ [d for d in all_dims if d not in dot_dims and d not in broadcast_dims] ] # construct einsum subscripts, such as '...abc,...ab->...c' # Note: input_core_dims are always moved to the last position subscripts_list = [ "..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims ] subscripts = ",".join(subscripts_list) subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0]) join = OPTIONS["arithmetic_join"] # using "inner" emulates `(a * b).sum()` for all joins (except "exact") if join != "exact": join = "inner" # subscripts should be passed to np.einsum as arg, not as kwargs. We need # to construct a partial function for apply_ufunc to work. func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs) from xarray.computation.apply_ufunc import apply_ufunc result = apply_ufunc( func, *arrays, input_core_dims=input_core_dims, output_core_dims=output_core_dims, join=join, dask="allowed", ) return result.transpose(*all_dims, missing_dims="ignore") def where(cond, x, y, keep_attrs=None): """Return elements from `x` or `y` depending on `cond`. Performs xarray-like broadcasting across input arguments. All dimension coordinates on `x` and `y` must be aligned with each other and with `cond`. Parameters ---------- cond : scalar, array, Variable, DataArray or Dataset When True, return values from `x`, otherwise returns values from `y`. 
x : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is True y : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is False keep_attrs : bool or str or callable, optional How to treat attrs. If True, keep the attrs of `x`. Returns ------- Dataset, DataArray, Variable or array In priority order: Dataset, DataArray, Variable or array, whichever type appears as an input argument. Examples -------- >>> x = xr.DataArray( ... 0.1 * np.arange(10), ... dims=["lat"], ... coords={"lat": np.arange(10)}, ... name="sst", ... ) >>> x Size: 80B array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 >>> xr.where(x < 0.5, x, x * 100) Size: 80B array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 >>> y = xr.DataArray( ... 0.1 * np.arange(9).reshape(3, 3), ... dims=["lat", "lon"], ... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)}, ... name="sst", ... ) >>> y Size: 72B array([[0. , 0.1, 0.2], [0.3, 0.4, 0.5], [0.6, 0.7, 0.8]]) Coordinates: * lat (lat) int64 24B 0 1 2 * lon (lon) int64 24B 10 11 12 >>> xr.where(y.lat < 1, y, -1) Size: 72B array([[ 0. , 0.1, 0.2], [-1. , -1. , -1. ], [-1. , -1. , -1. ]]) Coordinates: * lat (lat) int64 24B 0 1 2 * lon (lon) int64 24B 10 11 12 >>> cond = xr.DataArray([True, False], dims=["x"]) >>> x = xr.DataArray([1, 2], dims=["y"]) >>> xr.where(cond, x, 0) Size: 32B array([[1, 2], [0, 0]]) Dimensions without coordinates: x, y See Also -------- numpy.where : corresponding numpy function Dataset.where, DataArray.where : equivalent methods """ from xarray.core.dataset import Dataset if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) # alignment for three arguments is complicated, so don't support it yet from xarray.computation.apply_ufunc import apply_ufunc result = apply_ufunc( duck_array_ops.where, cond, x, y, join="exact", dataset_join="exact", dask="allowed", keep_attrs=keep_attrs, ) # keep the attributes of x, the second parameter, by default to # be consistent with the `where` method of `DataArray` and `Dataset` # rebuild the attrs from x at each level of the output, which could be # Dataset, DataArray, or Variable, and also handle coords if keep_attrs is True and hasattr(result, "attrs"): if isinstance(y, Dataset) and not isinstance(x, Dataset): # handle special case where x gets promoted to Dataset result.attrs = {} if getattr(x, "name", None) in result.data_vars: result[x.name].attrs = getattr(x, "attrs", {}) else: # otherwise, fill in global attrs and variable attrs (if they exist) result.attrs = getattr(x, "attrs", {}) for v in getattr(result, "data_vars", []): result[v].attrs = getattr(getattr(x, v, None), "attrs", {}) for c in getattr(result, "coords", []): # always fill coord attrs of x result[c].attrs = getattr(getattr(x, c, None), "attrs", {}) return result @overload def polyval( coord: DataArray, coeffs: DataArray, degree_dim: Hashable = "degree" ) -> DataArray: ... @overload def polyval( coord: DataArray, coeffs: Dataset, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset, coeffs: DataArray, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset, coeffs: Dataset, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset | DataArray, coeffs: Dataset | DataArray, degree_dim: Hashable = "degree", ) -> Dataset | DataArray: ... 
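# Illustrative usage sketch (comment only, not a tested example): ``polyval``
# is typically paired with ``polyfit``, evaluating the fitted coefficients back
# on the original coordinate. Variable names below are hypothetical:
#
#     fit = da.polyfit(dim="x", deg=2)
#     evaluated = xr.polyval(da["x"], fit.polyfit_coefficients)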
def polyval( coord: Dataset | DataArray, coeffs: Dataset | DataArray, degree_dim: Hashable = "degree", ) -> Dataset | DataArray: """Evaluate a polynomial at specific values Parameters ---------- coord : DataArray or Dataset Values at which to evaluate the polynomial. coeffs : DataArray or Dataset Coefficients of the polynomial. degree_dim : Hashable, default: "degree" Name of the polynomial degree dimension in `coeffs`. Returns ------- DataArray or Dataset Evaluated polynomial. See Also -------- xarray.DataArray.polyfit numpy.polynomial.polynomial.polyval """ if degree_dim not in coeffs._indexes: raise ValueError( f"Dimension `{degree_dim}` should be a coordinate variable with labels." ) if not np.issubdtype(coeffs[degree_dim].dtype, np.integer): raise ValueError( f"Dimension `{degree_dim}` should be of integer dtype. Received {coeffs[degree_dim].dtype} instead." ) max_deg = coeffs[degree_dim].max().item() coeffs = coeffs.reindex( {degree_dim: np.arange(max_deg + 1)}, fill_value=0, copy=False ) coord = _ensure_numeric(coord) # using Horner's method # https://en.wikipedia.org/wiki/Horner%27s_method res = zeros_like(coord) + coeffs.isel({degree_dim: max_deg}, drop=True) for deg in range(max_deg - 1, -1, -1): res *= coord res += coeffs.isel({degree_dim: deg}, drop=True) return res def _ensure_numeric(data: Dataset | DataArray) -> Dataset | DataArray: """Converts all datetime64 variables to float64 Parameters ---------- data : DataArray or Dataset Variables with possible datetime dtypes. Returns ------- DataArray or Dataset Variables with datetime64 dtypes converted to float64. """ from xarray.core.dataset import Dataset def _cfoffset(x: DataArray) -> Any: scalar = x.compute().data[0] if not is_scalar(scalar): # we do not get a scalar back on dask == 2021.04.1 scalar = scalar.item() return type(scalar)(1970, 1, 1) def to_floatable(x: DataArray) -> DataArray: if x.dtype.kind in "MO": # datetimes (CFIndexes are object type) offset = ( np.datetime64("1970-01-01") if x.dtype.kind == "M" else _cfoffset(x) ) return x.copy( data=datetime_to_numeric(x.data, offset=offset, datetime_unit="ns"), ) elif x.dtype.kind == "m": # timedeltas return duck_array_ops.astype(x, dtype=float) return x if isinstance(data, Dataset): return data.map(to_floatable) else: return to_floatable(data) def _calc_idxminmax( *, array, func: Callable, dim: Hashable | None = None, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ): """Apply common operations for idxmin and idxmax.""" # This function doesn't make sense for scalars so don't try if not array.ndim: raise ValueError("This function does not apply for scalars") if dim is not None: pass # Use the dim if available elif array.ndim == 1: # it is okay to guess the dim if there is only 1 dim = array.dims[0] else: # The dim is not specified and ambiguous. Don't guess. raise ValueError("Must supply 'dim' argument for multidimensional arrays") if dim not in array.dims: raise KeyError( f"Dimension {dim!r} not found in array dimensions {array.dims!r}" ) if dim not in array.coords: raise KeyError( f"Dimension {dim!r} is not one of the coordinates {tuple(array.coords.keys())}" ) # These are dtypes with NaN values argmin and argmax can handle na_dtypes = "cfO" if skipna or (skipna is None and array.dtype.kind in na_dtypes): # Need to skip NaN values since argmin and argmax can't handle them allna = array.isnull().all(dim) array = array.where(~allna, 0) # This will run argmin or argmax. 
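    # ``func`` computes ``argmin``/``argmax`` along ``dim`` (this helper backs
    # ``idxmin``/``idxmax``); the integer positions it returns are translated
    # back into coordinate labels below via ``coord[(index.variable,)]``.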
index = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna) # Handle chunked arrays (e.g. dask). coord = array[dim]._variable.to_base_variable() if is_chunked_array(array.data): chunkmanager = get_chunked_array_type(array.data) coord_array = chunkmanager.from_array( array[dim].data, chunks=((array.sizes[dim],),) ) coord = coord.copy(data=coord_array) else: coord = coord.copy(data=to_like_array(array[dim].data, array.data)) res = index._replace(coord[(index.variable,)]).rename(dim) if skipna or (skipna is None and array.dtype.kind in na_dtypes): # Put the NaN values back in after removing them res = res.where(~allna, fill_value) # Copy attributes from argmin/argmax, if any res.attrs = index.attrs return res �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/fit.py����������������������������������������������������������0000664�0000000�0000000�00000046672�15114646760�0020366�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Fitting operations for DataArrays and Datasets.""" from __future__ import annotations import inspect import warnings from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from inspect import Parameter from types import MappingProxyType from typing import ( Any, Literal, Union, ) import numpy as np # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] from xarray.computation.apply_ufunc import apply_ufunc from xarray.computation.computation import _ensure_numeric, where from xarray.core.dataarray import DataArray from xarray.core.duck_array_ops import is_duck_dask_array, least_squares from xarray.core.types import Dims, ErrorOptions from xarray.core.variable import Variable from xarray.structure.alignment import broadcast def _get_func_args(func, param_names): """Use `inspect.signature` to try accessing `func` args. Otherwise, ensure they are provided by user. """ func_args: Union[dict[str, Parameter], MappingProxyType[str, Parameter]] try: func_args = inspect.signature(func).parameters except ValueError as err: func_args = {} # type: ignore[assignment,unused-ignore] if not param_names: raise ValueError( "Unable to inspect `func` signature, and `param_names` was not provided." ) from err if param_names: params = param_names else: params = list(func_args)[1:] if any( (p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values() ): raise ValueError( "`param_names` must be provided because `func` takes variable length arguments." ) return params, func_args def _initialize_curvefit_params(params, p0, bounds, func_args): """Set initial guess and bounds for curvefit. 
Priority: 1) passed args 2) func signature 3) scipy defaults """ def _initialize_feasible(lb, ub): # Mimics functionality of scipy.optimize.minpack._initialize_feasible lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) p0 = where( lb_finite, where( ub_finite, 0.5 * (lb + ub), # both bounds finite lb + 1, # lower bound finite, upper infinite ), where( ub_finite, ub - 1, # lower bound infinite, upper finite 0, # both bounds infinite ), ) return p0 param_defaults = dict.fromkeys(params, 1) bounds_defaults = dict.fromkeys(params, (-np.inf, np.inf)) for p in params: if p in func_args and func_args[p].default is not func_args[p].empty: param_defaults[p] = func_args[p].default if p in bounds: lb, ub = bounds[p] bounds_defaults[p] = (lb, ub) param_defaults[p] = where( (param_defaults[p] < lb) | (param_defaults[p] > ub), _initialize_feasible(lb, ub), param_defaults[p], ) if p in p0: param_defaults[p] = p0[p] return param_defaults, bounds_defaults def polyfit( obj, dim: Hashable, deg: int, skipna: bool | None = None, rcond: np.floating[Any] | float | None = None, w: Hashable | Any = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ): """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- obj : Dataset or DataArray Object to perform the polyfit on dim : hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : hashable or Any, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- Dataset A single dataset which contains (for each "var" in the input dataset): [var]_polyfit_coefficients The coefficients of the best fit for each variable in this dataset. [var]_polyfit_residuals The residuals of the least-square computation for each variable (only included if `full=True`) When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) The rank is computed ignoring the NaN values that might be skipped. [dim]_singular_values The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) [var]_polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is not raised with in-memory (not dask) data and `full=True`. 
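    As a rough usage sketch (``ds`` is a hypothetical input and this is not a
    tested example), a quadratic fit along ``"time"`` preserves all other
    dimensions and, with ``full=True``, also returns the matrix rank and
    singular values:

    >>> def quadratic_fit(ds):
    ...     return polyfit(ds, dim="time", deg=2, full=True)
    ...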
See Also -------- numpy.polyfit numpy.polyval xarray.polyval """ variables: dict[Hashable, Variable] = {} skipna_da = skipna x = np.asarray(_ensure_numeric(obj.coords[dim]).astype(np.float64)) xname = f"{obj[dim].name}_" order = int(deg) + 1 degree_coord_values = np.arange(order)[::-1] lhs = np.vander(x, order) if rcond is None: rcond = x.shape[0] * np.finfo(x.dtype).eps # Weights: if w is not None: if isinstance(w, Hashable): w = obj.coords[w] w = np.asarray(w) if w.ndim != 1: raise TypeError("Expected a 1-d array for weights.") if w.shape[0] != lhs.shape[0]: raise TypeError(f"Expected w and {dim} to have the same length") lhs *= w[:, np.newaxis] # Scaling scale = np.sqrt((lhs * lhs).sum(axis=0)) lhs /= scale from xarray.core import utils degree_dim = utils.get_temp_dimname(obj.dims, "degree") rank = np.linalg.matrix_rank(lhs) if full: rank = Variable(dims=(), data=rank) variables[xname + "matrix_rank"] = rank _sing = np.linalg.svd(lhs, compute_uv=False) variables[xname + "singular_values"] = Variable( dims=(degree_dim,), data=np.concatenate([np.full((order - rank.data,), np.nan), _sing]), ) # If we have a coordinate get its underlying dimension. (true_dim,) = obj.coords[dim].dims other_coords = { dim: obj._variables[dim] for dim in set(obj.dims) - {true_dim} if dim in obj._variables } present_dims: set[Hashable] = set() for name, var in obj._variables.items(): if name in obj._coord_names or name in obj.dims: continue if true_dim not in var.dims: continue if is_duck_dask_array(var._data) and (rank != order or full or skipna is None): # Current algorithm with dask and skipna=False neither supports # deficient ranks nor does it output the "full" info (issue dask/dask#6516) skipna_da = True elif skipna is None: skipna_da = bool(np.any(var.isnull())) if var.ndim > 1: rhs = var.transpose(true_dim, ...) other_dims = rhs.dims[1:] scale_da = scale.reshape(-1, *((1,) * len(other_dims))) else: rhs = var scale_da = scale other_dims = () present_dims.update(other_dims) if w is not None: rhs = rhs * w.reshape(-1, *((1,) * len(other_dims))) with warnings.catch_warnings(): if full: # Copy np.polyfit behavior warnings.simplefilter("ignore", RankWarning) else: # Raise only once per variable warnings.simplefilter("once", RankWarning) coeffs, residuals = least_squares( lhs, rhs.data, rcond=rcond, skipna=skipna_da ) from xarray.core.dataarray import _THIS_ARRAY if name is _THIS_ARRAY: # When polyfit is called on a DataArray, ensure the resulting # dataset is backwards compatible with previous behavior name = "" elif isinstance(name, str): name = f"{name}_" else: # For other non-string names name = "" variables[name + "polyfit_coefficients"] = Variable( data=coeffs / scale_da, dims=(degree_dim,) + other_dims ) if full or (cov is True): variables[name + "polyfit_residuals"] = Variable( data=residuals if var.ndim > 1 else residuals.squeeze(), dims=other_dims, ) fac: Variable | int if cov: Vbase = np.linalg.inv(np.dot(lhs.T, lhs)) Vbase /= np.outer(scale, scale) if cov == "unscaled": fac = 1 else: if x.shape[0] <= order: raise ValueError( "The number of data points must exceed order to scale the covariance matrix." 
) fac = variables[name + "polyfit_residuals"] / (x.shape[0] - order) variables[name + "polyfit_covariance"] = ( Variable(data=Vbase, dims=("cov_i", "cov_j")) * fac ) return type(obj)( data_vars=variables, coords={ degree_dim: degree_coord_values, **{ name: coord for name, coord in other_coords.items() if name in present_dims }, }, attrs=obj.attrs.copy(), ) def curvefit( obj, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ): """ Curve fitting optimization for arbitrary functions. Wraps `scipy.optimize.curve_fit` with `apply_ufunc`. Parameters ---------- obj : Dataset or DataArray Object to perform the curvefit on coords : hashable, DataArray, or sequence of hashable or DataArray Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of hashable, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. 
[var]_curvefit_covariance The covariance matrix of the coefficient estimates. See Also -------- Dataset.polyfit scipy.optimize.curve_fit """ from scipy.optimize import curve_fit if p0 is None: p0 = {} if bounds is None: bounds = {} if kwargs is None: kwargs = {} reduce_dims_: list[Hashable] if not reduce_dims: reduce_dims_ = [] elif isinstance(reduce_dims, str) or not isinstance(reduce_dims, Iterable): reduce_dims_ = [reduce_dims] else: reduce_dims_ = list(reduce_dims) if isinstance(coords, str | DataArray) or not isinstance(coords, Iterable): coords = [coords] coords_: Sequence[DataArray] = [ obj[coord] if isinstance(coord, str) else coord for coord in coords ] # Determine whether any coords are dims on self for coord in coords_: reduce_dims_ += [c for c in obj.dims if coord.equals(obj[c])] reduce_dims_ = list(set(reduce_dims_)) preserved_dims = list(set(obj.dims) - set(reduce_dims_)) if not reduce_dims_: raise ValueError( "No arguments to `coords` were identified as a dimension on the calling " "object, and no dims were supplied to `reduce_dims`. This would result " "in fitting on scalar data." ) # Check that initial guess and bounds only contain coordinates that are in preserved_dims for param, guess in p0.items(): if isinstance(guess, DataArray): unexpected = set(guess.dims) - set(preserved_dims) if unexpected: raise ValueError( f"Initial guess for '{param}' has unexpected dimensions " f"{tuple(unexpected)}. It should only have dimensions that are in data " f"dimensions {preserved_dims}." ) for param, (lb, ub) in bounds.items(): for label, bound in zip(("Lower", "Upper"), (lb, ub), strict=True): if isinstance(bound, DataArray): unexpected = set(bound.dims) - set(preserved_dims) if unexpected: raise ValueError( f"{label} bound for '{param}' has unexpected dimensions " f"{tuple(unexpected)}. It should only have dimensions that are in data " f"dimensions {preserved_dims}." 
) if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') # Broadcast all coords with each other coords_ = broadcast(*coords_) coords_ = [coord.broadcast_like(obj, exclude=preserved_dims) for coord in coords_] n_coords = len(coords_) params, func_args = _get_func_args(func, param_names) param_defaults, bounds_defaults = _initialize_curvefit_params( params, p0, bounds, func_args ) n_params = len(params) def _wrapper(Y, *args, **kwargs): # Wrap curve_fit with raveled coordinates and pointwise NaN handling # *args contains: # - the coordinates # - initial guess # - lower bounds # - upper bounds coords__ = args[:n_coords] p0_ = args[n_coords + 0 * n_params : n_coords + 1 * n_params] lb = args[n_coords + 1 * n_params : n_coords + 2 * n_params] ub = args[n_coords + 2 * n_params :] x = np.vstack([c.ravel() for c in coords__]) y = Y.ravel() if skipna: mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0) x = x[:, mask] y = y[mask] if y.size == 0: popt = np.full([n_params], np.nan) pcov = np.full([n_params, n_params], np.nan) return popt, pcov x = np.squeeze(x) try: popt, pcov = curve_fit(func, x, y, p0=p0_, bounds=(lb, ub), **kwargs) except RuntimeError: if errors == "raise": raise popt = np.full([n_params], np.nan) pcov = np.full([n_params, n_params], np.nan) return popt, pcov from xarray.core.dataarray import _THIS_ARRAY result = type(obj)() for name, da in obj.data_vars.items(): if name is _THIS_ARRAY: # When curvefit is called on a DataArray, ensure the resulting # dataset is backwards compatible with previous behavior var_name = "" else: var_name = f"{name}_" input_core_dims = [reduce_dims_ for _ in range(n_coords + 1)] input_core_dims.extend( [[] for _ in range(3 * n_params)] ) # core_dims for p0 and bounds popt, pcov = apply_ufunc( _wrapper, da, *coords_, *param_defaults.values(), *[b[0] for b in bounds_defaults.values()], *[b[1] for b in bounds_defaults.values()], vectorize=True, dask="parallelized", input_core_dims=input_core_dims, output_core_dims=[["param"], ["cov_i", "cov_j"]], dask_gufunc_kwargs={ "output_sizes": { "param": n_params, "cov_i": n_params, "cov_j": n_params, }, }, output_dtypes=(np.float64, np.float64), exclude_dims=set(reduce_dims_), kwargs=kwargs, ) result[var_name + "curvefit_coefficients"] = popt result[var_name + "curvefit_covariance"] = pcov result = result.assign_coords({"param": params, "cov_i": params, "cov_j": params}) result.attrs = obj.attrs.copy() return result ����������������������������������������������������������������������xarray-2025.12.0/xarray/computation/nanops.py�������������������������������������������������������0000664�0000000�0000000�00000012777�15114646760�0021101�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import warnings import numpy as np from xarray.core import dtypes, duck_array_ops, nputils, utils from xarray.core.duck_array_ops import ( astype, count, fillna, isnull, sum_where, where, where_method, ) def _maybe_null_out(result, axis, mask, min_count=1): """ xarray version of pandas.core.nanops._maybe_null_out """ if axis is not None and getattr(result, "ndim", False): null_mask = ( np.take(mask.shape, axis).prod() - 
duck_array_ops.sum(mask, axis) - min_count ) < 0 dtype, fill_value = dtypes.maybe_promote(result.dtype) result = where(null_mask, fill_value, astype(result, dtype)) elif getattr(result, "dtype", None) not in dtypes.NAT_TYPES: null_mask = mask.size - duck_array_ops.sum(mask) result = where(null_mask < min_count, np.nan, result) return result def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs): """In house nanargmin, nanargmax for object arrays. Always return integer type """ valid_count = count(value, axis=axis) value = fillna(value, fill_value) data = getattr(np, func)(value, axis=axis, **kwargs) # TODO This will evaluate dask arrays and might be costly. if duck_array_ops.array_any(valid_count == 0): raise ValueError("All-NaN slice encountered") return data def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs): """In house nanmin and nanmax for object array""" valid_count = count(value, axis=axis) filled_value = fillna(value, fill_value) data = getattr(np, func)(filled_value, axis=axis, **kwargs) if not hasattr(data, "dtype"): # scalar case data = fill_value if valid_count == 0 else data # we've computed a single min, max value of type object. # don't let np.array turn a tuple back into an array return utils.to_0d_object_array(data) return where_method(data, valid_count != 0) def nanmin(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("min", dtypes.get_pos_infinity(a.dtype), a, axis) return nputils.nanmin(a, axis=axis) def nanmax(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("max", dtypes.get_neg_infinity(a.dtype), a, axis) return nputils.nanmax(a, axis=axis) def nanargmin(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_pos_infinity(a.dtype) return _nan_argminmax_object("argmin", fill_value, a, axis=axis) return nputils.nanargmin(a, axis=axis) def nanargmax(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_neg_infinity(a.dtype) return _nan_argminmax_object("argmax", fill_value, a, axis=axis) return nputils.nanargmax(a, axis=axis) def nansum(a, axis=None, dtype=None, out=None, min_count=None): mask = isnull(a) result = sum_where(a, axis=axis, dtype=dtype, where=mask) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs): """In house nanmean. 
ddof argument will be used in _nanvar method""" valid_count = count(value, axis=axis) value = fillna(value, 0) # As dtype inference is impossible for object dtype, we assume float # https://github.com/dask/dask/issues/3162 if dtype is None and value.dtype.kind == "O": dtype = float data = np.sum(value, axis=axis, dtype=dtype, **kwargs) data = data / (valid_count - ddof) return where_method(data, valid_count != 0) def nanmean(a, axis=None, dtype=None, out=None): if a.dtype.kind == "O": return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) return nputils.nanmean(a, axis=axis, dtype=dtype) def nanmedian(a, axis=None, out=None): # The dask algorithm works by rechunking to one chunk along axis # Make sure we trigger the dask error when passing all dimensions # so that we don't rechunk the entire array to one chunk and # possibly blow memory if axis is not None and len(np.atleast_1d(axis)) == a.ndim: axis = None return nputils.nanmedian(a, axis=axis) def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs): value_mean = _nanmean_ddof_object( ddof=0, value=value, axis=axis, keepdims=True, **kwargs ) squared = (astype(value, value_mean.dtype) - value_mean) ** 2 return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs) def nanvar(a, axis=None, dtype=None, out=None, ddof=0): if a.dtype.kind == "O": return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof) return nputils.nanvar(a, axis=axis, dtype=dtype, ddof=ddof) def nanstd(a, axis=None, dtype=None, out=None, ddof=0): return nputils.nanstd(a, axis=axis, dtype=dtype, ddof=ddof) def nanprod(a, axis=None, dtype=None, out=None, min_count=None): mask = isnull(a) result = nputils.nanprod(a, axis=axis, dtype=dtype) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def nancumsum(a, axis=None, dtype=None, out=None): return nputils.nancumsum(a, axis=axis, dtype=dtype) def nancumprod(a, axis=None, dtype=None, out=None): return nputils.nancumprod(a, axis=axis, dtype=dtype) �xarray-2025.12.0/xarray/computation/ops.py����������������������������������������������������������0000664�0000000�0000000�00000022214�15114646760�0020367�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Define core operations for xarray objects. TODO(shoyer): rewrite this module, making use of xarray.computation.computation, NumPy's __array_ufunc__ and mixin classes instead of the unintuitive "inject" functions. 
""" from __future__ import annotations import operator from typing import TYPE_CHECKING, Literal import numpy as np from xarray.core import dtypes, duck_array_ops if TYPE_CHECKING: pass try: import bottleneck as bn has_bottleneck = True except ImportError: # use numpy methods instead bn = np has_bottleneck = False NUM_BINARY_OPS = [ "add", "sub", "mul", "truediv", "floordiv", "mod", "pow", "and", "xor", "or", "lshift", "rshift", ] # methods which pass on the numpy return value unchanged # be careful not to list methods that we would want to wrap later NUMPY_SAME_METHODS = ["item", "searchsorted"] # methods which remove an axis REDUCE_METHODS = ["all", "any"] NAN_REDUCE_METHODS = [ "max", "min", "mean", "prod", "sum", "std", "var", "median", ] # TODO: wrap take, dot, sort _CUM_DOCSTRING_TEMPLATE = """\ Apply `{name}` along some dimension of {cls}. Parameters ---------- {extra_args} skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `{name}`. Returns ------- cumvalue : {cls} New {cls} object with `{name}` applied to its data along the indicated dimension. """ _REDUCE_DOCSTRING_TEMPLATE = """\ Reduce this {cls}'s data by applying `{name}` along some dimension(s). Parameters ---------- {extra_args}{skip_na_docs}{min_count_docs} keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to the appropriate array function for calculating `{name}` on this object's data. Returns ------- reduced : {cls} New {cls} object with `{name}` applied to its data and the indicated dimension(s) removed. """ _SKIPNA_DOCSTRING = """ skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64).""" _MINCOUNT_DOCSTRING = """ min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. New in version 0.10.8: Added with the default being None. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array.""" def fillna(data, other, join="left", dataset_join="left"): """Fill missing values in this object with data from the other object. Follows normal broadcasting and alignment rules. 
Parameters ---------- join : {"outer", "inner", "left", "right"}, optional Method for joining the indexes of the passed objects along each dimension - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {"outer", "inner", "left", "right"}, optional Method for joining variables of Dataset objects with mismatched data variables. - "outer": take variables from both Dataset objects - "inner": take only overlapped variables - "left": take only variables from the first object - "right": take only variables from the last object """ from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( duck_array_ops.fillna, data, other, join=join, dask="allowed", dataset_join=dataset_join, dataset_fill_value=np.nan, keep_attrs=True, ) # TODO: type this properly def where_method(self, cond, other=dtypes.NA): # type: ignore[unused-ignore,has-type] """Return elements from `self` or `other` depending on `cond`. Parameters ---------- cond : DataArray or Dataset with boolean dtype Locations at which to preserve this objects values. other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, inserts missing values. Returns ------- Same type as caller. """ from xarray.computation.apply_ufunc import apply_ufunc # alignment for three arguments is complicated, so don't support it yet join: Literal["inner", "exact"] = "inner" if other is dtypes.NA else "exact" return apply_ufunc( duck_array_ops.where_method, self, cond, other, join=join, dataset_join=join, dask="allowed", keep_attrs=True, ) def _call_possibly_missing_method(arg, name, args, kwargs): try: method = getattr(arg, name) except AttributeError: duck_array_ops.fail_on_dask_array_input(arg, func_name=name) if hasattr(arg, "data"): duck_array_ops.fail_on_dask_array_input(arg.data, func_name=name) raise else: return method(*args, **kwargs) def _values_method_wrapper(name): def func(self, *args, **kwargs): return _call_possibly_missing_method(self.data, name, args, kwargs) func.__name__ = name func.__doc__ = getattr(np.ndarray, name).__doc__ return func def _method_wrapper(name): def func(self, *args, **kwargs): return _call_possibly_missing_method(self, name, args, kwargs) func.__name__ = name func.__doc__ = getattr(np.ndarray, name).__doc__ return func def _func_slash_method_wrapper(f, name=None): # try to wrap a method, but if not found use the function # this is useful when patching in a function as both a DataArray and # Dataset method if name is None: name = f.__name__ def func(self, *args, **kwargs): try: return getattr(self, name)(*args, **kwargs) except AttributeError: return f(self, *args, **kwargs) func.__name__ = name func.__doc__ = f.__doc__ return func def inject_reduce_methods(cls): methods = ( [ (name, getattr(duck_array_ops, f"array_{name}"), False) for name in REDUCE_METHODS ] + [(name, getattr(duck_array_ops, name), True) for name in NAN_REDUCE_METHODS] + [("count", duck_array_ops.count, False)] ) for name, f, include_skipna in methods: numeric_only = getattr(f, "numeric_only", False) available_min_count = getattr(f, "available_min_count", False) skip_na_docs = _SKIPNA_DOCSTRING if include_skipna else "" min_count_docs = _MINCOUNT_DOCSTRING if available_min_count else "" func = 
cls._reduce_method(f, include_skipna, numeric_only) func.__name__ = name func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format( name=name, cls=cls.__name__, extra_args=cls._reduce_extra_args_docstring.format(name=name), skip_na_docs=skip_na_docs, min_count_docs=min_count_docs, ) setattr(cls, name, func) def op_str(name): return f"__{name}__" def get_op(name): return getattr(operator, op_str(name)) NON_INPLACE_OP = {get_op("i" + name): get_op(name) for name in NUM_BINARY_OPS} def inplace_to_noninplace_op(f): return NON_INPLACE_OP[f] # _typed_ops.py uses the following wrapped functions as a kind of unary operator argsort = _method_wrapper("argsort") conj = _method_wrapper("conj") conjugate = _method_wrapper("conj") round_ = _func_slash_method_wrapper(duck_array_ops.around, name="round") def inject_numpy_same(cls): # these methods don't return arrays of the same shape as the input, so # don't try to patch these in for Dataset objects for name in NUMPY_SAME_METHODS: setattr(cls, name, _values_method_wrapper(name)) class IncludeReduceMethods: __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if getattr(cls, "_reduce_method", None): inject_reduce_methods(cls) class IncludeNumpySameMethods: __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) inject_numpy_same(cls) # some methods not applicable to Dataset objects ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/rolling.py������������������������������������������������������0000664�0000000�0000000�00000141666�15114646760�0021251�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import itertools import math import warnings from collections.abc import Callable, Hashable, Iterator, Mapping from typing import TYPE_CHECKING, Any, Generic, TypeVar import numpy as np from xarray.compat import dask_array_ops from xarray.computation.arithmetic import CoarsenArithmetic from xarray.core import dtypes, duck_array_ops, utils from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import CoarsenBoundaryOptions, SideOptions, T_Xarray from xarray.core.utils import ( either_dict_or_kwargs, is_duck_dask_array, module_available, ) from xarray.util.deprecation_helpers import _deprecate_positional_args try: import bottleneck except ImportError: # use numpy methods instead bottleneck = None if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset RollingKey = Any _T = TypeVar("_T") _ROLLING_REDUCE_DOCSTRING_TEMPLATE = """\ Reduce this object's data windows by applying `{name}` along its dimension. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `{name}`. Returns ------- reduced : same type as caller New object with `{name}` applied along its rolling dimension. """ class Rolling(Generic[T_Xarray]): """A object that implements the moving window pattern. See Also -------- xarray.Dataset.groupby xarray.DataArray.groupby xarray.Dataset.rolling xarray.DataArray.rolling """ __slots__ = ("center", "dim", "min_periods", "obj", "window") _attributes = ("window", "min_periods", "center", "dim") dim: list[Hashable] window: list[int] center: list[bool] obj: T_Xarray min_periods: int def __init__( self, obj: T_Xarray, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object. Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling window along (e.g. `time`) to the size of the moving window. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or dict-like Hashable to bool, default: False Set the labels at the center of the window. If dict-like, set this property per rolling dimension. Returns ------- rolling : type of input argument """ self.dim = [] self.window = [] for d, w in windows.items(): self.dim.append(d) if w <= 0: raise ValueError("window must be > 0") self.window.append(w) self.center = self._mapping_to_list(center, default=False) self.obj = obj missing_dims = tuple(dim for dim in self.dim if dim not in self.obj.dims) if missing_dims: # NOTE: we raise KeyError here but ValueError in Coarsen. raise KeyError( f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} " f"dimensions {tuple(self.obj.dims)}" ) # attributes if min_periods is not None and min_periods <= 0: raise ValueError("min_periods must be greater than zero or None") self.min_periods = ( math.prod(self.window) if min_periods is None else min_periods ) def __repr__(self) -> str: """provide a nice str repr of our rolling object""" attrs = ",".join( f"{k}->{w}{'(center)' if c else ''}" for k, w, c in zip(self.dim, self.window, self.center, strict=True) ) return f"{self.__class__.__name__} [{attrs}]" def __len__(self) -> int: return math.prod(self.obj.sizes[d] for d in self.dim) @property def ndim(self) -> int: return len(self.dim) def _reduce_method( # type: ignore[misc] name: str, fillna: Any, rolling_agg_func: Callable | None = None, automatic_rechunk: bool = False, ) -> Callable[..., T_Xarray]: """Constructs reduction methods built on a numpy reduction function (e.g. sum), a numbagg reduction function (e.g. move_sum), a bottleneck reduction function (e.g. move_sum), or a Rolling reduction (_mean). The logic here for which function to run is quite diffuse, across this method & _array_reduce. Arguably we could refactor this. But one constraint is that we need context of xarray options, of the functions each library offers, of the array (e.g. dtype). Set automatic_rechunk=True when the reduction method makes a memory copy. 
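        In rough order of preference, ``_array_reduce`` dispatches to the
        numbagg moving-window function, then the bottleneck one, then
        ``rolling_agg_func`` (e.g. ``_mean``), and finally falls back to
        ``construct(...).reduce(...)`` with the plain ``duck_array_ops``
        reduction.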
""" if rolling_agg_func: array_agg_func = None else: array_agg_func = getattr(duck_array_ops, name) bottleneck_move_func = getattr(bottleneck, "move_" + name, None) if module_available("numbagg"): import numbagg # type: ignore[import-not-found, unused-ignore] numbagg_move_func = getattr(numbagg, "move_" + name, None) else: numbagg_move_func = None def method(self, keep_attrs=None, **kwargs): keep_attrs = self._get_keep_attrs(keep_attrs) return self._array_reduce( array_agg_func=array_agg_func, bottleneck_move_func=bottleneck_move_func, numbagg_move_func=numbagg_move_func, rolling_agg_func=rolling_agg_func, keep_attrs=keep_attrs, fillna=fillna, sliding_window_view_kwargs=dict(automatic_rechunk=automatic_rechunk), **kwargs, ) method.__name__ = name method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name) return method def _mean(self, keep_attrs, **kwargs): result = self.sum(keep_attrs=False, **kwargs) # use dtype of result for casting of count # this allows for GH #7062 and GH #8864, fixes GH #10340 result /= duck_array_ops.astype( self.count(keep_attrs=False), dtype=result.dtype, copy=False ) if keep_attrs: result.attrs = self.obj.attrs return result _mean.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="mean") # automatic_rechunk is set to True for reductions that make a copy. # std, var could be optimized after which we can set it to False # See #4325 argmax = _reduce_method("argmax", dtypes.NINF, automatic_rechunk=True) argmin = _reduce_method("argmin", dtypes.INF, automatic_rechunk=True) max = _reduce_method("max", dtypes.NINF) min = _reduce_method("min", dtypes.INF) prod = _reduce_method("prod", 1) sum = _reduce_method("sum", 0) mean = _reduce_method("mean", None, _mean) std = _reduce_method("std", None, automatic_rechunk=True) var = _reduce_method("var", None, automatic_rechunk=True) median = _reduce_method("median", None, automatic_rechunk=True) def _counts(self, keep_attrs: bool | None) -> T_Xarray: raise NotImplementedError() def count(self, keep_attrs: bool | None = None) -> T_Xarray: keep_attrs = self._get_keep_attrs(keep_attrs) rolling_count = self._counts(keep_attrs=keep_attrs) enough_periods = rolling_count >= self.min_periods return rolling_count.where(enough_periods) count.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="count") def _mapping_to_list( self, arg: _T | Mapping[Any, _T], default: _T | None = None, allow_default: bool = True, allow_allsame: bool = True, ) -> list[_T]: if utils.is_dict_like(arg): if allow_default: return [arg.get(d, default) for d in self.dim] for d in self.dim: if d not in arg: raise KeyError(f"Argument has no dimension key {d}.") return [arg[d] for d in self.dim] if allow_allsame: # for single argument return [arg] * self.ndim # type: ignore[list-item] # no check for negatives if self.ndim == 1: return [arg] # type: ignore[list-item] # no check for negatives raise ValueError(f"Mapping argument is necessary for {self.ndim}d-rolling.") def _get_keep_attrs(self, keep_attrs): if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return keep_attrs class DataArrayRolling(Rolling["DataArray"]): __slots__ = ("window_labels",) def __init__( self, obj: DataArray, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object for DataArray. You should use DataArray.rolling() method to construct this object instead of the class constructor. Parameters ---------- obj : DataArray Object to window. 
windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. Returns ------- rolling : type of input argument See Also -------- xarray.DataArray.rolling xarray.DataArray.groupby xarray.Dataset.rolling xarray.Dataset.groupby """ super().__init__(obj, windows, min_periods=min_periods, center=center) # TODO legacy attribute self.window_labels = self.obj[self.dim[0]] def __iter__(self) -> Iterator[tuple[DataArray, DataArray]]: if self.ndim > 1: raise ValueError("__iter__ is only supported for 1d-rolling") dim0 = self.dim[0] window0 = int(self.window[0]) offset = (window0 + 1) // 2 if self.center[0] else 1 stops = np.arange(offset, self.obj.sizes[dim0] + offset) starts = stops - window0 starts[: window0 - offset] = 0 for label, start, stop in zip(self.window_labels, starts, stops, strict=True): window = self.obj.isel({dim0: slice(start, stop)}) counts = window.count(dim=[dim0]) window = window.where(counts >= self.min_periods) yield (label, window) @_deprecate_positional_args("v2024.11.0") def construct( self, window_dim: Hashable | Mapping[Any, Hashable] | None = None, *, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> DataArray: """ Convert this rolling object to xr.DataArray, where the window dimension is stacked as a new dimension Parameters ---------- window_dim : Hashable or dict-like to Hashable, optional A mapping from dimension name to the new window dimension names. stride : int or mapping of int, default: 1 Size of stride for the rolling window. fill_value : default: dtypes.NA Filling value to match the dimension size. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs : Mapping Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **window_dim_kwargs : Hashable, optional The keyword arguments form of ``window_dim`` {dim: new_name, ...}. Returns ------- DataArray a view of the original array. By default, the returned array is not writeable. For numpy arrays, one can pass ``writeable=True`` in ``sliding_window_view_kwargs``. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) Dimensions without coordinates: a, b, window_dim >>> rolling = da.rolling(b=3, center=True) >>> rolling.construct("window_dim") Size: 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) Dimensions without coordinates: a, b, window_dim """ if sliding_window_view_kwargs is None: sliding_window_view_kwargs = {} return self._construct( self.obj, window_dim=window_dim, stride=stride, fill_value=fill_value, keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, **window_dim_kwargs, ) def _construct( self, obj: DataArray, *, window_dim: Hashable | Mapping[Any, Hashable] | None = None, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> DataArray: from xarray.core.dataarray import DataArray if sliding_window_view_kwargs is None: sliding_window_view_kwargs = {} keep_attrs = self._get_keep_attrs(keep_attrs) if window_dim is None: if len(window_dim_kwargs) == 0: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) window_dim = {d: window_dim_kwargs[str(d)] for d in self.dim} window_dims = self._mapping_to_list( window_dim, allow_default=False, allow_allsame=False ) strides = self._mapping_to_list(stride, default=1) window = obj.variable.rolling_window( self.dim, self.window, window_dims, center=self.center, fill_value=fill_value, **sliding_window_view_kwargs, ) attrs = obj.attrs if keep_attrs else {} result = DataArray( window, dims=obj.dims + tuple(window_dims), coords=obj.coords, attrs=attrs, name=obj.name, ) return result.isel( {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} ) def reduce( self, func: Callable, keep_attrs: bool | None = None, *, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **kwargs: Any, ) -> DataArray: """Reduce each window by applying `func`. Equivalent to ``.construct(...).reduce(func, ...)``. Parameters ---------- func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over the rolling dimension. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) Dimensions without coordinates: a, b, window_dim >>> rolling.reduce(np.sum) Size: 64B array([[nan, nan, 3., 6.], [nan, nan, 15., 18.]]) Dimensions without coordinates: a, b >>> rolling = da.rolling(b=3, min_periods=1) >>> rolling.reduce(np.nansum) Size: 64B array([[ 0., 1., 3., 6.], [ 4., 9., 15., 18.]]) Dimensions without coordinates: a, b """ keep_attrs = self._get_keep_attrs(keep_attrs) rolling_dim = { d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}") for d in self.dim } # save memory with reductions GH4325 fillna = kwargs.pop("fillna", dtypes.NA) if fillna is not dtypes.NA: obj = self.obj.fillna(fillna) else: obj = self.obj windows = self._construct( obj, window_dim=rolling_dim, keep_attrs=keep_attrs, fill_value=fillna, sliding_window_view_kwargs=sliding_window_view_kwargs, ) dim = list(rolling_dim.values()) result = windows.reduce(func, dim=dim, keep_attrs=keep_attrs, **kwargs) # Find valid windows based on count. counts = self._counts(keep_attrs=False) return result.where(counts >= self.min_periods) def _counts(self, keep_attrs: bool | None) -> DataArray: """Number of non-nan entries in each rolling window.""" rolling_dim = { d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}") for d in self.dim } # We use False as the fill_value instead of np.nan, since boolean # array is faster to be reduced than object array. # The use of skipna==False is also faster since it does not need to # copy the strided array. dim = list(rolling_dim.values()) counts = ( self.obj.notnull(keep_attrs=keep_attrs) .rolling( dict(zip(self.dim, self.window, strict=True)), center={d: self.center[i] for i, d in enumerate(self.dim)}, ) .construct(rolling_dim, fill_value=False, keep_attrs=keep_attrs) .sum(dim=dim, skipna=False, keep_attrs=keep_attrs) ) return counts def _numbagg_reduce(self, func, keep_attrs, **kwargs): # Some of this is copied from `_bottleneck_reduce`, we could reduce this as part # of a wider refactor. axis = self.obj.get_axis_num(self.dim[0]) padded = self.obj.variable if self.center[0]: if is_duck_dask_array(padded.data): # workaround to make the padded chunk size larger than # self.window - 1 shift = -(self.window[0] + 1) // 2 offset = (self.window[0] - 1) // 2 valid = (slice(None),) * axis + ( slice(offset, offset + self.obj.shape[axis]), ) else: shift = (-self.window[0] // 2) + 1 valid = (slice(None),) * axis + (slice(-shift, None),) padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant") if is_duck_dask_array(padded.data) and False: raise AssertionError("should not be reachable") else: values = func( padded.data, window=self.window[0], min_count=self.min_periods, axis=axis, ) if self.center[0]: values = values[valid] attrs = self.obj.attrs if keep_attrs else {} return self.obj.__class__( values, self.obj.coords, attrs=attrs, name=self.obj.name ) def _bottleneck_reduce(self, func, keep_attrs, **kwargs): # bottleneck doesn't allow min_count to be 0, although it should # work the same as if min_count = 1 # Note bottleneck only works with 1d-rolling. 
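        # To emulate centered windows on top of bottleneck's trailing-window
        # functions, the array is padded on the right and the result is
        # shifted back below.  For example, with window=3 on a NumPy-backed
        # array: shift = (-3 // 2) + 1 == -1, so one fill value is padded on
        # the right and the first element of the trailing-window result is
        # dropped, which aligns the output with the centered window labels.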
if self.min_periods == 0: min_count = 1 else: min_count = self.min_periods axis = self.obj.get_axis_num(self.dim[0]) padded = self.obj.variable if self.center[0]: if is_duck_dask_array(padded.data): # workaround to make the padded chunk size larger than # self.window - 1 shift = -(self.window[0] + 1) // 2 offset = (self.window[0] - 1) // 2 valid = (slice(None),) * axis + ( slice(offset, offset + self.obj.shape[axis]), ) else: shift = (-self.window[0] // 2) + 1 valid = (slice(None),) * axis + (slice(-shift, None),) padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant") if is_duck_dask_array(padded.data): values = dask_array_ops.dask_rolling_wrapper( func, padded, axis=axis, window=self.window[0], min_count=min_count ) else: values = func( padded.data, window=self.window[0], min_count=min_count, axis=axis ) # index 0 is at the rightmost edge of the window # need to reverse index here # see GH #8541 if func in [bottleneck.move_argmin, bottleneck.move_argmax]: values = self.window[0] - 1 - values if self.center[0]: values = values[valid] attrs = self.obj.attrs if keep_attrs else {} return self.obj.__class__( values, self.obj.coords, attrs=attrs, name=self.obj.name ) def _array_reduce( self, array_agg_func, bottleneck_move_func, numbagg_move_func, rolling_agg_func, keep_attrs, fillna, **kwargs, ): if "dim" in kwargs: warnings.warn( f"Reductions are applied along the rolling dimension(s) " f"'{self.dim}'. Passing the 'dim' kwarg to reduction " f"operations has no effect.", DeprecationWarning, stacklevel=3, ) del kwargs["dim"] xp = duck_array_ops.get_array_namespace(self.obj.data) if ( OPTIONS["use_numbagg"] and module_available("numbagg") and numbagg_move_func is not None # TODO: we could at least allow this for the equivalent of `apply_ufunc`'s # "parallelized". `rolling_exp` does this, as an example (but rolling_exp is # much simpler) and not is_duck_dask_array(self.obj.data) # Numbagg doesn't handle object arrays and generally has dtype consistency, # so doesn't deal well with bool arrays which are expected to change type. and self.obj.data.dtype.kind not in "ObMm" # TODO: we could also allow this, probably as part of a refactoring of this # module, so we can use the machinery in `self.reduce`. and self.ndim == 1 and xp is np ): import numbagg # Numbagg has a default ddof of 1. I (@max-sixty) think we should make # this the default in xarray too, but until we do, don't use numbagg for # std and var unless ddof is set to 1. 
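            # In practice this means move_mean, move_sum, etc. always take the
            # numbagg path when the checks above pass, while move_std/move_var
            # only do so when the caller explicitly requests ddof=1.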
if ( numbagg_move_func not in [numbagg.move_std, numbagg.move_var] or kwargs.get("ddof") == 1 ): return self._numbagg_reduce( numbagg_move_func, keep_attrs=keep_attrs, **kwargs ) if ( OPTIONS["use_bottleneck"] and bottleneck_move_func is not None and ( not is_duck_dask_array(self.obj.data) or module_available("dask", "2024.11.0") ) and self.ndim == 1 and xp is np ): return self._bottleneck_reduce( bottleneck_move_func, keep_attrs=keep_attrs, **kwargs ) if rolling_agg_func: return rolling_agg_func(self, keep_attrs=self._get_keep_attrs(keep_attrs)) if fillna is not None: if fillna is dtypes.INF: fillna = dtypes.get_pos_infinity(self.obj.dtype, max_for_int=True) elif fillna is dtypes.NINF: fillna = dtypes.get_neg_infinity(self.obj.dtype, min_for_int=True) kwargs.setdefault("skipna", False) kwargs.setdefault("fillna", fillna) return self.reduce(array_agg_func, keep_attrs=keep_attrs, **kwargs) class DatasetRolling(Rolling["Dataset"]): __slots__ = ("rollings",) def __init__( self, obj: Dataset, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object for Dataset. You should use Dataset.rolling() method to construct this object instead of the class constructor. Parameters ---------- obj : Dataset Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or mapping of hashable to bool, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. Returns ------- rolling : type of input argument See Also -------- xarray.Dataset.rolling xarray.DataArray.rolling xarray.Dataset.groupby xarray.DataArray.groupby """ super().__init__(obj, windows, min_periods, center) # Keep each Rolling object as a dictionary self.rollings = {} for key, da in self.obj.data_vars.items(): # keeps rollings only for the dataset depending on self.dim dims, center = [], {} for i, d in enumerate(self.dim): if d in da.dims: dims.append(d) center[d] = self.center[i] if dims: w = {d: windows[d] for d in dims} self.rollings[key] = DataArrayRolling(da, w, min_periods, center) def _dataset_implementation(self, func, keep_attrs, **kwargs): from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) reduced = {} for key, da in self.obj.data_vars.items(): if any(d in da.dims for d in self.dim): reduced[key] = func(self.rollings[key], keep_attrs=keep_attrs, **kwargs) else: reduced[key] = self.obj[key].copy() # we need to delete the attrs of the copied DataArray if not keep_attrs: reduced[key].attrs = {} attrs = self.obj.attrs if keep_attrs else {} return Dataset(reduced, coords=self.obj.coords, attrs=attrs) def reduce( self, func: Callable, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over the rolling dimension. 
keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs : Mapping Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. """ return self._dataset_implementation( functools.partial(DataArrayRolling.reduce, func=func), keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, **kwargs, ) def _counts(self, keep_attrs: bool | None) -> Dataset: return self._dataset_implementation( DataArrayRolling._counts, keep_attrs=keep_attrs ) def _array_reduce( self, array_agg_func, bottleneck_move_func, rolling_agg_func, keep_attrs, **kwargs, ): return self._dataset_implementation( functools.partial( DataArrayRolling._array_reduce, array_agg_func=array_agg_func, bottleneck_move_func=bottleneck_move_func, rolling_agg_func=rolling_agg_func, ), keep_attrs=keep_attrs, **kwargs, ) @_deprecate_positional_args("v2024.11.0") def construct( self, window_dim: Hashable | Mapping[Any, Hashable] | None = None, *, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> Dataset: """ Convert this rolling object to xr.Dataset, where the window dimension is stacked as a new dimension Parameters ---------- window_dim : str or mapping, optional A mapping from dimension name to the new window dimension names. Just a string can be used for 1d-rolling. stride : int, optional size of stride for the rolling window. fill_value : Any, default: dtypes.NA Filling value to match the dimension size. sliding_window_view_kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **window_dim_kwargs : {dim: new_name, ...}, optional The keyword arguments form of ``window_dim``. Returns ------- Dataset Dataset with views of the original arrays. By default, the returned arrays are not writeable. For numpy arrays, one can pass ``writeable=True`` in ``sliding_window_view_kwargs``. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
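        Data variables that do not contain any of the rolling dimensions are
        copied through unchanged; coordinates are strided afterwards with
        ``isel`` so that they stay aligned with the strided data variables.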
""" from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) if window_dim is None: if len(window_dim_kwargs) == 0: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) window_dim = {d: window_dim_kwargs[str(d)] for d in self.dim} window_dims = self._mapping_to_list( window_dim, allow_default=False, allow_allsame=False ) strides = self._mapping_to_list(stride, default=1) dataset = {} for key, da in self.obj.data_vars.items(): # keeps rollings only for the dataset depending on self.dim dims = [d for d in self.dim if d in da.dims] if dims: wi = {d: window_dims[i] for i, d in enumerate(self.dim) if d in da.dims} st = {d: strides[i] for i, d in enumerate(self.dim) if d in da.dims} dataset[key] = self.rollings[key].construct( window_dim=wi, fill_value=fill_value, stride=st, keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, ) else: dataset[key] = da.copy() # as the DataArrays can be copied we need to delete the attrs if not keep_attrs: dataset[key].attrs = {} # Need to stride coords as well. TODO: is there a better way? coords = self.obj.isel( {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} ).coords attrs = self.obj.attrs if keep_attrs else {} return Dataset(dataset, coords=coords, attrs=attrs) class Coarsen(CoarsenArithmetic, Generic[T_Xarray]): """A object that implements the coarsen. See Also -------- Dataset.coarsen DataArray.coarsen """ __slots__ = ( "boundary", "coord_func", "obj", "side", "trim_excess", "windows", ) _attributes = ("windows", "side", "trim_excess") obj: T_Xarray windows: Mapping[Hashable, int] side: SideOptions | Mapping[Hashable, SideOptions] boundary: CoarsenBoundaryOptions coord_func: Mapping[Hashable, str | Callable] def __init__( self, obj: T_Xarray, windows: Mapping[Any, int], boundary: CoarsenBoundaryOptions, side: SideOptions | Mapping[Any, SideOptions], coord_func: str | Callable | Mapping[Any, str | Callable], ) -> None: """ Moving window object. Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. boundary : {"exact", "trim", "pad"} If 'exact', a ValueError will be raised if dimension size is not a multiple of window size. If 'trim', the excess indexes are trimmed. If 'pad', NA will be padded. side : 'left' or 'right' or mapping from dimension to 'left' or 'right' coord_func : function (name) or mapping from coordinate name to function (name). 
Returns ------- coarsen """ self.obj = obj self.windows = windows self.side = side self.boundary = boundary missing_dims = tuple(dim for dim in windows.keys() if dim not in self.obj.dims) if missing_dims: raise ValueError( f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} " f"dimensions {tuple(self.obj.dims)}" ) if utils.is_dict_like(coord_func): coord_func_map = coord_func else: coord_func_map = dict.fromkeys(self.obj.dims, coord_func) for c in self.obj.coords: if c not in coord_func_map: coord_func_map[c] = duck_array_ops.mean # type: ignore[index] self.coord_func = coord_func_map def _get_keep_attrs(self, keep_attrs): if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return keep_attrs def __repr__(self) -> str: """provide a nice str repr of our coarsen object""" attrs = ",".join( f"{k}->{getattr(self, k)}" for k in self._attributes if getattr(self, k, None) is not None ) return f"{self.__class__.__name__} [{attrs}]" def construct( self, window_dim=None, keep_attrs=None, **window_dim_kwargs, ) -> T_Xarray: """ Convert this Coarsen object to a DataArray or Dataset, where the coarsening dimension is split or reshaped to two new dimensions. Parameters ---------- window_dim: mapping A mapping from existing dimension name to new dimension names. The size of the second dimension will be the length of the coarsening window. keep_attrs: bool, optional Preserve attributes if True **window_dim_kwargs : {dim: new_name, ...} The keyword arguments form of ``window_dim``. Returns ------- Dataset or DataArray with reshaped dimensions Examples -------- >>> da = xr.DataArray(np.arange(24), dims="time") >>> da.coarsen(time=12).construct(time=("year", "month")) Size: 192B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]]) Dimensions without coordinates: year, month See Also -------- DataArrayRolling.construct DatasetRolling.construct """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset window_dim = either_dict_or_kwargs( window_dim, window_dim_kwargs, "Coarsen.construct" ) if not window_dim: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) bad_new_dims = tuple( win for win, dims in window_dim.items() if len(dims) != 2 or isinstance(dims, str) ) if bad_new_dims: raise ValueError( f"Please provide exactly two dimension names for the following coarsening dimensions: {bad_new_dims}" ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) missing_dims = set(window_dim) - set(self.windows) if missing_dims: raise ValueError( f"'window_dim' must contain entries for all dimensions to coarsen. 
Missing {missing_dims}" ) extra_windows = set(self.windows) - set(window_dim) if extra_windows: raise ValueError( f"'window_dim' includes dimensions that will not be coarsened: {extra_windows}" ) reshaped = Dataset() if isinstance(self.obj, DataArray): obj = self.obj._to_temp_dataset() else: obj = self.obj reshaped.attrs = obj.attrs if keep_attrs else {} for key, var in obj.variables.items(): reshaped_dims = tuple( itertools.chain(*[window_dim.get(dim, [dim]) for dim in list(var.dims)]) ) if reshaped_dims != var.dims: windows = {w: self.windows[w] for w in window_dim if w in var.dims} reshaped_var, _ = var.coarsen_reshape(windows, self.boundary, self.side) attrs = var.attrs if keep_attrs else {} reshaped[key] = (reshaped_dims, reshaped_var, attrs) else: reshaped[key] = var # should handle window_dim being unindexed should_be_coords = (set(window_dim) & set(self.obj.coords)) | set( self.obj.coords ) result = reshaped.set_coords(should_be_coords) if isinstance(self.obj, DataArray): return self.obj._from_temp_dataset(result) else: return result class DataArrayCoarsen(Coarsen["DataArray"]): __slots__ = () _reduce_extra_args_docstring = """""" @classmethod def _reduce_method( cls, func: Callable, include_skipna: bool = False, numeric_only: bool = False ) -> Callable[..., DataArray]: """ Return a wrapped function for injecting reduction methods. see ops.inject_reduce_methods """ kwargs: dict[str, Any] = {} if include_skipna: kwargs["skipna"] = None def wrapped_func( self: DataArrayCoarsen, keep_attrs: bool | None = None, **kwargs ) -> DataArray: from xarray.core.dataarray import DataArray keep_attrs = self._get_keep_attrs(keep_attrs) reduced = self.obj.variable.coarsen( self.windows, func, self.boundary, self.side, keep_attrs, **kwargs ) coords = {} for c, v in self.obj.coords.items(): if c == self.obj.name: coords[c] = reduced elif any(d in self.windows for d in v.dims): coords[c] = v.variable.coarsen( self.windows, self.coord_func[c], self.boundary, self.side, keep_attrs, **kwargs, ) else: coords[c] = v return DataArray( reduced, dims=self.obj.dims, coords=coords, name=self.obj.name ) return wrapped_func def reduce( self, func: Callable, keep_attrs: bool | None = None, **kwargs ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis, **kwargs)` to return the result of collapsing an np.ndarray over the coarsening dimensions. It must be possible to provide the `axis` argument with a tuple of integers. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> coarsen = da.coarsen(b=2) >>> coarsen.reduce(np.sum) Size: 32B array([[ 1, 5], [ 9, 13]]) Dimensions without coordinates: a, b """ wrapped_func = self._reduce_method(func) return wrapped_func(self, keep_attrs=keep_attrs, **kwargs) class DatasetCoarsen(Coarsen["Dataset"]): __slots__ = () _reduce_extra_args_docstring = """""" @classmethod def _reduce_method( cls, func: Callable, include_skipna: bool = False, numeric_only: bool = False ) -> Callable[..., Dataset]: """ Return a wrapped function for injecting reduction methods. 
see ops.inject_reduce_methods """ kwargs: dict[str, Any] = {} if include_skipna: kwargs["skipna"] = None def wrapped_func( self: DatasetCoarsen, keep_attrs: bool | None = None, **kwargs ) -> Dataset: from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) if keep_attrs: attrs = self.obj.attrs else: attrs = {} reduced = {} for key, da in self.obj.data_vars.items(): reduced[key] = da.variable.coarsen( self.windows, func, self.boundary, self.side, keep_attrs=keep_attrs, **kwargs, ) coords = {} for c, v in self.obj.coords.items(): # variable.coarsen returns variables not containing the window dims # unchanged (maybe removes attrs) coords[c] = v.variable.coarsen( self.windows, self.coord_func[c], self.boundary, self.side, keep_attrs=keep_attrs, **kwargs, ) return Dataset(reduced, coords=coords, attrs=attrs) return wrapped_func def reduce(self, func: Callable, keep_attrs=None, **kwargs) -> Dataset: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis, **kwargs)` to return the result of collapsing an np.ndarray over the coarsening dimensions. It must be possible to provide the `axis` argument with a tuple of integers. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Arrays with summarized data. """ wrapped_func = self._reduce_method(func) return wrapped_func(self, keep_attrs=keep_attrs, **kwargs) ��������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/rolling_exp.py��������������������������������������������������0000664�0000000�0000000�00000022336�15114646760�0022115�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Mapping from typing import Any, Generic import numpy as np from xarray.compat.pdcompat import count_not_none from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.options import _get_keep_attrs from xarray.core.types import T_DataWithCoords from xarray.core.utils import module_available def _get_alpha( com: float | None = None, span: float | None = None, halflife: float | None = None, alpha: float | None = None, ) -> float: """ Convert com, span, halflife to alpha. 
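    The conversions match those used by pandas' ``ewm``:
    ``alpha = 1 / (com + 1)``, ``alpha = 2 / (span + 1)`` and
    ``alpha = 1 - exp(ln(0.5) / halflife)``; ``alpha`` itself is passed
    through unchanged after validation.

    Examples
    --------
    >>> _get_alpha(span=3)
    0.5
    >>> _get_alpha(com=1)
    0.5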
""" valid_count = count_not_none(com, span, halflife, alpha) if valid_count > 1: raise ValueError("com, span, halflife, and alpha are mutually exclusive") # Convert to alpha if com is not None: if com < 0: raise ValueError("commust satisfy: com>= 0") return 1 / (com + 1) elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") return 2 / (span + 1) elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") return 1 - np.exp(np.log(0.5) / halflife) elif alpha is not None: if not 0 < alpha <= 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") return alpha else: raise ValueError("Must pass one of comass, span, halflife, or alpha") class RollingExp(Generic[T_DataWithCoords]): """ Exponentially-weighted moving window object. Similar to EWM in pandas Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int (or float for alpha type) A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html Returns ------- RollingExp : type of input argument """ def __init__( self, obj: T_DataWithCoords, windows: Mapping[Any, int | float], window_type: str = "span", min_weight: float = 0.0, ): if not module_available("numbagg"): raise ImportError( "numbagg >= 0.2.1 is required for rolling_exp but currently numbagg is not installed" ) self.obj: T_DataWithCoords = obj dim, window = next(iter(windows.items())) self.dim = dim self.alpha = _get_alpha(**{window_type: window}) self.min_weight = min_weight # Don't pass min_weight=0 so we can support older versions of numbagg kwargs = dict(alpha=self.alpha, axis=-1) if min_weight > 0: kwargs["min_weight"] = min_weight self.kwargs = kwargs def mean(self, keep_attrs: bool | None = None) -> T_DataWithCoords: """ Exponentially weighted moving average. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").mean() Size: 40B array([1. , 1. , 1.69230769, 1.9 , 1.96694215]) Dimensions without coordinates: x """ import numbagg # type: ignore[import-not-found, unused-ignore] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nanmean, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=keep_attrs, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def sum(self, keep_attrs: bool | None = None) -> T_DataWithCoords: """ Exponentially weighted moving sum. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").sum() Size: 40B array([1. 
, 1.33333333, 2.44444444, 2.81481481, 2.9382716 ]) Dimensions without coordinates: x """ import numbagg if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nansum, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=keep_attrs, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def std(self) -> T_DataWithCoords: """ Exponentially weighted moving standard deviation. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").std() Size: 40B array([ nan, 0. , 0.67936622, 0.42966892, 0.25389527]) Dimensions without coordinates: x """ import numbagg dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nanstd, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def var(self) -> T_DataWithCoords: """ Exponentially weighted moving variance. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").var() Size: 40B array([ nan, 0. , 0.46153846, 0.18461538, 0.06446281]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nanvar, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def cov(self, other: T_DataWithCoords) -> T_DataWithCoords: """ Exponentially weighted moving covariance. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").cov(da**2) Size: 40B array([ nan, 0. , 1.38461538, 0.55384615, 0.19338843]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nancov, self.obj, other, input_core_dims=[[self.dim], [self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def corr(self, other: T_DataWithCoords) -> T_DataWithCoords: """ Exponentially weighted moving correlation. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. 
Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").corr(da.shift(x=1)) Size: 40B array([ nan, nan, nan, 0.4330127 , 0.48038446]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nancorr, self.obj, other, input_core_dims=[[self.dim], [self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/computation/weighted.py�����������������������������������������������������0000664�0000000�0000000�00000046430�15114646760�0021374�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Hashable, Iterable, Sequence from typing import TYPE_CHECKING, Generic, Literal, cast import numpy as np from numpy.typing import ArrayLike from xarray.computation.apply_ufunc import apply_ufunc from xarray.computation.computation import dot from xarray.core import duck_array_ops, utils from xarray.core.types import Dims, T_DataArray, T_Xarray from xarray.namedarray.utils import is_duck_dask_array from xarray.structure.alignment import align, broadcast # Weighted quantile methods are a subset of the numpy supported quantile methods. QUANTILE_METHODS = Literal[ "linear", "interpolated_inverted_cdf", "hazen", "weibull", "median_unbiased", "normal_unbiased", ] _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE = """ Reduce this {cls}'s data by a weighted ``{fcn}`` along some dimension(s). Parameters ---------- dim : Hashable or Iterable of Hashable, optional Dimension(s) over which to apply the weighted ``{fcn}``. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- reduced : {cls} New {cls} object with weighted ``{fcn}`` applied to its data and the indicated dimension(s) removed. Notes ----- Returns {on_zero} if the ``weights`` sum to 0.0 along the reduced dimension(s). """ _SUM_OF_WEIGHTS_DOCSTRING = """ Calculate the sum of weights, accounting for missing values in the data. Parameters ---------- dim : str or sequence of str, optional Dimension(s) over which to sum the weights. keep_attrs : bool, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- reduced : {cls} New {cls} object with the sum of the weights over the given dimension. 
""" _WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE = """ Apply a weighted ``quantile`` to this {cls}'s data along some dimension(s). Weights are interpreted as *sampling weights* (or probability weights) and describe how a sample is scaled to the whole population [1]_. There are other possible interpretations for weights, *precision weights* describing the precision of observations, or *frequency weights* counting the number of identical observations, however, they are not implemented here. For compatibility with NumPy's non-weighted ``quantile`` (which is used by ``DataArray.quantile`` and ``Dataset.quantile``), the only interpolation method supported by this weighted version corresponds to the default "linear" option of ``numpy.quantile``. This is "Type 7" option, described in Hyndman and Fan (1996) [2]_. The implementation is largely inspired by a blog post from A. Akinshin's (2023) [3]_. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply the weighted ``quantile``. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- quantiles : {cls} New {cls} object with weighted ``quantile`` applied to its data and the indicated dimension(s) removed. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile Notes ----- Returns NaN if the ``weights`` sum to 0.0 along the reduced dimension(s). References ---------- .. [1] https://notstatschat.rbind.io/2020/08/04/weights-in-statistics/ .. [2] Hyndman, R. J. & Fan, Y. (1996). Sample Quantiles in Statistical Packages. The American Statistician, 50(4), 361โ€“365. https://doi.org/10.2307/2684934 .. [3] Akinshin, A. (2023) "Weighted quantile estimators" arXiv:2304.07265 [stat.ME] https://arxiv.org/abs/2304.07265 """ if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset class Weighted(Generic[T_Xarray]): """An object that implements weighted operations. You should create a Weighted object by using the ``DataArray.weighted`` or ``Dataset.weighted`` methods. See Also -------- Dataset.weighted DataArray.weighted """ __slots__ = ("obj", "weights") def __init__(self, obj: T_Xarray, weights: T_DataArray) -> None: """ Create a Weighted object Parameters ---------- obj : DataArray or Dataset Object over which the weighted reduction operation is applied. weights : DataArray An array of weights associated with the values in the obj. Each value in the obj contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a ``DataArray`` and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. """ from xarray.core.dataarray import DataArray if not isinstance(weights, DataArray): raise ValueError("`weights` must be a DataArray") def _weight_check(w): # Ref https://github.com/pydata/xarray/pull/4559/files#r515968670 if duck_array_ops.array_any(duck_array_ops.isnull(w)): raise ValueError( "`weights` cannot contain missing values. 
" "Missing values can be replaced by `weights.fillna(0)`." ) return w if is_duck_dask_array(weights.data): # assign to copy - else the check is not triggered weights = weights.copy( data=weights.data.map_blocks(_weight_check, dtype=weights.dtype), # type: ignore[call-arg, arg-type] deep=False, ) else: _weight_check(weights.data) self.obj: T_Xarray = obj self.weights: T_DataArray = weights def _check_dim(self, dim: Dims): """raise an error if any dimension is missing""" dims: list[Hashable] if isinstance(dim, str) or not isinstance(dim, Iterable): dims = [dim] if dim else [] else: dims = list(dim) all_dims = set(self.obj.dims).union(set(self.weights.dims)) missing_dims = set(dims) - all_dims if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in {self.__class__.__name__} dimensions {tuple(all_dims)}" ) @staticmethod def _reduce( da: T_DataArray, weights: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """reduce using dot; equivalent to (da * weights).sum(dim, skipna) for internal use only """ # need to infer dims as we use `dot` if dim is None: dim = ... # need to mask invalid values in da, as `dot` does not implement skipna if skipna or (skipna is None and da.dtype.kind in "cfO"): da = da.fillna(0.0) # `dot` does not broadcast arrays, so this avoids creating a large # DataArray (if `weights` has additional dimensions) return dot(da, weights, dim=dim) def _sum_of_weights(self, da: T_DataArray, dim: Dims = None) -> T_DataArray: """Calculate the sum of weights, accounting for missing values""" # we need to mask data values that are nan; else the weights are wrong mask = da.notnull() # bool -> int, because ``xr.dot([True, True], [True, True])`` -> True # (and not 2); GH4074 if self.weights.dtype == bool: sum_of_weights = self._reduce( mask, duck_array_ops.astype(self.weights, dtype=int), dim=dim, skipna=False, ) else: sum_of_weights = self._reduce(mask, self.weights, dim=dim, skipna=False) # 0-weights are not valid valid_weights = sum_of_weights != 0.0 return sum_of_weights.where(valid_weights) def _sum_of_squares( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``sum_of_squares`` along some dimension(s).""" demeaned = da - da.weighted(self.weights).mean(dim=dim) # TODO: unsure why mypy complains about these being DataArray return types # rather than T_DataArray? 
return self._reduce((demeaned**2), self.weights, dim=dim, skipna=skipna) # type: ignore[return-value] def _weighted_sum( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``sum`` along some dimension(s).""" return self._reduce(da, self.weights, dim=dim, skipna=skipna) # type: ignore[return-value] def _weighted_mean( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``mean`` along some dimension(s).""" weighted_sum = self._weighted_sum(da, dim=dim, skipna=skipna) sum_of_weights = self._sum_of_weights(da, dim=dim) return weighted_sum / sum_of_weights def _weighted_var( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``var`` along some dimension(s).""" sum_of_squares = self._sum_of_squares(da, dim=dim, skipna=skipna) sum_of_weights = self._sum_of_weights(da, dim=dim) return sum_of_squares / sum_of_weights def _weighted_std( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``std`` along some dimension(s).""" return cast("T_DataArray", np.sqrt(self._weighted_var(da, dim, skipna))) def _weighted_quantile( self, da: T_DataArray, q: ArrayLike, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Apply a weighted ``quantile`` to a DataArray along some dimension(s).""" def _get_h(n: float, q: np.ndarray, method: QUANTILE_METHODS) -> np.ndarray: """Return the interpolation parameter.""" # Note that options are not yet exposed in the public API. h: np.ndarray if method == "linear": h = (n - 1) * q + 1 elif method == "interpolated_inverted_cdf": h = n * q elif method == "hazen": h = n * q + 0.5 elif method == "weibull": h = (n + 1) * q elif method == "median_unbiased": h = (n + 1 / 3) * q + 1 / 3 elif method == "normal_unbiased": h = (n + 1 / 4) * q + 3 / 8 else: raise ValueError(f"Invalid method: {method}.") return h.clip(1, n) def _weighted_quantile_1d( data: np.ndarray, weights: np.ndarray, q: np.ndarray, skipna: bool, method: QUANTILE_METHODS = "linear", ) -> np.ndarray: # This algorithm has been adapted from: # https://aakinshin.net/posts/weighted-quantiles/#reference-implementation is_nan = np.isnan(data) if skipna: # Remove nans from data and weights not_nan = ~is_nan data = data[not_nan] weights = weights[not_nan] elif is_nan.any(): # Return nan if data contains any nan return np.full(q.size, np.nan) # Filter out data (and weights) associated with zero weights, which also flattens them nonzero_weights = weights != 0 data = data[nonzero_weights] weights = weights[nonzero_weights] n = data.size if n == 0: # Possibly empty after nan or zero weight filtering above return np.full(q.size, np.nan) # Kish's effective sample size nw = weights.sum() ** 2 / (weights**2).sum() # Sort data and weights sorter = np.argsort(data) data = data[sorter] weights = weights[sorter] # Normalize and sum the weights weights = weights / weights.sum() weights_cum = np.append(0, weights.cumsum()) # Vectorize the computation by transposing q with respect to weights q = np.atleast_2d(q).T # Get the interpolation parameter for each q h = _get_h(nw, q, method) # Find the samples contributing to the quantile computation (at *positions* between (h-1)/nw and h/nw) u = np.maximum((h - 1) / nw, np.minimum(h / nw, weights_cum)) # Compute their relative weight v = u * nw - h + 1 w = np.diff(v) # Apply the weights return (data * 
w).sum(axis=1) if skipna is None and da.dtype.kind in "cfO": skipna = True q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if q.ndim > 1: raise ValueError("q must be a scalar or 1d") if np.any((q < 0) | (q > 1)): raise ValueError("q values must be between 0 and 1") if dim is None: dim = da.dims if utils.is_scalar(dim): dim = [dim] # To satisfy mypy dim = cast(Sequence, dim) # need to align *and* broadcast # - `_weighted_quantile_1d` requires arrays with the same shape # - broadcast does an outer join, which can introduce NaN to weights # - therefore we first need to do align(..., join="inner") # TODO: use broadcast(..., join="inner") once available # see https://github.com/pydata/xarray/issues/6304 da, weights = align(da, self.weights, join="inner") da, weights = broadcast(da, weights) result = apply_ufunc( _weighted_quantile_1d, da, weights, input_core_dims=[dim, dim], output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="parallelized", vectorize=True, kwargs={"q": q, "skipna": skipna}, ) result = result.transpose("quantile", ...) result = result.assign_coords(quantile=q).squeeze() return result def _implementation(self, func, dim, **kwargs): raise NotImplementedError("Use `Dataset.weighted` or `DataArray.weighted`") def sum_of_weights( self, dim: Dims = None, *, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._sum_of_weights, dim=dim, keep_attrs=keep_attrs ) def sum_of_squares( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._sum_of_squares, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_sum, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def var( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_var, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def std( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_std, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def quantile( self, q: ArrayLike, *, dim: Dims = None, keep_attrs: bool | None = None, skipna: bool = True, ) -> T_Xarray: return self._implementation( self._weighted_quantile, q=q, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def __repr__(self) -> str: """provide a nice str repr of our Weighted object""" klass = self.__class__.__name__ weight_dims = ", ".join(map(str, self.weights.dims)) return f"{klass} with weights along dimensions: {weight_dims}" class DataArrayWeighted(Weighted["DataArray"]): def _implementation(self, func, dim, **kwargs) -> DataArray: self._check_dim(dim) dataset = self.obj._to_temp_dataset() dataset = dataset.map(func, dim=dim, **kwargs) return self.obj._from_temp_dataset(dataset) class DatasetWeighted(Weighted["Dataset"]): def _implementation(self, func, dim, **kwargs) -> Dataset: self._check_dim(dim) return self.obj.map(func, dim=dim, **kwargs) def _inject_docstring(cls, cls_name): cls.sum_of_weights.__doc__ = _SUM_OF_WEIGHTS_DOCSTRING.format(cls=cls_name) 
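    # Editor's note: the assignments in this helper fill the shared docstring
    # templates for each reduction. A minimal usage sketch of the weighted API
    # assembled above (array values and the dimension name are made up for
    # illustration; ``DataArray.weighted`` is the public entry point):
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     da = xr.DataArray([1.0, 2.0, np.nan], dims="x")
    #     weights = xr.DataArray([1.0, 3.0, 2.0], dims="x")
    #     da.weighted(weights).mean("x")
    #     # -> (1*1 + 3*2) / (1 + 3) = 1.75; the NaN is skipped and its weight
    #     #    is excluded from the sum of weights.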
cls.sum.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="sum", on_zero="0" ) cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="mean", on_zero="NaN" ) cls.sum_of_squares.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="sum_of_squares", on_zero="0" ) cls.var.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="var", on_zero="NaN" ) cls.std.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="std", on_zero="NaN" ) cls.quantile.__doc__ = _WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE.format(cls=cls_name) _inject_docstring(DataArrayWeighted, "DataArray") _inject_docstring(DatasetWeighted, "Dataset") ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/conventions.py��������������������������������������������������������������0000664�0000000�0000000�00000076255�15114646760�0017607�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import itertools import warnings from collections import defaultdict from collections.abc import Hashable, Iterable, Mapping, MutableMapping from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast import numpy as np from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding import strings, variables from xarray.coding.variables import SerializationWarning, pop_to from xarray.core import indexing from xarray.core.common import ( _contains_datetime_like_objects, contains_cftime_datetimes, ) from xarray.core.utils import emit_user_level_warning from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.utils import is_duck_array CF_RELATED_DATA = ( "bounds", "grid_mapping", "climatology", "geometry", "node_coordinates", "node_count", "part_node_count", "interior_ring", "cell_measures", "formula_terms", ) CF_RELATED_DATA_NEEDS_PARSING = ( "grid_mapping", "cell_measures", "formula_terms", ) if TYPE_CHECKING: from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict] T_Name = Union[Hashable, None] T_Variables = Mapping[Any, Variable] T_Attrs = MutableMapping[Any, Any] T_DropVariables = Union[str, Iterable[Hashable], None] T_DatasetOrAbstractstore = Union[Dataset, AbstractDataStore] def ensure_not_multiindex(var: Variable, name: T_Name = None) -> None: # only the pandas multi-index dimension coordinate cannot be serialized (tuple values) if isinstance(var._data, indexing.PandasMultiIndexingAdapter): if name is None and isinstance(var, IndexVariable): name = var.name if var.dims == (name,): raise NotImplementedError( f"variable {name!r} is a MultiIndex, which cannot yet be " "serialized. Instead, either use reset_index() " "to convert MultiIndex levels into coordinate variables instead " "or use https://cf-xarray.readthedocs.io/en/latest/coding.html." 
) def encode_cf_variable( var: Variable, needs_copy: bool = True, name: T_Name = None ) -> Variable: """ Converts a Variable into a Variable which follows some of the CF conventions: - Nans are masked using _FillValue (or the deprecated missing_value) - Rescaling via: scale_factor and add_offset - datetimes are converted to the CF 'units since time' format - dtype encodings are enforced. Parameters ---------- var : Variable A variable holding un-encoded data. Returns ------- out : Variable A variable which has been encoded as described above. """ ensure_not_multiindex(var, name=name) for coder in [ CFDatetimeCoder(), CFTimedeltaCoder(), variables.CFScaleOffsetCoder(), variables.CFMaskCoder(), variables.NativeEnumCoder(), variables.NonStringCoder(), variables.DefaultFillvalueCoder(), variables.BooleanCoder(), ]: var = coder.encode(var, name=name) for attr_name in CF_RELATED_DATA: pop_to(var.encoding, var.attrs, attr_name) return var def decode_cf_variable( name: Hashable, var: Variable, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder = True, decode_endianness: bool = True, stack_char_dim: bool = True, use_cftime: bool | None = None, decode_timedelta: bool | CFTimedeltaCoder | None = None, ) -> Variable: """ Decodes a variable which may hold CF encoded information. This includes variables that have been masked and scaled, which hold CF style time variables (this is almost always the case if the dataset has been serialized) and which have strings encoded as character arrays. Parameters ---------- name : str Name of the variable. Used for better error messages. var : Variable A variable holding potentially CF encoded information. concat_characters : bool Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). If the _Unsigned attribute is present treat integer arrays as unsigned. decode_times : bool or CFDatetimeCoder Decode cf times ("hours since 2000-01-01") to np.datetime64. decode_endianness : bool Decode arrays from non-native to native endianness. stack_char_dim : bool Whether to stack characters into bytes along the last dimension of this array. Passed as an argument because we need to look at the full dataset to figure out if this is appropriate. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. decode_timedelta : None, bool, or CFTimedeltaCoder Decode cf timedeltas ("hours") to np.timedelta64. Returns ------- out : Variable A variable holding the decoded equivalent of var. 
""" # Ensure datetime-like Variables are passed through unmodified (GH 6453) if _contains_datetime_like_objects(var): return var original_dtype = var.dtype decode_timedelta_was_none = decode_timedelta is None if decode_timedelta is None: if isinstance(decode_times, CFDatetimeCoder): decode_timedelta = CFTimedeltaCoder(time_unit=decode_times.time_unit) else: decode_timedelta = bool(decode_times) if concat_characters: if stack_char_dim: var = strings.CharacterArrayCoder().decode(var, name=name) var = strings.EncodedStringCoder().decode(var) if original_dtype.kind == "O": var = variables.ObjectVLenStringCoder().decode(var) original_dtype = var.dtype if original_dtype.kind == "T": var = variables.Numpy2StringDTypeCoder().decode(var) if mask_and_scale: for coder in [ variables.CFMaskCoder( decode_times=decode_times, decode_timedelta=decode_timedelta ), variables.CFScaleOffsetCoder( decode_times=decode_times, decode_timedelta=decode_timedelta ), ]: var = coder.decode(var, name=name) if decode_timedelta: if isinstance(decode_timedelta, bool): decode_timedelta = CFTimedeltaCoder( decode_via_units=decode_timedelta, decode_via_dtype=decode_timedelta ) decode_timedelta._emit_decode_timedelta_future_warning = ( decode_timedelta_was_none ) var = decode_timedelta.decode(var, name=name) if decode_times: # remove checks after end of deprecation cycle if not isinstance(decode_times, CFDatetimeCoder): if use_cftime is not None: emit_user_level_warning( "Usage of 'use_cftime' as a kwarg is deprecated. " "Please pass a 'CFDatetimeCoder' instance initialized " "with 'use_cftime' to the 'decode_times' kwarg instead.\n" "Example usage:\n" " time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n" " ds = xr.open_dataset(decode_times=time_coder)\n", DeprecationWarning, ) decode_times = CFDatetimeCoder(use_cftime=use_cftime) elif use_cftime is not None: raise TypeError( "Usage of 'use_cftime' as a kwarg is not allowed " "if a 'CFDatetimeCoder' instance is passed to " "'decode_times'. Please set 'use_cftime' " "when initializing 'CFDatetimeCoder' instead.\n" "Example usage:\n" " time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n" " ds = xr.open_dataset(decode_times=time_coder)\n", ) var = decode_times.decode(var, name=name) if decode_endianness and not var.dtype.isnative: var = variables.EndianCoder().decode(var) original_dtype = var.dtype var = variables.BooleanCoder().decode(var) dimensions, data, attributes, encoding = variables.unpack_for_decoding(var) encoding.setdefault("dtype", original_dtype) if ( # we don't need to lazily index duck arrays not is_duck_array(data) # These arrays already support lazy indexing # OR for IndexingAdapters, it makes no sense to wrap them and not isinstance(data, indexing.ExplicitlyIndexedNDArrayMixin) ): # this path applies to bare BackendArray objects. # It is not hit for any internal Xarray backend data = indexing.LazilyIndexedArray(data) return Variable(dimensions, data, attributes, encoding=encoding, fastpath=True) def _update_bounds_attributes(variables: T_Variables) -> None: """Adds time attributes to time bounds variables. Variables handling time bounds ("Cell boundaries" in the CF conventions) do not necessarily carry the necessary attributes to be decoded. This copies the attributes from the time variable to the associated boundaries. 
See Also: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/ cf-conventions.html#cell-boundaries https://github.com/pydata/xarray/issues/2565 """ # For all time variables with bounds for v in variables.values(): attrs = v.attrs units = attrs.get("units") has_date_units = isinstance(units, str) and "since" in units if has_date_units and "bounds" in attrs and attrs["bounds"] in variables: bounds_attrs = variables[attrs["bounds"]].attrs bounds_attrs.setdefault("units", attrs["units"]) if "calendar" in attrs: bounds_attrs.setdefault("calendar", attrs["calendar"]) def _update_bounds_encoding(variables: T_Variables) -> None: """Adds time encoding to time bounds variables. Variables handling time bounds ("Cell boundaries" in the CF conventions) do not necessarily carry the necessary attributes to be decoded. This copies the encoding from the time variable to the associated bounds variable so that we write CF-compliant files. See Also: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/ cf-conventions.html#cell-boundaries https://github.com/pydata/xarray/issues/2565 """ # For all time variables with bounds for name, v in variables.items(): attrs = v.attrs encoding = v.encoding has_date_units = "units" in encoding and "since" in encoding["units"] is_datetime_type = np.issubdtype( v.dtype, np.datetime64 ) or contains_cftime_datetimes(v) if ( is_datetime_type and not has_date_units and "bounds" in attrs and attrs["bounds"] in variables ): emit_user_level_warning( f"Variable {name} has datetime type and a " f"bounds variable but {name}.encoding does not have " f"units specified. The units encodings for {name} " f"and {attrs['bounds']} will be determined independently " "and may not be equal, counter to CF-conventions. " "If this is a concern, specify a units encoding for " f"{name} before writing to a file.", ) if has_date_units and "bounds" in attrs and attrs["bounds"] in variables: bounds_encoding = variables[attrs["bounds"]].encoding bounds_encoding.setdefault("units", encoding["units"]) if "calendar" in encoding: bounds_encoding.setdefault("calendar", encoding["calendar"]) T = TypeVar("T") U = TypeVar("U") def _item_or_default(obj: Mapping[Any, T | U] | T, key: Hashable, default: T) -> T | U: """ Return item by key if obj is mapping and key is present, else return default value. """ return obj.get(key, default) if isinstance(obj, Mapping) else obj def decode_cf_variables( variables: T_Variables, attributes: T_Attrs, concat_characters: bool | Mapping[str, bool] = True, mask_and_scale: bool | Mapping[str, bool] = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | Mapping[str, bool] | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, ) -> tuple[T_Variables, T_Attrs, set[Hashable]]: """ Decode several CF encoded variables. See: decode_cf_variable """ # Only emit one instance of the decode_timedelta default change # FutureWarning. This can be removed once this change is made. 
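    # Editor's note (hedged usage sketch, not part of the original module): the
    # ``concat_characters``, ``mask_and_scale``, ``decode_times`` and
    # ``decode_timedelta`` arguments accept per-variable mappings, resolved through
    # ``_item_or_default`` below, so decoding can be toggled for individual
    # variables. Dataset and variable names here are hypothetical:
    #
    #     import xarray as xr
    #
    #     decoded = xr.decode_cf(
    #         raw_ds,  # an un-decoded Dataset, e.g. from open_dataset(..., decode_cf=False)
    #         decode_times={"time": True, "raw_time_counter": False},
    #     )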
warnings.filterwarnings("once", "decode_timedelta", FutureWarning) dimensions_used_by = defaultdict(list) for v in variables.values(): for d in v.dims: dimensions_used_by[d].append(v) def stackable(dim: Hashable) -> bool: # figure out if a dimension can be concatenated over if dim in variables: return False for v in dimensions_used_by[dim]: if v.dtype.kind != "S" or dim != v.dims[-1]: return False return True coord_names = set() if isinstance(drop_variables, str): drop_variables = [drop_variables] elif drop_variables is None: drop_variables = [] drop_variables = set(drop_variables) # Time bounds coordinates might miss the decoding attributes if decode_times: _update_bounds_attributes(variables) new_vars = {} for k, v in variables.items(): if k in drop_variables: continue stack_char_dim = ( _item_or_default(concat_characters, k, True) and v.dtype == "S1" and v.ndim > 0 and stackable(v.dims[-1]) ) try: new_vars[k] = decode_cf_variable( k, v, concat_characters=_item_or_default(concat_characters, k, True), mask_and_scale=_item_or_default(mask_and_scale, k, True), decode_times=cast( bool | CFDatetimeCoder, _item_or_default(decode_times, k, True) ), stack_char_dim=stack_char_dim, use_cftime=_item_or_default(use_cftime, k, None), decode_timedelta=_item_or_default(decode_timedelta, k, None), ) except Exception as e: e.add_note(f"Raised while decoding variable {k!r} with value {v!r}") raise if decode_coords in [True, "coordinates", "all"]: var_attrs = new_vars[k].attrs if "coordinates" in var_attrs: var_coord_names = [ c for c in var_attrs["coordinates"].split() if c in variables ] # propagate as is new_vars[k].encoding["coordinates"] = var_attrs["coordinates"] del var_attrs["coordinates"] # but only use as coordinate if existing if var_coord_names: coord_names.update(var_coord_names) if decode_coords == "all": for attr_name in CF_RELATED_DATA: if attr_name in var_attrs: # fixes stray colon attr_val = var_attrs[attr_name].replace(" :", ":") var_names = attr_val.split() # if grid_mapping is a single string, do not enter here if ( attr_name in CF_RELATED_DATA_NEEDS_PARSING and len(var_names) > 1 ): # map the keys to list of strings # "A: b c d E: f g" returns # {"A": ["b", "c", "d"], "E": ["f", "g"]} roles_and_names = defaultdict(list) key = None for vname in var_names: if ":" in vname: key = vname.strip(":") else: if key is None: raise ValueError( f"First element {vname!r} of [{attr_val!r}] misses ':', " f"cannot decode {attr_name!r}." ) roles_and_names[key].append(vname) # for grid_mapping keys are var_names if attr_name == "grid_mapping": var_names = list(roles_and_names.keys()) else: # for cell_measures and formula_terms values are var names var_names = list(itertools.chain(*roles_and_names.values())) # consistency check (one element per key) if len(var_names) != len(roles_and_names.keys()): emit_user_level_warning( f"Attribute {attr_name!r} has malformed content [{attr_val!r}], " f"decoding {var_names!r} to coordinates." 
) if all(var_name in variables for var_name in var_names): new_vars[k].encoding[attr_name] = attr_val coord_names.update(var_names) else: referenced_vars_not_in_variables = [ proj_name for proj_name in var_names if proj_name not in variables ] emit_user_level_warning( f"Variable(s) referenced in {attr_name} not in variables: {referenced_vars_not_in_variables}", ) del var_attrs[attr_name] if decode_coords and isinstance(attributes.get("coordinates", None), str): attributes = dict(attributes) crds = attributes.pop("coordinates") coord_names.update(crds.split()) return new_vars, attributes, coord_names def decode_cf( obj: T_DatasetOrAbstractstore, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, ) -> Dataset: """Decode the given Dataset or Datastore according to CF conventions into a new Dataset. Parameters ---------- obj : Dataset or DataStore Object to decode. concat_characters : bool, optional Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool, optional Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder], optional Decode cf times (e.g., integers since "hours since 2000-01-01") to np.datetime64. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. decode_timedelta : bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder], optional If True or :py:class:`CFTimedeltaCoder`, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same behavior as decode_times. The resolution of the decoded timedeltas can be configured with the ``time_unit`` argument in the :py:class:`CFTimedeltaCoder` passed. 
Returns ------- decoded : Dataset """ from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset vars: T_Variables attrs: T_Attrs if isinstance(obj, Dataset): vars = obj._variables attrs = obj.attrs extra_coords = set(obj.coords) close = obj._close encoding = obj.encoding elif isinstance(obj, AbstractDataStore): vars, attrs = obj.load() extra_coords = set() close = obj.close encoding = obj.get_encoding() else: raise TypeError("can only decode Dataset or DataStore objects") vars, attrs, coord_names = decode_cf_variables( vars, attrs, concat_characters, mask_and_scale, decode_times, decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) ds = Dataset(vars, attrs=attrs) ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars)) ds.set_close(close) ds.encoding = encoding return ds def cf_decoder( variables: T_Variables, attributes: T_Attrs, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, ) -> tuple[T_Variables, T_Attrs]: """ Decode a set of CF encoded variables and attributes. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value concat_characters : bool Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] Decode cf times ("hours since 2000-01-01") to np.datetime64. Returns ------- decoded_variables : dict A dictionary mapping from variable name to xarray.Variable objects. decoded_attributes : dict A dictionary mapping from attribute name to values. See Also -------- decode_cf_variable """ variables, attributes, _ = decode_cf_variables( variables, attributes, concat_characters, mask_and_scale, decode_times, ) return variables, attributes def _encode_coordinates( variables: T_Variables, attributes: T_Attrs, non_dim_coord_names ): # calculate global and variable specific coordinates non_dim_coord_names = set(non_dim_coord_names) for name in list(non_dim_coord_names): if isinstance(name, str) and " " in name: emit_user_level_warning( f"coordinate {name!r} has a space in its name, which means it " "cannot be marked as a coordinate on disk and will be " "saved as a data variable instead", category=SerializationWarning, ) non_dim_coord_names.discard(name) global_coordinates = non_dim_coord_names.copy() variable_coordinates = defaultdict(set) not_technically_coordinates = set() for coord_name in non_dim_coord_names: target_dims = variables[coord_name].dims for k, v in variables.items(): if ( k not in non_dim_coord_names and k not in v.dims and set(target_dims) <= set(v.dims) ): variable_coordinates[k].add(coord_name) if any( coord_name in v.encoding.get(attr_name, tuple()) for attr_name in CF_RELATED_DATA ): not_technically_coordinates.add(coord_name) global_coordinates.discard(coord_name) variables = {k: v.copy(deep=False) for k, v in variables.items()} # keep track of variable names written to file under the "coordinates" attributes written_coords = set() for name, var in variables.items(): encoding = var.encoding attrs = var.attrs if "coordinates" in attrs and "coordinates" in encoding: raise ValueError( f"'coordinates' found in both attrs and encoding for variable {name!r}." 
) # if coordinates set to None, don't write coordinates attribute if ("coordinates" in attrs and attrs.get("coordinates") is None) or ( "coordinates" in encoding and encoding.get("coordinates") is None ): # make sure "coordinates" is removed from attrs/encoding attrs.pop("coordinates", None) encoding.pop("coordinates", None) continue # this will copy coordinates from encoding to attrs if "coordinates" in attrs # after the next line, "coordinates" is never in encoding # we get support for attrs["coordinates"] for free. coords_str = pop_to(encoding, attrs, "coordinates") or attrs.get("coordinates") if not coords_str and variable_coordinates[name]: coordinates_text = " ".join( str(coord_name) for coord_name in sorted(variable_coordinates[name]) if coord_name not in not_technically_coordinates ) if coordinates_text: attrs["coordinates"] = coordinates_text if "coordinates" in attrs: written_coords.update(attrs["coordinates"].split()) # These coordinates are not associated with any particular variables, so we # save them under a global 'coordinates' attribute so xarray can roundtrip # the dataset faithfully. Because this serialization goes beyond CF # conventions, only do it if necessary. # Reference discussion: # https://cfconventions.org/mailing-list-archive/Data/7400.html global_coordinates.difference_update(written_coords) if global_coordinates: attributes = dict(attributes) if "coordinates" in attributes: emit_user_level_warning( f"cannot serialize global coordinates {global_coordinates!r} because the global " f"attribute 'coordinates' already exists. This may prevent faithful roundtripping" f"of xarray datasets", category=SerializationWarning, ) else: attributes["coordinates"] = " ".join(sorted(map(str, global_coordinates))) return variables, attributes def encode_dataset_coordinates(dataset: Dataset): """Encode coordinates on the given dataset object into variable specific and global attributes. When possible, this is done according to CF conventions. Parameters ---------- dataset : Dataset Object to encode. Returns ------- variables : dict attrs : dict """ non_dim_coord_names = set(dataset.coords) - set(dataset.dims) return _encode_coordinates( dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names ) def cf_encoder(variables: T_Variables, attributes: T_Attrs): """ Encode a set of CF encoded variables and attributes. Takes a dicts of variables and attributes and encodes them to conform to CF conventions as much as possible. This includes masking, scaling, character array handling, and CF-time encoding. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value Returns ------- encoded_variables : dict A dictionary mapping from variable name to xarray.Variable, encoded_attributes : dict A dictionary mapping from attribute name to value See Also -------- decode_cf_variable, encode_cf_variable """ # add encoding for time bounds variables if present. 
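    # Editor's note (illustrative sketch, not part of the original module): as
    # implemented in ``_encode_coordinates`` above, explicitly setting the
    # ``coordinates`` key to ``None`` in a variable's ``attrs`` or ``encoding``
    # suppresses the on-disk ``coordinates`` attribute for that variable. Dataset
    # and variable names are made up:
    #
    #     ds["foo"].encoding["coordinates"] = None
    #     ds.to_netcdf("out.nc")  # "foo" is written without a coordinates attribute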
_update_bounds_encoding(variables) new_vars = {} for k, v in variables.items(): try: new_vars[k] = encode_cf_variable(v, name=k) except Exception as e: e.add_note(f"Raised while encoding variable {k!r} with value {v!r}") raise # Remove attrs from bounds variables (issue #2921) for var in new_vars.values(): bounds = var.attrs.get("bounds") if bounds and bounds in new_vars: # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries for attr in [ "units", "standard_name", "axis", "positive", "calendar", "long_name", "leap_month", "leap_year", "month_lengths", ]: if ( attr in new_vars[bounds].attrs and attr in var.attrs and new_vars[bounds].attrs[attr] == var.attrs[attr] ): new_vars[bounds].attrs.pop(attr) return new_vars, attributes ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/convert.py������������������������������������������������������������������0000664�0000000�0000000�00000014766�15114646760�0016721�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Functions for converting to and from xarray objects""" from collections import Counter import numpy as np from xarray.coders import CFDatetimeCoder from xarray.coding.times import CFTimedeltaCoder from xarray.conventions import decode_cf from xarray.core import duck_array_ops from xarray.core.dataarray import DataArray from xarray.core.dtypes import get_fill_value from xarray.namedarray.pycompat import array_type iris_forbidden_keys = { "standard_name", "long_name", "units", "bounds", "axis", "calendar", "leap_month", "leap_year", "month_lengths", "coordinates", "grid_mapping", "climatology", "cell_methods", "formula_terms", "compress", "missing_value", "add_offset", "scale_factor", "valid_max", "valid_min", "valid_range", "_FillValue", } cell_methods_strings = { "point", "sum", "maximum", "median", "mid_range", "minimum", "mean", "mode", "standard_deviation", "variance", } def encode(var): return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable)) def _filter_attrs(attrs, ignored_attrs): """Return attrs that are not in ignored_attrs""" return {k: v for k, v in attrs.items() if k not in ignored_attrs} def _pick_attrs(attrs, keys): """Return attrs with keys in keys list""" return {k: v for k, v in attrs.items() if k in keys} def _get_iris_args(attrs): """Converts the xarray attrs into args that can be passed into Iris""" # iris.unit is deprecated in Iris v1.9 import cf_units args = {"attributes": _filter_attrs(attrs, iris_forbidden_keys)} args.update(_pick_attrs(attrs, ("standard_name", "long_name"))) unit_args = _pick_attrs(attrs, ("calendar",)) if "units" in attrs: args["units"] = cf_units.Unit(attrs["units"], **unit_args) return args # TODO: Add converting bounds from xarray to Iris and back def to_iris(dataarray): """Convert a DataArray into an Iris Cube""" # Iris not a hard dependency import iris from iris.fileformats.netcdf import parse_cell_methods dim_coords = [] aux_coords = [] for 
coord_name in dataarray.coords: coord = encode(dataarray.coords[coord_name]) coord_args = _get_iris_args(coord.attrs) coord_args["var_name"] = coord_name axis = None if coord.dims: axis = dataarray.get_axis_num(coord.dims) if coord_name in dataarray.dims: try: iris_coord = iris.coords.DimCoord(coord.values, **coord_args) dim_coords.append((iris_coord, axis)) except ValueError: iris_coord = iris.coords.AuxCoord(coord.values, **coord_args) aux_coords.append((iris_coord, axis)) else: iris_coord = iris.coords.AuxCoord(coord.values, **coord_args) aux_coords.append((iris_coord, axis)) args = _get_iris_args(dataarray.attrs) args["var_name"] = dataarray.name args["dim_coords_and_dims"] = dim_coords args["aux_coords_and_dims"] = aux_coords if "cell_methods" in dataarray.attrs: args["cell_methods"] = parse_cell_methods(dataarray.attrs["cell_methods"]) masked_data = duck_array_ops.masked_invalid(dataarray.data) cube = iris.cube.Cube(masked_data, **args) return cube def _iris_obj_to_attrs(obj): """Return a dictionary of attrs when given an Iris object""" attrs = {"standard_name": obj.standard_name, "long_name": obj.long_name} if obj.units.calendar: attrs["calendar"] = obj.units.calendar if obj.units.origin != "1" and not obj.units.is_unknown(): attrs["units"] = obj.units.origin attrs.update(obj.attributes) return {k: v for k, v in attrs.items() if v is not None} def _iris_cell_methods_to_str(cell_methods_obj): """Converts an Iris cell methods into a string""" cell_methods = [] for cell_method in cell_methods_obj: names = "".join(f"{n}: " for n in cell_method.coord_names) intervals = " ".join( f"interval: {interval}" for interval in cell_method.intervals ) comments = " ".join(f"comment: {comment}" for comment in cell_method.comments) extra = f"{intervals} {comments}".strip() if extra: extra = f" ({extra})" cell_methods.append(names + cell_method.method + extra) return " ".join(cell_methods) def _name(iris_obj, default="unknown"): """Mimics `iris_obj.name()` but with different name resolution order. Similar to iris_obj.name() method, but using iris_obj.var_name first to enable roundtripping. 
""" return iris_obj.var_name or iris_obj.standard_name or iris_obj.long_name or default def from_iris(cube): """Convert an Iris cube into a DataArray""" import iris.exceptions name = _name(cube) if name == "unknown": name = None dims = [] for i in range(cube.ndim): try: dim_coord = cube.coord(dim_coords=True, dimensions=(i,)) dims.append(_name(dim_coord)) except iris.exceptions.CoordinateNotFoundError: dims.append(f"dim_{i}") if len(set(dims)) != len(dims): duplicates = [k for k, v in Counter(dims).items() if v > 1] raise ValueError(f"Duplicate coordinate name {duplicates}.") coords = {} for coord in cube.coords(): coord_attrs = _iris_obj_to_attrs(coord) coord_dims = [dims[i] for i in cube.coord_dims(coord)] if coord_dims: coords[_name(coord)] = (coord_dims, coord.points, coord_attrs) else: coords[_name(coord)] = ((), coord.points.item(), coord_attrs) array_attrs = _iris_obj_to_attrs(cube) cell_methods = _iris_cell_methods_to_str(cube.cell_methods) if cell_methods: array_attrs["cell_methods"] = cell_methods # Deal with iris 1.* and 2.* cube_data = cube.core_data() if hasattr(cube, "core_data") else cube.data # Deal with dask and numpy masked arrays dask_array_type = array_type("dask") if isinstance(cube_data, dask_array_type): from dask.array import ma as dask_ma filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype)) elif isinstance(cube_data, np.ma.MaskedArray): filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype)) else: filled_data = cube_data dataarray = DataArray( filled_data, coords=coords, name=name, attrs=array_attrs, dims=dims ) decoded_ds = decode_cf(dataarray._to_temp_dataset()) return dataarray._from_temp_dataset(decoded_ds) ����������xarray-2025.12.0/xarray/core/�����������������������������������������������������������������������0000775�0000000�0000000�00000000000�15114646760�0015601�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/core/__init__.py������������������������������������������������������������0000664�0000000�0000000�00000000000�15114646760�0017700�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/core/_aggregations.py�������������������������������������������������������0000664�0000000�0000000�00001223324�15114646760�0020773�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. 
from __future__ import annotations from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Any from xarray.core import duck_array_ops from xarray.core.options import OPTIONS from xarray.core.types import Dims, Self from xarray.core.utils import contains_only_chunked_or_numpy, module_available if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset flox_available = module_available("flox") class DataTreeAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.count() Group: / Dimensions: () Data variables: foo int64 8B 5 """ return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... 
np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.all() Group: / Dimensions: () Data variables: foo bool 1B False """ return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.any() Group: / Dimensions: () Data variables: foo bool 1B True """ return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : DataTree New DataTree with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.max() Group: / Dimensions: () Data variables: foo float64 8B 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.max(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.min() Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.min(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." 
or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.mean() Group: / Dimensions: () Data variables: foo float64 8B 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> dt.mean(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. 
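        A reduction can also be restricted to a single named dimension rather
        than all dimensions; as an illustrative sketch (with ``dt`` being the
        example tree from the Examples section below):

        >>> dt.prod(dim="time")  # doctest: +SKIP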
Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.prod() Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.prod(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> dt.prod(skipna=True, min_count=2) Group: / Dimensions: () Data variables: foo float64 8B 0.0 """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.sum() Group: / Dimensions: () Data variables: foo float64 8B 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.sum(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> dt.sum(skipna=True, min_count=2) Group: / Dimensions: () Data variables: foo float64 8B 8.0 """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.std() Group: / Dimensions: () Data variables: foo float64 8B 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> dt.std(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> dt.std(skipna=True, ddof=1) Group: / Dimensions: () Data variables: foo float64 8B 1.14 """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.var() Group: / Dimensions: () Data variables: foo float64 8B 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> dt.var(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> dt.var(skipna=True, ddof=1) Group: / Dimensions: () Data variables: foo float64 8B 1.3 """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... 
) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.median() Group: / Dimensions: () Data variables: foo float64 8B 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.median(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumsum DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.cumsum() Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.cumsum(skipna=False) Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumprod DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.cumprod() Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.cumprod(skipna=False) Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DatasetAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... 
labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.count() Size: 8B Dimensions: () Data variables: da int64 8B 5 """ return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.all() Size: 1B Dimensions: () Data variables: da bool 1B False """ return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.any() Size: 1B Dimensions: () Data variables: da bool 1B True """ return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.max() Size: 8B Dimensions: () Data variables: da float64 8B 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.max(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
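        With dask-backed data the reduction stays lazy until explicitly
        computed; as an illustrative sketch (assuming dask is installed, and
        with ``ds`` being the example dataset from the Examples section
        below):

        >>> ds.chunk({"time": 3}).min().compute()  # doctest: +SKIP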
Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.min() Size: 8B Dimensions: () Data variables: da float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.min(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.mean() Size: 8B Dimensions: () Data variables: da float64 8B 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> ds.mean(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. 
skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.prod() Size: 8B Dimensions: () Data variables: da float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.prod(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.prod(skipna=True, min_count=2) Size: 8B Dimensions: () Data variables: da float64 8B 0.0 """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.sum() Size: 8B Dimensions: () Data variables: da float64 8B 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.sum(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.sum(skipna=True, min_count=2) Size: 8B Dimensions: () Data variables: da float64 8B 8.0 """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.std() Size: 8B Dimensions: () Data variables: da float64 8B 1.02 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.std(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.std(skipna=True, ddof=1) Size: 8B Dimensions: () Data variables: da float64 8B 1.14 """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.var() Size: 8B Dimensions: () Data variables: da float64 8B 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> ds.var(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.var(skipna=True, ddof=1) Size: 8B Dimensions: () Data variables: da float64 8B 1.3 """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.median() Size: 8B Dimensions: () Data variables: da float64 8B 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.median(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum Dataset.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod Dataset.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DataArrayAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." 
or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.count() Size: 8B array(5) """ return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.all() Size: 1B array(False) """ return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.any() Size: 1B array(True) """ return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.max() Size: 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> da.max(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.min() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.min(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.mean() Size: 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. 
>>> da.mean(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.prod() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.prod(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.prod(skipna=True, min_count=2) Size: 8B array(0.) """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.sum() Size: 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> da.sum(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.sum(skipna=True, min_count=2) Size: 8B array(8.) """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... 
time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.std() Size: 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> da.std(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.std(skipna=True, ddof=1) Size: 8B array(1.14017543) """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.var() Size: 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> da.var(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.var(skipna=True, ddof=1) Size: 8B array(1.3) """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.median() Size: 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> da.median(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.cumsum() Size: 48B array([1., 3., 6., 6., 8., 8.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumsum(skipna=False) Size: 48B array([ 1., 3., 6., 6., 8., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) Self: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumprod() Size: 48B array([1., 2., 6., 0., 0., 0.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 0., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) Dataset: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").count() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) int64 24B 1 2 2 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.groupby("labels").all() Size: 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) bool 3B False True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").any() Size: 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) bool 3B True True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").max() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").max(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 3.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. 
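For illustration only (a minimal sketch, assuming the example Dataset ``ds`` constructed in the Examples below), the flox path can be switched off globally with the same ``use_flox`` option that the implementation checks, falling back to the plain reduction:

>>> with xr.set_options(use_flox=False):  # doctest: +SKIP
...     ds.groupby("labels").min()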
Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").min() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").min(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 0.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.groupby("labels").mean() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").mean(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 1.5 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").prod() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 4.0 0.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").prod(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 0.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").prod(skipna=True, min_count=2) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 0.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").sum() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 4.0 3.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").sum(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 3.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").sum(skipna=True, min_count=2) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 3.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").std() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 0.0 0.0 1.5 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").std(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 1.5 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").std(skipna=True, ddof=1) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 2.121 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").var() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 0.0 0.0 2.25 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").var(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 2.25 Specify ``ddof=1`` for an unbiased estimate. 
>>> ds.groupby("labels").var(skipna=True, ddof=1) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 4.5 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").median() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").median(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 1.5 """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." 
or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum Dataset.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod Dataset.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DatasetResampleAggregations: _obj: Dataset def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. 
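For illustration only (a minimal sketch, assuming the example Dataset ``ds`` constructed in the Examples below), the flox path can be switched off globally with the same ``use_flox`` option that the implementation checks, falling back to the plain reduction:

>>> with xr.set_options(use_flox=False):  # doctest: +SKIP
...     ds.resample(time="3ME").count()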
Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").count() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) int64 24B 1 3 1 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").all() Size: 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) bool 3B True True False """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). 
Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").any() Size: 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) bool 3B True True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`resampling` User guide on resampling operations. 
Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").max() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 3.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").max(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 3.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").min() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").min(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").mean() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 1.667 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").mean(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 1.667 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").prod() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").prod(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").prod(skipna=True, min_count=2) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 0.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").sum() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 5.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").sum(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 5.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").sum(skipna=True, min_count=2) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 5.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").std() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.247 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").std(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.247 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").std(skipna=True, ddof=1) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 1.528 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").var() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.556 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").var(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.556 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").var(skipna=True, ddof=1) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 2.333 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").median() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").median(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 2.0 nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum Dataset.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. 
``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod Dataset.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DataArrayGroupByAggregations: _obj: DataArray def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").count() Size: 24B array([1, 2, 2]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.groupby("labels").all() Size: 3B array([False, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").any() Size: 3B array([ True, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").max() Size: 24B array([1., 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").max(skipna=False) Size: 24B array([nan, 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... 
labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").min() Size: 24B array([1., 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").min(skipna=False) Size: 24B array([nan, 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").mean() Size: 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").mean(skipna=False) Size: 24B array([nan, 2. 
, 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").prod() Size: 24B array([1., 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").prod(skipna=False) Size: 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> da.groupby("labels").prod(skipna=True, min_count=2) Size: 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").sum() Size: 24B array([1., 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").sum(skipna=False) Size: 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> da.groupby("labels").sum(skipna=True, min_count=2) Size: 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").std() Size: 24B array([0. , 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").std(skipna=False) Size: 24B array([nan, 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").std(skipna=True, ddof=1) Size: 24B array([ nan, 0. 
, 2.12132034]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").var() Size: 24B array([0. , 0. , 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").var(skipna=False) Size: 24B array([ nan, 0. , 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").var(skipna=True, ddof=1) Size: 24B array([nan, 0. 
, 4.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").median() Size: 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").median(skipna=False) Size: 24B array([nan, 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum DataArray.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumsum() Size: 48B array([1., 2., 3., 3., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumsum(skipna=False) Size: 48B array([ 1., 2., 3., 3., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) DataArray: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod DataArray.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. 
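(A small illustrative check, reusing the ``da`` used throughout these examples and
assuming ``numpy``/``pandas``/``xarray`` are imported as ``np``/``pd``/``xr``:
unlike the reductions above, the grouped scans ``cumsum``/``cumprod`` keep the
original ``time`` dimension rather than collapsing it to ``labels``.)

>>> scanned = da.groupby("labels").cumprod()
>>> scanned.dims
('time',)
>>> int(scanned.sizes["time"])
6

Back to the flox notes: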
Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumprod() Size: 48B array([1., 2., 3., 0., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumprod(skipna=False) Size: 48B array([ 1., 2., 3., 0., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) DataArray: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").count() Size: 24B array([1, 3, 1]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").all() Size: 3B array([ True, True, False]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
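(An illustrative aside before the return description: for the ``count``/``all``/``any``
family it helps to remember that ``count`` is simply the number of non-NaN values per
resampling window, i.e. the resampled sum of ``notnull()``. The sketch below builds its
own small array and assumes ``numpy``, ``pandas`` and ``xarray`` are imported as ``np``,
``pd`` and ``xr``.)

>>> values = xr.DataArray(
...     np.array([1.0, np.nan, 3.0, 0.0, np.nan, np.nan]),
...     dims="time",
...     coords={"time": pd.date_range("2001-01-01", freq="ME", periods=6)},
... )
>>> counted = values.resample(time="3ME").count()
>>> from_notnull = values.notnull().resample(time="3ME").sum()
>>> bool((counted == from_notnull).all())
True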
Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").any() Size: 3B array([ True, True, True]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").max() Size: 24B array([1., 3., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").max(skipna=False) Size: 24B array([ 1., 3., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").min() Size: 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
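(A brief illustrative aside, reusing ``da`` from above: combining ``min`` and ``max``
over the same windows gives the within-window range, which is always non-negative; with
the default ``skipna=None`` the NaN is skipped for this float array.)

>>> window_range = da.resample(time="3ME").max() - da.resample(time="3ME").min()
>>> bool((window_range >= 0).all())
True

Continuing with ``skipna``: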
>>> da.resample(time="3ME").min(skipna=False) Size: 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").mean() Size: 24B array([1. , 1.66666667, 2. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").mean(skipna=False) Size: 24B array([1. , 1.66666667, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." 
or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").prod() Size: 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").prod(skipna=False) Size: 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").prod(skipna=True, min_count=2) Size: 24B array([nan, 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. 
``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").sum() Size: 24B array([1., 5., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=False) Size: 24B array([ 1., 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=True, min_count=2) Size: 24B array([nan, 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. 
skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").std() Size: 24B array([0. , 1.24721913, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").std(skipna=False) Size: 24B array([0. , 1.24721913, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").std(skipna=True, ddof=1) Size: 24B array([ nan, 1.52752523, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").var() Size: 24B array([0. , 1.55555556, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").var(skipna=False) Size: 24B array([0. , 1.55555556, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").var(skipna=True, ddof=1) Size: 24B array([ nan, 2.33333333, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. 
These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").median() Size: 24B array([1., 2., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").median(skipna=False) Size: 24B array([ 1., 2., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum DataArray.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... 
time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").cumsum() Size: 48B array([1., 2., 5., 5., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumsum(skipna=False) Size: 48B array([ 1., 2., 5., 5., 2., nan]) Coordinates: labels (time) DataArray: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod DataArray.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").cumprod() Size: 48B array([1., 2., 6., 0., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 2., nan]) Coordinates: labels (time) Self: raise NotImplementedError def __add__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.add) def __sub__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.sub) def __mul__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mul) def __pow__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.pow) def __truediv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.floordiv) def __mod__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mod) def __and__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.and_) def __xor__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.xor) def __or__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.or_) def __lshift__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.lshift) def __rshift__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.rshift) def __lt__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.lt) def __le__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.le) def __gt__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.gt) def __ge__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.ge) def __eq__(self, other: DtCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: DtCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return 
self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DatasetOpsMixin: __slots__ = () def _binary_op( self, other: DsCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: DsCompatible) -> Self: ... def __add__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: DsCompatible) -> Self: ... def __sub__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: DsCompatible) -> Self: ... def __mul__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: DsCompatible) -> Self: ... def __pow__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: DsCompatible) -> Self: ... def __truediv__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: DsCompatible) -> Self: ... 
def __floordiv__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: DsCompatible) -> Self: ... def __mod__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: DsCompatible) -> Self: ... def __and__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: DsCompatible) -> Self: ... def __xor__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: DsCompatible) -> Self: ... def __or__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: DsCompatible) -> Self: ... def __lshift__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: DsCompatible) -> Self: ... def __rshift__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: DsCompatible) -> Self: ... def __lt__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: DsCompatible) -> Self: ... def __le__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: DsCompatible) -> Self: ... def __gt__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: DsCompatible) -> Self: ... def __ge__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: DsCompatible) -> Self: ... def __eq__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: DsCompatible) -> Self: ... 
def __ne__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: DsCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, 
**kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DataArrayOpsMixin: __slots__ = () def _binary_op( self, other: DaCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: Dataset) -> Dataset: ... @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: DaCompatible) -> Self: ... def __add__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: Dataset) -> Dataset: ... @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: DaCompatible) -> Self: ... def __sub__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: Dataset) -> Dataset: ... @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: DaCompatible) -> Self: ... def __mul__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: Dataset) -> Dataset: ... @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: DaCompatible) -> Self: ... 
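    # A minimal sketch of the return-type narrowing declared by the overloads
    # in this mixin (the names `da`, `ds` and `tree` are hypothetical DataArray,
    # Dataset and DataTree instances, assumed to exist only for illustration):
    #
    #     da + 1.0   # inferred as Self (a DataArray) via the DaCompatible overload
    #     da + ds    # inferred as Dataset
    #     da + tree  # inferred as DataTree
    #
    # Runtime behaviour is unchanged; _binary_op performs the actual dispatch.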
def __pow__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: Dataset) -> Dataset: ... @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: DaCompatible) -> Self: ... def __truediv__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: Dataset) -> Dataset: ... @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: DaCompatible) -> Self: ... def __floordiv__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: Dataset) -> Dataset: ... @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: DaCompatible) -> Self: ... def __mod__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: Dataset) -> Dataset: ... @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: DaCompatible) -> Self: ... def __and__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: Dataset) -> Dataset: ... @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: DaCompatible) -> Self: ... def __xor__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: Dataset) -> Dataset: ... @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: DaCompatible) -> Self: ... def __or__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: Dataset) -> Dataset: ... @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: DaCompatible) -> Self: ... def __lshift__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: Dataset) -> Dataset: ... @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: DaCompatible) -> Self: ... def __rshift__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: Dataset) -> Dataset: ... @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: DaCompatible) -> Self: ... def __lt__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: Dataset) -> Dataset: ... @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: DaCompatible) -> Self: ... def __le__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: Dataset) -> Dataset: ... @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: DaCompatible) -> Self: ... def __gt__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: Dataset) -> Dataset: ... 
@overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: DaCompatible) -> Self: ... def __ge__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: Dataset) -> Dataset: ... @overload def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: DaCompatible) -> Self: ... def __eq__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: Dataset) -> Dataset: ... @overload def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: DaCompatible) -> Self: ... def __ne__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: DaCompatible) -> Self: # type:ignore[misc] return 
self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class VariableOpsMixin: __slots__ = () def _binary_op( self, other: VarCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: T_DA) -> T_DA: ... @overload def __add__(self, other: Dataset) -> Dataset: ... @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: VarCompatible) -> Self: ... def __add__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: T_DA) -> T_DA: ... 
@overload def __sub__(self, other: Dataset) -> Dataset: ... @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: VarCompatible) -> Self: ... def __sub__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: T_DA) -> T_DA: ... @overload def __mul__(self, other: Dataset) -> Dataset: ... @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: VarCompatible) -> Self: ... def __mul__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: T_DA) -> T_DA: ... @overload def __pow__(self, other: Dataset) -> Dataset: ... @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: VarCompatible) -> Self: ... def __pow__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: T_DA) -> T_DA: ... @overload def __truediv__(self, other: Dataset) -> Dataset: ... @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: VarCompatible) -> Self: ... def __truediv__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: T_DA) -> T_DA: ... @overload def __floordiv__(self, other: Dataset) -> Dataset: ... @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: VarCompatible) -> Self: ... def __floordiv__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: T_DA) -> T_DA: ... @overload def __mod__(self, other: Dataset) -> Dataset: ... @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: VarCompatible) -> Self: ... def __mod__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: T_DA) -> T_DA: ... @overload def __and__(self, other: Dataset) -> Dataset: ... @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: VarCompatible) -> Self: ... def __and__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: T_DA) -> T_DA: ... @overload def __xor__(self, other: Dataset) -> Dataset: ... @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: VarCompatible) -> Self: ... def __xor__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: T_DA) -> T_DA: ... @overload def __or__(self, other: Dataset) -> Dataset: ... @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: VarCompatible) -> Self: ... def __or__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: T_DA) -> T_DA: ... @overload def __lshift__(self, other: Dataset) -> Dataset: ... @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: VarCompatible) -> Self: ... 
def __lshift__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: T_DA) -> T_DA: ... @overload def __rshift__(self, other: Dataset) -> Dataset: ... @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: VarCompatible) -> Self: ... def __rshift__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: T_DA) -> T_DA: ... @overload def __lt__(self, other: Dataset) -> Dataset: ... @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: VarCompatible) -> Self: ... def __lt__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: T_DA) -> T_DA: ... @overload def __le__(self, other: Dataset) -> Dataset: ... @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: VarCompatible) -> Self: ... def __le__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: T_DA) -> T_DA: ... @overload def __gt__(self, other: Dataset) -> Dataset: ... @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: VarCompatible) -> Self: ... def __gt__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: T_DA) -> T_DA: ... @overload def __ge__(self, other: Dataset) -> Dataset: ... @overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: VarCompatible) -> Self: ... def __ge__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: T_DA) -> T_DA: ... @overload def __eq__(self, other: Dataset) -> Dataset: ... @overload def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: VarCompatible) -> Self: ... def __eq__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: T_DA) -> T_DA: ... @overload def __ne__(self, other: Dataset) -> Dataset: ... @overload def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: VarCompatible) -> Self: ... 
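    # A minimal sketch of the overload resolution for Variable operands (the
    # names `var` and `da` are hypothetical Variable and DataArray instances,
    # used only for illustration):
    #
    #     var + 1.0  # inferred as Self (a Variable) via the VarCompatible overload
    #     var + da   # inferred as T_DA, i.e. the DataArray subtype of `da`
    #
    # As above, these overloads only guide static type checking; _binary_op
    # handles the runtime dispatch.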
def __ne__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: VarCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, 
**kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DatasetGroupByOpsMixin: __slots__ = () def _binary_op( self, other: Dataset | DataArray, f: Callable, reflexive: bool = False ) -> Dataset: raise NotImplementedError def __add__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.add) def __sub__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.sub) def __mul__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mul) def __pow__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.pow) def __truediv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.floordiv) def __mod__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mod) def __and__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.and_) def __xor__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.xor) def __or__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.or_) def __lshift__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.lshift) def __rshift__(self, other: Dataset | 
DataArray) -> Dataset: return self._binary_op(other, operator.rshift) def __lt__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.lt) def __le__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.le) def __gt__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.gt) def __ge__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.ge) def __eq__(self, other: Dataset | DataArray) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: Dataset | DataArray) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ class DataArrayGroupByOpsMixin: __slots__ = () def _binary_op( self, other: T_Xarray, f: Callable, reflexive: bool = False ) -> T_Xarray: raise NotImplementedError def __add__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add) def __sub__(self, other: T_Xarray) -> T_Xarray: return 
self._binary_op(other, operator.sub) def __mul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul) def __pow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow) def __truediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv) def __mod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod) def __and__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_) def __xor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor) def __or__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_) def __lshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lshift) def __rshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.rshift) def __lt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lt) def __le__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.le) def __gt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.gt) def __ge__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.ge) def __eq__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = 
nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/core/accessor_dt.py���������������������������������������������������������0000664�0000000�0000000�00000055575�15114646760�0020465�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import warnings from typing import TYPE_CHECKING, Generic import numpy as np import pandas as pd from xarray.coding.calendar_ops import _decimal_year from xarray.coding.times import infer_calendar_name from xarray.core import duck_array_ops from xarray.core.common import ( _contains_datetime_like_objects, full_like, is_np_datetime_like, is_np_timedelta_like, ) from xarray.core.types import T_DataArray from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.utils import is_duck_dask_array if TYPE_CHECKING: from typing import Self from numpy.typing import DTypeLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import CFCalendar def _season_from_months(months): """Compute season (DJF, MAM, JJA, SON) from month ordinal""" # TODO: Move "season" accessor upstream into pandas seasons = np.array(["DJF", "MAM", "JJA", "SON", "nan"]) months = np.asarray(months) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="invalid value encountered in floor_divide" ) warnings.filterwarnings( "ignore", message="invalid value encountered in remainder" ) idx = (months // 3) % 4 idx[np.isnan(idx)] = 4 return seasons[idx.astype(int)] def _access_through_cftimeindex(values, name): """Coerce an array of datetime-like values to a CFTimeIndex and access requested datetime component """ from xarray.coding.cftimeindex import CFTimeIndex if not isinstance(values, CFTimeIndex): values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) else: values_as_cftimeindex = values if name == "season": months = values_as_cftimeindex.month field_values = _season_from_months(months) elif name == "date": raise AttributeError( "'CFTimeIndex' object has no attribute `date`. Consider using the floor method " "instead, for instance: `.time.dt.floor('D')`." 
) else: field_values = getattr(values_as_cftimeindex, name) return field_values.reshape(values.shape) def _access_through_series(values, name): """Coerce an array of datetime-like values to a pandas Series and access requested datetime component """ values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) if name == "season": months = values_as_series.dt.month.values field_values = _season_from_months(months) elif name == "total_seconds": field_values = values_as_series.dt.total_seconds().values elif name == "isocalendar": # special NaT-handling can be removed when # https://github.com/pandas-dev/pandas/issues/54657 is resolved field_values = values_as_series.dt.isocalendar() # test for and apply needed dtype hasna = any(field_values.year.isnull()) if hasna: field_values = np.dstack( [ getattr(field_values, name).astype(np.float64, copy=False).values for name in ["year", "week", "day"] ] ) else: field_values = np.array(field_values, dtype=np.int64) # isocalendar returns iso- year, week, and weekday -> reshape return field_values.T.reshape(3, *values.shape) else: field_values = getattr(values_as_series.dt, name).values return field_values.reshape(values.shape) def _get_date_field(values, name, dtype): """Indirectly access pandas' libts.get_date_field by wrapping data as a Series and calling through `.dt` attribute. Parameters ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values name : str Name of datetime field to access dtype : dtype-like dtype for output date field values Returns ------- datetime_fields : same type as values Array-like of datetime fields accessed for each element in values """ if is_np_datetime_like(values.dtype): access_method = _access_through_series else: access_method = _access_through_cftimeindex if is_duck_dask_array(values): from dask.array import map_blocks new_axis = chunks = None # isocalendar adds an axis if name == "isocalendar": chunks = (3,) + values.chunksize new_axis = 0 return map_blocks( access_method, values, name, dtype=dtype, new_axis=new_axis, chunks=chunks ) else: out = access_method(values, name) # cast only for integer types to keep float64 in presence of NaT # see https://github.com/pydata/xarray/issues/7928 if np.issubdtype(out.dtype, np.integer): out = out.astype(dtype, copy=False) return out def _round_through_series_or_index(values, name, freq): """Coerce an array of datetime-like values to a pandas Series or xarray CFTimeIndex and apply requested rounding """ from xarray.coding.cftimeindex import CFTimeIndex if is_np_datetime_like(values.dtype): values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) method = getattr(values_as_series.dt, name) else: values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) method = getattr(values_as_cftimeindex, name) field_values = method(freq=freq).values return field_values.reshape(values.shape) def _round_field(values, name, freq): """Indirectly access rounding functions by wrapping data as a Series or CFTimeIndex Parameters ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values name : {"ceil", "floor", "round"} Name of rounding function freq : str a freq string indicating the rounding resolution Returns ------- rounded timestamps : same type as values Array-like of datetime fields accessed for each element in values """ if is_duck_dask_array(values): from dask.array import map_blocks dtype = np.datetime64 if is_np_datetime_like(values.dtype) else np.dtype("O") return map_blocks( 
_round_through_series_or_index, values, name, freq=freq, dtype=dtype ) else: return _round_through_series_or_index(values, name, freq) def _strftime_through_cftimeindex(values, date_format: str): """Coerce an array of cftime-like values to a CFTimeIndex and access requested datetime component """ from xarray.coding.cftimeindex import CFTimeIndex values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) field_values = values_as_cftimeindex.strftime(date_format) return field_values.to_numpy().reshape(values.shape) def _strftime_through_series(values, date_format: str): """Coerce an array of datetime-like values to a pandas Series and apply string formatting """ values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) strs = values_as_series.dt.strftime(date_format) return strs.to_numpy().reshape(values.shape) def _strftime(values, date_format): if is_np_datetime_like(values.dtype): access_method = _strftime_through_series else: access_method = _strftime_through_cftimeindex if is_duck_dask_array(values): from dask.array import map_blocks return map_blocks(access_method, values, date_format) else: return access_method(values, date_format) def _index_or_data(obj): if isinstance(obj.variable, IndexVariable): return obj.to_index() else: return obj.data class TimeAccessor(Generic[T_DataArray]): __slots__ = ("_obj",) def __init__(self, obj: T_DataArray) -> None: self._obj = obj def _date_field(self, name: str, dtype: DTypeLike | None) -> T_DataArray: if dtype is None: dtype = self._obj.dtype result = _get_date_field(_index_or_data(self._obj), name, dtype) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name=name) def _tslib_round_accessor(self, name: str, freq: str) -> T_DataArray: result = _round_field(_index_or_data(self._obj), name, freq) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name=name) def floor(self, freq: str) -> T_DataArray: """ Round timestamps downward to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- floor-ed timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("floor", freq) def ceil(self, freq: str) -> T_DataArray: """ Round timestamps upward to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- ceil-ed timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("ceil", freq) def round(self, freq: str) -> T_DataArray: """ Round timestamps to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- rounded timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("round", freq) class DatetimeAccessor(TimeAccessor[T_DataArray]): """Access datetime fields for DataArrays with datetime-like dtypes. Fields can be accessed through the `.dt` attribute for applicable DataArrays. 
Examples --------- >>> dates = pd.date_range(start="2000/01/01", freq="D", periods=10) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts Size: 80B array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000', '2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000', '2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000', '2000-01-07T00:00:00.000000000', '2000-01-08T00:00:00.000000000', '2000-01-09T00:00:00.000000000', '2000-01-10T00:00:00.000000000'], dtype='datetime64[ns]') Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.dayofyear Size: 80B array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt.quarter Size: 80B array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 """ def strftime(self, date_format: str) -> T_DataArray: """ Return an array of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc `__ Parameters ---------- date_format : str date format string (e.g. "%Y-%m-%d") Returns ------- formatted strings : same type as values Array-like of strings formatted for each element in values Examples -------- >>> import datetime >>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)}) >>> rng["time"].dt.strftime("%B %d, %Y, %r") Size: 8B array('January 01, 2000, 12:00:00 AM', dtype=object) """ obj_type = type(self._obj) result = _strftime(self._obj.data, date_format) return obj_type( result, name="strftime", coords=self._obj.coords, dims=self._obj.dims ) def isocalendar(self) -> Dataset: """Dataset containing ISO year, week number, and weekday. Notes ----- The iso year and weekday differ from the nominal year and weekday. """ from xarray.core.dataset import Dataset if not is_np_datetime_like(self._obj.data.dtype): raise AttributeError("'CFTimeIndex' object has no attribute 'isocalendar'") values = _get_date_field(self._obj.data, "isocalendar", np.int64) obj_type = type(self._obj) data_vars = {} for i, name in enumerate(["year", "week", "weekday"]): data_vars[name] = obj_type( values[i], name=name, coords=self._obj.coords, dims=self._obj.dims ) return Dataset(data_vars) @property def year(self) -> T_DataArray: """The year of the datetime""" return self._date_field("year", np.int64) @property def month(self) -> T_DataArray: """The month as January=1, December=12""" return self._date_field("month", np.int64) @property def day(self) -> T_DataArray: """The days of the datetime""" return self._date_field("day", np.int64) @property def hour(self) -> T_DataArray: """The hours of the datetime""" return self._date_field("hour", np.int64) @property def minute(self) -> T_DataArray: """The minutes of the datetime""" return self._date_field("minute", np.int64) @property def second(self) -> T_DataArray: """The seconds of the datetime""" return self._date_field("second", np.int64) @property def microsecond(self) -> T_DataArray: """The microseconds of the datetime""" return self._date_field("microsecond", np.int64) @property def nanosecond(self) -> T_DataArray: """The nanoseconds of the datetime""" return self._date_field("nanosecond", np.int64) @property def weekofyear(self) -> DataArray: "The week ordinal of the year" warnings.warn( "dt.weekofyear and dt.week have been deprecated. 
Please use " "dt.isocalendar().week instead.", FutureWarning, stacklevel=2, ) weekofyear = self.isocalendar().week return weekofyear week = weekofyear @property def dayofweek(self) -> T_DataArray: """The day of the week with Monday=0, Sunday=6""" return self._date_field("dayofweek", np.int64) weekday = dayofweek @property def dayofyear(self) -> T_DataArray: """The ordinal day of the year""" return self._date_field("dayofyear", np.int64) @property def quarter(self) -> T_DataArray: """The quarter of the date""" return self._date_field("quarter", np.int64) @property def days_in_month(self) -> T_DataArray: """The number of days in the month""" return self._date_field("days_in_month", np.int64) daysinmonth = days_in_month @property def season(self) -> T_DataArray: """Season of the year""" return self._date_field("season", object) @property def time(self) -> T_DataArray: """Timestamps corresponding to datetimes""" return self._date_field("time", object) @property def date(self) -> T_DataArray: """Date corresponding to datetimes""" return self._date_field("date", object) @property def is_month_start(self) -> T_DataArray: """Indicate whether the date is the first day of the month""" return self._date_field("is_month_start", bool) @property def is_month_end(self) -> T_DataArray: """Indicate whether the date is the last day of the month""" return self._date_field("is_month_end", bool) @property def is_quarter_start(self) -> T_DataArray: """Indicate whether the date is the first day of a quarter""" return self._date_field("is_quarter_start", bool) @property def is_quarter_end(self) -> T_DataArray: """Indicate whether the date is the last day of a quarter""" return self._date_field("is_quarter_end", bool) @property def is_year_start(self) -> T_DataArray: """Indicate whether the date is the first day of a year""" return self._date_field("is_year_start", bool) @property def is_year_end(self) -> T_DataArray: """Indicate whether the date is the last day of the year""" return self._date_field("is_year_end", bool) @property def is_leap_year(self) -> T_DataArray: """Indicate if the date belongs to a leap year""" return self._date_field("is_leap_year", bool) @property def calendar(self) -> CFCalendar: """The name of the calendar of the dates. Only relevant for arrays of :py:class:`cftime.datetime` objects, returns "proleptic_gregorian" for arrays of :py:class:`numpy.datetime64` values. """ return infer_calendar_name(self._obj.data) @property def days_in_year(self) -> T_DataArray: """Each datetime as the year plus the fraction of the year elapsed.""" if self.calendar == "360_day": result = full_like(self.year, 360) else: result = self.is_leap_year.astype(int) + 365 newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name="days_in_year") @property def decimal_year(self) -> T_DataArray: """Convert the dates as a fractional year.""" result = _decimal_year(self._obj) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name="decimal_year") class TimedeltaAccessor(TimeAccessor[T_DataArray]): """Access Timedelta fields for DataArrays with Timedelta-like dtypes. Fields can be accessed through the `.dt` attribute for applicable DataArrays. 
Examples -------- >>> dates = pd.timedelta_range(start="1 day", freq="6h", periods=20) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts Size: 160B array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000, 172800000000000, 194400000000000, 216000000000000, 237600000000000, 259200000000000, 280800000000000, 302400000000000, 324000000000000, 345600000000000, 367200000000000, 388800000000000, 410400000000000, 432000000000000, 453600000000000, 475200000000000, 496800000000000], dtype='timedelta64[ns]') Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.days Size: 160B array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.microseconds Size: 160B array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.seconds Size: 160B array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.total_seconds() Size: 160B array([ 86400., 108000., 129600., 151200., 172800., 194400., 216000., 237600., 259200., 280800., 302400., 324000., 345600., 367200., 388800., 410400., 432000., 453600., 475200., 496800.]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 """ @property def days(self) -> T_DataArray: """Number of days for each element""" return self._date_field("days", np.int64) @property def seconds(self) -> T_DataArray: """Number of seconds (>= 0 and less than 1 day) for each element""" return self._date_field("seconds", np.int64) @property def microseconds(self) -> T_DataArray: """Number of microseconds (>= 0 and less than 1 second) for each element""" return self._date_field("microseconds", np.int64) @property def nanoseconds(self) -> T_DataArray: """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element""" return self._date_field("nanoseconds", np.int64) # Not defined as a property in order to match the Pandas API def total_seconds(self) -> T_DataArray: """Total duration of each element expressed in seconds.""" return self._date_field("total_seconds", np.float64) class CombinedDatetimelikeAccessor( DatetimeAccessor[T_DataArray], TimedeltaAccessor[T_DataArray] ): def __new__(cls, obj: T_DataArray) -> Self: # CombinedDatetimelikeAccessor isn't really instantiated. Instead # we need to choose which parent (datetime or timedelta) is # appropriate. Since we're checking the dtypes anyway, we'll just # do all the validation here. if not _contains_datetime_like_objects(obj.variable): # We use an AttributeError here so that `obj.dt` raises an error that # `getattr` expects; https://github.com/pydata/xarray/issues/8718. It's a # bit unusual in a `__new__`, but that's the only case where we use this # class. raise AttributeError( "'.dt' accessor only available for " "DataArray with datetime64 timedelta64 dtype or " "for arrays containing cftime datetime " "objects." 
) if is_np_timedelta_like(obj.dtype): return TimedeltaAccessor(obj) # type: ignore[return-value] else: return DatetimeAccessor(obj) # type: ignore[return-value] �����������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/core/accessor_str.py��������������������������������������������������������0000664�0000000�0000000�00000302537�15114646760�0020657�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# The StringAccessor class defined below is an adaptation of the # pandas string methods source code (see pd.core.strings) # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
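# A minimal usage sketch of the vectorized string methods this module provides
# through the `.str` accessor (assumes xarray is importable as `xr`; the
# variable names below are hypothetical):
#
#     import xarray as xr
#     names = xr.DataArray(["spam", "eggs"], dims="x")
#     names.str.len()      # element-wise lengths -> [4, 4]
#     names.str.upper()    # element-wise upper-casing -> ["SPAM", "EGGS"]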
from __future__ import annotations import codecs import re import textwrap from collections.abc import Callable, Hashable, Mapping from functools import reduce from operator import or_ as set_union from re import Pattern from typing import TYPE_CHECKING, Any, Generic from unicodedata import normalize import numpy as np from xarray.core import duck_array_ops from xarray.core.types import T_DataArray if TYPE_CHECKING: from numpy.typing import DTypeLike from xarray.core.dataarray import DataArray _cpython_optimized_encoders = ( "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii", ) _cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32") def _contains_obj_type(*, pat: Any, checker: Any) -> bool: """Determine if the object fits some rule or is array of objects that do so.""" if isinstance(checker, type): targtype = checker checker = lambda x: isinstance(x, targtype) if checker(pat): return True # If it is not an object array it can't contain compiled re if getattr(pat, "dtype", "no") != np.object_: return False return _apply_str_ufunc(func=checker, obj=pat).all() def _contains_str_like(pat: Any) -> bool: """Determine if the object is a str-like or array of str-like.""" if isinstance(pat, str | bytes): return True if not hasattr(pat, "dtype"): return False return pat.dtype.kind in ["U", "S"] def _contains_compiled_re(pat: Any) -> bool: """Determine if the object is a compiled re or array of compiled re.""" return _contains_obj_type(pat=pat, checker=re.Pattern) def _contains_callable(pat: Any) -> bool: """Determine if the object is a callable or array of callables.""" return _contains_obj_type(pat=pat, checker=callable) def _apply_str_ufunc( *, func: Callable, obj: Any, dtype: DTypeLike | None = None, output_core_dims: list | tuple = ((),), output_sizes: Mapping[Any, int] | None = None, func_args: tuple = (), func_kwargs: Mapping = {}, ) -> Any: # TODO handling of na values ? if dtype is None: dtype = obj.dtype dask_gufunc_kwargs = dict() if output_sizes is not None: dask_gufunc_kwargs["output_sizes"] = output_sizes from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( func, obj, *func_args, vectorize=True, dask="parallelized", output_dtypes=[dtype], output_core_dims=output_core_dims, dask_gufunc_kwargs=dask_gufunc_kwargs, **func_kwargs, ) class StringAccessor(Generic[T_DataArray]): r"""Vectorized string functions for string-like arrays. Similar to pandas, fields can be accessed through the `.str` attribute for applicable DataArrays. >>> da = xr.DataArray(["some", "text", "in", "an", "array"]) >>> da.str.len() Size: 40B array([4, 4, 2, 2, 5]) Dimensions without coordinates: dim_0 It also implements ``+``, ``*``, and ``%``, which operate as elementwise versions of the corresponding ``str`` methods. These will automatically broadcast for array-like inputs. 
>>> da1 = xr.DataArray(["first", "second", "third"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1.str + da2 Size: 252B array([['first1', 'first2', 'first3'], ['second1', 'second2', 'second3'], ['third1', 'third2', 'third3']], dtype='>> da1 = xr.DataArray(["a", "b", "c", "d"], dims=["X"]) >>> reps = xr.DataArray([3, 4], dims=["Y"]) >>> da1.str * reps Size: 128B array([['aaa', 'aaaa'], ['bbb', 'bbbb'], ['ccc', 'cccc'], ['ddd', 'dddd']], dtype='>> da1 = xr.DataArray(["%s_%s", "%s-%s", "%s|%s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2], dims=["Y"]) >>> da3 = xr.DataArray([0.1, 0.2], dims=["Z"]) >>> da1.str % (da2, da3) Size: 240B array([[['1_0.1', '1_0.2'], ['2_0.1', '2_0.2']], [['1-0.1', '1-0.2'], ['2-0.1', '2-0.2']], [['1|0.1', '1|0.2'], ['2|0.1', '2|0.2']]], dtype='>> da1 = xr.DataArray(["%(a)s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1 % {"a": da2} Size: 8B array([' Size: 24B\narray([1, 2, 3])\nDimensions without coordinates: Y'], dtype=object) Dimensions without coordinates: X """ __slots__ = ("_obj",) def __init__(self, obj: T_DataArray) -> None: self._obj = obj def _stringify(self, invar: Any) -> str | bytes | Any: """ Convert a string-like to the correct string/bytes type. This is mostly here to tell mypy a pattern is a str/bytes not a re.Pattern. """ if hasattr(invar, "astype"): return invar.astype(self._obj.dtype.kind) else: return self._obj.dtype.type(invar) def _apply( self, *, func: Callable, dtype: DTypeLike | None = None, output_core_dims: list | tuple = ((),), output_sizes: Mapping[Any, int] | None = None, func_args: tuple = (), func_kwargs: Mapping = {}, ) -> T_DataArray: return _apply_str_ufunc( obj=self._obj, func=func, dtype=dtype, output_core_dims=output_core_dims, output_sizes=output_sizes, func_args=func_args, func_kwargs=func_kwargs, ) def _re_compile( self, *, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None, ) -> Pattern | Any: is_compiled_re = isinstance(pat, re.Pattern) if is_compiled_re and flags != 0: raise ValueError("Flags cannot be set when pat is a compiled regex.") if is_compiled_re and case is not None: raise ValueError("Case cannot be set when pat is a compiled regex.") if is_compiled_re: # no-op, needed to tell mypy this isn't a string return re.compile(pat) if case is None: case = True # The case is handled by the re flags internally. # Add it to the flags if necessary. if not case: flags |= re.IGNORECASE if getattr(pat, "dtype", None) != np.object_: pat = self._stringify(pat) def func(x): return re.compile(x, flags=flags) if isinstance(pat, np.ndarray): # apply_ufunc doesn't work for numpy arrays with output object dtypes func_ = np.vectorize(func) return func_(pat) else: return _apply_str_ufunc(func=func, obj=pat, dtype=np.object_) def len(self) -> T_DataArray: """ Compute the length of each string in the array. 
Returns ------- lengths array : array of int """ return self._apply(func=len, dtype=int) def __getitem__( self, key: int | slice, ) -> T_DataArray: if isinstance(key, slice): return self.slice(start=key.start, stop=key.stop, step=key.step) else: return self.get(key) def __add__(self, other: Any) -> T_DataArray: return self.cat(other, sep="") def __mul__( self, num: int | Any, ) -> T_DataArray: return self.repeat(num) def __mod__( self, other: Any, ) -> T_DataArray: if isinstance(other, dict): other = {key: self._stringify(val) for key, val in other.items()} return self._apply(func=lambda x: x % other) elif isinstance(other, tuple): other = tuple(self._stringify(x) for x in other) return self._apply(func=lambda x, *y: x % y, func_args=other) else: return self._apply(func=lambda x, y: x % y, func_args=(other,)) def get( self, i: int | Any, default: str | bytes = "", ) -> T_DataArray: """ Extract character number `i` from each string in the array. If `i` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- i : int or array-like of int Position of element to extract. If array-like, it is broadcast. default : str or bytes, default: "" Value for out-of-range index. Returns ------- items : array of object """ def f(x, iind): islice = slice(-1, None) if iind == -1 else slice(iind, iind + 1) item = x[islice] return item or default return self._apply(func=f, func_args=(i,)) def slice( self, start: int | Any | None = None, stop: int | Any | None = None, step: int | Any | None = None, ) -> T_DataArray: """ Slice substrings from each string in the array. If `start`, `stop`, or 'step` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Start position for slice operation. If array-like, it is broadcast. stop : int or array-like of int, optional Stop position for slice operation. If array-like, it is broadcast. step : int or array-like of int, optional Step size for slice operation. If array-like, it is broadcast. Returns ------- sliced strings : same type as values """ f = lambda x, istart, istop, istep: x[slice(istart, istop, istep)] return self._apply(func=f, func_args=(start, stop, step)) def slice_replace( self, start: int | Any | None = None, stop: int | Any | None = None, repl: str | bytes | Any = "", ) -> T_DataArray: """ Replace a positional slice of a string with another value. If `start`, `stop`, or 'repl` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Left index position to use for the slice. If not specified (None), the slice is unbounded on the left, i.e. slice from the start of the string. If array-like, it is broadcast. stop : int or array-like of int, optional Right index position to use for the slice. If not specified (None), the slice is unbounded on the right, i.e. slice until the end of the string. If array-like, it is broadcast. repl : str or array-like of str, default: "" String for replacement. If not specified, the sliced region is replaced with an empty string. If array-like, it is broadcast. 
Returns ------- replaced : same type as values """ repl = self._stringify(repl) def func(x, istart, istop, irepl): if len(x[istart:istop]) == 0: local_stop = istart else: local_stop = istop y = self._stringify("") if istart is not None: y += x[:istart] y += irepl if istop is not None: y += x[local_stop:] return y return self._apply(func=func, func_args=(start, stop, repl)) def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray: """ Concatenate strings elementwise in the DataArray with other strings. The other strings can either be string scalars or other array-like. Dimensions are automatically broadcast together. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- *others : str or array-like of str Strings or array-like of strings to concatenate elementwise with the current DataArray. sep : str or array-like of str, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- concatenated : same type as values Examples -------- Create a string array >>> myarray = xr.DataArray( ... ["11111", "4"], ... dims=["X"], ... ) Create some arrays to concatenate with it >>> values_1 = xr.DataArray( ... ["a", "bb", "cccc"], ... dims=["Y"], ... ) >>> values_2 = np.array(3.4) >>> values_3 = "" >>> values_4 = np.array("test", dtype=np.str_) Determine the separator to use >>> seps = xr.DataArray( ... [" ", ", "], ... dims=["ZZ"], ... ) Concatenate the arrays using the separator >>> myarray.str.cat(values_1, values_2, values_3, values_4, sep=seps) Size: 1kB array([[['11111 a 3.4 test', '11111, a, 3.4, , test'], ['11111 bb 3.4 test', '11111, bb, 3.4, , test'], ['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']], [['4 a 3.4 test', '4, a, 3.4, , test'], ['4 bb 3.4 test', '4, bb, 3.4, , test'], ['4 cccc 3.4 test', '4, cccc, 3.4, , test']]], dtype=' T_DataArray: """ Concatenate strings in a DataArray along a particular dimension. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable, optional Dimension along which the strings should be concatenated. Only one dimension is allowed at a time. Optional for 0D or 1D DataArrays, required for multidimensional DataArrays. sep : str or array-like, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- joined : same type as values Examples -------- Create an array >>> values = xr.DataArray( ... [["a", "bab", "abc"], ["abcd", "", "abcdef"]], ... dims=["X", "Y"], ... ) Determine the separator >>> seps = xr.DataArray( ... ["-", "_"], ... dims=["ZZ"], ... 
) Join the strings along a given dimension >>> values.str.join(dim="Y", sep=seps) Size: 192B array([['a-bab-abc', 'a_bab_abc'], ['abcd--abcdef', 'abcd__abcdef']], dtype=' 1 and dim is None: raise ValueError("Dimension must be specified for multidimensional arrays.") if self._obj.ndim > 1: # Move the target dimension to the start and split along it dimshifted = list(self._obj.transpose(dim, ...)) elif self._obj.ndim == 1: dimshifted = list(self._obj) else: dimshifted = [self._obj] start, *others = dimshifted # concatenate the resulting arrays return start.str.cat(*others, sep=sep) def format( self, *args: Any, **kwargs: Any, ) -> T_DataArray: """ Perform python string formatting on each element of the DataArray. This is equivalent to calling `str.format` on every element of the DataArray. The replacement values can either be a string-like scalar or array-like of string-like values. If array-like, the values will be broadcast and applied elementwiseto the input DataArray. .. note:: Array-like values provided as `*args` will have their dimensions added even if those arguments are not used in any string formatting. .. warning:: Array-like arguments are only applied elementwise for `*args`. For `**kwargs`, values are used as-is. Parameters ---------- *args : str or bytes or array-like of str or bytes Values for positional formatting. If array-like, the values are broadcast and applied elementwise. The dimensions will be placed at the end of the output array dimensions in the order they are provided. **kwargs : str or bytes or array-like of str or bytes Values for keyword-based formatting. These are **not** broadcast or applied elementwise. Returns ------- formatted : same type as values Examples -------- Create an array to format. >>> values = xr.DataArray( ... ["{} is {adj0}", "{} and {} are {adj1}"], ... dims=["X"], ... ) Set the values to fill. >>> noun0 = xr.DataArray( ... ["spam", "egg"], ... dims=["Y"], ... ) >>> noun1 = xr.DataArray( ... ["lancelot", "arthur"], ... dims=["ZZ"], ... ) >>> adj0 = "unexpected" >>> adj1 = "like a duck" Insert the values into the array >>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1) Size: 1kB array([[['spam is unexpected', 'spam is unexpected'], ['egg is unexpected', 'egg is unexpected']], [['spam and lancelot are like a duck', 'spam and arthur are like a duck'], ['egg and lancelot are like a duck', 'egg and arthur are like a duck']]], dtype=' T_DataArray: """ Convert strings in the array to be capitalized. Returns ------- capitalized : same type as values Examples -------- >>> da = xr.DataArray( ... ["temperature", "PRESSURE", "PreCipiTation", "daily rainfall"], dims="x" ... ) >>> da Size: 224B array(['temperature', 'PRESSURE', 'PreCipiTation', 'daily rainfall'], dtype='>> capitalized = da.str.capitalize() >>> capitalized Size: 224B array(['Temperature', 'Pressure', 'Precipitation', 'Daily rainfall'], dtype=' T_DataArray: """ Convert strings in the array to lowercase. Returns ------- lowered : same type as values Examples -------- >>> da = xr.DataArray(["Temperature", "PRESSURE"], dims="x") >>> da Size: 88B array(['Temperature', 'PRESSURE'], dtype='>> lowered = da.str.lower() >>> lowered Size: 88B array(['temperature', 'pressure'], dtype=' T_DataArray: """ Convert strings in the array to be swapcased. 
Returns ------- swapcased : same type as values Examples -------- >>> import xarray as xr >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> swapcased = da.str.swapcase() >>> swapcased Size: 132B array(['TEMPERATURE', 'pressure', 'hUmIdItY'], dtype=' T_DataArray: """ Convert strings in the array to titlecase. Returns ------- titled : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> titled = da.str.title() >>> titled Size: 132B array(['Temperature', 'Pressure', 'Humidity'], dtype=' T_DataArray: """ Convert strings in the array to uppercase. Returns ------- uppered : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "HuMiDiTy"], dims="x") >>> da Size: 88B array(['temperature', 'HuMiDiTy'], dtype='>> uppered = da.str.upper() >>> uppered Size: 88B array(['TEMPERATURE', 'HUMIDITY'], dtype=' T_DataArray: """ Convert strings in the array to be casefolded. Casefolding is similar to converting to lowercase, but removes all case distinctions. This is important in some languages that have more complicated cases and case conversions. For example, the 'รŸ' character in German is case-folded to 'ss', whereas it is lowercased to 'รŸ'. Returns ------- casefolded : same type as values Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "HuMiDiTy"], dims="x") >>> da Size: 88B array(['TEMPERATURE', 'HuMiDiTy'], dtype='>> casefolded = da.str.casefold() >>> casefolded Size: 88B array(['temperature', 'humidity'], dtype='>> da = xr.DataArray(["รŸ", "ฤฐ"], dims="x") >>> da Size: 8B array(['รŸ', 'ฤฐ'], dtype='>> casefolded = da.str.casefold() >>> casefolded Size: 16B array(['ss', 'iฬ‡'], dtype=' T_DataArray: """ Return the Unicode normal form for the strings in the datarray. For more information on the forms, see the documentation for :func:`unicodedata.normalize`. Parameters ---------- form : {"NFC", "NFKC", "NFD", "NFKD"} Unicode form. Returns ------- normalized : same type as values """ return self._apply(func=lambda x: normalize(form, x)) # type: ignore[arg-type] def isalnum(self) -> T_DataArray: """ Check whether all characters in each string are alphanumeric. Returns ------- isalnum : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["H2O", "NaCl-"], dims="x") >>> da Size: 40B array(['H2O', 'NaCl-'], dtype='>> isalnum = da.str.isalnum() >>> isalnum Size: 2B array([ True, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalnum(), dtype=bool) def isalpha(self) -> T_DataArray: """ Check whether all characters in each string are alphabetic. Returns ------- isalpha : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["Mn", "H2O", "NaCl-"], dims="x") >>> da Size: 60B array(['Mn', 'H2O', 'NaCl-'], dtype='>> isalpha = da.str.isalpha() >>> isalpha Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalpha(), dtype=bool) def isdecimal(self) -> T_DataArray: """ Check whether all characters in each string are decimal. Returns ------- isdecimal : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["2.3", "123", "0"], dims="x") >>> da Size: 36B array(['2.3', '123', '0'], dtype='>> isdecimal = da.str.isdecimal() >>> isdecimal Size: 3B array([False, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdecimal(), dtype=bool) def isdigit(self) -> T_DataArray: """ Check whether all characters in each string are digits. Returns ------- isdigit : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["123", "1.2", "0", "CO2", "NaCl"], dims="x") >>> da Size: 80B array(['123', '1.2', '0', 'CO2', 'NaCl'], dtype='>> isdigit = da.str.isdigit() >>> isdigit Size: 5B array([ True, False, True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdigit(), dtype=bool) def islower(self) -> T_DataArray: """ Check whether all characters in each string are lowercase. Returns ------- islower : array of bool Array of boolean values with the same shape as the original array indicating whether all characters of each element of the string array are lowercase (True) or not (False). Examples -------- >>> da = xr.DataArray(["temperature", "HUMIDITY", "pREciPiTaTioN"], dims="x") >>> da Size: 156B array(['temperature', 'HUMIDITY', 'pREciPiTaTioN'], dtype='>> islower = da.str.islower() >>> islower Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.islower(), dtype=bool) def isnumeric(self) -> T_DataArray: """ Check whether all characters in each string are numeric. Returns ------- isnumeric : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["123", "2.3", "H2O", "NaCl-", "Mn"], dims="x") >>> da Size: 100B array(['123', '2.3', 'H2O', 'NaCl-', 'Mn'], dtype='>> isnumeric = da.str.isnumeric() >>> isnumeric Size: 5B array([ True, False, False, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isnumeric(), dtype=bool) def isspace(self) -> T_DataArray: """ Check whether all characters in each string are spaces. Returns ------- isspace : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["", " ", "\\t", "\\n"], dims="x") >>> da Size: 16B array(['', ' ', '\\t', '\\n'], dtype='>> isspace = da.str.isspace() >>> isspace Size: 4B array([False, True, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isspace(), dtype=bool) def istitle(self) -> T_DataArray: """ Check whether all characters in each string are titlecase. Returns ------- istitle : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray( ... [ ... "The Evolution Of Species", ... "The Theory of relativity", ... "the quantum mechanics of atoms", ... ], ... dims="title", ... ) >>> da Size: 360B array(['The Evolution Of Species', 'The Theory of relativity', 'the quantum mechanics of atoms'], dtype='>> istitle = da.str.istitle() >>> istitle Size: 3B array([ True, False, False]) Dimensions without coordinates: title """ return self._apply(func=lambda x: x.istitle(), dtype=bool) def isupper(self) -> T_DataArray: """ Check whether all characters in each string are uppercase. Returns ------- isupper : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "humidity", "PreCIpiTAtioN"], dims="x") >>> da Size: 156B array(['TEMPERATURE', 'humidity', 'PreCIpiTAtioN'], dtype='>> isupper = da.str.isupper() >>> isupper Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isupper(), dtype=bool) def count( self, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None ) -> T_DataArray: """ Count occurrences of pattern in each string of the array. This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the :class:`~xarray.DataArray`. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. Returns ------- counts : array of int Examples -------- >>> da = xr.DataArray(["jjklmn", "opjjqrs", "t-JJ99vwx"], dims="x") >>> da Size: 108B array(['jjklmn', 'opjjqrs', 't-JJ99vwx'], dtype='>> da.str.count("jj") Size: 24B array([1, 1, 0]) Dimensions without coordinates: x Enable case-insensitive matching by setting case to false: >>> counts = da.str.count("jj", case=False) >>> counts Size: 24B array([1, 1, 1]) Dimensions without coordinates: x Using regex: >>> pat = "JJ[0-9]{2}[a-z]{3}" >>> counts = da.str.count(pat) >>> counts Size: 24B array([0, 0, 1]) Dimensions without coordinates: x Using an array of strings (the pattern will be broadcast against the array): >>> pat = xr.DataArray(["jj", "JJ"], dims="y") >>> counts = da.str.count(pat) >>> counts Size: 48B array([[1, 0], [1, 0], [0, 1]]) Dimensions without coordinates: x, y """ pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat: len(ipat.findall(x)) return self._apply(func=func, func_args=(pat,), dtype=int) def startswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the start of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- startswith : array of bool An array of booleans indicating whether the given pattern matches the start of each string element. Examples -------- >>> da = xr.DataArray(["$100", "ยฃ23", "100"], dims="x") >>> da Size: 48B array(['$100', 'ยฃ23', '100'], dtype='>> startswith = da.str.startswith("$") >>> startswith Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.startswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def endswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the end of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. 
If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- endswith : array of bool A Series of booleans indicating whether the given pattern matches the end of each string element. Examples -------- >>> da = xr.DataArray(["10C", "10c", "100F"], dims="x") >>> da Size: 48B array(['10C', '10c', '100F'], dtype='>> endswith = da.str.endswith("C") >>> endswith Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.endswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def pad( self, width: int | Any, side: str = "left", fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad strings in the array up to width. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with character defined in ``fillchar``. If array-like, it is broadcast. side : {"left", "right", "both"}, default: "left" Side from which to fill resulting string. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values Array with a minimum number of char in each element. Examples -------- Pad strings in the array with a single string on the left side. Define the string in the array. >>> da = xr.DataArray(["PAR184", "TKO65", "NBO9139", "NZ39"], dims="x") >>> da Size: 112B array(['PAR184', 'TKO65', 'NBO9139', 'NZ39'], dtype='>> filled = da.str.pad(8, side="left", fillchar="0") >>> filled Size: 128B array(['00PAR184', '000TKO65', '0NBO9139', '0000NZ39'], dtype='>> filled = da.str.pad(8, side="right", fillchar="0") >>> filled Size: 128B array(['PAR18400', 'TKO65000', 'NBO91390', 'NZ390000'], dtype='>> filled = da.str.pad(8, side="both", fillchar="0") >>> filled Size: 128B array(['0PAR1840', '0TKO6500', 'NBO91390', '00NZ3900'], dtype='>> width = xr.DataArray([8, 10], dims="y") >>> filled = da.str.pad(width, side="left", fillchar="0") >>> filled Size: 320B array([['00PAR184', '0000PAR184'], ['000TKO65', '00000TKO65'], ['0NBO9139', '000NBO9139'], ['0000NZ39', '000000NZ39']], dtype='>> fillchar = xr.DataArray(["0", "-"], dims="y") >>> filled = da.str.pad(8, side="left", fillchar=fillchar) >>> filled Size: 256B array([['00PAR184', '--PAR184'], ['000TKO65', '---TKO65'], ['0NBO9139', '-NBO9139'], ['0000NZ39', '----NZ39']], dtype=' T_DataArray: """ Wrapper function to handle padding operations """ fillchar = self._stringify(fillchar) def overfunc(x, iwidth, ifillchar): if len(ifillchar) != 1: raise TypeError("fillchar must be a character, not str") return func(x, int(iwidth), ifillchar) return self._apply(func=overfunc, func_args=(width, fillchar)) def center( self, width: int | Any, fillchar: str | bytes | Any = " " ) -> T_DataArray: """ Pad left and right side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. 
Returns ------- filled : same type as values """ func = self._obj.dtype.type.center return self._padder(func=func, width=width, fillchar=fillchar) def ljust( self, width: int | Any, fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad right side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values """ func = self._obj.dtype.type.ljust return self._padder(func=func, width=width, fillchar=fillchar) def rjust( self, width: int | Any, fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad left side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values """ func = self._obj.dtype.type.rjust return self._padder(func=func, width=width, fillchar=fillchar) def zfill(self, width: int | Any) -> T_DataArray: """ Pad each string in the array by prepending '0' characters. Strings in the array are padded with '0' characters on the left of the string to reach a total string length `width`. Strings in the array with length greater or equal to `width` are unchanged. If `width` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum length of resulting string; strings with length less than `width` be prepended with '0' characters. If array-like, it is broadcast. Returns ------- filled : same type as values """ return self.rjust(width, fillchar="0") def contains( self, pat: str | bytes | Pattern | Any, case: bool | None = None, flags: int = 0, regex: bool = True, ) -> T_DataArray: """ Test if pattern or regex is contained within each string of the array. Return boolean array based on whether a given pattern or regex is contained within a string of the array. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern Character sequence, a string containing a regular expression, or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. regex : bool, default: True If True, assumes the pat is a regular expression. If False, treats the pat as a literal string. Cannot be set to `False` if `pat` is a compiled regex. 
Returns ------- contains : array of bool An array of boolean values indicating whether the given pattern is contained within the string of each element of the array. """ is_compiled_re = _contains_compiled_re(pat) if is_compiled_re and not regex: raise ValueError( "Must use regular expression matching for regular expression object." ) if regex: if not is_compiled_re: pat = self._re_compile(pat=pat, flags=flags, case=case) def func(x, ipat): if ipat.groups > 0: # pragma: no cover raise ValueError("This pattern has match groups.") return bool(ipat.search(x)) else: pat = self._stringify(pat) if case or case is None: func = lambda x, ipat: ipat in x elif self._obj.dtype.char == "U": uppered = self.casefold() uppat = StringAccessor(pat).casefold() # type: ignore[type-var] # hack? return uppered.str.contains(uppat, regex=False) # type: ignore[return-value] else: uppered = self.upper() uppat = StringAccessor(pat).upper() # type: ignore[type-var] # hack? return uppered.str.contains(uppat, regex=False) # type: ignore[return-value] return self._apply(func=func, func_args=(pat,), dtype=bool) def match( self, pat: str | bytes | Pattern | Any, case: bool | None = None, flags: int = 0, ) -> T_DataArray: """ Determine if each string in the array matches a regular expression. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- matched : array of bool """ pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat: bool(ipat.match(x)) return self._apply(func=func, func_args=(pat,), dtype=bool) def strip( self, to_strip: str | bytes | Any = None, side: str = "both" ) -> T_DataArray: """ Remove leading and trailing characters. Strip whitespaces (including newlines) or a set of specified characters from each string in the array from left and/or right sides. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. side : {"left", "right", "both"}, default: "both" Side from which to strip. Returns ------- stripped : same type as values """ if to_strip is not None: to_strip = self._stringify(to_strip) if side == "both": func = lambda x, y: x.strip(y) elif side == "left": func = lambda x, y: x.lstrip(y) elif side == "right": func = lambda x, y: x.rstrip(y) else: # pragma: no cover raise ValueError("Invalid side") return self._apply(func=func, func_args=(to_strip,)) def lstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray: """ Remove leading characters. 
Strip whitespaces (including newlines) or a set of specified characters from each string in the array from the left side. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. Returns ------- stripped : same type as values """ return self.strip(to_strip, side="left") def rstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray: """ Remove trailing characters. Strip whitespaces (including newlines) or a set of specified characters from each string in the array from the right side. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. Returns ------- stripped : same type as values """ return self.strip(to_strip, side="right") def wrap(self, width: int | Any, **kwargs) -> T_DataArray: """ Wrap long strings in the array in paragraphs with length less than `width`. This method has the same keyword parameters and defaults as :class:`textwrap.TextWrapper`. If `width` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Maximum line-width. If array-like, it is broadcast. **kwargs keyword arguments passed into :class:`textwrap.TextWrapper`. Returns ------- wrapped : same type as values """ ifunc = lambda x: textwrap.TextWrapper(width=x, **kwargs) tw = StringAccessor(width)._apply(func=ifunc, dtype=np.object_) # type: ignore[type-var] # hack? func = lambda x, itw: "\n".join(itw.wrap(x)) return self._apply(func=func, func_args=(tw,)) # Mapping is only covariant in its values, maybe use a custom CovariantMapping? def translate(self, table: Mapping[Any, str | bytes | int | None]) -> T_DataArray: """ Map characters of each string through the given mapping table. Parameters ---------- table : dict-like from and to str or bytes or int A a mapping of Unicode ordinals to Unicode ordinals, strings, int or None. Unmapped characters are left untouched. Characters mapped to None are deleted. :meth:`str.maketrans` is a helper function for making translation tables. Returns ------- translated : same type as values """ func = lambda x: x.translate(table) return self._apply(func=func) def repeat( self, repeats: int | Any, ) -> T_DataArray: """ Repeat each string in the array. If `repeats` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- repeats : int or array-like of int Number of repetitions. If array-like, it is broadcast. Returns ------- repeated : same type as values Array of repeated string objects. """ func = lambda x, y: x * y return self._apply(func=func, func_args=(repeats,)) def find( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, side: str = "left", ) -> T_DataArray: """ Return lowest or highest indexes in each strings in the array where the substring is fully contained between [start:end]. Return -1 on failure. 
If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. side : {"left", "right"}, default: "left" Starting side for search. Returns ------- found : array of int """ sub = self._stringify(sub) if side == "left": method = "find" elif side == "right": method = "rfind" else: # pragma: no cover raise ValueError("Invalid side") func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend) return self._apply(func=func, func_args=(sub, start, end), dtype=int) def rfind( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, ) -> T_DataArray: """ Return highest indexes in each strings in the array where the substring is fully contained between [start:end]. Return -1 on failure. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. Returns ------- found : array of int """ return self.find(sub, start=start, end=end, side="right") def index( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, side: str = "left", ) -> T_DataArray: """ Return lowest or highest indexes in each strings where the substring is fully contained between [start:end]. This is the same as ``str.find`` except instead of returning -1, it raises a ValueError when the substring is not found. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. side : {"left", "right"}, default: "left" Starting side for search. Returns ------- found : array of int Raises ------ ValueError substring is not found """ sub = self._stringify(sub) if side == "left": method = "index" elif side == "right": method = "rindex" else: # pragma: no cover raise ValueError("Invalid side") func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend) return self._apply(func=func, func_args=(sub, start, end), dtype=int) def rindex( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, ) -> T_DataArray: """ Return highest indexes in each strings where the substring is fully contained between [start:end]. This is the same as ``str.rfind`` except instead of returning -1, it raises a ValueError when the substring is not found. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. 
Returns ------- found : array of int Raises ------ ValueError substring is not found """ return self.index(sub, start=start, end=end, side="right") def replace( self, pat: str | bytes | Pattern | Any, repl: str | bytes | Callable | Any, n: int | Any = -1, case: bool | None = None, flags: int = 0, regex: bool = True, ) -> T_DataArray: """ Replace occurrences of pattern/regex in the array with some string. If `pat`, `repl`, or 'n` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern String can be a character sequence or regular expression. If array-like, it is broadcast. repl : str or callable or array-like of str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. If array-like, it is broadcast. n : int or array of int, default: -1 Number of replacements to make from start. Use ``-1`` to replace all. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. regex : bool, default: True If True, assumes the passed-in pattern is a regular expression. If False, treats the pattern as a literal string. Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. Returns ------- replaced : same type as values A copy of the object with all matching occurrences of `pat` replaced by `repl`. """ if _contains_str_like(repl): repl = self._stringify(repl) elif not _contains_callable(repl): # pragma: no cover raise TypeError("repl must be a string or callable") is_compiled_re = _contains_compiled_re(pat) if not regex and is_compiled_re: raise ValueError( "Cannot use a compiled regex as replacement pattern with regex=False" ) if not regex and callable(repl): raise ValueError("Cannot use a callable replacement when regex=False") if regex: pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat, irepl, i_n: ipat.sub( repl=irepl, string=x, count=max(i_n, 0) ) else: pat = self._stringify(pat) func = lambda x, ipat, irepl, i_n: x.replace(ipat, irepl, i_n) return self._apply(func=func, func_args=(pat, repl, n)) def extract( self, pat: str | bytes | Pattern | Any, dim: Hashable, case: bool | None = None, flags: int = 0, ) -> T_DataArray: r""" Extract the first match of capture groups in the regex pat as a new dimension in a DataArray. For each string in the DataArray, extract groups from the first match of regular expression pat. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. dim : hashable or None Name of the new dimension to store the captured strings in. If None, the pattern must have only one capture group and the resulting DataArray will have the same size as the original. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. 
`re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : same type as values or object array Raises ------ ValueError `pat` has no capture groups. ValueError `dim` is None and there is more than one capture group. ValueError `case` is set when `pat` is a compiled regular expression. KeyError The given dimension is already present in the DataArray. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.extract(r"(\w+)_Xy_(\d*)", dim="match") Size: 288B array([[['a', '0'], ['bab', '110'], ['abc', '01']], [['abcd', ''], ['', ''], ['abcdef', '101']]], dtype=' T_DataArray: r""" Extract all matches of capture groups in the regex pat as new dimensions in a DataArray. For each string in the DataArray, extract groups from all matches of regular expression pat. Equivalent to applying re.findall() to all the elements in the DataArray and splitting the results across dimensions. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. group_dim : hashable Name of the new dimensions corresponding to the capture groups. This dimension is added to the new DataArray first. match_dim : hashable Name of the new dimensions corresponding to the matches for each group. This dimension is added to the new DataArray second. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : same type as values or object array Raises ------ ValueError `pat` has no capture groups. ValueError `case` is set when `pat` is a compiled regular expression. KeyError Either of the given dimensions is already present in the DataArray. KeyError The given dimensions names are the same. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.extractall( ... r"(\w+)_Xy_(\d*)", group_dim="group", match_dim="match" ... ) Size: 1kB array([[[['a', '0'], ['', ''], ['', '']], [['bab', '110'], ['baab', '1100'], ['', '']], [['abc', '01'], ['cbc', '2210'], ['', '']]], [[['abcd', ''], ['dcd', '33210'], ['dccd', '332210']], [['', ''], ['', ''], ['', '']], [['abcdef', '101'], ['fef', '5543210'], ['', '']]]], dtype=' T_DataArray: r""" Find all occurrences of pattern or regular expression in the DataArray. Equivalent to applying re.findall() to all the elements in the DataArray. Results in an object array of lists. If there is only one capture group, the lists will be a sequence of matches. 
If there are multiple capture groups, the lists will be a sequence of lists, each of which contains a sequence of matches. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : object array Raises ------ ValueError `pat` has no capture groups. ValueError `case` is set when `pat` is a compiled regular expression. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.findall(r"(\w+)_Xy_(\d*)") Size: 48B array([[list([('a', '0')]), list([('bab', '110'), ('baab', '1100')]), list([('abc', '01'), ('cbc', '2210')])], [list([('abcd', ''), ('dcd', '33210'), ('dccd', '332210')]), list([]), list([('abcdef', '101'), ('fef', '5543210')])]], dtype=object) Dimensions without coordinates: X, Y See Also -------- DataArray.str.extract DataArray.str.extractall re.compile re.findall pandas.Series.str.findall """ pat = self._re_compile(pat=pat, flags=flags, case=case) def func(x, ipat): if ipat.groups == 0: raise ValueError("No capture groups found in pattern.") return ipat.findall(x) return self._apply(func=func, func_args=(pat,), dtype=np.object_) def _partitioner( self, *, func: Callable, dim: Hashable | None, sep: str | bytes | Any | None, ) -> T_DataArray: """ Implements logic for `partition` and `rpartition`. """ sep = self._stringify(sep) if dim is None: listfunc = lambda x, isep: list(func(x, isep)) return self._apply(func=listfunc, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) arrfunc = lambda x, isep: np.array(func(x, isep), dtype=self._obj.dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=arrfunc, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: 3}, ), self._obj.dtype.kind, ) def partition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the first occurrence of separator `sep`. This method splits the string at the first occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing the string itself, followed by two empty strings. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. 
Returns ------- partitioned : same type as values or object array See Also -------- DataArray.str.rpartition str.partition pandas.Series.str.partition """ return self._partitioner(func=self._obj.dtype.type.partition, dim=dim, sep=sep) def rpartition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the last occurrence of separator `sep`. This method splits the string at the last occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing two empty strings, followed by the string itself. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. Returns ------- rpartitioned : same type as values or object array See Also -------- DataArray.str.partition str.rpartition pandas.Series.str.rpartition """ return self._partitioner(func=self._obj.dtype.type.rpartition, dim=dim, sep=sep) def _splitter( self, *, func: Callable, pre: bool, dim: Hashable, sep: str | bytes | Any | None, maxsplit: int, ) -> DataArray: """ Implements logic for `split` and `rsplit`. """ if sep is not None: sep = self._stringify(sep) if dim is None: f_none = lambda x, isep: func(x, isep, maxsplit) return self._apply(func=f_none, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) f_count = lambda x, isep: max(len(func(x, isep, maxsplit)), 1) maxsplit = ( self._apply(func=f_count, func_args=(sep,), dtype=np.int_).max().data.item() - 1 ) def _dosplit(mystr, sep, maxsplit=maxsplit, dtype=self._obj.dtype): res = func(mystr, sep, maxsplit) if len(res) < maxsplit + 1: pad = [""] * (maxsplit + 1 - len(res)) if pre: res += pad else: res = pad + res return np.array(res, dtype=dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=_dosplit, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: maxsplit}, ), self._obj.dtype.kind, ) def split( self, dim: Hashable | None, sep: str | bytes | Any = None, maxsplit: int = -1, ) -> DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the beginning, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray. sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the beginning. If -1 (the default), return all splits. Returns ------- splitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... 
) Split once and put the results in a new dimension >>> values.str.split(dim="splitted", maxsplit=1) Size: 864B array([[['abc', 'def'], ['spam', 'eggs\tswallow'], ['red_blue', '']], [['test0', 'test1\ntest2\n\ntest3'], ['', ''], ['abra', 'ka\nda\tbra']]], dtype='>> values.str.split(dim="splitted") Size: 768B array([[['abc', 'def', '', ''], ['spam', 'eggs', 'swallow', ''], ['red_blue', '', '', '']], [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='>> values.str.split(dim=None, maxsplit=1) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs\tswallow']), list(['red_blue'])], [list(['test0', 'test1\ntest2\n\ntest3']), list([]), list(['abra', 'ka\nda\tbra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.split(dim=None) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.split(dim="splitted", sep=" ") Size: 2kB array([[['abc', 'def', ''], ['spam\t\teggs\tswallow', '', ''], ['red_blue', '', '']], [['test0\ntest1\ntest2\n\ntest3', '', ''], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype=' DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the end, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the end. If -1 (the default), return all splits. The final number of split values may be less than this if there are no DataArray elements with that many values. Returns ------- rsplitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... 
) Split once and put the results in a new dimension >>> values.str.rsplit(dim="splitted", maxsplit=1) Size: 816B array([[['abc', 'def'], ['spam\t\teggs', 'swallow'], ['', 'red_blue']], [['test0\ntest1\ntest2', 'test3'], ['', ''], ['abra ka\nda', 'bra']]], dtype='>> values.str.rsplit(dim="splitted") Size: 768B array([[['', '', 'abc', 'def'], ['', 'spam', 'eggs', 'swallow'], ['', '', '', 'red_blue']], [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='>> values.str.rsplit(dim=None, maxsplit=1) Size: 48B array([[list(['abc', 'def']), list(['spam\t\teggs', 'swallow']), list(['red_blue'])], [list(['test0\ntest1\ntest2', 'test3']), list([]), list(['abra ka\nda', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.rsplit(dim=None) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.rsplit(dim="splitted", sep=" ") Size: 2kB array([[['', 'abc', 'def'], ['', '', 'spam\t\teggs\tswallow'], ['', '', 'red_blue']], [['', '', 'test0\ntest1\ntest2\n\ntest3'], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype=' DataArray: """ Return DataArray of dummy/indicator variables. Each string in the DataArray is split at `sep`. A new dimension is created with coordinates for each unique result, and the corresponding element of that dimension is `True` if that result is present and `False` if not. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable Name for the dimension to place the results in. sep : str, default: "|". String to split on. If array-like, it is broadcast. Returns ------- dummies : array of bool Examples -------- Create a string array >>> values = xr.DataArray( ... [ ... ["a|ab~abc|abc", "ab", "a||abc|abcd"], ... ["abcd|ab|a", "abc|ab~abc", "|a"], ... ], ... dims=["X", "Y"], ... ) Extract dummy values >>> values.str.get_dummies(dim="dummies") Size: 30B array([[[ True, False, True, False, True], [False, True, False, False, False], [ True, False, True, True, False]], [[ True, True, False, True, False], [False, False, True, False, True], [ True, False, False, False, False]]]) Coordinates: * dummies (dummies) T_DataArray: """ Decode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders `_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. Please see the Python documentation `codecs error handlers `_ for a list of error handlers. Returns ------- decoded : same type as values """ if encoding in _cpython_optimized_decoders: func = lambda x: x.decode(encoding, errors) else: decoder = codecs.getdecoder(encoding) func = lambda x: decoder(x, errors)[0] return self._apply(func=func, dtype=np.str_) def encode(self, encoding: str, errors: str = "strict") -> T_DataArray: """ Encode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders `_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. 
Please see the Python documentation `codecs error handlers `_ for a list of error handlers. Returns ------- encoded : same type as values """ if encoding in _cpython_optimized_encoders: func = lambda x: x.encode(encoding, errors) else: encoder = codecs.getencoder(encoding) func = lambda x: encoder(x, errors)[0] return self._apply(func=func, dtype=np.bytes_) �����������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.12.0/xarray/core/common.py��������������������������������������������������������������0000664�0000000�0000000�00000223507�15114646760�0017454�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import datetime import warnings from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping from contextlib import suppress from html import escape from textwrap import dedent from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, Union, overload import numpy as np import pandas as pd from xarray.core import dtypes, duck_array_ops, formatting, formatting_html from xarray.core.indexing import BasicIndexer, ExplicitlyIndexed from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ResampleCompatible from xarray.core.utils import ( Frozen, either_dict_or_kwargs, is_scalar, ) from xarray.namedarray.core import _raise_if_any_duplicate_dimensions from xarray.namedarray.parallelcompat import get_chunked_array_type, guess_chunkmanager from xarray.namedarray.pycompat import is_chunked_array try: import cftime except ImportError: cftime = None # Used as a sentinel value to indicate a all dimensions ALL_DIMS = ... if TYPE_CHECKING: from numpy.typing import DTypeLike from xarray.computation.rolling_exp import RollingExp from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index from xarray.core.resample import Resample from xarray.core.types import ( DatetimeLike, DTypeLikeSave, ScalarOrArray, Self, SideOptions, T_Chunks, T_DataWithCoords, T_Variable, ) from xarray.core.variable import Variable from xarray.groupers import Resampler DTypeMaybeMapping = Union[DTypeLikeSave, Mapping[Any, DTypeLikeSave]] T_Resample = TypeVar("T_Resample", bound="Resample") C = TypeVar("C") T = TypeVar("T") P = ParamSpec("P") class ImplementsArrayReduce: __slots__ = () @classmethod def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool): if include_skipna: def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs): return self.reduce( func=func, dim=dim, axis=axis, skipna=skipna, **kwargs ) else: def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore[misc] return self.reduce(func=func, dim=dim, axis=axis, **kwargs) return wrapped_func _reduce_extra_args_docstring = dedent( """\ dim : str or sequence of str, optional Dimension(s) over which to apply `{name}`. axis : int or sequence of int, optional Axis(es) over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied. 
If neither are supplied, then `{name}` is calculated over axes.""" ) _cum_extra_args_docstring = dedent( """\ dim : str or sequence of str, optional Dimension over which to apply `{name}`. axis : int or sequence of int, optional Axis over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied.""" ) class ImplementsDatasetReduce: __slots__ = () @classmethod def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool): if include_skipna: def wrapped_func(self, dim=None, skipna=None, **kwargs): return self.reduce( func=func, dim=dim, skipna=skipna, numeric_only=numeric_only, **kwargs, ) else: def wrapped_func(self, dim=None, **kwargs): # type: ignore[misc] return self.reduce( func=func, dim=dim, numeric_only=numeric_only, **kwargs ) return wrapped_func _reduce_extra_args_docstring = dedent( """ dim : str or sequence of str, optional Dimension(s) over which to apply `{name}`. By default `{name}` is applied over all dimensions. """ ).strip() _cum_extra_args_docstring = dedent( """ dim : str or sequence of str, optional Dimension over which to apply `{name}`. axis : int or sequence of int, optional Axis over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied. """ ).strip() class AbstractArray: """Shared base class for DataArray and Variable.""" __slots__ = () def __bool__(self: Any) -> bool: return bool(self.values) def __float__(self: Any) -> float: return float(self.values) def __int__(self: Any) -> int: return int(self.values) def __complex__(self: Any) -> complex: return complex(self.values) def __array__( self: Any, dtype: DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: if not copy: if np.lib.NumpyVersion(np.__version__) >= "2.0.0": copy = None elif np.lib.NumpyVersion(np.__version__) <= "1.28.0": copy = False else: # 2.0.0 dev versions, handle cases where copy may or may not exist try: np.array([1]).__array__(copy=None) copy = None except TypeError: copy = False return np.array(self.values, dtype=dtype, copy=copy) def __repr__(self) -> str: return formatting.array_repr(self) def _repr_html_(self): if OPTIONS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return formatting_html.array_repr(self) def __format__(self: Any, format_spec: str = "") -> str: if format_spec != "": if self.shape == (): # Scalar values might be ok use format_spec with instead of repr: return self.data.__format__(format_spec) else: # TODO: If it's an array the formatting.array_repr(self) should # take format_spec as an input. If we'd only use self.data we # lose all the information about coords for example which is # important information: raise NotImplementedError( "Using format_spec is only supported" f" when shape is (). Got shape = {self.shape}." ) else: return self.__repr__() def _iter(self: Any) -> Iterator[Any]: for n in range(len(self)): yield self[n] def __iter__(self: Any) -> Iterator[Any]: if self.ndim == 0: raise TypeError("iteration over a 0-d array") return self._iter() @overload def get_axis_num(self, dim: str) -> int: ... # type: ignore [overload-overlap] @overload def get_axis_num(self, dim: Iterable[Hashable]) -> tuple[int, ...]: ... @overload def get_axis_num(self, dim: Hashable) -> int: ... def get_axis_num(self, dim: Hashable | Iterable[Hashable]) -> int | tuple[int, ...]: """Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions. """ if not isinstance(dim, str) and isinstance(dim, Iterable): return tuple(self._get_axis_num(d) for d in dim) else: return self._get_axis_num(dim) def _get_axis_num(self: Any, dim: Hashable) -> int: _raise_if_any_duplicate_dimensions(self.dims) try: return self.dims.index(dim) except ValueError as err: raise ValueError( f"{dim!r} not found in array dimensions {self.dims!r}" ) from err @property def sizes(self: Any) -> Mapping[Hashable, int]: """Ordered mapping from dimension names to lengths. Immutable. See Also -------- Dataset.sizes """ return Frozen(dict(zip(self.dims, self.shape, strict=True))) class AttrAccessMixin: """Mixin class that allows getting keys with attribute access""" __slots__ = () def __init_subclass__(cls, **kwargs): """Verify that all subclasses explicitly define ``__slots__``. If they don't, raise error in the core xarray module and a FutureWarning in third-party extensions. 
""" if not hasattr(object.__new__(cls), "__dict__"): pass elif cls.__module__.startswith("xarray."): raise AttributeError(f"{cls.__name__} must explicitly define __slots__") else: cls.__setattr__ = cls._setattr_dict warnings.warn( f"xarray subclass {cls.__name__} should explicitly define __slots__", FutureWarning, stacklevel=2, ) super().__init_subclass__(**kwargs) @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from () @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for key-autocompletion""" yield from () def __getattr__(self, name: str) -> Any: if name not in {"__dict__", "__setstate__"}: # this avoids an infinite loop when pickle looks for the # __setstate__ attribute before the xarray object is initialized for source in self._attr_sources: with suppress(KeyError): return source[name] raise AttributeError( f"{type(self).__name__!r} object has no attribute {name!r}" ) # This complicated two-method design boosts overall performance of simple operations # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by # a whopping 8% compared to a single method that checks hasattr(self, "__dict__") at # runtime before every single assignment. All of this is just temporary until the # FutureWarning can be changed into a hard crash. def _setattr_dict(self, name: str, value: Any) -> None: """Deprecated third party subclass (see ``__init_subclass__`` above)""" object.__setattr__(self, name, value) if name in self.__dict__: # Custom, non-slotted attr, or improperly assigned variable? warnings.warn( f"Setting attribute {name!r} on a {type(self).__name__!r} object. Explicitly define __slots__ " "to suppress this warning for legitimate custom attributes and " "raise an error when attempting variables assignments.", FutureWarning, stacklevel=2, ) def __setattr__(self, name: str, value: Any) -> None: """Objects with ``__slots__`` raise AttributeError if you try setting an undeclared attribute. This is desirable, but the error message could use some improvement. """ try: object.__setattr__(self, name, value) except AttributeError as e: # Don't accidentally shadow custom AttributeErrors, e.g. # DataArray.dims.setter if str(e) != f"{type(self).__name__!r} object has no attribute {name!r}": raise raise AttributeError( f"cannot set attribute {name!r} on a {type(self).__name__!r} object. Use __setitem__ style" "assignment (e.g., `ds['name'] = ...`) instead of assigning variables." ) from e def __dir__(self) -> list[str]: """Provide method name lookup and completion. Only provide 'public' methods. """ extra_attrs = { item for source in self._attr_sources for item in source if isinstance(item, str) } return sorted(set(dir(type(self))) | extra_attrs) def _ipython_key_completions_(self) -> list[str]: """Provide method for the key-autocompletions in IPython. See https://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion For the details. """ items = { item for source in self._item_sources for item in source if isinstance(item, str) } return list(items) class TreeAttrAccessMixin(AttrAccessMixin): """Mixin class that allows getting keys with attribute access""" # TODO: Ensure ipython tab completion can include both child datatrees and # variables from Dataset objects on relevant nodes. 
__slots__ = () def __init_subclass__(cls, **kwargs): """This method overrides the check from ``AttrAccessMixin`` that ensures ``__dict__`` is absent in a class, with ``__slots__`` used instead. ``DataTree`` has some dynamically defined attributes in addition to those defined in ``__slots__``. (GH9068) """ if not hasattr(object.__new__(cls), "__dict__"): pass def get_squeeze_dims( xarray_obj, dim: Hashable | Iterable[Hashable] | None = None, axis: int | Iterable[int] | None = None, ) -> list[Hashable]: """Get a list of dimensions to squeeze out.""" if dim is not None and axis is not None: raise ValueError("cannot use both parameters `axis` and `dim`") if dim is None and axis is None: return [d for d, s in xarray_obj.sizes.items() if s == 1] if isinstance(dim, Iterable) and not isinstance(dim, str): dim = list(dim) elif dim is not None: dim = [dim] else: assert axis is not None if isinstance(axis, int): axis = [axis] axis = list(axis) if any(not isinstance(a, int) for a in axis): raise TypeError("parameter `axis` must be int or iterable of int.") alldims = list(xarray_obj.sizes.keys()) dim = [alldims[a] for a in axis] if any(xarray_obj.sizes[k] > 1 for k in dim): raise ValueError( "cannot select a dimension to squeeze out which has length greater than one" ) return dim class DataWithCoords(AttrAccessMixin): """Shared base class for Dataset and DataArray.""" _close: Callable[[], None] | None _indexes: dict[Hashable, Index] __slots__ = ("_close",) def squeeze( self, dim: Hashable | Iterable[Hashable] | None = None, drop: bool = False, axis: int | Iterable[int] | None = None, ) -> Self: """Return a new object with squeezed data. Parameters ---------- dim : None or Hashable or iterable of Hashable, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. drop : bool, default: False If ``drop=True``, drop squeezed coordinates instead of making them scalar. axis : None or int or iterable of int, optional Like dim, but positional. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze """ dims = get_squeeze_dims(self, dim, axis) return self.isel(drop=drop, **dict.fromkeys(dims, 0)) def clip( self, min: ScalarOrArray | None = None, max: ScalarOrArray | None = None, *, keep_attrs: bool | None = None, ) -> Self: """ Return an array whose values are limited to ``[min, max]``. At least one of max or min must be given. Parameters ---------- min : None or Hashable, optional Minimum value. If None, no lower clipping is performed. max : None or Hashable, optional Maximum value. If None, no upper clipping is performed. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- clipped : same type as caller This object, but with with values < min are replaced with min, and those > max with max. See Also -------- numpy.clip : equivalent function """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: # When this was a unary func, the default was True, so retaining the # default. 
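# Illustrative note (editor-added commentary, not from the original method): the
# apply_ufunc call below forwards to duck_array_ops.clip with dask="allowed",
# so clipping works unchanged on NumPy- and dask-backed data. As a hypothetical
# example, xr.DataArray([1, 5, 9]).clip(min=2, max=8) has values [2, 5, 8].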
keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.clip, self, min, max, keep_attrs=keep_attrs, dask="allowed" ) def get_index(self, key: Hashable) -> pd.Index: """Get an index for a dimension, with fall-back to a default RangeIndex""" if key not in self.dims: raise KeyError(key) try: return self._indexes[key].to_pandas_index() except KeyError: return pd.Index(range(self.sizes[key]), name=key) def _calc_assign_results( self: C, kwargs: Mapping[Any, T | Callable[[C], T]] ) -> dict[Hashable, T]: return {k: v(self) if callable(v) else v for k, v in kwargs.items()} def assign_coords( self, coords: Mapping | None = None, **coords_kwargs: Any, ) -> Self: """Assign new coordinates to this object. Returns a new object with all the original data in addition to the new coordinates. Parameters ---------- coords : mapping of dim to coord, optional A mapping whose keys are the names of the coordinates and values are the coordinates to assign. The mapping will generally be a dict or :class:`Coordinates`. * If a value is a standard data value โ€” for example, a ``DataArray``, scalar, or array โ€” the data is simply assigned as a coordinate. * If a value is callable, it is called with this object as the only parameter, and the return value is used as new coordinate variables. * A coordinate can also be defined and attached to an existing dimension using a tuple with the first element the dimension name and the second element the values for this new coordinate. **coords_kwargs : optional The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. Returns ------- assigned : same type as caller A new object with the new coordinates in addition to the existing data. Examples -------- Convert `DataArray` longitude coordinates from 0-359 to -180-179: >>> da = xr.DataArray( ... np.random.rand(4), ... coords=[np.array([358, 359, 0, 1])], ... dims="lon", ... ) >>> da Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180)) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 The function also accepts dictionary arguments: >>> da.assign_coords({"lon": (((da.lon + 180) % 360) - 180)}) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 New coordinate can also be attached to an existing dimension: >>> lon_2 = np.array([300, 289, 0, 1]) >>> da.assign_coords(lon_2=("lon", lon_2)) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 lon_2 (lon) int64 32B 300 289 0 1 Note that the same result can also be obtained with a dict e.g. >>> _ = da.assign_coords({"lon_2": ("lon", lon_2)}) Note the same method applies to `Dataset` objects. Convert `Dataset` longitude coordinates from 0-359 to -180-179: >>> temperature = np.linspace(20, 32, num=16).reshape(2, 2, 4) >>> precipitation = 2 * np.identity(4).reshape(2, 2, 4) >>> ds = xr.Dataset( ... data_vars=dict( ... temperature=(["x", "y", "time"], temperature), ... precipitation=(["x", "y", "time"], precipitation), ... ), ... coords=dict( ... lon=(["x", "y"], [[260.17, 260.68], [260.21, 260.77]]), ... lat=(["x", "y"], [[42.25, 42.21], [42.63, 42.59]]), ... time=pd.date_range("2014-09-06", periods=4), ... reference_time=pd.Timestamp("2014-09-05"), ... ), ... attrs=dict(description="Weather-related data"), ... 
) >>> ds Size: 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 lon (x, y) float64 32B 260.2 260.7 260.2 260.8 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 Attributes: description: Weather-related data >>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180)) Size: 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 Attributes: description: Weather-related data See Also -------- Dataset.assign Dataset.swap_dims Dataset.set_coords """ from xarray.core.coordinates import Coordinates coords_combined = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords") data = self.copy(deep=False) results: Coordinates | dict[Hashable, Any] if isinstance(coords, Coordinates): results = coords else: results = self._calc_assign_results(coords_combined) data.coords.update(results) return data def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: """Assign new attrs to this object. Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``. Parameters ---------- *args positional arguments passed into ``attrs.update``. **kwargs keyword arguments passed into ``attrs.update``. Examples -------- >>> dataset = xr.Dataset({"temperature": [25, 30, 27]}) >>> dataset Size: 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 Data variables: *empty* >>> new_dataset = dataset.assign_attrs( ... units="Celsius", description="Temperature data" ... ) >>> new_dataset Size: 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 Data variables: *empty* Attributes: units: Celsius description: Temperature data # Attributes of the new dataset >>> new_dataset.attrs {'units': 'Celsius', 'description': 'Temperature data'} Returns ------- assigned : same type as caller A new object with the new attrs in addition to the existing data. See Also -------- Dataset.assign """ out = self.copy(deep=False) out.attrs.update(*args, **kwargs) return out @overload def pipe( self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, ) -> T: ... @overload def pipe( self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: ... def pipe( self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[P, T], str], *args: P.args, **kwargs: P.kwargs, ) -> T: """ Apply ``func(self, *args, **kwargs)`` This method replicates the pandas method of the same name. Parameters ---------- func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. *args positional arguments passed into ``func``. **kwargs a dictionary of keyword arguments passed into ``func``. 
Returns ------- object : Any the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect xarray or pandas objects, e.g., instead of writing .. code:: python f(g(h(ds), arg1=a), arg2=b, arg3=c) You can write .. code:: python (ds.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: .. code:: python (ds.pipe(h).pipe(g, arg1=a).pipe((f, "arg2"), arg1=a, arg3=c)) Examples -------- >>> x = xr.Dataset( ... { ... "temperature_c": ( ... ("lat", "lon"), ... 20 * np.random.rand(4).reshape(2, 2), ... ), ... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)), ... }, ... coords={"lat": [10, 20], "lon": [150, 160]}, ... ) >>> x Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 >>> def adder(data, arg): ... return data + arg ... >>> def div(data, arg): ... return data / arg ... >>> def sub_mult(data, sub_arg, mult_arg): ... return (data * mult_arg) - sub_arg ... >>> x.pipe(adder, 2) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 >>> x.pipe(adder, arg=2) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 >>> ( ... x.pipe(adder, arg=2) ... .pipe(div, arg=2) ... .pipe(sub_mult, sub_arg=2, mult_arg=2) ... ) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 See Also -------- pandas.DataFrame.pipe """ if isinstance(func, tuple): # Use different var when unpacking function from tuple because the type # signature of the unpacked function differs from the expected type # signature in the case where only a function is given, rather than a tuple. # This makes type checkers happy at both call sites below. f, target = func if target in kwargs: raise ValueError( f"{target} is both the pipe target and a keyword argument" ) kwargs[target] = self return f(*args, **kwargs) return func(self, *args, **kwargs) def rolling_exp( self: T_DataWithCoords, window: Mapping[Any, int] | None = None, window_type: str = "span", **window_kwargs, ) -> RollingExp[T_DataWithCoords]: """ Exponentially-weighted moving window. Similar to EWM in pandas Requires the optional Numbagg dependency. Parameters ---------- window : mapping of hashable to int, optional A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html **window_kwargs : optional The keyword arguments form of ``window``. 
One of window or window_kwargs must be provided. See Also -------- core.rolling_exp.RollingExp """ if "keep_attrs" in window_kwargs: warnings.warn( "Passing ``keep_attrs`` to ``rolling_exp`` has no effect. Pass" " ``keep_attrs`` directly to the applied function, e.g." " ``rolling_exp(...).mean(keep_attrs=False)``.", stacklevel=2, ) window = either_dict_or_kwargs(window, window_kwargs, "rolling_exp") from xarray.computation.rolling_exp import RollingExp return RollingExp(self, window, window_type) def _resample( self, resample_cls: type[T_Resample], indexer: Mapping[Hashable, ResampleCompatible | Resampler] | None, skipna: bool | None, closed: SideOptions | None, label: SideOptions | None, offset: pd.Timedelta | datetime.timedelta | str | None, origin: str | DatetimeLike, restore_coord_dims: bool | None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> T_Resample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : {dim: freq}, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : {dim: freq} The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : same type as caller This object resampled. Examples -------- Downsample monthly time-series data to seasonal data: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() Size: 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS Size: 3kB array([ 0. 
, 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , 0.48387097, 0.51612903, 0.5483871 , 0.58064516, 0.61290323, 0.64516129, 0.67741935, 0.70967742, 0.74193548, 0.77419355, 0.80645161, 0.83870968, 0.87096774, 0.90322581, 0.93548387, 0.96774194, 1. , 1.03225806, 1.06451613, 1.09677419, 1.12903226, 1.16129032, 1.19354839, 1.22580645, 1.25806452, 1.29032258, 1.32258065, 1.35483871, 1.38709677, 1.41935484, 1.4516129 , 1.48387097, 1.51612903, 1.5483871 , 1.58064516, 1.61290323, 1.64516129, 1.67741935, 1.70967742, 1.74193548, 1.77419355, 1.80645161, 1.83870968, 1.87096774, 1.90322581, 1.93548387, 1.96774194, 2. , 2.03448276, 2.06896552, 2.10344828, 2.13793103, 2.17241379, 2.20689655, 2.24137931, 2.27586207, 2.31034483, 2.34482759, 2.37931034, 2.4137931 , 2.44827586, 2.48275862, 2.51724138, 2.55172414, 2.5862069 , 2.62068966, 2.65517241, 2.68965517, 2.72413793, 2.75862069, 2.79310345, 2.82758621, 2.86206897, 2.89655172, 2.93103448, 2.96551724, 3. , 3.03225806, 3.06451613, 3.09677419, 3.12903226, 3.16129032, 3.19354839, 3.22580645, 3.25806452, ... 7.87096774, 7.90322581, 7.93548387, 7.96774194, 8. , 8.03225806, 8.06451613, 8.09677419, 8.12903226, 8.16129032, 8.19354839, 8.22580645, 8.25806452, 8.29032258, 8.32258065, 8.35483871, 8.38709677, 8.41935484, 8.4516129 , 8.48387097, 8.51612903, 8.5483871 , 8.58064516, 8.61290323, 8.64516129, 8.67741935, 8.70967742, 8.74193548, 8.77419355, 8.80645161, 8.83870968, 8.87096774, 8.90322581, 8.93548387, 8.96774194, 9. , 9.03333333, 9.06666667, 9.1 , 9.13333333, 9.16666667, 9.2 , 9.23333333, 9.26666667, 9.3 , 9.33333333, 9.36666667, 9.4 , 9.43333333, 9.46666667, 9.5 , 9.53333333, 9.56666667, 9.6 , 9.63333333, 9.66666667, 9.7 , 9.73333333, 9.76666667, 9.8 , 9.83333333, 9.86666667, 9.9 , 9.93333333, 9.96666667, 10. , 10.03225806, 10.06451613, 10.09677419, 10.12903226, 10.16129032, 10.19354839, 10.22580645, 10.25806452, 10.29032258, 10.32258065, 10.35483871, 10.38709677, 10.41935484, 10.4516129 , 10.48387097, 10.51612903, 10.5483871 , 10.58064516, 10.61290323, 10.64516129, 10.67741935, 10.70967742, 10.74193548, 10.77419355, 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-11-15 Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") Size: 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 2., 2., 2., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3., 3., 3., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 4., 4., 4., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 5., 5., 5., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 6., 6., 6., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 7., 7., 7., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 8., 8., 8., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 9., 9., 9., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 10., 10., 10., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- pandas.Series.resample pandas.DataFrame.resample References ---------- .. [1] https://pandas.pydata.org/docs/user_guide/timeseries.html#dateoffset-objects """ # TODO support non-string indexer after removing the old API. from xarray.core.dataarray import DataArray from xarray.core.groupby import ResolvedGrouper from xarray.core.resample import RESAMPLE_DIM from xarray.groupers import Resampler, TimeResampler indexer = either_dict_or_kwargs(indexer, indexer_kwargs, "resample") if len(indexer) != 1: raise ValueError("Resampling only supported along single dimensions.") dim, freq = next(iter(indexer.items())) dim_name: Hashable = dim dim_coord = self[dim] group = DataArray( dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM ) grouper: Resampler if isinstance(freq, ResampleCompatible): grouper = TimeResampler( freq=freq, closed=closed, label=label, origin=origin, offset=offset ) elif isinstance(freq, Resampler): grouper = freq else: raise ValueError( "freq must be an object of type 'str', 'datetime.timedelta', " "'pandas.Timedelta', 'pandas.DateOffset', or 'TimeResampler'. " f"Received {type(freq)} instead." ) rgrouper = ResolvedGrouper(grouper, group, self) return resample_cls( self, (rgrouper,), dim=dim_name, resample_dim=RESAMPLE_DIM, restore_coord_dims=restore_coord_dims, ) def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: """Filter elements from this object according to a condition. Returns elements from 'DataArray', where 'cond' is True, otherwise fill in 'other'. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic. 
Parameters ---------- cond : DataArray, Dataset, or callable Locations at which to preserve this object's values. dtype must be `bool`. If a callable, the callable is passed this object, and the result is used as the value for cond. other : scalar, DataArray, Dataset, or callable, optional Value to use for locations in this object where ``cond`` is False. By default, these locations are filled with NA. If a callable, it must expect this object as its only parameter. drop : bool, default: False If True, coordinate labels that only correspond to False values of the condition are dropped from the result. Returns ------- DataArray or Dataset Same xarray type as caller, with dtype float64. Examples -------- >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> a Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4) Size: 200B array([[ 0., 1., 2., 3., nan], [ 5., 6., 7., nan, nan], [10., 11., nan, nan, nan], [15., nan, nan, nan, nan], [nan, nan, nan, nan, nan]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 5, -1) Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, -1], [10, 11, 12, -1, -1], [15, 16, -1, -1, -1], [20, -1, -1, -1, -1]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4, drop=True) Size: 128B array([[ 0., 1., 2., 3.], [ 5., 6., 7., nan], [10., 11., nan, nan], [15., nan, nan, nan]]) Dimensions without coordinates: x, y >>> a.where(lambda x: x.x + x.y < 4, lambda x: -x) Size: 200B array([[ 0, 1, 2, 3, -4], [ 5, 6, 7, -8, -9], [ 10, 11, -12, -13, -14], [ 15, -16, -17, -18, -19], [-20, -21, -22, -23, -24]]) Dimensions without coordinates: x, y See Also -------- numpy.where : corresponding numpy function where : equivalent function """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.structure.alignment import align if callable(cond): cond = cond(self) if callable(other): other = other(self) if drop: if not isinstance(cond, Dataset | DataArray): raise TypeError( f"cond argument is {cond!r} but must be a {Dataset!r} or {DataArray!r} (or a callable than returns one)." ) self, cond = align(self, cond) def _dataarray_indexer(dim: Hashable) -> DataArray: return cond.any(dim=(d for d in cond.dims if d != dim)) def _dataset_indexer(dim: Hashable) -> DataArray: cond_wdim = cond.drop_vars( var for var in cond if dim not in cond[var].dims ) keepany = cond_wdim.any(dim=(d for d in cond.dims if d != dim)) return keepany.to_dataarray().any("variable") _get_indexer = ( _dataarray_indexer if isinstance(cond, DataArray) else _dataset_indexer ) indexers = {} for dim in cond.sizes.keys(): indexers[dim] = _get_indexer(dim) self = self.isel(**indexers) cond = cond.isel(**indexers) from xarray.computation import ops return ops.where_method(self, cond, other) def set_close(self, close: Callable[[], None] | None) -> None: """Register the function that releases any resources linked to this object. This method controls how xarray cleans up resources associated with this object when the ``.close()`` method is called. It is mostly intended for backend developers and it is rarely needed by regular end-users. Parameters ---------- close : callable The function that when called like ``close()`` releases any resources linked to this object. 
""" self._close = close def close(self) -> None: """Release any resources linked to this object.""" if self._close is not None: self._close() self._close = None def isnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is a missing value. Parameters ---------- keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- isnull : DataArray or Dataset Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.isnull Examples -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array Size: 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.isnull() Size: 3B array([False, True, False]) Dimensions without coordinates: x """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is not a missing value. Parameters ---------- keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- notnull : DataArray or Dataset Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array Size: 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.notnull() Size: 3B array([ True, False, True]) Dimensions without coordinates: x """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) def isin(self, test_elements: Any) -> Self: """Tests each value in the array for whether it is in test elements. Parameters ---------- test_elements : array_like The values against which to test each value of `element`. This argument is flattened if an array or array_like. See numpy notes for behavior with non-array-like parameters. Returns ------- isin : DataArray or Dataset Has the same type and shape as this object, but with a bool dtype. Examples -------- >>> array = xr.DataArray([1, 2, 3], dims="x") >>> array.isin([1, 3]) Size: 3B array([ True, False, True]) Dimensions without coordinates: x See Also -------- numpy.isin """ from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.variable import Variable if isinstance(test_elements, Dataset): raise TypeError( f"isin() argument must be convertible to an array: {test_elements}" ) elif isinstance(test_elements, Variable | DataArray): # need to explicitly pull out data to support dask arrays as the # second argument test_elements = test_elements.data return apply_ufunc( duck_array_ops.isin, self, kwargs=dict(test_elements=test_elements), dask="allowed", ) def astype( self, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> Self: """ Copy of the xarray object, with data cast to a specified type. Leaves coordinate dtype unchanged. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. 
order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. โ€˜Cโ€™ means C order, โ€˜Fโ€™ means Fortran order, โ€˜Aโ€™ means โ€˜Fโ€™ order if all the arrays are Fortran contiguous, โ€˜Cโ€™ order otherwise, and โ€˜Kโ€™ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See Also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from xarray.computation.apply_ufunc import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def __enter__(self) -> Self: return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.close() def __getitem__(self, value): # implementations of this class should implement this method raise NotImplementedError() @overload def full_like( other: DataArray, fill_value: Any, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... @overload def full_like( other: Dataset, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... @overload def full_like( other: Variable, fill_value: Any, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def full_like( other: Dataset | DataArray, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = {}, # noqa: B006 chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def full_like( other: Dataset | DataArray | Variable, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... 
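# Illustrative sketch (editor-added commentary, not from the original module;
# assumes xarray and numpy are imported as ``xr`` and ``np``): full_like
# broadcasts a scalar fill value (or, for Datasets, a per-variable mapping)
# over the template object, with the dtype defaulting to the template's dtype:
#     x = xr.DataArray(np.arange(4).reshape(2, 2), dims=("a", "b"))
#     xr.full_like(x, 7)                  # every element is 7, integer dtype
#     xr.full_like(x, 0.5)                # truncates to 0 because x is integer
#     xr.full_like(x, 0.5, dtype=float)   # 0.5 everywhere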
def full_like( other: Dataset | DataArray | Variable, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """ Return a new object with the same shape and type as a given object. Returned object will be chunked if if the given object is chunked, or if chunks or chunked_array_type are specified. Parameters ---------- other : DataArray, Dataset or Variable The reference object in input fill_value : scalar or dict-like Value to fill the new object with before returning it. If other is a Dataset, may also be a dict-like mapping data variables to fill values. dtype : dtype or dict-like of dtype, optional dtype of the new array. If a dict-like, maps dtypes to variables. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : same as object New object with the same shape and type as other, with the data filled with fill_value. Coords will be copied from other. If other is based on dask, the new one will be as well, and will be split in the same chunks. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... ) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 1) Size: 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5) Size: 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5, dtype=np.double) Size: 48B array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, np.nan, dtype=np.double) Size: 48B array([[nan, nan, nan], [nan, nan, nan]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> ds = xr.Dataset( ... {"a": ("x", [3, 5, 2]), "b": ("x", [9, 1, 0])}, coords={"x": [2, 4, 6]} ... 
) >>> ds Size: 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) int64 24B 3 5 2 b (x) int64 24B 9 1 0 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}) Size: 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) int64 24B 1 1 1 b (x) int64 24B 2 2 2 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}, dtype={"a": bool, "b": float}) Size: 51B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) bool 3B True True True b (x) float64 24B 2.0 2.0 2.0 See Also -------- zeros_like ones_like """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.variable import Variable if not is_scalar(fill_value) and not ( isinstance(other, Dataset) and isinstance(fill_value, dict) ): raise ValueError( f"fill_value must be scalar or, for datasets, a dict-like. Received {fill_value} instead." ) if isinstance(other, Dataset): if not isinstance(fill_value, dict): fill_value = dict.fromkeys(other.data_vars.keys(), fill_value) dtype_: Mapping[Any, DTypeLikeSave] if not isinstance(dtype, Mapping): dtype_ = dict.fromkeys(other.data_vars.keys(), dtype) else: dtype_ = dtype data_vars = { k: _full_like_variable( v.variable, fill_value.get(k, dtypes.NA), dtype_.get(k, None), chunks, chunked_array_type, from_array_kwargs, ) for k, v in other.data_vars.items() } return Dataset(data_vars, coords=other.coords, attrs=other.attrs) elif isinstance(other, DataArray): if isinstance(dtype, Mapping): raise ValueError("'dtype' cannot be dict-like when passing a DataArray") return DataArray( _full_like_variable( other.variable, fill_value, dtype, chunks, chunked_array_type, from_array_kwargs, ), dims=other.dims, coords=other.coords, attrs=other.attrs, name=other.name, ) elif isinstance(other, Variable): if isinstance(dtype, Mapping): raise ValueError("'dtype' cannot be dict-like when passing a Variable") return _full_like_variable( other, fill_value, dtype, chunks, chunked_array_type, from_array_kwargs ) else: raise TypeError("Expected DataArray, Dataset, or Variable") def _full_like_variable( other: Variable, fill_value: Any, dtype: DTypeLike | None = None, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: """Inner function of full_like, where other must be a variable""" from xarray.core.variable import Variable if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(dtype if dtype is not None else other.dtype) if ( is_chunked_array(other.data) or chunked_array_type is not None or chunks is not None ): if chunked_array_type is None: chunkmanager = get_chunked_array_type(other.data) else: chunkmanager = guess_chunkmanager(chunked_array_type) if dtype is None: dtype = other.dtype if from_array_kwargs is None: from_array_kwargs = {} data = chunkmanager.array_api.full( other.shape, fill_value, dtype=dtype, chunks=chunks or other.data.chunks, **from_array_kwargs, ) else: data = duck_array_ops.full_like(other.data, fill_value, dtype=dtype) return Variable(dims=other.dims, data=data, attrs=other.attrs) @overload def zeros_like( other: DataArray, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... @overload def zeros_like( other: Dataset, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... 
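# Illustrative note (editor-added commentary, not from the original module):
# because _full_like_variable above dispatches to the chunk manager whenever
# the template data is chunked, a call such as xr.zeros_like(x.chunk({"a": 1}))
# returns a lazy, dask-backed result with the same chunk sizes as the input
# (assuming dask is installed), while xr.zeros_like(x) on in-memory data stays
# in memory.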
@overload def zeros_like( other: Variable, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def zeros_like( other: Dataset | DataArray, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def zeros_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... def zeros_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """Return a new object of zeros with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : DataArray, Dataset or Variable New object of zeros with the same shape and type as other. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... ) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x) Size: 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x, dtype=float) Size: 48B array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 See Also -------- ones_like full_like """ return full_like( other, 0, dtype, chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) @overload def ones_like( other: DataArray, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... @overload def ones_like( other: Dataset, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... 
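
# Quick sketch relating ``zeros_like`` / ``ones_like`` to ``full_like`` (illustrative
# helper only, not part of the public API): both are thin wrappers that forward
# ``dtype``, ``chunks`` and the chunk-manager options to ``full_like``.
def _zeros_ones_like_sketch():  # hypothetical name, for illustration only
    import numpy as np

    import xarray as xr

    da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("y", "x"))
    zeros = xr.zeros_like(da, dtype=float)
    ones = xr.ones_like(da)
    # Equivalent spellings via full_like:
    assert zeros.equals(xr.full_like(da, 0, dtype=float))
    assert ones.equals(xr.full_like(da, 1))
    return zeros, ones
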
@overload def ones_like( other: Variable, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def ones_like( other: Dataset | DataArray, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def ones_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... def ones_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """Return a new object of ones with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset, or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : same as object New object of ones with the same shape and type as other. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... ) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.ones_like(x) Size: 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 See Also -------- zeros_like full_like """ return full_like( other, 1, dtype, chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) def get_chunksizes( variables: Iterable[Variable], ) -> Mapping[Any, tuple[int, ...]]: chunks: dict[Any, tuple[int, ...]] = {} for v in variables: if hasattr(v._data, "chunks"): for dim, c in v.chunksizes.items(): if dim in chunks and c != chunks[dim]: raise ValueError( f"Object has inconsistent chunks along dimension {dim}. " "This can be fixed by calling unify_chunks()." 
) chunks[dim] = c return Frozen(chunks) def is_np_datetime_like(dtype: DTypeLike | None) -> bool: """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) def is_np_timedelta_like(dtype: DTypeLike | None) -> bool: """Check whether dtype is of the timedelta64 dtype.""" return np.issubdtype(dtype, np.timedelta64) def _contains_cftime_datetimes(array: Any) -> bool: """Check if an array inside a Variable contains cftime.datetime objects""" if cftime is None: return False if array.dtype == np.dtype("O") and array.size > 0: first_idx = (0,) * array.ndim if isinstance(array, ExplicitlyIndexed): first_idx = BasicIndexer(first_idx) sample = array[first_idx] return isinstance(np.asarray(sample).item(), cftime.datetime) return False def contains_cftime_datetimes(var: T_Variable) -> bool: """Check if an xarray.Variable contains cftime.datetime objects""" return _contains_cftime_datetimes(var._data) def _contains_datetime_like_objects(var: T_Variable) -> bool: """Check if a variable contains datetime like objects (either np.datetime64, np.timedelta64, or cftime.datetime) """ return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var) def _is_numeric_aggregatable_dtype(var: T_Variable) -> bool: """Check if a variable's dtype can be used in numeric aggregations like mean(). This includes: - Numeric types (int, float, complex) - Boolean type - Datetime types (datetime64, timedelta64) - Object arrays containing datetime-like objects (e.g., cftime) """ return ( np.issubdtype(var.dtype, np.number) or (var.dtype == np.bool_) or np.issubdtype(var.dtype, np.datetime64) or np.issubdtype(var.dtype, np.timedelta64) or _contains_cftime_datetimes(var._data) ) xarray-2025.12.0/xarray/core/coordinate_transform.py000066400000000000000000000065701511464676000224050ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Hashable, Iterable, Mapping from typing import Any, overload import numpy as np class CoordinateTransform: """Abstract coordinate transform with dimension & coordinate names. .. caution:: This API is experimental and subject to change. Please report any bugs or surprising behaviour you encounter. """ coord_names: tuple[Hashable, ...] dims: tuple[str, ...] dim_size: dict[str, int] dtype: Any def __init__( self, coord_names: Iterable[Hashable], dim_size: Mapping[str, int], dtype: Any = None, ): self.coord_names = tuple(coord_names) self.dims = tuple(dim_size) self.dim_size = dict(dim_size) if dtype is None: dtype = np.dtype(np.float64) self.dtype = dtype def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]: """Perform grid -> world coordinate transformation. Parameters ---------- dim_positions : dict Grid location(s) along each dimension (axis). Returns ------- coord_labels : dict World coordinate labels. """ # TODO: cache the results in order to avoid re-computing # all labels when accessing the values of each coordinate one at a time raise NotImplementedError def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]: """Perform world -> grid coordinate reverse transformation. Parameters ---------- labels : dict World coordinate labels. Returns ------- dim_positions : dict Grid relative location(s) along each dimension (axis). """ raise NotImplementedError @overload def equals(self, other: CoordinateTransform) -> bool: ... @overload def equals( self, other: CoordinateTransform, *, exclude: frozenset[Hashable] | None = None ) -> bool: ... 
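
    # A rough sketch of a concrete subclass (illustrative only, not shipped with
    # xarray): a 1-D affine transform mapping integer grid positions to world
    # coordinates via ``world = offset + scale * position``. ``forward`` and
    # ``reverse`` mirror each other, and ``equals`` compares the parameters.
    #
    #     class AffineTransform1D(CoordinateTransform):
    #         def __init__(self, coord_name, dim, size, scale, offset):
    #             super().__init__([coord_name], {dim: size})
    #             self.scale = scale
    #             self.offset = offset
    #
    #         def forward(self, dim_positions):
    #             pos = dim_positions[self.dims[0]]
    #             return {self.coord_names[0]: self.offset + self.scale * pos}
    #
    #         def reverse(self, coord_labels):
    #             labels = coord_labels[self.coord_names[0]]
    #             return {self.dims[0]: (labels - self.offset) / self.scale}
    #
    #         def equals(self, other, **kwargs):
    #             return (
    #                 isinstance(other, AffineTransform1D)
    #                 and self.scale == other.scale
    #                 and self.offset == other.offset
    #             )
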
def equals(self, other: CoordinateTransform, **kwargs) -> bool: """Check equality with another CoordinateTransform of the same kind. Parameters ---------- other : CoordinateTransform The other CoordinateTransform object to compare with this object. exclude : frozenset of hashable, optional Dimensions excluded from checking. It is None by default, (i.e., when this method is not called in the context of alignment). For a n-dimensional transform this option allows a CoordinateTransform to optionally ignore any dimension in ``exclude`` when comparing ``self`` with ``other``. For a 1-dimensional transform this kwarg can be safely ignored, as this method is not called when all of the transform's dimensions are also excluded from alignment. """ raise NotImplementedError def generate_coords( self, dims: tuple[str, ...] | None = None ) -> dict[Hashable, Any]: """Compute all coordinate labels at once.""" if dims is None: dims = self.dims positions = np.meshgrid( *[np.arange(self.dim_size[d]) for d in dims], indexing="ij", ) dim_positions = {dim: positions[i] for i, dim in enumerate(dims)} return self.forward(dim_positions) xarray-2025.12.0/xarray/core/coordinates.py000066400000000000000000001354041511464676000204740ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence from contextlib import contextmanager from typing import ( TYPE_CHECKING, Any, Generic, cast, ) import numpy as np import pandas as pd from xarray.core import formatting from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, assert_no_index_corrupted, create_default_index_implicit, ) from xarray.core.types import DataVars, ErrorOptions, Self, T_DataArray, T_Xarray from xarray.core.utils import ( Frozen, ReprObject, either_dict_or_kwargs, emit_user_level_warning, ) from xarray.core.variable import Variable, as_variable, calculate_dimensions from xarray.structure.alignment import Aligner from xarray.structure.merge import merge_coordinates_without_align, merge_coords if TYPE_CHECKING: from xarray.core.common import DataWithCoords from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree # Used as the key corresponding to a DataArray's variable when converting # arbitrary DataArray objects to datasets _THIS_ARRAY = ReprObject("") class AbstractCoordinates(Mapping[Hashable, "T_DataArray"]): _data: DataWithCoords __slots__ = ("_data",) def __getitem__(self, key: Hashable) -> T_DataArray: raise NotImplementedError() @property def _names(self) -> set[Hashable]: raise NotImplementedError() @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: raise NotImplementedError() @property def dtypes(self) -> Frozen[Hashable, np.dtype]: raise NotImplementedError() @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this Coordinates object has indexes that cannot be coerced to pandas.Index objects. See Also -------- Coordinates.xindexes """ return self._data.indexes @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. 
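
        See Also
        --------
        Coordinates.indexes

        Examples
        --------
        Index objects are keyed by coordinate name; a default dimension
        coordinate contributes a single pandas-backed index:

        >>> coords = xr.Coordinates({"x": [1, 2]})
        >>> list(coords.xindexes)
        ['x']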
""" return self._data.xindexes @property def variables(self): raise NotImplementedError() def _update_coords(self, coords, indexes): raise NotImplementedError() def _drop_coords(self, coord_names): raise NotImplementedError() def __iter__(self) -> Iterator[Hashable]: # needs to be in the same order as the dataset variables for k in self.variables: if k in self._names: yield k def __len__(self) -> int: return len(self._names) def __contains__(self, key: Hashable) -> bool: return key in self._names def __repr__(self) -> str: return formatting.coords_repr(self) def to_dataset(self) -> Dataset: raise NotImplementedError() def to_index(self, ordered_dims: Sequence[Hashable] | None = None) -> pd.Index: """Convert all index coordinates into a :py:class:`pandas.Index`. Parameters ---------- ordered_dims : sequence of hashable, optional Possibly reordered version of this object's dimensions indicating the order in which dimensions should appear on the result. Returns ------- pandas.Index Index subclass corresponding to the outer-product of all dimension coordinates. This will be a MultiIndex if this object is has more than more dimension. """ if ordered_dims is None: ordered_dims = list(self.dims) elif set(ordered_dims) != set(self.dims): raise ValueError( "ordered_dims must match dims, but does not: " f"{ordered_dims} vs {self.dims}" ) if len(ordered_dims) == 0: raise ValueError("no valid index for a 0-dimensional object") elif len(ordered_dims) == 1: (dim,) = ordered_dims return self._data.get_index(dim) else: indexes = [self._data.get_index(k) for k in ordered_dims] # compute the sizes of the repeat and tile for the cartesian product # (taken from pandas.core.reshape.util) index_lengths = np.fromiter( (len(index) for index in indexes), dtype=np.intp ) cumprod_lengths = np.cumprod(index_lengths) if cumprod_lengths[-1] == 0: # if any factor is empty, the cartesian product is empty repeat_counts = np.zeros_like(cumprod_lengths) else: # sizes of the repeats repeat_counts = cumprod_lengths[-1] / cumprod_lengths # sizes of the tiles tile_counts = np.roll(cumprod_lengths, 1) tile_counts[0] = 1 # loop over the indexes # for each MultiIndex or Index compute the cartesian product of the codes code_list = [] level_list = [] names = [] for i, index in enumerate(indexes): if isinstance(index, pd.MultiIndex): codes, levels = index.codes, index.levels else: code, level = pd.factorize(index) codes = [code] levels = [level] # compute the cartesian product code_list += [ np.tile(np.repeat(code, repeat_counts[i]), tile_counts[i]) for code in codes ] level_list += levels names += index.names return pd.MultiIndex( levels=level_list, # type: ignore[arg-type,unused-ignore] codes=[list(c) for c in code_list], names=names, ) class Coordinates(AbstractCoordinates): """Dictionary like container for Xarray coordinates (variables + indexes). This collection is a mapping of coordinate names to :py:class:`~xarray.DataArray` objects. It can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. 
Coordinates are either: - returned via the :py:attr:`Dataset.coords`, :py:attr:`DataArray.coords`, and :py:attr:`DataTree.coords` properties, - built from Xarray or Pandas index objects (e.g., :py:meth:`Coordinates.from_xindex` or :py:meth:`Coordinates.from_pandas_multiindex`), - built manually from input coordinate data and Xarray ``Index`` objects via :py:meth:`Coordinates.__init__` (beware that no consistency check is done on those inputs). To create new coordinates from an existing Xarray ``Index`` object, use :py:meth:`Coordinates.from_xindex` instead of :py:meth:`Coordinates.__init__`. The latter is useful, e.g., for creating coordinates with no default index. Parameters ---------- coords: dict-like, optional Mapping where keys are coordinate names and values are objects that can be converted into a :py:class:`~xarray.Variable` object (see :py:func:`~xarray.as_variable`). If another :py:class:`~xarray.Coordinates` object is passed, its indexes will be added to the new created object. indexes: dict-like, optional Mapping where keys are coordinate names and values are :py:class:`~xarray.indexes.Index` objects. If None (default), pandas indexes will be created for each dimension coordinate. Passing an empty dictionary will skip this default behavior. Examples -------- Create a dimension coordinate with a default (pandas) index: >>> xr.Coordinates({"x": [1, 2]}) Coordinates: * x (x) int64 16B 1 2 Create a dimension coordinate with no index: >>> xr.Coordinates(coords={"x": [1, 2]}, indexes={}) Coordinates: x (x) int64 16B 1 2 Create a new Coordinates object from existing dataset coordinates (indexes are passed): >>> ds = xr.Dataset(coords={"x": [1, 2]}) >>> xr.Coordinates(ds.coords) Coordinates: * x (x) int64 16B 1 2 Create indexed coordinates from a ``pandas.MultiIndex`` object: >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> xr.Coordinates.from_pandas_multiindex(midx, "x") Coordinates: * x (x) object 32B MultiIndex * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' * x_level_1 (x) int64 32B 0 1 0 1 Create a new Dataset object by passing a Coordinates object: >>> midx_coords = xr.Coordinates.from_pandas_multiindex(midx, "x") >>> xr.Dataset(coords=midx_coords) Size: 96B Dimensions: (x: 4) Coordinates: * x (x) object 32B MultiIndex * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' * x_level_1 (x) int64 32B 0 1 0 1 Data variables: *empty* """ _data: DataWithCoords __slots__ = ("_data",) def __init__( self, coords: Mapping[Any, Any] | None = None, indexes: Mapping[Any, Index] | None = None, ) -> None: # When coordinates are constructed directly, an internal Dataset is # created so that it is compatible with the DatasetCoordinates and # DataArrayCoordinates classes serving as a proxy for the data. # TODO: refactor DataArray / Dataset so that Coordinates store the data. 
from xarray.core.dataset import Dataset if coords is None: coords = {} variables: dict[Hashable, Variable] default_indexes: dict[Hashable, PandasIndex] = {} coords_obj_indexes: dict[Hashable, Index] = {} if isinstance(coords, Coordinates): if indexes is not None: raise ValueError( "passing both a ``Coordinates`` object and a mapping of indexes " "to ``Coordinates.__init__`` is not allowed " "(this constructor does not support merging them)" ) variables = {k: v.copy() for k, v in coords.variables.items()} coords_obj_indexes = dict(coords.xindexes) else: variables = {} for name, data in coords.items(): var = as_variable(data, name=name, auto_convert=False) if var.dims == (name,) and indexes is None: index, index_vars = create_default_index_implicit(var, list(coords)) default_indexes.update(dict.fromkeys(index_vars, index)) variables.update(index_vars) else: variables[name] = var if indexes is None: indexes = {} else: indexes = dict(indexes) indexes.update(default_indexes) indexes.update(coords_obj_indexes) no_coord_index = set(indexes) - set(variables) if no_coord_index: raise ValueError( f"no coordinate variables found for these indexes: {no_coord_index}" ) for k, idx in indexes.items(): if not isinstance(idx, Index): raise TypeError(f"'{k}' is not an `xarray.indexes.Index` object") # maybe convert to base variable for k, v in variables.items(): if k not in indexes: variables[k] = v.to_base_variable() self._data = Dataset._construct_direct( coord_names=set(variables), variables=variables, indexes=indexes ) @classmethod def _construct_direct( cls, coords: dict[Any, Variable], indexes: dict[Any, Index], dims: dict[Any, int] | None = None, ) -> Self: from xarray.core.dataset import Dataset obj = object.__new__(cls) obj._data = Dataset._construct_direct( coord_names=set(coords), variables=coords, indexes=indexes, dims=dims, ) return obj @classmethod def from_xindex(cls, index: Index) -> Self: """Create Xarray coordinates from an existing Xarray index. Parameters ---------- index : Index Xarray index object. The index must support generating new coordinate variables from itself. Returns ------- coords : Coordinates A collection of Xarray indexed coordinates created from the index. """ variables = index.create_variables() if not variables: raise ValueError( "`Coordinates.from_xindex()` only supports index objects that can generate " "new coordinate variables from scratch. The given index (shown below) did not " f"create any coordinate.\n{index!r}" ) indexes = dict.fromkeys(variables, index) return cls(coords=variables, indexes=indexes) @classmethod def from_pandas_multiindex(cls, midx: pd.MultiIndex, dim: Hashable) -> Self: """Wrap a pandas multi-index as Xarray coordinates (dimension + levels). The returned coordinate variables can be directly assigned to a :py:class:`~xarray.Dataset` or :py:class:`~xarray.DataArray` via the ``coords`` argument of their constructor. Parameters ---------- midx : :py:class:`pandas.MultiIndex` Pandas multi-index object. dim : str Dimension name. Returns ------- coords : Coordinates A collection of Xarray indexed coordinates created from the multi-index. 
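
        Examples
        --------
        A minimal sketch: the level coordinates take the names of the
        multi-index levels (set explicitly here via ``names``), alongside the
        dimension coordinate itself:

        >>> midx = pd.MultiIndex.from_product(
        ...     [["a", "b"], [0, 1]], names=["lvl1", "lvl2"]
        ... )
        >>> coords = xr.Coordinates.from_pandas_multiindex(midx, "x")
        >>> sorted(coords)
        ['lvl1', 'lvl2', 'x']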
""" xr_idx = PandasMultiIndex(midx, dim) variables = xr_idx.create_variables() indexes = dict.fromkeys(variables, xr_idx) return cls(coords=variables, indexes=indexes) @property def _names(self) -> set[Hashable]: return self._data._coord_names @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: """Mapping from dimension names to lengths or tuple of dimension names.""" return self._data.dims @property def sizes(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths.""" return self._data.sizes @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly. See Also -------- Dataset.dtypes """ return Frozen({n: v.dtype for n, v in self._data.variables.items()}) @property def variables(self) -> Mapping[Hashable, Variable]: """Low level interface to Coordinates contents as dict of Variable objects. This dictionary is frozen to prevent mutation. """ return self._data.variables def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset.""" names = [name for name in self._data._variables if name in self._names] return self._data._copy_listed(names) def __getitem__(self, key: Hashable) -> DataArray: return self._data[key] def __delitem__(self, key: Hashable) -> None: # redirect to DatasetCoordinates.__delitem__ del self._data.coords[key] def equals(self, other: Self) -> bool: """Two Coordinates objects are equal if they have matching variables, all of which are equal. See Also -------- Coordinates.identical """ if not isinstance(other, Coordinates): return False return self.to_dataset().equals(other.to_dataset()) def identical(self, other: Self) -> bool: """Like equals, but also checks all variable attributes. See Also -------- Coordinates.equals """ if not isinstance(other, Coordinates): return False return self.to_dataset().identical(other.to_dataset()) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: # redirect to DatasetCoordinates._update_coords self._data.coords._update_coords(coords, indexes) def _drop_coords(self, coord_names): # redirect to DatasetCoordinates._drop_coords self._data.coords._drop_coords(coord_names) def _merge_raw(self, other, reflexive): """For use with binary arithmetic.""" if other is None: variables = dict(self.variables) indexes = dict(self.xindexes) else: coord_list = [self, other] if not reflexive else [other, self] variables, indexes = merge_coordinates_without_align(coord_list) return variables, indexes @contextmanager def _merge_inplace(self, other): """For use with in-place binary arithmetic.""" if other is None: yield else: # don't include indexes in prioritized, because we didn't align # first and we want indexes to be checked prioritized = { k: (v, None) for k, v in self.variables.items() if k not in self.xindexes } variables, indexes = merge_coordinates_without_align( [self, other], prioritized ) yield self._update_coords(variables, indexes) def merge(self, other: Mapping[Any, Any] | None) -> Dataset: """Merge two sets of coordinates to create a new Dataset The method implements the logic used for joining coordinates in the result of a binary operation performed on xarray objects: - If two index coordinates conflict (are not equal), an exception is raised. You must align your data before passing it to this method. - If an index coordinate and a non-index coordinate conflict, the non- index coordinate is dropped. - If two non-index coordinates conflict, both are dropped. 
Parameters ---------- other : dict-like, optional A :py:class:`Coordinates` object or any mapping that can be turned into coordinates. Returns ------- merged : Dataset A new Dataset with merged coordinates. """ from xarray.core.dataset import Dataset if other is None: return self.to_dataset() if not isinstance(other, Coordinates): other = Dataset(coords=other).coords coords, indexes = merge_coordinates_without_align([self, other]) coord_names = set(coords) return Dataset._construct_direct( variables=coords, coord_names=coord_names, indexes=indexes ) def __or__(self, other: Mapping[Any, Any] | None) -> Coordinates: """Merge two sets of coordinates to create a new Coordinates object The method implements the logic used for joining coordinates in the result of a binary operation performed on xarray objects: - If two index coordinates conflict (are not equal), an exception is raised. You must align your data before passing it to this method. - If an index coordinate and a non-index coordinate conflict, the non- index coordinate is dropped. - If two non-index coordinates conflict, both are dropped. Parameters ---------- other : dict-like, optional A :py:class:`Coordinates` object or any mapping that can be turned into coordinates. Returns ------- merged : Coordinates A new Coordinates object with merged coordinates. See Also -------- Coordinates.merge """ return self.merge(other).coords def __setitem__(self, key: Hashable, value: Any) -> None: self.update({key: value}) def update(self, other: Mapping[Any, Any]) -> None: """Update this Coordinates variables with other coordinate variables.""" if not len(other): return other_coords: Coordinates if isinstance(other, Coordinates): # Coordinates object: just pass it (default indexes won't be created) other_coords = other else: other_coords = create_coords_with_default_indexes( getattr(other, "variables", other) ) # Discard original indexed coordinates prior to merge allows to: # - fail early if the new coordinates don't preserve the integrity of existing # multi-coordinate indexes # - drop & replace coordinates without alignment (note: we must keep indexed # coordinates extracted from the DataArray objects passed as values to # `other` - if any - as those are still used for aligning the old/new coordinates) coords_to_align = drop_indexed_coords(set(other_coords) & set(other), self) coords, indexes = merge_coords( [coords_to_align, other_coords], priority_arg=1, indexes=coords_to_align.xindexes, ) # special case for PandasMultiIndex: updating only its dimension coordinate # is still allowed but depreciated. # It is the only case where we need to actually drop coordinates here (multi-index levels) # TODO: remove when removing PandasMultiIndex's dimension coordinate. self._drop_coords(self._names - coords_to_align._names) self._update_coords(coords, indexes) def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self: """Assign new coordinates (and indexes) to a Coordinates object, returning a new object with all the original coordinates in addition to the new ones. Parameters ---------- coords : mapping of dim to coord, optional A mapping whose keys are the names of the coordinates and values are the coordinates to assign. The mapping will generally be a dict or :class:`Coordinates`. * If a value is a standard data value โ€” for example, a ``DataArray``, scalar, or array โ€” the data is simply assigned as a coordinate. 
* A coordinate can also be defined and attached to an existing dimension using a tuple with the first element the dimension name and the second element the values for this new coordinate. **coords_kwargs The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. Returns ------- new_coords : Coordinates A new Coordinates object with the new coordinates (and indexes) in addition to all the existing coordinates. Examples -------- >>> coords = xr.Coordinates() >>> coords Coordinates: *empty* >>> coords.assign(x=[1, 2]) Coordinates: * x (x) int64 16B 1 2 >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y")) Coordinates: * y (y) object 32B MultiIndex * y_level_0 (y) object 32B 'a' 'a' 'b' 'b' * y_level_1 (y) int64 32B 0 1 0 1 """ # TODO: this doesn't support a callable, which is inconsistent with `DataArray.assign_coords` coords = either_dict_or_kwargs(coords, coords_kwargs, "assign") new_coords = self.copy() new_coords.update(coords) return new_coords def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, ) -> Self: results = self.to_dataset()._overwrite_indexes(indexes, variables) # TODO: remove cast once we get rid of DatasetCoordinates # and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates) return cast(Self, results.coords) def _reindex_callback( self, aligner: Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: """Callback called from ``Aligner`` to create a new reindexed Coordinate.""" aligned = self.to_dataset()._reindex_callback( aligner, dim_pos_indexers, variables, indexes, fill_value, exclude_dims, exclude_vars, ) # TODO: remove cast once we get rid of DatasetCoordinates # and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates) return cast(Self, aligned.coords) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return self._data._ipython_key_completions_() def copy( self, deep: bool = False, memo: dict[int, Any] | None = None, ) -> Self: """Return a copy of this Coordinates object.""" # do not copy indexes (may corrupt multi-coordinate indexes) # TODO: disable variables deepcopy? it may also be problematic when they # encapsulate index objects like pd.Index variables = { k: v._copy(deep=deep, memo=memo) for k, v in self.variables.items() } # TODO: getting an error with `self._construct_direct`, possibly because of how # a subclass implements `_construct_direct`. (This was originally the same # runtime code, but we switched the type definitions in #8216, which # necessitates the cast.) return cast( Self, Coordinates._construct_direct( coords=variables, indexes=dict(self.xindexes), dims=dict(self.sizes) ), ) def drop_vars( self, names: str | Iterable[Hashable] | Callable[ [Coordinates | Dataset | DataArray | DataTree], str | Iterable[Hashable], ], *, errors: ErrorOptions = "raise", ) -> Self: """Drop variables from this Coordinates object. Note that indexes that depend on these variables will also be dropped. Parameters ---------- names : hashable or iterable or callable Name(s) of variables to drop. If a callable, this is object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" Error treatment. 
- ``'raise'``: raises a :py:class:`ValueError` error if any of the variable passed are not in the dataset - ``'ignore'``: any given names that are in the dataset are dropped and no error is raised. """ return cast(Self, self.to_dataset().drop_vars(names, errors=errors).coords) def drop_dims( self, drop_dims: str | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop dimensions and associated variables from this dataset. Parameters ---------- drop_dims : str or Iterable of Hashable Dimension or dimensions to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the dataset are dropped and no error is raised. Returns ------- obj : Coordinates Coordinates object without the given dimensions (or any coordinates containing those dimensions). """ return cast(Self, self.to_dataset().drop_dims(drop_dims, errors=errors).coords) def rename_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims: Hashable, ) -> Self: """Returns a new object with renamed dimensions only. Parameters ---------- dims_dict : dict-like, optional Dictionary whose keys are current dimension names and whose values are the desired names. The desired names must not be the name of an existing dimension or Variable in the Coordinates. **dims : optional Keyword form of ``dims_dict``. One of dims_dict or dims must be provided. Returns ------- renamed : Coordinates Coordinates object with renamed dimensions. """ return cast(Self, self.to_dataset().rename_dims(dims_dict, **dims).coords) def rename_vars( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Coordinates: """Returns a new object with renamed variables. Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable or coordinate names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. Returns ------- renamed : Coordinates Coordinates object with renamed variables """ return cast(Self, self.to_dataset().rename_vars(name_dict, **names).coords) class DatasetCoordinates(Coordinates): """Dictionary like container for Dataset coordinates (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ _data: Dataset __slots__ = ("_data",) def __init__(self, dataset: Dataset): self._data = dataset @property def _names(self) -> set[Hashable]: return self._data._coord_names @property def dims(self) -> Frozen[Hashable, int]: # deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466 return self._data.dims @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtypes """ return Frozen( { n: v.dtype for n, v in self._data._variables.items() if n in self._data._coord_names } ) @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen( {k: v for k, v in self._data.variables.items() if k in self._names} ) def __getitem__(self, key: Hashable) -> DataArray: if key in self._data.data_vars: raise KeyError(key) return self._data[key] def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset""" names = [name for name in self._data._variables if name in self._names] return self._data._copy_listed(names) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: variables = self._data._variables.copy() variables.update(coords) # check for inconsistent state *before* modifying anything in-place dims = calculate_dimensions(variables) new_coord_names = set(coords) for dim in dims: if dim in variables: new_coord_names.add(dim) self._data._variables = variables self._data._coord_names.update(new_coord_names) self._data._dims = dims # TODO(shoyer): once ._indexes is always populated by a dict, modify # it to update inplace instead. original_indexes = dict(self._data.xindexes) original_indexes.update(indexes) self._data._indexes = original_indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._variables[name] del self._data._indexes[name] self._data._coord_names.difference_update(coord_names) def __delitem__(self, key: Hashable) -> None: if key in self: del self._data[key] else: raise KeyError( f"{key!r} is not in coordinate variables {tuple(self.keys())}" ) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._data._ipython_key_completions_() if key not in self._data.data_vars ] class DataTreeCoordinates(Coordinates): """ Dictionary like container for coordinates of a DataTree node (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ # TODO: This only needs to be a separate class from `DatasetCoordinates` because DataTree nodes store their variables differently # internally than how Datasets do, see https://github.com/pydata/xarray/issues/9203. _data: DataTree # type: ignore[assignment] # complaining that DataTree is not a subclass of DataWithCoords - this can be fixed by refactoring, see #9203 __slots__ = ("_data",) def __init__(self, datatree: DataTree): self._data = datatree @property def _names(self) -> set[Hashable]: return set(self._data._coord_variables) @property def dims(self) -> Frozen[Hashable, int]: # deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466 return Frozen(self._data.dims) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtypes """ return Frozen({n: v.dtype for n, v in self._data._coord_variables.items()}) @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen(self._data._coord_variables) def __getitem__(self, key: Hashable) -> DataArray: if key not in self._data._coord_variables: raise KeyError(key) return self._data.dataset[key] def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset""" return self._data.dataset._copy_listed(self._names) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: from xarray.core.datatree import check_alignment # create updated node (`.to_dataset` makes a copy so this doesn't modify in-place) node_ds = self._data.to_dataset(inherit=False) node_ds.coords._update_coords(coords, indexes) # check consistency *before* modifying anything in-place # TODO can we clean up the signature of check_alignment to make this less awkward? if self._data.parent is not None: parent_ds = self._data.parent._to_dataset_view( inherit=True, rebuild_dims=False ) else: parent_ds = None check_alignment(self._data.path, node_ds, parent_ds, self._data.children) # assign updated attributes coord_variables = dict(node_ds.coords.variables) self._data._node_coord_variables = coord_variables self._data._node_dims = node_ds._dims self._data._node_indexes = node_ds._indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._node_coord_variables[name] del self._data._node_indexes[name] def __delitem__(self, key: Hashable) -> None: if key in self: del self._data[key] # type: ignore[arg-type] # see https://github.com/pydata/xarray/issues/8836 else: raise KeyError(key) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._data._ipython_key_completions_() if key in self._data._coord_variables ] class DataArrayCoordinates(Coordinates, Generic[T_DataArray]): """Dictionary like container for DataArray coordinates (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ _data: T_DataArray __slots__ = ("_data",) def __init__(self, dataarray: T_DataArray) -> None: self._data = dataarray @property def dims(self) -> tuple[Hashable, ...]: return self._data.dims @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- DataArray.dtype """ return Frozen({n: v.dtype for n, v in self._data._coords.items()}) @property def _names(self) -> set[Hashable]: return set(self._data._coords) def __getitem__(self, key: Hashable) -> T_DataArray: return self._data._getitem_coord(key) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: validate_dataarray_coords( self._data.shape, Coordinates._construct_direct(coords, indexes), self.dims ) self._data._coords = coords self._data._indexes = indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._coords[name] del self._data._indexes[name] @property def variables(self): return Frozen(self._data._coords) def to_dataset(self) -> Dataset: from xarray.core.dataset import Dataset coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()} indexes = dict(self._data.xindexes) return Dataset._construct_direct(coords, set(coords), indexes=indexes) def __delitem__(self, key: Hashable) -> None: if key not in self: raise KeyError( f"{key!r} is not in coordinate variables {tuple(self.keys())}" ) assert_no_index_corrupted(self._data.xindexes, {key}) del self._data._coords[key] if key in self._data._indexes: del self._data._indexes[key] def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return self._data._ipython_key_completions_() def drop_indexed_coords( coords_to_drop: set[Hashable], coords: Coordinates ) -> Coordinates: """Drop indexed coordinates associated with coordinates in coords_to_drop. This will raise an error in case it corrupts any passed index and its coordinate variables. """ new_variables = dict(coords.variables) new_indexes = dict(coords.xindexes) for idx, idx_coords in coords.xindexes.group_by_index(): idx_drop_coords = set(idx_coords) & coords_to_drop # special case for pandas multi-index: still allow but deprecate # dropping only its dimension coordinate. # TODO: remove when removing PandasMultiIndex's dimension coordinate. if isinstance(idx, PandasMultiIndex) and idx_drop_coords == {idx.dim}: idx_drop_coords.update(idx.index.names) emit_user_level_warning( f"updating coordinate {idx.dim!r}, which is a PandasMultiIndex, would leave " f"the multi-index level coordinates {list(idx.index.names)!r} in an inconsistent state. " f"This will raise an error in the future. Use `.drop_vars({list(idx_coords)!r})` " "to drop the coordinates' values before assigning new coordinate values.", FutureWarning, ) elif idx_drop_coords and len(idx_drop_coords) != len(idx_coords): idx_drop_coords_str = ", ".join(f"{k!r}" for k in idx_drop_coords) idx_coords_str = ", ".join(f"{k!r}" for k in idx_coords) raise ValueError( f"cannot drop or update coordinate(s) {idx_drop_coords_str}, which would corrupt " f"the following index built from coordinates {idx_coords_str}:\n" f"{idx}" ) for k in idx_drop_coords: del new_variables[k] del new_indexes[k] return Coordinates._construct_direct(coords=new_variables, indexes=new_indexes) def assert_coordinate_consistent(obj: T_Xarray, coords: Mapping[Any, Variable]) -> None: """Make sure the dimension coordinate of obj is consistent with coords. 
obj: DataArray or Dataset coords: Dict-like of variables """ for k in obj.dims: # make sure there are no conflict in dimension coordinates if k in coords and k in obj.coords and not coords[k].equals(obj[k].variable): raise IndexError( f"dimension coordinate {k!r} conflicts between " f"indexed and indexing objects:\n{obj[k]}\nvs.\n{coords[k]}" ) def create_coords_with_default_indexes( coords: Mapping[Any, Any], data_vars: DataVars | None = None ) -> Coordinates: """Returns a Coordinates object from a mapping of coordinates (arbitrary objects). Create default (pandas) indexes for each of the input dimension coordinates. Extract coordinates from each input DataArray. """ # Note: data_vars is needed here only because a pd.MultiIndex object # can be promoted as coordinates. # TODO: It won't be relevant anymore when this behavior will be dropped # in favor of the more explicit ``Coordinates.from_pandas_multiindex()``. from xarray.core.dataarray import DataArray all_variables = dict(coords) if data_vars is not None: all_variables.update(data_vars) indexes: dict[Hashable, Index] = {} variables: dict[Hashable, Variable] = {} # promote any pandas multi-index in data_vars as coordinates coords_promoted: dict[Hashable, Any] = {} pd_mindex_keys: list[Hashable] = [] for k, v in all_variables.items(): if isinstance(v, pd.MultiIndex): coords_promoted[k] = v pd_mindex_keys.append(k) elif k in coords: coords_promoted[k] = v if pd_mindex_keys: pd_mindex_keys_fmt = ",".join([f"'{k}'" for k in pd_mindex_keys]) emit_user_level_warning( f"the `pandas.MultiIndex` object(s) passed as {pd_mindex_keys_fmt} coordinate(s) or " "data variable(s) will no longer be implicitly promoted and wrapped into " "multiple indexed coordinates in the future " "(i.e., one coordinate for each multi-index level + one dimension coordinate). " "If you want to keep this behavior, you need to first wrap it explicitly using " "`mindex_coords = xarray.Coordinates.from_pandas_multiindex(mindex_obj, 'dim')` " "and pass it as coordinates, e.g., `xarray.Dataset(coords=mindex_coords)`, " "`dataset.assign_coords(mindex_coords)` or `dataarray.assign_coords(mindex_coords)`.", FutureWarning, ) dataarray_coords: list[DataArrayCoordinates] = [] for name, obj in coords_promoted.items(): if isinstance(obj, DataArray): dataarray_coords.append(obj.coords) variable = as_variable(obj, name=name, auto_convert=False) if variable.dims == (name,): # still needed to convert to IndexVariable first due to some # pandas multi-index edge cases. 
variable = variable.to_index_variable() idx, idx_vars = create_default_index_implicit(variable, all_variables) indexes.update(dict.fromkeys(idx_vars, idx)) variables.update(idx_vars) all_variables.update(idx_vars) else: variables[name] = variable.to_base_variable() new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes) # extract and merge coordinates and indexes from input DataArrays if dataarray_coords: prioritized = {k: (v, indexes.get(k)) for k, v in variables.items()} variables, indexes = merge_coordinates_without_align( dataarray_coords + [new_coords], prioritized=prioritized, ) new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes) return new_coords class CoordinateValidationError(ValueError): """Error class for Xarray coordinate validation failures.""" def validate_dataarray_coords( shape: tuple[int, ...], coords: Coordinates | Mapping[Hashable, Variable], dim: tuple[Hashable, ...], ): """Validate coordinates ``coords`` to include in a DataArray defined by ``shape`` and dimensions ``dim``. If a coordinate is associated with an index, the validation is performed by the index. By default the coordinate dimensions must match (a subset of) the array dimensions (in any order) to conform to the DataArray model. The index may override this behavior with other validation rules, though. Non-index coordinates must all conform to the DataArray model. Scalar coordinates are always valid. """ sizes = dict(zip(dim, shape, strict=True)) dim_set = set(dim) indexes: Mapping[Hashable, Index] if isinstance(coords, Coordinates): indexes = coords.xindexes else: indexes = {} for k, v in coords.items(): if k in indexes: invalid = not indexes[k].should_add_coord_to_array(k, v, dim_set) else: invalid = any(d not in dim for d in v.dims) if invalid: raise CoordinateValidationError( f"coordinate {k} has dimensions {v.dims}, but these " "are not a subset of the DataArray " f"dimensions {dim}" ) for d, s in v.sizes.items(): if d in sizes and s != sizes[d]: raise CoordinateValidationError( f"conflicting sizes for dimension {d!r}: " f"length {sizes[d]} on the data but length {s} on " f"coordinate {k!r}" ) def coordinates_from_variable(variable: Variable) -> Coordinates: (name,) = variable.dims new_index, index_vars = create_default_index_implicit(variable) indexes = dict.fromkeys(index_vars, new_index) new_vars = new_index.create_variables() new_vars[name].attrs = variable.attrs return Coordinates(new_vars, indexes) xarray-2025.12.0/xarray/core/dataarray.py000066400000000000000000010732311511464676000201320ustar00rootroot00000000000000from __future__ import annotations import copy import datetime import warnings from collections.abc import ( Callable, Hashable, Iterable, Mapping, MutableMapping, Sequence, ) from functools import partial from os import PathLike from types import EllipsisType from typing import TYPE_CHECKING, Any, Generic, Literal, NoReturn, TypeVar, overload import numpy as np import pandas as pd from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftimeindex import CFTimeIndex from xarray.computation import computation, ops from xarray.computation.arithmetic import DataArrayArithmetic from xarray.core import dtypes, indexing, utils from xarray.core._aggregations import DataArrayAggregations from xarray.core.accessor_dt import CombinedDatetimelikeAccessor from xarray.core.accessor_str import StringAccessor from xarray.core.common import AbstractArray, DataWithCoords, get_chunksizes from xarray.core.coordinates 
import ( Coordinates, DataArrayCoordinates, assert_coordinate_consistent, create_coords_with_default_indexes, validate_dataarray_coords, ) from xarray.core.dataset import Dataset from xarray.core.extension_array import PandasExtensionArray from xarray.core.formatting import format_item from xarray.core.indexes import ( Index, Indexes, PandasMultiIndex, filter_indexes_from_coords, isel_indexes, ) from xarray.core.indexing import is_fancy_indexer, map_index_queries from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Bins, DaCompatible, NetcdfWriteModes, T_Chunks, T_DataArray, T_DataArrayOrSet, ZarrWriteModes, ) from xarray.core.utils import ( Default, FilteredMapping, ReprObject, _default, either_dict_or_kwargs, hashable, infix_dims, result_name, ) from xarray.core.variable import ( IndexVariable, Variable, as_compatible_data, as_variable, ) from xarray.plot.accessor import DataArrayPlotAccessor from xarray.plot.utils import _get_units_from_attrs from xarray.structure import alignment from xarray.structure.alignment import ( _broadcast_helper, _get_broadcast_dims_map_common_coords, align, ) from xarray.structure.chunks import unify_chunks from xarray.structure.merge import PANDAS_TYPES, MergeError from xarray.util.deprecation_helpers import _deprecate_positional_args, deprecate_dims if TYPE_CHECKING: from dask.dataframe import DataFrame as DaskDataFrame from dask.delayed import Delayed from iris.cube import Cube as iris_Cube from numpy.typing import ArrayLike from xarray.backends import ZarrStore from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.computation.rolling import DataArrayCoarsen, DataArrayRolling from xarray.computation.weighted import DataArrayWeighted from xarray.core.groupby import DataArrayGroupBy from xarray.core.resample import DataArrayResample from xarray.core.types import ( CoarsenBoundaryOptions, DatetimeLike, DatetimeUnitOptions, Dims, ErrorOptions, ErrorOptionsWithWarn, GroupIndices, GroupInput, InterpOptions, PadModeOptions, PadReflectOptions, QuantileMethods, QueryEngineOptions, QueryParserOptions, ReindexMethodOptions, ResampleCompatible, Self, SideOptions, T_ChunkDimFreq, T_ChunksFreq, T_Xarray, ZarrStoreLike, ) from xarray.groupers import Grouper, Resampler from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint T_XarrayOther = TypeVar("T_XarrayOther", bound="DataArray" | Dataset) def _infer_coords_and_dims( shape: tuple[int, ...], coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ), dims: str | Iterable[Hashable] | None, ) -> tuple[Mapping[Hashable, Any], tuple[Hashable, ...]]: """All the logic for creating a new DataArray""" if ( coords is not None and not utils.is_dict_like(coords) and len(coords) != len(shape) ): raise ValueError( f"coords is not dict-like, but it has {len(coords)} items, " f"which does not match the {len(shape)} dimensions of the " "data" ) if isinstance(dims, str): dims = (dims,) elif dims is None: dims = [f"dim_{n}" for n in range(len(shape))] if coords is not None and len(coords) == len(shape): # try to infer dimensions from coords if utils.is_dict_like(coords): dims = list(coords.keys()) else: for n, (dim, coord) in enumerate(zip(dims, coords, strict=True)): coord = as_variable( coord, name=dim, auto_convert=False ).to_index_variable() dims[n] = coord.name dims_tuple = tuple(dims) if len(dims_tuple) != len(shape): raise ValueError( "different number of dimensions on data " f"and dims: {len(shape)} vs {len(dims_tuple)}" ) for 
d in dims_tuple: if not hashable(d): raise TypeError(f"Dimension {d} is not hashable") new_coords: Mapping[Hashable, Any] if isinstance(coords, Coordinates): new_coords = coords else: new_coords = {} if utils.is_dict_like(coords): for k, v in coords.items(): new_coords[k] = as_variable(v, name=k, auto_convert=False) if new_coords[k].dims == (k,): new_coords[k] = new_coords[k].to_index_variable() elif coords is not None: for dim, coord in zip(dims_tuple, coords, strict=True): var = as_variable(coord, name=dim, auto_convert=False) var.dims = (dim,) new_coords[dim] = var.to_index_variable() validate_dataarray_coords(shape, new_coords, dims_tuple) return new_coords, dims_tuple def _check_data_shape( data: Any, coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ), dims: str | Iterable[Hashable] | None, ) -> Any: if data is dtypes.NA: data = np.nan if coords is not None and utils.is_scalar(data, include_0d=False): if utils.is_dict_like(coords): if dims is None: return data else: data_shape = tuple( ( as_variable(coords[k], k, auto_convert=False).size if k in coords.keys() else 1 ) for k in dims ) else: data_shape = tuple( as_variable(coord, "foo", auto_convert=False).size for coord in coords ) data = np.full(data_shape, data) return data class _LocIndexer(Generic[T_DataArray]): __slots__ = ("data_array",) def __init__(self, data_array: T_DataArray): self.data_array = data_array def __getitem__(self, key) -> T_DataArray: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) key = dict(zip(self.data_array.dims, labels, strict=True)) return self.data_array.sel(key) def __setitem__(self, key, value) -> None: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) key = dict(zip(self.data_array.dims, labels, strict=True)) dim_indexers = map_index_queries(self.data_array, key).dim_indexers self.data_array[dim_indexers] = value # Used as the key corresponding to a DataArray's variable when converting # arbitrary DataArray objects to datasets _THIS_ARRAY = ReprObject("") class DataArray( AbstractArray, DataWithCoords, DataArrayArithmetic, DataArrayAggregations, ): """N-dimensional array with labeled coordinates and dimensions. DataArray provides a wrapper around numpy ndarrays that uses labeled dimensions and coordinates to support metadata aware operations. The API is similar to that for the pandas Series or DataFrame, but DataArray objects can have any number of dimensions, and their contents have fixed data types. Additional features over raw numpy arrays: - Apply operations over dimensions by name: ``x.sum('time')``. - Select or assign values by integer location (like numpy): ``x[:10]`` or by label (like pandas): ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``. - Mathematical operations (e.g., ``x - y``) vectorize across multiple dimensions (known in numpy as "broadcasting") based on dimension names, regardless of their original order. - Keep track of arbitrary metadata in the form of a Python dictionary: ``x.attrs`` - Convert to a pandas Series: ``x.to_series()``. Getting items from or doing mathematical operations with a DataArray always returns another DataArray. Parameters ---------- data : array_like Values for this array. Must be an ``numpy.ndarray``, ndarray like, or castable to an ``ndarray``. 
If a self-described xarray or pandas object, attempts are made to use this array's metadata to fill in other unspecified arguments. A view of the array's data is used instead of a copy if possible. coords : sequence or dict of array_like or :py:class:`~xarray.Coordinates`, optional Coordinates (tick labels) to use for indexing along each dimension. The following notations are accepted: - mapping {dimension name: array-like} - sequence of tuples that are valid arguments for ``xarray.Variable()`` - (dims, data) - (dims, data, attrs) - (dims, data, attrs, encoding) Additionally, it is possible to define a coord whose name does not match the dimension name, or a coord based on multiple dimensions, with one of the following notations: - mapping {coord name: DataArray} - mapping {coord name: Variable} - mapping {coord name: (dimension name, array-like)} - mapping {coord name: (tuple of dimension names, array-like)} Alternatively, a :py:class:`~xarray.Coordinates` object may be used in order to explicitly pass indexes (e.g., a multi-index or any custom Xarray index) or to bypass the creation of a default index for any :term:`Dimension coordinate` included in that object. dims : Hashable or sequence of Hashable, optional Name(s) of the data dimension(s). Must be either a Hashable (only for 1D data) or a sequence of Hashables with length equal to the number of dimensions. If this argument is omitted, dimension names are taken from ``coords`` (if possible) and otherwise default to ``['dim_0', ... 'dim_n']``. name : str or None, optional Name of this array. attrs : dict_like or None, optional Attributes to assign to the new instance. By default, an empty attribute dictionary is initialized. (see FAQ, :ref:`approach to metadata`) indexes : :py:class:`~xarray.Indexes` or dict-like, optional For internal use only. For passing indexes objects to the new DataArray, use the ``coords`` argument instead with a :py:class:`~xarray.Coordinate` object (both coordinate variables and indexes will be extracted from the latter). Examples -------- Create data: >>> np.random.seed(0) >>> temperature = 15 + 8 * np.random.randn(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> time = pd.date_range("2014-09-06", periods=3) >>> reference_time = pd.Timestamp("2014-09-05") Initialize a dataarray with multiple dimensions: >>> da = xr.DataArray( ... data=temperature, ... dims=["x", "y", "time"], ... coords=dict( ... lon=(["x", "y"], lon), ... lat=(["x", "y"], lat), ... time=time, ... reference_time=reference_time, ... ), ... attrs=dict( ... description="Ambient temperature.", ... units="degC", ... ), ... ) >>> da Size: 96B array([[[29.11241877, 18.20125767, 22.82990387], [32.92714559, 29.94046392, 7.18177696]], [[22.60070734, 13.78914233, 14.17424919], [18.28478802, 16.15234857, 26.63418806]]]) Coordinates: * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Attributes: description: Ambient temperature. units: degC Find out where the coldest temperature was: >>> da.isel(da.argmin(...)) Size: 8B array(7.18177696) Coordinates: lon float64 8B -99.32 lat float64 8B 42.21 time datetime64[ns] 8B 2014-09-08 reference_time datetime64[ns] 8B 2014-09-05 Attributes: description: Ambient temperature. 
units: degC """ _cache: dict[str, Any] _coords: dict[Any, Variable] _close: Callable[[], None] | None _indexes: dict[Hashable, Index] _name: Hashable | None _variable: Variable __slots__ = ( "__weakref__", "_cache", "_close", "_coords", "_indexes", "_name", "_variable", ) dt = utils.UncachedAccessor(CombinedDatetimelikeAccessor["DataArray"]) def __init__( self, data: Any = dtypes.NA, coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ) = None, dims: str | Iterable[Hashable] | None = None, name: Hashable | None = None, attrs: Mapping | None = None, # internal parameters indexes: Mapping[Hashable, Index] | None = None, fastpath: bool = False, ) -> None: if fastpath: variable = data assert dims is None assert attrs is None assert indexes is not None else: if indexes is not None: raise ValueError( "Explicitly passing indexes via the `indexes` argument is not supported " "when `fastpath=False`. Use the `coords` argument instead." ) # try to fill in arguments from data if they weren't supplied if coords is None: if isinstance(data, DataArray): coords = data.coords elif isinstance(data, pd.Series): coords = [data.index] elif isinstance(data, pd.DataFrame): coords = [data.index, data.columns] elif isinstance(data, pd.Index | IndexVariable): coords = [data] if dims is None: dims = getattr(data, "dims", getattr(coords, "dims", None)) if name is None: name = getattr(data, "name", None) if attrs is None and not isinstance(data, PANDAS_TYPES): attrs = getattr(data, "attrs", None) data = _check_data_shape(data, coords, dims) data = as_compatible_data(data) coords, dims = _infer_coords_and_dims(data.shape, coords, dims) variable = Variable(dims, data, attrs, fastpath=True) if not isinstance(coords, Coordinates): coords = create_coords_with_default_indexes(coords) indexes = dict(coords.xindexes) coords = {k: v.copy() for k, v in coords.variables.items()} # These fully describe a DataArray self._variable = variable assert isinstance(coords, dict) self._coords = coords self._name = name self._indexes = dict(indexes) self._close = None @classmethod def _construct_direct( cls, variable: Variable, coords: dict[Any, Variable], name: Hashable, indexes: dict[Hashable, Index], ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ obj = object.__new__(cls) obj._variable = variable obj._coords = coords obj._name = name obj._indexes = indexes obj._close = None return obj def _replace( self, variable: Variable | None = None, coords=None, name: Hashable | Default | None = _default, attrs=_default, indexes=None, ) -> Self: if variable is None: variable = self.variable if coords is None: coords = self._coords if indexes is None: indexes = self._indexes if name is _default: name = self.name if attrs is _default: attrs = copy.copy(self.attrs) else: variable = variable.copy() variable.attrs = attrs return type(self)(variable, coords, name=name, indexes=indexes, fastpath=True) def _replace_maybe_drop_dims( self, variable: Variable, name: Hashable | Default | None = _default, ) -> Self: if self.sizes == variable.sizes: coords = self._coords.copy() indexes = self._indexes elif set(self.dims) == set(variable.dims): # Shape has changed (e.g. 
from reduce(..., keepdims=True) new_sizes = dict(zip(self.dims, variable.shape, strict=True)) coords = { k: v for k, v in self._coords.items() if v.shape == tuple(new_sizes[d] for d in v.dims) } indexes = filter_indexes_from_coords(self._indexes, set(coords)) else: allowed_dims = set(variable.dims) coords = { k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims } indexes = filter_indexes_from_coords(self._indexes, set(coords)) return self._replace(variable, coords, name, indexes=indexes) def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, drop_coords: list[Hashable] | None = None, rename_dims: Mapping[Any, Any] | None = None, ) -> Self: """Maybe replace indexes and their corresponding coordinates.""" if not indexes: return self if variables is None: variables = {} if drop_coords is None: drop_coords = [] new_variable = self.variable.copy() new_coords = self._coords.copy() new_indexes = dict(self._indexes) for name in indexes: new_coords[name] = variables[name] new_indexes[name] = indexes[name] for name in drop_coords: new_coords.pop(name) new_indexes.pop(name) if rename_dims: new_variable.dims = tuple(rename_dims.get(d, d) for d in new_variable.dims) return self._replace( variable=new_variable, coords=new_coords, indexes=new_indexes ) def _to_temp_dataset(self) -> Dataset: return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False) def _from_temp_dataset( self, dataset: Dataset, name: Hashable | Default | None = _default ) -> Self: variable = dataset._variables.pop(_THIS_ARRAY) coords = dataset._variables indexes = dataset._indexes return self._replace(variable, coords, name, indexes=indexes) def _to_dataset_split(self, dim: Hashable) -> Dataset: """splits dataarray along dimension 'dim'""" def subset(dim, label): array = self.loc[{dim: label}] array.attrs = {} return as_variable(array) variables_from_split = { label: subset(dim, label) for label in self.get_index(dim) } coord_names = set(self._coords) - {dim} ambiguous_vars = set(variables_from_split) & coord_names if ambiguous_vars: rename_msg_fmt = ", ".join([f"{v}=..." for v in sorted(ambiguous_vars)]) raise ValueError( f"Splitting along the dimension {dim!r} would produce the variables " f"{tuple(sorted(ambiguous_vars))} which are also existing coordinate " f"variables. Use DataArray.rename({rename_msg_fmt}) or " f"DataArray.assign_coords({dim}=...) to resolve this ambiguity." 
) variables = variables_from_split | { k: v for k, v in self._coords.items() if k != dim } indexes = filter_indexes_from_coords(self._indexes, coord_names) dataset = Dataset._construct_direct( variables, coord_names, indexes=indexes, attrs=self.attrs ) return dataset def _to_dataset_whole( self, name: Hashable = None, shallow_copy: bool = True ) -> Dataset: if name is None: name = self.name if name is None: raise ValueError( "unable to convert unnamed DataArray to a " "Dataset without providing an explicit name" ) if name in self.coords: raise ValueError( "cannot create a Dataset from a DataArray with " "the same name as one of its coordinates" ) # use private APIs for speed: this is called by _to_temp_dataset(), # which is used in the guts of a lot of operations (e.g., reindex) variables = self._coords.copy() variables[name] = self.variable if shallow_copy: for k in variables: variables[k] = variables[k].copy(deep=False) indexes = self._indexes coord_names = set(self._coords) return Dataset._construct_direct(variables, coord_names, indexes=indexes) def to_dataset( self, dim: Hashable = None, *, name: Hashable = None, promote_attrs: bool = False, ) -> Dataset: """Convert a DataArray to a Dataset. Parameters ---------- dim : Hashable, optional Name of the dimension on this array along which to split this array into separate variables. If not provided, this array is converted into a Dataset of one variable. name : Hashable, optional Name to substitute for this array's name. Only valid if ``dim`` is not provided. promote_attrs : bool, default: False Set to True to shallow copy attrs of DataArray to returned Dataset. Returns ------- dataset : Dataset """ if dim is not None and dim not in self.dims: raise TypeError( f"{dim} is not a dim. If supplying a ``name``, pass as a kwarg." ) if dim is not None: if name is not None: raise TypeError("cannot supply both dim and name arguments") result = self._to_dataset_split(dim) else: result = self._to_dataset_whole(name) if promote_attrs: result.attrs = dict(self.attrs) return result @property def name(self) -> Hashable | None: """The name of this array.""" return self._name @name.setter def name(self, value: Hashable | None) -> None: self._name = value @property def variable(self) -> Variable: """Low level interface to the Variable object for this DataArray.""" return self._variable @property def dtype(self) -> np.dtype: """ Data-type of the array's elements. See Also -------- ndarray.dtype numpy.dtype """ return self.variable.dtype @property def shape(self) -> tuple[int, ...]: """ Tuple of array dimensions. See Also -------- numpy.ndarray.shape """ return self.variable.shape @property def size(self) -> int: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array's dimensions. See Also -------- numpy.ndarray.size """ return self.variable.size @property def nbytes(self) -> int: """ Total bytes consumed by the elements of this DataArray's data. If the underlying data array does not include ``nbytes``, estimates the bytes consumed based on the ``size`` and ``dtype``. """ return self.variable.nbytes @property def ndim(self) -> int: """ Number of array dimensions. See Also -------- numpy.ndarray.ndim """ return self.variable.ndim def __len__(self) -> int: return len(self.variable) @property def data(self) -> Any: """ The DataArray's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved.
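# --- Editor's illustrative sketch (not part of the xarray source): a minimal example of the
# distinction described above between ``.data`` (underlying duck array, type preserved) and
# ``.values`` / ``.to_numpy()`` (always coerced to numpy). Variable names are made up.
import numpy as np
import xarray as xr

_demo = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"), name="demo")

# ``.data`` hands back whatever array backs the DataArray (numpy here, but it could be
# dask, sparse, pint, ... without conversion).
assert isinstance(_demo.data, np.ndarray)

# ``.values`` and ``.to_numpy()`` always return a numpy.ndarray.
assert isinstance(_demo.values, np.ndarray)
assert isinstance(_demo.to_numpy(), np.ndarray)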
See Also -------- DataArray.to_numpy DataArray.as_numpy DataArray.values """ return self.variable.data @data.setter def data(self, value: Any) -> None: self.variable.data = value @property def values(self) -> np.ndarray: """ The array's data converted to numpy.ndarray. This will attempt to convert the array naively using np.array(), which will raise an error if the array type does not support coercion like this (e.g. cupy). Note that this array is not copied; operations on it follow numpy's rules of what generates a view vs. a copy, and changes to this array may be reflected in the DataArray as well. """ return self.variable.values @values.setter def values(self, value: Any) -> None: self.variable.values = value def to_numpy(self) -> np.ndarray: """ Coerces wrapped data to numpy and returns a numpy.ndarray. See Also -------- DataArray.as_numpy : Same but returns the surrounding DataArray instead. Dataset.as_numpy DataArray.values DataArray.data """ return self.variable.to_numpy() def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a DataArray. See Also -------- DataArray.to_numpy : Same but returns only the data as a numpy.ndarray object. Dataset.as_numpy : Converts all variables in a Dataset. DataArray.values DataArray.data """ coords = {k: v.as_numpy() for k, v in self._coords.items()} return self._replace(self.variable.as_numpy(), coords, indexes=self._indexes) @property def _in_memory(self) -> bool: return self.variable._in_memory def _to_index(self) -> pd.Index: return self.variable._to_index() def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index. Only possible for 1D arrays. """ return self.variable.to_index() @property def dims(self) -> tuple[Hashable, ...]: """Tuple of dimension names associated with this array. Note that the type of this property is inconsistent with `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for consistently named properties. See Also -------- DataArray.sizes Dataset.dims """ return self.variable.dims @dims.setter def dims(self, value: Any) -> NoReturn: raise AttributeError( "you cannot assign dims on a DataArray. Use " ".rename() or .swap_dims() instead." ) def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]: if utils.is_dict_like(key): return key key = indexing.expanded_indexer(key, self.ndim) return dict(zip(self.dims, key, strict=True)) def _getitem_coord(self, key: Any) -> Self: from xarray.core.dataset_utils import _get_virtual_variable try: var = self._coords[key] except KeyError: dim_sizes = dict(zip(self.dims, self.shape, strict=True)) _, key, var = _get_virtual_variable(self._coords, key, dim_sizes) return self._replace_maybe_drop_dims(var, name=key) def __getitem__(self, key: Any) -> Self: if isinstance(key, str): return self._getitem_coord(key) else: # xarray-style array indexing return self.isel(indexers=self._item_key_to_dict(key)) def __setitem__(self, key: Any, value: Any) -> None: if isinstance(key, str): self.coords[key] = value else: # Coordinates in key, value and self[key] should be consistent. # TODO Coordinate consistency in key is checked here, but it # causes unnecessary indexing. It should be optimized. 
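# --- Editor's illustrative sketch (not part of the xarray source): how the ``__setitem__``
# paths above are exercised from user code. String keys assign coordinates; positional or
# label-based keys write into the underlying variable. Names below are made up.
import numpy as np
import xarray as xr

_da = xr.DataArray(np.zeros((2, 3)), dims=("x", "y"), coords={"x": [10, 20]})

_da[0, :] = 1.0                 # positional assignment (expanded to a dict key internally)
_da.loc[{"x": 20}] = 2.0        # label-based assignment through the _LocIndexer
_da["x"] = [100, 200]           # a string key assigns/overwrites a coordinate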
obj = self[key] if isinstance(value, DataArray): assert_coordinate_consistent(value, obj.coords.variables) value = value.variable # DataArray key -> Variable key key = { k: v.variable if isinstance(v, DataArray) else v for k, v in self._item_key_to_dict(key).items() } self.variable[key] = value def __delitem__(self, key: Any) -> None: del self.coords[key] @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for key-completion""" yield FilteredMapping(keys=self._coords, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.dims, mapping=self.coords) def __contains__(self, key: Any) -> bool: return key in self.data @property def loc(self) -> _LocIndexer: """Attribute for location based indexing like pandas.""" return _LocIndexer(self) @property def attrs(self) -> dict[Any, Any]: """Dictionary storing arbitrary metadata with this array.""" return self.variable.attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self.variable.attrs = dict(value) @property def encoding(self) -> dict[Any, Any]: """Dictionary of format-specific settings for how this array should be serialized.""" return self.variable.encoding @encoding.setter def encoding(self, value: Mapping[Any, Any]) -> None: self.variable.encoding = dict(value) def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new DataArray without encoding on the array or any attached coords.""" ds = self._to_temp_dataset().drop_encoding() return self._from_temp_dataset(ds) @property def indexes(self) -> Indexes: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this Dataset has indexes that cannot be coerced to pandas.Index objects. See Also -------- DataArray.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. """ return Indexes(self._indexes, {k: self._coords[k] for k in self._indexes}) @property def coords(self) -> DataArrayCoordinates: """Mapping of :py:class:`~xarray.DataArray` objects corresponding to coordinate variables. See Also -------- Coordinates """ return DataArrayCoordinates(self) @overload def reset_coords( self, names: Dims = None, *, drop: Literal[False] = False, ) -> Dataset: ... @overload def reset_coords( self, names: Dims = None, *, drop: Literal[True], ) -> Self: ... def reset_coords( self, names: Dims = None, *, drop: bool = False, ) -> Self | Dataset: """Given names of coordinates, reset them to become variables. Parameters ---------- names : str, Iterable of Hashable or None, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, default: False If True, remove coordinates instead of converting them into variables. Returns ------- Dataset, or DataArray if ``drop == True`` Examples -------- >>> temperature = np.arange(25).reshape(5, 5) >>> pressure = np.arange(50, 75).reshape(5, 5) >>> da = xr.DataArray( ... data=temperature, ... dims=["x", "y"], ... coords=dict( ... lon=("x", np.arange(10, 15)), ... lat=("y", np.arange(20, 25)), ... 
Pressure=(["x", "y"], pressure), ... ), ... name="Temperature", ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Pressure (x, y) int64 200B 50 51 52 53 54 55 56 57 ... 68 69 70 71 72 73 74 Dimensions without coordinates: x, y Return Dataset with target coordinate as a data variable rather than a coordinate variable: >>> da.reset_coords(names="Pressure") Size: 480B Dimensions: (x: 5, y: 5) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Dimensions without coordinates: x, y Data variables: Pressure (x, y) int64 200B 50 51 52 53 54 55 56 ... 68 69 70 71 72 73 74 Temperature (x, y) int64 200B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 Return DataArray without targeted coordinate: >>> da.reset_coords(names="Pressure", drop=True) Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Dimensions without coordinates: x, y """ if names is None: names = set(self.coords) - set(self._indexes) dataset = self.coords.to_dataset().reset_coords(names, drop) if drop: return self._replace(coords=dataset._variables) if self.name is None: raise ValueError( "cannot reset_coords with drop=False on an unnamed DataArray" ) dataset[self.name] = self.variable return dataset def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token((type(self), self._variable, self._coords, self._name)) def __dask_graph__(self): return self._to_temp_dataset().__dask_graph__() def __dask_keys__(self): return self._to_temp_dataset().__dask_keys__() def __dask_layers__(self): return self._to_temp_dataset().__dask_layers__() @property def __dask_optimize__(self): return self._to_temp_dataset().__dask_optimize__ @property def __dask_scheduler__(self): return self._to_temp_dataset().__dask_scheduler__ def __dask_postcompute__(self): func, args = self._to_temp_dataset().__dask_postcompute__() return self._dask_finalize, (self.name, func) + args def __dask_postpersist__(self): func, args = self._to_temp_dataset().__dask_postpersist__() return self._dask_finalize, (self.name, func) + args @classmethod def _dask_finalize(cls, results, name, func, *args, **kwargs) -> Self: ds = func(results, *args, **kwargs) variable = ds._variables.pop(_THIS_ARRAY) coords = ds._variables indexes = ds._indexes return cls(variable, coords, name=name, indexes=indexes, fastpath=True) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataarray is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataArray Same object but with lazy data and coordinates as in-memory arrays. 
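# --- Editor's illustrative sketch (not part of the xarray source; assumes the optional dask
# dependency is installed): ``compute`` returns a new, in-memory object while ``load`` fills
# the same object in place, as the docstring above explains. Names are made up.
import numpy as np
import xarray as xr

_lazy = xr.DataArray(np.arange(12).reshape(3, 4), dims=("x", "y")).chunk({"x": 1})

_computed = _lazy.compute()     # new DataArray backed by numpy; ``_lazy`` stays chunked
assert _lazy.chunks is not None
assert _computed.chunks is None

_lazy.load()                    # loads in place and returns the same object
assert _lazy.chunks is None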
See Also -------- dask.compute DataArray.load_async DataArray.compute Dataset.load Variable.load """ ds = self._to_temp_dataset().load(**kwargs) new = self._from_temp_dataset(ds) self._variable = new._variable self._coords = new._coords return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataarray is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataarray Same object but with lazy data and coordinates as in-memory arrays. See Also -------- dask.compute DataArray.compute DataArray.load Dataset.load_async Variable.load_async """ temp_ds = self._to_temp_dataset() ds = await temp_ds.load_async(**kwargs) new = self._from_temp_dataset(ds) self._variable = new._variable self._coords = new._coords return self def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.load``, the original dataarray is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataArray New object with the data and all coordinates as in-memory arrays. See Also -------- dask.compute DataArray.load DataArray.load_async Dataset.compute Variable.compute """ new = self.copy(deep=False) return new.load(**kwargs) def persist(self, **kwargs) -> Self: """Trigger computation in constituent dask arrays This keeps them as dask arrays but encourages them to keep data in memory. This is particularly useful when on a distributed machine. When on a single machine consider using ``.compute()`` instead. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : DataArray New object with all dask-backed data and coordinates as persisted dask arrays. See Also -------- dask.persist """ ds = self._to_temp_dataset().persist(**kwargs) return self._from_temp_dataset(ds) def copy(self, deep: bool = True, data: Any = None) -> Self: """Returns a copy of this array. If `deep=True`, a deep copy is made of the data array. Otherwise, a shallow copy is made, and the returned data array's values are a new view of this data array's values. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array and its coordinates are loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored for all data variables, and only used for coords. 
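# --- Editor's illustrative sketch (not part of the xarray source): shallow versus deep
# ``copy`` and the ``data=`` argument described above. Names are made up for illustration.
import numpy as np
import xarray as xr

_orig = xr.DataArray(np.array([1, 2, 3]), dims="x", coords={"x": ["a", "b", "c"]})

_shallow = _orig.copy(deep=False)
_shallow[0] = 7
assert _orig[0].item() == 7               # shallow copies share the underlying values

_deep = _orig.copy(deep=True)
_deep[0] = 99
assert _orig[0].item() == 7               # deep copies do not

_replaced = _orig.copy(data=[0.1, 0.2, 0.3])  # same dims/coords/attrs, new values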
Returns ------- copy : DataArray New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow versus deep copy >>> array = xr.DataArray([1, 2, 3], dims="x", coords={"x": ["a", "b", "c"]}) >>> array.copy() Size: 24B array([1, 2, 3]) Coordinates: * x (x) >> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 Size: 24B array([7, 2, 3]) Coordinates: * x (x) >> array Size: 24B array([7, 2, 3]) Coordinates: * x (x) >> array.copy(data=[0.1, 0.2, 0.3]) Size: 24B array([0.1, 0.2, 0.3]) Coordinates: * x (x) >> array Size: 24B array([7, 2, 3]) Coordinates: * x (x) Self: variable = self.variable._copy(deep=deep, data=data, memo=memo) indexes, index_vars = self.xindexes.copy_indexes(deep=deep) coords = {} for k, v in self._coords.items(): if k in index_vars: coords[k] = index_vars[k] else: coords[k] = v._copy(deep=deep, memo=memo) return self._replace(variable, coords, indexes=indexes) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) # mutable objects should not be Hashable # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore[assignment] @property def chunks(self) -> tuple[tuple[int, ...], ...] | None: """ Tuple of block lengths for this dataarray's data, in order of dimensions, or None if the underlying data is not a dask array. See Also -------- DataArray.chunk DataArray.chunksizes xarray.unify_chunks """ return self.variable.chunks @property def chunksizes(self) -> Mapping[Any, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataarray's data. If this dataarray does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Differs from DataArray.chunks because it returns a mapping of dimensions to chunk shapes instead of a tuple of chunk shapes. See Also -------- DataArray.chunk DataArray.chunks xarray.unify_chunks """ all_variables = [self.variable] + [c.variable for c in self.coords.values()] return get_chunksizes(all_variables) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) *, name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce this array's data into a dask arrays with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a pandas frequency string is also accepted. Parameters ---------- chunks : int, "auto", tuple of int or mapping of hashable to int or a pandas frequency string, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": "YE"}``. name_prefix : str, optional Prefix for the name of the new dask array. token : str, optional Token uniquely identifying this array. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. 
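# --- Editor's illustrative sketch (not part of the xarray source; assumes dask is
# installed): chunking by dimension name, as described by the ``chunks`` parameter above.
# Names are made up.
import numpy as np
import xarray as xr

_arr = xr.DataArray(np.arange(24).reshape(4, 6), dims=("x", "y"))

_chunked = _arr.chunk({"x": 2, "y": 3})   # mapping of dimension name -> chunk size
assert dict(_chunked.chunksizes) == {"x": (2, 2), "y": (3, 3)}

_auto = _arr.chunk("auto")                # let the chunk manager pick sizes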
inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.DataArray See Also -------- DataArray.chunks DataArray.chunksizes xarray.unify_chunks dask.array.from_array """ chunk_mapping: T_ChunksFreq if chunks is None: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=FutureWarning, stacklevel=2, ) chunk_mapping = {} if isinstance(chunks, float | str | int): # ignoring type; unclear why it won't accept a Literal into the value. chunk_mapping = dict.fromkeys(self.dims, chunks) elif isinstance(chunks, tuple | list): utils.emit_user_level_warning( "Supplying chunks as dimension-order tuples is deprecated. " "It will raise an error in the future. Instead use a dict with dimension names as keys.", category=DeprecationWarning, ) if len(chunks) != len(self.dims): raise ValueError( f"chunks must have the same number of elements as dimensions. " f"Expected {len(self.dims)} elements, got {len(chunks)}." ) chunk_mapping = dict(zip(self.dims, chunks, strict=True)) else: chunk_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") ds = self._to_temp_dataset().chunk( chunk_mapping, name_prefix=name_prefix, token=token, lock=lock, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) return self._from_temp_dataset(ds) def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by selecting indexes along the specified dimension(s). Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be an integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. 
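# --- Editor's illustrative sketch (not part of the xarray source): positional selection with
# ``isel`` using integers, slices and lists, plus ``drop=True``, per the parameters above.
# Names are made up.
import numpy as np
import xarray as xr

_grid = xr.DataArray(
    np.arange(25).reshape(5, 5),
    dims=("x", "y"),
    coords={"x": np.arange(5), "y": np.arange(5)},
)

_row = _grid.isel(x=0)                        # "x" becomes a scalar coordinate
_block = _grid.isel(x=slice(0, 2), y=[0, 2, 4])
_no_scalar = _grid.isel(x=0, drop=True)       # drop the resulting scalar coordinate
assert "x" not in _no_scalar.coords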
Returns ------- indexed : xarray.DataArray See Also -------- :func:`Dataset.isel ` :func:`DataArray.sel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing Examples -------- >>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> tgt_x = xr.DataArray(np.arange(0, 5), dims="points") >>> tgt_y = xr.DataArray(np.arange(0, 5), dims="points") >>> da = da.isel(x=tgt_x, y=tgt_y) >>> da Size: 40B array([ 0, 6, 12, 18, 24]) Dimensions without coordinates: points """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") if any(is_fancy_indexer(idx) for idx in indexers.values()): ds = self._to_temp_dataset()._isel_fancy( indexers, drop=drop, missing_dims=missing_dims ) return self._from_temp_dataset(ds) # Much faster algorithm for when all indexers are ints, slices, one-dimensional # lists, or zero or one-dimensional np.ndarray's variable = self._variable.isel(indexers, missing_dims=missing_dims) indexes, index_variables = isel_indexes(self.xindexes, indexers) coords = {} for coord_name, coord_value in self._coords.items(): if coord_name in index_variables: coord_value = index_variables[coord_name] else: coord_indexers = { k: v for k, v in indexers.items() if k in coord_value.dims } if coord_indexers: coord_value = coord_value.isel(coord_indexers) if drop and coord_value.ndim == 0: continue coords[coord_name] = coord_value return self._replace(variable=variable, coords=coords, indexes=indexes) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance=None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by selecting index labels along the specified dimension(s). In contrast to `DataArray.isel`, indexers for this method should use labels instead of integers. Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. .. warning:: Do not try to assign values when using any of the indexing methods ``isel`` or ``sel``:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) # DO NOT do this da.isel(x=[0, 1, 2])[1] = -1 Assigning values with the chained indexing using ``.sel`` or ``.isel`` fails silently. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. 
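# --- Editor's illustrative sketch (not part of the xarray source): label-based selection
# with ``sel``, including the inclusive label slices and ``method="nearest"`` discussed
# above. Names are made up.
import numpy as np
import pandas as pd
import xarray as xr

_series = xr.DataArray(
    np.arange(4),
    dims="time",
    coords={"time": pd.date_range("2014-09-06", periods=4)},
)

_one_day = _series.sel(time="2014-09-07")                     # datetime string shortcut
_span = _series.sel(time=slice("2014-09-06", "2014-09-08"))   # both endpoints included
assert _span.sizes["time"] == 3

_points = xr.DataArray([10.0, 20.0, 30.0], dims="x", coords={"x": [0.0, 0.5, 1.0]})
_nearest = _points.sel(x=0.6, method="nearest")               # selects the value at x=0.5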
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: - None (default): only exact matches - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataArray A new DataArray with the same contents as this DataArray, except the data and each dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this DataArray, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- :func:`Dataset.sel ` :func:`DataArray.isel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... coords={"x": np.arange(5), "y": np.arange(5)}, ... dims=("x", "y"), ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 1 2 3 4 * y (y) int64 40B 0 1 2 3 4 >>> tgt_x = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> tgt_y = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> da = da.sel(x=tgt_x, y=tgt_y, method="nearest") >>> da Size: 40B array([ 0, 6, 12, 18, 24]) Coordinates: x (points) int64 40B 0 1 2 3 4 y (points) int64 40B 0 1 2 3 4 Dimensions without coordinates: points """ ds = self._to_temp_dataset().sel( indexers=indexers, drop=drop, method=method, tolerance=tolerance, **indexers_kwargs, ) return self._from_temp_dataset(ds) def _shuffle( self, dim: Hashable, *, indices: GroupIndices, chunks: T_Chunks ) -> Self: ds = self._to_temp_dataset()._shuffle(dim=dim, indices=indices, chunks=chunks) return self._from_temp_dataset(ds) def head( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by the the first `n` values along the specified dimension(s). Default `n` = 5 See Also -------- Dataset.head DataArray.tail DataArray.thin Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... 
) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> da.head(x=1) Size: 40B array([[0, 1, 2, 3, 4]]) Dimensions without coordinates: x, y >>> da.head({"x": 2, "y": 2}) Size: 32B array([[0, 1], [5, 6]]) Dimensions without coordinates: x, y """ ds = self._to_temp_dataset().head(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def tail( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by the last `n` values along the specified dimension(s). Default `n` = 5 See Also -------- Dataset.tail DataArray.head DataArray.thin Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> da.tail(y=1) Size: 40B array([[ 4], [ 9], [14], [19], [24]]) Dimensions without coordinates: x, y >>> da.tail({"x": 2, "y": 2}) Size: 32B array([[18, 19], [23, 24]]) Dimensions without coordinates: x, y """ ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def thin( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by every `n`-th value along the specified dimension(s). Examples -------- >>> x_arr = np.arange(0, 26) >>> x_arr array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]) >>> x = xr.DataArray( ... np.reshape(x_arr, (2, 13)), ... dims=("x", "y"), ... coords={"x": [0, 1], "y": np.arange(0, 13)}, ... ) >>> x Size: 208B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 >>> >>> x.thin(3) Size: 40B array([[ 0, 3, 6, 9, 12]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) Size: 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 24B 0 5 10 See Also -------- Dataset.thin DataArray.head DataArray.tail """ ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def broadcast_like( self, other: T_DataArrayOrSet, *, exclude: Iterable[Hashable] | None = None, ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1]. xarray objects are broadcast against each other in arithmetic operations, so this method should not be necessary for most uses. If no change is needed, the input data is returned without being copied. If new coords are added by the broadcast, their values are NaN filled. Parameters ---------- other : Dataset or DataArray Object against which to broadcast this array. exclude : iterable of Hashable, optional Dimensions that must not be broadcast Returns ------- new_da : DataArray The caller broadcast against ``other``. Examples -------- >>> arr1 = xr.DataArray( ... np.random.randn(2, 3), ... dims=("x", "y"), ... coords={"x": ["a", "b"], "y": ["a", "b", "c"]}, ... ) >>> arr2 = xr.DataArray( ... np.random.randn(3, 2), ... dims=("x", "y"), ... coords={"x": ["a", "b", "c"], "y": ["a", "b"]}, ...
) >>> arr1 Size: 48B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788]]) Coordinates: * x (x) >> arr2 Size: 48B array([[ 0.95008842, -0.15135721], [-0.10321885, 0.4105985 ], [ 0.14404357, 1.45427351]]) Coordinates: * x (x) >> arr1.broadcast_like(arr2) Size: 72B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788], [ nan, nan, nan]]) Coordinates: * x (x) Self: """Callback called from ``Aligner`` to create a new reindexed DataArray.""" if isinstance(fill_value, dict): fill_value = fill_value.copy() sentinel = object() value = fill_value.pop(self.name, sentinel) if value is not sentinel: fill_value[_THIS_ARRAY] = value ds = self._to_temp_dataset() reindexed = ds._reindex_callback( aligner, dim_pos_indexers, variables, indexes, fill_value, exclude_dims, exclude_vars, ) da = self._from_temp_dataset(reindexed) da.encoding = self.encoding return da def reindex_like( self, other: T_DataArrayOrSet, *, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value=dtypes.NA, ) -> Self: """ Conform this object onto the indexes of another object, for indexes which the objects share. Missing values are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mismatched index values will be filled in with NaN, and any mismatched dimension names will simply be ignored. method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values from other not found on this data array: - None (default): don't fill gaps - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the indexโ€™s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. Use this data array's name to refer to the data array's values. Returns ------- reindexed : DataArray Another dataset array, with this array's data but coordinates from the other object. Examples -------- >>> data = np.arange(12).reshape(4, 3) >>> da1 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 >>> da2 = xr.DataArray( ... 
data=data, ... dims=["x", "y"], ... coords={"x": [40, 30, 20, 10], "y": [90, 80, 70]}, ... ) >>> da2 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 40 30 20 10 * y (y) int64 24B 90 80 70 Reindexing with both DataArrays having the same coordinates set, but in different order: >>> da1.reindex_like(da2) Size: 96B array([[11, 10, 9], [ 8, 7, 6], [ 5, 4, 3], [ 2, 1, 0]]) Coordinates: * x (x) int64 32B 40 30 20 10 * y (y) int64 24B 90 80 70 Reindexing with the other array having additional coordinates: >>> da3 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [20, 10, 29, 39], "y": [70, 80, 90]}, ... ) >>> da1.reindex_like(da3) Size: 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values with the previous valid index with respect to the coordinates' value: >>> da1.reindex_like(da3, method="ffill") Size: 96B array([[3, 4, 5], [0, 1, 2], [3, 4, 5], [6, 7, 8]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values while tolerating specified error for inexact matches: >>> da1.reindex_like(da3, method="ffill", tolerance=5) Size: 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values with manually specified values: >>> da1.reindex_like(da3, fill_value=19) Size: 96B array([[ 3, 4, 5], [ 0, 1, 2], [19, 19, 19], [19, 19, 19]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Note that unlike ``broadcast_like``, ``reindex_like`` doesn't create new dimensions: >>> da1.sel(x=20) Size: 24B array([3, 4, 5]) Coordinates: * y (y) int64 24B 70 80 90 x int64 8B 20 ...so ``b`` in not added here: >>> da1.sel(x=20).reindex_like(da1) Size: 24B array([3, 4, 5]) Coordinates: * y (y) int64 24B 70 80 90 x int64 8B 20 See Also -------- DataArray.reindex DataArray.broadcast_like align """ return alignment.reindex_like( self, other=other, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def reindex( self, indexers: Mapping[Any, Any] | None = None, *, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value=dtypes.NA, **indexers_kwargs: Any, ) -> Self: """Conform this object onto the indexes of another object, filling in missing values with ``fill_value``. The default fill value is NaN. Parameters ---------- indexers : dict, optional Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels. Any mismatched coordinate values will be filled in with NaN, and any mismatched dimension names will simply be ignored. One of indexers or indexers_kwargs must be provided. copy : bool, optional If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. 
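# --- Editor's illustrative sketch (not part of the xarray source): ``reindex`` onto new
# labels, with the NaN, ``fill_value`` and ``method="ffill"`` behaviour described above.
# Names are made up.
import numpy as np
import xarray as xr

_obs = xr.DataArray([10, 20, 30], dims="x", coords={"x": [1, 2, 3]})

_expanded = _obs.reindex(x=[1, 2, 3, 4])              # x=4 is new, so it becomes NaN
assert np.isnan(_expanded.sel(x=4).item())

_filled = _obs.reindex(x=[1, 2, 3, 4], fill_value=0)  # fill new labels with 0 instead
_padded = _obs.reindex(x=[1.5, 2.5], method="ffill")  # propagate the last valid value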
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional Method to use for filling index values in ``indexers`` not found on this data array: - None (default): don't fill gaps - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the indexโ€™s type. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. Use this data array's name to refer to the data array's values. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- reindexed : DataArray Another dataset array, with this array's data but replaced coordinates. Examples -------- Reverse latitude: >>> da = xr.DataArray( ... np.arange(4), ... coords=[np.array([90, 89, 88, 87])], ... dims="lat", ... ) >>> da Size: 32B array([0, 1, 2, 3]) Coordinates: * lat (lat) int64 32B 90 89 88 87 >>> da.reindex(lat=da.lat[::-1]) Size: 32B array([3, 2, 1, 0]) Coordinates: * lat (lat) int64 32B 87 88 89 90 See Also -------- DataArray.reindex_like align """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def interp( self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, **coords_kwargs: Any, ) -> Self: """ Interpolate a DataArray onto new coordinates. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordinates, their dimensions are used for the broadcasting. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing values. kwargs : dict-like or None, default: None Additional keyword arguments passed to scipy's interpolator. Valid options and their behavior depend whether ``interp1d`` or ``interpn`` is used. **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated : DataArray New dataarray on the new coordinates. Notes ----- - SciPy is required for certain interpolation methods. 
- When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :mod:`scipy.interpolate` :doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions` Tutorial material on manipulating data resolution using :py:func:`~xarray.DataArray.interp` Examples -------- >>> da = xr.DataArray( ... data=[[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]], ... dims=("x", "y"), ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> da Size: 96B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: * x (x) int64 24B 0 1 2 * y (y) int64 32B 10 12 14 16 1D linear interpolation (the default): >>> da.interp(x=[0, 0.75, 1.25, 1.75]) Size: 128B array([[1. , 4. , 2. , nan], [1.75, 6.25, 5. , nan], [3. , nan, 5.75, nan], [5. , nan, 5.25, nan]]) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 32B 10 12 14 16 1D nearest interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") Size: 128B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 32B 10 12 14 16 1D linear extrapolation: >>> da.interp( ... x=[1, 1.5, 2.5, 3.5], ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... ) Size: 128B array([[ 2. , 7. , 6. , nan], [ 4. , nan, 5.5, nan], [ 8. , nan, 4.5, nan], [12. , nan, 3.5, nan]]) Coordinates: * x (x) float64 32B 1.0 1.5 2.5 3.5 * y (y) int64 32B 10 12 14 16 2D linear interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") Size: 96B array([[2.5 , 3. , nan], [4. , 5.625, nan], [ nan, nan, nan], [ nan, nan, nan]]) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 24B 11 13 15 """ if self.dtype.kind not in "uifc": raise TypeError( f"interp only works for a numeric type array. Given {self.dtype}." 
) ds = self._to_temp_dataset().interp( coords, method=method, kwargs=kwargs, assume_sorted=assume_sorted, **coords_kwargs, ) return self._from_temp_dataset(ds) def interp_like( self, other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, ) -> Self: """Interpolate this object onto the coordinates of another object, filling out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolant. Returns ------- interpolated : DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. Notes ----- - scipy is required. - If the dataarray has object-type coordinates, reindex is used for these coordinates instead of the interpolation. - When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :func:`DataArray.interp` :func:`DataArray.reindex_like` :mod:`scipy.interpolate` Examples -------- >>> data = np.arange(12).reshape(4, 3) >>> da1 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 >>> da2 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... 
coords={"x": [10, 20, 29, 39], "y": [70, 80, 90]}, ... ) >>> da2 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 29 39 * y (y) int64 24B 70 80 90 Interpolate the values in the coordinates of the other DataArray with respect to the source's values: >>> da2.interp_like(da1) Size: 96B array([[0. , 1. , 2. ], [3. , 4. , 5. ], [6.3, 7.3, 8.3], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 Could also extrapolate missing values: >>> da2.interp_like(da1, kwargs={"fill_value": "extrapolate"}) Size: 96B array([[ 0. , 1. , 2. ], [ 3. , 4. , 5. ], [ 6.3, 7.3, 8.3], [ 9.3, 10.3, 11.3]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 """ if self.dtype.kind not in "uifc": raise TypeError( f"interp only works for a numeric type array. Given {self.dtype}." ) ds = self._to_temp_dataset().interp_like( other, method=method, kwargs=kwargs, assume_sorted=assume_sorted ) return self._from_temp_dataset(ds) def rename( self, new_name_or_name_dict: Hashable | Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new DataArray with renamed coordinates, dimensions or a new name. Parameters ---------- new_name_or_name_dict : str or dict-like, optional If the argument is dict-like, it used as a mapping from old names to new names for coordinates or dimensions. Otherwise, use the argument as the new name for this array. **names : Hashable, optional The keyword arguments form of a mapping from old names to new names for coordinates or dimensions. One of new_name_or_name_dict or names must be provided. Returns ------- renamed : DataArray Renamed array or array with renamed coordinates. See Also -------- Dataset.rename DataArray.swap_dims """ if new_name_or_name_dict is None and not names: # change name to None? return self._replace(name=None) if utils.is_dict_like(new_name_or_name_dict) or new_name_or_name_dict is None: # change dims/coords name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, "rename") dataset = self._to_temp_dataset()._rename(name_dict) return self._from_temp_dataset(dataset) if utils.hashable(new_name_or_name_dict) and names: # change name + dims/coords dataset = self._to_temp_dataset()._rename(names) dataarray = self._from_temp_dataset(dataset) return dataarray._replace(name=new_name_or_name_dict) # only change name return self._replace(name=new_name_or_name_dict) def swap_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs, ) -> Self: """Returns a new DataArray with swapped dimensions. Parameters ---------- dims_dict : dict-like Dictionary whose keys are current dimension names and whose values are new names. **dims_kwargs : {existing_dim: new_dim, ...}, optional The keyword arguments form of ``dims_dict``. One of dims_dict or dims_kwargs must be provided. Returns ------- swapped : DataArray DataArray with swapped dimensions. Examples -------- >>> arr = xr.DataArray( ... data=[0, 1], ... dims="x", ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... ) >>> arr Size: 16B array([0, 1]) Coordinates: * x (x) >> arr.swap_dims({"x": "y"}) Size: 16B array([0, 1]) Coordinates: * y (y) int64 16B 0 1 x (y) >> arr.swap_dims({"x": "z"}) Size: 16B array([0, 1]) Coordinates: x (z) Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. 
If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. The automatic creation of indexes to back new 1D coordinate variables controlled by the create_index_for_new_dim kwarg. Parameters ---------- dim : Hashable, sequence of Hashable, dict, or None, optional Dimensions to include on the new variable. If provided as str or sequence of str, then dimensions are inserted with length 1. If provided as a dict, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). axis : int, sequence of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a sequence of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. create_index_for_new_dim : bool, default: True Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``. **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the dim kwarg and will only be used if dim is None. Returns ------- expanded : DataArray This object, but with additional dimension(s). See Also -------- Dataset.expand_dims Examples -------- >>> da = xr.DataArray(np.arange(5), dims=("x")) >>> da Size: 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x Add new dimension of length 2: >>> da.expand_dims(dim={"y": 2}) Size: 80B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Dimensions without coordinates: y, x >>> da.expand_dims(dim={"y": 2}, axis=1) Size: 80B array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]) Dimensions without coordinates: x, y Add a new dimension with coordinates from array: >>> da.expand_dims(dim={"y": np.arange(5)}, axis=0) Size: 200B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Coordinates: * y (y) int64 40B 0 1 2 3 4 Dimensions without coordinates: x """ if isinstance(dim, int): raise TypeError("dim should be Hashable or sequence/mapping of Hashables") elif isinstance(dim, Sequence) and not isinstance(dim, str): if len(dim) != len(set(dim)): raise ValueError("dims should not contain duplicate values.") dim = dict.fromkeys(dim, 1) elif dim is not None and not isinstance(dim, Mapping): dim = {dim: 1} dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims") ds = self._to_temp_dataset().expand_dims( dim, axis, create_index_for_new_dim=create_index_for_new_dim ) return self._from_temp_dataset(ds) def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], ) -> Self: """Set DataArray (multi-)indexes using one or more existing coordinates. This legacy method is limited to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See :py:meth:`~DataArray.set_xindex` for setting a pandas or a custom Xarray-compatible index from one or more arbitrary coordinates. Parameters ---------- indexes : {dim: index, ...} Mapping from names matching dimensions and values given by (lists of) the names of existing coordinates or variables to set as new (multi-)index. 
append : bool, default: False If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es). **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. Returns ------- obj : DataArray Another DataArray, with this data but replaced coordinates. Examples -------- >>> arr = xr.DataArray( ... data=np.ones((2, 3)), ... dims=["x", "y"], ... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ... ) >>> arr Size: 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 24B 0 1 2 a (x) int64 16B 3 4 >>> arr.set_index(x="a") Size: 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: * x (x) int64 16B 3 4 * y (y) int64 24B 0 1 2 See Also -------- DataArray.reset_index DataArray.set_xindex """ ds = self._to_temp_dataset().set_index(indexes, append=append, **indexes_kwargs) return self._from_temp_dataset(ds) def reset_index( self, dims_or_levels: Hashable | Sequence[Hashable], drop: bool = False, ) -> Self: """Reset the specified index(es) or multi-index level(s). This legacy method is specific to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See the more generic :py:meth:`~DataArray.drop_indexes` and :py:meth:`~DataArray.set_xindex` method to respectively drop and set pandas or custom indexes for arbitrary coordinates. Parameters ---------- dims_or_levels : Hashable or sequence of Hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, default: False If True, remove the specified indexes and/or multi-index levels instead of extracting them as new coordinates (default: False). Returns ------- obj : DataArray Another dataarray, with this dataarray's data but replaced coordinates. See Also -------- DataArray.set_index DataArray.set_xindex DataArray.drop_indexes """ ds = self._to_temp_dataset().reset_index(dims_or_levels, drop=drop) return self._from_temp_dataset(ds) def set_xindex( self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). Parameters ---------- coord_names : str or list Name(s) of the coordinate(s) used to build the index. If several names are given, their order matters. index_cls : subclass of :class:`~xarray.indexes.Index` The type of index to create. By default, try setting a pandas (multi-)index from the supplied coordinates. **options Options passed to the index constructor. Returns ------- obj : DataArray Another dataarray, with this dataarray's data and with a new index. """ ds = self._to_temp_dataset().set_xindex(coord_names, index_cls, **options) return self._from_temp_dataset(ds) def reorder_levels( self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], ) -> Self: """Rearrange index levels using input order. Parameters ---------- dim_order dict-like of Hashable to int or Hashable: optional Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. **dim_order_kwargs : optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. Returns ------- obj : DataArray Another dataarray, with this dataarray's data but replaced coordinates. 
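Examples
--------
A minimal sketch, assuming a two-level pandas multi-index on ``x`` built with
:py:meth:`~xarray.Coordinates.from_pandas_multiindex`; only the level order
changes, not the underlying data:

>>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("one", "two"))
>>> coords = xr.Coordinates.from_pandas_multiindex(midx, "x")
>>> arr = xr.DataArray(np.arange(4), coords=coords, dims="x")
>>> reordered = arr.reorder_levels(x=["two", "one"])
>>> list(reordered.indexes["x"].names)
['two', 'one']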
""" ds = self._to_temp_dataset().reorder_levels(dim_order, **dim_order_kwargs) return self._from_temp_dataset(ds) @partial(deprecate_dims, old_name="dimensions") def stack( self, dim: Mapping[Any, Sequence[Hashable]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dim_kwargs: Sequence[Hashable | EllipsisType], ) -> Self: """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and the corresponding coordinate variables will be combined into a MultiIndex. Parameters ---------- dim : mapping of Hashable to sequence of Hashable Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. create_index : bool or None, default: True If True, create a multi-index for each of the stacked dimensions. If False, don't create any index. If None, create a multi-index only if exactly one single (1-d) coordinate index is found for every dimension to stack. index_cls: class, optional Can be used to pass a custom multi-index type. Must be an Xarray index that implements `.stack()`. By default, a pandas multi-index wrapper is used. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : DataArray DataArray with stacked data. Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> stacked = arr.stack(z=("x", "y")) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', 0), ('b', 1), ('b', 2)], name='z') See Also -------- DataArray.unstack """ ds = self._to_temp_dataset().stack( dim, create_index=create_index, index_cls=index_cls, **dim_kwargs, ) return self._from_temp_dataset(ds) def unstack( self, dim: Dims = None, *, fill_value: Any = dtypes.NA, sparse: bool = False, ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. New dimensions will be added at the end. Parameters ---------- dim : str, Iterable of Hashable or None, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. fill_value : scalar or dict-like, default: nan Value to be filled. If a dict-like, maps variable names to fill values. Use the data array's name to refer to its name. If not provided or if the dict-like does not contain all variables, the dtype's NA value will be used. sparse : bool, default: False Use sparse-array if True Returns ------- unstacked : DataArray Array with unstacked data. Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> stacked = arr.stack(z=("x", "y")) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', 0), ('b', 1), ('b', 2)], name='z') >>> roundtripped = stacked.unstack() >>> arr.identical(roundtripped) True See Also -------- DataArray.stack """ ds = self._to_temp_dataset().unstack(dim, fill_value=fill_value, sparse=sparse) return self._from_temp_dataset(ds) def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Dataset: """Unstack DataArray expanding to Dataset along a given level of a stacked coordinate. 
This is the inverse operation of Dataset.to_stacked_array. Parameters ---------- dim : Hashable Name of existing dimension to unstack level : int or Hashable, default: 0 The MultiIndex level to expand to a dataset along. Can either be the integer index of the level or its name. Returns ------- unstacked: Dataset Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> data = xr.Dataset({"a": arr, "b": arr.isel(y=0)}) >>> data Size: 96B Dimensions: (x: 2, y: 3) Coordinates: * x (x) >> stacked = data.to_stacked_array("z", ["x"]) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', nan)], name='z') >>> roundtripped = stacked.to_unstacked_dataset(dim="z") >>> data.identical(roundtripped) True See Also -------- Dataset.to_stacked_array """ idx = self._indexes[dim].to_pandas_index() if not isinstance(idx, pd.MultiIndex): raise ValueError(f"'{dim}' is not a stacked coordinate") level_number = idx._get_level_number(level) # type: ignore[attr-defined] variables = idx.levels[level_number] variable_dim = idx.names[level_number] # pull variables out of datarray data_dict = {} for k in variables: data_dict[k] = self.sel({variable_dim: k}, drop=True).squeeze(drop=True) # unstacked dataset return Dataset(data_dict) @deprecate_dims def transpose( self, *dim: Hashable, transpose_coords: bool = True, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new DataArray object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. transpose_coords : bool, default: True If True, also transpose the coordinates of this DataArray. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : DataArray The returned DataArray's array is transposed. Notes ----- This operation returns a view of this array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded. See Also -------- numpy.transpose Dataset.transpose """ if dim: dim = tuple(infix_dims(dim, self.dims, missing_dims)) variable = self.variable.transpose(*dim) if transpose_coords: coords: dict[Hashable, Variable] = {} for name, coord in self.coords.items(): coord_dims = tuple(d for d in dim if d in coord.dims) coords[name] = coord.variable.transpose(*coord_dims) return self._replace(variable, coords) else: return self._replace(variable) @property def T(self) -> Self: return self.transpose() def drop_vars( self, names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]], *, errors: ErrorOptions = "raise", ) -> Self: """Returns an array with dropped variables. Parameters ---------- names : Hashable or iterable of Hashable or Callable Name(s) of variables to drop. If a Callable, this object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the DataArray are dropped and no error is raised. Returns ------- dropped : Dataset New Dataset copied from `self` with variables removed. 
Examples ------- >>> data = np.arange(12).reshape(4, 3) >>> da = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 Removing a single variable: >>> da.drop_vars("x") Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * y (y) int64 24B 70 80 90 Dimensions without coordinates: x Removing a list of variables: >>> da.drop_vars(["x", "y"]) Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Dimensions without coordinates: x, y >>> da.drop_vars(lambda x: x.coords) Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Dimensions without coordinates: x, y """ if callable(names): names = names(self) ds = self._to_temp_dataset().drop_vars(names, errors=errors) return self._from_temp_dataset(ds) def drop_indexes( self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters ---------- coord_names : hashable or iterable of hashable Name(s) of the coordinate(s) for which to drop the index. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the coordinates passed have no index or are not in the dataset. If 'ignore', no error is raised. Returns ------- dropped : DataArray A new dataarray with dropped indexes. """ ds = self._to_temp_dataset().drop_indexes(coord_names, errors=errors) return self._from_temp_dataset(ds) def drop( self, labels: Mapping[Any, Any] | None = None, dim: Hashable | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged See Also -------- DataArray.drop_vars DataArray.drop_sel """ ds = self._to_temp_dataset().drop(labels, dim, errors=errors, **labels_kwargs) return self._from_temp_dataset(ds) def drop_sel( self, labels: Mapping[Any, Any] | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Drop index labels from this DataArray. Parameters ---------- labels : mapping of Hashable to Any Index labels to drop errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. **labels_kwargs : {dim: label, ...}, optional The keyword arguments form of ``dim`` and ``labels`` Returns ------- dropped : DataArray Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... coords={"x": np.arange(0, 9, 2), "y": np.arange(0, 13, 3)}, ... dims=("x", "y"), ... 
) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 2 4 6 8 * y (y) int64 40B 0 3 6 9 12 >>> da.drop_sel(x=[0, 2], y=9) Size: 96B array([[10, 11, 12, 14], [15, 16, 17, 19], [20, 21, 22, 24]]) Coordinates: * x (x) int64 24B 4 6 8 * y (y) int64 32B 0 3 6 12 >>> da.drop_sel({"x": 6, "y": [0, 3]}) Size: 96B array([[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14], [22, 23, 24]]) Coordinates: * x (x) int64 32B 0 2 4 8 * y (y) int64 24B 6 9 12 """ if labels_kwargs or isinstance(labels, dict): labels = either_dict_or_kwargs(labels, labels_kwargs, "drop") ds = self._to_temp_dataset().drop_sel(labels, errors=errors) return self._from_temp_dataset(ds) def drop_isel( self, indexers: Mapping[Any, Any] | None = None, **indexers_kwargs ) -> Self: """Drop index positions from this DataArray. Parameters ---------- indexers : mapping of Hashable to Any or None, default: None Index locations to drop **indexers_kwargs : {dim: position, ...}, optional The keyword arguments form of ``dim`` and ``positions`` Returns ------- dropped : DataArray Raises ------ IndexError Examples -------- >>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("X", "Y")) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: X, Y >>> da.drop_isel(X=[0, 4], Y=2) Size: 96B array([[ 5, 6, 8, 9], [10, 11, 13, 14], [15, 16, 18, 19]]) Dimensions without coordinates: X, Y >>> da.drop_isel({"X": 3, "Y": 3}) Size: 128B array([[ 0, 1, 2, 4], [ 5, 6, 7, 9], [10, 11, 12, 14], [20, 21, 22, 24]]) Dimensions without coordinates: X, Y """ dataset = self._to_temp_dataset() dataset = dataset.drop_isel(indexers=indexers, **indexers_kwargs) return self._from_temp_dataset(dataset) def dropna( self, dim: Hashable, *, how: Literal["any", "all"] = "any", thresh: int | None = None, ) -> Self: """Returns a new array with dropped labels for missing values along the provided dimension. Parameters ---------- dim : Hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. how : {"any", "all"}, default: "any" - any : if any NA values are present, drop that label - all : if all values are NA, drop that label thresh : int or None, default: None If supplied, require this many non-NA values. Returns ------- dropped : DataArray Examples -------- >>> temperature = [ ... [0, 4, 2, 9], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 4, 2, 0], ... [3, 1, 0, 0], ... ] >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75])), ... lon=("X", np.array([10.0, 10.25, 10.5, 10.75])), ... ), ... 
) >>> da Size: 128B array([[ 0., 4., 2., 9.], [nan, nan, nan, nan], [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: lat (Y) float64 32B -20.0 -20.25 -20.5 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X >>> da.dropna(dim="Y", how="any") Size: 64B array([[0., 4., 2., 9.], [3., 1., 0., 0.]]) Coordinates: lat (Y) float64 16B -20.0 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X Drop values only if all values along the dimension are NaN: >>> da.dropna(dim="Y", how="all") Size: 96B array([[ 0., 4., 2., 9.], [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: lat (Y) float64 24B -20.0 -20.5 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X """ ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh) return self._from_temp_dataset(ds) def fillna(self, value: Any) -> Self: """Fill missing values in this object. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value : scalar, ndarray or DataArray Used to fill all matching missing values in this array. If the argument is a DataArray, it is first aligned with (reindexed to) this array. Returns ------- filled : DataArray Examples -------- >>> da = xr.DataArray( ... np.array([1, 4, np.nan, 0, 3, np.nan]), ... dims="Z", ... coords=dict( ... Z=("Z", np.arange(6)), ... height=("Z", np.array([0, 10, 20, 30, 40, 50])), ... ), ... ) >>> da Size: 48B array([ 1., 4., nan, 0., 3., nan]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 Fill all NaN values with 0: >>> da.fillna(0) Size: 48B array([1., 4., 0., 0., 3., 0.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 Fill NaN values with corresponding values in array: >>> da.fillna(np.array([2, 9, 4, 2, 8, 9])) Size: 48B array([1., 4., 4., 0., 3., 9.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 """ if utils.is_dict_like(value): raise TypeError( "cannot provide fill value as a dictionary with fillna on a DataArray" ) out = ops.fillna(self, value) return out def interpolate_na( self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, use_coordinate: bool | str = True, max_gap: ( None | int | float | str | pd.Timedelta | np.timedelta64 | datetime.timedelta ) = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters ---------- dim : Hashable or None, optional Specifies the dimension along which to interpolate. method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \ "barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear" String indicating which method to use for interpolation: - 'linear': linear interpolation. Additional keyword arguments are passed to :py:func:`numpy.interp` - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial': are passed to :py:func:`scipy.interpolate.interp1d`. If ``method='polynomial'``, the ``order`` keyword argument must also be provided. - 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. 
use_coordinate : bool or str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if equally-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variable to use as the index. limit : int or None, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: - a string that is valid input for pandas.to_timedelta - a :py:class:`numpy.timedelta64` object - a :py:class:`pandas.Timedelta` object - a :py:class:`datetime.timedelta` object Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled dimensions has not been implemented yet. Gap length is defined as the difference between coordinate values at the first data point after a gap and the last value before a gap. For gaps at the beginning (end), gap length is defined as the difference between coordinate values at the first (last) valid data point and the first (last) NaN. For example, consider:: array([nan, nan, nan, 1., nan, nan, 4., nan, nan]) Coordinates: * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively keep_attrs : bool or None, default: None If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : dict, optional parameters passed verbatim to the underlying interpolation function Returns ------- interpolated: DataArray Filled in DataArray. See Also -------- numpy.interp scipy.interpolate Examples -------- >>> da = xr.DataArray( ... [np.nan, 2, 3, np.nan, 0], dims="x", coords={"x": [0, 1, 2, 3, 4]} ... ) >>> da Size: 40B array([nan, 2., 3., nan, 0.]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear") Size: 40B array([nan, 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear", fill_value="extrapolate") Size: 40B array([1. , 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 """ from xarray.core.missing import interp_na return interp_na( self, dim=dim, method=method, limit=limit, use_coordinate=use_coordinate, max_gap=max_gap, keep_attrs=keep_attrs, **kwargs, ) def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, default: None The maximum number of consecutive NaN values to forward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Returns ------- filled : DataArray Examples -------- >>> temperature = np.array( ... [ ... [np.nan, 1, 3], ... [0, np.nan, 5], ... 
[5, np.nan, np.nan], ... [3, np.nan, np.nan], ... [0, 2, 0], ... ] ... ) >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])), ... lon=("X", np.array([10.0, 10.25, 10.5])), ... ), ... ) >>> da Size: 120B array([[nan, 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: >>> da.ffill(dim="Y", limit=None) Size: 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., 1., 5.], [ 3., 1., 5.], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first of consecutive NaN values: >>> da.ffill(dim="Y", limit=1) Size: 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., nan, 5.], [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import ffill return ffill(self, dim, limit=limit) def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* Parameters ---------- dim : str Specifies the dimension along which to propagate values when filling. limit : int or None, default: None The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Returns ------- filled : DataArray Examples -------- >>> temperature = np.array( ... [ ... [0, 1, 3], ... [0, np.nan, 5], ... [5, np.nan, np.nan], ... [3, np.nan, np.nan], ... [np.nan, 2, 0], ... ] ... ) >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])), ... lon=("X", np.array([10.0, 10.25, 10.5])), ... ), ... ) >>> da Size: 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., nan, nan], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: >>> da.bfill(dim="Y", limit=None) Size: 120B array([[ 0., 1., 3.], [ 0., 2., 5.], [ 5., 2., 0.], [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first of consecutive NaN values: >>> da.bfill(dim="Y", limit=1) Size: 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import bfill return bfill(self, dim, limit=limit) def combine_first(self, other: Self) -> Self: """Combine two DataArray objects, with union of coordinates. This operation follows the normal broadcasting and alignment rules of ``join='outer'``. Default to non-null values of array calling the method. Use np.nan to fill in vacant cells after alignment. Parameters ---------- other : DataArray Used to fill all matching missing values in this array. 
Returns ------- DataArray """ return ops.fillna(self, other, join="outer") def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or sequence of int, optional Axis(es) over which to repeatedly apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `f(x)` without an axis argument). keep_attrs : bool or None, optional If True (default), the variable's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray DataArray with this object's array replaced with an array with summarized data and the indicated dimension(s) removed. """ var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs) return self._replace_maybe_drop_dims(var) def to_pandas(self) -> Self | pd.Series | pd.DataFrame: """Convert this array into a pandas object with the same shape. The type of the returned object depends on the number of DataArray dimensions: * 0D -> `xarray.DataArray` * 1D -> `pandas.Series` * 2D -> `pandas.DataFrame` Only works for arrays with 2 or fewer dimensions. The DataArray constructor performs the inverse transformation. Returns ------- result : DataArray | Series | DataFrame DataArray, pandas Series or pandas DataFrame. """ # TODO: consolidate the info about pandas constructors and the # attributes that correspond to their indexes into a separate module? constructors: dict[int, Callable] = { 0: lambda x: x, 1: pd.Series, 2: pd.DataFrame, } try: constructor = constructors[self.ndim] except KeyError as err: raise ValueError( f"Cannot convert arrays with {self.ndim} dimensions into " "pandas objects. Requires 2 or fewer dimensions." ) from err indexes = [self.get_index(dim) for dim in self.dims] if isinstance(self._variable._data, PandasExtensionArray): values = self._variable._data.array else: values = self.values pandas_object = constructor(values, *indexes) if isinstance(pandas_object, pd.Series): pandas_object.name = self.name return pandas_object def to_dataframe( self, name: Hashable | None = None, dim_order: Sequence[Hashable] | None = None ) -> pd.DataFrame: """Convert this array and its coordinates into a tidy pandas.DataFrame. The DataFrame is indexed by the Cartesian product of index coordinates (in the form of a :py:class:`pandas.MultiIndex`). Other coordinates are included as columns in the DataFrame. For 1D and 2D DataArrays, see also :py:func:`DataArray.to_pandas` which doesn't rely on a MultiIndex to build the DataFrame. Parameters ---------- name: Hashable or None, optional Name to give to this array (required if unnamed). 
dim_order: Sequence of Hashable or None, optional Hierarchical dimension order for the resulting dataframe. Array content is transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dataframe. If provided, must include all dimensions of this DataArray. By default, dimensions are sorted according to the DataArray dimensions order. Returns ------- result: DataFrame DataArray as a pandas DataFrame. See also -------- DataArray.to_pandas DataArray.to_series """ if name is None: name = self.name if name is None: raise ValueError( "cannot convert an unnamed DataArray to a " "DataFrame: use the ``name`` parameter" ) if self.ndim == 0: raise ValueError("cannot convert a scalar to a DataFrame") # By using a unique name, we can convert a DataArray into a DataFrame # even if it shares a name with one of its coordinates. # I would normally use unique_name = object() but that results in a # dataframe with columns in the wrong order, for reasons I have not # been able to debug (possibly a pandas bug?). unique_name = "__unique_name_identifier_z98xfz98xugfg73ho__" ds = self._to_dataset_whole(name=unique_name) if dim_order is None: ordered_dims = dict(zip(self.dims, self.shape, strict=True)) else: ordered_dims = ds._normalize_dim_order(dim_order=dim_order) df = ds._to_dataframe(ordered_dims) df.columns = [name if c == unique_name else c for c in df.columns] return df def to_series(self) -> pd.Series: """Convert this array into a pandas.Series. The Series is indexed by the Cartesian product of index coordinates (in the form of a :py:class:`pandas.MultiIndex`). Returns ------- result : Series DataArray as a pandas Series. See also -------- DataArray.to_pandas DataArray.to_dataframe """ index = self.coords.to_index() return pd.Series(self.values.reshape(-1), index=index, name=self.name) def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray: """Convert this array into a numpy.ma.MaskedArray Parameters ---------- copy : bool, default: True If True make a copy of the array in the result. If False, a MaskedArray view of DataArray.values is returned. Returns ------- result : MaskedArray Masked where invalid values (nan or inf) occur. """ values = self.to_numpy() # only compute lazy arrays once isnull = pd.isnull(values) return np.ma.MaskedArray(data=values, mask=isnull, copy=copy) # path=None writes to bytes @overload def to_netcdf( self, path: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... 
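# Illustrative calls for the ``to_netcdf`` overloads (a minimal sketch;
# "t.nc" is a hypothetical path, dask is assumed to be installed for the
# ``compute=False`` form, and an engine such as netCDF4 or h5netcdf is
# assumed to be available):
#
#     da = xr.DataArray([1.0, 2.0, 3.0], dims="x", name="t")
#     buf = da.to_netcdf()                     # path=None -> in-memory memoryview
#     da.to_netcdf("t.nc")                     # write to disk, returns None
#     delayed = da.chunk().to_netcdf("t.nc", compute=False)  # dask.delayed.Delayed
#     delayed.compute()                        # perform the deferred write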
# default return None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... def to_netcdf( self, path: str | PathLike | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview | Delayed | None: """Write DataArray contents to a netCDF file. Parameters ---------- path : str, path-like, file-like or None, optional Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None (default) to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. engine : {"netcdf4", "h5netcdf", "scipy"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). 
encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}`` The `h5netcdf` engine supports both the NetCDF4-style compression encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py ones ``{"compression": "gzip", "compression_opts": 9}``. This allows using any compression plugin installed in the HDF5 library, e.g. LZF. unlimited_dims : iterable of Hashable, optional Dimension(s) that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. invalid_netcdf: bool, default: False Only valid along with ``engine="h5netcdf"``. If True, allow writing hdf5 files which are invalid netcdf as described in https://github.com/h5netcdf/h5netcdf. Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * None otherwise Notes ----- Only xarray.Dataset objects can be written to netCDF files, so the xarray.DataArray is converted to a xarray.Dataset object containing a single variable. If the DataArray has no name, or if the name is the same as a coordinate name, then it is given the name ``"__xarray_dataarray_variable__"``. [netCDF4 backend only] netCDF4 enums are decoded into the dataarray dtype metadata. See Also -------- Dataset.to_netcdf """ from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE from xarray.backends.writers import to_netcdf if self.name is None: # If no name is set then use a generic xarray name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) elif self.name in self.coords or self.name in self.dims: # The name is the same as one of the coords names, which netCDF # doesn't support, so rename it but keep track of the old name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) dataset.attrs[DATAARRAY_NAME] = self.name else: # No problems with the name - so we're fine! dataset = self.to_dataset() return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( dataset, path, mode=mode, format=format, group=group, engine=engine, encoding=encoding, unlimited_dims=unlimited_dims, compute=compute, multifile=False, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) # compute=True (default) returns ZarrStore @overload def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, *, encoding: Mapping | None = None, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore: ... 
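# Illustrative calls for the ``to_zarr`` overloads (a minimal sketch;
# "t.zarr" is a hypothetical path, the zarr package is assumed to be
# installed, and dask is assumed for the ``compute=False`` form):
#
#     da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"), name="t")
#     store = da.to_zarr("t.zarr", mode="w")              # eager write -> ZarrStore
#     delayed = da.chunk({"x": 1}).to_zarr("t.zarr", mode="w", compute=False)
#     delayed.compute()                                   # write the array data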
# compute=False returns dask.Delayed @overload def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Write DataArray contents to a Zarr store Zarr chunks are determined in the following way: - From the ``chunks`` attribute in each variable's ``encoding`` (can be set via `DataArray.chunk`). - If the variable is a Dask array, from the dask chunks - If neither Dask chunks nor encoding chunks are present, chunks will be determined automatically by Zarr - If both Dask chunks and encoding chunks are present, encoding chunks will be used, provided that there is a many-to-one relationship between encoding chunks and dask chunks (i.e. Dask chunks are bigger than and evenly divide encoding chunks); otherwise raise a ``ValueError``. This restriction ensures that no synchronization / locks are required when writing. To disable this restriction, use ``safe_chunks=False``. Parameters ---------- store : zarr.storage.StoreLike, optional Store or path to directory in local or remote file system. chunk_store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system only for Zarr array chunks. Requires zarr-python v2.4.0 or later. mode : {"w", "w-", "a", "a-", r+", None}, optional Persistence mode: "w" means create (overwrite if exists); "w-" means create (fail if exists); "a" means override all existing variables including dimension coordinates (create if does not exist); "a-" means only append those variables that have ``append_dim``. "r+" means modify existing array *values* only (raise an error if any metadata or shapes would change). The default mode is "a" if ``append_dim`` is set. Otherwise, it is "r+" if ``region`` is set and ``w-`` otherwise. synchronizer : object, optional Zarr array synchronizer. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}`` compute : bool, default: True If True write array data immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed to write array data later. 
Metadata is always updated eagerly. consolidated : bool, optional If True, apply zarr's `consolidate_metadata` function to the store after writing metadata and read existing stores with consolidated metadata; if False, do not. The default (`consolidated=None`) means write consolidated metadata and attempt to read consolidated metadata for existing stores (falling back to non-consolidated). When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. append_dim : hashable, optional If set, the dimension along which the data will be appended. All other dimensions on overridden variables must remain the same size. region : dict, optional Optional mapping from dimension names to integer slices along dataarray dimensions to indicate the region of existing zarr array(s) in which to write this datarray's data. For example, ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate that values should be written to the region ``0:1000`` along ``x`` and ``10000:11000`` along ``y``. Two restrictions apply to the use of ``region``: - If ``region`` is set, _all_ variables in a dataarray must have at least one dimension in common with the region. Other variables should be written in a separate call to ``to_zarr()``. - Dimensions cannot be included in both ``region`` and ``append_dim`` at the same time. To create empty arrays to fill in with ``region``, use a separate call to ``to_zarr()`` with ``compute=False``. See "Modifying existing Zarr stores" in the reference documentation for full details. Users are expected to ensure that the specified region aligns with Zarr chunk boundaries, and that dask chunks are also aligned. Xarray makes limited checks that these multiple chunk boundaries line up. It is possible to write incomplete chunks and corrupt the data with this option if you are not careful. safe_chunks : bool, default: True If True, only allow writes to when there is a many-to-one relationship between Zarr chunks (specified in encoding) and Dask chunks. Set False to override this restriction; however, data may become corrupted if Zarr arrays are written in parallel. This option may be useful in combination with ``compute=False`` to initialize a Zarr store from an existing DataArray with arbitrary chunk structure. In addition to the many-to-one relationship validation, it also detects partial chunks writes when using the region parameter, these partial chunks are considered unsafe in the mode "r+" but safe in the mode "a". Note: Even with these validations it can still be unsafe to write two or more chunked arrays in the same location in parallel if they are not writing in independent regions, for those cases it is better to use a synchronizer. align_chunks: bool, default False If True, rechunks the Dask array to align with Zarr chunks before writing. This ensures each Dask chunk maps to one or more contiguous Zarr chunks, which avoids race conditions. Internally, the process sets safe_chunks=False and tries to preserve the original Dask chunking as much as possible. Note: While this alignment avoids write conflicts stemming from chunk boundary misalignment, it does not protect against race conditions if multiple uncoordinated processes write to the same Zarr array concurrently. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. 
zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. write_empty_chunks : bool or None, optional If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. If None (default) fall back to specification(s) in ``encoding`` or Zarr defaults. A ``ValueError`` will be raised if the value of this (if not None) differs with ``encoding``. chunkmanager_store_kwargs : dict, optional Additional keyword arguments passed on to the `ChunkManager.store` method used to store chunked arrays. For example for a dask array additional kwargs will be passed eventually to :py:func:`dask.array.store()`. Experimental API that should not be relied upon. Returns ------- * ``dask.delayed.Delayed`` if compute is False * ZarrStore otherwise References ---------- https://zarr.readthedocs.io/ Notes ----- Zarr chunking behavior: If chunks are found in the encoding argument or attribute corresponding to any DataArray, those chunks are used. If a DataArray is a dask array, it is written with those chunks. If not other chunks are found, Zarr uses its own heuristics to choose automatic chunk sizes. encoding: The encoding attribute (if exists) of the DataArray(s) will be used. Override any existing encodings by providing the ``encoding`` kwarg. ``fill_value`` handling: There exists a subtlety in interpreting zarr's ``fill_value`` property. For zarr v2 format arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used to mask the data if requested using ``mask_and_scale=True``. See this `Github issue `_ for more. See Also -------- Dataset.to_zarr :ref:`io.zarr` The I/O user guide, with more details and examples. """ from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE from xarray.backends.writers import to_zarr if self.name is None: # If no name is set then use a generic xarray name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) elif self.name in self.coords or self.name in self.dims: # The name is the same as one of the coords names, which the netCDF data model # does not support, so rename it but keep track of the old name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) dataset.attrs[DATAARRAY_NAME] = self.name else: # No problems with the name - so we're fine! 
dataset = self.to_dataset() return to_zarr( # type: ignore[call-overload,misc] dataset, store=store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, encoding=encoding, compute=compute, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, chunkmanager_store_kwargs=chunkmanager_store_kwargs, ) def to_dict( self, data: bool | Literal["list", "array"] = "list", encoding: bool = False ) -> dict[str, Any]: """ Convert this xarray.DataArray into a dictionary following xarray naming conventions. Converts all variables and attributes to native Python objects. Useful for converting to json. To avoid datetime incompatibility use decode_times=False kwarg in xarray.open_dataset. Parameters ---------- data : bool or {"list", "array"}, default: "list" Whether to include the actual data in the dictionary. When set to False, returns just the schema. If set to "array", returns data as underlying array type. If set to "list" (or True for backwards compatibility), returns data in lists of Python data types. Note that for obtaining the "list" output efficiently, use `da.compute().to_dict(data="list")`. encoding : bool, default: False Whether to include the Dataset's encoding in the dictionary. Returns ------- dict: dict See Also -------- DataArray.from_dict Dataset.to_dict """ d = self.variable.to_dict(data=data) d.update({"coords": {}, "name": self.name}) for k, coord in self.coords.items(): d["coords"][k] = coord.variable.to_dict(data=data) if encoding: d["encoding"] = dict(self.encoding) return d @classmethod def from_dict(cls, d: Mapping[str, Any]) -> Self: """Convert a dictionary into an xarray.DataArray Parameters ---------- d : dict Mapping with a minimum structure of {"dims": [...], "data": [...]} Returns ------- obj : xarray.DataArray See Also -------- DataArray.to_dict Dataset.from_dict Examples -------- >>> d = {"dims": "t", "data": [1, 2, 3]} >>> da = xr.DataArray.from_dict(d) >>> da Size: 24B array([1, 2, 3]) Dimensions without coordinates: t >>> d = { ... "coords": { ... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}} ... }, ... "attrs": {"title": "air temperature"}, ... "dims": "t", ... "data": [10, 20, 30], ... "name": "a", ... } >>> da = xr.DataArray.from_dict(d) >>> da Size: 24B array([10, 20, 30]) Coordinates: * t (t) int64 24B 0 1 2 Attributes: title: air temperature """ coords = None if "coords" in d: try: coords = { k: (v["dims"], v["data"], v.get("attrs")) for k, v in d["coords"].items() } except KeyError as e: raise ValueError( f"cannot convert dict when coords are missing the key '{e.args[0]}'" ) from e try: data = d["data"] except KeyError as err: raise ValueError("cannot convert dict without the key 'data''") from err else: obj = cls(data, coords, d.get("dims"), d.get("name"), d.get("attrs")) obj.encoding.update(d.get("encoding", {})) return obj @classmethod def from_series(cls, series: pd.Series, sparse: bool = False) -> DataArray: """Convert a pandas.Series into an xarray.DataArray. If the series's index is a MultiIndex, it will be expanded into a tensor product of one-dimensional coordinates (filling in missing values with NaN). Thus this operation should be the inverse of the `to_series` method. Parameters ---------- series : Series Pandas Series object to convert. 
sparse : bool, default: False If sparse=True, creates a sparse array instead of a dense NumPy array. Requires the pydata/sparse package. See Also -------- DataArray.to_series Dataset.from_dataframe """ temp_name = "__temporary_name" df = pd.DataFrame({temp_name: series}) ds = Dataset.from_dataframe(df, sparse=sparse) result = ds[temp_name] result.name = series.name return result def to_iris(self) -> iris_Cube: """Convert this array into an iris.cube.Cube""" from xarray.convert import to_iris return to_iris(self) @classmethod def from_iris(cls, cube: iris_Cube) -> Self: """Convert an iris.cube.Cube into an xarray.DataArray""" from xarray.convert import from_iris return from_iris(cube) def _all_compat(self, other: Self, compat_str: str) -> bool: """Helper function for equals, broadcast_equals, and identical""" def compat(x, y): return getattr(x.variable, compat_str)(y.variable) return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat( self, other ) def broadcast_equals(self, other: Self) -> bool: """Two DataArrays are broadcast equal if they are equal after broadcasting them against each other such that they have the same dimensions. Parameters ---------- other : DataArray DataArray to compare to. Returns ---------- equal : bool True if the two DataArrays are broadcast equal. See Also -------- DataArray.equals DataArray.identical Examples -------- >>> a = xr.DataArray([1, 2], dims="X") >>> b = xr.DataArray([[1, 1], [2, 2]], dims=["X", "Y"]) >>> a Size: 16B array([1, 2]) Dimensions without coordinates: X >>> b Size: 32B array([[1, 1], [2, 2]]) Dimensions without coordinates: X, Y .equals returns True if two DataArrays have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two DataArrays against each other have the same values, dimensions, and coordinates. >>> a.equals(b) False >>> a2, b2 = xr.broadcast(a, b) >>> a2.equals(b2) True >>> a.broadcast_equals(b) True """ try: return self._all_compat(other, "broadcast_equals") except (TypeError, AttributeError): return False def equals(self, other: Self) -> bool: """True if two DataArrays have the same dimensions, coordinates and values; otherwise False. DataArrays can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for ``DataArray`` does element-wise comparisons (like numpy.ndarrays). Parameters ---------- other : DataArray DataArray to compare to. Returns ---------- equal : bool True if the two DataArrays are equal. See Also -------- DataArray.broadcast_equals DataArray.identical Examples -------- >>> a = xr.DataArray([1, 2, 3], dims="X") >>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m")) >>> c = xr.DataArray([1, 2, 3], dims="Y") >>> d = xr.DataArray([3, 2, 1], dims="X") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: X >>> b Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c Size: 24B array([1, 2, 3]) Dimensions without coordinates: Y >>> d Size: 24B array([3, 2, 1]) Dimensions without coordinates: X >>> a.equals(b) True >>> a.equals(c) False >>> a.equals(d) False """ try: return self._all_compat(other, "equals") except (TypeError, AttributeError): return False def identical(self, other: Self) -> bool: """Like equals, but also checks the array name and attributes, and attributes on all coordinates. Parameters ---------- other : DataArray DataArray to compare to. 
Returns ---------- equal : bool True if the two DataArrays are identical. See Also -------- DataArray.broadcast_equals DataArray.equals Examples -------- >>> a = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width") >>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width") >>> c = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="ft"), name="Width") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> b Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: ft >>> a.equals(b) True >>> a.identical(b) True >>> a.equals(c) True >>> a.identical(c) False """ try: return self.name == other.name and self._all_compat(other, "identical") except (TypeError, AttributeError): return False def __array_wrap__(self, obj, context=None, return_scalar=False) -> Self: new_var = self.variable.__array_wrap__(obj, context, return_scalar) return self._replace(new_var) def __matmul__(self, obj: T_Xarray) -> T_Xarray: return self.dot(obj) def __rmatmul__(self, other: T_Xarray) -> T_Xarray: # currently somewhat duplicative, as only other DataArrays are # compatible with matmul return computation.dot(other, self) def _unary_op(self, f: Callable, *args, **kwargs) -> Self: keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) with np.errstate(all="ignore"): da = self.__array_wrap__(f(self.variable.data, *args, **kwargs)) if keep_attrs: da.attrs = self.attrs return da def _binary_op( self, other: DaCompatible, f: Callable, reflexive: bool = False ) -> Self: from xarray.core.datatree import DataTree from xarray.core.groupby import GroupBy if isinstance(other, DataTree | Dataset | GroupBy): return NotImplemented if isinstance(other, DataArray): align_type = OPTIONS["arithmetic_join"] self, other = align(self, other, join=align_type, copy=False) other_variable_or_arraylike: DaCompatible = getattr(other, "variable", other) other_coords = getattr(other, "coords", None) variable = ( f(self.variable, other_variable_or_arraylike) if not reflexive else f(other_variable_or_arraylike, self.variable) ) coords, indexes = self.coords._merge_raw(other_coords, reflexive) name = result_name([self, other]) return self._replace(variable, coords, name, indexes=indexes) def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a DataArray and " "a grouped object are not permitted" ) # n.b. we can't align other to self (with other.reindex_like(self)) # because `other` may be converted into floats, which would cause # in-place arithmetic to fail unpredictably. Instead, we simply # don't support automatic alignment with in-place arithmetic. other_coords = getattr(other, "coords", None) other_variable = getattr(other, "variable", other) try: with self.coords._merge_inplace(other_coords): f(self.variable, other_variable) except MergeError as exc: raise MergeError( "Automatic alignment is not supported for in-place operations.\n" "Consider aligning the indices manually or using a not-in-place operation.\n" "See https://github.com/pydata/xarray/issues/3910 for more explanations." 
) from exc return self def _copy_attrs_from(self, other: DataArray | Dataset | Variable) -> None: self.attrs = other.attrs plot = utils.UncachedAccessor(DataArrayPlotAccessor) def _title_for_slice(self, truncate: int = 50) -> str: """ If the dataarray has 1 dimensional coordinates or comes from a slice we can show that info in the title Parameters ---------- truncate : int, default: 50 maximum number of characters for title Returns ------- title : string Can be used for plot titles """ one_dims = [] for dim, coord in self.coords.items(): if coord.size == 1: one_dims.append( f"{dim} = {format_item(coord.values)}{_get_units_from_attrs(coord)}" ) title = ", ".join(one_dims) if len(title) > truncate: title = title[: (truncate - 3)] + "..." return title def diff( self, dim: Hashable, n: int = 1, *, label: Literal["upper", "lower"] = "upper", ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters ---------- dim : Hashable Dimension over which to calculate the finite difference. n : int, default: 1 The number of times values are differenced. label : {"upper", "lower"}, default: "upper" The new coordinate in dimension ``dim`` will have the values of either the minuend's or subtrahend's coordinate for values 'upper' and 'lower', respectively. Returns ------- difference : DataArray The n-th order finite difference of this object. Notes ----- `n` matches numpy's behavior and is different from pandas' first argument named `periods`. Examples -------- >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ["x"]) >>> arr.diff("x") Size: 24B array([0, 1, 0]) Coordinates: * x (x) int64 24B 2 3 4 >>> arr.diff("x", 2) Size: 16B array([ 1, -1]) Coordinates: * x (x) int64 16B 3 4 See Also -------- DataArray.differentiate """ ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label) return self._from_temp_dataset(ds) def shift( self, shifts: Mapping[Any, int] | None = None, fill_value: Any = dtypes.NA, **shifts_kwargs: int, ) -> Self: """Shift this DataArray by an offset along one or more dimensions. Only the data is moved; coordinates stay in place. This is consistent with the behavior of ``shift`` in pandas. Values shifted from beyond array bounds will appear at one end of each dimension, which are filled according to `fill_value`. For periodic offsets instead see `roll`. Parameters ---------- shifts : mapping of Hashable to int or None, optional Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value : scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : DataArray DataArray with the same coordinates and attributes but shifted data. See Also -------- roll Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.shift(x=1) Size: 24B array([nan, 5., 6.]) Dimensions without coordinates: x """ variable = self.variable.shift( shifts=shifts, fill_value=fill_value, **shifts_kwargs ) return self._replace(variable=variable) def roll( self, shifts: Mapping[Hashable, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, ) -> Self: """Roll this array by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not create any missing values to be filled. Unlike shift, roll may rotate all variables, including coordinates if specified. 
The direction of rotation is consistent with :py:func:`numpy.roll`. Parameters ---------- shifts : mapping of Hashable to int, optional Integer offset to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. roll_coords : bool, default: False Indicates whether to roll the coordinates by the offset too. **shifts_kwargs : {dim: offset, ...}, optional The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- rolled : DataArray DataArray with the same attributes but rolled data and coordinates. See Also -------- shift Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.roll(x=1) Size: 24B array([7, 5, 6]) Dimensions without coordinates: x """ ds = self._to_temp_dataset().roll( shifts=shifts, roll_coords=roll_coords, **shifts_kwargs ) return self._from_temp_dataset(ds) @property def real(self) -> Self: """ The real part of the array. See Also -------- numpy.ndarray.real """ return self._replace(self.variable.real) @property def imag(self) -> Self: """ The imaginary part of the array. See Also -------- numpy.ndarray.imag """ return self._replace(self.variable.imag) @deprecate_dims def dot( self, other: T_Xarray, dim: Dims = None, ) -> T_Xarray: """Perform dot product of two DataArrays along their shared dims. Equivalent to taking taking tensordot over all shared dims. Parameters ---------- other : DataArray The other array with which the dot product is performed. dim : ..., str, Iterable of Hashable or None, optional Which dimensions to sum over. Ellipsis (`...`) sums over all dimensions. If not specified, then all the common dimensions are summed over. Returns ------- result : DataArray Array resulting from the dot product over all shared dimensions. See Also -------- dot numpy.tensordot Examples -------- >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4)) >>> da = xr.DataArray(da_vals, dims=["x", "y", "z"]) >>> dm_vals = np.arange(4) >>> dm = xr.DataArray(dm_vals, dims=["z"]) >>> dm.dims ('z',) >>> da.dims ('x', 'y', 'z') >>> dot_result = da.dot(dm) >>> dot_result.dims ('x', 'y') """ if isinstance(other, Dataset): raise NotImplementedError( "dot products are not yet supported with Dataset objects." ) if not isinstance(other, DataArray): raise TypeError("dot only operates on DataArrays.") return computation.dot(self, other, dim=dim) def sortby( self, variables: ( Hashable | DataArray | Sequence[Hashable | DataArray] | Callable[[Self], Hashable | DataArray | Sequence[Hashable | DataArray]] ), ascending: bool = True, ) -> Self: """Sort object by labels or values (along an axis). Sorts the dataarray, either along specified dimensions, or according to values of 1-D dataarrays that share dimension with calling object. If the input variables are dataarrays, then the dataarrays are aligned (via left-join) to the calling object prior to sorting by cell values. NaNs are sorted to the end, following Numpy convention. If multiple sorts along the same dimension is given, numpy's lexsort is performed along that dimension: https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html and the FIRST key in the sequence is used as the primary sort key, followed by the 2nd key, etc. Parameters ---------- variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. If a callable, the callable is passed this object, and the result is used as the value for cond. 
ascending : bool, default: True Whether to sort by ascending or descending order. Returns ------- sorted : DataArray A new dataarray where all the specified dims are sorted by dim labels. See Also -------- Dataset.sortby numpy.sort pandas.sort_values pandas.sort_index Examples -------- >>> da = xr.DataArray( ... np.arange(5, 0, -1), ... coords=[pd.date_range("1/1/2000", periods=5)], ... dims="time", ... ) >>> da Size: 40B array([5, 4, 3, 2, 1]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-01 2000-01-02 ... 2000-01-05 >>> da.sortby(da) Size: 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 >>> da.sortby(lambda x: x) Size: 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 """ # We need to convert the callable here rather than pass it through to the # dataset method, since otherwise the dataset method would try to call the # callable with the dataset as the object if callable(variables): variables = variables(self) ds = self._to_temp_dataset().sortby(variables, ascending=ascending) return self._from_temp_dataset(ds) def quantile( self, q: ArrayLike, dim: Dims = None, *, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : DataArray If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile Examples -------- >>> da = xr.DataArray( ... data=[[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]], ... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]}, ... dims=("x", "y"), ... 
) >>> da.quantile(0) # or da.quantile(0, dim=...) Size: 8B array(0.7) Coordinates: quantile float64 8B 0.0 >>> da.quantile(0, dim="x") Size: 32B array([0.7, 4.2, 2.6, 1.5]) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 quantile float64 8B 0.0 >>> da.quantile([0, 0.5, 1]) Size: 24B array([0.7, 3.4, 9.4]) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 >>> da.quantile([0, 0.5, 1], dim="x") Size: 96B array([[0.7 , 4.2 , 2.6 , 1.5 ], [3.6 , 5.75, 6. , 1.7 ], [6.5 , 7.3 , 9.4 , 1.9 ]]) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 * y (y) float64 32B 1.0 1.5 2.0 2.5 References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ ds = self._to_temp_dataset().quantile( q, dim=dim, keep_attrs=keep_attrs, method=method, skipna=skipna, interpolation=interpolation, ) return self._from_temp_dataset(ds) def rank( self, dim: Hashable, *, pct: bool = False, keep_attrs: bool | None = None, ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : Hashable Dimension over which to compute rank. pct : bool, default: False If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- ranked : DataArray DataArray with the same coordinates and dtype 'float64'. Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.rank("x") Size: 24B array([1., 2., 3.]) Dimensions without coordinates: x """ ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs) return self._from_temp_dataset(ds) def differentiate( self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Differentiate the array with the second order accurate central differences. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable The coordinate to be used to compute the gradient. edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. datetime_unit : {"W", "D", "h", "m", "s", "ms", \ "us", "ns", "ps", "fs", "as", None}, optional Unit to compute gradient. Only valid for datetime coordinate. "Y" and "M" are not available as datetime_unit. Returns ------- differentiated: DataArray See also -------- numpy.gradient: corresponding numpy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.differentiate("x") Size: 96B array([[30. , 30. , 30. ], [27.54545455, 27.54545455, 27.54545455], [27.54545455, 27.54545455, 27.54545455], [30. , 30. , 30. 
]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit) return self._from_temp_dataset(ds) def integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable, or sequence of Hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if a datetime coordinate is used. Returns ------- integrated : DataArray See also -------- Dataset.integrate numpy.trapz : corresponding numpy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.integrate("x") Size: 24B array([5.4, 6.6, 7.8]) Dimensions without coordinates: y """ ds = self._to_temp_dataset().integrate(coord, datetime_unit) return self._from_temp_dataset(ds) def cumulative_integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate cumulatively along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. The first entry of the cumulative integral is always 0, in order to keep the length of the dimension unchanged between input and output. Parameters ---------- coord : Hashable, or sequence of Hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if a datetime coordinate is used. Returns ------- integrated : DataArray See also -------- Dataset.cumulative_integrate scipy.integrate.cumulative_trapezoid : corresponding scipy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.cumulative_integrate("x") Size: 96B array([[0. , 0. , 0. ], [0.15, 0.25, 0.35], [4.65, 5.75, 6.85], [5.4 , 6.6 , 7.8 ]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().cumulative_integrate(coord, datetime_unit) return self._from_temp_dataset(ds) def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this DataArray. Returns ------- DataArray with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ return unify_chunks(self)[0] def map_blocks( self, func: Callable[..., T_Xarray], args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """ Apply a function to each block of this DataArray. .. warning:: This method is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a DataArray as its first parameter. 
The function will receive a subset or 'block' of this DataArray (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_dataarray, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with this object, otherwise an error is raised. kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like this object but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in this object is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- :func:`dask.array.map_blocks ` :func:`xarray.apply_ufunc ` :func:`xarray.Dataset.map_blocks ` :doc:`xarray-tutorial:advanced/map_blocks/map_blocks` Advanced Tutorial on map_blocks with dask Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... ).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() Size: 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array ... 
) # doctest: +ELLIPSIS Size: 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array """ from xarray.core.parallel import map_blocks return map_blocks(func, self, args, kwargs, template) def polyfit( self, dim: Hashable, deg: int, skipna: bool | None = None, rcond: float | None = None, w: Hashable | Any | None = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ) -> Dataset: """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- dim : Hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : Hashable, array-like or None, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- polyfit_results : Dataset A single dataset which contains: polyfit_coefficients The coefficients of the best fit. polyfit_residuals The residuals of the least-square computation (only included if `full=True`). When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) [dim]_singular_value The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) See Also -------- numpy.polyfit numpy.polyval xarray.polyval DataArray.curvefit """ # For DataArray, use the original implementation by converting to a dataset return self._to_temp_dataset().polyfit( dim, deg, skipna=skipna, rcond=rcond, w=w, full=full, cov=cov ) def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: ( float | tuple[float, float] | Mapping[Any, tuple[float, float]] | None ) = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ) -> Self: """Pad this array along one or more dimensions. .. warning:: This function is experimental and its behaviour is likely to change especially regarding padding of dimension coordinates (or IndexVariables). When using one of the modes ("edge", "reflect", "symmetric", "wrap"), coordinates will be padded with the same mode, otherwise coordinates are padded using the "constant" mode with fill_value dtypes.NA. 
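For instance, with the one-dimensional ``arr`` constructed in the Examples section below, padding with ``mode="wrap"`` also wraps the ``x`` coordinate, while the default ``mode="constant"`` pads the ``x`` coordinate with NaN (a sketch only; not executed): >>> arr.pad(x=(1, 1), mode="wrap")  # doctest: +SKIP >>> arr.pad(x=(1, 1))  # doctest: +SKIP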
Parameters ---------- pad_width : mapping of Hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \ "minimum", "reflect", "symmetric", "wrap"}, default: "constant" How to pad the DataArray (taken from numpy docs): - "constant": Pads with a constant value. - "edge": Pads with the edge values of array. - "linear_ramp": Pads with the linear ramp between end_value and the array edge value. - "maximum": Pads with the maximum value of all or part of the vector along each axis. - "mean": Pads with the mean value of all or part of the vector along each axis. - "median": Pads with the median value of all or part of the vector along each axis. - "minimum": Pads with the minimum value of all or part of the vector along each axis. - "reflect": Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - "symmetric": Pads with the reflection of the vector mirrored along the edge of the array. - "wrap": Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. stat_length : int, tuple or mapping of Hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique statistic lengths along each dimension. ((before, after),) yields same before and after statistic lengths for each dimension. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : scalar, tuple or mapping of Hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique pad constants along each dimension. ``((before, after),)`` yields same before and after constants for each dimension. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. end_values : scalar, tuple or mapping of Hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique end values along each dimension. ``((before, after),)`` yields same before and after end values for each axis. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. reflect_type : {"even", "odd", None}, optional Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. Returns ------- padded : DataArray DataArray with the padded coordinates and data. 
See Also -------- DataArray.shift, DataArray.roll, DataArray.bfill, DataArray.ffill, numpy.pad, dask.array.pad Notes ----- For ``mode="constant"`` and ``constant_values=None``, integer types will be promoted to ``float`` and padded with ``np.nan``. Padding coordinates will drop their corresponding index (if any) and will reset default indexes for dimension coordinates. Examples -------- >>> arr = xr.DataArray([5, 6, 7], coords=[("x", [0, 1, 2])]) >>> arr.pad(x=(1, 2), constant_values=0) Size: 48B array([0, 5, 6, 7, 0, 0]) Coordinates: * x (x) float64 48B nan 0.0 1.0 2.0 nan nan >>> da = xr.DataArray( ... [[0, 1, 2, 3], [10, 11, 12, 13]], ... dims=["x", "y"], ... coords={"x": [0, 1], "y": [10, 20, 30, 40], "z": ("x", [100, 200])}, ... ) >>> da.pad(x=1) Size: 128B array([[nan, nan, nan, nan], [ 0., 1., 2., 3.], [10., 11., 12., 13.], [nan, nan, nan, nan]]) Coordinates: * x (x) float64 32B nan 0.0 1.0 nan * y (y) int64 32B 10 20 30 40 z (x) float64 32B nan 100.0 200.0 nan Careful, ``constant_values`` are coerced to the data type of the array which may lead to a loss of precision: >>> da.pad(x=1, constant_values=1.23456789) Size: 128B array([[ 1, 1, 1, 1], [ 0, 1, 2, 3], [10, 11, 12, 13], [ 1, 1, 1, 1]]) Coordinates: * x (x) float64 32B nan 0.0 1.0 nan * y (y) int64 32B 10 20 30 40 z (x) float64 32B nan 100.0 200.0 nan """ ds = self._to_temp_dataset().pad( pad_width=pad_width, mode=mode, stat_length=stat_length, constant_values=constant_values, end_values=end_values, reflect_type=reflect_type, keep_attrs=keep_attrs, **pad_width_kwargs, ) return self._from_temp_dataset(ds) def idxmin( self, dim: Hashable | None = None, *, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the minimum value along a dimension. Returns a new `DataArray` named after the dimension with the values of the coordinate labels along that dimension corresponding to minimum values along that dimension. In comparison to :py:meth:`~DataArray.argmin`, this returns the coordinate label while :py:meth:`~DataArray.argmin` returns the index. Parameters ---------- dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : DataArray New `DataArray` object with `idxmin` applied to its data and the indicated dimension removed. See Also -------- Dataset.idxmin, DataArray.idxmax, DataArray.min, DataArray.argmin Examples -------- >>> array = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.min() Size: 8B array(-2) >>> array.argmin(...) {'x': Size: 8B array(4)} >>> array.idxmin() Size: 4B array('e', dtype='>> array = xr.DataArray( ... [ ... 
[2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.min(dim="x") Size: 24B array([-2., -4., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmin(dim="x") Size: 24B array([4, 0, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmin(dim="x") Size: 24B array([16., 0., 4.]) Coordinates: * y (y) int64 24B -1 0 1 """ return computation._calc_idxminmax( array=self, func=lambda x, *args, **kwargs: x.argmin(*args, **kwargs), dim=dim, skipna=skipna, fill_value=fill_value, keep_attrs=keep_attrs, ) def idxmax( self, dim: Hashable = None, *, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `DataArray` named after the dimension with the values of the coordinate labels along that dimension corresponding to maximum values along that dimension. In comparison to :py:meth:`~DataArray.argmax`, this returns the coordinate label while :py:meth:`~DataArray.argmax` returns the index. Parameters ---------- dim : Hashable, optional Dimension over which to apply `idxmax`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : DataArray New `DataArray` object with `idxmax` applied to its data and the indicated dimension removed. See Also -------- Dataset.idxmax, DataArray.idxmin, DataArray.max, DataArray.argmax Examples -------- >>> array = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.max() Size: 8B array(2) >>> array.argmax(...) {'x': Size: 8B array(1)} >>> array.idxmax() Size: 4B array('b', dtype='>> array = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.max(dim="x") Size: 24B array([2., 2., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmax(dim="x") Size: 24B array([0, 2, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmax(dim="x") Size: 24B array([0., 4., 4.]) Coordinates: * y (y) int64 24B -1 0 1 """ return computation._calc_idxminmax( array=self, func=lambda x, *args, **kwargs: x.argmax(*args, **kwargs), dim=dim, skipna=skipna, fill_value=fill_value, keep_attrs=keep_attrs, ) def argmin( self, dim: Dims = None, *, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Self | dict[Hashable, Self]: """Index or indices of the minimum of the DataArray over one or more dimensions. 
If a sequence is passed to 'dim', then result returned as dict of DataArrays, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a DataArray with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int or None, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : DataArray or dict of DataArray See Also -------- Variable.argmin, DataArray.idxmin Examples -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.min() Size: 8B array(-1) >>> array.argmin(...) {'x': Size: 8B array(2)} >>> array.isel(array.argmin(...)) Size: 8B array(-1) >>> array = xr.DataArray( ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, -5, 1], [2, 3, 1]]], ... dims=("x", "y", "z"), ... ) >>> array.min(dim="x") Size: 72B array([[ 1, 2, 1], [ 2, -5, 1], [ 2, 1, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim="x") Size: 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim=["x"]) {'x': Size: 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z} >>> array.min(dim=("x", "z")) Size: 24B array([ 1, -5, 1]) Dimensions without coordinates: y >>> array.argmin(dim=["x", "z"]) {'x': Size: 24B array([0, 1, 0]) Dimensions without coordinates: y, 'z': Size: 24B array([2, 1, 1]) Dimensions without coordinates: y} >>> array.isel(array.argmin(dim=["x", "z"])) Size: 24B array([ 1, -5, 1]) Dimensions without coordinates: y """ result = self.variable.argmin(dim, axis, keep_attrs, skipna) if isinstance(result, dict): return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()} else: return self._replace_maybe_drop_dims(result) def argmax( self, dim: Dims = None, *, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Self | dict[Hashable, Self]: """Index or indices of the maximum of the DataArray over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of DataArrays, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a DataArray with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. 
axis : int or None, optional Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : DataArray or dict of DataArray See Also -------- Variable.argmax, DataArray.idxmax Examples -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.max() Size: 8B array(3) >>> array.argmax(...) {'x': Size: 8B array(3)} >>> array.isel(array.argmax(...)) Size: 8B array(3) >>> array = xr.DataArray( ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, 5, 1], [2, 3, 1]]], ... dims=("x", "y", "z"), ... ) >>> array.max(dim="x") Size: 72B array([[3, 3, 2], [3, 5, 2], [2, 3, 3]]) Dimensions without coordinates: y, z >>> array.argmax(dim="x") Size: 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z >>> array.argmax(dim=["x"]) {'x': Size: 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z} >>> array.max(dim=("x", "z")) Size: 24B array([3, 5, 3]) Dimensions without coordinates: y >>> array.argmax(dim=["x", "z"]) {'x': Size: 24B array([0, 1, 0]) Dimensions without coordinates: y, 'z': Size: 24B array([0, 1, 2]) Dimensions without coordinates: y} >>> array.isel(array.argmax(dim=["x", "z"])) Size: 24B array([3, 5, 3]) Dimensions without coordinates: y """ result = self.variable.argmax(dim, axis, keep_attrs, skipna) if isinstance(result, dict): return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()} else: return self._replace_maybe_drop_dims(result) def query( self, queries: Mapping[Any, Any] | None = None, parser: QueryParserOptions = "pandas", engine: QueryEngineOptions = None, missing_dims: ErrorOptionsWithWarn = "raise", **queries_kwargs: Any, ) -> DataArray: """Return a new data array indexed along the specified dimension(s), where the indexers are given as strings containing Python expressions to be evaluated against the values in the array. Parameters ---------- queries : dict-like or None, optional A dict-like with keys matching dimensions and values given by strings containing Python expressions to be evaluated against the data variables in the dataset. The expressions will be evaluated using the pandas eval() function, and can contain any valid Python expressions but cannot contain any Python statements. parser : {"pandas", "python"}, default: "pandas" The parser to use to construct the syntax tree from the expression. The default of 'pandas' parses code slightly different than standard Python. Alternatively, you can parse an expression using the 'python' parser to retain strict Python semantics. engine : {"python", "numexpr", None}, default: None The engine used to evaluate the expression. 
Supported engines are: - None: tries to use numexpr, falls back to python - "numexpr": evaluates expressions using numexpr - "python": performs operations as if you had evalโ€™d in top level python missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **queries_kwargs : {dim: query, ...}, optional The keyword arguments form of ``queries``. One of queries or queries_kwargs must be provided. Returns ------- obj : DataArray A new DataArray with the same contents as this dataset, indexed by the results of the appropriate queries. See Also -------- DataArray.isel Dataset.query pandas.eval Examples -------- >>> da = xr.DataArray(np.arange(0, 5, 1), dims="x", name="a") >>> da Size: 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x >>> da.query(x="a > 2") Size: 16B array([3, 4]) Dimensions without coordinates: x """ ds = self._to_dataset_whole(shallow_copy=True) ds = ds.query( queries=queries, parser=parser, engine=engine, missing_dims=missing_dims, **queries_kwargs, ) return ds[self.name] def curvefit( self, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ) -> Dataset: """ Curve fitting optimization for arbitrary functions. Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`. Parameters ---------- coords : Hashable, DataArray, or sequence of DataArray or Hashable Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like or None, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. 
If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of Hashable or None, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. **kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- curvefit_results : Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. [var]_curvefit_covariance The covariance matrix of the coefficient estimates. Examples -------- Generate some exponentially decaying data, where the decay constant and amplitude are different for different values of the coordinate ``x``: >>> rng = np.random.default_rng(seed=0) >>> def exp_decay(t, time_constant, amplitude): ... return np.exp(-t / time_constant) * amplitude ... >>> t = np.arange(11) >>> da = xr.DataArray( ... np.stack( ... [ ... exp_decay(t, 1, 0.1), ... exp_decay(t, 2, 0.2), ... exp_decay(t, 3, 0.3), ... ] ... ) ... + rng.normal(size=(3, t.size)) * 0.01, ... coords={"x": [0, 1, 2], "time": t}, ... ) >>> da Size: 264B array([[ 0.1012573 , 0.0354669 , 0.01993775, 0.00602771, -0.00352513, 0.00428975, 0.01328788, 0.009562 , -0.00700381, -0.01264187, -0.0062282 ], [ 0.20041326, 0.09805582, 0.07138797, 0.03216692, 0.01974438, 0.01097441, 0.00679441, 0.01015578, 0.01408826, 0.00093645, 0.01501222], [ 0.29334805, 0.21847449, 0.16305984, 0.11130396, 0.07164415, 0.04744543, 0.03602333, 0.03129354, 0.01074885, 0.01284436, 0.00910995]]) Coordinates: * x (x) int64 24B 0 1 2 * time (time) int64 88B 0 1 2 3 4 5 6 7 8 9 10 Fit the exponential decay function to the data along the ``time`` dimension: >>> fit_result = da.curvefit("time", exp_decay) >>> fit_result["curvefit_coefficients"].sel( ... param="time_constant" ... ) # doctest: +NUMBER Size: 24B array([1.05692036, 1.73549638, 2.94215771]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") Size: 24B array([0.1005489 , 0.19631423, 0.30003579]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result = da.curvefit( ... "time", ... exp_decay, ... p0={ ... "amplitude": 0.2, ... "time_constant": xr.DataArray([1, 2, 3], coords=[da.x]), ... }, ... ) >>> fit_result["curvefit_coefficients"].sel(param="time_constant") Size: 24B array([1.0569213 , 1.73550052, 2.94215733]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") Size: 24B array([0.10054889, 0.1963141 , 0.3000358 ]) Coordinates: * x (x) int64 24B 0 1 2 param `_ with more curve fitting functionality. """ # For DataArray, use the original implementation by converting to a dataset first return self._to_temp_dataset().curvefit( coords, func, reduce_dims=reduce_dims, skipna=skipna, p0=p0, bounds=bounds, param_names=param_names, errors=errors, kwargs=kwargs, ) def drop_duplicates( self, dim: Hashable | Iterable[Hashable], *, keep: Literal["first", "last", False] = "first", ) -> Self: """Returns a new DataArray with duplicate dimension values removed. 
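A sketch of typical usage, with ``da`` as constructed in the Examples section below (not executed): >>> da.drop_duplicates("x")  # doctest: +SKIP >>> da.drop_duplicates(dim=..., keep="last")  # doctest: +SKIP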
Parameters ---------- dim : dimension label or labels Pass `...` to drop duplicates along all dimensions. keep : {"first", "last", False}, default: "first" Determines which duplicates (if any) to keep. - ``"first"`` : Drop duplicates except for the first occurrence. - ``"last"`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. Returns ------- DataArray See Also -------- Dataset.drop_duplicates Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... coords={"x": np.array([0, 0, 1, 2, 3]), "y": np.array([0, 1, 2, 3, 3])}, ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x") Size: 160B array([[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x", keep="last") Size: 160B array([[ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 Drop all duplicate dimension values: >>> da.drop_duplicates(dim=...) Size: 128B array([[ 0, 1, 2, 3], [10, 11, 12, 13], [15, 16, 17, 18], [20, 21, 22, 23]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 32B 0 1 2 3 """ deduplicated = self._to_temp_dataset().drop_duplicates(dim, keep=keep) return self._from_temp_dataset(deduplicated) def convert_calendar( self, calendar: str, dim: str = "time", align_on: str | None = None, missing: Any | None = None, use_cftime: bool | None = None, ) -> Self: """Convert the DataArray to another calendar. Only converts the individual timestamps, does not modify any data except in dropping invalid/surplus dates or inserting missing dates. If the source and target calendars are either no_leap, all_leap or a standard type, only the type of the time array is modified. When converting to a leap year from a non-leap year, the 29th of February is removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving `360_day` calendars, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters --------- calendar : str The target calendar name. dim : str Name of the time coordinate. align_on : {None, 'date', 'year'} Must be specified when either source or target is a `360_day` calendar, ignored otherwise. See Notes. missing : Optional[any] By default, i.e. if the value is None, this method will simply attempt to convert the dates in the source calendar to the same dates in the target calendar, and drop any of those that are not possible to represent. If a value is provided, a new time coordinate will be created in the target calendar with the same frequency as the original time coordinate; for any dates that are not present in the source, the data will be filled with this value. Note that using this mode requires that the source data have an inferable frequency; for more information see :py:func:`xarray.infer_freq`. For certain frequency, source, and target calendar combinations, this could result in many missing values, see notes. 
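A minimal calendar-conversion sketch (illustrative names; converting to a non-standard calendar requires the optional ``cftime`` dependency):

>>> import numpy as np
>>> import pandas as pd
>>> import xarray as xr
>>> times = pd.date_range("2000-01-01", periods=366, freq="D")  # 2000 is a leap year
>>> daily = xr.DataArray(np.arange(366), coords={"time": times}, dims="time")
>>> noleap = daily.convert_calendar("noleap")  # February 29 is dropped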
use_cftime : boolean, optional Whether to use cftime objects in the output, only used if `calendar` is one of {"proleptic_gregorian", "gregorian" or "standard"}. If True, the new time axis uses cftime objects. If None (default), it uses :py:class:`numpy.datetime64` values if the date range permits it, and :py:class:`cftime.datetime` objects if not. If False, it uses :py:class:`numpy.datetime64` or fails. Returns ------- DataArray Copy of the dataarray with the time coordinate converted to the target calendar. If 'missing' was None (default), invalid dates in the new calendar are dropped, but missing dates are not inserted. If `missing` was given, the new data is reindexed to have a time axis with the same frequency as the source, but in the new calendar; any missing datapoints are filled with `missing`. Notes ----- Passing a value to `missing` is only usable if the source's time coordinate as an inferable frequencies (see :py:func:`~xarray.infer_freq`) and is only appropriate if the target coordinate, generated from this frequency, has dates equivalent to the source. It is usually **not** appropriate to use this mode with: - Period-end frequencies : 'A', 'Y', 'Q' or 'M', in opposition to 'AS' 'YS', 'QS' and 'MS' - Sub-monthly frequencies that do not divide a day evenly : 'W', 'nD' where `N != 1` or 'mH' where 24 % m != 0). If one of the source or target calendars is `"360_day"`, `align_on` must be specified and two options are offered. - "year" The dates are translated according to their relative position in the year, ignoring their original month and day information, meaning that the missing/surplus days are added/removed at regular intervals. From a `360_day` to a standard calendar, the output will be missing the following dates (day of year in parentheses): To a leap year: January 31st (31), March 31st (91), June 1st (153), July 31st (213), September 31st (275) and November 30th (335). To a non-leap year: February 6th (36), April 19th (109), July 2nd (183), September 12th (255), November 25th (329). From a standard calendar to a `"360_day"`, the following dates in the source array will be dropped: From a leap year: January 31st (31), April 1st (92), June 1st (153), August 1st (214), September 31st (275), December 1st (336) From a non-leap year: February 6th (37), April 20th (110), July 2nd (183), September 13th (256), November 25th (329) This option is best used on daily and subdaily data. - "date" The month/day information is conserved and invalid dates are dropped from the output. This means that when converting from a `"360_day"` to a standard calendar, all 31st (Jan, March, May, July, August, October and December) will be missing as there is no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. """ return convert_calendar( self, calendar, dim=dim, align_on=align_on, missing=missing, use_cftime=use_cftime, ) def interp_calendar( self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: str = "time", ) -> Self: """Interpolates the DataArray to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal year equivalent then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. 
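A minimal sketch of interpolating between calendars (illustrative names; assumes the optional ``cftime`` dependency is installed):

>>> import numpy as np
>>> import xarray as xr
>>> src_time = xr.date_range("2000-01-01", periods=365, freq="D", calendar="noleap")
>>> src = xr.DataArray(np.arange(365.0), coords={"time": src_time}, dims="time")
>>> target = xr.date_range("2000-01-01", periods=366, freq="D", calendar="standard")
>>> on_standard = src.interp_calendar(target)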
For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- target: DataArray or DatetimeIndex or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : str The time coordinate name. Return ------ DataArray The source interpolated on the decimal years of target, """ return interp_calendar(self, target, dim=dim) @_deprecate_positional_args("v2024.07.0") def groupby( self, group: GroupInput = None, *, squeeze: Literal[False] = False, restore_coord_dims: bool = False, eagerly_compute_group: Literal[False] | None = None, **groupers: Grouper, ) -> DataArrayGroupBy: """Returns a DataArrayGroupBy object for performing grouped operations. Parameters ---------- group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper Array whose unique values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary, must map an existing variable name to a :py:class:`Grouper` instance. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. eagerly_compute_group: bool, optional This argument is deprecated. **groupers : Mapping of str to Grouper or Resampler Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object. One of ``group`` or ``groupers`` must be provided. Only a single ``grouper`` is allowed at present. Returns ------- grouped : DataArrayGroupBy A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- Calculate daily anomalies for daily data: >>> da = xr.DataArray( ... np.linspace(0, 1826, num=1827), ... coords=[pd.date_range("2000-01-01", "2004-12-31", freq="D")], ... dims="time", ... ) >>> da Size: 15kB array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03], shape=(1827,)) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 >>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time") Size: 15kB array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5], shape=(1827,)) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 dayofyear (time) int64 15kB 1 2 3 4 5 6 7 8 ... 360 361 362 363 364 365 366 Use a ``Grouper`` object to be more explicit >>> da.coords["dayofyear"] = da.time.dt.dayofyear >>> da.groupby(dayofyear=xr.groupers.UniqueGrouper()).mean() Size: 3kB array([ 730.8, 731.8, 732.8, ..., 1093.8, 1094.8, 1095.5]) Coordinates: * dayofyear (dayofyear) int64 3kB 1 2 3 4 5 6 7 ... 361 362 363 364 365 366 >>> da = xr.DataArray( ... data=np.arange(12).reshape((4, 3)), ... dims=("x", "y"), ... coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ... 
) Grouping by a single variable is easy >>> da.groupby("letters") Execute a reduction >>> da.groupby("letters").sum() Size: 48B array([[ 9, 11, 13], [ 9, 11, 13]]) Coordinates: * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Grouping by multiple variables >>> da.groupby(["letters", "x"]) Use Grouper objects to express more complicated GroupBy operations >>> from xarray.groupers import BinGrouper, UniqueGrouper >>> >>> da.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Size: 96B array([[[ 0., 1., 2.], [nan, nan, nan]], [[nan, nan, nan], [ 3., 4., 5.]]]) Coordinates: * x_bins (x_bins) interval[int64, right] 32B (5, 15] (15, 25] * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns` Tutorial on :py:func:`~xarray.DataArray.Groupby` for windowed computation :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray` Tutorial on :py:func:`~xarray.DataArray.Groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.DataArray.resample` :external:py:meth:`pandas.DataFrame.groupby ` :func:`DataArray.groupby_bins ` :func:`Dataset.groupby ` :func:`core.groupby.DataArrayGroupBy ` :func:`DataArray.coarsen ` :func:`Dataset.resample ` :func:`DataArray.resample ` """ from xarray.core.groupby import ( DataArrayGroupBy, _parse_group_and_groupers, _validate_groupby_squeeze, ) _validate_groupby_squeeze(squeeze) rgroupers = _parse_group_and_groupers( self, group, groupers, eagerly_compute_group=eagerly_compute_group ) return DataArrayGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims) @_deprecate_positional_args("v2024.07.0") def groupby_bins( self, group: Hashable | DataArray | IndexVariable, bins: Bins, right: bool = True, labels: ArrayLike | Literal[False] | None = None, precision: int = 3, include_lowest: bool = False, squeeze: Literal[False] = False, restore_coord_dims: bool = False, duplicates: Literal["raise", "drop"] = "raise", eagerly_compute_group: Literal[False] | None = None, ) -> DataArrayGroupBy: """Returns a DataArrayGroupBy object for performing grouped operations. Rather than using all unique values of `group`, the values are discretized first by applying `pandas.cut` [1]_ to `group`. Parameters ---------- group : Hashable, DataArray or IndexVariable Array whose binned values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. right : bool, default: True Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array-like, False or None, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. precision : int, default: 3 The precision at which to store and display the bins labels. include_lowest : bool, default: False Whether the first interval should be left-inclusive or not. 
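A minimal binning sketch (illustrative names):

>>> import numpy as np
>>> import xarray as xr
>>> arr = xr.DataArray(np.arange(10.0), dims="x", coords={"x": np.arange(10)})
>>> binned = arr.groupby_bins("x", bins=[0, 5, 10], include_lowest=True).mean()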
squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. eagerly_compute_group: bool, optional This argument is deprecated. Returns ------- grouped : DataArrayGroupBy A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to distinguish it from the original variable. See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. DataArray.groupby Dataset.groupby_bins core.groupby.DataArrayGroupBy pandas.DataFrame.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html """ from xarray.core.groupby import ( DataArrayGroupBy, ResolvedGrouper, _validate_groupby_squeeze, ) from xarray.groupers import BinGrouper _validate_groupby_squeeze(squeeze) grouper = BinGrouper( bins=bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, ) rgrouper = ResolvedGrouper( grouper, group, self, eagerly_compute_group=eagerly_compute_group ) return DataArrayGroupBy( self, (rgrouper,), restore_coord_dims=restore_coord_dims, ) def weighted(self, weights: DataArray) -> DataArrayWeighted: """ Weighted DataArray operations. Parameters ---------- weights : DataArray An array of weights associated with the values in this Dataset. Each value in the data contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a DataArray and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. Returns ------- computation.weighted.DataArrayWeighted See Also -------- :func:`Dataset.weighted ` :ref:`compute.weighted` User guide on weighted array reduction using :py:func:`~xarray.DataArray.weighted` :doc:`xarray-tutorial:fundamentals/03.4_weighted` Tutorial on Weighted Reduction using :py:func:`~xarray.DataArray.weighted` """ from xarray.computation.weighted import DataArrayWeighted return DataArrayWeighted(self, weights) def rolling( self, dim: Mapping[Any, int] | None = None, min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, **window_kwargs: int, ) -> DataArrayRolling: """ Rolling window object for DataArrays. Parameters ---------- dim : dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or Mapping to int, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. **window_kwargs : optional The keyword arguments form of ``dim``. One of dim or window_kwargs must be provided. Returns ------- computation.rolling.DataArrayRolling Examples -------- Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... 
) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.rolling(time=3, center=True).mean() Size: 96B array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 Remove the NaNs using ``dropna()``: >>> da.rolling(time=3, center=True).mean().dropna("time") Size: 80B array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-15 2000-02-15 ... 2000-10-15 See Also -------- DataArray.cumulative Dataset.rolling computation.rolling.DataArrayRolling """ from xarray.computation.rolling import DataArrayRolling dim = either_dict_or_kwargs(dim, window_kwargs, "rolling") return DataArrayRolling(self, dim, min_periods=min_periods, center=center) def cumulative( self, dim: str | Iterable[Hashable], min_periods: int = 1, ) -> DataArrayRolling: """ Accumulating object for DataArrays. Parameters ---------- dims : iterable of hashable The name(s) of the dimensions to create the cumulative window along min_periods : int, default: 1 Minimum number of observations in window required to have a value (otherwise result is NA). The default is 1 (note this is different from ``Rolling``, whose default is the size of the window). Returns ------- computation.rolling.DataArrayRolling Examples -------- Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.cumulative("time").sum() Size: 96B array([ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55., 66.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 See Also -------- DataArray.rolling Dataset.cumulative computation.rolling.DataArrayRolling """ from xarray.computation.rolling import DataArrayRolling # Could we abstract this "normalize and check 'dim'" logic? It's currently shared # with the same method in Dataset. if isinstance(dim, str): if dim not in self.dims: raise ValueError( f"Dimension {dim} not found in data dimensions: {self.dims}" ) dim = {dim: self.sizes[dim]} else: missing_dims = set(dim) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {missing_dims} not found in data dimensions: {self.dims}" ) dim = {d: self.sizes[d] for d in dim} return DataArrayRolling(self, dim, min_periods=min_periods, center=False) def coarsen( self, dim: Mapping[Any, int] | None = None, boundary: CoarsenBoundaryOptions = "exact", side: SideOptions | Mapping[Any, SideOptions] = "left", coord_func: str | Callable | Mapping[Any, str | Callable] = "mean", **window_kwargs: int, ) -> DataArrayCoarsen: """ Coarsen object for DataArrays. Parameters ---------- dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. 
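A minimal sketch of the ``boundary`` options (illustrative names):

>>> import numpy as np
>>> import xarray as xr
>>> arr = xr.DataArray(np.arange(7.0), dims="x")
>>> padded = arr.coarsen(x=3, boundary="pad").mean()  # last window is padded with NaN
>>> trimmed = arr.coarsen(x=3, boundary="trim").mean()  # the excess element is dropped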
side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left" coord_func : str or mapping of hashable to str, default: "mean" function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). Returns ------- computation.rolling.DataArrayCoarsen Examples -------- Coarsen the long time series by averaging over every three days. >>> da = xr.DataArray( ... np.linspace(0, 364, num=364), ... dims="time", ... coords={"time": pd.date_range("1999-12-15", periods=364)}, ... ) >>> da # +doctest: ELLIPSIS Size: 3kB array([ 0. , 1.00275482, 2.00550964, 3.00826446, 4.01101928, 5.0137741 , 6.01652893, 7.01928375, 8.02203857, 9.02479339, 10.02754821, 11.03030303, 12.03305785, 13.03581267, 14.03856749, 15.04132231, 16.04407713, 17.04683196, 18.04958678, 19.0523416 , 20.05509642, 21.05785124, 22.06060606, 23.06336088, 24.0661157 , 25.06887052, 26.07162534, 27.07438017, 28.07713499, 29.07988981, 30.08264463, 31.08539945, 32.08815427, 33.09090909, 34.09366391, 35.09641873, 36.09917355, 37.10192837, 38.1046832 , 39.10743802, 40.11019284, 41.11294766, 42.11570248, 43.1184573 , 44.12121212, 45.12396694, 46.12672176, 47.12947658, 48.1322314 , 49.13498623, 50.13774105, 51.14049587, 52.14325069, 53.14600551, 54.14876033, 55.15151515, 56.15426997, 57.15702479, 58.15977961, 59.16253444, 60.16528926, 61.16804408, 62.1707989 , 63.17355372, 64.17630854, 65.17906336, 66.18181818, 67.184573 , 68.18732782, 69.19008264, 70.19283747, 71.19559229, 72.19834711, 73.20110193, 74.20385675, 75.20661157, 76.20936639, 77.21212121, 78.21487603, 79.21763085, ... 284.78236915, 285.78512397, 286.78787879, 287.79063361, 288.79338843, 289.79614325, 290.79889807, 291.80165289, 292.80440771, 293.80716253, 294.80991736, 295.81267218, 296.815427 , 297.81818182, 298.82093664, 299.82369146, 300.82644628, 301.8292011 , 302.83195592, 303.83471074, 304.83746556, 305.84022039, 306.84297521, 307.84573003, 308.84848485, 309.85123967, 310.85399449, 311.85674931, 312.85950413, 313.86225895, 314.86501377, 315.8677686 , 316.87052342, 317.87327824, 318.87603306, 319.87878788, 320.8815427 , 321.88429752, 322.88705234, 323.88980716, 324.89256198, 325.8953168 , 326.89807163, 327.90082645, 328.90358127, 329.90633609, 330.90909091, 331.91184573, 332.91460055, 333.91735537, 334.92011019, 335.92286501, 336.92561983, 337.92837466, 338.93112948, 339.9338843 , 340.93663912, 341.93939394, 342.94214876, 343.94490358, 344.9476584 , 345.95041322, 346.95316804, 347.95592287, 348.95867769, 349.96143251, 350.96418733, 351.96694215, 352.96969697, 353.97245179, 354.97520661, 355.97796143, 356.98071625, 357.98347107, 358.9862259 , 359.98898072, 360.99173554, 361.99449036, 362.99724518, 364. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-12-12 >>> da.coarsen(time=3, boundary="trim").mean() # +doctest: ELLIPSIS Size: 968B array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821, 13.03581267, 16.04407713, 19.0523416 , 22.06060606, 25.06887052, 28.07713499, 31.08539945, 34.09366391, 37.10192837, 40.11019284, 43.1184573 , 46.12672176, 49.13498623, 52.14325069, 55.15151515, 58.15977961, 61.16804408, 64.17630854, 67.184573 , 70.19283747, 73.20110193, 76.20936639, 79.21763085, 82.22589532, 85.23415978, 88.24242424, 91.25068871, 94.25895317, 97.26721763, 100.27548209, 103.28374656, 106.29201102, 109.30027548, 112.30853994, 115.31680441, 118.32506887, 121.33333333, 124.3415978 , 127.34986226, 130.35812672, 133.36639118, 136.37465565, 139.38292011, 142.39118457, 145.39944904, 148.4077135 , 151.41597796, 154.42424242, 157.43250689, 160.44077135, 163.44903581, 166.45730028, 169.46556474, 172.4738292 , 175.48209366, 178.49035813, 181.49862259, 184.50688705, 187.51515152, 190.52341598, 193.53168044, 196.5399449 , 199.54820937, 202.55647383, 205.56473829, 208.57300275, 211.58126722, 214.58953168, 217.59779614, 220.60606061, 223.61432507, 226.62258953, 229.63085399, 232.63911846, 235.64738292, 238.65564738, 241.66391185, 244.67217631, 247.68044077, 250.68870523, 253.6969697 , 256.70523416, 259.71349862, 262.72176309, 265.73002755, 268.73829201, 271.74655647, 274.75482094, 277.7630854 , 280.77134986, 283.77961433, 286.78787879, 289.79614325, 292.80440771, 295.81267218, 298.82093664, 301.8292011 , 304.83746556, 307.84573003, 310.85399449, 313.86225895, 316.87052342, 319.87878788, 322.88705234, 325.8953168 , 328.90358127, 331.91184573, 334.92011019, 337.92837466, 340.93663912, 343.94490358, 346.95316804, 349.96143251, 352.96969697, 355.97796143, 358.9862259 , 361.99449036]) Coordinates: * time (time) datetime64[ns] 968B 1999-12-16 1999-12-19 ... 2000-12-10 >>> See Also -------- :class:`computation.rolling.DataArrayCoarsen ` :func:`Dataset.coarsen ` :ref:`reshape.coarsen` User guide describing :py:func:`~xarray.DataArray.coarsen` :ref:`compute.coarsen` User guide on block aggregation :py:func:`~xarray.DataArray.coarsen` :doc:`xarray-tutorial:fundamentals/03.3_windowed` Tutorial on windowed computation using :py:func:`~xarray.DataArray.coarsen` """ from xarray.computation.rolling import DataArrayCoarsen dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen") return DataArrayCoarsen( self, dim, boundary=boundary, side=side, coord_func=coord_func, ) @_deprecate_positional_args("v2024.07.0") def resample( self, indexer: Mapping[Hashable, ResampleCompatible | Resampler] | None = None, *, skipna: bool | None = None, closed: SideOptions | None = None, label: SideOptions | None = None, offset: pd.Timedelta | datetime.timedelta | str | None = None, origin: str | DatetimeLike = "start_day", restore_coord_dims: bool | None = None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> DataArrayResample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. 
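A minimal downsampling sketch (illustrative names):

>>> import numpy as np
>>> import pandas as pd
>>> import xarray as xr
>>> times = pd.date_range("2000-01-01", periods=365, freq="D")
>>> daily = xr.DataArray(np.arange(365.0), coords={"time": times}, dims="time")
>>> monthly = daily.resample(time="MS").mean()  # labels fall on month starts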
closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : core.resample.DataArrayResample This object resampled. Examples -------- Downsample monthly time-series data to seasonal data: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() Size: 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS Size: 3kB array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , 0.48387097, 0.51612903, 0.5483871 , 0.58064516, 0.61290323, 0.64516129, 0.67741935, 0.70967742, 0.74193548, 0.77419355, 0.80645161, 0.83870968, 0.87096774, 0.90322581, 0.93548387, 0.96774194, 1. , ..., 9. , 9.03333333, 9.06666667, 9.1 , 9.13333333, 9.16666667, 9.2 , 9.23333333, 9.26666667, 9.3 , 9.33333333, 9.36666667, 9.4 , 9.43333333, 9.46666667, 9.5 , 9.53333333, 9.56666667, 9.6 , 9.63333333, 9.66666667, 9.7 , 9.73333333, 9.76666667, 9.8 , 9.83333333, 9.86666667, 9.9 , 9.93333333, 9.96666667, 10. , 10.03225806, 10.06451613, 10.09677419, 10.12903226, 10.16129032, 10.19354839, 10.22580645, 10.25806452, 10.29032258, 10.32258065, 10.35483871, 10.38709677, 10.41935484, 10.4516129 , 10.48387097, 10.51612903, 10.5483871 , 10.58064516, 10.61290323, 10.64516129, 10.67741935, 10.70967742, 10.74193548, 10.77419355, 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-11-15 Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") Size: 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 2., 2., 2., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3., 3., 3., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 4., 4., 4., nan, nan, nan, nan, nan, ..., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 10., 10., 10., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- Dataset.resample pandas.Series.resample pandas.DataFrame.resample References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases """ from xarray.core.resample import DataArrayResample return self._resample( resample_cls=DataArrayResample, indexer=indexer, skipna=skipna, closed=closed, label=label, offset=offset, origin=origin, restore_coord_dims=restore_coord_dims, **indexer_kwargs, ) def to_dask_dataframe( self, dim_order: Sequence[Hashable] | None = None, set_index: bool = False, ) -> DaskDataFrame: """Convert this array into a dask.dataframe.DataFrame. Parameters ---------- dim_order : Sequence of Hashable or None , optional Hierarchical dimension order for the resulting dataframe. Array content is transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. set_index : bool, default: False If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames do not support multi-indexes, set_index only works if the dataset only contains one dimension. Returns ------- dask.dataframe.DataFrame Examples -------- >>> da = xr.DataArray( ... np.arange(4 * 2 * 2).reshape(4, 2, 2), ... dims=("time", "lat", "lon"), ... coords={ ... "time": np.arange(4), ... "lat": [-30, -20], ... "lon": [120, 130], ... }, ... name="eg_dataarray", ... attrs={"units": "Celsius", "description": "Random temperature data"}, ... ) >>> da.to_dask_dataframe(["lat", "lon", "time"]).compute() lat lon time eg_dataarray 0 -30 120 0 0 1 -30 120 1 4 2 -30 120 2 8 3 -30 120 3 12 4 -30 130 0 1 5 -30 130 1 5 6 -30 130 2 9 7 -30 130 3 13 8 -20 120 0 2 9 -20 120 1 6 10 -20 120 2 10 11 -20 120 3 14 12 -20 130 0 3 13 -20 130 1 7 14 -20 130 2 11 15 -20 130 3 15 """ if self.name is None: raise ValueError( "Cannot convert an unnamed DataArray to a " "dask dataframe : use the ``.rename`` method to assign a name." 
) name = self.name ds = self._to_dataset_whole(name, shallow_copy=False) return ds.to_dask_dataframe(dim_order, set_index) # this needs to be at the end, or mypy will confuse with `str` # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names str = utils.UncachedAccessor(StringAccessor["DataArray"]) def drop_attrs(self, *, deep: bool = True) -> Self: """ Removes all attributes from the DataArray. Parameters ---------- deep : bool, default True Removes attributes from coordinates. Returns ------- DataArray """ if not deep: return self._replace(attrs={}) else: return ( self._to_temp_dataset() .drop_attrs(deep=deep) .pipe(self._from_temp_dataset) ) xarray-2025.12.0/xarray/core/dataset.py000066400000000000000000014332731511464676000176150ustar00rootroot00000000000000from __future__ import annotations import asyncio import copy import datetime import io import math import sys import warnings from collections import defaultdict from collections.abc import ( Callable, Collection, Hashable, Iterable, Iterator, Mapping, MutableMapping, Sequence, ) from functools import partial from html import escape from numbers import Number from operator import methodcaller from os import PathLike from types import EllipsisType from typing import IO, TYPE_CHECKING, Any, Literal, cast, overload import numpy as np import pandas as pd from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftimeindex import CFTimeIndex, _parse_array_of_cftime_strings from xarray.compat.array_api_compat import to_like_array from xarray.computation import ops from xarray.computation.arithmetic import DatasetArithmetic from xarray.core import dtypes as xrdtypes from xarray.core import duck_array_ops, formatting, formatting_html, utils from xarray.core._aggregations import DatasetAggregations from xarray.core.common import ( DataWithCoords, _contains_datetime_like_objects, _is_numeric_aggregatable_dtype, get_chunksizes, ) from xarray.core.coordinates import ( Coordinates, DatasetCoordinates, assert_coordinate_consistent, ) from xarray.core.dataset_utils import _get_virtual_variable, _LocIndexer from xarray.core.dataset_variables import DataVariables from xarray.core.duck_array_ops import datetime_to_numeric from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, assert_no_index_corrupted, create_default_index_implicit, filter_indexes_from_coords, isel_indexes, remove_unused_levels_categories, roll_indexes, ) from xarray.core.indexing import is_fancy_indexer, map_index_queries from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Bins, NetcdfWriteModes, QuantileMethods, Self, T_ChunkDim, T_ChunksFreq, T_DataArray, T_DataArrayOrSet, ZarrWriteModes, ) from xarray.core.utils import ( Default, FilteredMapping, Frozen, FrozenMappingWarningOnValuesAccess, OrderedSet, _default, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, emit_user_level_warning, infix_dims, is_allowed_extension_array, is_dict_like, is_duck_array, is_duck_dask_array, is_scalar, maybe_wrap_array, parse_dims_as_set, ) from xarray.core.variable import ( UNSUPPORTED_EXTENSION_ARRAY_TYPES, IndexVariable, Variable, as_variable, broadcast_variables, calculate_dimensions, ) from xarray.namedarray.parallelcompat import get_chunked_array_type, guess_chunkmanager from xarray.namedarray.pycompat import array_type, is_chunked_array, to_numpy from xarray.plot.accessor import DatasetPlotAccessor from xarray.structure import alignment 
from xarray.structure.alignment import ( _broadcast_helper, _get_broadcast_dims_map_common_coords, align, ) from xarray.structure.chunks import _maybe_chunk, unify_chunks from xarray.structure.merge import ( dataset_merge_method, dataset_update_method, merge_coordinates_without_align, merge_data_and_coords, ) from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, _deprecate_positional_args, deprecate_dims, ) if TYPE_CHECKING: from dask.dataframe import DataFrame as DaskDataFrame from dask.delayed import Delayed from numpy.typing import ArrayLike from xarray.backends import AbstractDataStore, ZarrStore from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.computation.rolling import DatasetCoarsen, DatasetRolling from xarray.computation.weighted import DatasetWeighted from xarray.core.dataarray import DataArray from xarray.core.groupby import DatasetGroupBy from xarray.core.resample import DatasetResample from xarray.core.types import ( CFCalendar, CoarsenBoundaryOptions, CombineAttrsOptions, CompatOptions, DataVars, DatetimeLike, DatetimeUnitOptions, Dims, DsCompatible, ErrorOptions, ErrorOptionsWithWarn, GroupIndices, GroupInput, InterpOptions, JoinOptions, PadModeOptions, PadReflectOptions, QueryEngineOptions, QueryParserOptions, ReindexMethodOptions, ResampleCompatible, SideOptions, T_ChunkDimFreq, T_Chunks, T_DatasetPadConstantValues, T_Xarray, ZarrStoreLike, ) from xarray.groupers import Grouper, Resampler from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint from xarray.structure.merge import CoercibleMapping, CoercibleValue # list of attributes of pd.DatetimeIndex that are ndarrays of time info _DATETIMEINDEX_COMPONENTS = [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "date", "time", "dayofyear", "weekofyear", "dayofweek", "quarter", ] class Dataset( DataWithCoords, DatasetAggregations, DatasetArithmetic, Mapping[Hashable, "DataArray"], ): """A multi-dimensional, in memory, array database. A dataset resembles an in-memory representation of a NetCDF file, and consists of variables, coordinates and attributes which together form a self describing dataset. Dataset implements the mapping interface with keys given by variable names and values given by DataArray objects for each variable name. By default, pandas indexes are created for one dimensional variables with name equal to their dimension (i.e., :term:`Dimension coordinate`) so those variables can be readily used as coordinates for label based indexing. When a :py:class:`~xarray.Coordinates` object is passed to ``coords``, any existing index(es) built from those coordinates will be added to the Dataset. To load data from a file or file-like object, use the `open_dataset` function. Parameters ---------- data_vars : dict-like, optional A mapping from variable names to :py:class:`~xarray.DataArray` objects, :py:class:`~xarray.Variable` objects or to tuples of the form ``(dims, data[, attrs])`` which can be used as arguments to create a new ``Variable``. Each dimension must have the same length in all variables in which it appears. 
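A minimal construction sketch using the ``(dims, data[, attrs])`` tuple form (illustrative names):

>>> import numpy as np
>>> import xarray as xr
>>> ds = xr.Dataset(
...     data_vars={"temperature": (("x", "t"), np.zeros((2, 3)))},
...     coords={"x": [10, 20], "t": [0, 1, 2]},
...     attrs={"title": "toy example"},
... )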
The following notations are accepted: - mapping {var name: DataArray} - mapping {var name: Variable} - mapping {var name: (dimension name, array-like)} - mapping {var name: (tuple of dimension names, array-like)} - mapping {dimension name: array-like} (if array-like is not a scalar it will be automatically moved to coords, see below) Each dimension must have the same length in all variables in which it appears. coords : :py:class:`~xarray.Coordinates` or dict-like, optional A :py:class:`~xarray.Coordinates` object or another mapping in similar form as the `data_vars` argument, except that each item is saved on the dataset as a "coordinate". These variables have an associated meaning: they describe constant/fixed/independent quantities, unlike the varying/measured/dependent quantities that belong in `variables`. The following notations are accepted for arbitrary mappings: - mapping {coord name: DataArray} - mapping {coord name: Variable} - mapping {coord name: (dimension name, array-like)} - mapping {coord name: (tuple of dimension names, array-like)} - mapping {dimension name: array-like} (the dimension name is implicitly set to be the same as the coord name) The last notation implies either that the coordinate value is a scalar or that it is a 1-dimensional array and the coord name is the same as the dimension name (i.e., a :term:`Dimension coordinate`). In the latter case, the 1-dimensional array will be assumed to give index values along the dimension with the same name. Alternatively, a :py:class:`~xarray.Coordinates` object may be used in order to explicitly pass indexes (e.g., a multi-index or any custom Xarray index) or to bypass the creation of a default index for any :term:`Dimension coordinate` included in that object. attrs : dict-like, optional Global attributes to save on this dataset. (see FAQ, :ref:`approach to metadata`) Examples -------- In this example dataset, we will represent measurements of the temperature and pressure that were made under various conditions: * the measurements were made on four different days; * they were made at two separate locations, which we will represent using their latitude and longitude; and * they were made using three instrument developed by three different manufacturers, which we will refer to using the strings `'manufac1'`, `'manufac2'`, and `'manufac3'`. >>> np.random.seed(0) >>> temperature = 15 + 8 * np.random.randn(2, 3, 4) >>> precipitation = 10 * np.random.rand(2, 3, 4) >>> lon = [-99.83, -99.32] >>> lat = [42.25, 42.21] >>> instruments = ["manufac1", "manufac2", "manufac3"] >>> time = pd.date_range("2014-09-06", periods=4) >>> reference_time = pd.Timestamp("2014-09-05") Here, we initialize the dataset with multiple dimensions. We use the string `"loc"` to represent the location dimension of the data, the string `"instrument"` to represent the instrument manufacturer dimension, and the string `"time"` for the time dimension. >>> ds = xr.Dataset( ... data_vars=dict( ... temperature=(["loc", "instrument", "time"], temperature), ... precipitation=(["loc", "instrument", "time"], precipitation), ... ), ... coords=dict( ... lon=("loc", lon), ... lat=("loc", lat), ... instrument=instruments, ... time=time, ... reference_time=reference_time, ... ), ... attrs=dict(description="Weather related data."), ... 
) >>> ds Size: 552B Dimensions: (loc: 2, instrument: 3, time: 4) Coordinates: * instrument (instrument) >> ds.isel(ds.temperature.argmin(...)) Size: 80B Dimensions: () Coordinates: lon float64 8B -99.32 lat float64 8B 42.21 instrument None: if data_vars is None: data_vars = {} if coords is None: coords = {} both_data_and_coords = set(data_vars) & set(coords) if both_data_and_coords: raise ValueError( f"variables {both_data_and_coords!r} are found in both data_vars and coords" ) if isinstance(coords, Dataset): coords = coords._variables variables, coord_names, dims, indexes, _ = merge_data_and_coords( data_vars, coords ) self._attrs = dict(attrs) if attrs else None self._close = None self._encoding = None self._variables = variables self._coord_names = coord_names self._dims = dims self._indexes = indexes # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping # related to https://github.com/python/mypy/issues/9319? def __eq__(self, other: DsCompatible) -> Self: # type: ignore[override] return super().__eq__(other) @classmethod def load_store(cls, store, decoder=None) -> Self: """Create a new dataset from the contents of a backends.*DataStore object """ variables, attributes = store.load() if decoder: variables, attributes = decoder(variables, attributes) obj = cls(variables, attrs=attributes) obj.set_close(store.close) return obj @property def variables(self) -> Frozen[Hashable, Variable]: """Low level interface to Dataset contents as dict of Variable objects. This ordered dictionary is frozen to prevent mutation that could violate Dataset invariants. It contains all variable objects constituting the Dataset, including both data variables and coordinates. """ return Frozen(self._variables) @property def attrs(self) -> dict[Any, Any]: """Dictionary of global attributes on this dataset""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) if value else None @property def encoding(self) -> dict[Any, Any]: """Dictionary of global encoding attributes on this dataset""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value: Mapping[Any, Any]) -> None: self._encoding = dict(value) def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new Dataset without encoding on the dataset or any of its variables/coords.""" variables = {k: v.drop_encoding() for k, v in self.variables.items()} return self._replace(variables=variables, encoding={}) @property def dims(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. Note that type of this object differs from `DataArray.dims`. See `Dataset.sizes` and `DataArray.sizes` for consistently named properties. This property will be changed to return a type more consistent with `DataArray.dims` in the future, i.e. a set of dimension names. See Also -------- Dataset.sizes DataArray.dims """ return FrozenMappingWarningOnValuesAccess(self._dims) @property def sizes(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. This is an alias for `Dataset.dims` provided for the benefit of consistency with `DataArray.sizes`. 
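A minimal access sketch (illustrative names):

>>> import numpy as np
>>> import xarray as xr
>>> ds = xr.Dataset({"a": (("x", "y"), np.ones((2, 3)))})
>>> nx = ds.sizes["x"]  # 2
>>> ny = ds["a"].sizes["y"]  # 3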
See Also -------- DataArray.sizes """ return Frozen(self._dims) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from data variable names to dtypes. Cannot be modified directly, but is updated when adding new variables. See Also -------- DataArray.dtype """ return Frozen( { n: v.dtype for n, v in self._variables.items() if n not in self._coord_names } ) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataset is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset Same object but with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.compute Dataset.load_async DataArray.load Variable.load """ # access .data to coerce everything to numpy or dask arrays chunked_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if chunked_data: chunkmanager = get_chunked_array_type(*chunked_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute( *chunked_data.values(), **kwargs ) for k, data in zip(chunked_data, evaluated_data, strict=False): self.variables[k].data = data # load everything else sequentially [v.load() for k, v in self.variables.items() if k not in chunked_data] return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataset is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset Same object but with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.compute Dataset.load DataArray.load_async Variable.load_async """ # TODO refactor this to pull out the common chunked_data codepath # this blocks on chunked arrays but not on lazily indexed arrays # access .data to coerce everything to numpy or dask arrays chunked_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if chunked_data: chunkmanager = get_chunked_array_type(*chunked_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] 
= chunkmanager.compute( *chunked_data.values(), **kwargs ) for k, data in zip(chunked_data, evaluated_data, strict=False): self.variables[k].data = data # load everything else concurrently coros = [ v.load_async() for k, v in self.variables.items() if k not in chunked_data ] await asyncio.gather(*coros) return self def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token( (type(self), self._variables, self._coord_names, self._attrs or None) ) def __dask_graph__(self): graphs = {k: v.__dask_graph__() for k, v in self.variables.items()} graphs = {k: v for k, v in graphs.items() if v is not None} if not graphs: return None else: try: from dask.highlevelgraph import HighLevelGraph return HighLevelGraph.merge(*graphs.values()) except ImportError: from dask import sharedict return sharedict.merge(*graphs.values()) def __dask_keys__(self): import dask return [ v.__dask_keys__() for v in self.variables.values() if dask.is_dask_collection(v) ] def __dask_layers__(self): import dask return sum( ( v.__dask_layers__() for v in self.variables.values() if dask.is_dask_collection(v) ), (), ) @property def __dask_optimize__(self): import dask.array as da return da.Array.__dask_optimize__ @property def __dask_scheduler__(self): import dask.array as da return da.Array.__dask_scheduler__ def __dask_postcompute__(self): return self._dask_postcompute, () def __dask_postpersist__(self): return self._dask_postpersist, () def _dask_postcompute(self, results: Iterable[Variable]) -> Self: import dask variables = {} results_iter = iter(results) for k, v in self._variables.items(): if dask.is_dask_collection(v): rebuild, args = v.__dask_postcompute__() v = rebuild(next(results_iter), *args) variables[k] = v return type(self)._construct_direct( variables, self._coord_names, self._dims, self._attrs, self._indexes, self._encoding, self._close, ) def _dask_postpersist( self, dsk: Mapping, *, rename: Mapping[str, str] | None = None ) -> Self: from dask import is_dask_collection from dask.highlevelgraph import HighLevelGraph from dask.optimization import cull variables = {} for k, v in self._variables.items(): if not is_dask_collection(v): variables[k] = v continue if isinstance(dsk, HighLevelGraph): # dask >= 2021.3 # __dask_postpersist__() was called by dask.highlevelgraph. # Don't use dsk.cull(), as we need to prevent partial layers: # https://github.com/dask/dask/issues/7137 layers = v.__dask_layers__() if rename: layers = [rename.get(k, k) for k in layers] dsk2 = dsk.cull_layers(layers) elif rename: # pragma: nocover # At the moment of writing, this is only for forward compatibility. # replace_name_in_key requires dask >= 2021.3. from dask.base import flatten, replace_name_in_key keys = [ replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__()) ] dsk2, _ = cull(dsk, keys) else: # __dask_postpersist__() was called by dask.optimize or dask.persist dsk2, _ = cull(dsk, v.__dask_keys__()) rebuild, args = v.__dask_postpersist__() # rename was added in dask 2021.3 kwargs = {"rename": rename} if rename else {} variables[k] = rebuild(dsk2, *args, **kwargs) return type(self)._construct_direct( variables, self._coord_names, self._dims, self._attrs, self._indexes, self._encoding, self._close, ) def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.load``, the original dataset is left unaltered. 
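A minimal sketch of the difference (illustrative names; assumes the optional ``dask`` dependency is installed):

>>> import numpy as np
>>> import xarray as xr
>>> lazy = xr.Dataset({"a": (("x",), np.arange(1000.0))}).chunk({"x": 100})
>>> eager = lazy.compute()  # new dataset backed by numpy; ``lazy`` keeps its dask arrays
>>> _ = lazy.load()  # loads in place and returns the same object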
Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset New object with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.load Dataset.load_async DataArray.compute Variable.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _persist_inplace(self, **kwargs) -> Self: """Persist all chunked arrays in memory.""" # access .data to coerce everything to numpy or dask arrays lazy_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if lazy_data: chunkmanager = get_chunked_array_type(*lazy_data.values()) # evaluate all the dask arrays simultaneously evaluated_data = chunkmanager.persist(*lazy_data.values(), **kwargs) for k, data in zip(lazy_data, evaluated_data, strict=False): self.variables[k].data = data return self def persist(self, **kwargs) -> Self: """Trigger computation, keeping data as chunked arrays. This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()`` or ``.load()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : Dataset New object with all dask-backed coordinates and data variables as persisted dask arrays. See Also -------- dask.persist """ new = self.copy(deep=False) return new._persist_inplace(**kwargs) @classmethod def _construct_direct( cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int] | None = None, attrs: dict | None = None, indexes: dict[Any, Index] | None = None, encoding: dict | None = None, close: Callable[[], None] | None = None, ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ if dims is None: dims = calculate_dimensions(variables) if indexes is None: indexes = {} obj = object.__new__(cls) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def _replace( self, variables: dict[Hashable, Variable] | None = None, coord_names: set[Hashable] | None = None, dims: dict[Any, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, encoding: dict | Default | None = _default, inplace: bool = False, ) -> Self: """Fastpath constructor for internal use. Returns an object with optionally with replaced attributes. Explicitly passed arguments are *not* copied when placed on the new dataset. It is up to the caller to ensure that they have the right type and are not used elsewhere. 
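A minimal sketch of ``persist`` as described above (illustrative names; assumes ``dask`` is installed):

>>> import numpy as np
>>> import xarray as xr
>>> lazy = xr.Dataset({"a": (("x",), np.arange(1000.0))}).chunk({"x": 100})
>>> persisted = lazy.persist()  # computation is triggered; data stay as chunked dask arrays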
""" if inplace: if variables is not None: self._variables = variables if coord_names is not None: self._coord_names = coord_names if dims is not None: self._dims = dims if attrs is not _default: self._attrs = attrs if indexes is not None: self._indexes = indexes if encoding is not _default: self._encoding = encoding obj = self else: if variables is None: variables = self._variables.copy() if coord_names is None: coord_names = self._coord_names.copy() if dims is None: dims = self._dims.copy() if attrs is _default: attrs = copy.copy(self._attrs) if indexes is None: indexes = self._indexes.copy() if encoding is _default: encoding = copy.copy(self._encoding) obj = self._construct_direct( variables, coord_names, dims, attrs, indexes, encoding ) return obj def _replace_with_new_dims( self, variables: dict[Hashable, Variable], coord_names: set | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, inplace: bool = False, ) -> Self: """Replace variables with recalculated dimensions.""" dims = calculate_dimensions(variables) return self._replace( variables, coord_names, dims, attrs, indexes, inplace=inplace ) def _replace_vars_and_dims( self, variables: dict[Hashable, Variable], coord_names: set | None = None, dims: dict[Hashable, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, inplace: bool = False, ) -> Self: """Deprecated version of _replace_with_new_dims(). Unlike _replace_with_new_dims(), this method always recalculates indexes from variables. """ if dims is None: dims = calculate_dimensions(variables) return self._replace( variables, coord_names, dims, attrs, indexes=None, inplace=inplace ) def _overwrite_indexes( self, indexes: Mapping[Hashable, Index], variables: Mapping[Hashable, Variable] | None = None, drop_variables: list[Hashable] | None = None, drop_indexes: list[Hashable] | None = None, rename_dims: Mapping[Hashable, Hashable] | None = None, ) -> Self: """Maybe replace indexes. This function may do a lot more depending on index query results. 
""" if not indexes: return self if variables is None: variables = {} if drop_variables is None: drop_variables = [] if drop_indexes is None: drop_indexes = [] new_variables = self._variables.copy() new_coord_names = self._coord_names.copy() new_indexes = dict(self._indexes) index_variables = {} no_index_variables = {} for name, var in variables.items(): old_var = self._variables.get(name) if old_var is not None: var.attrs.update(old_var.attrs) var.encoding.update(old_var.encoding) if name in indexes: index_variables[name] = var else: no_index_variables[name] = var for name in indexes: new_indexes[name] = indexes[name] for name, var in index_variables.items(): new_coord_names.add(name) new_variables[name] = var # append no-index variables at the end for k in no_index_variables: new_variables.pop(k) new_variables.update(no_index_variables) for name in drop_indexes: new_indexes.pop(name) for name in drop_variables: new_variables.pop(name) new_indexes.pop(name, None) new_coord_names.remove(name) replaced = self._replace( variables=new_variables, coord_names=new_coord_names, indexes=new_indexes ) if rename_dims: # skip rename indexes: they should already have the right name(s) dims = replaced._rename_dims(rename_dims) new_variables, new_coord_names = replaced._rename_vars({}, rename_dims) return replaced._replace( variables=new_variables, coord_names=new_coord_names, dims=dims ) else: return replaced def copy(self, deep: bool = False, data: DataVars | None = None) -> Self: """Returns a copy of this dataset. If `deep=True`, a deep copy is made of each of the component variables. Otherwise, a shallow copy of each of the component variable is made, so that the underlying memory region of the new dataset is the same as in the original dataset. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: False Whether each component variable is loaded into memory and copied onto the new object. Default is False. data : dict-like or None, optional Data to use in the new object. Each item in `data` must have same shape as corresponding data variable in original. When `data` is used, `deep` is ignored for the data variables and only used for coords. Returns ------- object : Dataset New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow copy versus deep copy >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset( ... {"foo": da, "bar": ("x", [-1, 2])}, ... coords={"x": ["one", "two"]}, ... ) >>> ds.copy() Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds_0 = ds.copy(deep=False) >>> ds_0["foo"][0, 0] = 7 >>> ds_0 Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds.copy(data={"foo": np.arange(6).reshape(2, 3), "bar": ["a", "b"]}) Size: 80B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: if data is None: data = {} elif not utils.is_dict_like(data): raise ValueError("Data must be dict-like") if data: var_keys = set(self.data_vars.keys()) data_keys = set(data.keys()) keys_not_in_vars = data_keys - var_keys if keys_not_in_vars: raise ValueError( "Data must only contain variables in original " f"dataset. 
Extra variables: {keys_not_in_vars}" ) keys_missing_from_data = var_keys - data_keys if keys_missing_from_data: raise ValueError( "Data must contain all variables in original " f"dataset. Data is missing {keys_missing_from_data}" ) indexes, index_vars = self.xindexes.copy_indexes(deep=deep) variables = {} for k, v in self._variables.items(): if k in index_vars: variables[k] = index_vars[k] else: variables[k] = v._copy(deep=deep, data=data.get(k), memo=memo) attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs) encoding = ( copy.deepcopy(self._encoding, memo) if deep else copy.copy(self._encoding) ) return self._replace(variables, indexes=indexes, attrs=attrs, encoding=encoding) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a Dataset. See also -------- DataArray.as_numpy DataArray.to_numpy : Returns only the data as a numpy.ndarray object. """ numpy_variables = {k: v.as_numpy() for k, v in self.variables.items()} return self._replace(variables=numpy_variables) def _copy_listed(self, names: Iterable[Hashable]) -> Self: """Create a new Dataset with the listed variables from this dataset and the all relevant coordinates. Skips all validation. """ variables: dict[Hashable, Variable] = {} coord_names = set() indexes: dict[Hashable, Index] = {} for name in names: try: variables[name] = self._variables[name] except KeyError: ref_name, var_name, var = _get_virtual_variable( self._variables, name, self.sizes ) variables[var_name] = var if ref_name in self._coord_names or ref_name in self.dims: coord_names.add(var_name) if (var_name,) == var.dims: index, index_vars = create_default_index_implicit(var, names) indexes.update(dict.fromkeys(index_vars, index)) variables.update(index_vars) coord_names.update(index_vars) needed_dims: OrderedSet[Hashable] = OrderedSet() for v in variables.values(): needed_dims.update(v.dims) dims = {k: self.sizes[k] for k in needed_dims} # preserves ordering of coordinates for k in self._variables: if k not in self._coord_names: continue if set(self.variables[k].dims) <= needed_dims: variables[k] = self._variables[k] coord_names.add(k) indexes.update(filter_indexes_from_coords(self._indexes, coord_names)) return self._replace(variables, coord_names, dims, indexes=indexes) def _construct_dataarray(self, name: Hashable) -> DataArray: """Construct a DataArray by indexing this dataset""" from xarray.core.dataarray import DataArray try: variable = self._variables[name] except KeyError: _, name, variable = _get_virtual_variable(self._variables, name, self.sizes) needed_dims = set(variable.dims) coords: dict[Hashable, Variable] = {} # preserve ordering for k in self._variables: if k in self._indexes: add_coord = self._indexes[k].should_add_coord_to_array( k, self._variables[k], needed_dims ) else: var_dims = set(self._variables[k].dims) add_coord = k in self._coord_names and var_dims <= needed_dims if add_coord: coords[k] = self._variables[k] indexes = filter_indexes_from_coords(self._indexes, set(coords)) return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True) @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up 
items for key-completion""" yield self.data_vars yield FilteredMapping(keys=self._coord_names, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.sizes, mapping=self) def __contains__(self, key: object) -> bool: """The 'in' operator will return true or false depending on whether 'key' is an array in the dataset or not. """ return key in self._variables def __len__(self) -> int: return len(self.data_vars) def __bool__(self) -> bool: return bool(self.data_vars) def __iter__(self) -> Iterator[Hashable]: return iter(self.data_vars) if TYPE_CHECKING: # needed because __getattr__ is returning Any and otherwise # this class counts as part of the SupportsArray Protocol __array__ = None # type: ignore[var-annotated,unused-ignore] else: def __array__(self, dtype=None, copy=None): raise TypeError( "cannot directly convert an xarray.Dataset into a " "numpy array. Instead, create an xarray.DataArray " "first, either with indexing on the Dataset or by " "invoking the `to_dataarray()` method." ) @property def nbytes(self) -> int: """ Total bytes consumed by the data arrays of all variables in this dataset. If the backend array for any variable does not include ``nbytes``, estimates the total bytes for that array based on the ``size`` and ``dtype``. """ return sum(v.nbytes for v in self.variables.values()) @property def loc(self) -> _LocIndexer[Self]: """Attribute for location based indexing. Only supports __getitem__, and only when the key is a dict of the form {dim: labels}. """ return _LocIndexer(self) @overload def __getitem__(self, key: Hashable) -> DataArray: ... # Mapping is Iterable @overload def __getitem__(self, key: Iterable[Hashable]) -> Self: ... def __getitem__( self, key: Mapping[Any, Any] | Hashable | Iterable[Hashable] ) -> Self | DataArray: """Access variables or coordinates of this dataset as a :py:class:`~xarray.DataArray` or a subset of variables or a indexed dataset. Indexing with a list of names will return a new ``Dataset`` object. """ from xarray.core.formatting import shorten_list_repr if utils.is_dict_like(key): return self.isel(**key) if utils.hashable(key): try: return self._construct_dataarray(key) except KeyError as e: message = f"No variable named {key!r}." best_guess = utils.did_you_mean(key, self.variables.keys()) if best_guess: message += f" {best_guess}" else: message += f" Variables on the dataset include {shorten_list_repr(list(self.variables.keys()), max_items=10)}" # If someone attempts `ds['foo' , 'bar']` instead of `ds[['foo', 'bar']]` if isinstance(key, tuple): message += f"\nHint: use a list to select multiple variables, for example `ds[{list(key)}]`" raise KeyError(message) from e if utils.iterable_of_hashable(key): return self._copy_listed(key) raise ValueError(f"Unsupported key-type {type(key)}") def __setitem__( self, key: Hashable | Iterable[Hashable] | Mapping, value: Any ) -> None: """Add an array to this dataset. Multiple arrays can be added at the same time, in which case each of the following operations is applied to the respective value. If key is dict-like, update all variables in the dataset one by one with the given value at the given location. If the given value is also a dataset, select corresponding variables in the given value and in the dataset to be changed. If value is a ` from .dataarray import DataArray`, call its `select_vars()` method, rename it to `key` and merge the contents of the resulting dataset into this dataset. 
If value is a `Variable` object (or tuple of form ``(dims, data[, attrs])``), add it to this dataset as a new variable. """ from xarray.core.dataarray import DataArray if utils.is_dict_like(key): # check for consistency and convert value to dataset value = self._setitem_check(key, value) # loop over dataset variables and set new values processed = [] for name, var in self.items(): try: var[key] = value[name] processed.append(name) except Exception as e: if processed: raise RuntimeError( "An error occurred while setting values of the" f" variable '{name}'. The following variables have" f" been successfully updated:\n{processed}" ) from e else: raise e elif utils.hashable(key): if isinstance(value, Dataset): raise TypeError( "Cannot assign a Dataset to a single key - only a DataArray or Variable " "object can be stored under a single key." ) self.update({key: value}) elif utils.iterable_of_hashable(key): keylist = list(key) if len(keylist) == 0: raise ValueError("Empty list of variables to be set") if len(keylist) == 1: self.update({keylist[0]: value}) else: if len(keylist) != len(value): raise ValueError( f"Different lengths of variables to be set " f"({len(keylist)}) and data used as input for " f"setting ({len(value)})" ) if isinstance(value, Dataset): self.update( dict(zip(keylist, value.data_vars.values(), strict=True)) ) elif isinstance(value, DataArray): raise ValueError("Cannot assign single DataArray to multiple keys") else: self.update(dict(zip(keylist, value, strict=True))) else: raise ValueError(f"Unsupported key-type {type(key)}") def _setitem_check(self, key, value): """Consistency check for __setitem__ When assigning values to a subset of a Dataset, do consistency check beforehand to avoid leaving the dataset in a partially updated state when an error occurs. """ from xarray.core.dataarray import DataArray if isinstance(value, Dataset): missing_vars = [ name for name in value.data_vars if name not in self.data_vars ] if missing_vars: raise ValueError( f"Variables {missing_vars} in new values" f" not available in original dataset:\n{self}" ) elif not any(isinstance(value, t) for t in [DataArray, Number, str]): raise TypeError( "Dataset assignment only accepts DataArrays, Datasets, and scalars." 
) new_value = Dataset() for name, var in self.items(): # test indexing try: var_k = var[key] except Exception as e: raise ValueError( f"Variable '{name}': indexer {key} not available" ) from e if isinstance(value, Dataset): val = value[name] else: val = value if isinstance(val, DataArray): # check consistency of dimensions for dim in val.dims: if dim not in var_k.dims: raise KeyError( f"Variable '{name}': dimension '{dim}' appears in new values " f"but not in the indexed original data" ) dims = tuple(dim for dim in var_k.dims if dim in val.dims) if dims != val.dims: raise ValueError( f"Variable '{name}': dimension order differs between" f" original and new data:\n{dims}\nvs.\n{val.dims}" ) else: val = np.array(val) # type conversion new_value[name] = duck_array_ops.astype(val, dtype=var_k.dtype, copy=False) # check consistency of dimension sizes and dimension coordinates if isinstance(value, DataArray | Dataset): align(self[key], value, join="exact", copy=False) return new_value def __delitem__(self, key: Hashable) -> None: """Remove a variable from this dataset.""" assert_no_index_corrupted(self.xindexes, {key}) if key in self._indexes: del self._indexes[key] del self._variables[key] self._coord_names.discard(key) self._dims = calculate_dimensions(self._variables) # mutable objects should not be hashable # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore[assignment] def _all_compat( self, other: Self, compat: str | Callable[[Variable, Variable], bool] ) -> bool: """Helper function for equals and identical""" if not callable(compat): compat_str = compat # some stores (e.g., scipy) do not seem to preserve order, so don't # require matching order for equality def compat(x: Variable, y: Variable) -> bool: return getattr(x, compat_str)(y) return self._coord_names == other._coord_names and utils.dict_equiv( self._variables, other._variables, compat=compat ) def broadcast_equals(self, other: Self) -> bool: """Two Datasets are broadcast equal if they are equal after broadcasting all variables against each other. For example, variables that are scalar in one dataset but non-scalar in the other dataset can still be broadcast equal if the the non-scalar variable is a constant. Examples -------- # 2D array with shape (1, 3) >>> data = np.array([[1, 2, 3]]) >>> a = xr.Dataset( ... {"variable_name": (("space", "time"), data)}, ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> a Size: 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 * time (time) int64 24B 0 1 2 Data variables: variable_name (space, time) int64 24B 1 2 3 # 2D array with shape (3, 1) >>> data = np.array([[1], [2], [3]]) >>> b = xr.Dataset( ... {"variable_name": (("time", "space"), data)}, ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> b Size: 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 * space (space) int64 8B 0 Data variables: variable_name (time, space) int64 24B 1 2 3 .equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates. 
>>> a.equals(b) False >>> a.broadcast_equals(b) True >>> a2, b2 = xr.broadcast(a, b) >>> a2.equals(b2) True See Also -------- Dataset.equals Dataset.identical Dataset.broadcast """ try: return self._all_compat(other, "broadcast_equals") except (TypeError, AttributeError): return False def equals(self, other: Self) -> bool: """Two Datasets are equal if they have matching variables and coordinates, all of which are equal. Datasets can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for ``Dataset`` does element-wise comparisons (like numpy.ndarrays). Examples -------- # 2D array with shape (1, 3) >>> data = np.array([[1, 2, 3]]) >>> dataset1 = xr.Dataset( ... {"variable_name": (("space", "time"), data)}, ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> dataset1 Size: 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 * time (time) int64 24B 0 1 2 Data variables: variable_name (space, time) int64 24B 1 2 3 # 2D array with shape (3, 1) >>> data = np.array([[1], [2], [3]]) >>> dataset2 = xr.Dataset( ... {"variable_name": (("time", "space"), data)}, ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> dataset2 Size: 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 * space (space) int64 8B 0 Data variables: variable_name (time, space) int64 24B 1 2 3 >>> dataset1.equals(dataset2) False >>> dataset1.broadcast_equals(dataset2) True .equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates. Similar for missing values too: >>> ds1 = xr.Dataset( ... { ... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]), ... }, ... coords={"x": [0, 1], "y": [0, 1]}, ... ) >>> ds2 = xr.Dataset( ... { ... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]), ... }, ... coords={"x": [0, 1], "y": [0, 1]}, ... ) >>> ds1.equals(ds2) True See Also -------- Dataset.broadcast_equals Dataset.identical """ try: return self._all_compat(other, "equals") except (TypeError, AttributeError): return False def identical(self, other: Self) -> bool: """Like equals, but also checks all dataset attributes and the attributes on all variables and coordinates. Example ------- >>> a = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "m"}, ... ) >>> b = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "m"}, ... ) >>> c = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "ft"}, ... ) >>> a Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: m >>> b Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: m >>> c Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: ft >>> a.equals(b) True >>> a.identical(b) True >>> a.equals(c) True >>> a.identical(c) False See Also -------- Dataset.broadcast_equals Dataset.equals """ try: return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat( other, "identical" ) except (TypeError, AttributeError): return False @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. 
Raises an error if this Dataset has indexes that cannot be coerced to pandas.Index objects. See Also -------- Dataset.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. """ return Indexes(self._indexes, {k: self._variables[k] for k in self._indexes}) @property def coords(self) -> DatasetCoordinates: """Mapping of :py:class:`~xarray.DataArray` objects corresponding to coordinate variables. See Also -------- Coordinates """ return DatasetCoordinates(self) @property def data_vars(self) -> DataVariables: """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self) def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: """Given names of one or more variables, set them as coordinates Parameters ---------- names : hashable or iterable of hashable Name(s) of variables in this dataset to convert into coordinates. Examples -------- >>> dataset = xr.Dataset( ... { ... "pressure": ("time", [1.013, 1.2, 3.5]), ... "time": pd.date_range("2023-01-01", periods=3), ... } ... ) >>> dataset Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 Data variables: pressure (time) float64 24B 1.013 1.2 3.5 >>> dataset.set_coords("pressure") Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 pressure (time) float64 24B 1.013 1.2 3.5 Data variables: *empty* On calling ``set_coords`` , these data variables are converted to coordinates, as shown in the final dataset. Returns ------- Dataset See Also -------- Dataset.swap_dims Dataset.assign_coords """ # TODO: allow inserting new coordinates with this method, like # DataFrame.set_index? # nb. check in self._variables, not self.data_vars to insure that the # operation is idempotent if isinstance(names, str) or not isinstance(names, Iterable): names = [names] else: names = list(names) self._assert_all_in_dataset(names) obj = self.copy() obj._coord_names.update(names) return obj def reset_coords( self, names: Dims = None, drop: bool = False, ) -> Self: """Given names of coordinates, reset them to become variables Parameters ---------- names : str, Iterable of Hashable or None, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, default: False If True, remove coordinates instead of converting them into variables. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "lat", "lon"], ... [[[25, 26], [27, 28]], [[29, 30], [31, 32]]], ... ), ... "precipitation": ( ... ["time", "lat", "lon"], ... [[[0.5, 0.8], [0.2, 0.4]], [[0.3, 0.6], [0.7, 0.9]]], ... ), ... }, ... coords={ ... "time": pd.date_range(start="2023-01-01", periods=2), ... "lat": [40, 41], ... "lon": [-80, -79], ... "altitude": 1000, ... }, ... 
) # Dataset before resetting coordinates >>> dataset Size: 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 * lat (lat) int64 16B 40 41 * lon (lon) int64 16B -80 -79 altitude int64 8B 1000 Data variables: temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 # Reset the 'altitude' coordinate >>> dataset_reset = dataset.reset_coords("altitude") # Dataset after resetting coordinates >>> dataset_reset Size: 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 * lat (lat) int64 16B 40 41 * lon (lon) int64 16B -80 -79 Data variables: temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 altitude int64 8B 1000 Returns ------- Dataset See Also -------- Dataset.set_coords """ if names is None: names = self._coord_names - set(self._indexes) else: if isinstance(names, str) or not isinstance(names, Iterable): names = [names] else: names = list(names) self._assert_all_in_dataset(names) bad_coords = set(names) & set(self._indexes) if bad_coords: raise ValueError( f"cannot remove index coordinates with reset_coords: {bad_coords}" ) obj = self.copy() obj._coord_names.difference_update(names) if drop: for name in names: del obj._variables[name] return obj def dump_to_store(self, store: AbstractDataStore, **kwargs) -> None: """Store dataset contents to a backends.*DataStore object.""" from xarray.backends.writers import dump_to_store # TODO: rename and/or cleanup this method to make it more consistent # with to_netcdf() dump_to_store(self, store, **kwargs) # path=None writes to bytes @overload def to_netcdf( self, path: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... # default return None @overload def to_netcdf( self, path: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... 
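# Illustrative usage sketch for the overloads above; the file name is
# hypothetical, and the ``compute=False`` form additionally requires dask:
#
#     ds.to_netcdf("out.nc")                             # write to disk -> None
#     buf = ds.to_netcdf()                               # no path -> in-memory ``memoryview``
#     delayed = ds.to_netcdf("out.nc", compute=False)    # deferred write -> ``dask.delayed.Delayed``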
def to_netcdf( self, path: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview | Delayed | None: """Write dataset contents to a netCDF file. Parameters ---------- path : str, path-like, file-like or None, optional Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None (default) to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. engine : {"netcdf4", "h5netcdf", "scipy"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, by default preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}``. If ``encoding`` is specified the original encoding of the variables of the dataset is ignored. The `h5netcdf` engine supports both the NetCDF4-style compression encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py ones ``{"compression": "gzip", "compression_opts": 9}``. This allows using any compression plugin installed in the HDF5 library, e.g. LZF. unlimited_dims : iterable of hashable, optional Dimension(s) that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. invalid_netcdf: bool, default: False Only valid along with ``engine="h5netcdf"``. If True, allow writing hdf5 files which are invalid netcdf as described in https://github.com/h5netcdf/h5netcdf. 
Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * ``None`` otherwise See Also -------- DataArray.to_netcdf """ if encoding is None: encoding = {} from xarray.backends.writers import to_netcdf return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( self, path, mode=mode, format=format, group=group, engine=engine, encoding=encoding, unlimited_dims=unlimited_dims, compute=compute, multifile=False, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) # compute=True (default) returns ZarrStore @overload def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore: ... # compute=False returns dask.Delayed @overload def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( self, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Write dataset contents to a zarr group. Zarr chunks are determined in the following way: - From the ``chunks`` attribute in each variable's ``encoding`` (can be set via `Dataset.chunk`). - If the variable is a Dask array, from the dask chunks - If neither Dask chunks nor encoding chunks are present, chunks will be determined automatically by Zarr - If both Dask chunks and encoding chunks are present, encoding chunks will be used, provided that there is a many-to-one relationship between encoding chunks and dask chunks (i.e. Dask chunks are bigger than and evenly divide encoding chunks); otherwise raise a ``ValueError``. This restriction ensures that no synchronization / locks are required when writing. To disable this restriction, use ``safe_chunks=False``. 
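As a minimal sketch of these rules (the variable name ``"foo"``, the store path and the chunk sizes below are only illustrative)::

    ds = ds.chunk({"time": 100})  # dask chunks of length 100
    # zarr chunks of length 50: each dask chunk covers exactly two zarr chunks,
    # which satisfies the many-to-one requirement described above
    ds.to_zarr("out.zarr", encoding={"foo": {"chunks": (50,)}})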
Parameters ---------- store : zarr.storage.StoreLike, optional Store or path to directory in local or remote file system. chunk_store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system only for Zarr array chunks. Requires zarr-python v2.4.0 or later. mode : {"w", "w-", "a", "a-", r+", None}, optional Persistence mode: "w" means create (overwrite if exists); "w-" means create (fail if exists); "a" means override all existing variables including dimension coordinates (create if does not exist); "a-" means only append those variables that have ``append_dim``. "r+" means modify existing array *values* only (raise an error if any metadata or shapes would change). The default mode is "a" if ``append_dim`` is set. Otherwise, it is "r+" if ``region`` is set and ``w-`` otherwise. synchronizer : object, optional Zarr array synchronizer. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}`` compute : bool, default: True If True write array data immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed to write array data later. Metadata is always updated eagerly. consolidated : bool, optional If True, apply :func:`zarr.convenience.consolidate_metadata` after writing metadata and read existing stores with consolidated metadata; if False, do not. The default (`consolidated=None`) means write consolidated metadata and attempt to read consolidated metadata for existing stores (falling back to non-consolidated). When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. append_dim : hashable, optional If set, the dimension along which the data will be appended. All other dimensions on overridden variables must remain the same size. region : dict or "auto", optional Optional mapping from dimension names to either a) ``"auto"``, or b) integer slices, indicating the region of existing zarr array(s) in which to write this dataset's data. If ``"auto"`` is provided the existing store will be opened and the region inferred by matching indexes. ``"auto"`` can be used as a single string, which will automatically infer the region for all dimensions, or as dictionary values for specific dimensions mixed together with explicit slices for other dimensions. Alternatively integer slices can be provided; for example, ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate that values should be written to the region ``0:1000`` along ``x`` and ``10000:11000`` along ``y``. Two restrictions apply to the use of ``region``: - If ``region`` is set, _all_ variables in a dataset must have at least one dimension in common with the region. Other variables should be written in a separate single call to ``to_zarr()``. - Dimensions cannot be included in both ``region`` and ``append_dim`` at the same time. To create empty arrays to fill in with ``region``, use a separate call to ``to_zarr()`` with ``compute=False``. See "Modifying existing Zarr stores" in the reference documentation for full details. Users are expected to ensure that the specified region aligns with Zarr chunk boundaries, and that dask chunks are also aligned. Xarray makes limited checks that these multiple chunk boundaries line up. 
It is possible to write incomplete chunks and corrupt the data with this option if you are not careful. safe_chunks : bool, default: True If True, only allow writes to when there is a many-to-one relationship between Zarr chunks (specified in encoding) and Dask chunks. Set False to override this restriction; however, data may become corrupted if Zarr arrays are written in parallel. This option may be useful in combination with ``compute=False`` to initialize a Zarr from an existing Dataset with arbitrary chunk structure. In addition to the many-to-one relationship validation, it also detects partial chunks writes when using the region parameter, these partial chunks are considered unsafe in the mode "r+" but safe in the mode "a". Note: Even with these validations it can still be unsafe to write two or more chunked arrays in the same location in parallel if they are not writing in independent regions, for those cases it is better to use a synchronizer. align_chunks: bool, default False If True, rechunks the Dask array to align with Zarr chunks before writing. This ensures each Dask chunk maps to one or more contiguous Zarr chunks, which avoids race conditions. Internally, the process sets safe_chunks=False and tries to preserve the original Dask chunking as much as possible. Note: While this alignment avoids write conflicts stemming from chunk boundary misalignment, it does not protect against race conditions if multiple uncoordinated processes write to the same Zarr array concurrently. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. write_empty_chunks : bool or None, optional If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. If None (default) fall back to specification(s) in ``encoding`` or Zarr defaults. A ``ValueError`` will be raised if the value of this (if not None) differs with ``encoding``. chunkmanager_store_kwargs : dict, optional Additional keyword arguments passed on to the `ChunkManager.store` method used to store chunked arrays. For example for a dask array additional kwargs will be passed eventually to :py:func:`dask.array.store()`. Experimental API that should not be relied upon. Returns ------- * ``dask.delayed.Delayed`` if compute is False * ZarrStore otherwise References ---------- https://zarr.readthedocs.io/ Notes ----- Zarr chunking behavior: If chunks are found in the encoding argument or attribute corresponding to any DataArray, those chunks are used. If a DataArray is a dask array, it is written with those chunks. If not other chunks are found, Zarr uses its own heuristics to choose automatic chunk sizes. encoding: The encoding attribute (if exists) of the DataArray(s) will be used. Override any existing encodings by providing the ``encoding`` kwarg. 
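For example, a per-variable encoding passed here takes precedence over anything already stored in that variable's ``encoding`` attribute (the variable name and values below are only illustrative)::

    ds.to_zarr("out.zarr", encoding={"foo": {"dtype": "int16", "scale_factor": 0.1}})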
``fill_value`` handling: There exists a subtlety in interpreting zarr's ``fill_value`` property. For Zarr v2 format arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used to mask the data if requested using ``mask_and_scale=True``. To customize the fill value Zarr uses as a default for unwritten chunks on disk, set ``_FillValue`` in encoding for Zarr v2 or ``fill_value`` for Zarr v3. See this `Github issue `_ for more. See Also -------- :ref:`io.zarr` The I/O user guide, with more details and examples. """ from xarray.backends.writers import to_zarr return to_zarr( # type: ignore[call-overload,misc] self, store=store, chunk_store=chunk_store, storage_options=storage_options, mode=mode, synchronizer=synchronizer, group=group, encoding=encoding, compute=compute, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, chunkmanager_store_kwargs=chunkmanager_store_kwargs, ) def __repr__(self) -> str: return formatting.dataset_repr(self) def _repr_html_(self) -> str: if OPTIONS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return formatting_html.dataset_repr(self) def info(self, buf: IO | None = None) -> None: """ Concise summary of a Dataset variables and attributes. Parameters ---------- buf : file-like, default: sys.stdout writable buffer See Also -------- pandas.DataFrame.assign ncdump : netCDF's ncdump """ if buf is None: # pragma: no cover buf = sys.stdout lines = [ "xarray.Dataset {", "dimensions:", ] for name, size in self.sizes.items(): lines.append(f"\t{name} = {size} ;") lines.append("\nvariables:") for name, da in self.variables.items(): dims = ", ".join(map(str, da.dims)) lines.append(f"\t{da.dtype} {name}({dims}) ;") for k, v in da.attrs.items(): lines.append(f"\t\t{name}:{k} = {v} ;") lines.append("\n// global attributes:") for k, v in self.attrs.items(): lines.append(f"\t:{k} = {v} ;") lines.append("}") buf.write("\n".join(lines)) @property def chunks(self) -> Mapping[Hashable, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataset's data. If this dataset does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Same as Dataset.chunksizes, but maintained for backwards compatibility. See Also -------- Dataset.chunk Dataset.chunksizes xarray.unify_chunks """ return get_chunksizes(self.variables.values()) @property def chunksizes(self) -> Mapping[Hashable, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataset's data. If this dataset does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Same as Dataset.chunks. See Also -------- Dataset.chunk Dataset.chunks xarray.unify_chunks """ return get_chunksizes(self.variables.values()) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce all arrays in this dataset into dask arrays with the given chunks. Non-dask arrays in this dataset will be converted to dask arrays. Dask arrays will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a :py:class:`Resampler` object (e.g. :py:class:`groupers.TimeResampler` or :py:class:`groupers.SeasonResampler`) is also accepted. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or a Resampler, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}`` or ``{"time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}``. name_prefix : str, default: "xarray-" Prefix for the name of any new dask arrays. token : str, optional Token uniquely identifying this dataset. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. 
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided Returns ------- chunked : xarray.Dataset See Also -------- Dataset.chunks Dataset.chunksizes xarray.unify_chunks dask.array.from_array """ from xarray.groupers import Resampler if chunks is None and not chunks_kwargs: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=DeprecationWarning, stacklevel=2, ) chunks = {} chunks_mapping: Mapping[Any, Any] if not isinstance(chunks, Mapping) and chunks is not None: if isinstance(chunks, tuple | list): utils.emit_user_level_warning( "Supplying chunks as dimension-order tuples is deprecated. " "It will raise an error in the future. Instead use a dict with dimensions as keys.", category=DeprecationWarning, ) chunks_mapping = dict.fromkeys(self.dims, chunks) else: chunks_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") bad_dims = chunks_mapping.keys() - self.sizes.keys() if bad_dims: raise ValueError( f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(self.sizes.keys())}" ) def _resolve_resampler(name: Hashable, resampler: Resampler) -> tuple[int, ...]: variable = self._variables.get(name, None) if variable is None: raise ValueError( f"Cannot chunk by resampler {resampler!r} for virtual variable {name!r}." ) if variable.ndim != 1: raise ValueError( f"chunks={resampler!r} only supported for 1D variables. " f"Received variable {name!r} with {variable.ndim} dimensions instead." ) newchunks = resampler.compute_chunks(variable, dim=name) if sum(newchunks) != variable.shape[0]: raise ValueError( f"Logic bug in rechunking variable {name!r} using {resampler!r}. " "New chunks tuple does not match size of data. Please open an issue." 
) return newchunks chunks_mapping_ints: Mapping[Any, T_ChunkDim] = { name: ( _resolve_resampler(name, chunks) if isinstance(chunks, Resampler) else chunks ) for name, chunks in chunks_mapping.items() } chunkmanager = guess_chunkmanager(chunked_array_type) if from_array_kwargs is None: from_array_kwargs = {} variables = { k: _maybe_chunk( k, v, chunks_mapping_ints, token, lock, name_prefix, inline_array=inline_array, chunked_array_type=chunkmanager, from_array_kwargs=from_array_kwargs.copy(), ) for k, v in self.variables.items() } return self._replace(variables) def _validate_indexers( self, indexers: Mapping[Any, Any], missing_dims: ErrorOptionsWithWarn = "raise" ) -> Iterator[tuple[Hashable, int | slice | np.ndarray | Variable]]: """Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the associated index is a DatetimeIndex or CFTimeIndex """ from xarray.core.dataarray import DataArray indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) # all indexers should be int, slice, np.ndarrays, or Variable for k, v in indexers.items(): if isinstance(v, int | slice | Variable) and not isinstance(v, bool): yield k, v elif isinstance(v, DataArray): yield k, v.variable elif isinstance(v, tuple): yield k, as_variable(v) elif isinstance(v, Dataset): raise TypeError("cannot use a Dataset as an indexer") elif isinstance(v, Sequence) and len(v) == 0: yield k, np.empty((0,), dtype="int64") else: if not is_duck_array(v): v = np.asarray(v) if v.dtype.kind in "US": index = self._indexes[k].to_pandas_index() if isinstance(index, pd.DatetimeIndex): v = duck_array_ops.astype(v, dtype="datetime64[ns]") elif isinstance(index, CFTimeIndex): v = _parse_array_of_cftime_strings(v, index.date_type) if v.ndim > 1: raise IndexError( "Unlabeled multi-dimensional array cannot be " f"used for indexing: {k}" ) yield k, v def _validate_interp_indexers( self, indexers: Mapping[Any, Any] ) -> Iterator[tuple[Hashable, Variable]]: """Variant of _validate_indexers to be used for interpolation""" for k, v in self._validate_indexers(indexers): if isinstance(v, Variable): yield k, v elif is_scalar(v): yield k, Variable((), v, attrs=self.coords[k].attrs) elif isinstance(v, np.ndarray): yield k, Variable(dims=(k,), data=v, attrs=self.coords[k].attrs) else: raise TypeError(type(v)) def _get_indexers_coords_and_indexes(self, indexers): """Extract coordinates and indexes from indexers. Only coordinate with a name different from any of self.variables will be attached. """ from xarray.core.dataarray import DataArray coords_list = [] for k, v in indexers.items(): if isinstance(v, DataArray): if v.dtype.kind == "b": if v.ndim != 1: # we only support 1-d boolean array raise ValueError( f"{v.ndim:d}d-boolean array is used for indexing along " f"dimension {k!r}, but only 1d boolean arrays are " "supported." ) # Make sure in case of boolean DataArray, its # coordinate also should be indexed. v_coords = v[v.values.nonzero()[0]].coords else: v_coords = v.coords coords_list.append(v_coords) # we don't need to call align() explicitly or check indexes for # alignment, because merge_variables already checks for exact alignment # between dimension coordinates coords, indexes = merge_coordinates_without_align(coords_list) assert_coordinate_consistent(self, coords) # silently drop the conflicted variables. 
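# Keep only the indexer coordinates (and their indexes) whose names are not
# already taken by a variable on this dataset; colliding names are dropped
# here without raising.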
attached_coords = {k: v for k, v in coords.items() if k not in self._variables} attached_indexes = { k: v for k, v in indexes.items() if k not in self._variables } return attached_coords, attached_indexes def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with each array indexed along the specified dimension(s). This method selects values from each array using its `__getitem__` method, except this method does not require knowing the order of each array's dimensions. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be a integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each array and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... 
) # A specific element from the dataset is selected >>> dataset.isel(student=1, test=0) Size: 68B Dimensions: () Coordinates: student >> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2)) >>> slice_of_data Size: 168B Dimensions: (student: 2, test: 2) Coordinates: * student (student) >> index_array = xr.DataArray([0, 2], dims="student") >>> indexed_data = dataset.isel(student=index_array) >>> indexed_data Size: 224B Dimensions: (student: 2, test: 3) Coordinates: * student (student) ` :func:`DataArray.isel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") if any(is_fancy_indexer(idx) for idx in indexers.values()): return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims) # Much faster algorithm for when all indexers are ints, slices, one-dimensional # lists, or zero or one-dimensional np.ndarray's indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) variables = {} dims: dict[Hashable, int] = {} coord_names = self._coord_names.copy() indexes, index_variables = isel_indexes(self.xindexes, indexers) for name, var in self._variables.items(): # preserve variable order if name in index_variables: var = index_variables[name] else: var_indexers = {k: v for k, v in indexers.items() if k in var.dims} if var_indexers: var = var.isel(var_indexers) if drop and var.ndim == 0 and name in coord_names: coord_names.remove(name) continue variables[name] = var dims.update(zip(var.dims, var.shape, strict=True)) return self._construct_direct( variables=variables, coord_names=coord_names, dims=dims, attrs=self._attrs, indexes=indexes, encoding=self._encoding, close=self._close, ) def _isel_fancy( self, indexers: Mapping[Any, Any], *, drop: bool, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: valid_indexers = dict(self._validate_indexers(indexers, missing_dims)) variables: dict[Hashable, Variable] = {} indexes, index_variables = isel_indexes(self.xindexes, valid_indexers) for name, var in self.variables.items(): if name in index_variables: new_var = index_variables[name] else: var_indexers = { k: v for k, v in valid_indexers.items() if k in var.dims } if var_indexers: new_var = var.isel(indexers=var_indexers) # drop scalar coordinates # https://github.com/pydata/xarray/issues/6554 if name in self.coords and drop and new_var.ndim == 0: continue else: new_var = var.copy(deep=False) if name not in indexes: new_var = new_var.to_base_variable() variables[name] = new_var coord_names = self._coord_names & variables.keys() selected = self._replace_with_new_dims(variables, coord_names, indexes) # Extract coordinates from indexers coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers) variables.update(coord_vars) indexes.update(new_indexes) coord_names = self._coord_names & variables.keys() | coord_vars.keys() return self._replace_with_new_dims(variables, coord_names, indexes=indexes) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with each array indexed by tick labels along the specified dimension(s). In contrast to `Dataset.isel`, indexers for this method should use labels instead of integers. 
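As a brief sketch (the coordinate names and labels below are illustrative)::

    ds.sel(time="2000-01")                          # partial datetime string
    ds.sel(time=slice("2000-01-01", "2000-03-31"))  # both endpoints are included
    ds.sel(x=[10.1, 20.2], method="nearest")        # inexact matches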
Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each variable and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- :func:`Dataset.isel ` :func:`DataArray.sel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel") query_results = map_index_queries( self, indexers=indexers, method=method, tolerance=tolerance ) if drop: no_scalar_variables = {} for k, v in query_results.variables.items(): if v.dims: no_scalar_variables[k] = v elif k in self._coord_names: query_results.drop_coords.append(k) query_results.variables = no_scalar_variables result = self.isel(indexers=query_results.dim_indexers, drop=drop) return result._overwrite_indexes(*query_results.as_tuple()[1:]) def _shuffle(self, dim, *, indices: GroupIndices, chunks: T_Chunks) -> Self: # Shuffling is only different from `isel` for chunked arrays. # Extract them out, and treat them specially. The rest, we route through isel. # This makes it easy to ensure correct handling of indexes. 
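# Split the variables into chunked and non-chunked: the non-chunked subset is
# reordered with a single ``isel`` over the concatenated group indices (after
# expanding any slices), while each chunked variable goes through
# ``Variable._shuffle`` so the resulting chunk layout can respect the
# requested ``chunks``.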
is_chunked = { name: var for name, var in self._variables.items() if is_chunked_array(var._data) } subset = self[[name for name in self._variables if name not in is_chunked]] no_slices: list[list[int]] = [ ( list(range(*idx.indices(self.sizes[dim]))) if isinstance(idx, slice) else idx ) for idx in indices ] no_slices = [idx for idx in no_slices if idx] shuffled = ( subset if dim not in subset.dims else subset.isel({dim: np.concatenate(no_slices)}) ) for name, var in is_chunked.items(): shuffled[name] = var._shuffle( indices=no_slices, dim=dim, chunks=chunks, ) return shuffled def head( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with the first `n` values of each array for the specified dimension(s). Parameters ---------- indexers : dict or int, default: 5 A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> dates = pd.date_range(start="2023-01-01", periods=5) >>> pageviews = [1200, 1500, 900, 1800, 2000] >>> visitors = [800, 1000, 600, 1200, 1500] >>> dataset = xr.Dataset( ... { ... "pageviews": (("date"), pageviews), ... "visitors": (("date"), visitors), ... }, ... coords={"date": dates}, ... ) >>> busiest_days = dataset.sortby("pageviews", ascending=False) >>> busiest_days.head() Size: 120B Dimensions: (date: 5) Coordinates: * date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 2023-01-03 Data variables: pageviews (date) int64 40B 2000 1800 1500 1200 900 visitors (date) int64 40B 1500 1200 1000 800 600 # Retrieve the 3 most busiest days in terms of pageviews >>> busiest_days.head(3) Size: 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 Data variables: pageviews (date) int64 24B 2000 1800 1500 visitors (date) int64 24B 1500 1200 1000 # Using a dictionary to specify the number of elements for specific dimensions >>> busiest_days.head({"date": 3}) Size: 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 Data variables: pageviews (date) int64 24B 2000 1800 1500 visitors (date) int64 24B 1500 1200 1000 See Also -------- Dataset.tail Dataset.thin DataArray.head """ if not indexers_kwargs: if indexers is None: indexers = 5 if not isinstance(indexers, int) and not is_dict_like(indexers): raise TypeError("indexers must be either dict-like or a single integer") if isinstance(indexers, int): indexers = dict.fromkeys(self.dims, indexers) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head") for k, v in indexers.items(): if not isinstance(v, int): raise TypeError( "expected integer type indexer for " f"dimension {k!r}, found {type(v)!r}" ) elif v < 0: raise ValueError( "expected positive integer as indexer " f"for dimension {k!r}, found {v}" ) indexers_slices = {k: slice(val) for k, val in indexers.items()} return self.isel(indexers_slices) def tail( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with the last `n` values of each array for the specified dimension(s). Parameters ---------- indexers : dict or int, default: 5 A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. 
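# --- Editorial sketch, not part of the upstream xarray source: ``Dataset.head``
# reduces to the positional slicing shown in the implementation above, i.e.
# ``isel`` with ``slice(n)`` along each requested dimension. "_ds" is hypothetical.
import xarray as xr

_ds = xr.Dataset({"a": ("x", list(range(10)))})
assert _ds.head(x=3).identical(_ds.isel(x=slice(3)))
assert _ds.head(3).identical(_ds.isel(x=slice(3)))  # a bare int applies to every dimension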
One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> activity_names = ["Walking", "Running", "Cycling", "Swimming", "Yoga"] >>> durations = [30, 45, 60, 45, 60] # in minutes >>> energies = [150, 300, 250, 400, 100] # in calories >>> dataset = xr.Dataset( ... { ... "duration": (["activity"], durations), ... "energy_expenditure": (["activity"], energies), ... }, ... coords={"activity": activity_names}, ... ) >>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False) >>> sorted_dataset Size: 240B Dimensions: (activity: 5) Coordinates: * activity (activity) >> sorted_dataset.tail(3) Size: 144B Dimensions: (activity: 3) Coordinates: * activity (activity) >> sorted_dataset.tail({"activity": 3}) Size: 144B Dimensions: (activity: 3) Coordinates: * activity (activity) Self: """Returns a new dataset with each array indexed along every `n`-th value for the specified dimension(s) Parameters ---------- indexers : dict or int A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> x_arr = np.arange(0, 26) >>> x_arr array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]) >>> x = xr.DataArray( ... np.reshape(x_arr, (2, 13)), ... dims=("x", "y"), ... coords={"x": [0, 1], "y": np.arange(0, 13)}, ... ) >>> x_ds = xr.Dataset({"foo": x}) >>> x_ds Size: 328B Dimensions: (x: 2, y: 13) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 Data variables: foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25 >>> x_ds.thin(3) Size: 88B Dimensions: (x: 1, y: 5) Coordinates: * x (x) int64 8B 0 * y (y) int64 40B 0 3 6 9 12 Data variables: foo (x, y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) Size: 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 24B 0 5 10 See Also -------- Dataset.head Dataset.tail DataArray.thin """ if ( not indexers_kwargs and not isinstance(indexers, int) and not is_dict_like(indexers) ): raise TypeError("indexers must be either dict-like or a single integer") if isinstance(indexers, int): indexers = dict.fromkeys(self.dims, indexers) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin") for k, v in indexers.items(): if not isinstance(v, int): raise TypeError( "expected integer type indexer for " f"dimension {k!r}, found {type(v)!r}" ) elif v < 0: raise ValueError( "expected positive integer as indexer " f"for dimension {k!r}, found {v}" ) elif v == 0: raise ValueError("step cannot be zero") indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()} return self.isel(indexers_slices) def broadcast_like( self, other: T_DataArrayOrSet, exclude: Iterable[Hashable] | None = None, ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1] Parameters ---------- other : Dataset or DataArray Object against which to broadcast this array. 
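# --- Editorial sketch, not part of the upstream xarray source: ``Dataset.thin``
# keeps every n-th value (``slice(None, None, n)`` in the implementation above),
# while ``Dataset.tail`` keeps the last n values per its docstring. "_ds" is
# hypothetical.
import xarray as xr

_ds = xr.Dataset({"a": ("x", list(range(10)))}, coords={"x": list(range(10))})
assert _ds.thin(x=3).identical(_ds.isel(x=slice(None, None, 3)))
assert _ds.tail(x=3).identical(_ds.isel(x=slice(-3, None)))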
exclude : iterable of hashable, optional Dimensions that must not be broadcasted """ if exclude is None: exclude = set() else: exclude = set(exclude) args = align(other, self, join="outer", copy=False, exclude=exclude) dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) return _broadcast_helper(args[1], exclude, dims_map, common_coords) def _reindex_callback( self, aligner: alignment.Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: """Callback called from ``Aligner`` to create a new reindexed Dataset.""" new_variables = variables.copy() new_indexes = indexes.copy() # re-assign variable metadata for name, new_var in new_variables.items(): var = self._variables.get(name) if var is not None: new_var.attrs = var.attrs new_var.encoding = var.encoding # pass through indexes from excluded dimensions # no extra check needed for multi-coordinate indexes, potential conflicts # should already have been detected when aligning the indexes for name, idx in self._indexes.items(): var = self._variables[name] if set(var.dims) <= exclude_dims: new_indexes[name] = idx new_variables[name] = var if not dim_pos_indexers: # fast path for no reindexing necessary if set(new_indexes) - set(self._indexes): # this only adds new indexes and their coordinate variables reindexed = self._overwrite_indexes(new_indexes, new_variables) else: reindexed = self.copy(deep=aligner.copy) else: to_reindex = { k: v for k, v in self.variables.items() if k not in variables and k not in exclude_vars } reindexed_vars = alignment.reindex_variables( to_reindex, dim_pos_indexers, copy=aligner.copy, fill_value=fill_value, sparse=aligner.sparse, ) new_variables.update(reindexed_vars) new_coord_names = self._coord_names | set(new_indexes) reindexed = self._replace_with_new_dims( new_variables, new_coord_names, indexes=new_indexes ) reindexed.encoding = self.encoding return reindexed def reindex_like( self, other: T_Xarray, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, ) -> Self: """ Conform this object onto the indexes of another object, for indexes which the objects share. Missing values are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mismatched index values will be filled in with NaN, and any mismatched dimension names will simply be ignored. method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional Method to use for filling index values from other not found in this dataset: - None (default): don't fill gaps - "pad" / "ffill": propagate last valid index value forward - "backfill" / "bfill": propagate next valid index value backward - "nearest": use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. 
Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the indexโ€™s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like maps variable names to fill values. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but coordinates from the other object. See Also -------- Dataset.reindex DataArray.reindex_like align """ return alignment.reindex_like( self, other=other, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def reindex( self, indexers: Mapping[Any, Any] | None = None, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, **indexers_kwargs: Any, ) -> Self: """Conform this object onto a new set of indexes, filling in missing values with ``fill_value``. The default fill value is NaN. Parameters ---------- indexers : dict, optional Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels. Any mismatched coordinate values will be filled in with NaN, and any mismatched dimension names will simply be ignored. One of indexers or indexers_kwargs must be provided. method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional Method to use for filling index values in ``indexers`` not found in this dataset: - None (default): don't fill gaps - "pad" / "ffill": propagate last valid index value forward - "backfill" / "bfill": propagate next valid index value backward - "nearest": use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the indexโ€™s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. sparse : bool, default: False use sparse-array. **indexers_kwargs : {dim: indexer, ...}, optional Keyword arguments in the same form as ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but replaced coordinates. See Also -------- Dataset.reindex_like align pandas.Index.get_indexer Examples -------- Create a dataset with some fictional data. >>> x = xr.Dataset( ... { ... "temperature": ("station", 20 * np.random.rand(4)), ... "pressure": ("station", 500 * np.random.rand(4)), ... }, ... 
coords={"station": ["boston", "nyc", "seattle", "denver"]}, ... ) >>> x Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.indexes Indexes: station Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station') Create a new index and reindex the dataset. By default values in the new index that do not have corresponding records in the dataset are assigned `NaN`. >>> new_index = ["boston", "austin", "seattle", "lincoln"] >>> x.reindex({"station": new_index}) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex({"station": new_index}, fill_value=0) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex( ... {"station": new_index}, fill_value={"temperature": 0, "pressure": 100} ... ) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex({"station": new_index}, method="nearest") Traceback (most recent call last): ... raise ValueError('index must be monotonic increasing or decreasing') ValueError: index must be monotonic increasing or decreasing To further illustrate the filling functionality in reindex, we will create a dataset with a monotonically increasing index (for example, a sequence of dates). >>> x2 = xr.Dataset( ... { ... "temperature": ( ... "time", ... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12], ... ), ... "pressure": ("time", 500 * np.random.rand(6)), ... }, ... coords={"time": pd.date_range("01/01/2019", periods=6, freq="D")}, ... ) >>> x2 Size: 144B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2019-01-01 2019-01-02 ... 2019-01-06 Data variables: temperature (time) float64 48B 15.57 12.77 nan 0.3081 16.59 15.12 pressure (time) float64 48B 481.8 191.7 395.9 264.4 284.0 462.8 Suppose we decide to expand the dataset to cover a wider date range. >>> time_index2 = pd.date_range("12/29/2018", periods=10, freq="D") >>> x2.reindex({"time": time_index2}) Size: 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: temperature (time) float64 80B nan nan nan 15.57 ... 0.3081 16.59 15.12 nan pressure (time) float64 80B nan nan nan 481.8 ... 264.4 284.0 462.8 nan The index entries that did not have a value in the original data frame (for example, `2018-12-29`) are by default filled with NaN. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the `NaN` values, pass `bfill` as an argument to the `method` keyword. >>> x3 = x2.reindex({"time": time_index2}, method="bfill") >>> x3 Size: 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: temperature (time) float64 80B 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan pressure (time) float64 80B 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`) will not be filled by any of the value propagation schemes. 
>>> x2.where(x2.temperature.isnull(), drop=True) Size: 24B Dimensions: (time: 1) Coordinates: * time (time) datetime64[ns] 8B 2019-01-03 Data variables: temperature (time) float64 8B nan pressure (time) float64 8B 395.9 >>> x3.where(x3.temperature.isnull(), drop=True) Size: 48B Dimensions: (time: 2) Coordinates: * time (time) datetime64[ns] 16B 2019-01-03 2019-01-07 Data variables: temperature (time) float64 16B nan nan pressure (time) float64 16B 395.9 nan This is because filling while reindexing does not look at dataset values, but only compares the original and desired indexes. If you do want to fill in the `NaN` values present in the original dataset, use the :py:meth:`~Dataset.fillna()` method. """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def _reindex( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, sparse: bool = False, **indexers_kwargs: Any, ) -> Self: """ Same as reindex but supports sparse option. """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, sparse=sparse, ) def interp( self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", **coords_kwargs: Any, ) -> Self: """ Interpolate a Dataset onto new coordinates. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordinates, their dimensions are used for the broadcasting. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolator. Valid options and their behavior depend which interpolant is used. method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`. ``"nearest"`` is used by default. **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated : Dataset New dataset on the new coordinates. Notes ----- - SciPy is required for certain interpolation methods. - When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. 
- The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :mod:`scipy.interpolate` :doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions` Tutorial material on manipulating data resolution using :py:func:`~xarray.Dataset.interp` Examples -------- >>> ds = xr.Dataset( ... data_vars={ ... "a": ("x", [5, 7, 4]), ... "b": ( ... ("x", "y"), ... [[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]], ... ), ... }, ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> ds Size: 176B Dimensions: (x: 3, y: 4) Coordinates: * x (x) int64 24B 0 1 2 * y (y) int64 32B 10 12 14 16 Data variables: a (x) int64 24B 5 7 4 b (x, y) float64 96B 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0 1D interpolation with the default method (linear): >>> ds.interp(x=[0, 0.75, 1.25, 1.75]) Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 32B 10 12 14 16 Data variables: a (x) float64 32B 5.0 6.5 6.25 4.75 b (x, y) float64 128B 1.0 4.0 2.0 nan 1.75 ... nan 5.0 nan 5.25 nan 1D interpolation with a different method: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 32B 10 12 14 16 Data variables: a (x) float64 32B 5.0 7.0 7.0 4.0 b (x, y) float64 128B 1.0 4.0 2.0 9.0 2.0 7.0 ... nan 6.0 nan 5.0 8.0 1D extrapolation: >>> ds.interp( ... x=[1, 1.5, 2.5, 3.5], ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... ) Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * x (x) float64 32B 1.0 1.5 2.5 3.5 * y (y) int64 32B 10 12 14 16 Data variables: a (x) float64 32B 7.0 5.5 2.5 -0.5 b (x, y) float64 128B 2.0 7.0 6.0 nan 4.0 ... nan 12.0 nan 3.5 nan 2D interpolation: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") Size: 184B Dimensions: (x: 4, y: 3) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 24B 11 13 15 Data variables: a (x) float64 32B 5.0 6.5 6.25 4.75 b (x, y) float64 96B 2.5 3.0 nan 4.0 5.625 ... 
nan nan nan nan nan """ from xarray.core import missing if kwargs is None: kwargs = {} coords = either_dict_or_kwargs(coords, coords_kwargs, "interp") indexers = dict(self._validate_interp_indexers(coords)) obj = self if assume_sorted else self.sortby(list(coords)) def maybe_variable(obj, k): # workaround to get variable for dimension without coordinate. try: return obj._variables[k] except KeyError: return as_variable((k, range(obj.sizes[k]))) def _validate_interp_indexer(x, new_x): # In the case of datetimes, the restrictions placed on indexers # used with interp are stronger than those which are placed on # isel, so we need an additional check after _validate_indexers. if _contains_datetime_like_objects( x ) and not _contains_datetime_like_objects(new_x): raise TypeError( "When interpolating over a datetime-like " "coordinate, the coordinates to " "interpolate to must be either datetime " "strings or datetimes. " f"Instead got\n{new_x}" ) return x, new_x validated_indexers = { k: _validate_interp_indexer(maybe_variable(obj, k), v) for k, v in indexers.items() } # optimization: subset to coordinate range of the target index if method in ["linear", "nearest"]: for k, v in validated_indexers.items(): obj, newidx = missing._localize(obj, {k: v}) validated_indexers[k] = newidx[k] has_chunked_array = bool( any(is_chunked_array(v._data) for v in obj._variables.values()) ) if has_chunked_array: # optimization: create dask coordinate arrays once per Dataset # rather than once per Variable when dask.array.unify_chunks is called later # GH4739 dask_indexers = { k: (index.to_base_variable().chunk(), dest.to_base_variable().chunk()) for k, (index, dest) in validated_indexers.items() } variables: dict[Hashable, Variable] = {} reindex_vars: list[Hashable] = [] for name, var in obj._variables.items(): if name in indexers: continue use_indexers = ( dask_indexers if is_duck_dask_array(var._data) else validated_indexers ) dtype_kind = var.dtype.kind if dtype_kind in "uifc": # For normal number types do the interpolation: var_indexers = {k: v for k, v in use_indexers.items() if k in var.dims} variables[name] = missing.interp(var, var_indexers, method, **kwargs) elif dtype_kind in "ObU" and (use_indexers.keys() & var.dims): if all(var.sizes[d] == 1 for d in (use_indexers.keys() & var.dims)): # Broadcastable, can be handled quickly without reindex: to_broadcast = (var.squeeze(),) + tuple( dest for _, dest in use_indexers.values() ) variables[name] = broadcast_variables(*to_broadcast)[0].copy( deep=True ) else: # For types that we do not understand do stepwise # interpolation to avoid modifying the elements. 
# reindex the variable instead because it supports # booleans and objects and retains the dtype but inside # this loop there might be some duplicate code that slows it # down, therefore collect these signals and run it later: reindex_vars.append(name) elif all(d not in indexers for d in var.dims): # For anything else we can only keep variables if they # are not dependent on any coords that are being # interpolated along: variables[name] = var if reindex_vars and ( reindex_indexers := { k: v for k, (_, v) in validated_indexers.items() if v.dims == (k,) } ): reindexed = alignment.reindex( obj[reindex_vars], indexers=reindex_indexers, method=method_non_numeric, exclude_vars=variables.keys(), ) indexes = dict(reindexed._indexes) variables.update(reindexed.variables) else: # Get the indexes that are not being interpolated along indexes = {k: v for k, v in obj._indexes.items() if k not in indexers} # Get the coords that also exist in the variables: coord_names = obj._coord_names & variables.keys() selected = self._replace_with_new_dims( variables.copy(), coord_names, indexes=indexes ) # Attach indexer as coordinate for k, v in indexers.items(): assert isinstance(v, Variable) if v.dims == (k,): index = PandasIndex(v, k, coord_dtype=v.dtype) index_vars = index.create_variables({k: v}) indexes[k] = index variables.update(index_vars) else: variables[k] = v # Extract coordinates from indexers coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords) variables.update(coord_vars) indexes.update(new_indexes) coord_names = obj._coord_names & variables.keys() | coord_vars.keys() return self._replace_with_new_dims(variables, coord_names, indexes=indexes) def interp_like( self, other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", ) -> Self: """Interpolate this object onto the coordinates of another object. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolator. Valid options and their behavior depend which interpolant is use method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`. ``"nearest"`` is used by default. Returns ------- interpolated : Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- - scipy is required. - If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. 
- When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N โ‰ฅ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :func:`Dataset.interp` :func:`Dataset.reindex_like` :mod:`scipy.interpolate` """ if kwargs is None: kwargs = {} # pick only dimension coordinates with a single index coords: dict[Hashable, Variable] = {} other_indexes = other.xindexes for dim in self.dims: other_dim_coords = other_indexes.get_all_coords(dim, errors="ignore") if len(other_dim_coords) == 1: coords[dim] = other_dim_coords[dim] numeric_coords: dict[Hashable, Variable] = {} object_coords: dict[Hashable, Variable] = {} for k, v in coords.items(): if v.dtype.kind in "uifcMm": numeric_coords[k] = v else: object_coords[k] = v ds = self if object_coords: # We do not support interpolation along object coordinate. # reindex instead. 
ds = self.reindex(object_coords) return ds.interp( coords=numeric_coords, method=method, assume_sorted=assume_sorted, kwargs=kwargs, method_non_numeric=method_non_numeric, ) # Helper methods for rename() def _rename_vars( self, name_dict, dims_dict ) -> tuple[dict[Hashable, Variable], set[Hashable]]: variables = {} coord_names = set() for k, v in self.variables.items(): var = v.copy(deep=False) var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims) name = name_dict.get(k, k) if name in variables: raise ValueError(f"the new name {name!r} conflicts") variables[name] = var if k in self._coord_names: coord_names.add(name) return variables, coord_names def _rename_dims(self, name_dict: Mapping[Any, Hashable]) -> dict[Hashable, int]: return {name_dict.get(k, k): v for k, v in self.sizes.items()} def _rename_indexes( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable] ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: if not self._indexes: return {}, {} indexes = {} variables = {} for index, coord_names in self.xindexes.group_by_index(): new_index = index.rename(name_dict, dims_dict) new_coord_names = [name_dict.get(k, k) for k in coord_names] indexes.update(dict.fromkeys(new_coord_names, new_index)) new_index_vars = new_index.create_variables( { new: self._variables[old] for old, new in zip(coord_names, new_coord_names, strict=True) } ) variables.update(new_index_vars) return indexes, variables def _rename_all( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable] ) -> tuple[ dict[Hashable, Variable], set[Hashable], dict[Hashable, int], dict[Hashable, Index], ]: variables, coord_names = self._rename_vars(name_dict, dims_dict) dims = self._rename_dims(dims_dict) indexes, index_vars = self._rename_indexes(name_dict, dims_dict) variables = {k: index_vars.get(k, v) for k, v in variables.items()} return variables, coord_names, dims, indexes def _rename( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Also used internally by DataArray so that the warning (if any) is raised at the right stack level. """ name_dict = either_dict_or_kwargs(name_dict, names, "rename") for k in name_dict.keys(): if k not in self and k not in self.dims: raise ValueError( f"cannot rename {k!r} because it is not a " "variable or dimension in this dataset" ) create_dim_coord = False new_k = name_dict[k] if k == new_k: continue # Same name, nothing to do if k in self.dims and new_k in self._coord_names: coord_dims = self._variables[name_dict[k]].dims if coord_dims == (k,): create_dim_coord = True elif k in self._coord_names and new_k in self.dims: coord_dims = self._variables[k].dims if coord_dims == (new_k,): create_dim_coord = True if create_dim_coord: warnings.warn( f"rename {k!r} to {name_dict[k]!r} does not create an index " "anymore. Try using swap_dims instead or use set_index " "after rename to create an indexed coordinate.", UserWarning, stacklevel=3, ) variables, coord_names, dims, indexes = self._rename_all( name_dict=name_dict, dims_dict=name_dict ) return self._replace(variables, coord_names, dims=dims, indexes=indexes) def rename( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new object with renamed variables, coordinates and dimensions. Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable, coordinate or dimension names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. 
One of name_dict or names must be provided. Returns ------- renamed : Dataset Dataset with renamed variables, coordinates and dimensions. See Also -------- Dataset.swap_dims Dataset.rename_vars Dataset.rename_dims DataArray.rename """ return self._rename(name_dict=name_dict, **names) def rename_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims: Hashable, ) -> Self: """Returns a new object with renamed dimensions only. Parameters ---------- dims_dict : dict-like, optional Dictionary whose keys are current dimension names and whose values are the desired names. The desired names must not be the name of an existing dimension or Variable in the Dataset. **dims : optional Keyword form of ``dims_dict``. One of dims_dict or dims must be provided. Returns ------- renamed : Dataset Dataset with renamed dimensions. See Also -------- Dataset.swap_dims Dataset.rename Dataset.rename_vars DataArray.rename """ dims_dict = either_dict_or_kwargs(dims_dict, dims, "rename_dims") for k, v in dims_dict.items(): if k not in self.dims: raise ValueError( f"cannot rename {k!r} because it is not found " f"in the dimensions of this dataset {tuple(self.dims)}" ) if v in self.dims or v in self: raise ValueError( f"Cannot rename {k} to {v} because {v} already exists. " "Try using swap_dims instead." ) variables, coord_names, sizes, indexes = self._rename_all( name_dict={}, dims_dict=dims_dict ) return self._replace(variables, coord_names, dims=sizes, indexes=indexes) def rename_vars( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new object with renamed variables including coordinates Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable or coordinate names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. Returns ------- renamed : Dataset Dataset with renamed variables including coordinates See Also -------- Dataset.swap_dims Dataset.rename Dataset.rename_dims DataArray.rename """ name_dict = either_dict_or_kwargs(name_dict, names, "rename_vars") for k in name_dict: if k not in self: raise ValueError( f"cannot rename {k!r} because it is not a " "variable or coordinate in this dataset" ) variables, coord_names, dims, indexes = self._rename_all( name_dict=name_dict, dims_dict={} ) return self._replace(variables, coord_names, dims=dims, indexes=indexes) def swap_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs ) -> Self: """Returns a new object with swapped dimensions. Parameters ---------- dims_dict : dict-like Dictionary whose keys are current dimension names and whose values are new names. **dims_kwargs : {existing_dim: new_dim, ...}, optional The keyword arguments form of ``dims_dict``. One of dims_dict or dims_kwargs must be provided. Returns ------- swapped : Dataset Dataset with swapped dimensions. Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 7]), "b": ("x", [0.1, 2.4])}, ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... ) >>> ds Size: 56B Dimensions: (x: 2) Coordinates: * x (x) >> ds.swap_dims({"x": "y"}) Size: 56B Dimensions: (y: 2) Coordinates: * y (y) int64 16B 0 1 x (y) >> ds.swap_dims({"x": "z"}) Size: 56B Dimensions: (z: 2) Coordinates: x (z) Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. 
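# --- Editorial sketch, not part of the upstream xarray source: how renaming
# differs from swapping dimensions, plus a minimal ``expand_dims`` call.
# The objects below are hypothetical.
import xarray as xr

_ds = xr.Dataset(
    {"a": ("x", [5, 7])},
    coords={"x": ["p", "q"], "y": ("x", [0, 1])},
)
_ds.rename({"x": "station"})  # renames the dimension and its coordinate together
_ds.swap_dims({"x": "y"})  # the existing coordinate "y" becomes the dimension coordinate
_ds.expand_dims("time")  # inserts a new length-1 dimension at the front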
If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. The automatic creation of indexes to back new 1D coordinate variables controlled by the create_index_for_new_dim kwarg. Parameters ---------- dim : hashable, sequence of hashable, mapping, or None Dimensions to include on the new variable. If provided as hashable or sequence of hashable, then dimensions are inserted with length 1. If provided as a mapping, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or array-like (giving the coordinates of the new dimensions). axis : int, sequence of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a sequence of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. create_index_for_new_dim : bool, default: True Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``. **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the dim kwarg and will only be used if dim is None. Returns ------- expanded : Dataset This object, but with additional dimension(s). Examples -------- >>> dataset = xr.Dataset({"temperature": ([], 25.0)}) >>> dataset Size: 8B Dimensions: () Data variables: temperature float64 8B 25.0 # Expand the dataset with a new dimension called "time" >>> dataset.expand_dims(dim="time") Size: 8B Dimensions: (time: 1) Dimensions without coordinates: time Data variables: temperature (time) float64 8B 25.0 # 1D data >>> temperature_1d = xr.DataArray([25.0, 26.5, 24.8], dims="x") >>> dataset_1d = xr.Dataset({"temperature": temperature_1d}) >>> dataset_1d Size: 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: temperature (x) float64 24B 25.0 26.5 24.8 # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_1d.expand_dims(dim="time", axis=0) Size: 24B Dimensions: (time: 1, x: 3) Dimensions without coordinates: time, x Data variables: temperature (time, x) float64 24B 25.0 26.5 24.8 # 2D data >>> temperature_2d = xr.DataArray(np.random.rand(3, 4), dims=("y", "x")) >>> dataset_2d = xr.Dataset({"temperature": temperature_2d}) >>> dataset_2d Size: 96B Dimensions: (y: 3, x: 4) Dimensions without coordinates: y, x Data variables: temperature (y, x) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289 # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_2d.expand_dims(dim="time", axis=2) Size: 96B Dimensions: (y: 3, x: 4, time: 1) Dimensions without coordinates: y, x, time Data variables: temperature (y, x, time) float64 96B 0.5488 0.7152 0.6028 ... 
0.7917 0.5289 # Expand a scalar variable along a new dimension of the same name with and without creating a new index >>> ds = xr.Dataset(coords={"x": 0}) >>> ds Size: 8B Dimensions: () Coordinates: x int64 8B 0 Data variables: *empty* >>> ds.expand_dims("x") Size: 8B Dimensions: (x: 1) Coordinates: * x (x) int64 8B 0 Data variables: *empty* >>> ds.expand_dims("x").indexes Indexes: x Index([0], dtype='int64', name='x') >>> ds.expand_dims("x", create_index_for_new_dim=False).indexes Indexes: *empty* See Also -------- DataArray.expand_dims """ if dim is None: pass elif isinstance(dim, Mapping): # We're later going to modify dim in place; don't tamper with # the input dim = dict(dim) elif isinstance(dim, int): raise TypeError( "dim should be hashable or sequence of hashables or mapping" ) elif isinstance(dim, str) or not isinstance(dim, Sequence): dim = {dim: 1} elif isinstance(dim, Sequence): if len(dim) != len(set(dim)): raise ValueError("dims should not contain duplicate values.") dim = dict.fromkeys(dim, 1) dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims") assert isinstance(dim, MutableMapping) if axis is None: axis = list(range(len(dim))) elif not isinstance(axis, Sequence): axis = [axis] if len(dim) != len(axis): raise ValueError("lengths of dim and axis should be identical.") for d in dim: if d in self.dims: raise ValueError(f"Dimension {d} already exists.") if d in self._variables and not utils.is_scalar(self._variables[d]): raise ValueError(f"{d} already exists as coordinate or variable name.") variables: dict[Hashable, Variable] = {} indexes: dict[Hashable, Index] = dict(self._indexes) coord_names = self._coord_names.copy() # If dim is a dict, then ensure that the values are either integers # or iterables. for k, v in dim.items(): if hasattr(v, "__iter__"): # If the value for the new dimension is an iterable, then # save the coordinates to the variables dict, and set the # value within the dim dict to the length of the iterable # for later use. if create_index_for_new_dim: index = PandasIndex(v, k) indexes[k] = index name_and_new_1d_var = index.create_variables() else: name_and_new_1d_var = {k: Variable(data=v, dims=k)} variables.update(name_and_new_1d_var) coord_names.add(k) dim[k] = variables[k].size elif isinstance(v, int): pass # Do nothing if the dimensions value is just an int else: raise TypeError( f"The value of new dimension {k} must be an iterable or an int" ) for k, v in self._variables.items(): if k not in dim: if k in coord_names: # Do not change coordinates variables[k] = v else: result_ndim = len(v.dims) + len(axis) for a in axis: if a < -result_ndim or result_ndim - 1 < a: raise IndexError( f"Axis {a} of variable {k} is out of bounds of the " f"expanded dimension size {result_ndim}" ) axis_pos = [a if a >= 0 else result_ndim + a for a in axis] if len(axis_pos) != len(set(axis_pos)): raise ValueError("axis should not contain duplicate values") # We need to sort them to make sure `axis` equals to the # axis positions of the result array. zip_axis_dim = sorted(zip(axis_pos, dim.items(), strict=True)) all_dims = list(zip(v.dims, v.shape, strict=True)) for d, c in zip_axis_dim: all_dims.insert(d, c) variables[k] = v.set_dims(dict(all_dims)) elif k not in variables: if k in coord_names and create_index_for_new_dim: # If dims includes a label of a non-dimension coordinate, # it will be promoted to a 1D coordinate with a single value. 
index, index_vars = create_default_index_implicit(v.set_dims(k)) indexes[k] = index variables.update(index_vars) else: if create_index_for_new_dim: warnings.warn( f"No index created for dimension {k} because variable {k} is not a coordinate. " f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.", UserWarning, stacklevel=2, ) # create 1D variable without creating a new index new_1d_var = v.set_dims(k) variables.update({k: new_1d_var}) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], ) -> Self: """Set Dataset (multi-)indexes using one or more existing coordinates or variables. This legacy method is limited to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See :py:meth:`~Dataset.set_xindex` for setting a pandas or a custom Xarray-compatible index from one or more arbitrary coordinates. Parameters ---------- indexes : {dim: index, ...} Mapping from names matching dimensions and values given by (lists of) the names of existing coordinates or variables to set as new (multi-)index. append : bool, default: False If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. Examples -------- >>> arr = xr.DataArray( ... data=np.ones((2, 3)), ... dims=["x", "y"], ... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ... ) >>> ds = xr.Dataset({"v": arr}) >>> ds Size: 104B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 24B 0 1 2 a (x) int64 16B 3 4 Data variables: v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 >>> ds.set_index(x="a") Size: 88B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 3 4 * y (y) int64 24B 0 1 2 Data variables: v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 See Also -------- Dataset.reset_index Dataset.set_xindex Dataset.swap_dims """ dim_coords = either_dict_or_kwargs(indexes, indexes_kwargs, "set_index") new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, Variable] = {} drop_indexes: set[Hashable] = set() drop_variables: set[Hashable] = set() replace_dims: dict[Hashable, Hashable] = {} all_var_names: set[Hashable] = set() for dim, _var_names in dim_coords.items(): if isinstance(_var_names, str) or not isinstance(_var_names, Sequence): var_names = [_var_names] else: var_names = list(_var_names) invalid_vars = set(var_names) - set(self._variables) if invalid_vars: raise ValueError( ", ".join([str(v) for v in invalid_vars]) + " variable(s) do not exist" ) all_var_names.update(var_names) drop_variables.update(var_names) # drop any pre-existing index involved and its corresponding coordinates index_coord_names = self.xindexes.get_all_coords(dim, errors="ignore") all_index_coord_names = set(index_coord_names) for k in var_names: all_index_coord_names.update( self.xindexes.get_all_coords(k, errors="ignore") ) drop_indexes.update(all_index_coord_names) drop_variables.update(all_index_coord_names) if len(var_names) == 1 and (not append or dim not in self._indexes): var_name = var_names[0] var = self._variables[var_name] # an error with a better message will be raised for scalar variables # when creating the 
PandasIndex if var.ndim > 0 and var.dims != (dim,): raise ValueError( f"dimension mismatch: try setting an index for dimension {dim!r} with " f"variable {var_name!r} that has dimensions {var.dims}" ) idx = PandasIndex.from_variables({dim: var}, options={}) idx_vars = idx.create_variables({var_name: var}) # trick to preserve coordinate order in this case if dim in self._coord_names: drop_variables.remove(dim) else: if append: current_variables = { k: self._variables[k] for k in index_coord_names } else: current_variables = {} idx, idx_vars = PandasMultiIndex.from_variables_maybe_expand( dim, current_variables, {k: self._variables[k] for k in var_names}, ) for n in idx.index.names: replace_dims[n] = dim new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) # re-add deindexed coordinates (convert to base variables) for k in drop_variables: if ( k not in new_variables and k not in all_var_names and k in self._coord_names ): new_variables[k] = self._variables[k].to_base_variable() indexes_: dict[Any, Index] = { k: v for k, v in self._indexes.items() if k not in drop_indexes } indexes_.update(new_indexes) variables = { k: v for k, v in self._variables.items() if k not in drop_variables } variables.update(new_variables) # update dimensions if necessary, GH: 3512 for k, v in variables.items(): if any(d in replace_dims for d in v.dims): new_dims = [replace_dims.get(d, d) for d in v.dims] variables[k] = v._replace(dims=new_dims) coord_names = self._coord_names - drop_variables | set(new_variables) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes_ ) def reset_index( self, dims_or_levels: Hashable | Sequence[Hashable], *, drop: bool = False, ) -> Self: """Reset the specified index(es) or multi-index level(s). This legacy method is specific to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See the more generic :py:meth:`~Dataset.drop_indexes` and :py:meth:`~Dataset.set_xindex` method to respectively drop and set pandas or custom indexes for arbitrary coordinates. Parameters ---------- dims_or_levels : Hashable or Sequence of Hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, default: False If True, remove the specified indexes and/or multi-index levels instead of extracting them as new coordinates (default: False). Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. 
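# --- Editorial sketch, not part of the upstream xarray source: round-tripping
# a coordinate through ``set_index`` and ``reset_index``. Names are hypothetical.
import xarray as xr

_ds = xr.Dataset(
    {"v": (("x", "y"), [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])},
    coords={"x": [0, 1], "y": [10, 20, 30], "a": ("x", [3, 4])},
)
_indexed = _ds.set_index(x="a")  # values of "a" become the index along dimension "x"
_no_index = _indexed.reset_index("x")  # keeps the coordinate values but drops the index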
See Also -------- Dataset.set_index Dataset.set_xindex Dataset.drop_indexes """ if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence): dims_or_levels = [dims_or_levels] invalid_coords = set(dims_or_levels) - set(self._indexes) if invalid_coords: raise ValueError( f"{tuple(invalid_coords)} are not coordinates with an index" ) drop_indexes: set[Hashable] = set() drop_variables: set[Hashable] = set() seen: set[Index] = set() new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, Variable] = {} def drop_or_convert(var_names): if drop: drop_variables.update(var_names) else: base_vars = { k: self._variables[k].to_base_variable() for k in var_names } new_variables.update(base_vars) for name in dims_or_levels: index = self._indexes[name] if index in seen: continue seen.add(index) idx_var_names = set(self.xindexes.get_all_coords(name)) drop_indexes.update(idx_var_names) if isinstance(index, PandasMultiIndex): # special case for pd.MultiIndex level_names = index.index.names keep_level_vars = { k: self._variables[k] for k in level_names if k not in dims_or_levels } if index.dim not in dims_or_levels and keep_level_vars: # do not drop the multi-index completely # instead replace it by a new (multi-)index with dropped level(s) idx = index.keep_levels(keep_level_vars) idx_vars = idx.create_variables(keep_level_vars) new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) if not isinstance(idx, PandasMultiIndex): # multi-index reduced to single index # backward compatibility: unique level coordinate renamed to dimension drop_variables.update(keep_level_vars) drop_or_convert( [k for k in level_names if k not in keep_level_vars] ) else: # always drop the multi-index dimension variable drop_variables.add(index.dim) drop_or_convert(level_names) else: drop_or_convert(idx_var_names) indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes} indexes.update(new_indexes) variables = { k: v for k, v in self._variables.items() if k not in drop_variables } variables.update(new_variables) coord_names = self._coord_names - drop_variables return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def set_xindex( self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). Parameters ---------- coord_names : str or list Name(s) of the coordinate(s) used to build the index. If several names are given, their order matters. index_cls : subclass of :class:`~xarray.indexes.Index`, optional The type of index to create. By default, try setting a ``PandasIndex`` if ``len(coord_names) == 1``, otherwise a ``PandasMultiIndex``. **options Options passed to the index constructor. Returns ------- obj : Dataset Another dataset, with this dataset's data and with a new index. 
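# --- Editorial sketch, not part of the upstream xarray source: building an
# index for a non-dimension coordinate with ``set_xindex`` so that it can be
# used for label-based selection. Names are hypothetical.
import xarray as xr

_ds = xr.Dataset(
    {"v": ("x", [10.0, 20.0, 30.0])},
    coords={"label": ("x", ["a", "b", "c"])},
)
_ds = _ds.set_xindex("label")  # defaults to a PandasIndex for a single coordinate
_ds.sel(label="b")  # selection along "x" via the "label" coordinate now works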
""" # the Sequence check is required for mypy if is_scalar(coord_names) or not isinstance(coord_names, Sequence): coord_names = [coord_names] if index_cls is None: if len(coord_names) == 1: index_cls = PandasIndex else: index_cls = PandasMultiIndex elif not issubclass(index_cls, Index): raise TypeError(f"{index_cls} is not a subclass of xarray.Index") invalid_coords = set(coord_names) - self._coord_names if invalid_coords: msg = ["invalid coordinate(s)"] no_vars = invalid_coords - set(self._variables) data_vars = invalid_coords - no_vars if no_vars: msg.append(f"those variables don't exist: {no_vars}") if data_vars: msg.append( f"those variables are data variables: {data_vars}, use `set_coords` first" ) raise ValueError("\n".join(msg)) # we could be more clever here (e.g., drop-in index replacement if index # coordinates do not conflict), but let's not allow this for now indexed_coords = set(coord_names) & set(self._indexes) if indexed_coords: raise ValueError( f"those coordinates already have an index: {indexed_coords}" ) coord_vars = {name: self._variables[name] for name in coord_names} index = index_cls.from_variables(coord_vars, options=options) new_coord_vars = index.create_variables(coord_vars) # special case for setting a pandas multi-index from level coordinates # TODO: remove it once we depreciate pandas multi-index dimension (tuple # elements) coordinate if isinstance(index, PandasMultiIndex): coord_names = [index.dim] + list(coord_names) # Check for extra variables that don't match the coordinate names extra_vars = set(new_coord_vars) - set(coord_names) if extra_vars: extra_vars_str = ", ".join(f"'{name}'" for name in extra_vars) coord_names_str = ", ".join(f"'{name}'" for name in coord_names) raise ValueError( f"The index created extra variables {extra_vars_str} that are not " f"in the list of coordinates {coord_names_str}. " f"Use a factory method pattern instead:\n" f" index = {index_cls.__name__}.from_variables(ds, {list(coord_names)!r})\n" f" coords = xr.Coordinates.from_xindex(index)\n" f" ds = ds.assign_coords(coords)" ) variables: dict[Hashable, Variable] indexes: dict[Hashable, Index] if len(coord_names) == 1: variables = self._variables.copy() indexes = self._indexes.copy() name = list(coord_names).pop() if name in new_coord_vars: variables[name] = new_coord_vars[name] indexes[name] = index else: # reorder variables and indexes so that coordinates having the same # index are next to each other variables = {} for name, var in self._variables.items(): if name not in coord_names: variables[name] = var indexes = {} for name, idx in self._indexes.items(): if name not in coord_names: indexes[name] = idx for name in coord_names: try: variables[name] = new_coord_vars[name] except KeyError: variables[name] = self._variables[name] indexes[name] = index return self._replace( variables=variables, coord_names=self._coord_names | set(coord_names), indexes=indexes, ) def reorder_levels( self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], ) -> Self: """Rearrange index levels using input order. Parameters ---------- dim_order : dict-like of Hashable to Sequence of int or Hashable, optional Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. **dim_order_kwargs : Sequence of int or Hashable, optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. 
Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. """ dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, "reorder_levels") variables = self._variables.copy() indexes = dict(self._indexes) new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, IndexVariable] = {} for dim, order in dim_order.items(): index = self._indexes[dim] if not isinstance(index, PandasMultiIndex): raise ValueError(f"coordinate {dim} has no MultiIndex") level_vars = {k: self._variables[k] for k in order} idx = index.reorder_levels(level_vars) idx_vars = idx.create_variables(level_vars) new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes} indexes.update(new_indexes) variables = {k: v for k, v in self._variables.items() if k not in new_variables} variables.update(new_variables) return self._replace(variables, indexes=indexes) def _get_stack_index( self, dim, multi=False, create_index=False, ) -> tuple[Index | None, dict[Hashable, Variable]]: """Used by stack and unstack to get one pandas (multi-)index among the indexed coordinates along dimension `dim`. If exactly one index is found, return it with its corresponding coordinate variables(s), otherwise return None and an empty dict. If `create_index=True`, create a new index if none is found or raise an error if multiple indexes are found. """ stack_index: Index | None = None stack_coords: dict[Hashable, Variable] = {} for name, index in self._indexes.items(): var = self._variables[name] if ( var.ndim == 1 and var.dims[0] == dim and ( # stack: must be a single coordinate index (not multi and not self.xindexes.is_multi(name)) # unstack: must be an index that implements .unstack or (multi and type(index).unstack is not Index.unstack) ) ): if stack_index is not None and index is not stack_index: # more than one index found, stop if create_index: raise ValueError( f"cannot stack dimension {dim!r} with `create_index=True` " "and with more than one index found along that dimension" ) return None, {} stack_index = index stack_coords[name] = var if create_index and stack_index is None: if dim in self._variables: var = self._variables[dim] else: _, _, var = _get_virtual_variable(self._variables, dim, self.sizes) # dummy index (only `stack_coords` will be used to construct the multi-index) stack_index = PandasIndex([0], dim) stack_coords = {dim: var} return stack_index, stack_coords def _stack_once( self, dims: Sequence[Hashable | EllipsisType], new_dim: Hashable, index_cls: type[Index], create_index: bool | None = True, ) -> Self: if dims == ...: raise ValueError("Please use [...] for dims, rather than just ...") if ... 
in dims: dims = list(infix_dims(dims, self.dims)) new_variables: dict[Hashable, Variable] = {} stacked_var_names: list[Hashable] = [] drop_indexes: list[Hashable] = [] for name, var in self.variables.items(): if any(d in var.dims for d in dims): add_dims = [d for d in dims if d not in var.dims] vdims = list(var.dims) + add_dims shape = [self.sizes[d] for d in vdims] exp_var = var.set_dims(vdims, shape) stacked_var = exp_var.stack(**{new_dim: dims}) new_variables[name] = stacked_var stacked_var_names.append(name) else: new_variables[name] = var.copy(deep=False) # drop indexes of stacked coordinates (if any) for name in stacked_var_names: drop_indexes += list(self.xindexes.get_all_coords(name, errors="ignore")) new_indexes = {} new_coord_names = set(self._coord_names) if create_index or create_index is None: product_vars: dict[Any, Variable] = {} for dim in dims: idx, idx_vars = self._get_stack_index(dim, create_index=create_index) if idx is not None: product_vars.update(idx_vars) if len(product_vars) == len(dims): idx = index_cls.stack(product_vars, new_dim) new_indexes[new_dim] = idx new_indexes.update(dict.fromkeys(product_vars, idx)) idx_vars = idx.create_variables(product_vars) # keep consistent multi-index coordinate order for k in idx_vars: new_variables.pop(k, None) new_variables.update(idx_vars) new_coord_names.update(idx_vars) indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes} indexes.update(new_indexes) return self._replace_with_new_dims( new_variables, coord_names=new_coord_names, indexes=indexes ) @partial(deprecate_dims, old_name="dimensions") def stack( self, dim: Mapping[Any, Sequence[Hashable | EllipsisType]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dim_kwargs: Sequence[Hashable | EllipsisType], ) -> Self: """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and by default the corresponding coordinate variables will be combined into a MultiIndex. Parameters ---------- dim : mapping of hashable to sequence of hashable Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. create_index : bool or None, default: True - True: create a multi-index for each of the stacked dimensions. - False: don't create any index. - None. create a multi-index only if exactly one single (1-d) coordinate index is found for every dimension to stack. index_cls: Index-class, default: PandasMultiIndex Can be used to pass a custom multi-index type (must be an Xarray index that implements `.stack()`). By default, a pandas multi-index wrapper is used. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : Dataset Dataset with stacked data. See Also -------- Dataset.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "stack") result = self for new_dim, dims in dim.items(): result = result._stack_once(dims, new_dim, index_cls, create_index) return result def to_stacked_array( self, new_dim: Hashable, sample_dims: Collection[Hashable], variable_dim: Hashable = "variable", name: Hashable | None = None, ) -> DataArray: """Combine variables of differing dimensionality into a DataArray without broadcasting. 
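# ---------------------------------------------------------------------------
# Usage sketch for ``stack`` (hypothetical data): collapse two dimensions
# into a single new dimension, by default backed by a pandas MultiIndex.
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"var": (("x", "y"), np.arange(6).reshape(2, 3))},
#         coords={"x": [10, 20], "y": ["a", "b", "c"]},
#     )
#     stacked = ds.stack(z=("x", "y"))  # new "z" dimension with a MultiIndex
#     flat = ds.stack(z=("x", "y"), create_index=False)  # no index created
# ---------------------------------------------------------------------------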
This method is similar to Dataset.to_dataarray but does not broadcast the variables. Parameters ---------- new_dim : hashable Name of the new stacked coordinate sample_dims : Collection of hashables List of dimensions that **will not** be stacked. Each array in the dataset must share these dimensions. For machine learning applications, these define the dimensions over which samples are drawn. variable_dim : hashable, default: "variable" Name of the level in the stacked coordinate which corresponds to the variables. name : hashable, optional Name of the new data array. Returns ------- stacked : DataArray DataArray with the specified dimensions and data variables stacked together. The stacked coordinate is named ``new_dim`` and represented by a MultiIndex object with a level containing the data variable names. The name of this level is controlled using the ``variable_dim`` argument. See Also -------- Dataset.to_dataarray Dataset.stack DataArray.to_unstacked_dataset Examples -------- >>> data = xr.Dataset( ... data_vars={ ... "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), ... "b": ("x", [6, 7]), ... }, ... coords={"y": ["u", "v", "w"]}, ... ) >>> data Size: 76B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> data.to_stacked_array("z", sample_dims=["x"]) Size: 64B array([[0, 1, 2, 6], [3, 4, 5, 7]]) Coordinates: * z (z) object 32B MultiIndex * variable (z) Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} new_indexes, clean_index = index.unstack() indexes.update(new_indexes) for idx in new_indexes.values(): variables.update(idx.create_variables(index_vars)) for name, var in self.variables.items(): if name not in index_vars: if dim in var.dims: if isinstance(fill_value, Mapping): fill_value_ = fill_value.get(name, xrdtypes.NA) else: fill_value_ = fill_value variables[name] = var._unstack_once( index=clean_index, dim=dim, fill_value=fill_value_, sparse=sparse, ) else: variables[name] = var coord_names = set(self._coord_names) - {dim} | set(new_indexes) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def _unstack_full_reindex( self, dim: Hashable, index_and_vars: tuple[Index, dict[Hashable, Variable]], fill_value, sparse: bool, ) -> Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} new_indexes, clean_index = index.unstack() indexes.update(new_indexes) new_index_variables = {} for idx in new_indexes.values(): new_index_variables.update(idx.create_variables(index_vars)) new_dim_sizes = {k: v.size for k, v in new_index_variables.items()} variables.update(new_index_variables) # take a shortcut in case the MultiIndex was not modified. 
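# ---------------------------------------------------------------------------
# Usage sketch for ``to_stacked_array`` (reuses the toy data from the
# docstring above): variables of different dimensionality are combined into
# one DataArray and recovered with ``DataArray.to_unstacked_dataset``.
#
#     import xarray as xr
#
#     data = xr.Dataset(
#         {"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
#         coords={"y": ["u", "v", "w"]},
#     )
#     stacked = data.to_stacked_array("z", sample_dims=["x"])
#     roundtripped = stacked.to_unstacked_dataset(dim="z")
#     assert data.identical(roundtripped)
# ---------------------------------------------------------------------------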
full_idx = pd.MultiIndex.from_product( clean_index.levels, names=clean_index.names ) if clean_index.equals(full_idx): obj = self else: # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex xr_full_idx = PandasMultiIndex(full_idx, dim) indexers = Indexes( dict.fromkeys(index_vars, xr_full_idx), xr_full_idx.create_variables(index_vars), ) obj = self._reindex( indexers, copy=False, fill_value=fill_value, sparse=sparse ) for name, var in obj.variables.items(): if name not in index_vars: if dim in var.dims: variables[name] = var.unstack({dim: new_dim_sizes}) else: variables[name] = var coord_names = set(self._coord_names) - {dim} | set(new_dim_sizes) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def unstack( self, dim: Dims = None, *, fill_value: Any = xrdtypes.NA, sparse: bool = False, ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. New dimensions will be added at the end. Parameters ---------- dim : str, Iterable of Hashable or None, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. fill_value : scalar or dict-like, default: nan value to be filled. If a dict-like, maps variable names to fill values. If not provided or if the dict-like does not contain all variables, the dtype's NA value will be used. sparse : bool, default: False use sparse-array if True Returns ------- unstacked : Dataset Dataset with unstacked data. See Also -------- Dataset.stack """ if dim is None: dims = list(self.dims) else: if isinstance(dim, str) or not isinstance(dim, Iterable): dims = [dim] else: dims = list(dim) missing_dims = set(dims) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) # each specified dimension must have exactly one multi-index stacked_indexes: dict[Any, tuple[Index, dict[Hashable, Variable]]] = {} for d in dims: idx, idx_vars = self._get_stack_index(d, multi=True) if idx is not None: stacked_indexes[d] = idx, idx_vars if dim is None: dims = list(stacked_indexes) else: non_multi_dims = set(dims) - set(stacked_indexes) if non_multi_dims: raise ValueError( "cannot unstack dimensions that do not " f"have exactly one multi-index: {tuple(non_multi_dims)}" ) result = self.copy(deep=False) # we want to avoid allocating an object-dtype ndarray for a MultiIndex, # so we can't just access self.variables[v].data for every variable. # We only check the non-index variables. # https://github.com/pydata/xarray/issues/5902 nonindexes = [ self.variables[k] for k in set(self.variables) - set(self._indexes) ] # Notes for each of these cases: # 1. Dask arrays don't support assignment by index, which the fast unstack # function requires. # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125 # 2. Sparse doesn't currently support (though we could special-case it) # https://github.com/pydata/sparse/issues/422 # 3. pint requires checking if it's a NumPy array until # https://github.com/pydata/xarray/pull/4751 is resolved, # Once that is resolved, explicitly exclude pint arrays. # pint doesn't implement `np.full_like` in a way that's # currently compatible. 
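# ---------------------------------------------------------------------------
# Usage sketch for ``unstack`` (hypothetical data): expand a stacked,
# MultiIndexed dimension back into the original dimensions; missing
# combinations are filled with ``fill_value``.
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"var": (("x", "y"), np.arange(6).reshape(2, 3))},
#         coords={"x": [10, 20], "y": ["a", "b", "c"]},
#     )
#     stacked = ds.stack(z=("x", "y"))
#     restored = stacked.unstack("z")  # back to dimensions ("x", "y")
#     padded = stacked.isel(z=slice(0, 5)).unstack("z", fill_value=-1)
# ---------------------------------------------------------------------------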
sparse_array_type = array_type("sparse") needs_full_reindex = any( is_duck_dask_array(v.data) or isinstance(v.data, sparse_array_type) or not isinstance(v.data, np.ndarray) for v in nonindexes ) for d in dims: if needs_full_reindex: result = result._unstack_full_reindex( d, stacked_indexes[d], fill_value, sparse ) else: result = result._unstack_once(d, stacked_indexes[d], fill_value, sparse) return result def update(self, other: CoercibleMapping) -> None: """Update this dataset's variables with those from another dataset. Just like :py:meth:`dict.update` this is a in-place operation. For a non-inplace version, see :py:meth:`Dataset.merge`. Parameters ---------- other : Dataset or mapping Variables with which to update this dataset. One of: - Dataset - mapping {var name: DataArray} - mapping {var name: Variable} - mapping {var name: (dimension name, array-like)} - mapping {var name: (tuple of dimension names, array-like)} Raises ------ ValueError If any dimensions would have inconsistent sizes in the updated dataset. See Also -------- Dataset.assign Dataset.merge """ merge_result = dataset_update_method(self, other) self._replace(inplace=True, **merge_result._asdict()) def merge( self, other: CoercibleMapping | DataArray, overwrite_vars: Hashable | Iterable[Hashable] = frozenset(), compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, fill_value: Any = xrdtypes.NA, combine_attrs: CombineAttrsOptions = "override", ) -> Self: """Merge the arrays of two datasets into a single dataset. This method generally does not allow for overriding data, with the exception of attributes, which are ignored on the second dataset. Variables with the same name are checked for conflicts via the equals or identical methods. Parameters ---------- other : Dataset or mapping Dataset or variables to merge with this dataset. overwrite_vars : hashable or iterable of hashable, optional If provided, update variables of these name(s) without checking for conflicts in this dataset. compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override", "minimal"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts: - 'identical': all values, dimensions and attributes must be the same. - 'equals': all values and dimensions must be the same. - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - 'override': skip comparing and pick variable from first dataset - 'minimal': drop conflicting coordinates join : {"outer", "inner", "left", "right", "exact", "override"}, \ default: "outer" Method for joining ``self`` and ``other`` along shared dimensions: - 'outer': use the union of the indexes - 'inner': use the intersection of the indexes - 'left': use indexes from ``self`` - 'right': use indexes from ``other`` - 'exact': error instead of aligning non-equal indexes - 'override': use indexes from ``self`` that are the same size as those of ``other`` in that dimension fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. 
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- merged : Dataset Merged dataset. Raises ------ MergeError If any variables conflict (see ``compat``). See Also -------- Dataset.update """ from xarray.core.dataarray import DataArray other = other.to_dataset() if isinstance(other, DataArray) else other merge_result = dataset_merge_method( self, other, overwrite_vars=overwrite_vars, compat=compat, join=join, fill_value=fill_value, combine_attrs=combine_attrs, ) return self._replace(**merge_result._asdict()) def _assert_all_in_dataset( self, names: Iterable[Hashable], virtual_okay: bool = False ) -> None: bad_names = set(names) - set(self._variables) if virtual_okay: bad_names -= self.virtual_variables if bad_names: ordered_bad_names = [name for name in names if name in bad_names] raise ValueError( f"These variables cannot be found in this dataset: {ordered_bad_names}" ) def drop_vars( self, names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]], *, errors: ErrorOptions = "raise", ) -> Self: """Drop variables from this dataset. Parameters ---------- names : Hashable or iterable of Hashable or Callable Name(s) of variables to drop. If a Callable, this object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the dataset are dropped and no error is raised. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "latitude", "longitude"], ... [[[25.5, 26.3], [27.1, 28.0]]], ... ), ... "humidity": ( ... ["time", "latitude", "longitude"], ... [[[65.0, 63.8], [58.2, 59.6]]], ... ), ... "wind_speed": ( ... ["time", "latitude", "longitude"], ... [[[10.2, 8.5], [12.1, 9.8]]], ... ), ... }, ... coords={ ... "time": pd.date_range("2023-07-01", periods=1), ... "latitude": [40.0, 40.2], ... "longitude": [-75.0, -74.8], ... }, ... 
) >>> dataset Size: 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop the 'humidity' variable >>> dataset.drop_vars(["humidity"]) Size: 104B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop the 'humidity', 'temperature' variables >>> dataset.drop_vars(["humidity", "temperature"]) Size: 72B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop all indexes >>> dataset.drop_vars(lambda x: x.indexes) Size: 96B Dimensions: (time: 1, latitude: 2, longitude: 2) Dimensions without coordinates: time, latitude, longitude Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="ignore" >>> dataset.drop_vars(["pressure"], errors="ignore") Size: 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="raise" >>> dataset.drop_vars(["pressure"], errors="raise") Traceback (most recent call last): ValueError: These variables cannot be found in this dataset: ['pressure'] Raises ------ ValueError Raised if you attempt to drop a variable which is not present, and the kwarg ``errors='raise'``. Returns ------- dropped : Dataset See Also -------- DataArray.drop_vars """ if callable(names): names = names(self) # the Iterable check is required for mypy if is_scalar(names) or not isinstance(names, Iterable): names_set = {names} else: names_set = set(names) if errors == "raise": self._assert_all_in_dataset(names_set) # GH6505 other_names = set() for var in names_set: maybe_midx = self._indexes.get(var, None) if isinstance(maybe_midx, PandasMultiIndex): idx_coord_names = set(list(maybe_midx.index.names) + [maybe_midx.dim]) idx_other_names = idx_coord_names - set(names_set) other_names.update(idx_other_names) if other_names: names_set |= set(other_names) emit_user_level_warning( f"Deleting a single level of a MultiIndex is deprecated. Previously, this deleted all levels of a MultiIndex. 
" f"Please also drop the following variables: {other_names!r} to avoid an error in the future.", DeprecationWarning, ) assert_no_index_corrupted(self.xindexes, names_set) variables = {k: v for k, v in self._variables.items() if k not in names_set} coord_names = {k for k in self._coord_names if k in variables} indexes = {k: v for k, v in self._indexes.items() if k not in names_set} return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def drop_indexes( self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters ---------- coord_names : hashable or iterable of hashable Name(s) of the coordinate(s) for which to drop the index. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the coordinates passed have no index or are not in the dataset. If 'ignore', no error is raised. Returns ------- dropped : Dataset A new dataset with dropped indexes. """ # the Iterable check is required for mypy if is_scalar(coord_names) or not isinstance(coord_names, Iterable): coord_names = {coord_names} else: coord_names = set(coord_names) if errors == "raise": invalid_coords = coord_names - self._coord_names if invalid_coords: raise ValueError( f"The coordinates {tuple(invalid_coords)} are not found in the " f"dataset coordinates {tuple(self.coords.keys())}" ) unindexed_coords = set(coord_names) - set(self._indexes) if unindexed_coords: raise ValueError( f"those coordinates do not have an index: {unindexed_coords}" ) assert_no_index_corrupted(self.xindexes, coord_names, action="remove index(es)") variables = {} for name, var in self._variables.items(): if name in coord_names: variables[name] = var.to_base_variable() else: variables[name] = var indexes = {k: v for k, v in self._indexes.items() if k not in coord_names} return self._replace(variables=variables, indexes=indexes) def drop( self, labels=None, dim=None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged See Also -------- Dataset.drop_vars Dataset.drop_sel """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if is_dict_like(labels) and not isinstance(labels, dict): emit_user_level_warning( "dropping coordinates using `drop` is deprecated; use drop_vars.", DeprecationWarning, ) return self.drop_vars(labels, errors=errors) if labels_kwargs or isinstance(labels, dict): if dim is not None: raise ValueError("cannot specify dim and dict-like arguments.") labels = either_dict_or_kwargs(labels, labels_kwargs, "drop") if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)): emit_user_level_warning( "dropping variables using `drop` is deprecated; use drop_vars.", DeprecationWarning, ) # for mypy if is_scalar(labels): labels = [labels] return self.drop_vars(labels, errors=errors) if dim is not None: warnings.warn( "dropping labels using list-like labels is deprecated; using " "dict-like arguments with `drop_sel`, e.g. 
`ds.drop_sel(dim=[labels]).", DeprecationWarning, stacklevel=2, ) return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs) emit_user_level_warning( "dropping labels using `drop` is deprecated; use `drop_sel` instead.", DeprecationWarning, ) return self.drop_sel(labels, errors=errors) def drop_sel( self, labels=None, *, errors: ErrorOptions = "raise", **labels_kwargs ) -> Self: """Drop index labels from this dataset. Parameters ---------- labels : mapping of hashable to Any Index labels to drop errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. **labels_kwargs : {dim: label, ...}, optional The keyword arguments form of ``dim`` and ``labels`` Returns ------- dropped : Dataset Examples -------- >>> data = np.arange(6).reshape(2, 3) >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds Size: 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> ds.drop_sel(y=["a", "c"]) Size: 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) >> ds.drop_sel(y="b") Size: 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) Self: """Drop index positions from this Dataset. Parameters ---------- indexers : mapping of hashable to Any Index locations to drop **indexers_kwargs : {dim: position, ...}, optional The keyword arguments form of ``dim`` and ``positions`` Returns ------- dropped : Dataset Raises ------ IndexError Examples -------- >>> data = np.arange(6).reshape(2, 3) >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds Size: 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> ds.drop_isel(y=[0, 2]) Size: 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) >> ds.drop_isel(y=1) Size: 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) Self: """Drop dimensions and associated variables from this dataset. Parameters ---------- drop_dims : str or Iterable of Hashable Dimension or dimensions to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the dataset are dropped and no error is raised. Returns ------- obj : Dataset The dataset without the given dimensions (or any variables containing those dimensions). """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable): drop_dims = {drop_dims} else: drop_dims = set(drop_dims) if errors == "raise": missing_dims = drop_dims - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims} return self.drop_vars(drop_vars) @deprecate_dims def transpose( self, *dim: Hashable, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new Dataset object with all array dimensions transposed. Although the order of dimensions on each array will change, the dataset dimensions themselves will remain in fixed (sorted) order. Parameters ---------- *dim : hashable, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : Dataset Each array in the dataset (including) coordinates will be transposed to the given order. Notes ----- This operation returns a view of each array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded into memory. See Also -------- numpy.transpose DataArray.transpose """ # Raise error if list is passed as dim if (len(dim) > 0) and (isinstance(dim[0], list)): list_fix = [f"{x!r}" if isinstance(x, str) else f"{x}" for x in dim[0]] raise TypeError( f"transpose requires dim to be passed as multiple arguments. Expected `{', '.join(list_fix)}`. Received `{dim[0]}` instead" ) # Use infix_dims to check once for missing dimensions if len(dim) != 0: _ = list(infix_dims(dim, self.dims, missing_dims)) ds = self.copy() for name, var in self._variables.items(): var_dims = tuple(d for d in dim if d in (var.dims + (...,))) ds._variables[name] = var.transpose(*var_dims) return ds def dropna( self, dim: Hashable, *, how: Literal["any", "all"] = "any", thresh: int | None = None, subset: Iterable[Hashable] | None = None, ) -> Self: """Returns a new dataset with dropped labels for missing values along the provided dimension. Parameters ---------- dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. how : {"any", "all"}, default: "any" - any : if any NA values are present, drop that label - all : if all values are NA, drop that label thresh : int or None, optional If supplied, require this many non-NA values (summed over all the subset variables). subset : iterable of hashable or None, optional Which variables to check for missing values. By default, all variables in the dataset are checked. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "location"], ... [[23.4, 24.1], [np.nan, 22.1], [21.8, 24.2], [20.5, 25.3]], ... ) ... }, ... coords={"time": [1, 2, 3, 4], "location": ["A", "B"]}, ... ) >>> dataset Size: 104B Dimensions: (time: 4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 * location (location) >> dataset.dropna(dim="time") Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) >> dataset.dropna(dim="time", how="any") Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) >> dataset.dropna(dim="time", how="all") Size: 104B Dimensions: (time: 4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 * location (location) >> dataset.dropna(dim="time", thresh=2) Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) = thresh elif how == "any": mask = count == size elif how == "all": mask = count > 0 elif how is not None: raise ValueError(f"invalid how option: {how}") else: raise TypeError("must specify how or thresh") return self.isel({dim: mask}) def fillna(self, value: Any) -> Self: """Fill missing values in this object. 
This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value : scalar, ndarray, DataArray, dict or Dataset Used to fill all matching missing values in this dataset's data variables. Scalars, ndarrays or DataArrays arguments are used to fill all data with aligned coordinates (for DataArrays). Dictionaries or datasets match data variables and then align coordinates if necessary. Returns ------- Dataset Examples -------- >>> ds = xr.Dataset( ... { ... "A": ("x", [np.nan, 2, np.nan, 0]), ... "B": ("x", [3, 4, np.nan, 1]), ... "C": ("x", [np.nan, np.nan, np.nan, 5]), ... "D": ("x", [np.nan, 3, np.nan, 4]), ... }, ... coords={"x": [0, 1, 2, 3]}, ... ) >>> ds Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B nan 2.0 nan 0.0 B (x) float64 32B 3.0 4.0 nan 1.0 C (x) float64 32B nan nan nan 5.0 D (x) float64 32B nan 3.0 nan 4.0 Replace all `NaN` values with 0s. >>> ds.fillna(0) Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B 0.0 2.0 0.0 0.0 B (x) float64 32B 3.0 4.0 0.0 1.0 C (x) float64 32B 0.0 0.0 0.0 5.0 D (x) float64 32B 0.0 3.0 0.0 4.0 Replace all `NaN` elements in column โ€˜Aโ€™, โ€˜Bโ€™, โ€˜Cโ€™, and โ€˜Dโ€™, with 0, 1, 2, and 3 respectively. >>> values = {"A": 0, "B": 1, "C": 2, "D": 3} >>> ds.fillna(value=values) Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B 0.0 2.0 0.0 0.0 B (x) float64 32B 3.0 4.0 1.0 1.0 C (x) float64 32B 2.0 2.0 2.0 5.0 D (x) float64 32B 3.0 3.0 3.0 4.0 """ if utils.is_dict_like(value): value_keys = getattr(value, "data_vars", value).keys() if not set(value_keys) <= set(self.data_vars.keys()): raise ValueError( "all variables in the argument to `fillna` " "must be contained in the original dataset" ) out = ops.fillna(self, value) return out def interpolate_na( self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, use_coordinate: bool | Hashable = True, max_gap: ( int | float | str | pd.Timedelta | np.timedelta64 | datetime.timedelta | None ) = None, **kwargs: Any, ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters ---------- dim : Hashable or None, optional Specifies the dimension along which to interpolate. method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \ "barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear" String indicating which method to use for interpolation: - 'linear': linear interpolation. Additional keyword arguments are passed to :py:func:`numpy.interp` - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial': are passed to :py:func:`scipy.interpolate.interp1d`. If ``method='polynomial'``, the ``order`` keyword argument must also be provided. - 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. use_coordinate : bool or Hashable, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if equally-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variable to use as the index. 
limit : int, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta \ or None, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: - a string that is valid input for pandas.to_timedelta - a :py:class:`numpy.timedelta64` object - a :py:class:`pandas.Timedelta` object - a :py:class:`datetime.timedelta` object Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled dimensions has not been implemented yet. Gap length is defined as the difference between coordinate values at the first data point after a gap and the last value before a gap. For gaps at the beginning (end), gap length is defined as the difference between coordinate values at the first (last) valid data point and the first (last) NaN. For example, consider:: array([nan, nan, nan, 1., nan, nan, 4., nan, nan]) Coordinates: * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively **kwargs : dict, optional parameters passed verbatim to the underlying interpolation function Returns ------- interpolated: Dataset Filled in Dataset. Warning -------- When passing fill_value as a keyword argument with method="linear", it does not use ``numpy.interp`` but it uses ``scipy.interpolate.interp1d``, which provides the fill_value parameter. See Also -------- numpy.interp scipy.interpolate Examples -------- >>> ds = xr.Dataset( ... { ... "A": ("x", [np.nan, 2, 3, np.nan, 0]), ... "B": ("x", [3, 4, np.nan, 1, 7]), ... "C": ("x", [np.nan, np.nan, np.nan, 5, 0]), ... "D": ("x", [np.nan, 3, np.nan, -1, 4]), ... }, ... coords={"x": [0, 1, 2, 3, 4]}, ... ) >>> ds Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B nan 2.0 3.0 nan 0.0 B (x) float64 40B 3.0 4.0 nan 1.0 7.0 C (x) float64 40B nan nan nan 5.0 0.0 D (x) float64 40B nan 3.0 nan -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear") Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B nan 2.0 3.0 1.5 0.0 B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 C (x) float64 40B nan nan nan 5.0 0.0 D (x) float64 40B nan 3.0 1.0 -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate") Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B 1.0 2.0 3.0 1.5 0.0 B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 C (x) float64 40B 20.0 15.0 10.0 5.0 0.0 D (x) float64 40B 5.0 3.0 1.0 -1.0 4.0 """ from xarray.core.missing import _apply_over_vars_with_dim, interp_na new = _apply_over_vars_with_dim( interp_na, self, dim=dim, method=method, limit=limit, use_coordinate=use_coordinate, max_gap=max_gap, **kwargs, ) return new def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, optional The maximum number of consecutive NaN values to forward fill. 
In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Examples -------- >>> time = pd.date_range("2023-01-01", periods=10, freq="D") >>> data = np.array( ... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10] ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # Perform forward fill (ffill) on the dataset >>> dataset.ffill(dim="time") Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 1.0 1.0 1.0 5.0 5.0 5.0 8.0 8.0 10.0 # Limit the forward filling to a maximum of 2 consecutive NaN values >>> dataset.ffill(dim="time", limit=2) Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 1.0 1.0 nan 5.0 5.0 5.0 8.0 8.0 10.0 Returns ------- Dataset See Also -------- Dataset.bfill """ from xarray.core.missing import _apply_over_vars_with_dim, ffill new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit) return new def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, optional The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Examples -------- >>> time = pd.date_range("2023-01-01", periods=10, freq="D") >>> data = np.array( ... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10] ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # filled dataset, fills NaN values by propagating values backward >>> dataset.bfill(dim="time") Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 5.0 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 # Limit the backward filling to a maximum of 2 consecutive NaN values >>> dataset.bfill(dim="time", limit=2) Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 Returns ------- Dataset See Also -------- Dataset.ffill """ from xarray.core.missing import _apply_over_vars_with_dim, bfill new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit) return new def combine_first(self, other: Self) -> Self: """Combine two Datasets, default to data_vars of self. The new coordinates follow the normal broadcasting and alignment rules of ``join='outer'``. 
Vacant cells in the expanded coordinates are filled with np.nan. Parameters ---------- other : Dataset Used to fill all matching missing values in this array. Returns ------- Dataset """ out = ops.fillna(self, other, join="outer", dataset_join="outer") return out def reduce( self, func: Callable, dim: Dims = None, *, keep_attrs: bool | None = None, keepdims: bool = False, numeric_only: bool = False, **kwargs: Any, ) -> Self: """Reduce this dataset by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. keep_attrs : bool or None, optional If True (default), the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. numeric_only : bool, default: False If True, only apply ``func`` to variables with a numeric dtype. **kwargs : Any Additional keyword arguments passed on to ``func``. Returns ------- reduced : Dataset Dataset with this object's DataArrays replaced with new DataArrays of summarized data and the indicated dimension(s) removed. Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Calculate the 75th percentile of math scores for each student using np.percentile >>> percentile_scores = dataset.reduce(np.percentile, q=75, dim="test") >>> percentile_scores Size: 132B Dimensions: (student: 3) Coordinates: * student (student) Self: """Apply a function to each data variable in this dataset Parameters ---------- func : callable Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool or None, optional If True, both the dataset's and variables' attributes (`attrs`) will be copied from the original objects to the new ones. If False, the new dataset and variables will be returned without copying the attributes. args : iterable, optional Positional arguments passed on to `func`. **kwargs : Any Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` to each data variable. 
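# ---------------------------------------------------------------------------
# Usage sketch for ``combine_first`` (hypothetical data): values from the
# calling dataset take precedence and gaps are filled from ``other`` after
# an outer join of the coordinates.
#
#     import numpy as np
#     import xarray as xr
#
#     a = xr.Dataset({"var": ("x", [1.0, np.nan])}, coords={"x": [0, 1]})
#     b = xr.Dataset({"var": ("x", [10.0, 20.0, 30.0])}, coords={"x": [0, 1, 2]})
#     combined = a.combine_first(b)  # var == [1.0, 20.0, 30.0] on x=[0, 1, 2]
# ---------------------------------------------------------------------------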
Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 bar (x) float64 16B 1.0 2.0 """ from xarray.core.dataarray import DataArray if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) variables = { k: maybe_wrap_array(v, func(v, *args, **kwargs)) for k, v in self.data_vars.items() } # Convert non-DataArray values to DataArrays variables = { k: v if isinstance(v, DataArray) else DataArray(v) for k, v in variables.items() } coord_vars, indexes = merge_coordinates_without_align( [v.coords for v in variables.values()] ) coords = Coordinates._construct_direct(coords=coord_vars, indexes=indexes) if keep_attrs: for k, v in variables.items(): v._copy_attrs_from(self.data_vars[k]) for k, v in coords.items(): if k in self.coords: v._copy_attrs_from(self.coords[k]) else: for v in variables.values(): v.attrs = {} for v in coords.values(): v.attrs = {} attrs = self.attrs if keep_attrs else None return type(self)(variables, coords=coords, attrs=attrs) def apply( self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, ) -> Self: """ Backward compatible implementation of ``map`` See Also -------- Dataset.map """ warnings.warn( "Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, keep_attrs, args, **kwargs) def assign( self, variables: Mapping[Any, Any] | None = None, **variables_kwargs: Any, ) -> Self: """Assign new data variables to a Dataset, returning a new object with all the original variables in addition to the new ones. Parameters ---------- variables : mapping of hashable to Any Mapping from variables names to the new values. If the new values are callable, they are computed on the Dataset and assigned to new data variables. If the values are not callable, (e.g. a DataArray, scalar, or array), they are simply assigned. **variables_kwargs The keyword arguments form of ``variables``. One of variables or variables_kwargs must be provided. Returns ------- ds : Dataset A new Dataset with the new variables in addition to all the existing variables. Notes ----- Since ``kwargs`` is a dictionary, the order of your arguments may not be preserved, and so the order of the new variables is not well defined. Assigning multiple variables within the same ``assign`` is possible, but you cannot reference other variables created within the same ``assign`` call. The new assigned variables that replace existing coordinates in the original dataset are still listed as coordinates in the returned Dataset. See Also -------- pandas.DataFrame.assign Examples -------- >>> x = xr.Dataset( ... { ... "temperature_c": ( ... ("lat", "lon"), ... 20 * np.random.rand(4).reshape(2, 2), ... ), ... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)), ... }, ... coords={"lat": [10, 20], "lon": [150, 160]}, ... 
) >>> x Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 Where the value is a callable, evaluated on dataset: >>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32) Size: 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 Alternatively, the same behavior can be achieved by directly referencing an existing dataarray: >>> x.assign(temperature_f=x["temperature_c"] * 9 / 5 + 32) Size: 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 """ variables = either_dict_or_kwargs(variables, variables_kwargs, "assign") data = self.copy() # do all calculations first... results: CoercibleMapping = data._calc_assign_results(variables) # split data variables to add/replace vs. coordinates to replace results_data_vars: dict[Hashable, CoercibleValue] = {} results_coords: dict[Hashable, CoercibleValue] = {} for k, v in results.items(): if k in data._coord_names: results_coords[k] = v else: results_data_vars[k] = v # ... and then assign data.coords.update(results_coords) data.update(results_data_vars) return data def to_dataarray( self, dim: Hashable = "variable", name: Hashable | None = None ) -> DataArray: """Convert this dataset into an xarray.DataArray The data variables of this dataset will be broadcast against each other and stacked along the first axis of the new array. All coordinates of this dataset will remain coordinates. Parameters ---------- dim : Hashable, default: "variable" Name of the new dimension. name : Hashable or None, optional Name of the new data array. Returns ------- array : xarray.DataArray """ from xarray.core.dataarray import DataArray data_vars = [self.variables[k] for k in self.data_vars] broadcast_vars = broadcast_variables(*data_vars) data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0) dims = (dim,) + broadcast_vars[0].dims variable = Variable(dims, data, self.attrs, fastpath=True) coords = {k: v.variable for k, v in self.coords.items()} indexes = filter_indexes_from_coords(self._indexes, set(coords)) new_dim_index = PandasIndex(list(self.data_vars), dim) indexes[dim] = new_dim_index coords.update(new_dim_index.create_variables()) return DataArray._construct_direct(variable, coords, name, indexes) def to_array( self, dim: Hashable = "variable", name: Hashable | None = None ) -> DataArray: """Deprecated version of to_dataarray""" return self.to_dataarray(dim=dim, name=name) def _normalize_dim_order( self, dim_order: Sequence[Hashable] | None = None ) -> dict[Hashable, int]: """ Check the validity of the provided dimensions if any and return the mapping between dimension name and their size. Parameters ---------- dim_order: Sequence of Hashable or None, optional Dimension order to validate (default to the alphabetical order if None). Returns ------- result : dict[Hashable, int] Validated dimensions mapping. 
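# ---------------------------------------------------------------------------
# Usage sketch for ``to_dataarray`` (hypothetical data): data variables are
# broadcast against each other and stacked along a new dimension, named
# "variable" by default.
#
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [1, 2]), "b": ("x", [3, 4])})
#     da = ds.to_dataarray(dim="variable")   # dims: ("variable", "x")
#     back = da.to_dataset(dim="variable")   # inverse operation
# ---------------------------------------------------------------------------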
""" if dim_order is None: dim_order = list(self.dims) elif set(dim_order) != set(self.dims): raise ValueError( f"dim_order {dim_order} does not match the set of dimensions of this " f"Dataset: {list(self.dims)}" ) ordered_dims = {k: self.sizes[k] for k in dim_order} return ordered_dims def to_pandas(self) -> pd.Series | pd.DataFrame: """Convert this dataset into a pandas object without changing the number of dimensions. The type of the returned object depends on the number of Dataset dimensions: * 0D -> `pandas.Series` * 1D -> `pandas.DataFrame` Only works for Datasets with 1 or fewer dimensions. """ if len(self.dims) == 0: return pd.Series({k: v.item() for k, v in self.items()}) if len(self.dims) == 1: return self.to_dataframe() raise ValueError( f"cannot convert Datasets with {len(self.dims)} dimensions into " "pandas objects without changing the number of dimensions. " "Please use Dataset.to_dataframe() instead." ) def _to_dataframe(self, ordered_dims: Mapping[Any, int]): from xarray.core.extension_array import PandasExtensionArray # All and only non-index arrays (whether data or coordinates) should # become columns in the output DataFrame. Excluding indexes rather # than dims handles the case of a MultiIndex along a single dimension. columns_in_order = [k for k in self.variables if k not in self.xindexes] non_extension_array_columns = [ k for k in columns_in_order if not pd.api.types.is_extension_array_dtype(self.variables[k].data) # noqa: TID251 ] extension_array_columns = [ k for k in columns_in_order if pd.api.types.is_extension_array_dtype(self.variables[k].data) # noqa: TID251 ] extension_array_columns_different_index = [ k for k in extension_array_columns if set(self.variables[k].dims) != set(ordered_dims.keys()) ] extension_array_columns_same_index = [ k for k in extension_array_columns if k not in extension_array_columns_different_index ] data = [ self._variables[k].set_dims(ordered_dims).values.reshape(-1) for k in non_extension_array_columns ] index = self.coords.to_index([*ordered_dims]) broadcasted_df = pd.DataFrame( { **dict(zip(non_extension_array_columns, data, strict=True)), **{ c: self.variables[c].data for c in extension_array_columns_same_index }, }, index=index, ) for extension_array_column in extension_array_columns_different_index: extension_array = self.variables[extension_array_column].data index = self[ self.variables[extension_array_column].dims[0] ].coords.to_index() extension_array_df = pd.DataFrame( {extension_array_column: extension_array}, index=pd.Index(index.array) if isinstance(index, PandasExtensionArray) # type: ignore[redundant-expr] else index, ) extension_array_df.index.name = self.variables[extension_array_column].dims[ 0 ] broadcasted_df = broadcasted_df.join(extension_array_df) return broadcasted_df[columns_in_order] def to_dataframe(self, dim_order: Sequence[Hashable] | None = None) -> pd.DataFrame: """Convert this dataset into a pandas.DataFrame. Non-index variables in this dataset form the columns of the DataFrame. The DataFrame is indexed by the Cartesian product of this dataset's indices. Parameters ---------- dim_order: Sequence of Hashable or None, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dataframe. If provided, must include all dimensions of this dataset. 
By default, dimensions are in the same order as in `Dataset.sizes`. Returns ------- result : DataFrame Dataset as a pandas DataFrame. """ ordered_dims = self._normalize_dim_order(dim_order=dim_order) return self._to_dataframe(ordered_dims=ordered_dims) def _set_sparse_data_from_dataframe( self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple ) -> None: from sparse import COO if isinstance(idx, pd.MultiIndex): coords = np.stack([np.asarray(code) for code in idx.codes], axis=0) is_sorted = idx.is_monotonic_increasing shape = tuple(lev.size for lev in idx.levels) else: coords = np.arange(idx.size).reshape(1, -1) is_sorted = True shape = (idx.size,) for name, values in arrays: # In virtually all real use cases, the sparse array will now have # missing values and needs a fill_value. For consistency, don't # special case the rare exceptions (e.g., dtype=int without a # MultiIndex). dtype, fill_value = xrdtypes.maybe_promote(values.dtype) values = np.asarray(values, dtype=dtype) data = COO( coords, values, shape, has_duplicates=False, sorted=is_sorted, fill_value=fill_value, ) self[name] = (dims, data) def _set_numpy_data_from_dataframe( self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple ) -> None: if not isinstance(idx, pd.MultiIndex): for name, values in arrays: self[name] = (dims, values) return # NB: similar, more general logic, now exists in # variable.unstack_once; we could consider combining them at some # point. shape = tuple(lev.size for lev in idx.levels) indexer = tuple(idx.codes) # We already verified that the MultiIndex has all unique values, so # there are missing values if and only if the size of output arrays is # larger that the index. missing_values = math.prod(shape) > idx.shape[0] for name, values in arrays: # NumPy indexing is much faster than using DataFrame.reindex() to # fill in missing values: # https://stackoverflow.com/a/35049899/809705 if missing_values: dtype, fill_value = xrdtypes.maybe_promote(values.dtype) data = np.full(shape, fill_value, dtype) else: # If there are no missing values, keep the existing dtype # instead of promoting to support NA, e.g., keep integer # columns as integers. # TODO: consider removing this special case, which doesn't # exist for sparse=True. data = np.zeros(shape, values.dtype) data[indexer] = values self[name] = (dims, data) @classmethod def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self: """Convert a pandas.DataFrame into an xarray.Dataset Each column will be converted into an independent variable in the Dataset. If the dataframe's index is a MultiIndex, it will be expanded into a tensor product of one-dimensional indices (filling in missing values with NaN). If you rather preserve the MultiIndex use `xr.Dataset(df)`. This method will produce a Dataset very similar to that on which the 'to_dataframe' method was called, except with possibly redundant dimensions (since all dataset variables will have the same dimensionality). Parameters ---------- dataframe : DataFrame DataFrame from which to copy data and indices. sparse : bool, default: False If true, create a sparse arrays instead of dense numpy arrays. This can potentially save a large amount of memory if the DataFrame has a MultiIndex. Requires the sparse package (sparse.pydata.org). Returns ------- New Dataset. 
See Also -------- xarray.DataArray.from_series pandas.DataFrame.to_xarray """ # TODO: Add an option to remove dimensions along which the variables # are constant, to enable consistent serialization to/from a dataframe, # even if some variables have different dimensionality. if not dataframe.columns.is_unique: raise ValueError("cannot convert DataFrame with non-unique columns") idx = remove_unused_levels_categories(dataframe.index) if isinstance(idx, pd.MultiIndex) and not idx.is_unique: raise ValueError( "cannot convert a DataFrame with a non-unique MultiIndex into xarray" ) arrays = [] extension_arrays = [] for k, v in dataframe.items(): if not is_allowed_extension_array(v) or isinstance( v.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES ): arrays.append((k, np.asarray(v))) else: extension_arrays.append((k, v)) indexes: dict[Hashable, Index] = {} index_vars: dict[Hashable, Variable] = {} if isinstance(idx, pd.MultiIndex): dims = tuple( name if name is not None else f"level_{n}" # type: ignore[redundant-expr,unused-ignore] for n, name in enumerate(idx.names) ) for dim, lev in zip(dims, idx.levels, strict=True): xr_idx = PandasIndex(lev, dim) indexes[dim] = xr_idx index_vars.update(xr_idx.create_variables()) arrays += [(k, np.asarray(v)) for k, v in extension_arrays] extension_arrays = [] else: index_name = idx.name if idx.name is not None else "index" dims = (index_name,) xr_idx = PandasIndex(idx, index_name) indexes[index_name] = xr_idx index_vars.update(xr_idx.create_variables()) obj = cls._construct_direct(index_vars, set(index_vars), indexes=indexes) if sparse: obj._set_sparse_data_from_dataframe(idx, arrays, dims) else: obj._set_numpy_data_from_dataframe(idx, arrays, dims) for name, extension_array in extension_arrays: obj[name] = (dims, extension_array) return obj[dataframe.columns] if len(dataframe.columns) else obj def to_dask_dataframe( self, dim_order: Sequence[Hashable] | None = None, set_index: bool = False ) -> DaskDataFrame: """ Convert this dataset into a dask.dataframe.DataFrame. The dimensions, coordinates and data variables in this dataset form the columns of the DataFrame. Parameters ---------- dim_order : list, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. If provided, must include all dimensions of this dataset. By default, dimensions are sorted alphabetically. set_index : bool, default: False If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames do not support multi-indexes, set_index only works if the dataset only contains one dimension. 
Returns ------- dask.dataframe.DataFrame """ import dask.array as da import dask.dataframe as dd ordered_dims = self._normalize_dim_order(dim_order=dim_order) columns = list(ordered_dims) columns.extend(k for k in self.coords if k not in self.dims) columns.extend(self.data_vars) ds_chunks = self.chunks series_list = [] df_meta = pd.DataFrame() for name in columns: try: var = self.variables[name] except KeyError: # dimension without a matching coordinate size = self.sizes[name] data = da.arange(size, chunks=size, dtype=np.int64) var = Variable((name,), data) # IndexVariable objects have a dummy .chunk() method if isinstance(var, IndexVariable): var = var.to_base_variable() # Make sure var is a dask array, otherwise the array can become too large # when it is broadcasted to several dimensions: if not is_duck_dask_array(var._data): var = var.chunk() # Broadcast then flatten the array: var_new_dims = var.set_dims(ordered_dims).chunk(ds_chunks) dask_array = var_new_dims._data.reshape(-1) series = dd.from_dask_array(dask_array, columns=name, meta=df_meta) series_list.append(series) df = dd.concat(series_list, axis=1) if set_index: dim_order = [*ordered_dims] if len(dim_order) == 1: (dim,) = dim_order df = df.set_index(dim) else: # triggers an error about multi-indexes, even if only one # dimension is passed df = df.set_index(dim_order) return df def to_dict( self, data: bool | Literal["list", "array"] = "list", encoding: bool = False ) -> dict[str, Any]: """ Convert this dataset to a dictionary following xarray naming conventions. Converts all variables and attributes to native Python objects Useful for converting to json. To avoid datetime incompatibility use decode_times=False kwarg in xarrray.open_dataset. Parameters ---------- data : bool or {"list", "array"}, default: "list" Whether to include the actual data in the dictionary. When set to False, returns just the schema. If set to "array", returns data as underlying array type. If set to "list" (or True for backwards compatibility), returns data in lists of Python data types. Note that for obtaining the "list" output efficiently, use `ds.compute().to_dict(data="list")`. encoding : bool, default: False Whether to include the Dataset's encoding in the dictionary. Returns ------- d : dict Dict with keys: "coords", "attrs", "dims", "data_vars" and optionally "encoding". See Also -------- Dataset.from_dict DataArray.to_dict """ d: dict = { "coords": {}, "attrs": decode_numpy_dict_values(self.attrs), "dims": dict(self.sizes), "data_vars": {}, } for k in self.coords: d["coords"].update( {k: self[k].variable.to_dict(data=data, encoding=encoding)} ) for k in self.data_vars: d["data_vars"].update( {k: self[k].variable.to_dict(data=data, encoding=encoding)} ) if encoding: d["encoding"] = dict(self.encoding) return d @classmethod def from_dict(cls, d: Mapping[Any, Any]) -> Self: """Convert a dictionary into an xarray.Dataset. Parameters ---------- d : dict-like Mapping with a minimum structure of ``{"var_0": {"dims": [..], "data": [..]}, \ ...}`` Returns ------- obj : Dataset See also -------- Dataset.to_dict DataArray.from_dict Examples -------- >>> d = { ... "t": {"dims": ("t"), "data": [0, 1, 2]}, ... "a": {"dims": ("t"), "data": ["a", "b", "c"]}, ... "b": {"dims": ("t"), "data": [10, 20, 30]}, ... } >>> ds = xr.Dataset.from_dict(d) >>> ds Size: 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 Data variables: a (t) >> d = { ... "coords": { ... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}} ... }, ... 
"attrs": {"title": "air temperature"}, ... "dims": "t", ... "data_vars": { ... "a": {"dims": "t", "data": [10, 20, 30]}, ... "b": {"dims": "t", "data": ["a", "b", "c"]}, ... }, ... } >>> ds = xr.Dataset.from_dict(d) >>> ds Size: 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 Data variables: a (t) int64 24B 10 20 30 b (t) Self: variables = {} keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) for k, v in self._variables.items(): if k in self._coord_names: variables[k] = v else: variables[k] = f(v, *args, **kwargs) if keep_attrs: variables[k]._attrs = v._attrs attrs = self._attrs if keep_attrs else None return self._replace_with_new_dims(variables, attrs=attrs) def _binary_op(self, other, f, reflexive=False, join=None) -> Dataset: from xarray.core.dataarray import DataArray from xarray.core.datatree import DataTree from xarray.core.groupby import GroupBy if isinstance(other, DataTree | GroupBy): return NotImplemented align_type = OPTIONS["arithmetic_join"] if join is None else join if isinstance(other, DataArray | Dataset): self, other = align(self, other, join=align_type, copy=False) g = f if not reflexive else lambda x, y: f(y, x) ds = self._calculate_binary_op(g, other, join=align_type) keep_attrs = _get_keep_attrs(default=True) if keep_attrs: # Combine attributes from both operands, dropping conflicts from xarray.structure.merge import merge_attrs self_attrs = self.attrs other_attrs = getattr(other, "attrs", {}) ds.attrs = merge_attrs([self_attrs, other_attrs], "drop_conflicts") return ds def _inplace_binary_op(self, other, f) -> Self: from xarray.core.dataarray import DataArray from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a Dataset and " "a grouped object are not permitted" ) # we don't actually modify arrays in-place with in-place Dataset # arithmetic -- this lets us automatically align things if isinstance(other, DataArray | Dataset): other = other.reindex_like(self, copy=False) g = ops.inplace_to_noninplace_op(f) ds = self._calculate_binary_op(g, other, inplace=True) self._replace_with_new_dims( ds._variables, ds._coord_names, attrs=ds._attrs, indexes=ds._indexes, inplace=True, ) return self def _calculate_binary_op( self, f, other, join="inner", inplace: bool = False ) -> Dataset: def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars): if inplace and set(lhs_data_vars) != set(rhs_data_vars): raise ValueError( "datasets must have the same data variables " f"for in-place arithmetic operations: {list(lhs_data_vars)}, {list(rhs_data_vars)}" ) dest_vars = {} for k in lhs_data_vars: if k in rhs_data_vars: dest_vars[k] = f(lhs_vars[k], rhs_vars[k]) elif join in ["left", "outer"]: dest_vars[k] = f(lhs_vars[k], np.nan) for k in rhs_data_vars: if k not in dest_vars and join in ["right", "outer"]: dest_vars[k] = f(rhs_vars[k], np.nan) return dest_vars if utils.is_dict_like(other) and not isinstance(other, Dataset): # can't use our shortcut of doing the binary operation with # Variable objects, so apply over our data vars instead. 
new_data_vars = apply_over_both( self.data_vars, other, self.data_vars, other ) return type(self)(new_data_vars) other_coords: Coordinates | None = getattr(other, "coords", None) ds = self.coords.merge(other_coords) if isinstance(other, Dataset): new_vars = apply_over_both( self.data_vars, other.data_vars, self.variables, other.variables ) else: other_variable = getattr(other, "variable", other) new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars} ds._variables.update(new_vars) ds._dims = calculate_dimensions(ds._variables) return ds def _copy_attrs_from(self, other): self.attrs = other.attrs for v in other.variables: if v in self.variables: self.variables[v].attrs = other.variables[v].attrs def diff( self, dim: Hashable, n: int = 1, *, label: Literal["upper", "lower"] = "upper", ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters ---------- dim : Hashable Dimension over which to calculate the finite difference. n : int, default: 1 The number of times values are differenced. label : {"upper", "lower"}, default: "upper" The new coordinate in dimension ``dim`` will have the values of either the minuend's or subtrahend's coordinate for values 'upper' and 'lower', respectively. Returns ------- difference : Dataset The n-th order finite difference of this object. Notes ----- `n` matches numpy's behavior and is different from pandas' first argument named `periods`. Examples -------- >>> ds = xr.Dataset({"foo": ("x", [5, 5, 6, 6])}) >>> ds.diff("x") Size: 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: foo (x) int64 24B 0 1 0 >>> ds.diff("x", 2) Size: 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) int64 16B 1 -1 See Also -------- Dataset.differentiate """ if n == 0: return self if n < 0: raise ValueError(f"order `n` must be non-negative but got {n}") # prepare slices slice_start = {dim: slice(None, -1)} slice_end = {dim: slice(1, None)} # prepare new coordinate if label == "upper": slice_new = slice_end elif label == "lower": slice_new = slice_start else: raise ValueError("The 'label' argument has to be either 'upper' or 'lower'") indexes, index_vars = isel_indexes(self.xindexes, slice_new) variables = {} for name, var in self.variables.items(): if name in index_vars: variables[name] = index_vars[name] elif dim in var.dims: if name in self.data_vars: variables[name] = var.isel(slice_end) - var.isel(slice_start) else: variables[name] = var.isel(slice_new) else: variables[name] = var difference = self._replace_with_new_dims(variables, indexes=indexes) if n > 1: return difference.diff(dim, n - 1) else: return difference def shift( self, shifts: Mapping[Any, int] | None = None, fill_value: Any = xrdtypes.NA, **shifts_kwargs: int, ) -> Self: """Shift this dataset by an offset along one or more dimensions. Only data variables are moved; coordinates stay in place. This is consistent with the behavior of ``shift`` in pandas. Values shifted from beyond array bounds will appear at one end of each dimension, which are filled according to `fill_value`. For periodic offsets instead see `roll`. Parameters ---------- shifts : mapping of hashable to int Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. **shifts_kwargs The keyword arguments form of ``shifts``. 
One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Dataset Dataset with the same coordinates and attributes but shifted data variables. See Also -------- roll Examples -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}) >>> ds.shift(x=2) Size: 40B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: foo (x) object 40B nan nan 'a' 'b' 'c' """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") invalid = tuple(k for k in shifts if k not in self.dims) if invalid: raise ValueError( f"Dimensions {invalid} not found in data dimensions {tuple(self.dims)}" ) variables = {} for name, var in self.variables.items(): if name in self.data_vars: fill_value_ = ( fill_value.get(name, xrdtypes.NA) if isinstance(fill_value, dict) else fill_value ) var_shifts = {k: v for k, v in shifts.items() if k in var.dims} variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts) else: variables[name] = var return self._replace(variables) def roll( self, shifts: Mapping[Any, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, ) -> Self: """Roll this dataset by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not create any missing values to be filled. Also unlike shift, roll may rotate all variables, including coordinates if specified. The direction of rotation is consistent with :py:func:`numpy.roll`. Parameters ---------- shifts : mapping of hashable to int, optional A dict with keys matching dimensions and values given by integers to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. roll_coords : bool, default: False Indicates whether to roll the coordinates by the offset too. **shifts_kwargs : {dim: offset, ...}, optional The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- rolled : Dataset Dataset with the same attributes but rolled data and coordinates. See Also -------- shift Examples -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}, coords={"x": np.arange(5)}) >>> ds.roll(x=2) Size: 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: foo (x) >> ds.roll(x=2, roll_coords=True) Size: 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 3 4 0 1 2 Data variables: foo (x) Self: """ Sort object by labels or values (along an axis). Sorts the dataset, either along specified dimensions, or according to values of 1-D dataarrays that share dimension with calling object. If the input variables are dataarrays, then the dataarrays are aligned (via left-join) to the calling object prior to sorting by cell values. NaNs are sorted to the end, following Numpy convention. If multiple sorts along the same dimension is given, numpy's lexsort is performed along that dimension: https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html and the FIRST key in the sequence is used as the primary sort key, followed by the 2nd key, etc. Parameters ---------- variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. If a callable, the callable is passed this object, and the result is used as the value for cond. ascending : bool, default: True Whether to sort by ascending or descending order. Returns ------- sorted : Dataset A new dataset where all the specified dims are sorted by dim labels. 
See Also -------- DataArray.sortby numpy.sort pandas.sort_values pandas.sort_index Examples -------- >>> ds = xr.Dataset( ... { ... "A": (("x", "y"), [[1, 2], [3, 4]]), ... "B": (("x", "y"), [[5, 6], [7, 8]]), ... }, ... coords={"x": ["b", "a"], "y": [1, 0]}, ... ) >>> ds.sortby("x") Size: 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) >> ds.sortby(lambda x: -x["y"]) Size: 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements for each variable in the Dataset. Parameters ---------- q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. numeric_only : bool, optional If True, only apply ``func`` to variables with a numeric dtype. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Dataset If `q` is a single quantile, then the result is a scalar for each variable in data_vars. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return Dataset. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile Examples -------- >>> ds = xr.Dataset( ... {"a": (("x", "y"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])}, ... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]}, ... ) >>> ds.quantile(0) # or ds.quantile(0, dim=...) Size: 16B Dimensions: () Coordinates: quantile float64 8B 0.0 Data variables: a float64 8B 0.7 >>> ds.quantile(0, dim="x") Size: 72B Dimensions: (y: 4) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 quantile float64 8B 0.0 Data variables: a (y) float64 32B 0.7 4.2 2.6 1.5 >>> ds.quantile([0, 0.5, 1]) Size: 48B Dimensions: (quantile: 3) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 Data variables: a (quantile) float64 24B 0.7 3.4 9.4 >>> ds.quantile([0, 0.5, 1], dim="x") Size: 152B Dimensions: (quantile: 3, y: 4) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 * y (y) float64 32B 1.0 1.5 2.0 2.5 Data variables: a (quantile, y) float64 96B 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9 References ---------- .. [1] R. J. Hyndman and Y. 
Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ # interpolation renamed to method in version 0.21.0 # check here and in variable to avoid repeated warnings if interpolation is not None: warnings.warn( "The `interpolation` argument to quantile was renamed to `method`.", FutureWarning, stacklevel=2, ) if method != "linear": raise TypeError("Cannot pass interpolation and method keywords!") method = interpolation dims: set[Hashable] if isinstance(dim, str): dims = {dim} elif dim is None or dim is ...: dims = set(self.dims) else: dims = set(dim) invalid_dims = set(dims) - set(self.dims) if invalid_dims: raise ValueError( f"Dimensions {tuple(invalid_dims)} not found in data dimensions {tuple(self.dims)}" ) q = np.asarray(q, dtype=np.float64) variables = {} for name, var in self.variables.items(): reduce_dims = [d for d in var.dims if d in dims] if reduce_dims or not var.dims: if name not in self.coords and ( not numeric_only or np.issubdtype(var.dtype, np.number) or var.dtype == np.bool_ ): variables[name] = var.quantile( q, dim=reduce_dims, method=method, keep_attrs=keep_attrs, skipna=skipna, ) else: variables[name] = var # construct the new dataset coord_names = {k for k in self.coords if k in variables} indexes = {k: v for k, v in self._indexes.items() if k in variables} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) attrs = self.attrs if keep_attrs else None new = self._replace_with_new_dims( variables, coord_names=coord_names, attrs=attrs, indexes=indexes ) return new.assign_coords(quantile=q) def rank( self, dim: Hashable, *, pct: bool = False, keep_attrs: bool | None = None, ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct is True, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : Hashable Dimension over which to compute rank. pct : bool, default: False If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- ranked : Dataset Variables that do not depend on `dim` are dropped. """ if not OPTIONS["use_bottleneck"]: raise RuntimeError( "rank requires bottleneck to be enabled." " Call `xr.set_options(use_bottleneck=True)` to enable it." ) if dim not in self.dims: raise ValueError( f"Dimension {dim!r} not found in data dimensions {tuple(self.dims)}" ) variables = {} for name, var in self.variables.items(): if name in self.data_vars: if dim in var.dims: variables[name] = var.rank(dim, pct=pct) else: variables[name] = var coord_names = set(self.coords) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) attrs = self.attrs if keep_attrs else None return self._replace(variables, coord_names, attrs=attrs) def differentiate( self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions | None = None, ) -> Self: """Differentiate with the second order accurate central differences. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable The coordinate to be used to compute the gradient. 
edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. datetime_unit : None or {"W", "D", "h", "m", "s", "ms", \ "us", "ns", "ps", "fs", "as", None}, default: None Unit to compute gradient. Only valid for datetime coordinate. Returns ------- differentiated: Dataset See also -------- numpy.gradient: corresponding numpy function """ if coord not in self.variables and coord not in self.dims: variables_and_dims = tuple(set(self.variables.keys()).union(self.dims)) raise ValueError( f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}." ) coord_var = self[coord].variable if coord_var.ndim != 1: raise ValueError( f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}" " dimensional" ) dim = coord_var.dims[0] if _contains_datetime_like_objects(coord_var): if coord_var.dtype.kind in "mM" and datetime_unit is None: datetime_unit = cast( "DatetimeUnitOptions", np.datetime_data(coord_var.dtype)[0] ) elif datetime_unit is None: datetime_unit = "s" # Default to seconds for cftime objects coord_var = coord_var._to_numeric(datetime_unit=datetime_unit) variables = {} for k, v in self.variables.items(): if k in self.data_vars and dim in v.dims and k not in self.coords: if _contains_datetime_like_objects(v): v = v._to_numeric(datetime_unit=datetime_unit) grad = duck_array_ops.gradient( v.data, coord_var.data, edge_order=edge_order, axis=v.get_axis_num(dim), ) variables[k] = Variable(v.dims, grad) else: variables[k] = v return self._replace(variables) def integrate( self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : hashable, or sequence of hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if datetime coordinate is used. Returns ------- integrated : Dataset See also -------- DataArray.integrate numpy.trapz : corresponding numpy function Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])}, ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... ) >>> ds Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.integrate("x") Size: 16B Dimensions: () Data variables: a float64 8B 16.5 b float64 8B 3.5 >>> ds.integrate("y") Size: 16B Dimensions: () Data variables: a float64 8B 20.0 b float64 8B 4.0 """ if not isinstance(coord, list | tuple): coord = (coord,) result = self for c in coord: result = result._integrate_one(c, datetime_unit=datetime_unit) return result def _integrate_one(self, coord, datetime_unit=None, cumulative=False): if coord not in self.variables and coord not in self.dims: variables_and_dims = tuple(set(self.variables.keys()).union(self.dims)) raise ValueError( f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}." 
) coord_var = self[coord].variable if coord_var.ndim != 1: raise ValueError( f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}" " dimensional" ) dim = coord_var.dims[0] if _contains_datetime_like_objects(coord_var): if coord_var.dtype.kind in "mM" and datetime_unit is None: datetime_unit, _ = np.datetime_data(coord_var.dtype) elif datetime_unit is None: datetime_unit = "s" # Default to seconds for cftime objects coord_var = coord_var._replace( data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit) ) variables = {} coord_names = set() for k, v in self.variables.items(): if k in self.coords: if dim not in v.dims or cumulative: variables[k] = v coord_names.add(k) elif k in self.data_vars and dim in v.dims: coord_data = to_like_array(coord_var.data, like=v.data) if _contains_datetime_like_objects(v): v = datetime_to_numeric(v, datetime_unit=datetime_unit) if cumulative: integ = duck_array_ops.cumulative_trapezoid( v.data, coord_data, axis=v.get_axis_num(dim) ) v_dims = v.dims else: integ = duck_array_ops.trapz( v.data, coord_data, axis=v.get_axis_num(dim) ) v_dims = list(v.dims) v_dims.remove(dim) variables[k] = Variable(v_dims, integ) else: variables[k] = v indexes = {k: v for k, v in self._indexes.items() if k in variables} return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def cumulative_integrate( self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. The first entry of the cumulative integral of each variable is always 0, in order to keep the length of the dimension unchanged between input and output. Parameters ---------- coord : hashable, or sequence of hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if datetime coordinate is used. Returns ------- integrated : Dataset See also -------- DataArray.cumulative_integrate scipy.integrate.cumulative_trapezoid : corresponding scipy function Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])}, ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... ) >>> ds Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.cumulative_integrate("x") Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) float64 32B 0.0 5.0 10.5 16.5 b (x) float64 32B 0.0 1.5 3.0 3.5 >>> ds.cumulative_integrate("y") Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) float64 32B 0.0 30.0 8.0 20.0 b (x) float64 32B 0.0 9.0 3.0 4.0 """ if not isinstance(coord, list | tuple): coord = (coord,) result = self for c in coord: result = result._integrate_one( c, datetime_unit=datetime_unit, cumulative=True ) return result @property def real(self) -> Self: """ The real part of each data variable. See Also -------- numpy.ndarray.real """ return self.map(lambda x: x.real, keep_attrs=True) @property def imag(self) -> Self: """ The imaginary part of each data variable. 
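        Examples
        --------
        A small sketch with a complex-valued variable (variable and dimension
        names are illustrative):

        >>> ds = xr.Dataset({"z": ("x", np.array([1 + 2j, 3 + 4j]))})
        >>> ds.imag["z"].values
        array([2., 4.])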
See Also -------- numpy.ndarray.imag """ return self.map(lambda x: x.imag, keep_attrs=True) plot = utils.UncachedAccessor(DatasetPlotAccessor) def filter_by_attrs(self, **kwargs) -> Self: """Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned containing only the variables for which all the filter tests pass. These tests are either ``key=value`` for which the attribute ``key`` has the exact value ``value`` or the callable passed into ``key=callable`` returns True. The callable will be passed a single value, either the value of the attribute ``key`` or ``None`` if the DataArray does not have an attribute with the name ``key``. Parameters ---------- **kwargs key : str Attribute name. value : callable or obj If value is a callable, it should return a boolean in the form of bool = func(attr) where attr is da.attrs[key]. Otherwise, value will be compared to the each DataArray's attrs[key]. Returns ------- new : Dataset New dataset with variables filtered by attribute. Examples -------- >>> temp = 15 + 8 * np.random.randn(2, 2, 3) >>> precip = 10 * np.random.rand(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> dims = ["x", "y", "time"] >>> temp_attr = dict(standard_name="air_potential_temperature") >>> precip_attr = dict(standard_name="convective_precipitation_flux") >>> ds = xr.Dataset( ... dict( ... temperature=(dims, temp, temp_attr), ... precipitation=(dims, precip, precip_attr), ... ), ... coords=dict( ... lon=(["x", "y"], lon), ... lat=(["x", "y"], lat), ... time=pd.date_range("2014-09-06", periods=3), ... reference_time=pd.Timestamp("2014-09-05"), ... ), ... ) Get variables matching a specific standard_name: >>> ds.filter_by_attrs(standard_name="convective_precipitation_flux") Size: 192B Dimensions: (x: 2, y: 2, time: 3) Coordinates: * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 Get all variables that have a standard_name attribute: >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) Size: 288B Dimensions: (x: 2, y: 2, time: 3) Coordinates: * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 96B 29.11 18.2 22.83 ... 16.15 26.63 precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 """ selection = [] for var_name, variable in self.variables.items(): has_value_flag = False for attr_name, pattern in kwargs.items(): attr_value = variable.attrs.get(attr_name) if (callable(pattern) and pattern(attr_value)) or attr_value == pattern: has_value_flag = True else: has_value_flag = False break if has_value_flag is True: selection.append(var_name) return self[selection] def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this Dataset. 
Returns ------- Dataset with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ return unify_chunks(self)[0] def map_blocks( self, func: Callable[..., T_Xarray], args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """ Apply a function to each block of this Dataset. .. warning:: This method is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a Dataset as its first parameter. The function will receive a subset or 'block' of this Dataset (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_dataset, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. kwargs : Mapping or None Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray, Dataset or None, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like this object but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in this object is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- :func:`dask.array.map_blocks ` :func:`xarray.apply_ufunc ` :func:`xarray.DataArray.map_blocks ` :doc:`xarray-tutorial:advanced/map_blocks/map_blocks` Advanced Tutorial on map_blocks with dask Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... ).chunk() >>> ds = xr.Dataset({"a": array}) >>> ds.map_blocks(calculate_anomaly, template=ds).compute() Size: 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 
3 4 5 6 7 8 9 10 11 12 Data variables: a (time) float64 192B 0.1289 0.1132 -0.0856 ... 0.1906 -0.05901 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> ds.map_blocks( ... calculate_anomaly, ... kwargs={"groupby_type": "time.year"}, ... template=ds, ... ) Size: 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array Data variables: a (time) float64 192B dask.array """ from xarray.core.parallel import map_blocks return map_blocks(func, self, args, kwargs, template) def polyfit( self, dim: Hashable, deg: int, skipna: bool | None = None, rcond: float | None = None, w: Hashable | Any = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ) -> Self: """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- dim : hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : hashable or Any, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- polyfit_results : Dataset A single dataset which contains (for each "var" in the input dataset): [var]_polyfit_coefficients The coefficients of the best fit for each variable in this dataset. [var]_polyfit_residuals The residuals of the least-square computation for each variable (only included if `full=True`) When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) The rank is computed ignoring the NaN values that might be skipped. [dim]_singular_values The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) [var]_polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is not raised with in-memory (not dask) data and `full=True`. See Also -------- numpy.polyfit numpy.polyval xarray.polyval """ from xarray.computation.fit import polyfit as polyfit_impl return polyfit_impl(self, dim, deg, skipna, rcond, w, full, cov) def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: T_DatasetPadConstantValues | None = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ) -> Self: """Pad this dataset along one or more dimensions. .. 
warning:: This function is experimental and its behaviour is likely to change especially regarding padding of dimension coordinates (or IndexVariables). When using one of the modes ("edge", "reflect", "symmetric", "wrap"), coordinates will be padded with the same mode, otherwise coordinates are padded using the "constant" mode with fill_value dtypes.NA. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \ "minimum", "reflect", "symmetric", "wrap"}, default: "constant" How to pad the DataArray (taken from numpy docs): - "constant": Pads with a constant value. - "edge": Pads with the edge values of array. - "linear_ramp": Pads with the linear ramp between end_value and the array edge value. - "maximum": Pads with the maximum value of all or part of the vector along each axis. - "mean": Pads with the mean value of all or part of the vector along each axis. - "median": Pads with the median value of all or part of the vector along each axis. - "minimum": Pads with the minimum value of all or part of the vector along each axis. - "reflect": Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - "symmetric": Pads with the reflection of the vector mirrored along the edge of the array. - "wrap": Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. stat_length : int, tuple or mapping of hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique statistic lengths along each dimension. ((before, after),) yields same before and after statistic lengths for each dimension. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : scalar, tuple, mapping of dim name to scalar or tuple, or \ mapping of var name to scalar, tuple or to mapping of dim name to scalar or tuple, default: None Used in 'constant'. The values to set the padded values for each data variable / axis. ``{var_1: {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}, ... var_M: (before, after)}`` unique pad constants per data variable. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique pad constants along each dimension. ``((before, after),)`` yields same before and after constants for each dimension. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is ``None``, pads with ``np.nan``. end_values : scalar, tuple or mapping of hashable to tuple, default: None Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique end values along each dimension. ``((before, after),)`` yields same before and after end values for each axis. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is None. reflect_type : {"even", "odd", None}, optional Used in "reflect", and "symmetric". 
The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. Returns ------- padded : Dataset Dataset with the padded coordinates and data. See Also -------- Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad Notes ----- By default when ``mode="constant"`` and ``constant_values=None``, integer types will be promoted to ``float`` and padded with ``np.nan``. To avoid type promotion specify ``constant_values=np.nan`` Padding coordinates will drop their corresponding index (if any) and will reset default indexes for dimension coordinates. Examples -------- >>> ds = xr.Dataset({"foo": ("x", range(5))}) >>> ds.pad(x=(1, 2)) Size: 64B Dimensions: (x: 8) Dimensions without coordinates: x Data variables: foo (x) float64 64B nan 0.0 1.0 2.0 3.0 4.0 nan nan """ pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") if mode in ("edge", "reflect", "symmetric", "wrap"): coord_pad_mode = mode coord_pad_options = { "stat_length": stat_length, "constant_values": constant_values, "end_values": end_values, "reflect_type": reflect_type, } else: coord_pad_mode = "constant" coord_pad_options = {} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) variables = {} # keep indexes that won't be affected by pad and drop all other indexes xindexes = self.xindexes pad_dims = set(pad_width) indexes = { k: idx for k, idx in xindexes.items() if not pad_dims.intersection(xindexes.get_all_dims(k)) } for name, var in self.variables.items(): var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims} if not var_pad_width: variables[name] = var elif name in self.data_vars: if utils.is_dict_like(constant_values): if name in constant_values.keys(): filtered_constant_values = constant_values[name] elif not set(var.dims).isdisjoint(constant_values.keys()): filtered_constant_values = { k: v for k, v in constant_values.items() if k in var.dims } else: filtered_constant_values = 0 # TODO: https://github.com/pydata/xarray/pull/9353#discussion_r1724018352 else: filtered_constant_values = constant_values variables[name] = var.pad( pad_width=var_pad_width, mode=mode, stat_length=stat_length, constant_values=filtered_constant_values, end_values=end_values, reflect_type=reflect_type, keep_attrs=keep_attrs, ) else: variables[name] = var.pad( pad_width=var_pad_width, mode=coord_pad_mode, keep_attrs=keep_attrs, **coord_pad_options, # type: ignore[arg-type] ) # reset default index of dimension coordinates if (name,) == var.dims: dim_var = {name: variables[name]} index = PandasIndex.from_variables(dim_var, options={}) index_vars = index.create_variables(dim_var) indexes[name] = index variables[name] = index_vars[name] attrs = self._attrs if keep_attrs else None return self._replace_with_new_dims(variables, indexes=indexes, attrs=attrs) def idxmin( self, dim: Hashable | None = None, *, skipna: bool | None = None, fill_value: Any = xrdtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the minimum value along a dimension. 
Returns a new `Dataset` named after the dimension with the values of the coordinate labels along that dimension corresponding to minimum values along that dimension. In comparison to :py:meth:`~Dataset.argmin`, this returns the coordinate label while :py:meth:`~Dataset.argmin` returns the index. Parameters ---------- dim : Hashable, optional Dimension over which to apply `idxmin`. This is optional for 1D variables, but required for variables with 2 or more dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : Dataset New `Dataset` object with `idxmin` applied to its data and the indicated dimension removed. See Also -------- DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin Examples -------- >>> array1 = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array2 = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]}, ... ) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.min(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B -2 float (y) float64 24B -2.0 -4.0 1.0 >>> ds.argmin(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 4 float (y) int64 24B 4 0 2 >>> ds.idxmin(dim="x") Size: 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `Dataset` named after the dimension with the values of the coordinate labels along that dimension corresponding to maximum values along that dimension. In comparison to :py:meth:`~Dataset.argmax`, this returns the coordinate label while :py:meth:`~Dataset.argmax` returns the index. Parameters ---------- dim : str, optional Dimension over which to apply `idxmax`. This is optional for 1D variables, but required for variables with 2 or more dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. 
If False, the new object will be returned without attributes. Returns ------- reduced : Dataset New `Dataset` object with `idxmax` applied to its data and the indicated dimension removed. See Also -------- DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax Examples -------- >>> array1 = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array2 = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]}, ... ) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.max(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 2 float (y) float64 24B 2.0 2.0 1.0 >>> ds.argmax(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 1 float (y) int64 24B 0 2 2 >>> ds.idxmax(dim="x") Size: 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int Self: """Indices of the minima of the member variables. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : Hashable, optional The dimension over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will be an error, since DataArray.argmin will return a dict with indices for all dimensions, which does not make sense for a Dataset. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Dataset Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 79], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [39, 96, 78]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Indices of the minimum values along the 'student' dimension are calculated >>> argmin_indices = dataset.argmin(dim="student") >>> min_score_in_math = dataset["student"].isel( ... student=argmin_indices["math_scores"] ... ) >>> min_score_in_math Size: 84B array(['Bob', 'Bob', 'Alice'], dtype='>> min_score_in_english = dataset["student"].isel( ... student=argmin_indices["english_scores"] ... ) >>> min_score_in_english Size: 84B array(['Charlie', 'Bob', 'Charlie'], dtype=' Self: """Indices of the maxima of the member variables. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : str, optional The dimension over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will be an error, since DataArray.argmax will return a dict with indices for all dimensions, which does not make sense for a Dataset. 
keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Dataset Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Indices of the maximum values along the 'student' dimension are calculated >>> argmax_indices = dataset.argmax(dim="test") >>> argmax_indices Size: 132B Dimensions: (student: 3) Coordinates: * student (student) Self | T_DataArray: """ Calculate an expression supplied as a string in the context of the dataset. This is currently experimental; the API may change particularly around assignments, which currently return a ``Dataset`` with the additional variable. Currently only the ``python`` engine is supported, which has the same performance as executing in python. Parameters ---------- statement : str String containing the Python-like expression to evaluate. Returns ------- result : Dataset or DataArray, depending on whether ``statement`` contains an assignment. Examples -------- >>> ds = xr.Dataset( ... {"a": ("x", np.arange(0, 5, 1)), "b": ("x", np.linspace(0, 1, 5))} ... ) >>> ds Size: 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.eval("a + b") Size: 40B array([0. , 1.25, 2.5 , 3.75, 5. ]) Dimensions without coordinates: x >>> ds.eval("c = a + b") Size: 120B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 c (x) float64 40B 0.0 1.25 2.5 3.75 5.0 """ return pd.eval( # type: ignore[return-value] statement, resolvers=[self], target=self, parser=parser, # Because numexpr returns a numpy array, using that engine results in # different behavior. We'd be very open to a contribution handling this. engine="python", ) def query( self, queries: Mapping[Any, Any] | None = None, parser: QueryParserOptions = "pandas", engine: QueryEngineOptions = None, missing_dims: ErrorOptionsWithWarn = "raise", **queries_kwargs: Any, ) -> Self: """Return a new dataset with each array indexed along the specified dimension(s), where the indexers are given as strings containing Python expressions to be evaluated against the data variables in the dataset. Parameters ---------- queries : dict-like, optional A dict-like with keys matching dimensions and values given by strings containing Python expressions to be evaluated against the data variables in the dataset. The expressions will be evaluated using the pandas eval() function, and can contain any valid Python expressions but cannot contain any Python statements. parser : {"pandas", "python"}, default: "pandas" The parser to use to construct the syntax tree from the expression. The default of 'pandas' parses code slightly different than standard Python. 
Alternatively, you can parse an expression using the 'python' parser to retain strict Python semantics. engine : {"python", "numexpr", None}, default: None The engine used to evaluate the expression. Supported engines are: - None: tries to use numexpr, falls back to python - "numexpr": evaluates expressions using numexpr - "python": performs operations as if you had evalโ€™d in top level python missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **queries_kwargs : {dim: query, ...}, optional The keyword arguments form of ``queries``. One of queries or queries_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each array and dimension is indexed by the results of the appropriate queries. See Also -------- Dataset.isel pandas.eval Examples -------- >>> a = np.arange(0, 5, 1) >>> b = np.linspace(0, 1, 5) >>> ds = xr.Dataset({"a": ("x", a), "b": ("x", b)}) >>> ds Size: 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.query(x="a > 2") Size: 32B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: a (x) int64 16B 3 4 b (x) float64 16B 0.75 1.0 """ # allow queries to be given either as a dict or as kwargs queries = either_dict_or_kwargs(queries, queries_kwargs, "query") # check queries for dim, expr in queries.items(): if not isinstance(expr, str): msg = f"expr for dim {dim} must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) # evaluate the queries to create the indexers indexers = { dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine) for dim, expr in queries.items() } # apply the selection return self.isel(indexers, missing_dims=missing_dims) def curvefit( self, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ) -> Self: """ Curve fitting optimization for arbitrary functions. Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`. Parameters ---------- coords : hashable, DataArray, or sequence of hashable or DataArray Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. 
For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of hashable, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. **kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- curvefit_results : Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. [var]_curvefit_covariance The covariance matrix of the coefficient estimates. See Also -------- Dataset.polyfit scipy.optimize.curve_fit xarray.Dataset.xlm.modelfit External method from `xarray-lmfit `_ with more curve fitting functionality. """ from xarray.computation.fit import curvefit as curvefit_impl return curvefit_impl( self, coords, func, reduce_dims, skipna, p0, bounds, param_names, errors, kwargs, ) def drop_duplicates( self, dim: Hashable | Iterable[Hashable], *, keep: Literal["first", "last", False] = "first", ) -> Self: """Returns a new Dataset with duplicate dimension values removed. Parameters ---------- dim : dimension label or labels Pass `...` to drop duplicates along all dimensions. keep : {"first", "last", False}, default: "first" Determines which duplicates (if any) to keep. - ``"first"`` : Drop duplicates except for the first occurrence. - ``"last"`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. Returns ------- Dataset See Also -------- DataArray.drop_duplicates """ if isinstance(dim, str): dims: Iterable = (dim,) elif dim is ...: dims = self.dims elif not isinstance(dim, Iterable): dims = [dim] else: dims = dim missing_dims = set(dims) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) indexes = {dim: ~self.get_index(dim).duplicated(keep=keep) for dim in dims} return self.isel(indexes) def convert_calendar( self, calendar: CFCalendar, dim: Hashable = "time", align_on: Literal["date", "year"] | None = None, missing: Any | None = None, use_cftime: bool | None = None, ) -> Self: """Convert the Dataset to another calendar. Only converts the individual timestamps, does not modify any data except in dropping invalid/surplus dates or inserting missing dates. 
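        For instance (an illustrative sketch only, assuming ``ds`` has a
        standard-calendar ``time`` coordinate)::

            converted = ds.convert_calendar("noleap")  # Feb 29 timestamps cannot be represented and are dropped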
If the source and target calendars are either no_leap, all_leap or a standard type, only the type of the time array is modified. When converting to a leap year from a non-leap year, the 29th of February is removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving `360_day` calendars, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters --------- calendar : str The target calendar name. dim : Hashable, default: "time" Name of the time coordinate. align_on : {None, 'date', 'year'}, optional Must be specified when either source or target is a `360_day` calendar, ignored otherwise. See Notes. missing : Any or None, optional By default, i.e. if the value is None, this method will simply attempt to convert the dates in the source calendar to the same dates in the target calendar, and drop any of those that are not possible to represent. If a value is provided, a new time coordinate will be created in the target calendar with the same frequency as the original time coordinate; for any dates that are not present in the source, the data will be filled with this value. Note that using this mode requires that the source data have an inferable frequency; for more information see :py:func:`xarray.infer_freq`. For certain frequency, source, and target calendar combinations, this could result in many missing values, see notes. use_cftime : bool or None, optional Whether to use cftime objects in the output, only used if `calendar` is one of {"proleptic_gregorian", "gregorian" or "standard"}. If True, the new time axis uses cftime objects. If None (default), it uses :py:class:`numpy.datetime64` values if the date range permits it, and :py:class:`cftime.datetime` objects if not. If False, it uses :py:class:`numpy.datetime64` or fails. Returns ------- Dataset Copy of the dataarray with the time coordinate converted to the target calendar. If 'missing' was None (default), invalid dates in the new calendar are dropped, but missing dates are not inserted. If `missing` was given, the new data is reindexed to have a time axis with the same frequency as the source, but in the new calendar; any missing datapoints are filled with `missing`. Notes ----- Passing a value to `missing` is only usable if the source's time coordinate as an inferable frequencies (see :py:func:`~xarray.infer_freq`) and is only appropriate if the target coordinate, generated from this frequency, has dates equivalent to the source. It is usually **not** appropriate to use this mode with: - Period-end frequencies : 'A', 'Y', 'Q' or 'M', in opposition to 'AS' 'YS', 'QS' and 'MS' - Sub-monthly frequencies that do not divide a day evenly : 'W', 'nD' where `N != 1` or 'mH' where 24 % m != 0). If one of the source or target calendars is `"360_day"`, `align_on` must be specified and two options are offered. - "year" The dates are translated according to their relative position in the year, ignoring their original month and day information, meaning that the missing/surplus days are added/removed at regular intervals. From a `360_day` to a standard calendar, the output will be missing the following dates (day of year in parentheses): To a leap year: January 31st (31), March 31st (91), June 1st (153), July 31st (213), September 31st (275) and November 30th (335). 
To a non-leap year: February 6th (36), April 19th (109), July 2nd (183), September 12th (255), November 25th (329). From a standard calendar to a `"360_day"`, the following dates in the source array will be dropped: From a leap year: January 31st (31), April 1st (92), June 1st (153), August 1st (214), September 31st (275), December 1st (336) From a non-leap year: February 6th (37), April 20th (110), July 2nd (183), September 13th (256), November 25th (329) This option is best used on daily and subdaily data. - "date" The month/day information is conserved and invalid dates are dropped from the output. This means that when converting from a `"360_day"` to a standard calendar, all 31st (Jan, March, May, July, August, October and December) will be missing as there is no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. """ return convert_calendar( self, calendar, dim=dim, align_on=align_on, missing=missing, use_cftime=use_cftime, ) def interp_calendar( self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: Hashable = "time", ) -> Self: """Interpolates the Dataset to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal year equivalent then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- target: DataArray or DatetimeIndex or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : Hashable, default: "time" The time coordinate name. Return ------ DataArray The source interpolated on the decimal years of target, """ return interp_calendar(self, target, dim=dim) @_deprecate_positional_args("v2024.07.0") def groupby( self, group: GroupInput = None, *, squeeze: Literal[False] = False, restore_coord_dims: bool = False, eagerly_compute_group: Literal[False] | None = None, **groupers: Grouper, ) -> DatasetGroupBy: """Returns a DatasetGroupBy object for performing grouped operations. Parameters ---------- group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper Array whose unique values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary, must map an existing variable name to a :py:class:`Grouper` instance. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. eagerly_compute_group: False, optional This argument is deprecated. **groupers : Mapping of str to Grouper or Resampler Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object. One of ``group`` or ``groupers`` must be provided. Only a single ``grouper`` is allowed at present. Returns ------- grouped : DatasetGroupBy A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- >>> ds = xr.Dataset( ... 
{"foo": (("x", "y"), np.arange(12).reshape((4, 3)))}, ... coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ... ) Grouping by a single variable is easy >>> ds.groupby("letters") Execute a reduction >>> ds.groupby("letters").sum() Size: 64B Dimensions: (letters: 2, y: 3) Coordinates: * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Data variables: foo (letters, y) int64 48B 9 11 13 9 11 13 Grouping by multiple variables >>> ds.groupby(["letters", "x"]) Use Grouper objects to express more complicated GroupBy operations >>> from xarray.groupers import BinGrouper, UniqueGrouper >>> >>> ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Size: 144B Dimensions: (y: 3, x_bins: 2, letters: 2) Coordinates: * x_bins (x_bins) interval[int64, right] 32B (5, 15] (15, 25] * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Data variables: foo (y, x_bins, letters) float64 96B 0.0 nan nan 3.0 ... nan nan 5.0 See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns` Tutorial on :py:func:`~xarray.Dataset.Groupby` for windowed computation. :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray` Tutorial on :py:func:`~xarray.Dataset.Groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.Dataset.resample`. :external:py:meth:`pandas.DataFrame.groupby ` :func:`Dataset.groupby_bins ` :func:`DataArray.groupby ` :class:`core.groupby.DatasetGroupBy` :func:`Dataset.coarsen ` :func:`Dataset.resample ` :func:`DataArray.resample ` """ from xarray.core.groupby import ( DatasetGroupBy, _parse_group_and_groupers, _validate_groupby_squeeze, ) _validate_groupby_squeeze(squeeze) rgroupers = _parse_group_and_groupers( self, group, groupers, eagerly_compute_group=eagerly_compute_group ) return DatasetGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims) @_deprecate_positional_args("v2024.07.0") def groupby_bins( self, group: Hashable | DataArray | IndexVariable, bins: Bins, right: bool = True, labels: ArrayLike | None = None, precision: int = 3, include_lowest: bool = False, squeeze: Literal[False] = False, restore_coord_dims: bool = False, duplicates: Literal["raise", "drop"] = "raise", eagerly_compute_group: Literal[False] | None = None, ) -> DatasetGroupBy: """Returns a DatasetGroupBy object for performing grouped operations. Rather than using all unique values of `group`, the values are discretized first by applying `pandas.cut` [1]_ to `group`. Parameters ---------- group : Hashable, DataArray or IndexVariable Array whose binned values should be used to group this array. If a string, must be the name of a variable contained in this dataset. bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. right : bool, default: True Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array-like or bool, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. 
precision : int, default: 3 The precision at which to store and display the bins labels. include_lowest : bool, default: False Whether the first interval should be left-inclusive or not. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. eagerly_compute_group: False, optional This argument is deprecated. Returns ------- grouped : DatasetGroupBy A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to distinguish it from the original variable. See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. Dataset.groupby DataArray.groupby_bins core.groupby.DatasetGroupBy pandas.DataFrame.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html """ from xarray.core.groupby import ( DatasetGroupBy, ResolvedGrouper, _validate_groupby_squeeze, ) from xarray.groupers import BinGrouper _validate_groupby_squeeze(squeeze) grouper = BinGrouper( bins=bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, ) rgrouper = ResolvedGrouper( grouper, group, self, eagerly_compute_group=eagerly_compute_group ) return DatasetGroupBy( self, (rgrouper,), restore_coord_dims=restore_coord_dims, ) def weighted(self, weights: DataArray) -> DatasetWeighted: """ Weighted Dataset operations. Parameters ---------- weights : DataArray An array of weights associated with the values in this Dataset. Each value in the data contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a DataArray and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. Returns ------- computation.weighted.DatasetWeighted See Also -------- :func:`DataArray.weighted ` :ref:`compute.weighted` User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted` :doc:`xarray-tutorial:fundamentals/03.4_weighted` Tutorial on Weighted Reduction using :py:func:`~xarray.Dataset.weighted` """ from xarray.computation.weighted import DatasetWeighted return DatasetWeighted(self, weights) def rolling( self, dim: Mapping[Any, int] | None = None, min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, **window_kwargs: int, ) -> DatasetRolling: """ Rolling window object for Datasets. Parameters ---------- dim : dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or Mapping to int, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. **window_kwargs : optional The keyword arguments form of ``dim``. One of dim or window_kwargs must be provided. 
Returns ------- computation.rolling.DatasetRolling See Also -------- Dataset.cumulative DataArray.rolling DataArray.rolling_exp """ from xarray.computation.rolling import DatasetRolling dim = either_dict_or_kwargs(dim, window_kwargs, "rolling") return DatasetRolling(self, dim, min_periods=min_periods, center=center) def cumulative( self, dim: str | Iterable[Hashable], min_periods: int = 1, ) -> DatasetRolling: """ Accumulating object for Datasets Parameters ---------- dims : iterable of hashable The name(s) of the dimensions to create the cumulative window along min_periods : int, default: 1 Minimum number of observations in window required to have a value (otherwise result is NA). The default is 1 (note this is different from ``Rolling``, whose default is the size of the window). Returns ------- computation.rolling.DatasetRolling See Also -------- DataArray.cumulative Dataset.rolling Dataset.rolling_exp """ from xarray.computation.rolling import DatasetRolling if isinstance(dim, str): if dim not in self.dims: raise ValueError( f"Dimension {dim} not found in data dimensions: {self.dims}" ) dim = {dim: self.sizes[dim]} else: missing_dims = set(dim) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {missing_dims} not found in data dimensions: {self.dims}" ) dim = {d: self.sizes[d] for d in dim} return DatasetRolling(self, dim, min_periods=min_periods, center=False) def coarsen( self, dim: Mapping[Any, int] | None = None, boundary: CoarsenBoundaryOptions = "exact", side: SideOptions | Mapping[Any, SideOptions] = "left", coord_func: str | Callable | Mapping[Any, str | Callable] = "mean", **window_kwargs: int, ) -> DatasetCoarsen: """ Coarsen object for Datasets. Parameters ---------- dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left" coord_func : str or mapping of hashable to str, default: "mean" function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). Returns ------- computation.rolling.DatasetCoarsen See Also -------- :class:`computation.rolling.DatasetCoarsen` :func:`DataArray.coarsen ` :ref:`reshape.coarsen` User guide describing :py:func:`~xarray.Dataset.coarsen` :ref:`compute.coarsen` User guide on block arrgragation :py:func:`~xarray.Dataset.coarsen` :doc:`xarray-tutorial:fundamentals/03.3_windowed` Tutorial on windowed computation using :py:func:`~xarray.Dataset.coarsen` """ from xarray.computation.rolling import DatasetCoarsen dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen") return DatasetCoarsen( self, dim, boundary=boundary, side=side, coord_func=coord_func, ) @_deprecate_positional_args("v2024.07.0") def resample( self, indexer: Mapping[Any, ResampleCompatible | Resampler] | None = None, *, skipna: bool | None = None, closed: SideOptions | None = None, label: SideOptions | None = None, offset: pd.Timedelta | datetime.timedelta | str | None = None, origin: str | DatetimeLike = "start_day", restore_coord_dims: bool | None = None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> DatasetResample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. 
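        For instance (an illustrative sketch, assuming an hourly ``time`` coordinate), ``ds.resample(time="1D").mean()`` downsamples the dataset to daily means.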
If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : core.resample.DataArrayResample This object resampled. See Also -------- DataArray.resample pandas.Series.resample pandas.DataFrame.resample Dataset.groupby DataArray.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases """ from xarray.core.resample import DatasetResample return self._resample( resample_cls=DatasetResample, indexer=indexer, skipna=skipna, closed=closed, label=label, offset=offset, origin=origin, restore_coord_dims=restore_coord_dims, **indexer_kwargs, ) def drop_attrs(self, *, deep: bool = True) -> Self: """ Removes all attributes from the Dataset and its variables. Parameters ---------- deep : bool, default True Removes attributes from all variables. Returns ------- Dataset """ # Remove attributes from the dataset self = self._replace(attrs={}) if not deep: return self # Remove attributes from each variable in the dataset for var in self.variables: # variables don't have a `._replace` method, so we copy and then remove # attrs. If we added a `._replace` method, we could use that instead. if var not in self.xindexes: self[var] = self[var].copy() self[var].attrs = {} new_idx_variables = {} # Not sure this is the most elegant way of doing this, but it works. # (Should we have a more general "map over all variables, including # indexes" approach?) 
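        # The loop below copies each index's coordinate variables, clears their
        # attrs, and re-creates the variables through the index object, so that
        # indexes and their coordinates remain consistent after attrs are removed.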
for idx, idx_vars in self.xindexes.group_by_index(): # copy each coordinate variable of an index and drop their attrs temp_idx_variables = {k: v.copy() for k, v in idx_vars.items()} for v in temp_idx_variables.values(): v.attrs = {} # re-wrap the index object in new coordinate variables new_idx_variables.update(idx.create_variables(temp_idx_variables)) self = self.assign(new_idx_variables) return self xarray-2025.12.0/xarray/core/dataset_utils.py000066400000000000000000000051461511464676000210260ustar00rootroot00000000000000from __future__ import annotations import typing from collections.abc import Hashable, Mapping from typing import Any, Generic import pandas as pd from xarray.core import utils from xarray.core.common import _contains_datetime_like_objects from xarray.core.indexing import map_index_queries from xarray.core.types import T_Dataset from xarray.core.variable import IndexVariable, Variable if typing.TYPE_CHECKING: from xarray.core.dataset import Dataset class _LocIndexer(Generic[T_Dataset]): __slots__ = ("dataset",) def __init__(self, dataset: T_Dataset): self.dataset = dataset def __getitem__(self, key: Mapping[Any, Any]) -> T_Dataset: if not utils.is_dict_like(key): raise TypeError("can only lookup dictionaries from Dataset.loc") return self.dataset.sel(key) def __setitem__(self, key, value) -> None: if not utils.is_dict_like(key): raise TypeError( "can only set locations defined by dictionaries from Dataset.loc." f" Got: {key}" ) # set new values dim_indexers = map_index_queries(self.dataset, key).dim_indexers self.dataset[dim_indexers] = value def as_dataset(obj: Any) -> Dataset: """Cast the given object to a Dataset. Handles Datasets, DataArrays and dictionaries of variables. A new Dataset object is only created if the provided object is not already one. 
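    A minimal usage sketch (illustrative only; ``as_dataset`` is an internal
    helper in this module):

    >>> from xarray.core.dataset_utils import as_dataset
    >>> ds = as_dataset({"a": ("x", [1, 2, 3])})  # dict of variables -> new Dataset
    >>> same = as_dataset(ds)  # an existing Dataset is passed through unchanged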
""" from xarray.core.dataset import Dataset if hasattr(obj, "to_dataset"): obj = obj.to_dataset() if not isinstance(obj, Dataset): obj = Dataset(obj) return obj def _get_virtual_variable( variables, key: Hashable, dim_sizes: Mapping | None = None ) -> tuple[Hashable, Hashable, Variable]: """Get a virtual variable (e.g., 'time.year') from a dict of xarray.Variable objects (if possible) """ from xarray.core.dataarray import DataArray if dim_sizes is None: dim_sizes = {} if key in dim_sizes: data = pd.Index(range(dim_sizes[key]), name=key) variable = IndexVariable((key,), data) return key, key, variable if not isinstance(key, str): raise KeyError(key) split_key = key.split(".", 1) if len(split_key) != 2: raise KeyError(key) ref_name, var_name = split_key ref_var = variables[ref_name] if _contains_datetime_like_objects(ref_var): ref_var = DataArray(ref_var) data = getattr(ref_var.dt, var_name).data else: data = getattr(ref_var, var_name).data virtual_var = Variable(ref_var.dims, data) return ref_name, var_name, virtual_var xarray-2025.12.0/xarray/core/dataset_variables.py000066400000000000000000000037641511464676000216420ustar00rootroot00000000000000import typing from collections.abc import Hashable, Iterator, Mapping import numpy as np from xarray.core import formatting from xarray.core.utils import Frozen from xarray.core.variable import Variable if typing.TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset class DataVariables(Mapping[Hashable, "DataArray"]): __slots__ = ("_dataset",) def __init__(self, dataset: "Dataset"): self._dataset = dataset def __iter__(self) -> Iterator[Hashable]: return ( key for key in self._dataset._variables if key not in self._dataset._coord_names ) def __len__(self) -> int: length = len(self._dataset._variables) - len(self._dataset._coord_names) assert length >= 0, "something is wrong with Dataset._coord_names" return length def __contains__(self, key: Hashable) -> bool: return key in self._dataset._variables and key not in self._dataset._coord_names def __getitem__(self, key: Hashable) -> "DataArray": if key not in self._dataset._coord_names: return self._dataset[key] raise KeyError(key) def __repr__(self) -> str: return formatting.data_vars_repr(self) @property def variables(self) -> Mapping[Hashable, Variable]: all_variables = self._dataset.variables return Frozen({k: all_variables[k] for k in self}) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from data variable names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtype """ return self._dataset.dtypes def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._dataset._ipython_key_completions_() if key not in self._dataset._coord_names ] xarray-2025.12.0/xarray/core/datatree.py000066400000000000000000002713411511464676000177540ustar00rootroot00000000000000from __future__ import annotations import functools import io import itertools import textwrap from collections import ChainMap, defaultdict from collections.abc import ( Callable, Hashable, Iterable, Iterator, Mapping, ) from dataclasses import dataclass, field from html import escape from os import PathLike from typing import ( TYPE_CHECKING, Any, Concatenate, Literal, NoReturn, ParamSpec, TypeAlias, TypeVar, Union, overload, ) from xarray.core import utils from xarray.core._aggregations import DataTreeAggregations from xarray.core._typed_ops import DataTreeOpsMixin from xarray.core.common import TreeAttrAccessMixin, get_chunksizes from xarray.core.coordinates import Coordinates, DataTreeCoordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.dataset_variables import DataVariables from xarray.core.datatree_mapping import ( add_path_context_to_errors, map_over_datasets, ) from xarray.core.formatting import ( datatree_repr, diff_treestructure, dims_and_coords_repr, ) from xarray.core.formatting_html import ( datatree_repr as datatree_repr_html, ) from xarray.core.indexes import Index, Indexes from xarray.core.options import OPTIONS as XR_OPTS from xarray.core.options import _get_keep_attrs from xarray.core.treenode import NamedNode, NodePath, zip_subtrees from xarray.core.types import Self from xarray.core.utils import ( Default, FilteredMapping, Frozen, _default, drop_dims_from_indexers, either_dict_or_kwargs, maybe_wrap_array, parse_dims_as_set, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align from xarray.structure.merge import dataset_update_method try: from xarray.core.variable import calculate_dimensions except ImportError: # for xarray versions 2022.03.0 and earlier from xarray.core.dataset import calculate_dimensions if TYPE_CHECKING: import numpy as np import pandas as pd from dask.delayed import Delayed from xarray.backends import ZarrStore from xarray.backends.writers import T_DataTreeNetcdfEngine, T_DataTreeNetcdfTypes from xarray.core.types import ( Dims, DtCompatible, ErrorOptions, ErrorOptionsWithWarn, NestedDict, NetcdfWriteModes, T_ChunkDimFreq, T_ChunksFreq, ZarrStoreLike, ZarrWriteModes, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint from xarray.structure.merge import CoercibleMapping, CoercibleValue # """ # DEVELOPERS' NOTE # ---------------- # The idea of this module is to create a `DataTree` class which inherits the tree # structure from TreeNode, and also copies the entire API of `xarray.Dataset`, but with # certain methods decorated to instead map the dataset function over every node in the # tree. As this API is copied without directly subclassing `xarray.Dataset` we instead # create various Mixin classes (in ops.py) which each define part of `xarray.Dataset`'s # extensive API. # # Some of these methods must be wrapped to map over all nodes in the subtree. 
Others are # fine to inherit unaltered (normally because they (a) only call dataset properties and # (b) don't return a dataset that should be nested into a new tree) and some will get # overridden by the class definition of DataTree. # """ T_Path = Union[str, NodePath] T = TypeVar("T") P = ParamSpec("P") def _collect_data_and_coord_variables( data: Dataset, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Variable]]: data_variables = {} coord_variables = {} for k, v in data.variables.items(): if k in data._coord_names: coord_variables[k] = v else: data_variables[k] = v return data_variables, coord_variables def _to_new_dataset(data: Dataset | Coordinates | None) -> Dataset: if isinstance(data, Dataset): ds = data.copy(deep=False) elif isinstance(data, Coordinates): ds = data.to_dataset() elif data is None: ds = Dataset() else: raise TypeError(f"data object is not an xarray.Dataset, dict, or None: {data}") return ds def _inherited_dataset(ds: Dataset, parent: Dataset) -> Dataset: return Dataset._construct_direct( variables=parent._variables | ds._variables, coord_names=parent._coord_names | ds._coord_names, dims=parent._dims | ds._dims, attrs=ds._attrs, indexes=parent._indexes | ds._indexes, encoding=ds._encoding, close=ds._close, ) def _without_header(text: str) -> str: return "\n".join(text.split("\n")[1:]) def _indented(text: str) -> str: return textwrap.indent(text, prefix=" ") def check_alignment( path: str, node_ds: Dataset, parent_ds: Dataset | None, children: Mapping[str, DataTree], ) -> None: if parent_ds is not None: try: align(node_ds, parent_ds, join="exact", copy=False) except ValueError as e: node_repr = _indented(_without_header(repr(node_ds))) parent_repr = _indented(dims_and_coords_repr(parent_ds)) raise ValueError( f"group {path!r} is not aligned with its parents:\n" f"Group:\n{node_repr}\nFrom parents:\n{parent_repr}" ) from e if children: if parent_ds is not None: base_ds = _inherited_dataset(node_ds, parent_ds) else: base_ds = node_ds for child_name, child in children.items(): child_path = str(NodePath(path) / child_name) child_ds = child.to_dataset(inherit=False) check_alignment(child_path, child_ds, base_ds, child.children) def _deduplicate_inherited_coordinates(child: DataTree, parent: DataTree) -> None: # This method removes repeated indexes (and corresponding coordinates) # that are repeated between a DataTree and its parents. removed_something = False for name in parent._indexes: if name in child._node_indexes: # Indexes on a Dataset always have a corresponding coordinate. # We already verified that these coordinates match in the # check_alignment() call from _pre_attach(). del child._node_indexes[name] del child._node_coord_variables[name] removed_something = True if removed_something: child._node_dims = calculate_dimensions( child._data_variables | child._node_coord_variables ) for grandchild in child._children.values(): _deduplicate_inherited_coordinates(grandchild, child) def _check_for_slashes_in_names(variables: Iterable[Hashable]) -> None: offending_variable_names = [ name for name in variables if isinstance(name, str) and "/" in name ] if len(offending_variable_names) > 0: raise ValueError( "Given variables have names containing the '/' character: " f"{offending_variable_names}. " "Variables stored in DataTree objects cannot have names containing '/' characters, as this would make path-like access to variables ambiguous." ) class DatasetView(Dataset): """ An immutable Dataset-like view onto the data in a single DataTree node. 
In-place operations modifying this object should raise an AttributeError. This requires overriding all inherited constructors. Operations returning a new result will return a new xarray.Dataset object. This includes all API on Dataset, which will be inherited. """ # TODO what happens if user alters (in-place) a DataArray they extracted from this object? __slots__ = ( "_attrs", "_cache", # used by _CachedAccessor "_close", "_coord_names", "_dims", "_encoding", "_indexes", "_variables", ) def __init__( self, data_vars: Mapping[Any, Any] | None = None, coords: Mapping[Any, Any] | None = None, attrs: Mapping[Any, Any] | None = None, ): raise AttributeError("DatasetView objects are not to be initialized directly") @classmethod def _constructor( cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int], attrs: dict | None, indexes: dict[Any, Index], encoding: dict | None, close: Callable[[], None] | None, ) -> DatasetView: """Private constructor, from Dataset attributes.""" # We override Dataset._construct_direct below, so we need a new # constructor for creating DatasetView objects. obj: DatasetView = object.__new__(cls) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def __setitem__(self, key, val) -> None: raise AttributeError( "Mutation of the DatasetView is not allowed, please use `.__setitem__` on the wrapping DataTree node, " "or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`," "use `.copy()` first to get a mutable version of the input dataset." ) def update(self, other) -> NoReturn: raise AttributeError( "Mutation of the DatasetView is not allowed, please use `.update` on the wrapping DataTree node, " "or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`," "use `.copy()` first to get a mutable version of the input dataset." ) def set_close(self, close: Callable[[], None] | None) -> None: raise AttributeError("cannot modify a DatasetView()") def close(self) -> None: raise AttributeError( "cannot close a DatasetView(). Close the associated DataTree node instead" ) # FIXME https://github.com/python/mypy/issues/7328 @overload # type: ignore[override] def __getitem__(self, key: Mapping) -> Dataset: # type: ignore[overload-overlap] ... @overload def __getitem__(self, key: Hashable) -> DataArray: ... # See: https://github.com/pydata/xarray/issues/8855 @overload def __getitem__(self, key: Any) -> Dataset: ... def __getitem__(self, key) -> DataArray | Dataset: # TODO call the `_get_item` method of DataTree to allow path-like access to contents of other nodes # For now just call Dataset.__getitem__ return Dataset.__getitem__(self, key) @classmethod def _construct_direct( # type: ignore[override] cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int] | None = None, attrs: dict | None = None, indexes: dict[Any, Index] | None = None, encoding: dict | None = None, close: Callable[[], None] | None = None, ) -> Dataset: """ Overriding this method (along with ._replace) and modifying it to return a Dataset object should hopefully ensure that the return type of any method on this object is a Dataset. 
""" if dims is None: dims = calculate_dimensions(variables) if indexes is None: indexes = {} obj = object.__new__(Dataset) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def _replace( # type: ignore[override] self, variables: dict[Hashable, Variable] | None = None, coord_names: set[Hashable] | None = None, dims: dict[Any, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, encoding: dict | Default | None = _default, inplace: bool = False, ) -> Dataset: """ Overriding this method (along with ._construct_direct) and modifying it to return a Dataset object should hopefully ensure that the return type of any method on this object is a Dataset. """ if inplace: raise AttributeError("In-place mutation of the DatasetView is not allowed") return Dataset._replace( self, variables=variables, coord_names=coord_names, dims=dims, attrs=attrs, indexes=indexes, encoding=encoding, inplace=inplace, ) def map( # type: ignore[override] self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, ) -> Dataset: """Apply a function to each data variable in this dataset Parameters ---------- func : callable Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool | None, optional If True, both the dataset's and variables' attributes (`attrs`) will be copied from the original objects to the new ones. If False, the new dataset and variables will be returned without copying the attributes. args : iterable, optional Positional arguments passed on to `func`. **kwargs : Any Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` to each data variable. Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 bar (x) float64 16B 1.0 2.0 """ # Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188). # TODO Refactor xarray upstream to avoid needing to overwrite this. 
if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) variables = { k: maybe_wrap_array(v, func(v, *args, **kwargs)) for k, v in self.data_vars.items() } if keep_attrs: for k, v in variables.items(): v._copy_attrs_from(self.data_vars[k]) attrs = self.attrs if keep_attrs else None # return type(self)(variables, attrs=attrs) return Dataset(variables, attrs=attrs) FromDictDataValue: TypeAlias = "CoercibleValue | Dataset | DataTree | None" @dataclass class _CoordWrapper: value: CoercibleValue @dataclass class _DatasetArgs: data_vars: dict[str, CoercibleValue] = field(default_factory=dict) coords: dict[str, CoercibleValue] = field(default_factory=dict) class DataTree( NamedNode, DataTreeAggregations, DataTreeOpsMixin, TreeAttrAccessMixin, Mapping[str, "DataArray | DataTree"], ): """ A tree-like hierarchical collection of xarray objects. Attempts to present an API like that of xarray.Dataset, but methods are wrapped to also update all the tree's child nodes. """ # TODO Some way of sorting children by depth # TODO do we need a watch out for if methods intended only for root nodes are called on non-root nodes? # TODO dataset methods which should not or cannot act over the whole tree, such as .to_array # TODO .loc method # TODO a lot of properties like .variables could be defined in a DataMapping class which both Dataset and DataTree inherit from # TODO all groupby classes # TODO a lot of properties like .variables could be defined in a DataMapping class which both Dataset and DataTree inherit from # TODO all groupby classes _name: str | None _parent: DataTree | None _children: dict[str, DataTree] _cache: dict[str, Any] # used by _CachedAccessor _data_variables: dict[Hashable, Variable] _node_coord_variables: dict[Hashable, Variable] _node_dims: dict[Hashable, int] _node_indexes: dict[Hashable, Index] _attrs: dict[Hashable, Any] | None _encoding: dict[Hashable, Any] | None _close: Callable[[], None] | None __slots__ = ( "_attrs", "_cache", # used by _CachedAccessor "_children", "_close", "_data_variables", "_encoding", "_name", "_node_coord_variables", "_node_dims", "_node_indexes", "_parent", ) def __init__( self, dataset: Dataset | Coordinates | None = None, children: Mapping[str, DataTree] | None = None, name: str | None = None, ): """ Create a single node of a DataTree. The node may optionally contain data in the form of data and coordinate variables, stored in the same way as data is stored in an xarray.Dataset. Parameters ---------- dataset : Dataset, optional Data to store directly at this node. children : Mapping[str, DataTree], optional Any child nodes of this node. name : str, optional Name for this node of the tree. 
Returns ------- DataTree See Also -------- DataTree.from_dict """ self._set_node_data(_to_new_dataset(dataset)) # comes after setting node data as this will check for clashes between child names and existing variable names super().__init__(name=name, children=children) def _set_node_data(self, dataset: Dataset): _check_for_slashes_in_names(dataset.variables) data_vars, coord_vars = _collect_data_and_coord_variables(dataset) self._data_variables = data_vars self._node_coord_variables = coord_vars self._node_dims = dataset._dims self._node_indexes = dataset._indexes self._encoding = dataset._encoding self._attrs = dataset._attrs self._close = dataset._close def _pre_attach(self: DataTree, parent: DataTree, name: str) -> None: super()._pre_attach(parent, name) if name in parent.dataset.variables: raise KeyError( f"parent {parent.name} already contains a variable named {name}" ) path = str(NodePath(parent.path) / name) node_ds = self.to_dataset(inherit=False) parent_ds = parent._to_dataset_view(rebuild_dims=False, inherit=True) check_alignment(path, node_ds, parent_ds, self.children) _deduplicate_inherited_coordinates(self, parent) @property def _node_coord_variables_with_index(self) -> Mapping[Hashable, Variable]: return FilteredMapping( keys=self._node_indexes, mapping=self._node_coord_variables ) @property def _coord_variables(self) -> ChainMap[Hashable, Variable]: # ChainMap is incorrected typed in typeshed (only the first argument # needs to be mutable) # https://github.com/python/typeshed/issues/8430 return ChainMap( self._node_coord_variables, *(p._node_coord_variables_with_index for p in self.parents), # type: ignore[arg-type] ) @property def _dims(self) -> ChainMap[Hashable, int]: return ChainMap(self._node_dims, *(p._node_dims for p in self.parents)) @property def _indexes(self) -> ChainMap[Hashable, Index]: return ChainMap(self._node_indexes, *(p._node_indexes for p in self.parents)) def _to_dataset_view(self, rebuild_dims: bool, inherit: bool) -> DatasetView: coord_vars = self._coord_variables if inherit else self._node_coord_variables variables = dict(self._data_variables) variables |= coord_vars if rebuild_dims: dims = calculate_dimensions(variables) elif inherit: # Note: rebuild_dims=False with inherit=True can create # technically invalid Dataset objects because it still includes # dimensions that are only defined on parent data variables # (i.e. not present on any parent coordinate variables). # # For example: # >>> tree = DataTree.from_dict( # ... { # ... "/": xr.Dataset({"foo": ("x", [1, 2])}), # x has size 2 # ... "/b": xr.Dataset(), # ... } # ... ) # >>> ds = tree["b"]._to_dataset_view(rebuild_dims=False, inherit=True) # >>> ds # Size: 0B # Dimensions: (x: 2) # Dimensions without coordinates: x # Data variables: # *empty* # # Notice the "x" dimension is still defined, even though there are no variables # or coordinates. # # Normally this is not supposed to be possible in xarray's data model, # but here it is useful internally for use cases where we # want to inherit everything from parents nodes, e.g., for align() and repr(). # # The user should never be able to see this dimension via public API. 
dims = dict(self._dims) else: dims = dict(self._node_dims) return DatasetView._constructor( variables=variables, coord_names=set(self._coord_variables), dims=dims, attrs=self._attrs, indexes=dict(self._indexes if inherit else self._node_indexes), encoding=self._encoding, close=None, ) @property def dataset(self) -> DatasetView: """ An immutable Dataset-like view onto the data in this node. Includes inherited coordinates and indexes from parent nodes. For a mutable Dataset containing the same data as in this node, use `.to_dataset()` instead. See Also -------- DataTree.to_dataset """ return self._to_dataset_view(rebuild_dims=True, inherit=True) @dataset.setter def dataset(self, data: Dataset | None = None) -> None: ds = _to_new_dataset(data) self._replace_node(ds) # soft-deprecated alias, to facilitate the transition from # xarray-contrib/datatree ds = dataset def to_dataset(self, inherit: bool = True) -> Dataset: """ Return the data in this node as a new xarray.Dataset object. Parameters ---------- inherit : bool, optional If False, only include coordinates and indexes defined at the level of this DataTree node, excluding any inherited coordinates and indexes. See Also -------- DataTree.dataset """ coord_vars = self._coord_variables if inherit else self._node_coord_variables variables = dict(self._data_variables) variables |= coord_vars dims = calculate_dimensions(variables) if inherit else dict(self._node_dims) return Dataset._construct_direct( variables, set(coord_vars), dims, None if self._attrs is None else dict(self._attrs), dict(self._indexes if inherit else self._node_indexes), None if self._encoding is None else dict(self._encoding), None, ) @property def has_data(self) -> bool: """Whether or not there are any variables in this node.""" return bool(self._data_variables or self._node_coord_variables) @property def has_attrs(self) -> bool: """Whether or not there are any metadata attributes in this node.""" return len(self.attrs.keys()) > 0 @property def is_empty(self) -> bool: """False if node contains any data or attrs. Does not look at children.""" return not (self.has_data or self.has_attrs) @property def is_hollow(self) -> bool: """True if only leaf nodes contain data.""" return not any(node.has_data for node in self.subtree if not node.is_leaf) @property def variables(self) -> Mapping[Hashable, Variable]: """Low level interface to node contents as dict of Variable objects. This dictionary is frozen to prevent mutation that could violate Dataset invariants. It contains all variable objects constituting this DataTree node, including both data variables and coordinates. """ return Frozen(self._data_variables | self._coord_variables) @property def attrs(self) -> dict[Hashable, Any]: """Dictionary of global attributes on this node object.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) @property def encoding(self) -> dict: """Dictionary of global encoding attributes on this node object.""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value: Mapping) -> None: self._encoding = dict(value) @property def dims(self) -> Mapping[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. Note that type of this object differs from `DataArray.dims`. See `DataTree.sizes`, `Dataset.sizes`, and `DataArray.sizes` for consistently named properties. 
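        A minimal illustration (sketch only, assuming ``xr`` is the imported xarray):

        >>> tree = xr.DataTree(xr.Dataset({"foo": ("x", [1, 2, 3])}))
        >>> dict(tree.dims)
        {'x': 3}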
""" return Frozen(self._dims) @property def sizes(self) -> Mapping[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. This is an alias for `DataTree.dims` provided for the benefit of consistency with `DataArray.sizes`. See Also -------- DataArray.sizes """ return self.dims @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Any, Any]]: """Places to look-up items for key-completion""" yield self.data_vars yield FilteredMapping(keys=self._coord_variables, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.dims, mapping=self) # immediate child nodes yield self.children def _ipython_key_completions_(self) -> list[str]: """Provide method for the key-autocompletions in IPython. See https://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion For the details. """ # TODO allow auto-completing relative string paths, e.g. `dt['path/to/../ node'` # Would require changes to ipython's autocompleter, see https://github.com/ipython/ipython/issues/12420 # Instead for now we only list direct paths to all node in subtree explicitly items_on_this_node = self._item_sources paths_to_all_nodes_in_subtree = { path: node for path, node in self.subtree_with_keys if path != "." # exclude the root node } all_item_sources = itertools.chain( items_on_this_node, [paths_to_all_nodes_in_subtree] ) items = { item for source in all_item_sources for item in source if isinstance(item, str) } return list(items) def __contains__(self, key: object) -> bool: """The 'in' operator will return true or false depending on whether 'key' is either an array stored in the datatree or a child node, or neither. """ return key in self.variables or key in self.children def __bool__(self) -> bool: return bool(self._data_variables) or bool(self._children) def __iter__(self) -> Iterator[str]: return itertools.chain(self._data_variables, self._children) # type: ignore[arg-type] def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: raise TypeError( "cannot directly convert a DataTree into a " "numpy array. Instead, create an xarray.DataArray " "first, either with indexing on the DataTree or by " "invoking the `to_array()` method." ) def __repr__(self) -> str: # type: ignore[override] return datatree_repr(self) def __str__(self) -> str: return datatree_repr(self) def _repr_html_(self): """Make html representation of datatree object""" if XR_OPTS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return datatree_repr_html(self) def __enter__(self) -> Self: return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.close() # DatasetView does not support close() or set_close(), so we reimplement # these methods on DataTree. def _close_node(self) -> None: if self._close is not None: self._close() self._close = None def close(self) -> None: """Close any files associated with this tree.""" for node in self.subtree: node._close_node() def set_close(self, close: Callable[[], None] | None) -> None: """Set the closer for this node.""" self._close = close def _replace_node( self: DataTree, data: Dataset | Default = _default, children: dict[str, DataTree] | Default = _default, ) -> None: ds = self.to_dataset(inherit=False) if data is _default else data if children is _default: children = self._children for child_name in children: if child_name in ds.variables: raise ValueError(f"node already contains a variable named {child_name}") parent_ds = ( self.parent._to_dataset_view(rebuild_dims=False, inherit=True) if self.parent is not None else None ) check_alignment(self.path, ds, parent_ds, children) if data is not _default: self._set_node_data(ds) if self.parent is not None: _deduplicate_inherited_coordinates(self, self.parent) self.children = children def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree.""" new_node = super()._copy_node(inherit=inherit, deep=deep, memo=memo) data = self._to_dataset_view(rebuild_dims=False, inherit=inherit)._copy( deep=deep, memo=memo ) new_node._set_node_data(data) return new_node def get( # type: ignore[override] self: DataTree, key: str, default: DataTree | DataArray | None = None ) -> DataTree | DataArray | None: """ Access child nodes, variables, or coordinates stored in this node. Returned object will be either a DataTree or DataArray object depending on whether the key given points to a child or variable. Parameters ---------- key : str Name of variable / child within this node. Must lie in this immediate node (not elsewhere in the tree). default : DataTree | DataArray | None, optional A value to return if the specified key does not exist. Default return value is None. """ if key in self.children: return self.children[key] elif key in self.dataset: return self.dataset[key] else: return default def __getitem__(self: DataTree, key: str) -> DataTree | DataArray: """ Access child nodes, variables, or coordinates stored anywhere in this tree. Returned object will be either a DataTree or DataArray object depending on whether the key given points to a child or variable. Parameters ---------- key : str Name of variable / child within this node, or unix-like path to variable / child within another node. Returns ------- DataTree | DataArray """ # Either: if utils.is_dict_like(key): # dict-like indexing raise NotImplementedError("Should this index over whole tree?") elif isinstance(key, str): # TODO should possibly deal with hashables in general? # path-like: a name of a node/variable, or path to a node/variable path = NodePath(key) return self._get_item(path) elif utils.is_list_like(key): # iterable of variable names raise NotImplementedError( "Selecting via tags is deprecated, and selecting multiple items should be " "implemented via .subset" ) else: raise ValueError(f"Invalid format for key: {key}") def _set(self, key: str, val: DataTree | CoercibleValue) -> None: """ Set the child node or variable with the specified key to value. 
Counterpart to the public .get method, and also only works on the immediate node, not other nodes in the tree. """ if isinstance(val, DataTree): # create and assign a shallow copy here so as not to alter original name of node in grafted tree new_node = val.copy(deep=False) new_node.name = key new_node._set_parent(new_parent=self, child_name=key) else: if not isinstance(val, DataArray | Variable): # accommodate other types that can be coerced into Variables val = DataArray(val) self.update({key: val}) def __setitem__( self, key: str, value: Any, ) -> None: """ Add either a child node or an array to the tree, at any position. Data can be added anywhere, and new nodes will be created to cross the path to the new location if necessary. If there is already a node at the given location, then if value is a Node class or Dataset it will overwrite the data already present at that node, and if value is a single array, it will be merged with it. """ # TODO xarray.Dataset accepts other possibilities, how do we exactly replicate all the behaviour? if utils.is_dict_like(key): raise NotImplementedError elif isinstance(key, str): # TODO should possibly deal with hashables in general? # path-like: a name of a node/variable, or path to a node/variable path = NodePath(key) if isinstance(value, Dataset): value = DataTree(dataset=value) return self._set_item(path, value, new_nodes_along_path=True) else: raise ValueError("Invalid format for key") def __delitem__(self, key: str) -> None: """Remove a variable or child node from this datatree node.""" if key in self.children: super().__delitem__(key) elif key in self._node_coord_variables: if key in self._node_indexes: del self._node_indexes[key] del self._node_coord_variables[key] self._node_dims = calculate_dimensions(self.variables) elif key in self._data_variables: del self._data_variables[key] self._node_dims = calculate_dimensions(self.variables) else: raise KeyError(key) @overload def update(self, other: Dataset) -> None: ... @overload def update(self, other: Mapping[Hashable, DataArray | Variable]) -> None: ... @overload def update(self, other: Mapping[str, DataTree | DataArray | Variable]) -> None: ... def update( self, other: ( Dataset | Mapping[Hashable, DataArray | Variable] | Mapping[str, DataTree | DataArray | Variable] ), ) -> None: """ Update this node's children and / or variables. Just like `dict.update` this is an in-place operation. """ new_children: dict[str, DataTree] = {} new_variables: CoercibleMapping if isinstance(other, Dataset): new_variables = other else: new_variables = {} for k, v in other.items(): if isinstance(v, DataTree): # avoid named node being stored under inconsistent key new_child: DataTree = v.copy() # Datatree's name is always a string until we fix that (#8836) new_child.name = str(k) new_children[str(k)] = new_child elif isinstance(v, DataArray | Variable): # TODO this should also accommodate other types that can be coerced into Variables new_variables[k] = v else: raise TypeError(f"Type {type(v)} cannot be assigned to a DataTree") vars_merge_result = dataset_update_method( self.to_dataset(inherit=False), new_variables ) data = Dataset._construct_direct(**vars_merge_result._asdict()) # TODO are there any subtleties with preserving order of children like this? 
merged_children = {**self.children, **new_children} self._replace_node(data, children=merged_children) def assign( self, items: Mapping[Any, Any] | None = None, **items_kwargs: Any ) -> DataTree: """ Assign new data variables or child nodes to a DataTree, returning a new object with all the original items in addition to the new ones. Parameters ---------- items : mapping of hashable to Any Mapping from variable or child node names to the new values. If the new values are callable, they are computed on the Dataset and assigned to new data variables. If the values are not callable, (e.g. a DataTree, DataArray, scalar, or array), they are simply assigned. **items_kwargs The keyword arguments form of ``variables``. One of variables or variables_kwargs must be provided. Returns ------- dt : DataTree A new DataTree with the new variables or children in addition to all the existing items. Notes ----- Since ``kwargs`` is a dictionary, the order of your arguments may not be preserved, and so the order of the new variables is not well-defined. Assigning multiple items within the same ``assign`` is possible, but you cannot reference other variables created within the same ``assign`` call. See Also -------- xarray.Dataset.assign pandas.DataFrame.assign """ items = either_dict_or_kwargs(items, items_kwargs, "assign") dt = self.copy() dt.update(items) return dt def drop_nodes( self: DataTree, names: str | Iterable[str], *, errors: ErrorOptions = "raise" ) -> DataTree: """ Drop child nodes from this node. Parameters ---------- names : str or iterable of str Name(s) of nodes to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a KeyError if any of the node names passed are not present as children of this node. If 'ignore', any given names that are present are dropped and no error is raised. Returns ------- dropped : DataTree A copy of the node with the specified children dropped. """ # the Iterable check is required for mypy if isinstance(names, str) or not isinstance(names, Iterable): names = {names} else: names = set(names) if errors == "raise": extra = names - set(self.children) if extra: raise KeyError(f"Cannot drop all nodes - nodes {extra} not present") result = self.copy() children_to_keep = { name: child for name, child in result.children.items() if name not in names } result._replace_node(children=children_to_keep) return result @overload @classmethod def from_dict( cls, data: Mapping[str, FromDictDataValue] | None = ..., coords: Mapping[str, CoercibleValue] | None = ..., *, name: str | None = ..., nested: Literal[False] = ..., ) -> Self: ... @overload @classmethod def from_dict( cls, data: ( Mapping[str, FromDictDataValue | NestedDict[FromDictDataValue]] | None ) = ..., coords: Mapping[str, CoercibleValue | NestedDict[CoercibleValue]] | None = ..., *, name: str | None = ..., nested: Literal[True] = ..., ) -> Self: ... @classmethod def from_dict( cls, data: ( Mapping[str, FromDictDataValue | NestedDict[FromDictDataValue]] | None ) = None, coords: Mapping[str, CoercibleValue | NestedDict[CoercibleValue]] | None = None, *, name: str | None = None, nested: bool = False, ) -> Self: """ Create a datatree from a dictionary of data objects, organised by paths into the tree. Parameters ---------- data : dict-like, optional A mapping from path names to ``None`` (indicating an empty node), ``DataTree``, ``Dataset``, objects coercible into a ``DataArray`` or a nested dictionary of any of the above types. 
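# --- Illustrative usage sketch (not part of the library source) for the
# ``assign`` and ``drop_nodes`` methods documented earlier in this class; the
# node names ("a", "b", "c") are arbitrary.
import xarray as xr

dt = xr.DataTree.from_dict({"/a": xr.Dataset({"x": 1}), "/b": xr.Dataset({"y": 2})})
with_child = dt.assign({"c": xr.DataTree()})  # new tree with an extra (empty) child
without_b = with_child.drop_nodes("b")        # copy of the tree without node "b"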
Path names should be given as unix-like paths, either absolute (/path/to/item) or relative to the root node (path/to/item). If path names containing more than one part are given, new tree nodes will be constructed automatically as necessary. To assign data to the root node of the tree use "", ".", "/" or "./" as the path. coords : dict-like, optional A mapping from path names to objects coercible into a DataArray, or nested dictionaries of coercible objects. name : Hashable | None, optional Name for the root node of the tree. Default is None. nested : bool, optional If true, nested dictionaries in ``data`` and ``coords`` are automatically flattened. Returns ------- DataTree See also -------- Dataset Notes ----- ``DataTree.from_dict`` serves a conceptually different purpose from ``Dataset.from_dict`` and ``DataArray.from_dict``. It converts a hierarchy of Xarray objects into a DataTree, rather than converting pure Python data structures. Examples -------- Construct a tree from a dict of Dataset objects: >>> dt = DataTree.from_dict( ... { ... "/": Dataset(coords={"time": [1, 2, 3]}), ... "/ocean": Dataset( ... { ... "temperature": ("time", [4, 5, 6]), ... "salinity": ("time", [7, 8, 9]), ... } ... ), ... "/atmosphere": Dataset( ... { ... "temperature": ("time", [2, 3, 4]), ... "humidity": ("time", [3, 4, 5]), ... } ... ), ... } ... ) >>> dt Group: / โ”‚ Dimensions: (time: 3) โ”‚ Coordinates: โ”‚ * time (time) int64 24B 1 2 3 โ”œโ”€โ”€ Group: /ocean โ”‚ Dimensions: (time: 3) โ”‚ Data variables: โ”‚ temperature (time) int64 24B 4 5 6 โ”‚ salinity (time) int64 24B 7 8 9 โ””โ”€โ”€ Group: /atmosphere Dimensions: (time: 3) Data variables: temperature (time) int64 24B 2 3 4 humidity (time) int64 24B 3 4 5 Or equivalently, use a dict of values that can be converted into `DataArray` objects, with syntax similar to the Dataset constructor: >>> dt2 = DataTree.from_dict( ... data={ ... "/ocean/temperature": ("time", [4, 5, 6]), ... "/ocean/salinity": ("time", [7, 8, 9]), ... "/atmosphere/temperature": ("time", [2, 3, 4]), ... "/atmosphere/humidity": ("time", [3, 4, 5]), ... }, ... coords={"/time": [1, 2, 3]}, ... 
) >>> assert dt.identical(dt2) Nested dictionaries are automatically flattened if ``nested=True``: >>> DataTree.from_dict({"a": {"b": {"c": {"x": 1, "y": 2}}}}, nested=True) Group: / โ””โ”€โ”€ Group: /a โ””โ”€โ”€ Group: /a/b โ””โ”€โ”€ Group: /a/b/c Dimensions: () Data variables: x int64 8B 1 y int64 8B 2 """ if data is None: data = {} if coords is None: coords = {} if nested: data_items = utils.flat_items(data) coords_items = utils.flat_items(coords) else: data_items = data.items() coords_items = coords.items() for arg_name, items in [("data", data_items), ("coords", coords_items)]: for key, value in items: if isinstance(value, dict): raise TypeError( f"{arg_name} contains a dict value at {key=}, " "which is not a valid argument to " f"DataTree.from_dict() with nested=False: {value}" ) # Canonicalize and unify paths between `data` and `coords` flat_data_and_coords = itertools.chain( data_items, ((k, _CoordWrapper(v)) for k, v in coords_items), ) nodes: dict[NodePath, _CoordWrapper | FromDictDataValue] = {} for key, value in flat_data_and_coords: path = NodePath(key).absolute() if path in nodes: raise ValueError( f"multiple entries found corresponding to node {str(path)!r}" ) nodes[path] = value # Merge nodes corresponding to DataArrays into Datasets dataset_args: defaultdict[NodePath, _DatasetArgs] = defaultdict(_DatasetArgs) for path in list(nodes): node = nodes[path] if node is not None and not isinstance(node, Dataset | DataTree): if path.parent == path: raise ValueError("cannot set DataArray value at root") if path.parent in nodes: raise ValueError( f"cannot set DataArray value at {str(path)!r} when " f"parent node at {str(path.parent)!r} is also set" ) del nodes[path] if isinstance(node, _CoordWrapper): dataset_args[path.parent].coords[path.name] = node.value else: dataset_args[path.parent].data_vars[path.name] = node for path, args in dataset_args.items(): try: nodes[path] = Dataset(args.data_vars, args.coords) except (ValueError, TypeError) as e: raise type(e)( "failed to construct xarray.Dataset for DataTree node at " f"{str(path)!r} with data_vars={args.data_vars} and " f"coords={args.coords}" ) from e # Create the root node root_data = nodes.pop(NodePath("/"), None) if isinstance(root_data, cls): # use cls so type-checkers understand this method returns Self obj = root_data.copy() obj.name = name elif root_data is None or isinstance(root_data, Dataset): obj = cls(name=name, dataset=root_data, children=None) else: raise TypeError( f'root node data (at "", ".", "/" or "./") must be a Dataset ' f"or DataTree, got {type(root_data)}" ) def depth(item: tuple[NodePath, object]) -> int: node_path, _ = item return len(node_path.parts) if nodes: # Populate tree with children # Sort keys by depth so as to insert nodes from root first (see GH issue #9276) for path, node in sorted(nodes.items(), key=depth): # Create and set new node if isinstance(node, DataTree): new_node = node.copy() elif isinstance(node, Dataset) or node is None: new_node = cls(dataset=node) else: raise TypeError(f"invalid values: {node}") obj._set_item( path, new_node, allow_overwrite=False, new_nodes_along_path=True, ) return obj def to_dict(self, relative: bool = False) -> dict[str, Dataset]: """ Create a dictionary mapping of paths to the data contained in those nodes. Parameters ---------- relative : bool If True, return relative instead of absolute paths. 
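# --- Illustrative usage sketch (not part of the library source): round-tripping a
# tree through ``to_dict`` and ``from_dict`` as documented above.
import xarray as xr

dt = xr.DataTree.from_dict(
    {"/ocean": xr.Dataset({"temperature": ("time", [4.0, 5.0, 6.0])})}
)
flat = dt.to_dict()                      # {"/": <empty Dataset>, "/ocean": <Dataset>}
roundtrip = xr.DataTree.from_dict(flat)  # rebuilds an equivalent tree
assert dt.identical(roundtrip)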
Returns ------- dict[str, Dataset] See also -------- DataTree.subtree_with_keys """ return { node.relative_to(self) if relative else node.path: node.to_dataset() for node in self.subtree } @property def nbytes(self) -> int: return sum(node.to_dataset().nbytes for node in self.subtree) def __len__(self) -> int: return len(self.children) + len(self.data_vars) @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this DataTree node has indexes that cannot be coerced to pandas.Index objects. See Also -------- DataTree.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of xarray Index objects used for label based indexing.""" return Indexes( self._indexes, {k: self._coord_variables[k] for k in self._indexes} ) @property def coords(self) -> DataTreeCoordinates: """Dictionary of xarray.DataArray objects corresponding to coordinate variables """ return DataTreeCoordinates(self) @property def data_vars(self) -> DataVariables: """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self.to_dataset()) def isomorphic(self, other: DataTree) -> bool: """ Two DataTrees are considered isomorphic if the set of paths to their descendent nodes are the same. Nothing about the data in each node is checked. Isomorphism is a necessary condition for two trees to be used in a nodewise binary operation, such as ``tree1 + tree2``. Parameters ---------- other : DataTree The other tree object to compare to. See Also -------- DataTree.equals DataTree.identical """ return diff_treestructure(self, other) is None def equals(self, other: DataTree) -> bool: """ Two DataTrees are equal if they have isomorphic node structures, with matching node names, and if they have matching variables and coordinates, all of which are equal. Parameters ---------- other : DataTree The other tree object to compare to. See Also -------- Dataset.equals DataTree.isomorphic DataTree.identical """ if not self.isomorphic(other): return False # Note: by using .dataset, this intentionally does not check that # coordinates are defined at the same levels. return all( node.dataset.equals(other_node.dataset) for node, other_node in zip_subtrees(self, other) ) def _inherited_coords_set(self) -> set[str]: return set(self.parent.coords if self.parent else []) # type: ignore[arg-type] def identical(self, other: DataTree) -> bool: """ Like equals, but also checks attributes on all datasets, variables and coordinates, and requires that any inherited coordinates at the tree root are also inherited on the other tree. Parameters ---------- other : DataTree The other tree object to compare to. See Also -------- Dataset.identical DataTree.isomorphic DataTree.equals """ if not self.isomorphic(other): return False if self.name != other.name: return False if self._inherited_coords_set() != other._inherited_coords_set(): return False return all( node.dataset.identical(other_node.dataset) for node, other_node in zip_subtrees(self, other) ) def filter(self: DataTree, filterfunc: Callable[[DataTree], bool]) -> DataTree: """ Filter nodes according to a specified condition. Returns a new tree containing only the nodes in the original tree for which `fitlerfunc(node)` is True. Will also contain empty nodes at intermediate positions if required to support leaves. Parameters ---------- filterfunc: function A function which accepts only one DataTree - the node on which filterfunc will be called. 
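# --- Illustrative usage sketch (not part of the library source) for ``filter``,
# documented above; the predicate keeps only nodes whose "x" dimension has more
# than two elements.
import xarray as xr

dt = xr.DataTree.from_dict(
    {
        "/coarse": xr.Dataset({"foo": ("x", [1, 2])}),
        "/fine": xr.Dataset({"foo": ("x", [1, 2, 3, 4])}),
    }
)
only_fine = dt.filter(lambda node: node.sizes.get("x", 0) > 2)  # keeps only "/fine"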
Returns ------- DataTree See Also -------- match pipe map_over_datasets """ filtered_nodes = { path: node.dataset for path, node in self.subtree_with_keys if filterfunc(node) } return DataTree.from_dict(filtered_nodes, name=self.name) def filter_like(self, other: DataTree) -> DataTree: """ Filter a datatree like another datatree. Returns a new tree containing only the nodes in the original tree which are also present in the other tree. Parameters ---------- other : DataTree The tree to filter this tree by. Returns ------- DataTree See Also -------- filter isomorphic Examples -------- >>> dt = DataTree.from_dict( ... { ... "/a/A": None, ... "/a/B": None, ... "/b/A": None, ... "/b/B": None, ... } ... ) >>> other = DataTree.from_dict( ... { ... "/a/A": None, ... "/b/A": None, ... } ... ) >>> dt.filter_like(other) Group: / โ”œโ”€โ”€ Group: /a โ”‚ โ””โ”€โ”€ Group: /a/A โ””โ”€โ”€ Group: /b โ””โ”€โ”€ Group: /b/A """ other_keys = {key for key, _ in other.subtree_with_keys} return self.filter(lambda node: node.relative_to(self) in other_keys) def prune(self, drop_size_zero_vars: bool = False) -> DataTree: """ Remove empty nodes from the tree. Returns a new tree containing only nodes that contain data variables with actual data. Intermediate nodes are kept if they are required to support non-empty children. Parameters ---------- drop_size_zero_vars : bool, default False If True, also considers variables with zero size as empty. If False, keeps nodes with data variables even if they have zero size. Returns ------- DataTree A new tree with empty nodes removed. See Also -------- filter Examples -------- >>> dt = xr.DataTree.from_dict( ... { ... "/a": xr.Dataset({"foo": ("x", [1, 2])}), ... "/b": xr.Dataset({"bar": ("x", [])}), ... "/c": xr.Dataset(), ... } ... ) >>> dt.prune() # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE Group: / โ”œโ”€โ”€ Group: /a โ”‚ Dimensions: (x: 2) โ”‚ Dimensions without coordinates: x โ”‚ Data variables: โ”‚ foo (x) int64 16B 1 2 โ””โ”€โ”€ Group: /b Dimensions: (x: 0) Dimensions without coordinates: x Data variables: bar (x) float64 0B... The ``drop_size_zero_vars`` parameter controls whether variables with zero size are considered empty: >>> dt.prune(drop_size_zero_vars=True) Group: / โ””โ”€โ”€ Group: /a Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) int64 16B 1 2 """ non_empty_cond: Callable[[DataTree], bool] if drop_size_zero_vars: non_empty_cond = lambda node: len(node.data_vars) > 0 and any( var.size > 0 for var in node.data_vars.values() ) else: non_empty_cond = lambda node: len(node.data_vars) > 0 return self.filter(non_empty_cond) def match(self, pattern: str) -> DataTree: """ Return nodes with paths matching pattern. Uses unix glob-like syntax for pattern-matching. Parameters ---------- pattern: str A pattern to match each node path against. Returns ------- DataTree See Also -------- filter pipe map_over_datasets Examples -------- >>> dt = DataTree.from_dict( ... { ... "/a/A": None, ... "/a/B": None, ... "/b/A": None, ... "/b/B": None, ... } ... ) >>> dt.match("*/B") Group: / โ”œโ”€โ”€ Group: /a โ”‚ โ””โ”€โ”€ Group: /a/B โ””โ”€โ”€ Group: /b โ””โ”€โ”€ Group: /b/B """ matching_nodes = { path: node.dataset for path, node in self.subtree_with_keys if NodePath(node.path).match(pattern) } return DataTree.from_dict(matching_nodes, name=self.name) @overload def map_over_datasets( self, func: Callable[..., Dataset | None], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree: ... 
@overload def map_over_datasets( self, func: Callable[..., tuple[Dataset | None, Dataset | None]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, DataTree]: ... @overload def map_over_datasets( self, func: Callable[..., tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, ...]: ... def map_over_datasets( self, func: Callable[..., Dataset | None | tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree | tuple[DataTree, ...]: """ Apply a function to every dataset in this subtree, returning a new tree which stores the results. The function will be applied to any dataset stored in this node, as well as any dataset stored in any of the descendant nodes. The returned tree will have the same structure as the original subtree. func needs to return a Dataset in order to rebuild the subtree. Parameters ---------- func : callable Function to apply to datasets with signature: `func(node.dataset, *args, **kwargs) -> Dataset`. Function will not be applied to any nodes without datasets. *args : tuple, optional Positional arguments passed on to `func`. Any DataTree arguments will be converted to Dataset objects via `.dataset`. kwargs : dict, optional Optional keyword arguments passed directly to ``func``. Returns ------- subtrees : DataTree, tuple of DataTrees One or more subtrees containing results from applying ``func`` to the data at each node. See also -------- map_over_datasets """ # TODO this signature means that func has no way to know which node it is being called upon - change? return map_over_datasets(func, self, *args, kwargs=kwargs) # type: ignore[arg-type] @overload def pipe( self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, ) -> T: ... @overload def pipe( self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: ... def pipe( self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: """Apply ``func(self, *args, **kwargs)`` This method replicates the pandas method of the same name. Parameters ---------- func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. *args positional arguments passed into ``func``. **kwargs a dictionary of keyword arguments passed into ``func``. Returns ------- object : T the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect xarray or pandas objects, e.g., instead of writing .. code:: python f(g(h(dt), arg1=a), arg2=b, arg3=c) You can write .. code:: python (dt.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: .. code:: python (dt.pipe(h).pipe(g, arg1=a).pipe((f, "arg2"), arg1=a, arg3=c)) """ if isinstance(func, tuple): # Use different var when unpacking function from tuple because the type # signature of the unpacked function differs from the expected type # signature in the case where only a function is given, rather than a tuple. # This makes type checkers happy at both call sites below. 
f, target = func if target in kwargs: raise ValueError( f"{target} is both the pipe target and a keyword argument" ) kwargs[target] = self return f(*args, **kwargs) return func(self, *args, **kwargs) # TODO some kind of .collapse() or .flatten() method to merge a subtree @property def groups(self): """Return all groups in the tree, given as a tuple of path-like strings.""" return tuple(node.path for node in self.subtree) def _unary_op(self, f, *args, **kwargs) -> DataTree: # TODO do we need to any additional work to avoid duplication etc.? (Similar to aggregations) return self.map_over_datasets(functools.partial(f, **kwargs), *args) def _binary_op(self, other, f, reflexive=False, join=None) -> DataTree: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): return NotImplemented ds_binop = functools.partial( Dataset._binary_op, f=f, reflexive=reflexive, join=join, ) return map_over_datasets(ds_binop, self, other) def _inplace_binary_op(self, other, f) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a DataTree and " "a grouped object are not permitted" ) # TODO see GH issue #9629 for required implementation raise NotImplementedError() # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping # related to https://github.com/python/mypy/issues/9319? def __eq__(self, other: DtCompatible) -> Self: # type: ignore[override] return super().__eq__(other) # filepath=None writes to a memoryview @overload def to_netcdf( self, filepath: None = None, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( self, filepath: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, *, compute: Literal[False], **kwargs, ) -> Delayed: ... # default return None @overload def to_netcdf( self, filepath: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: Literal[True] = True, **kwargs, ) -> None: ... def to_netcdf( self, filepath: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> None | memoryview | Delayed: """ Write datatree contents to a netCDF file. Parameters ---------- filepath : str or PathLike or file-like object or None Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. Only applies to the root group. 
encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"root/set1": {"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}, ...}``. See ``xarray.Dataset.to_netcdf`` for available options. unlimited_dims : dict, optional Mapping of unlimited dimensions per group that that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. format : {"NETCDF4", }, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. engine : {"netcdf4", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, by default preferring "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in ``xarray.set_options()``). group : str, optional Path to the netCDF4 group in the given file to open as the root group of the ``DataTree``. Currently, specifying a group is not supported. write_inherited_coords : bool, default: False If true, replicate inherited coordinates on all descendant nodes. Otherwise, only write coordinates at the level at which they are originally defined. This saves disk space, but requires opening the full tree to load inherited coordinates. compute : bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. kwargs : Additional keyword arguments to be passed to ``xarray.Dataset.to_netcdf`` Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * ``None`` otherwise Note ---- Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. """ from xarray.backends.writers import _datatree_to_netcdf return _datatree_to_netcdf( self, filepath, mode=mode, encoding=encoding, unlimited_dims=unlimited_dims, format=format, engine=engine, group=group, write_inherited_coords=write_inherited_coords, compute=compute, **kwargs, ) # compute=False returns dask.Delayed @overload def to_zarr( self, store: ZarrStoreLike, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, *, compute: Literal[False], **kwargs, ) -> Delayed: ... # default returns ZarrStore @overload def to_zarr( self, store: ZarrStoreLike, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, compute: Literal[True] = True, **kwargs, ) -> ZarrStore: ... def to_zarr( self, store: ZarrStoreLike, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> ZarrStore | Delayed: """ Write datatree contents to a Zarr store. Parameters ---------- store : zarr.storage.StoreLike Store or path to directory in file system mode : {{"w", "w-", "a", "r+", None}, default: "w-" Persistence mode: โ€œwโ€ means create (overwrite if exists); โ€œw-โ€ means create (fail if exists); โ€œaโ€ means override existing variables (create if does not exist); โ€œr+โ€ means modify existing array values only (raise an error if any metadata or shapes would change). The default mode is โ€œw-โ€. 
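# --- Illustrative usage sketch (not part of the library source) for the
# ``to_netcdf`` and ``to_zarr`` methods documented above. The file names are
# placeholders, and writing requires a suitable backend to be installed
# (h5netcdf or netCDF4 for netCDF, zarr for Zarr).
import xarray as xr

dt = xr.DataTree.from_dict({"/group1": xr.Dataset({"a": ("x", [1, 2, 3])})})
dt.to_netcdf("example_tree.nc")            # each tree node becomes a netCDF group
dt.to_zarr("example_tree.zarr", mode="w")  # each tree node becomes a Zarr group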
encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"root/set1": {"my_variable": {"dtype": "int16", "scale_factor": 0.1}, ...}, ...}``. See ``xarray.Dataset.to_zarr`` for available options. consolidated : bool If True, apply zarr's `consolidate_metadata` function to the store after writing metadata for all groups. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) write_inherited_coords : bool, default: False If true, replicate inherited coordinates on all descendant nodes. Otherwise, only write coordinates at the level at which they are originally defined. This saves disk space, but requires opening the full tree to load inherited coordinates. compute : bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. Metadata is always updated eagerly. kwargs : Additional keyword arguments to be passed to ``xarray.Dataset.to_zarr`` Note ---- Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. """ from xarray.backends.writers import _datatree_to_zarr return _datatree_to_zarr( self, store, mode=mode, encoding=encoding, consolidated=consolidated, group=group, write_inherited_coords=write_inherited_coords, compute=compute, **kwargs, ) def _get_all_dims(self) -> set: all_dims: set[Any] = set() for node in self.subtree: all_dims.update(node._node_dims) return all_dims def reduce( self, func: Callable, dim: Dims = None, *, keep_attrs: bool | None = None, keepdims: bool = False, numeric_only: bool = False, **kwargs: Any, ) -> Self: """Reduce this tree by applying `func` along some dimension(s).""" dims = parse_dims_as_set(dim, self._get_all_dims()) result = {} for path, node in self.subtree_with_keys: reduce_dims = [d for d in node._node_dims if d in dims] node_result = node.dataset.reduce( func, reduce_dims, keep_attrs=keep_attrs, keepdims=keepdims, numeric_only=numeric_only, **kwargs, ) result[path] = node_result return type(self).from_dict(result, name=self.name) def _selective_indexing( self, func: Callable[[Dataset, Mapping[Any, Any]], Dataset], indexers: Mapping[Any, Any], missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Apply an indexing operation over the subtree, handling missing dimensions and inherited coordinates gracefully by only applying indexing at each node selectively. """ all_dims = self._get_all_dims() indexers = drop_dims_from_indexers(indexers, all_dims, missing_dims) result = {} for path, node in self.subtree_with_keys: node_indexers = {k: v for k, v in indexers.items() if k in node.dims} with add_path_context_to_errors(path): node_result = func(node.dataset, node_indexers) # Indexing datasets corresponding to each node results in redundant # coordinates when indexes from a parent node are inherited. # Ideally, we would avoid creating such coordinates in the first # place, but that would require implementing indexing operations at # the Variable instead of the Dataset level. if node is not self: for k in node_indexers: if k not in node._node_coord_variables and k in node_result.coords: # We remove all inherited coordinates. Coordinates # corresponding to an index would be de-duplicated by # _deduplicate_inherited_coordinates(), but indexing (e.g., # with a scalar) can also create scalar coordinates, which # need to be explicitly removed. 
del node_result.coords[k] result[path] = node_result return type(self).from_dict(result, name=self.name) def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Returns a new data tree with each array indexed along the specified dimension(s). This method selects values from each array using its `__getitem__` method, except this method does not require knowing the order of each array's dimensions. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be an integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataTree A new DataTree with the same contents as this data tree, except each array and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- DataTree.sel Dataset.isel """ def apply_indexers(dataset, node_indexers): return dataset.isel(node_indexers, drop=drop) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") return self._selective_indexing( apply_indexers, indexers, missing_dims=missing_dims ) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Returns a new data tree with each array indexed by tick labels along the specified dimension(s). In contrast to `DataTree.isel`, indexers for this method should use labels instead of integers. Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. 
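# --- Illustrative usage sketch (not part of the library source) for ``isel`` and
# ``sel``, documented above; the "time" coordinate defined on the root is
# inherited by the child node.
import xarray as xr

dt = xr.DataTree.from_dict(
    {
        "/": xr.Dataset(coords={"time": [1, 2, 3]}),
        "/ocean": xr.Dataset({"temperature": ("time", [4.0, 5.0, 6.0])}),
    }
)
first = dt.isel(time=0)          # integer (positional) indexing applied to every node
late = dt.sel(time=slice(2, 3))  # label-based indexing, inclusive of both endpoints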
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataTree A new DataTree with the same contents as this data tree, except each variable and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- DataTree.isel Dataset.sel """ def apply_indexers(dataset, node_indexers): # TODO: reimplement in terms of map_index_queries(), to avoid # redundant look-ups of integer positions from labels (via indexes) # on child nodes. return dataset.sel( node_indexers, method=method, tolerance=tolerance, drop=drop ) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel") return self._selective_indexing(apply_indexers, indexers) def load(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this datatree's data from disk or a remote source into memory and return this datatree. Unlike compute, the original datatree is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. See Also -------- Dataset.load dask.compute """ # access .data to coerce everything to numpy or dask arrays lazy_data = { path: { k: v._data for k, v in node.variables.items() if is_chunked_array(v._data) } for path, node in self.subtree_with_keys } flat_lazy_data = { (path, var_name): array for path, node in lazy_data.items() for var_name, array in node.items() } if flat_lazy_data: chunkmanager = get_chunked_array_type(*flat_lazy_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute( *flat_lazy_data.values(), **kwargs ) for (path, var_name), data in zip( flat_lazy_data, evaluated_data, strict=False ): self[path].variables[var_name].data = data # load everything else sequentially for node in self.subtree: for k, v in node.variables.items(): if k not in lazy_data: v.load() return self def compute(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this datatree's data from disk or a remote source into memory and return a new datatree. Unlike load, the original datatree is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. 
However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataTree New object with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _persist_inplace(self, **kwargs) -> Self: """Persist all chunked arrays in memory""" # access .data to coerce everything to numpy or dask arrays lazy_data = { path: { k: v._data for k, v in node.variables.items() if is_chunked_array(v._data) } for path, node in self.subtree_with_keys } flat_lazy_data = { (path, var_name): array for path, node in lazy_data.items() for var_name, array in node.items() } if flat_lazy_data: chunkmanager = get_chunked_array_type(*flat_lazy_data.values()) # evaluate all the dask arrays simultaneously evaluated_data = chunkmanager.persist(*flat_lazy_data.values(), **kwargs) for (path, var_name), data in zip( flat_lazy_data, evaluated_data, strict=False ): self[path].variables[var_name].data = data return self def persist(self, **kwargs) -> Self: """Trigger computation, keeping data as chunked arrays. This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()`` or ``.load()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : DataTree New object with all dask-backed coordinates and data variables as persisted dask arrays. See Also -------- dask.persist """ new = self.copy(deep=False) return new._persist_inplace(**kwargs) @property def chunksizes(self) -> Mapping[str, Mapping[Hashable, tuple[int, ...]]]: """ Mapping from group paths to a mapping of chunksizes. If there's no chunked data in a group, the corresponding mapping of chunksizes will be empty. Cannot be modified directly, but can be modified by calling .chunk(). See Also -------- DataTree.chunk Dataset.chunksizes """ return Frozen( { node.path: get_chunksizes(node.variables.values()) for node in self.subtree } ) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce all arrays in all groups in this tree into dask arrays with the given chunks. Non-dask arrays in this tree will be converted to dask arrays. Dask arrays will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a :py:class:`groupers.TimeResampler` object is also accepted. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or a TimeResampler, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}``. 
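# --- Illustrative usage sketch (not part of the library source) for ``chunk``
# and ``chunksizes``, documented above. Requires dask (or another registered
# chunk manager) to be installed.
import numpy as np
import xarray as xr

dt = xr.DataTree.from_dict({"/model": xr.Dataset({"a": ("x", np.arange(1_000))})})
chunked = dt.chunk({"x": 100})  # every array with an "x" dimension is (re)chunked
print(chunked.chunksizes)       # mapping from group path to per-dimension chunk sizes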
name_prefix : str, default: "xarray-" Prefix for the name of any new dask arrays. token : str, optional Token uniquely identifying this datatree. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce this datatree's arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided Returns ------- chunked : xarray.DataTree See Also -------- Dataset.chunk Dataset.chunksizes xarray.unify_chunks dask.array.from_array """ # don't support deprecated ways of passing chunks if not isinstance(chunks, Mapping): raise TypeError( f"invalid type for chunks: {type(chunks)}. Only mappings are supported." ) combined_chunks = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") all_dims = self._get_all_dims() bad_dims = combined_chunks.keys() - all_dims if bad_dims: raise ValueError( f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(all_dims)}" ) rechunked_groups = { path: node.dataset.chunk( { dim: size for dim, size in combined_chunks.items() if dim in node._node_dims }, name_prefix=name_prefix, token=token, lock=lock, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) for path, node in self.subtree_with_keys } return self.from_dict(rechunked_groups, name=self.name) xarray-2025.12.0/xarray/core/datatree_mapping.py000066400000000000000000000166521511464676000214710ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Mapping from contextlib import contextmanager from typing import TYPE_CHECKING, Any, cast, overload from xarray.core.dataset import Dataset from xarray.core.treenode import group_subtrees from xarray.core.utils import result_name if TYPE_CHECKING: from xarray.core.datatree import DataTree @overload def map_over_datasets( func: Callable[..., Dataset | None], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree: ... # add an explicit overload for the most common case of two return values # (python typing does not have a way to match tuple lengths in general) @overload def map_over_datasets( func: Callable[..., tuple[Dataset | None, Dataset | None]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, DataTree]: ... @overload def map_over_datasets( func: Callable[..., tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, ...]: ... 
def map_over_datasets( func: Callable[..., Dataset | None | tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree | tuple[DataTree, ...]: """ Applies a function to every dataset in one or more DataTree objects with the same structure (ie.., that are isomorphic), returning new trees which store the results. The function will be applied to any dataset stored in any of the nodes in the trees. The returned trees will have the same structure as the supplied trees. ``func`` needs to return a Dataset, tuple of Dataset objects or None in order to be able to rebuild the subtrees after mapping, as each result will be assigned to its respective node of a new tree via `DataTree.from_dict`. Any returned value that is one of these types will be stacked into a separate tree before returning all of them. ``map_over_datasets`` is essentially syntactic sugar for the combination of ``group_subtrees`` and ``DataTree.from_dict``. For example, in the case of a two argument function that return one result, it is equivalent to:: results = {} for path, (left, right) in group_subtrees(left_tree, right_tree): results[path] = func(left.dataset, right.dataset) return DataTree.from_dict(results) Parameters ---------- func : callable Function to apply to datasets with signature: `func(*args: Dataset, **kwargs) -> Union[Dataset, tuple[Dataset, ...]]`. (i.e. func must accept at least one Dataset and return at least one Dataset.) *args : tuple, optional Positional arguments passed on to `func`. Any DataTree arguments will be converted to Dataset objects via `.dataset`. kwargs : dict, optional Optional keyword arguments passed directly to ``func``. Returns ------- Result of applying `func` to each node in the provided trees, packed back into DataTree objects via `DataTree.from_dict`. See also -------- DataTree.map_over_datasets group_subtrees DataTree.from_dict """ # TODO examples in the docstring # TODO inspect function to work out immediately if the wrong number of arguments were passed for it? from xarray.core.datatree import DataTree if kwargs is None: kwargs = {} # Walk all trees simultaneously, applying func to all nodes that lie in same position in different trees # We don't know which arguments are DataTrees so we zip all arguments together as iterables # Store tuples of results in a dict because we don't yet know how many trees we need to rebuild to return out_data_objects: dict[str, Dataset | tuple[Dataset | None, ...] 
| None] = {} tree_args = [arg for arg in args if isinstance(arg, DataTree)] name = result_name(tree_args) for path, node_tree_args in group_subtrees(*tree_args): node_dataset_args = [arg.dataset for arg in node_tree_args] for i, arg in enumerate(args): if not isinstance(arg, DataTree): node_dataset_args.insert(i, arg) with add_path_context_to_errors(path): results = func(*node_dataset_args, **kwargs) out_data_objects[path] = results num_return_values = _check_all_return_values(out_data_objects) if num_return_values is None: # one return value out_data = cast(Mapping[str, Dataset | None], out_data_objects) return DataTree.from_dict(out_data, name=name) # multiple return values out_data_tuples = cast(Mapping[str, tuple[Dataset | None, ...]], out_data_objects) output_dicts: list[dict[str, Dataset | None]] = [ {} for _ in range(num_return_values) ] for path, outputs in out_data_tuples.items(): for output_dict, output in zip(output_dicts, outputs, strict=False): output_dict[path] = output return tuple( DataTree.from_dict(output_dict, name=name) for output_dict in output_dicts ) @contextmanager def add_path_context_to_errors(path: str): """Add path context to any errors.""" try: yield except Exception as e: e.add_note(f"Raised whilst mapping function over node(s) with path {path!r}") raise def _check_single_set_return_values(path_to_node: str, obj: Any) -> int | None: """Check types returned from single evaluation of func, and return number of return values received from func.""" if isinstance(obj, Dataset | None): return None # no need to pack results if not isinstance(obj, tuple) or not all( isinstance(r, Dataset | None) for r in obj ): raise TypeError( f"the result of calling func on the node at position '{path_to_node}' is" f" not a Dataset or None or a tuple of such types:\n{obj!r}" ) return len(obj) def _check_all_return_values(returned_objects) -> int | None: """Walk through all values returned by mapping func over subtrees, raising on any invalid or inconsistent types.""" result_data_objects = list(returned_objects.items()) first_path, result = result_data_objects[0] return_values = _check_single_set_return_values(first_path, result) for path_to_node, obj in result_data_objects[1:]: cur_return_values = _check_single_set_return_values(path_to_node, obj) if return_values != cur_return_values: if return_values is None: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a tuple of {cur_return_values} datasets, whereas calling func on the " f"nodes at position {first_path} instead returns a single dataset." ) elif cur_return_values is None: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a single dataset, whereas calling func on the nodes at position " f"{first_path} instead returns a tuple of {return_values} datasets." ) else: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a tuple of {cur_return_values} datasets, whereas calling func on " f"the nodes at position {first_path} instead returns a tuple of " f"{return_values} datasets." ) return return_values xarray-2025.12.0/xarray/core/datatree_render.py000066400000000000000000000224001511464676000213010ustar00rootroot00000000000000""" String Tree Rendering. Copied from anytree. Minor changes to `RenderDataTree` include accessing `children.values()`, and type hints. 
""" from __future__ import annotations from collections.abc import Iterable, Iterator from math import ceil from typing import TYPE_CHECKING, NamedTuple if TYPE_CHECKING: from xarray.core.datatree import DataTree class Row(NamedTuple): pre: str fill: str node: DataTree | str class AbstractStyle: def __init__(self, vertical: str, cont: str, end: str): """ Tree Render Style. Args: vertical: Sign for vertical line. cont: Chars for a continued branch. end: Chars for the last branch. """ super().__init__() self.vertical = vertical self.cont = cont self.end = end assert len(cont) == len(vertical) == len(end), ( f"'{vertical}', '{cont}' and '{end}' need to have equal length" ) @property def empty(self) -> str: """Empty string as placeholder.""" return " " * len(self.end) def __repr__(self) -> str: return f"{self.__class__.__name__}()" class ContStyle(AbstractStyle): def __init__(self): """ Continued style, without gaps. >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... "/": None, ... "/sub0": None, ... "/sub0/sub0B": None, ... "/sub0/sub0A": None, ... "/sub1": None, ... }, ... name="root", ... ) >>> print(RenderDataTree(root)) Group: / โ”œโ”€โ”€ Group: /sub0 โ”‚ โ”œโ”€โ”€ Group: /sub0/sub0B โ”‚ โ””โ”€โ”€ Group: /sub0/sub0A โ””โ”€โ”€ Group: /sub1 """ super().__init__("\u2502 ", "\u251c\u2500\u2500 ", "\u2514\u2500\u2500 ") class RenderDataTree: def __init__( self, node: DataTree, style=None, childiter: type = list, maxlevel: int | None = None, maxchildren: int | None = None, ): """ Render tree starting at `node`. Keyword Args: style (AbstractStyle): Render Style. childiter: Child iterator. Note, due to the use of node.children.values(), Iterables that change the order of children cannot be used (e.g., `reversed`). maxlevel: Limit rendering to this depth. maxchildren: Limit number of children at each node. :any:`RenderDataTree` is an iterator, returning a tuple with 3 items: `pre` tree prefix. `fill` filling for multiline entries. `node` :any:`NodeMixin` object. It is up to the user to assemble these parts to a whole. Examples -------- >>> from xarray import Dataset >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... "/": Dataset({"a": 0, "b": 1}), ... "/sub0": Dataset({"c": 2, "d": 3}), ... "/sub0/sub0B": Dataset({"e": 4}), ... "/sub0/sub0A": Dataset({"f": 5, "g": 6}), ... "/sub1": Dataset({"h": 7}), ... }, ... name="root", ... ) # Simple one line: >>> for pre, _, node in RenderDataTree(root): ... print(f"{pre}{node.name}") ... root โ”œโ”€โ”€ sub0 โ”‚ โ”œโ”€โ”€ sub0B โ”‚ โ””โ”€โ”€ sub0A โ””โ”€โ”€ sub1 # Multiline: >>> for pre, fill, node in RenderDataTree(root): ... print(f"{pre}{node.name}") ... for variable in node.variables: ... print(f"{fill}{variable}") ... root a b โ”œโ”€โ”€ sub0 โ”‚ c โ”‚ d โ”‚ โ”œโ”€โ”€ sub0B โ”‚ โ”‚ e โ”‚ โ””โ”€โ”€ sub0A โ”‚ f โ”‚ g โ””โ”€โ”€ sub1 h :any:`by_attr` simplifies attribute rendering and supports multiline: >>> print(RenderDataTree(root).by_attr()) root โ”œโ”€โ”€ sub0 โ”‚ โ”œโ”€โ”€ sub0B โ”‚ โ””โ”€โ”€ sub0A โ””โ”€โ”€ sub1 # `maxlevel` limits the depth of the tree: >>> print(RenderDataTree(root, maxlevel=2).by_attr("name")) root โ”œโ”€โ”€ sub0 โ””โ”€โ”€ sub1 # `maxchildren` limits the number of children per node >>> print(RenderDataTree(root, maxchildren=1).by_attr("name")) root โ”œโ”€โ”€ sub0 โ”‚ โ”œโ”€โ”€ sub0B โ”‚ ... ... 
""" if style is None: style = ContStyle() if not isinstance(style, AbstractStyle): style = style() self.node = node self.style = style self.childiter = childiter self.maxlevel = maxlevel self.maxchildren = maxchildren def __iter__(self) -> Iterator[Row]: return self.__next(self.node, tuple()) def __next( self, node: DataTree, continues: tuple[bool, ...], level: int = 0, ) -> Iterator[Row]: yield RenderDataTree.__item(node, continues, self.style) children = node.children.values() level += 1 if children and (self.maxlevel is None or level < self.maxlevel): nchildren = len(children) children = self.childiter(children) for i, (child, is_last) in enumerate(_is_last(children)): if ( self.maxchildren is None or i < ceil(self.maxchildren / 2) or i >= ceil(nchildren - self.maxchildren / 2) ): yield from self.__next( child, continues + (not is_last,), level=level, ) if ( self.maxchildren is not None and nchildren > self.maxchildren and i == ceil(self.maxchildren / 2) ): yield RenderDataTree.__item("...", continues, self.style) @staticmethod def __item( node: DataTree | str, continues: tuple[bool, ...], style: AbstractStyle ) -> Row: if not continues: return Row("", "", node) else: items = [style.vertical if cont else style.empty for cont in continues] indent = "".join(items[:-1]) branch = style.cont if continues[-1] else style.end pre = indent + branch fill = "".join(items) return Row(pre, fill, node) def __str__(self) -> str: return str(self.node) def __repr__(self) -> str: classname = self.__class__.__name__ args = [ repr(self.node), f"style={self.style!r}", f"childiter={self.childiter!r}", ] return f"{classname}({', '.join(args)})" def by_attr(self, attrname: str = "name") -> str: """ Return rendered tree with node attribute `attrname`. Examples -------- >>> from xarray import Dataset >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... "/sub0/sub0B": Dataset({"foo": 4, "bar": 109}), ... "/sub0/sub0A": None, ... "/sub1/sub1A": None, ... "/sub1/sub1B": Dataset({"bar": 8}), ... "/sub1/sub1C/sub1Ca": None, ... }, ... name="root", ... 
) >>> print(RenderDataTree(root).by_attr("name")) root โ”œโ”€โ”€ sub0 โ”‚ โ”œโ”€โ”€ sub0B โ”‚ โ””โ”€โ”€ sub0A โ””โ”€โ”€ sub1 โ”œโ”€โ”€ sub1A โ”œโ”€โ”€ sub1B โ””โ”€โ”€ sub1C โ””โ”€โ”€ sub1Ca """ def get() -> Iterator[str]: for pre, fill, node in self: if isinstance(node, str): yield f"{fill}{node}" continue attr = ( attrname(node) if callable(attrname) else getattr(node, attrname, "") ) if isinstance(attr, list | tuple): lines = attr else: lines = str(attr).split("\n") yield f"{pre}{lines[0]}" for line in lines[1:]: yield f"{fill}{line}" return "\n".join(get()) def _is_last(iterable: Iterable) -> Iterator[tuple[DataTree, bool]]: iter_ = iter(iterable) try: nextitem = next(iter_) except StopIteration: pass else: item = nextitem while True: try: nextitem = next(iter_) yield item, False except StopIteration: yield nextitem, True break item = nextitem xarray-2025.12.0/xarray/core/dtypes.py000066400000000000000000000242211511464676000174640ustar00rootroot00000000000000from __future__ import annotations import functools from collections.abc import Iterable from typing import TYPE_CHECKING, TypeVar, cast import numpy as np from pandas.api.extensions import ExtensionDtype from xarray.compat import array_api_compat, npcompat from xarray.compat.npcompat import HAS_STRING_DTYPE from xarray.core import utils if TYPE_CHECKING: from typing import Any # Use as a sentinel value to indicate a dtype appropriate NA value. NA = utils.ReprObject("") @functools.total_ordering class AlwaysGreaterThan: def __gt__(self, other): return True def __eq__(self, other): return isinstance(other, type(self)) @functools.total_ordering class AlwaysLessThan: def __lt__(self, other): return True def __eq__(self, other): return isinstance(other, type(self)) # Equivalence to np.inf (-np.inf) for object-type INF = AlwaysGreaterThan() NINF = AlwaysLessThan() # Pairs of types that, if both found, should be promoted to object dtype # instead of following NumPy's own type-promotion rules. These type promotion # rules match pandas instead. For reference, see the NumPy type hierarchy: # https://numpy.org/doc/stable/reference/arrays.scalars.html PROMOTE_TO_OBJECT: tuple[tuple[type[np.generic], type[np.generic]], ...] = ( (np.number, np.character), # numpy promotes to character (np.bool_, np.character), # numpy promotes to character (np.bytes_, np.str_), # numpy promotes to unicode ) T_dtype = TypeVar("T_dtype", np.dtype, ExtensionDtype) def maybe_promote(dtype: T_dtype) -> tuple[T_dtype, Any]: """Simpler equivalent of pandas.core.common._maybe_promote Parameters ---------- dtype : np.dtype Returns ------- dtype : Promoted dtype that can hold missing values. fill_value : Valid missing value for the promoted dtype. """ # N.B. 
these casting rules should match pandas dtype_: np.typing.DTypeLike fill_value: Any if utils.is_allowed_extension_array_dtype(dtype): return dtype, cast(ExtensionDtype, dtype).na_value # type: ignore[redundant-cast] if not isinstance(dtype, np.dtype): raise TypeError( f"dtype {dtype} must be one of an extension array dtype or numpy dtype" ) elif HAS_STRING_DTYPE and np.issubdtype(dtype, np.dtypes.StringDType()): # for now, we always promote string dtypes to object for consistency with existing behavior # TODO: refactor this once we have a better way to handle numpy vlen-string dtypes dtype_ = object fill_value = np.nan elif isdtype(dtype, "real floating"): dtype_ = dtype fill_value = np.nan elif np.issubdtype(dtype, np.timedelta64): # See https://github.com/numpy/numpy/issues/10685 # np.timedelta64 is a subclass of np.integer # Check np.timedelta64 before np.integer fill_value = np.timedelta64("NaT") dtype_ = dtype elif isdtype(dtype, "integral"): dtype_ = np.float32 if dtype.itemsize <= 2 else np.float64 fill_value = np.nan elif isdtype(dtype, "complex floating"): dtype_ = dtype fill_value = np.nan + np.nan * 1j elif np.issubdtype(dtype, np.datetime64): dtype_ = dtype fill_value = np.datetime64("NaT") else: dtype_ = object fill_value = np.nan dtype_out = np.dtype(dtype_) fill_value = dtype_out.type(fill_value) return dtype_out, fill_value NAT_TYPES = {np.datetime64("NaT").dtype, np.timedelta64("NaT").dtype} def get_fill_value(dtype): """Return an appropriate fill value for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : Missing value corresponding to this dtype. """ _, fill_value = maybe_promote(dtype) return fill_value def get_pos_infinity(dtype, max_for_int=False): """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype max_for_int : bool Return np.iinfo(dtype).max instead of np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if isdtype(dtype, "real floating"): return np.inf if isdtype(dtype, "integral"): if max_for_int: return np.iinfo(dtype).max else: return np.inf if isdtype(dtype, "complex floating"): return np.inf + 1j * np.inf if isdtype(dtype, "bool"): return True return np.array(INF, dtype=object) def get_neg_infinity(dtype, min_for_int=False): """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype min_for_int : bool Return np.iinfo(dtype).min instead of -np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. 
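    Examples
    --------
    Illustrative values (the integer bound comes from ``np.iinfo``):

    >>> get_neg_infinity(np.dtype("float64"))
    -inf
    >>> get_neg_infinity(np.dtype("int16"), min_for_int=True)
    -32768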
""" if isdtype(dtype, "real floating"): return -np.inf if isdtype(dtype, "integral"): if min_for_int: return np.iinfo(dtype).min else: return -np.inf if isdtype(dtype, "complex floating"): return -np.inf - 1j * np.inf if isdtype(dtype, "bool"): return False return np.array(NINF, dtype=object) def is_datetime_like(dtype) -> bool: """Check if a dtype is a subclass of the numpy datetime types""" return _is_numpy_subdtype(dtype, (np.datetime64, np.timedelta64)) def is_object(dtype) -> bool: """Check if a dtype is object""" return _is_numpy_subdtype(dtype, object) def is_string(dtype) -> bool: """Check if a dtype is a string dtype""" return _is_numpy_subdtype(dtype, (np.str_, np.character)) def _is_numpy_subdtype(dtype, kind) -> bool: if not isinstance(dtype, np.dtype): return False kinds = kind if isinstance(kind, tuple) else (kind,) return any(np.issubdtype(dtype, kind) for kind in kinds) def isdtype(dtype, kind: str | tuple[str, ...], xp=None) -> bool: """Compatibility wrapper for isdtype() from the array API standard. Unlike xp.isdtype(), kind must be a string. """ # TODO(shoyer): remove this wrapper when Xarray requires # numpy>=2 and pandas extensions arrays are implemented in # Xarray via the array API if not isinstance(kind, str) and not ( isinstance(kind, tuple) and all(isinstance(k, str) for k in kind) # type: ignore[redundant-expr] ): raise TypeError(f"kind must be a string or a tuple of strings: {kind!r}") if isinstance(dtype, np.dtype): return npcompat.isdtype(dtype, kind) elif utils.is_allowed_extension_array_dtype(dtype): # we never want to match pandas extension array dtypes return False else: if xp is None: xp = np return xp.isdtype(dtype, kind) def maybe_promote_to_variable_width( array_or_dtype: np.typing.ArrayLike | np.typing.DTypeLike | ExtensionDtype | str | bytes, *, should_return_str_or_bytes: bool = False, ) -> np.typing.ArrayLike | np.typing.DTypeLike | ExtensionDtype: if isinstance(array_or_dtype, str | bytes): if should_return_str_or_bytes: return array_or_dtype return type(array_or_dtype) elif isinstance( dtype := getattr(array_or_dtype, "dtype", array_or_dtype), np.dtype ) and (np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.bytes_)): # drop the length from numpy's fixed-width string dtypes, it is better to # recalculate # TODO(keewis): remove once the minimum version of `numpy.result_type` does this # for us return dtype.type else: return array_or_dtype def should_promote_to_object( arrays_and_dtypes: Iterable[ np.typing.ArrayLike | np.typing.DTypeLike | ExtensionDtype ], xp, ) -> bool: """ Test whether the given arrays_and_dtypes, when evaluated individually, match the type promotion rules found in PROMOTE_TO_OBJECT. """ np_result_types = set() for arr_or_dtype in arrays_and_dtypes: try: result_type = array_api_compat.result_type( maybe_promote_to_variable_width(arr_or_dtype), xp=xp ) if isinstance(result_type, np.dtype): np_result_types.add(result_type) except TypeError: # passing individual objects to xp.result_type (i.e., what `array_api_compat.result_type` calls) means NEP-18 implementations won't have # a chance to intercept special values (such as NA) that numpy core cannot handle. # Thus they are considered as types that don't need promotion i.e., the `arr_or_dtype` that rose the `TypeError` will not contribute to `np_result_types`. 
pass if np_result_types: for left, right in PROMOTE_TO_OBJECT: if any(np.issubdtype(t, left) for t in np_result_types) and any( np.issubdtype(t, right) for t in np_result_types ): return True return False def result_type( *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike | ExtensionDtype, xp=None, ) -> np.dtype: """Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy and dask arrays. Returns ------- numpy.dtype for the result. """ # TODO (keewis): replace `array_api_compat.result_type` with `xp.result_type` once we # can require a version of the Array API that supports passing scalars to it. from xarray.core.duck_array_ops import get_array_namespace if xp is None: xp = get_array_namespace(arrays_and_dtypes) if should_promote_to_object(arrays_and_dtypes, xp): return np.dtype(object) maybe_promote = functools.partial( maybe_promote_to_variable_width, # let extension arrays handle their own str/bytes should_return_str_or_bytes=any( map(utils.is_allowed_extension_array_dtype, arrays_and_dtypes) ), ) return array_api_compat.result_type(*map(maybe_promote, arrays_and_dtypes), xp=xp) xarray-2025.12.0/xarray/core/duck_array_ops.py000066400000000000000000000734741511464676000211770ustar00rootroot00000000000000"""Compatibility module defining operations on duck numpy-arrays. Currently, this means Dask or NumPy arrays. None of these functions should accept or return xarray objects. """ from __future__ import annotations import contextlib import datetime import inspect import warnings from collections.abc import Callable from functools import partial from importlib import import_module from typing import Any import numpy as np import pandas as pd from numpy import ( isclose, isnat, take, unravel_index, # noqa: F401 ) from xarray.compat import dask_array_compat, dask_array_ops from xarray.compat.array_api_compat import get_array_namespace from xarray.core import dtypes, nputils from xarray.core.extension_array import ( PandasExtensionArray, as_extension_array, ) from xarray.core.options import OPTIONS from xarray.core.utils import ( is_allowed_extension_array_dtype, is_duck_array, is_duck_dask_array, module_available, ) from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import array_type, is_chunked_array # remove once numpy 2.0 is the oldest supported version if module_available("numpy", minversion="2.0.0.dev0"): from numpy.lib.array_utils import ( # type: ignore[import-not-found,unused-ignore] normalize_axis_index, ) else: from numpy.core.multiarray import ( # type: ignore[attr-defined,no-redef,unused-ignore] normalize_axis_index, ) dask_available = module_available("dask") def einsum(*args, **kwargs): if OPTIONS["use_opt_einsum"] and module_available("opt_einsum"): import opt_einsum return opt_einsum.contract(*args, **kwargs) else: xp = get_array_namespace(*args) return xp.einsum(*args, **kwargs) def tensordot(*args, **kwargs): xp = get_array_namespace(*args) return xp.tensordot(*args, **kwargs) def cross(*args, **kwargs): xp = get_array_namespace(*args) return xp.cross(*args, **kwargs) def gradient(f, *varargs, axis=None, edge_order=1): xp = get_array_namespace(f) return xp.gradient(f, *varargs, axis=axis, edge_order=edge_order) def _dask_or_eager_func( name, eager_module=np, dask_module="dask.array", 
dask_only_kwargs=tuple(), numpy_only_kwargs=tuple(), ): """Create a function that dispatches to dask for dask array inputs.""" def f(*args, **kwargs): if dask_available and any(is_duck_dask_array(a) for a in args): mod = ( import_module(dask_module) if isinstance(dask_module, str) else dask_module ) wrapped = getattr(mod, name) for kwarg in numpy_only_kwargs: kwargs.pop(kwarg, None) else: wrapped = getattr(eager_module, name) for kwarg in dask_only_kwargs: kwargs.pop(kwarg, None) return wrapped(*args, **kwargs) return f def fail_on_dask_array_input(values, msg=None, func_name=None): if is_duck_dask_array(values): if msg is None: msg = "%r is not yet a valid method on dask arrays" if func_name is None: func_name = inspect.stack()[1][3] raise NotImplementedError(msg % func_name) # Requires special-casing because pandas won't automatically dispatch to dask.isnull via NEP-18 pandas_isnull = _dask_or_eager_func("isnull", eager_module=pd, dask_module="dask.array") # TODO replace with simply np.ma.masked_invalid once numpy/numpy#16022 is fixed # TODO: replacing breaks iris + dask tests masked_invalid = _dask_or_eager_func( "masked_invalid", eager_module=np.ma, dask_module="dask.array.ma" ) def sliding_window_view(array, window_shape, axis=None, **kwargs): # TODO: some libraries (e.g. jax) don't have this, implement an alternative? xp = get_array_namespace(array) # sliding_window_view will not dispatch arbitrary kwargs (automatic_rechunk), # so we need to hand-code this. func = _dask_or_eager_func( "sliding_window_view", eager_module=xp.lib.stride_tricks, dask_module=dask_array_compat, dask_only_kwargs=("automatic_rechunk",), numpy_only_kwargs=("subok", "writeable"), ) return func(array, window_shape, axis=axis, **kwargs) def round(array): xp = get_array_namespace(array) return xp.round(array) around: Callable = round def isna(data: Any) -> bool: """Checks if data is literally np.nan or pd.NA. Parameters ---------- data Any python object Returns ------- Whether or not the data is np.nan or pd.NA """ return data is pd.NA or data is np.nan # noqa: PLW0177 def isnull(data): data = asarray(data) xp = get_array_namespace(data) scalar_type = data.dtype if dtypes.is_datetime_like(scalar_type): # datetime types use NaT for null # note: must check timedelta64 before integers, because currently # timedelta64 inherits from np.integer return isnat(data) elif dtypes.isdtype(scalar_type, ("real floating", "complex floating"), xp=xp): # float types use NaN for null xp = get_array_namespace(data) return xp.isnan(data) elif dtypes.isdtype(scalar_type, ("bool", "integral"), xp=xp) or ( isinstance(scalar_type, np.dtype) and ( np.issubdtype(scalar_type, np.character) or np.issubdtype(scalar_type, np.void) ) ): # these types cannot represent missing values # bool_ is for backwards compat with numpy<2, and cupy dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool return full_like(data, dtype=dtype, fill_value=False) # at this point, array should have dtype=object elif isinstance(data, np.ndarray) or pd.api.types.is_extension_array_dtype(data): # noqa: TID251 return pandas_isnull(data) else: # Not reachable yet, but intended for use with other duck array # types. For full consistency with pandas, we should accept None as # a null value as well as NaN, but it isn't clear how to do this # with duck typing. 
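        # Illustrative note on the comparison below: NaN is the only float
        # value that compares unequal to itself, so element-wise
        # `data != data` is True exactly at the NaN positions, e.g.
        #     np.array([1.0, np.nan]) != np.array([1.0, np.nan])
        # evaluates to [False, True].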
return data != data # noqa: PLR0124 def notnull(data): return ~isnull(data) def trapz(y, x, axis): if axis < 0: axis = y.ndim + axis x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1) x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1) slice1 = (slice(None),) * axis + (slice(1, None),) slice2 = (slice(None),) * axis + (slice(None, -1),) dx = x[x_sl1] - x[x_sl2] integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)]) return sum(integrand, axis=axis, skipna=False) def cumulative_trapezoid(y, x, axis): if axis < 0: axis = y.ndim + axis x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1) x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1) slice1 = (slice(None),) * axis + (slice(1, None),) slice2 = (slice(None),) * axis + (slice(None, -1),) dx = x[x_sl1] - x[x_sl2] integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)]) # Pad so that 'axis' has same length in result as it did in y pads = [(1, 0) if i == axis else (0, 0) for i in range(y.ndim)] xp = get_array_namespace(y, x) integrand = xp.pad(integrand, pads, mode="constant", constant_values=0.0) return cumsum(integrand, axis=axis, skipna=False) def full_like(a, fill_value, **kwargs): xp = get_array_namespace(a) return xp.full_like(a, fill_value, **kwargs) def empty_like(a, **kwargs): xp = get_array_namespace(a) return xp.empty_like(a, **kwargs) def astype(data, dtype, *, xp=None, **kwargs): if not hasattr(data, "__array_namespace__") and xp is None: return data.astype(dtype, **kwargs) if xp is None: xp = get_array_namespace(data) if xp == np: # numpy currently doesn't have a astype: return data.astype(dtype, **kwargs) return xp.astype(data, dtype, **kwargs) def asarray(data, xp=np, dtype=None): if is_duck_array(data): converted = data elif is_allowed_extension_array_dtype(dtype): # data may or may not be an ExtensionArray, so we can't rely on # np.asarray to call our NEP-18 handler; gotta hook it ourselves converted = PandasExtensionArray(as_extension_array(data, dtype)) else: converted = xp.asarray(data) if dtype is None or converted.dtype == dtype: return converted if xp is np or not hasattr(xp, "astype"): return converted.astype(dtype) else: return xp.astype(converted, dtype) def as_shared_dtype(scalars_or_arrays, xp=None): """Cast a arrays to a shared dtype using xarray's type promotion rules.""" # Avoid calling array_type("cupy") repeatidely in the any check array_type_cupy = array_type("cupy") if any(isinstance(x, array_type_cupy) for x in scalars_or_arrays): import cupy as cp xp = cp elif xp is None: xp = get_array_namespace(scalars_or_arrays) scalars_or_arrays = [ PandasExtensionArray(s_or_a) if isinstance(s_or_a, pd.api.extensions.ExtensionArray) else s_or_a for s_or_a in scalars_or_arrays ] # Pass arrays directly instead of dtypes to result_type so scalars # get handled properly. # Note that result_type() safely gets the dtype from dask arrays without # evaluating them. dtype = dtypes.result_type(*scalars_or_arrays, xp=xp) return [asarray(x, dtype=dtype, xp=xp) for x in scalars_or_arrays] def broadcast_to(array, shape): xp = get_array_namespace(array) return xp.broadcast_to(array, shape) def lazy_array_equiv(arr1, arr2): """Like array_equal, but doesn't actually compare values. Returns True when arr1, arr2 identical or their dask tokens are equal. Returns False when shapes are not equal. 
Returns None when equality cannot determined: one or both of arr1, arr2 are numpy arrays; or their dask tokens are not equal """ if arr1 is arr2: return True arr1 = asarray(arr1) arr2 = asarray(arr2) if arr1.shape != arr2.shape: return False if dask_available and is_duck_dask_array(arr1) and is_duck_dask_array(arr2): from dask.base import tokenize # GH3068, GH4221 if tokenize(arr1) == tokenize(arr2): return True else: return None return None def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8): """Like np.allclose, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") return bool( array_all(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True)) ) else: return lazy_equiv def array_equiv(arr1, arr2): """Like np.array_equal, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2)) return bool(array_all(flag_array)) else: return lazy_equiv def array_notnull_equiv(arr1, arr2): """Like np.array_equal, but also allows values to be NaN in either or both arrays """ arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2) return bool(array_all(flag_array)) else: return lazy_equiv def count(data, axis=None): """Count the number of non-NA in this array along the given axis or axes""" xp = get_array_namespace(data) return xp.sum(xp.logical_not(isnull(data)), axis=axis) def sum_where(data, axis=None, dtype=None, where=None): xp = get_array_namespace(data) if where is not None: a = where_method(xp.zeros_like(data), where, data) else: a = data result = xp.sum(a, axis=axis, dtype=dtype) return result def where(condition, x, y): """Three argument where() with better dtype promotion rules.""" xp = get_array_namespace(condition, x, y) dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool if not is_duck_array(condition): condition = asarray(condition, dtype=dtype, xp=xp) else: condition = astype(condition, dtype=dtype, xp=xp) promoted_x, promoted_y = as_shared_dtype([x, y], xp=xp) return xp.where(condition, promoted_x, promoted_y) def where_method(data, cond, other=dtypes.NA): if other is dtypes.NA: other = dtypes.get_fill_value(data.dtype) return where(cond, data, other) def fillna(data, other): # we need to pass data first so pint has a chance of returning the # correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed return where(notnull(data), data, other) def logical_not(data): xp = get_array_namespace(data) return xp.logical_not(data) def clip(data, min=None, max=None): xp = get_array_namespace(data) return xp.clip(data, min, max) def concatenate(arrays, axis=0): """concatenate() with better dtype promotion rules.""" # TODO: `concat` is the xp compliant name, but fallback to concatenate for # older numpy and for cupy xp = get_array_namespace(*arrays) if hasattr(xp, "concat"): return xp.concat(as_shared_dtype(arrays, xp=xp), axis=axis) else: return xp.concatenate(as_shared_dtype(arrays, xp=xp), axis=axis) def 
stack(arrays, axis=0): """stack() with better dtype promotion rules.""" xp = get_array_namespace(arrays[0]) return xp.stack(as_shared_dtype(arrays, xp=xp), axis=axis) def reshape(array, shape): xp = get_array_namespace(array) return xp.reshape(array, shape) def ravel(array): return reshape(array, (-1,)) def transpose(array, axes=None): xp = get_array_namespace(array) return xp.transpose(array, axes) def moveaxis(array, source, destination): xp = get_array_namespace(array) return xp.moveaxis(array, source, destination) def pad(array, pad_width, **kwargs): xp = get_array_namespace(array) return xp.pad(array, pad_width, **kwargs) def quantile(array, q, axis=None, **kwargs): xp = get_array_namespace(array) return xp.quantile(array, q, axis=axis, **kwargs) @contextlib.contextmanager def _ignore_warnings_if(condition): if condition: with warnings.catch_warnings(): warnings.simplefilter("ignore") yield else: yield def _create_nan_agg_method(name, coerce_strings=False, invariant_0d=False): def f(values, axis=None, skipna=None, **kwargs): if kwargs.pop("out", None) is not None: raise TypeError(f"`out` is not valid for {name}") # The data is invariant in the case of 0d data, so do not # change the data (and dtype) # See https://github.com/pydata/xarray/issues/4885 if invariant_0d and axis == (): return values xp = get_array_namespace(values) values = asarray(values, xp=xp) if coerce_strings and dtypes.is_string(values.dtype): values = astype(values, object) func = None if skipna or ( skipna is None and ( dtypes.isdtype( values.dtype, ("complex floating", "real floating"), xp=xp ) or dtypes.is_object(values.dtype) ) ): from xarray.computation import nanops nanname = "nan" + name func = getattr(nanops, nanname) else: if name in ["sum", "prod"]: kwargs.pop("min_count", None) xp = get_array_namespace(values) func = getattr(xp, name) try: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice encountered") return func(values, axis=axis, **kwargs) except AttributeError: if not is_duck_dask_array(values): raise try: # dask/dask#3133 dask sometimes needs dtype argument # if func does not accept dtype, then raises TypeError return func(values, axis=axis, dtype=values.dtype, **kwargs) except (AttributeError, TypeError) as err: raise NotImplementedError( f"{name} is not yet implemented on dask arrays" ) from err f.__name__ = name return f # Attributes `numeric_only`, `available_min_count` is used for docs. 
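# Illustrative behaviour of the aggregations created below (float inputs skip
# NaN when `skipna` is left as None):
#     sum(np.array([1.0, np.nan, 2.0]))                # -> 3.0 via nanops.nansum
#     sum(np.array([1.0, np.nan, 2.0]), skipna=False)  # -> nan via np.sum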
# See ops.inject_reduce_methods argmax = _create_nan_agg_method("argmax", coerce_strings=True) argmin = _create_nan_agg_method("argmin", coerce_strings=True) max = _create_nan_agg_method("max", coerce_strings=True, invariant_0d=True) min = _create_nan_agg_method("min", coerce_strings=True, invariant_0d=True) sum = _create_nan_agg_method("sum", invariant_0d=True) sum.numeric_only = True sum.available_min_count = True std = _create_nan_agg_method("std") std.numeric_only = True var = _create_nan_agg_method("var") var.numeric_only = True median = _create_nan_agg_method("median", invariant_0d=True) median.numeric_only = True prod = _create_nan_agg_method("prod", invariant_0d=True) prod.numeric_only = True prod.available_min_count = True cumprod_1d = _create_nan_agg_method("cumprod", invariant_0d=True) cumprod_1d.numeric_only = True cumsum_1d = _create_nan_agg_method("cumsum", invariant_0d=True) cumsum_1d.numeric_only = True def array_all(array, axis=None, keepdims=False, **kwargs): xp = get_array_namespace(array) return xp.all(array, axis=axis, keepdims=keepdims, **kwargs) def array_any(array, axis=None, keepdims=False, **kwargs): xp = get_array_namespace(array) return xp.any(array, axis=axis, keepdims=keepdims, **kwargs) _mean = _create_nan_agg_method("mean", invariant_0d=True) def _datetime_nanmin(array): return _datetime_nanreduce(array, min) def _datetime_nanreduce(array, func): """nanreduce() function for datetime64. Caveats that this function deals with: - In numpy < 1.18, min() on datetime64 incorrectly ignores NaT - numpy nanmin() don't work on datetime64 (all versions at the moment of writing) - dask min() does not work on datetime64 (all versions at the moment of writing) """ dtype = array.dtype assert dtypes.is_datetime_like(dtype) # (NaT).astype(float) does not produce NaN... array = where(pandas_isnull(array), np.nan, array.astype(float)) array = func(array, skipna=True) if isinstance(array, float): array = np.array(array) # ...but (NaN).astype("M8") does produce NaT return array.astype(dtype) def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float): """Convert an array containing datetime-like data to numerical values. Convert the datetime array to a timedelta relative to an offset. Parameters ---------- array : array-like Input data offset : None, datetime or cftime.datetime Datetime offset. If None, this is set by default to the array's minimum value to reduce round off errors. datetime_unit : {None, Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as} If not None, convert output to a given datetime unit. Note that some conversions are not allowed due to non-linear relationships between units. dtype : dtype Output dtype. Returns ------- array Numerical representation of datetime object relative to an offset. Notes ----- Some datetime unit conversions won't work, for example from days to years, even though some calendars would allow for them (e.g. no_leap). This is because there is no `cftime.timedelta` object. """ # Set offset to minimum if not given if offset is None: if dtypes.is_datetime_like(array.dtype): offset = _datetime_nanreduce(array, min) else: offset = min(array) # Compute timedelta object. # For np.datetime64, this can silently yield garbage due to overflow. # One option is to enforce 1970-01-01 as the universal offset. # This map_blocks call is for backwards compatibility. 
# dask == 2021.04.1 does not support subtracting object arrays # which is required for cftime if is_duck_dask_array(array) and dtypes.is_object(array.dtype): array = array.map_blocks(lambda a, b: a - b, offset, meta=array._meta) else: array = array - offset # Scalar is converted to 0d-array if not hasattr(array, "dtype"): array = np.array(array) # Convert timedelta objects to float by first converting to microseconds. if dtypes.is_object(array.dtype): return py_timedelta_to_float(array, datetime_unit or "ns").astype(dtype) # Convert np.NaT to np.nan elif dtypes.is_datetime_like(array.dtype): # Convert to specified timedelta units. if datetime_unit: array = array / np.timedelta64(1, datetime_unit) return np.where(isnull(array), np.nan, array.astype(dtype)) def timedelta_to_numeric(value, datetime_unit="ns", dtype=float): """Convert a timedelta-like object to numerical values. Parameters ---------- value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str Time delta representation. datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as} The time units of the output values. Note that some conversions are not allowed due to non-linear relationships between units. dtype : type The output data type. """ if isinstance(value, datetime.timedelta): out = py_timedelta_to_float(value, datetime_unit) elif isinstance(value, np.timedelta64): out = np_timedelta64_to_float(value, datetime_unit) elif isinstance(value, pd.Timedelta): out = pd_timedelta_to_float(value, datetime_unit) elif isinstance(value, str): try: a = pd.to_timedelta(value) except ValueError as err: raise ValueError( f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta" ) from err return py_timedelta_to_float(a, datetime_unit) else: raise TypeError( f"Expected value of type str, pandas.Timedelta, datetime.timedelta " f"or numpy.timedelta64, but received {type(value).__name__}" ) return out.astype(dtype) def _to_pytimedelta(array, unit="us"): return array.astype(f"timedelta64[{unit}]").astype(datetime.timedelta) def np_timedelta64_to_float(array, datetime_unit): """Convert numpy.timedelta64 to float, possibly at a loss of resolution.""" unit, _ = np.datetime_data(array.dtype) conversion_factor = np.timedelta64(1, unit) / np.timedelta64(1, datetime_unit) return conversion_factor * array.astype(np.float64) def pd_timedelta_to_float(value, datetime_unit): """Convert pandas.Timedelta to float. Notes ----- Built on the assumption that pandas timedelta values are in nanoseconds, which is also the numpy default resolution. 
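    Examples
    --------
    A small, illustrative conversion:

    >>> float(pd_timedelta_to_float(pd.Timedelta("1h"), "s"))
    3600.0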
""" value = value.to_timedelta64() return np_timedelta64_to_float(value, datetime_unit) def _timedelta_to_seconds(array): if isinstance(array, datetime.timedelta): return array.total_seconds() * 1e6 else: return np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6 def py_timedelta_to_float(array, datetime_unit): """Convert a timedelta object to a float, possibly at a loss of resolution.""" array = asarray(array) if is_duck_dask_array(array): array = array.map_blocks( _timedelta_to_seconds, meta=np.array([], dtype=np.float64) ) else: array = _timedelta_to_seconds(array) conversion_factor = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit) return conversion_factor * array def mean(array, axis=None, skipna=None, **kwargs): """inhouse mean that can handle np.datetime64 or cftime.datetime dtypes""" from xarray.core.common import _contains_cftime_datetimes array = asarray(array) if dtypes.is_datetime_like(array.dtype): dmin = _datetime_nanreduce(array, min).astype("datetime64[Y]").astype(int) dmax = _datetime_nanreduce(array, max).astype("datetime64[Y]").astype(int) offset = ( np.array((dmin + dmax) // 2).astype("datetime64[Y]").astype(array.dtype) ) # From version 2025.01.2 xarray uses np.datetime64[unit], where unit # is one of "s", "ms", "us", "ns". # To not have to worry about the resolution, we just convert the output # to "timedelta64" (without unit) and let the dtype of offset take precedence. # This is fully backwards compatible with datetime64[ns]. return ( _mean( datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs ).astype("timedelta64") + offset ) elif _contains_cftime_datetimes(array): offset = min(array) timedeltas = datetime_to_numeric(array, offset, datetime_unit="us") mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs) return _to_pytimedelta(mean_timedeltas, unit="us") + offset else: return _mean(array, axis=axis, skipna=skipna, **kwargs) mean.numeric_only = True # type: ignore[attr-defined] def _nd_cum_func(cum_func, array, axis, **kwargs): array = asarray(array) if axis is None: axis = tuple(range(array.ndim)) if isinstance(axis, int): axis = (axis,) out = array for ax in axis: out = cum_func(out, axis=ax, **kwargs) return out def ndim(array) -> int: # Required part of the duck array and the array-api, but we fall back in case # https://docs.xarray.dev/en/latest/internals/duck-arrays-integration.html#duck-array-requirements return array.ndim if hasattr(array, "ndim") else np.ndim(array) def cumprod(array, axis=None, **kwargs): """N-dimensional version of cumprod.""" return _nd_cum_func(cumprod_1d, array, axis, **kwargs) def cumsum(array, axis=None, **kwargs): """N-dimensional version of cumsum.""" return _nd_cum_func(cumsum_1d, array, axis, **kwargs) def first(values, axis, skipna=None): """Return the first non-NA elements in this array along the given axis""" if (skipna or skipna is None) and not ( dtypes.isdtype(values.dtype, "signed integer") or dtypes.is_string(values.dtype) ): # only bother for dtypes that can hold NaN if is_chunked_array(values): return chunked_nanfirst(values, axis) else: return nputils.nanfirst(values, axis) return take(values, 0, axis=axis) def last(values, axis, skipna=None): """Return the last non-NA elements in this array along the given axis""" if (skipna or skipna is None) and not ( dtypes.isdtype(values.dtype, "signed integer") or dtypes.is_string(values.dtype) ): # only bother for dtypes that can hold NaN if is_chunked_array(values): return chunked_nanlast(values, axis) else: 
return nputils.nanlast(values, axis) return take(values, -1, axis=axis) def isin(element, test_elements, **kwargs): xp = get_array_namespace(element, test_elements) return xp.isin(element, test_elements, **kwargs) def least_squares(lhs, rhs, rcond=None, skipna=False): """Return the coefficients and residuals of a least-squares fit.""" if is_duck_dask_array(rhs): return dask_array_ops.least_squares(lhs, rhs, rcond=rcond, skipna=skipna) else: return nputils.least_squares(lhs, rhs, rcond=rcond, skipna=skipna) def _push(array, n: int | None = None, axis: int = -1): """ Use either bottleneck or numbagg depending on options & what's available """ if not OPTIONS["use_bottleneck"] and not OPTIONS["use_numbagg"]: raise RuntimeError( "ffill & bfill requires bottleneck or numbagg to be enabled." " Call `xr.set_options(use_bottleneck=True)` or `xr.set_options(use_numbagg=True)` to enable one." ) if OPTIONS["use_numbagg"] and module_available("numbagg"): import numbagg # type: ignore[import-not-found, unused-ignore] return numbagg.ffill(array, limit=n, axis=axis) # work around for bottleneck 178 limit = n if n is not None else array.shape[axis] import bottleneck as bn return bn.push(array, limit, axis) def push(array, n, axis, method="blelloch"): if not OPTIONS["use_bottleneck"] and not OPTIONS["use_numbagg"]: raise RuntimeError( "ffill & bfill requires bottleneck or numbagg to be enabled." " Call `xr.set_options(use_bottleneck=True)` or `xr.set_options(use_numbagg=True)` to enable one." ) if is_duck_dask_array(array): return dask_array_ops.push(array, n, axis, method=method) else: return _push(array, n, axis) def _first_last_wrapper(array, *, axis, op, keepdims): return op(array, axis, keepdims=keepdims) def _chunked_first_or_last(darray, axis, op): chunkmanager = get_chunked_array_type(darray) # This will raise the same error message seen for numpy axis = normalize_axis_index(axis, darray.ndim) wrapped_op = partial(_first_last_wrapper, op=op) return chunkmanager.reduction( darray, func=wrapped_op, aggregate_func=wrapped_op, axis=axis, dtype=darray.dtype, keepdims=False, # match numpy version ) def chunked_nanfirst(darray, axis): return _chunked_first_or_last(darray, axis, op=nputils.nanfirst) def chunked_nanlast(darray, axis): return _chunked_first_or_last(darray, axis, op=nputils.nanlast) xarray-2025.12.0/xarray/core/extension_array.py000066400000000000000000000274311511464676000213740ustar00rootroot00000000000000from __future__ import annotations import copy from collections.abc import Callable, Sequence from dataclasses import dataclass from typing import TYPE_CHECKING, Generic, cast import numpy as np import pandas as pd from packaging.version import Version from pandas.api.extensions import ExtensionArray, ExtensionDtype from pandas.api.types import is_scalar as pd_is_scalar from xarray.core.types import DTypeLikeSave, T_ExtensionArray from xarray.core.utils import ( NDArrayMixin, is_allowed_extension_array, is_allowed_extension_array_dtype, ) HANDLED_EXTENSION_ARRAY_FUNCTIONS: dict[Callable, Callable] = {} if TYPE_CHECKING: from typing import Any from pandas._typing import DtypeObj, Scalar def is_scalar(value: object) -> bool: """Workaround: pandas is_scalar doesn't recognize Categorical nulls for some reason.""" return value is pd.CategoricalDtype.na_value or pd_is_scalar(value) def implements(numpy_function_or_name: Callable | str) -> Callable: """Register an __array_function__ implementation. 
Pass a function directly if it's guaranteed to exist in all supported numpy versions, or a string to first check for its existence. """ def decorator(func): if isinstance(numpy_function_or_name, str): numpy_function = getattr(np, numpy_function_or_name, None) else: numpy_function = numpy_function_or_name if numpy_function: HANDLED_EXTENSION_ARRAY_FUNCTIONS[numpy_function] = func return func return decorator @implements(np.issubdtype) def __extension_duck_array__issubdtype( extension_array_dtype: T_ExtensionArray, other_dtype: DTypeLikeSave ) -> bool: return False # never want a function to think a pandas extension dtype is a subtype of numpy @implements("astype") # np.astype was added in 2.1.0, but we only require >=1.24 def __extension_duck_array__astype( array_or_scalar: T_ExtensionArray, dtype: DTypeLikeSave, order: str = "K", casting: str = "unsafe", subok: bool = True, copy: bool = True, device: str | None = None, ) -> ExtensionArray: if ( not ( is_allowed_extension_array(array_or_scalar) or is_allowed_extension_array_dtype(dtype) ) or casting != "unsafe" or not subok or order != "K" ): return NotImplemented return as_extension_array(array_or_scalar, dtype, copy=copy) @implements(np.asarray) def __extension_duck_array__asarray( array_or_scalar: np.typing.ArrayLike | T_ExtensionArray, dtype: DTypeLikeSave | None = None, ) -> ExtensionArray: if not is_allowed_extension_array(dtype): return NotImplemented return as_extension_array(array_or_scalar, dtype) def as_extension_array( array_or_scalar: np.typing.ArrayLike | T_ExtensionArray, dtype: ExtensionDtype | DTypeLikeSave | None, copy: bool = False, ) -> ExtensionArray: if is_scalar(array_or_scalar): return dtype.construct_array_type()._from_sequence( # type: ignore[union-attr] [array_or_scalar], dtype=dtype ) else: return array_or_scalar.astype(dtype, copy=copy) # type: ignore[union-attr] @implements(np.result_type) def __extension_duck_array__result_type( *arrays_and_dtypes: list[ np.typing.ArrayLike | np.typing.DTypeLike | ExtensionDtype | ExtensionArray ], ) -> DtypeObj: extension_arrays_and_dtypes: list[ExtensionDtype | ExtensionArray] = [ cast(ExtensionDtype | ExtensionArray, x) for x in arrays_and_dtypes if is_allowed_extension_array(x) or is_allowed_extension_array_dtype(x) ] if not extension_arrays_and_dtypes: return NotImplemented ea_dtypes: list[ExtensionDtype] = [ getattr(x, "dtype", cast(ExtensionDtype, x)) for x in extension_arrays_and_dtypes ] scalars = [ x for x in arrays_and_dtypes if is_scalar(x) and x not in {pd.NA, np.nan} ] # other_stuff could include: # - arrays such as pd.ABCSeries, np.ndarray, or other array-api duck arrays # - dtypes such as pd.DtypeObj, np.dtype, or other array-api duck dtypes other_stuff = [ x for x in arrays_and_dtypes if not is_allowed_extension_array_dtype(x) and not is_scalar(x) ] # We implement one special case: when possible, preserve Categoricals (avoid promoting # to object) by merging the categories of all given Categoricals + scalars + NA. # Ideally this could be upstreamed into pandas find_result_type / find_common_type. 
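    # Illustrative sketch of that special case, using the helper defined
    # below: two unordered categoricals plus a plain scalar merge into a
    # single CategoricalDtype over the union of their categories, e.g.
    #     union_unordered_categorical_and_scalar(
    #         [pd.CategoricalDtype(["a", "b"]), pd.CategoricalDtype(["b", "c"])],
    #         ["d"],
    #     )
    # yields a CategoricalDtype with categories {"a", "b", "c", "d"} (category
    # order is unspecified because a set union is used).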
if not other_stuff and all( isinstance(x, pd.CategoricalDtype) and not x.ordered for x in ea_dtypes ): return union_unordered_categorical_and_scalar( cast(list[pd.CategoricalDtype], ea_dtypes), scalars, # type: ignore[arg-type] ) if not other_stuff and all( isinstance(x, type(ea_type := ea_dtypes[0])) for x in ea_dtypes ): return ea_type raise ValueError( f"Cannot cast values to shared type, found values: {arrays_and_dtypes}" ) def union_unordered_categorical_and_scalar( categorical_dtypes: list[pd.CategoricalDtype], scalars: list[Scalar] ) -> pd.CategoricalDtype: scalars = [x for x in scalars if x is not pd.CategoricalDtype.na_value] all_categories = set().union(*(x.categories for x in categorical_dtypes)) all_categories = all_categories.union(scalars) return pd.CategoricalDtype(categories=list(all_categories)) @implements(np.broadcast_to) def __extension_duck_array__broadcast(arr: T_ExtensionArray, shape: tuple): if shape[0] == len(arr) and len(shape) == 1: return arr raise NotImplementedError("Cannot broadcast 1d-only pandas extension array.") @implements(np.stack) def __extension_duck_array__stack(arr: T_ExtensionArray, axis: int): raise NotImplementedError("Cannot stack 1d-only pandas extension array.") @implements(np.concatenate) def __extension_duck_array__concatenate( arrays: Sequence[T_ExtensionArray], axis: int = 0, out=None ) -> T_ExtensionArray: return type(arrays[0])._concat_same_type(arrays) # type: ignore[attr-defined] @implements(np.where) def __extension_duck_array__where( condition: T_ExtensionArray | np.typing.ArrayLike, x: T_ExtensionArray, y: T_ExtensionArray | np.typing.ArrayLike, ) -> T_ExtensionArray: # pd.where won't broadcast 0-dim arrays across a scalar-like series; scalar y's must be preserved if hasattr(y, "shape") and len(y.shape) == 1 and y.shape[0] == 1: y = y[0] # type: ignore[index] return cast(T_ExtensionArray, pd.Series(x).where(condition, y).array) # type: ignore[arg-type] def _replace_duck(args, replacer: Callable[[PandasExtensionArray], list]) -> list: args_as_list = list(args) for index, value in enumerate(args_as_list): if isinstance(value, PandasExtensionArray): args_as_list[index] = replacer(value) elif isinstance(value, tuple): # should handle more than just tuple? iterable? args_as_list[index] = tuple(_replace_duck(value, replacer)) elif isinstance(value, list): args_as_list[index] = _replace_duck(value, replacer) return args_as_list def replace_duck_with_extension_array(args) -> tuple: return tuple(_replace_duck(args, lambda duck: duck.array)) def replace_duck_with_series(args) -> tuple: return tuple(_replace_duck(args, lambda duck: pd.Series(duck.array))) @implements(np.ndim) def __extension_duck_array__ndim(x: PandasExtensionArray) -> int: return x.ndim @implements(np.reshape) def __extension_duck_array__reshape( arr: T_ExtensionArray, shape: tuple ) -> T_ExtensionArray: if (shape[0] == len(arr) and len(shape) == 1) or shape == (-1,): return arr raise NotImplementedError( f"Cannot reshape 1d-only pandas extension array to: {shape}" ) @dataclass(frozen=True) class PandasExtensionArray(NDArrayMixin, Generic[T_ExtensionArray]): """NEP-18 compliant wrapper for pandas extension arrays. Parameters ---------- array : T_ExtensionArray The array to be wrapped upon e.g,. :py:class:`xarray.Variable` creation. 
``` """ array: T_ExtensionArray def __post_init__(self): if not isinstance(self.array, pd.api.extensions.ExtensionArray): raise TypeError(f"{self.array} is not a pandas ExtensionArray.") # This does not use the UNSUPPORTED_EXTENSION_ARRAY_TYPES whitelist because # we do support extension arrays from datetime, for example, that need # duck array support internally via this class. These can appear from `DatetimeIndex` # wrapped by `PandasIndex` internally, for example. if not is_allowed_extension_array(self.array): raise TypeError( f"{self.array.dtype!r} should be converted to a numpy array in `xarray` internally." ) def __array_function__(self, func, types, args, kwargs): if func not in HANDLED_EXTENSION_ARRAY_FUNCTIONS: raise KeyError("Function not registered for pandas extension arrays.") args = replace_duck_with_extension_array(args) res = HANDLED_EXTENSION_ARRAY_FUNCTIONS[func](*args, **kwargs) if isinstance(res, ExtensionArray): return PandasExtensionArray(res) return res def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return ufunc(*inputs, **kwargs) def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]: if ( isinstance(key, tuple) and len(key) == 1 ): # pyarrow type arrays can't handle single-length tuples (key,) = key item = self.array[key] if is_allowed_extension_array(item): return PandasExtensionArray(item) if is_scalar(item) or isinstance(key, int): return PandasExtensionArray(type(self.array)._from_sequence([item])) # type: ignore[call-arg,attr-defined,unused-ignore] return PandasExtensionArray(item) def __setitem__(self, key, val): self.array[key] = val def __len__(self): return len(self.array) def __eq__(self, other): if isinstance(other, PandasExtensionArray): return self.array == other.array return self.array == other def __ne__(self, other): return ~(self == other) @property def ndim(self) -> int: return 1 def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.array, dtype=dtype, copy=copy) else: return np.asarray(self.array, dtype=dtype) def __getattr__(self, attr: str) -> Any: # with __deepcopy__ or __copy__, the object is first constructed and then the sub-objects are attached (see https://docs.python.org/3/library/copy.html) # Thus, if we didn't have `super().__getattribute__("array")` this method would call `self.array` (i.e., `getattr(self, "array")`) again while looking for `__setstate__` # (which is apparently the first thing sought in copy.copy from the under-construction copied object), # which would cause a recursion error since `array` is not present on the object when it is being constructed during `__{deep}copy__`. 
# Even though we have defined these two methods now below due to `test_extension_array_copy_arrow_type` (cause unknown) # we leave this here as it more robust than self.array return getattr(super().__getattribute__("array"), attr) def __copy__(self) -> PandasExtensionArray[T_ExtensionArray]: return PandasExtensionArray(copy.copy(self.array)) def __deepcopy__( self, memo: dict[int, Any] | None = None ) -> PandasExtensionArray[T_ExtensionArray]: return PandasExtensionArray(copy.deepcopy(self.array, memo=memo)) xarray-2025.12.0/xarray/core/extensions.py000066400000000000000000000076551511464676000203670ustar00rootroot00000000000000from __future__ import annotations import warnings from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree class AccessorRegistrationWarning(Warning): """Warning for conflicts in accessor registration.""" class _CachedAccessor: """Custom property-like object (descriptor) for caching accessors.""" def __init__(self, name, accessor): self._name = name self._accessor = accessor def __get__(self, obj, cls): if obj is None: # we're accessing the attribute of the class, i.e., Dataset.geo return self._accessor # Use the same dict as @pandas.util.cache_readonly. # It must be explicitly declared in obj.__slots__. try: cache = obj._cache except AttributeError: cache = obj._cache = {} try: return cache[self._name] except KeyError: pass try: accessor_obj = self._accessor(obj) except AttributeError as err: # __getattr__ on data object will swallow any AttributeErrors # raised when initializing the accessor, so we need to raise as # something else (GH933): raise RuntimeError(f"error initializing {self._name!r} accessor.") from err cache[self._name] = accessor_obj return accessor_obj def _register_accessor(name, cls): def decorator(accessor): if hasattr(cls, name): warnings.warn( f"registration of accessor {accessor!r} under name {name!r} for type {cls!r} is " "overriding a preexisting attribute with the same name.", AccessorRegistrationWarning, stacklevel=2, ) setattr(cls, name, _CachedAccessor(name, accessor)) return accessor return decorator def register_dataarray_accessor(name): """Register a custom accessor on xarray.DataArray objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. See Also -------- register_dataset_accessor """ return _register_accessor(name, DataArray) def register_dataset_accessor(name): """Register a custom property on xarray.Dataset objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. Examples -------- In your library code: >>> @xr.register_dataset_accessor("geo") ... class GeoAccessor: ... def __init__(self, xarray_obj): ... self._obj = xarray_obj ... ... @property ... def center(self): ... # return the geographic center point of this dataset ... lon = self._obj.latitude ... lat = self._obj.longitude ... return (float(lon.mean()), float(lat.mean())) ... ... def plot(self): ... # plot this array's data on a map, e.g., using Cartopy ... pass ... Back in an interactive IPython session: >>> ds = xr.Dataset( ... {"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)} ... 
) >>> ds.geo.center (10.0, 5.0) >>> ds.geo.plot() # plots data on a map See Also -------- register_dataarray_accessor """ return _register_accessor(name, Dataset) def register_datatree_accessor(name): """Register a custom accessor on DataTree objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. See Also -------- xarray.register_dataarray_accessor xarray.register_dataset_accessor """ return _register_accessor(name, DataTree) xarray-2025.12.0/xarray/core/formatting.py000066400000000000000000001174231511464676000203350ustar00rootroot00000000000000"""String formatting routines for __repr__.""" from __future__ import annotations import contextlib import functools import math from collections import ChainMap, defaultdict from collections.abc import Collection, Hashable, Mapping, Sequence from datetime import datetime, timedelta from itertools import chain, zip_longest from reprlib import recursive_repr from textwrap import indent from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd from pandas.errors import OutOfBoundsDatetime from xarray.core.datatree_render import RenderDataTree from xarray.core.duck_array_ops import array_all, array_any, array_equiv, astype, ravel from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( BasicIndexer, ExplicitlyIndexed, MemoryCachedArray, ) from xarray.core.options import OPTIONS, _get_boolean_with_default from xarray.core.treenode import group_subtrees from xarray.core.utils import is_duck_array from xarray.namedarray.pycompat import array_type, to_duck_array if TYPE_CHECKING: from xarray.core.coordinates import AbstractCoordinates from xarray.core.datatree import DataTree from xarray.core.variable import Variable UNITS = ("B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") def pretty_print(x, numchars: int): """Given an object `x`, call `str(x)` and format the returned string so that it is numchars long, padding with trailing spaces or truncating with ellipses as necessary """ s = maybe_truncate(x, numchars) return s + " " * max(numchars - len(s), 0) def maybe_truncate(obj, maxlen=500): s = str(obj) if len(s) > maxlen: s = s[: (maxlen - 3)] + "..." return s def wrap_indent(text, start="", length=None): if length is None: length = len(start) indent = "\n" + " " * length return start + indent.join(x for x in text.splitlines()) def _get_indexer_at_least_n_items(shape, n_desired, from_end): assert 0 < n_desired <= math.prod(shape) cum_items = np.cumprod(shape[::-1]) n_steps = np.argmax(cum_items >= n_desired) stop = math.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]) indexer = ( ((-1 if from_end else 0),) * (len(shape) - 1 - n_steps) + ((slice(-stop, None) if from_end else slice(stop)),) + (slice(None),) * n_steps ) return indexer def first_n_items(array, n_desired): """Returns the first n_desired items of an array""" # Unfortunately, we can't just do array.flat[:n_desired] here because it # might not be a numpy.ndarray. Moreover, access to elements of the array # could be very expensive (e.g. if it's only available over DAP), so go out # of our way to get them in a single call to __getitem__ using only slices. 
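    # Illustrative sketch of the slicing strategy: for an array of shape
    # (10, 10) with n_desired=3, _get_indexer_at_least_n_items returns
    # (0, slice(None, 3)), so only the first three items of the first row are
    # pulled from the underlying store rather than the whole array.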
from xarray.core.variable import Variable if n_desired < 1: raise ValueError("must request at least one item") if array.size == 0: # work around for https://github.com/numpy/numpy/issues/5195 return [] if n_desired < array.size: indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False) if isinstance(array, ExplicitlyIndexed): indexer = BasicIndexer(indexer) array = array[indexer] # We pass variable objects in to handle indexing # with indexer above. It would not work with our # lazy indexing classes at the moment, so we cannot # pass Variable._data if isinstance(array, Variable): array = array._data return ravel(to_duck_array(array))[:n_desired] def last_n_items(array, n_desired): """Returns the last n_desired items of an array""" # Unfortunately, we can't just do array.flat[-n_desired:] here because it # might not be a numpy.ndarray. Moreover, access to elements of the array # could be very expensive (e.g. if it's only available over DAP), so go out # of our way to get them in a single call to __getitem__ using only slices. from xarray.core.variable import Variable if (n_desired == 0) or (array.size == 0): return [] if n_desired < array.size: indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True) if isinstance(array, ExplicitlyIndexed): indexer = BasicIndexer(indexer) array = array[indexer] # We pass variable objects in to handle indexing # with indexer above. It would not work with our # lazy indexing classes at the moment, so we cannot # pass Variable._data if isinstance(array, Variable): array = array._data return ravel(to_duck_array(array))[-n_desired:] def last_item(array): """Returns the last item of an array.""" indexer = (slice(-1, None),) * array.ndim return ravel(to_duck_array(array[indexer])) def calc_max_rows_first(max_rows: int) -> int: """Calculate the first rows to maintain the max number of rows.""" return max_rows // 2 + max_rows % 2 def calc_max_rows_last(max_rows: int) -> int: """Calculate the last rows to maintain the max number of rows.""" return max_rows // 2 def format_timestamp(t): """Cast given object to a Timestamp and return a nicely formatted string""" try: timestamp = pd.Timestamp(t) datetime_str = timestamp.isoformat(sep=" ") except OutOfBoundsDatetime: datetime_str = str(t) try: date_str, time_str = datetime_str.split() except ValueError: # catch NaT and others that don't split nicely return datetime_str else: if time_str == "00:00:00": return date_str else: return f"{date_str}T{time_str}" def format_timedelta(t, timedelta_format=None): """Cast given object to a Timestamp and return a nicely formatted string""" timedelta_str = str(pd.Timedelta(t)) try: days_str, time_str = timedelta_str.split(" days ") except ValueError: # catch NaT and others that don't split nicely return timedelta_str else: if timedelta_format == "date": return days_str + " days" elif timedelta_format == "time": return time_str else: return timedelta_str def format_item(x, timedelta_format=None, quote_strings=True): """Returns a succinct summary of an object as a string""" if isinstance(x, PandasExtensionArray): # We want to bypass PandasExtensionArray's repr here # because its __repr__ is PandasExtensionArray(array=[...]) # and this function is only for single elements. 
return str(x.array[0]) if isinstance(x, np.datetime64 | datetime): return format_timestamp(x) if isinstance(x, np.timedelta64 | timedelta): return format_timedelta(x, timedelta_format=timedelta_format) elif isinstance(x, str | bytes): if hasattr(x, "dtype"): x = x.item() return repr(x) if quote_strings else x elif hasattr(x, "dtype") and np.issubdtype(x.dtype, np.floating) and x.shape == (): return f"{x.item():.4}" else: return str(x) def format_items(x): """Returns a succinct summaries of all items in a sequence as strings""" x = to_duck_array(x) timedelta_format = "datetime" if not isinstance(x, PandasExtensionArray) and np.issubdtype( x.dtype, np.timedelta64 ): x = astype(x, dtype="timedelta64[ns]") day_part = x[~pd.isnull(x)].astype("timedelta64[D]").astype("timedelta64[ns]") time_needed = x[~pd.isnull(x)] != day_part day_needed = day_part != np.timedelta64(0, "ns") if array_all(np.logical_not(day_needed)): timedelta_format = "time" elif array_all(np.logical_not(time_needed)): timedelta_format = "date" formatted = [format_item(xi, timedelta_format) for xi in x] return formatted def format_array_flat(array, max_width: int): """Return a formatted string for as many items in the flattened version of array that will fit within max_width characters. """ # every item will take up at least two characters, but we always want to # print at least first and last items max_possibly_relevant = min(max(array.size, 1), max(math.ceil(max_width / 2.0), 2)) relevant_front_items = format_items( first_n_items(array, (max_possibly_relevant + 1) // 2) ) relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2)) # interleave relevant front and back items: # [a, b, c] and [y, z] -> [a, z, b, y, c] relevant_items = sum( zip_longest(relevant_front_items, reversed(relevant_back_items)), () )[:max_possibly_relevant] cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1 if (array.size > 2) and ( (max_possibly_relevant < array.size) or array_any(cum_len > max_width) ): padding = " ... " max_len = max(int(np.argmax(cum_len + len(padding) - 1 > max_width)), 2) count = min(array.size, max_len) else: count = array.size padding = "" if (count <= 1) else " " num_front = (count + 1) // 2 num_back = count - num_front # note that num_back is 0 <--> array.size is 0 or 1 # <--> relevant_back_items is [] pprint_str = "".join( [ " ".join(relevant_front_items[:num_front]), padding, " ".join(relevant_back_items[-num_back:]), ] ) # As a final check, if it's still too long even with the limit in values, # replace the end with an ellipsis # NB: this will still returns a full 3-character ellipsis when max_width < 3 if len(pprint_str) > max_width: pprint_str = pprint_str[: max(max_width - 3, 0)] + "..." return pprint_str # mapping of tuple[modulename, classname] to repr _KNOWN_TYPE_REPRS = { ("numpy", "ndarray"): "np.ndarray", ("sparse._coo.core", "COO"): "sparse.COO", } def inline_dask_repr(array): """Similar to dask.array.DataArray.__repr__, but without redundant information that's already printed by the repr function of the xarray wrapper. 
""" assert isinstance(array, array_type("dask")), array chunksize = tuple(c[0] for c in array.chunks) if hasattr(array, "_meta"): meta = array._meta identifier = (type(meta).__module__, type(meta).__name__) meta_repr = _KNOWN_TYPE_REPRS.get(identifier, ".".join(identifier)) meta_string = f", meta={meta_repr}" else: meta_string = "" return f"dask.array" def inline_sparse_repr(array): """Similar to sparse.COO.__repr__, but without the redundant shape/dtype.""" sparse_array_type = array_type("sparse") assert isinstance(array, sparse_array_type), array return f"<{type(array).__name__}: nnz={array.nnz:d}, fill_value={array.fill_value}>" def inline_variable_array_repr(var, max_width): """Build a one-line summary of a variable's data.""" if hasattr(var._data, "_repr_inline_"): return var._data._repr_inline_(max_width) if getattr(var, "_in_memory", False): return format_array_flat(var, max_width) dask_array_type = array_type("dask") if isinstance(var._data, dask_array_type): return inline_dask_repr(var.data) sparse_array_type = array_type("sparse") if isinstance(var._data, sparse_array_type): return inline_sparse_repr(var.data) if hasattr(var._data, "__array_function__"): return maybe_truncate(repr(var._data).replace("\n", " "), max_width) # internal xarray array type return "..." def summarize_variable( name: Hashable, var: Variable, col_width: int | None = None, max_width: int | None = None, is_index: bool = False, ): """Summarize a variable in one line, e.g., for the Dataset.__repr__.""" variable = getattr(var, "variable", var) if max_width is None: max_width_options = OPTIONS["display_width"] if not isinstance(max_width_options, int): raise TypeError(f"`max_width` value of `{max_width}` is not a valid int") else: max_width = max_width_options marker = "*" if is_index else " " first_col = f" {marker} {name} " if col_width is not None: first_col = pretty_print(first_col, col_width) if variable.dims: dims_str = ", ".join(map(str, variable.dims)) dims_str = f"({dims_str}) " else: dims_str = "" front_str = f"{first_col}{dims_str}{variable.dtype} {render_human_readable_nbytes(variable.nbytes)} " values_width = max_width - len(front_str) values_str = inline_variable_array_repr(variable, values_width) return f"{front_str}{values_str}" def summarize_attr(key, value, col_width=None): """Summary for __repr__ - use ``X.attrs[key]`` for full value.""" # Indent key and add ':', then right-pad if col_width is not None k_str = f" {key}:" if col_width is not None: k_str = pretty_print(k_str, col_width) # Replace tabs and newlines, so we print on one line in known width v_str = str(value).replace("\t", "\\t").replace("\n", "\\n") # Finally, truncate to the desired display width return maybe_truncate(f"{k_str} {v_str}", OPTIONS["display_width"]) EMPTY_REPR = " *empty*" def _calculate_col_width(col_items): max_name_length = max((len(str(s)) for s in col_items), default=0) col_width = max(max_name_length, 7) + 6 return col_width def _mapping_repr( mapping, title, summarizer, expand_option_name, col_width=None, max_rows=None, indexes=None, ): if col_width is None: col_width = _calculate_col_width(mapping) summarizer_kwargs = defaultdict(dict) if indexes is not None: summarizer_kwargs = {k: {"is_index": k in indexes} for k in mapping} summary = [f"{title}:"] if mapping: len_mapping = len(mapping) if not _get_boolean_with_default(expand_option_name, default=True): summary = [f"{summary[0]} ({len_mapping})"] elif max_rows is not None and len_mapping > max_rows: summary = [f"{summary[0]} ({max_rows}/{len_mapping})"] 
first_rows = calc_max_rows_first(max_rows) keys = list(mapping.keys()) summary += [ summarizer(k, mapping[k], col_width, **summarizer_kwargs[k]) for k in keys[:first_rows] ] if max_rows > 1: last_rows = calc_max_rows_last(max_rows) summary += [pretty_print(" ...", col_width) + " ..."] summary += [ summarizer(k, mapping[k], col_width, **summarizer_kwargs[k]) for k in keys[-last_rows:] ] else: summary += [ summarizer(k, v, col_width, **summarizer_kwargs[k]) for k, v in mapping.items() ] else: summary += [EMPTY_REPR] return "\n".join(summary) data_vars_repr = functools.partial( _mapping_repr, title="Data variables", summarizer=summarize_variable, expand_option_name="display_expand_data_vars", ) attrs_repr = functools.partial( _mapping_repr, title="Attributes", summarizer=summarize_attr, expand_option_name="display_expand_attrs", ) def coords_repr(coords: AbstractCoordinates, col_width=None, max_rows=None): if col_width is None: col_width = _calculate_col_width(coords) dims = tuple(coords._data.dims) dim_ordered_coords = sorted( coords.items(), key=lambda x: dims.index(x[0]) if x[0] in dims else len(dims) ) return _mapping_repr( dict(dim_ordered_coords), title="Coordinates", summarizer=summarize_variable, expand_option_name="display_expand_coords", col_width=col_width, indexes=coords.xindexes, max_rows=max_rows, ) def inherited_coords_repr(node: DataTree, col_width=None, max_rows=None): coords = inherited_vars(node._coord_variables) if col_width is None: col_width = _calculate_col_width(coords) return _mapping_repr( coords, title="Inherited coordinates", summarizer=summarize_variable, expand_option_name="display_expand_coords", col_width=col_width, indexes=node._indexes, max_rows=max_rows, ) def inline_index_repr(index: pd.Index, max_width: int) -> str: if hasattr(index, "_repr_inline_"): repr_ = index._repr_inline_(max_width=max_width) else: # fallback for the `pandas.Index` subclasses from # `Indexes.get_pandas_indexes` / `xr_obj.indexes` repr_ = repr(index) return repr_ def summarize_index( names: tuple[Hashable, ...], index, col_width: int, max_width: int | None = None, ) -> str: if max_width is None: max_width = OPTIONS["display_width"] def prefixes(length: int) -> list[str]: if length in (0, 1): return [" "] return ["โ”Œ"] + ["โ”‚"] * max(length - 2, 0) + ["โ””"] preformatted = [ pretty_print(f" {prefix} {name}", col_width) for prefix, name in zip(prefixes(len(names)), names, strict=True) ] head, *tail = preformatted index_width = max_width - len(head) repr_ = inline_index_repr(index, max_width=index_width) return "\n".join([head + repr_] + [line.rstrip() for line in tail]) def filter_nondefault_indexes(indexes, filter_indexes: bool): from xarray.core.indexes import PandasIndex, PandasMultiIndex if not filter_indexes: return indexes default_indexes = (PandasIndex, PandasMultiIndex) return { key: index for key, index in indexes.items() if not isinstance(index, default_indexes) } def indexes_repr(indexes, max_rows: int | None = None, title: str = "Indexes") -> str: col_width = _calculate_col_width(chain.from_iterable(indexes)) return _mapping_repr( indexes, title, summarize_index, "display_expand_indexes", col_width=col_width, max_rows=max_rows, ) def dim_summary(obj): elements = [f"{k}: {v}" for k, v in obj.sizes.items()] return ", ".join(elements) def _element_formatter( elements: Collection[Hashable], col_width: int, max_rows: int | None = None, delimiter: str = ", ", ) -> str: """ Formats elements for better readability. 
Once it becomes wider than the display width it will create a newline and continue indented to col_width. Once there are more rows than the maximum displayed rows it will start removing rows. Parameters ---------- elements : Collection of hashable Elements to join together. col_width : int The width to indent to if a newline has been made. max_rows : int, optional The maximum number of allowed rows. The default is None. delimiter : str, optional Delimiter to use between each element. The default is ", ". """ elements_len = len(elements) out = [""] length_row = 0 for i, v in enumerate(elements): delim = delimiter if i < elements_len - 1 else "" v_delim = f"{v}{delim}" length_element = len(v_delim) length_row += length_element # Create a new row if the next elements makes the print wider than # the maximum display width: if col_width + length_row > OPTIONS["display_width"]: out[-1] = out[-1].rstrip() # Remove trailing whitespace. out.append("\n" + pretty_print("", col_width) + v_delim) length_row = length_element else: out[-1] += v_delim # If there are too many rows of dimensions trim some away: if max_rows and (len(out) > max_rows): first_rows = calc_max_rows_first(max_rows) last_rows = calc_max_rows_last(max_rows) out = ( out[:first_rows] + ["\n" + pretty_print("", col_width) + "..."] + (out[-last_rows:] if max_rows > 1 else []) ) return "".join(out) def dim_summary_limited( sizes: Mapping[Any, int], col_width: int, max_rows: int | None = None ) -> str: elements = [f"{k}: {v}" for k, v in sizes.items()] return _element_formatter(elements, col_width, max_rows) def unindexed_dims_repr(dims, coords, max_rows: int | None = None): unindexed_dims = [d for d in dims if d not in coords] if unindexed_dims: dims_start = "Dimensions without coordinates: " dims_str = _element_formatter( unindexed_dims, col_width=len(dims_start), max_rows=max_rows ) return dims_start + dims_str else: return None @contextlib.contextmanager def set_numpy_options(*args, **kwargs): original = np.get_printoptions() np.set_printoptions(*args, **kwargs) try: yield finally: np.set_printoptions(**original) def limit_lines(string: str, *, limit: int): """ If the string is more lines than the limit, this returns the middle lines replaced by an ellipsis """ lines = string.splitlines() if len(lines) > limit: string = "\n".join(chain(lines[: limit // 2], ["..."], lines[-limit // 2 :])) return string def short_array_repr(array): from xarray.core.common import AbstractArray if isinstance(array, AbstractArray): array = array.data if isinstance(array, pd.api.extensions.ExtensionArray): return repr(array) array = to_duck_array(array) # default to lower precision so a full (abbreviated) line can fit on # one line with the default display_width options = { "precision": 6, "linewidth": OPTIONS["display_width"], "threshold": OPTIONS["display_values_threshold"], } if array.ndim < 3: edgeitems = 3 elif array.ndim == 3: edgeitems = 2 else: edgeitems = 1 options["edgeitems"] = edgeitems with set_numpy_options(**options): return repr(array) def short_data_repr(array): """Format "data" for DataArray and Variable.""" internal_data = getattr(array, "variable", array)._data if isinstance(array, np.ndarray): return short_array_repr(array) elif is_duck_array(internal_data): return limit_lines(repr(array.data), limit=40) elif getattr(array, "_in_memory", None): return short_array_repr(array) else: # internal xarray array type return f"[{array.size} values with dtype={array.dtype}]" def _get_indexes_dict(indexes): return { tuple(index_vars.keys()): idx 
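# Illustrative doctest-style sketch of limit_lines (defined above), which
# short_data_repr uses to cap duck-array reprs (e.g. dask) at 40 lines:
# >>> limit_lines("\n".join(str(i) for i in range(10)), limit=4)
# '0\n1\n...\n8\n9'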
for idx, index_vars in indexes.group_by_index() } @recursive_repr("") def array_repr(arr): from xarray.core.variable import Variable max_rows = OPTIONS["display_max_rows"] # used for DataArray, Variable and IndexVariable if hasattr(arr, "name") and arr.name is not None: name_str = f"{arr.name!r} " else: name_str = "" if ( isinstance(arr, Variable) or _get_boolean_with_default("display_expand_data", default=True) or isinstance(arr.variable._data, MemoryCachedArray) ): data_repr = short_data_repr(arr) else: data_repr = inline_variable_array_repr(arr.variable, OPTIONS["display_width"]) start = f" Size: {nbytes_str}", data_repr, ] if hasattr(arr, "coords"): if arr.coords: col_width = _calculate_col_width(arr.coords) summary.append( coords_repr(arr.coords, col_width=col_width, max_rows=max_rows) ) unindexed_dims_str = unindexed_dims_repr( arr.dims, arr.coords, max_rows=max_rows ) if unindexed_dims_str: summary.append(unindexed_dims_str) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(arr.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if arr.attrs: summary.append(attrs_repr(arr.attrs, max_rows=max_rows)) return "\n".join(summary) @recursive_repr("") def dataset_repr(ds): nbytes_str = render_human_readable_nbytes(ds.nbytes) summary = [f" Size: {nbytes_str}"] col_width = _calculate_col_width(ds.variables) max_rows = OPTIONS["display_max_rows"] dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if ds.coords: summary.append(coords_repr(ds.coords, col_width=col_width, max_rows=max_rows)) unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords, max_rows=max_rows) if unindexed_dims_str: summary.append(unindexed_dims_str) summary.append(data_vars_repr(ds.data_vars, col_width=col_width, max_rows=max_rows)) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(ds.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if ds.attrs: summary.append(attrs_repr(ds.attrs, max_rows=max_rows)) return "\n".join(summary) def dims_and_coords_repr(ds) -> str: """Partial Dataset repr for use inside DataTree inheritance errors.""" summary = [] col_width = _calculate_col_width(ds.coords) max_rows = OPTIONS["display_max_rows"] dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if ds.coords: summary.append(coords_repr(ds.coords, col_width=col_width, max_rows=max_rows)) unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords, max_rows=max_rows) if unindexed_dims_str: summary.append(unindexed_dims_str) return "\n".join(summary) def diff_name_summary(a, b) -> str: if a.name != b.name: return f"Differing names:\n {a.name!r} != {b.name!r}" else: return "" def diff_dim_summary(a, b) -> str: if a.sizes != b.sizes: return f"Differing dimensions:\n ({dim_summary(a)}) != ({dim_summary(b)})" else: return "" def _diff_mapping_repr( a_mapping, b_mapping, compat, title, summarizer, col_width=None, a_indexes=None, b_indexes=None, ): def compare_attr(a, b): if is_duck_array(a) or is_duck_array(b): return array_equiv(a, b) else: return a == b def 
extra_items_repr(extra_keys, mapping, ab_side, kwargs): extra_repr = [ summarizer(k, mapping[k], col_width, **kwargs[k]) for k in extra_keys ] if extra_repr: header = f"{title} only on the {ab_side} object:" return [header] + extra_repr else: return [] a_keys = set(a_mapping) b_keys = set(b_mapping) summary = [] diff_items = [] a_summarizer_kwargs = defaultdict(dict) if a_indexes is not None: a_summarizer_kwargs = {k: {"is_index": k in a_indexes} for k in a_mapping} b_summarizer_kwargs = defaultdict(dict) if b_indexes is not None: b_summarizer_kwargs = {k: {"is_index": k in b_indexes} for k in b_mapping} for k in a_keys & b_keys: try: # compare xarray variable if not callable(compat): compatible = getattr(a_mapping[k].variable, compat)( b_mapping[k].variable ) else: compatible = compat(a_mapping[k].variable, b_mapping[k].variable) is_variable = True except AttributeError: # compare attribute value compatible = compare_attr(a_mapping[k], b_mapping[k]) is_variable = False if not compatible: temp = [ summarizer(k, a_mapping[k], col_width, **a_summarizer_kwargs[k]), summarizer(k, b_mapping[k], col_width, **b_summarizer_kwargs[k]), ] if compat == "identical" and is_variable: attrs_summary = [] a_attrs = a_mapping[k].attrs b_attrs = b_mapping[k].attrs attrs_to_print = set(a_attrs) ^ set(b_attrs) attrs_to_print.update( { k for k in set(a_attrs) & set(b_attrs) if not compare_attr(a_attrs[k], b_attrs[k]) } ) for m in (a_mapping, b_mapping): attr_s = "\n".join( " " + summarize_attr(ak, av) for ak, av in m[k].attrs.items() if ak in attrs_to_print ) if attr_s: attr_s = " Differing variable attributes:\n" + attr_s attrs_summary.append(attr_s) temp = [ f"{var_s}\n{attr_s}" if attr_s else var_s for var_s, attr_s in zip(temp, attrs_summary, strict=True) ] # TODO: It should be possible recursively use _diff_mapping_repr # instead of explicitly handling variable attrs specially. # That would require some refactoring. 
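# These diff helpers back the error messages raised by xarray's testing
# utilities; an illustrative, hedged sketch of the user-facing behaviour:
# >>> import xarray as xr
# >>> a = xr.DataArray([1, 2], attrs={"units": "m"})
# >>> b = xr.DataArray([1, 2], attrs={"units": "s"})
# >>> xr.testing.assert_identical(a, b)  # raises AssertionError whose message
# ...                                    # includes a "Differing attributes:" block
# The commented-out call below sketches the recursive refactor mentioned above.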
# newdiff = _diff_mapping_repr( # {k: v for k,v in a_attrs.items() if k in attrs_to_print}, # {k: v for k,v in b_attrs.items() if k in attrs_to_print}, # compat=compat, # summarizer=summarize_attr, # title="Variable Attributes" # ) # temp += [newdiff] diff_items += [ ab_side + s[1:] for ab_side, s in zip(("L", "R"), temp, strict=True) ] if diff_items: summary += [f"Differing {title.lower()}:"] + diff_items summary += extra_items_repr(a_keys - b_keys, a_mapping, "left", a_summarizer_kwargs) summary += extra_items_repr( b_keys - a_keys, b_mapping, "right", b_summarizer_kwargs ) return "\n".join(summary) def diff_coords_repr(a, b, compat, col_width=None): return _diff_mapping_repr( a, b, compat, "Coordinates", summarize_variable, col_width=col_width, a_indexes=a.xindexes, b_indexes=b.xindexes, ) diff_data_vars_repr = functools.partial( _diff_mapping_repr, title="Data variables", summarizer=summarize_variable ) diff_attrs_repr = functools.partial( _diff_mapping_repr, title="Attributes", summarizer=summarize_attr ) def _compat_to_str(compat): if callable(compat): compat = compat.__name__ if compat == "equals": return "equal" elif compat == "allclose": return "close" else: return compat def diff_array_repr(a, b, compat): # used for DataArray, Variable and IndexVariable summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] if dims_diff := diff_dim_summary(a, b): summary.append(dims_diff) if callable(compat): equiv = compat else: equiv = array_equiv if not equiv(a.data, b.data): temp = [wrap_indent(short_array_repr(obj), start=" ") for obj in (a, b)] diff_data_repr = [ ab_side + "\n" + ab_data_repr for ab_side, ab_data_repr in zip(("L", "R"), temp, strict=True) ] summary += ["Differing values:"] + diff_data_repr if hasattr(a, "coords"): col_width = _calculate_col_width(set(a.coords) | set(b.coords)) if coords_diff := diff_coords_repr( a.coords, b.coords, compat, col_width=col_width ): summary.append(coords_diff) if compat == "identical" and ( attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat) ): summary.append(attrs_diff) return "\n".join(summary) def diff_treestructure(a: DataTree, b: DataTree) -> str | None: """ Return a summary of why two trees are not isomorphic. If they are isomorphic return None. """ # .group_subtrees walks nodes in breadth-first-order, in order to produce as # shallow of a diff as possible for path, (node_a, node_b) in group_subtrees(a, b): if node_a.children.keys() != node_b.children.keys(): path_str = "root node" if path == "." 
else f"node {path!r}" child_summary = f"{list(node_a.children)} vs {list(node_b.children)}" diff = f"Children at {path_str} do not match: {child_summary}" return diff return None def diff_dataset_repr(a, b, compat): summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] col_width = _calculate_col_width(set(list(a.variables) + list(b.variables))) if dims_diff := diff_dim_summary(a, b): summary.append(dims_diff) if coords_diff := diff_coords_repr(a.coords, b.coords, compat, col_width=col_width): summary.append(coords_diff) if data_diff := diff_data_vars_repr( a.data_vars, b.data_vars, compat, col_width=col_width ): summary.append(data_diff) if compat == "identical" and ( attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat) ): summary.append(attrs_diff) return "\n".join(summary) def diff_nodewise_summary(a: DataTree, b: DataTree, compat): """Iterates over all corresponding nodes, recording differences between data at each location.""" summary = [] for path, (node_a, node_b) in group_subtrees(a, b): a_ds, b_ds = node_a.dataset, node_b.dataset if not a_ds._all_compat(b_ds, compat): path_str = "root node" if path == "." else f"node {path!r}" dataset_diff = diff_dataset_repr(a_ds, b_ds, compat) data_diff = indent( "\n".join(dataset_diff.split("\n", 1)[1:]), prefix=" " ) nodediff = f"Data at {path_str} does not match:\n{data_diff}" summary.append(nodediff) return "\n\n".join(summary) def diff_datatree_repr(a: DataTree, b: DataTree, compat): summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] if compat == "identical" and (diff_name := diff_name_summary(a, b)): summary.append(diff_name) treestructure_diff = diff_treestructure(a, b) # If the trees structures are different there is no point comparing each node, # and doing so would raise an error. # TODO we could show any differences in nodes up to the first place that structure differs? if treestructure_diff is not None: summary.append(treestructure_diff) elif compat != "isomorphic": nodewise_diff = diff_nodewise_summary(a, b, compat) summary.append(nodewise_diff) return "\n\n".join(summary) def inherited_vars(mapping: ChainMap) -> dict: return {k: v for k, v in mapping.parents.items() if k not in mapping.maps[0]} def _datatree_node_repr(node: DataTree, root: bool) -> str: summary = [f"Group: {node.path}"] col_width = _calculate_col_width(node.variables) max_rows = OPTIONS["display_max_rows"] inherited_coords = inherited_vars(node._coord_variables) # Only show dimensions if also showing a variable or coordinates section. show_dims = ( node._node_coord_variables or (root and inherited_coords) or node._data_variables ) dim_sizes = node.sizes if root else node._node_dims if show_dims: # Includes inherited dimensions. 
dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( dim_sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if node._node_coord_variables: node_coords = node.to_dataset(inherit=False).coords summary.append(coords_repr(node_coords, col_width=col_width, max_rows=max_rows)) if root and inherited_coords: summary.append( inherited_coords_repr(node, col_width=col_width, max_rows=max_rows) ) if show_dims: unindexed_dims_str = unindexed_dims_repr( dim_sizes, node.coords, max_rows=max_rows ) if unindexed_dims_str: summary.append(unindexed_dims_str) if node._data_variables: summary.append( data_vars_repr(node._data_variables, col_width=col_width, max_rows=max_rows) ) # TODO: only show indexes defined at this node, with a separate section for # inherited indexes (if root=True) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(node.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if node.attrs: summary.append(attrs_repr(node.attrs, max_rows=max_rows)) return "\n".join(summary) def datatree_repr(dt: DataTree) -> str: """A printable representation of the structure of this entire tree.""" max_children = OPTIONS["display_max_children"] renderer = RenderDataTree(dt, maxchildren=max_children) name_info = "" if dt.name is None else f" {dt.name!r}" header = f"" lines = [header] root = True for pre, fill, node in renderer: if isinstance(node, str): lines.append(f"{fill}{node}") continue node_repr = _datatree_node_repr(node, root=root) root = False # only the first node is the root # TODO: figure out if we can restructure this logic to move child groups # up higher in the repr, directly below the header. # This would be more consistent with the HTML repr. raw_repr_lines = node_repr.splitlines() node_line = f"{pre}{raw_repr_lines[0]}" lines.append(node_line) for line in raw_repr_lines[1:]: if len(node.children) > 0: lines.append(f"{fill}{renderer.style.vertical}{line}") else: lines.append(f"{fill}{' ' * len(renderer.style.vertical)}{line}") return "\n".join(lines) def shorten_list_repr(items: Sequence, max_items: int) -> str: if len(items) <= max_items: return repr(items) else: first_half = repr(items[: max_items // 2])[ 1:-1 ] # Convert to string and remove brackets second_half = repr(items[-max_items // 2 :])[ 1:-1 ] # Convert to string and remove brackets return f"[{first_half}, ..., {second_half}]" def render_human_readable_nbytes( nbytes: int, /, *, attempt_constant_width: bool = False, ) -> str: """Renders simple human-readable byte count representation This is only a quick representation that should not be relied upon for precise needs. To get the exact byte count, please use the ``nbytes`` attribute directly. Parameters ---------- nbytes Byte count attempt_constant_width For reasonable nbytes sizes, tries to render a fixed-width representation. 
Returns ------- Human-readable representation of the byte count """ dividend = float(nbytes) divisor = 1000.0 last_unit_available = UNITS[-1] for unit in UNITS: if dividend < divisor or unit == last_unit_available: break dividend /= divisor dividend_str = f"{dividend:.0f}" unit_str = f"{unit}" if attempt_constant_width: dividend_str = dividend_str.rjust(3) unit_str = unit_str.ljust(2) string = f"{dividend_str}{unit_str}" return string xarray-2025.12.0/xarray/core/formatting_html.py000066400000000000000000000375451511464676000213670ustar00rootroot00000000000000from __future__ import annotations import uuid from collections import OrderedDict from collections.abc import Mapping from functools import lru_cache, partial from html import escape from importlib.resources import files from math import ceil from typing import TYPE_CHECKING, Literal from xarray.core.formatting import ( filter_nondefault_indexes, inherited_vars, inline_index_repr, inline_variable_array_repr, short_data_repr, ) from xarray.core.options import OPTIONS, _get_boolean_with_default STATIC_FILES = ( ("xarray.static.html", "icons-svg-inline.html"), ("xarray.static.css", "style.css"), ) if TYPE_CHECKING: from xarray.core.datatree import DataTree @lru_cache(None) def _load_static_files(): """Lazily load the resource files into memory the first time they are needed""" return [ files(package).joinpath(resource).read_text(encoding="utf-8") for package, resource in STATIC_FILES ] def short_data_repr_html(array) -> str: """Format "data" for DataArray and Variable.""" internal_data = getattr(array, "variable", array)._data if hasattr(internal_data, "_repr_html_"): return internal_data._repr_html_() text = escape(short_data_repr(array)) return f"
<pre class='xr-text-repr-fallback'>{text}</pre>
" def format_dims(dim_sizes, dims_with_index) -> str: if not dim_sizes: return "" dim_css_map = { dim: " class='xr-has-index'" if dim in dims_with_index else "" for dim in dim_sizes } dims_li = "".join( f"
  • {escape(str(dim))}: {size}
  • " for dim, size in dim_sizes.items() ) return f"
      {dims_li}
    " def summarize_attrs(attrs) -> str: attrs_dl = "".join( f"
    {escape(str(k))} :
    {escape(str(v))}
    " for k, v in attrs.items() ) return f"
    {attrs_dl}
    " def _icon(icon_name) -> str: # icon_name should be defined in xarray/static/html/icon-svg-inline.html return ( f"" ) def summarize_variable(name, var, is_index=False, dtype=None) -> str: variable = var.variable if hasattr(var, "variable") else var cssclass_idx = " class='xr-has-index'" if is_index else "" dims_str = f"({', '.join(escape(dim) for dim in var.dims)})" name = escape(str(name)) dtype = dtype or escape(str(var.dtype)) # "unique" ids required to expand/collapse subsections attrs_id = "attrs-" + str(uuid.uuid4()) data_id = "data-" + str(uuid.uuid4()) disabled = "" if len(var.attrs) else "disabled" preview = escape(inline_variable_array_repr(variable, 35)) attrs_ul = summarize_attrs(var.attrs) data_repr = short_data_repr_html(variable) attrs_icon = _icon("icon-file-text2") data_icon = _icon("icon-database") return ( f"
    {name}
    " f"
    {dims_str}
    " f"
    {dtype}
    " f"
    {preview}
    " f"" f"" f"" f"" f"
    {attrs_ul}
    " f"
    {data_repr}
    " ) def summarize_coords(variables) -> str: li_items = [] dims = tuple(variables._data.dims) dim_ordered_coords = sorted( variables.items(), key=lambda x: dims.index(x[0]) if x[0] in dims else len(dims) ) for k, v in dim_ordered_coords: li_content = summarize_variable(k, v, is_index=k in variables.xindexes) li_items.append(f"
  • {li_content}
  • ") vars_li = "".join(li_items) return f"
      {vars_li}
    " def summarize_vars(variables) -> str: vars_li = "".join( f"
  • {summarize_variable(k, v)}
  • " for k, v in variables.items() ) return f"
      {vars_li}
    " def short_index_repr_html(index) -> str: if hasattr(index, "_repr_html_"): return index._repr_html_() return f"
    {escape(repr(index))}
    " def summarize_index(coord_names, index) -> str: name = "
    ".join([escape(str(n)) for n in coord_names]) index_id = f"index-{uuid.uuid4()}" preview = escape(inline_index_repr(index, max_width=70)) details = short_index_repr_html(index) data_icon = _icon("icon-database") return ( f"
    {name}
    " f"
    {preview}
    " # need empty input + label here to conform to the fixed CSS grid layout f"" f"" f"" f"" f"
    {details}
    " ) def summarize_indexes(indexes) -> str: indexes_li = "".join( f"
  • {summarize_index(v, i)}
  • " for v, i in indexes.items() ) return f"
      {indexes_li}
    " def collapsible_section( name, inline_details="", details="", n_items=None, enabled=True, collapsed=False ) -> str: # "unique" id to expand/collapse the section data_id = "section-" + str(uuid.uuid4()) has_items = n_items is not None and n_items n_items_span = "" if n_items is None else f" ({n_items})" enabled = "" if enabled and has_items else "disabled" collapsed = "" if collapsed or not has_items else "checked" tip = " title='Expand/collapse section'" if enabled else "" return ( f"" f"" f"
    {inline_details}
    " f"
    {details}
    " ) def _mapping_section( mapping, name, details_func, max_items_collapse, expand_option_name, enabled=True, max_option_name: Literal["display_max_children"] | None = None, ) -> str: n_items = len(mapping) expanded = _get_boolean_with_default( expand_option_name, n_items < max_items_collapse ) collapsed = not expanded inline_details = "" if max_option_name and max_option_name in OPTIONS: max_items = int(OPTIONS[max_option_name]) if n_items > max_items: inline_details = f"({max_items}/{n_items})" return collapsible_section( name, inline_details=inline_details, details=details_func(mapping), n_items=n_items, enabled=enabled, collapsed=collapsed, ) def dim_section(obj) -> str: dim_list = format_dims(obj.sizes, obj.xindexes.dims) return collapsible_section( "Dimensions", inline_details=dim_list, enabled=False, collapsed=True ) def array_section(obj) -> str: # "unique" id to expand/collapse the section data_id = "section-" + str(uuid.uuid4()) collapsed = ( "checked" if _get_boolean_with_default("display_expand_data", default=True) else "" ) variable = getattr(obj, "variable", obj) preview = escape(inline_variable_array_repr(variable, max_width=70)) data_repr = short_data_repr_html(obj) data_icon = _icon("icon-database") return ( "
    " f"" f"" f"
    {preview}
    " f"
    {data_repr}
    " "
    " ) coord_section = partial( _mapping_section, name="Coordinates", details_func=summarize_coords, max_items_collapse=25, expand_option_name="display_expand_coords", ) datavar_section = partial( _mapping_section, name="Data variables", details_func=summarize_vars, max_items_collapse=15, expand_option_name="display_expand_data_vars", ) index_section = partial( _mapping_section, name="Indexes", details_func=summarize_indexes, max_items_collapse=0, expand_option_name="display_expand_indexes", ) attr_section = partial( _mapping_section, name="Attributes", details_func=summarize_attrs, max_items_collapse=10, expand_option_name="display_expand_attrs", ) def _get_indexes_dict(indexes): return { tuple(index_vars.keys()): idx for idx, index_vars in indexes.group_by_index() } def _obj_repr(obj, header_components, sections): """Return HTML repr of an xarray object. If CSS is not injected (untrusted notebook), fallback to the plain text repr. """ header = f"
    {''.join(h for h in header_components)}
    " sections = "".join(f"
  • {s}
  • " for s in sections) icons_svg, css_style = _load_static_files() return ( "
    " f"{icons_svg}" f"
    {escape(repr(obj))}
    " "" "
    " ) def array_repr(arr) -> str: dims = OrderedDict((k, v) for k, v in zip(arr.dims, arr.shape, strict=True)) if hasattr(arr, "xindexes"): indexed_dims = arr.xindexes.dims else: indexed_dims = {} obj_type = f"xarray.{type(arr).__name__}" arr_name = escape(repr(arr.name)) if getattr(arr, "name", None) else "" header_components = [ f"
    {obj_type}
    ", f"
    {arr_name}
    ", format_dims(dims, indexed_dims), ] sections = [array_section(arr)] if hasattr(arr, "coords"): if arr.coords: sections.append(coord_section(arr.coords)) if hasattr(arr, "xindexes"): display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(arr.xindexes), not display_default_indexes ) if xindexes: indexes = _get_indexes_dict(arr.xindexes) sections.append(index_section(indexes)) if arr.attrs: sections.append(attr_section(arr.attrs)) return _obj_repr(arr, header_components, sections) def dataset_repr(ds) -> str: obj_type = f"xarray.{type(ds).__name__}" header_components = [f"
    {escape(obj_type)}
    "] sections = [] sections.append(dim_section(ds)) if ds.coords: sections.append(coord_section(ds.coords)) sections.append(datavar_section(ds.data_vars)) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(ds.xindexes), not display_default_indexes ) if xindexes: sections.append(index_section(xindexes)) if ds.attrs: sections.append(attr_section(ds.attrs)) return _obj_repr(ds, header_components, sections) def datatree_node_sections(node: DataTree, root: bool = False) -> list[str]: from xarray.core.coordinates import Coordinates ds = node._to_dataset_view(rebuild_dims=False, inherit=True) node_coords = node.to_dataset(inherit=False).coords # use this class to get access to .xindexes property inherited_coords = Coordinates( coords=inherited_vars(node._coord_variables), indexes=inherited_vars(node._indexes), ) # Only show dimensions if also showing a variable or coordinates section. show_dims = ( node._node_coord_variables or (root and inherited_coords) or node._data_variables ) sections = [] if node.children: children_max_items = 1 if ds.data_vars else 6 sections.append( children_section(node.children, max_items_collapse=children_max_items) ) if show_dims: sections.append(dim_section(ds)) if node_coords: sections.append(coord_section(node_coords)) # only show inherited coordinates on the root if root and inherited_coords: sections.append(inherited_coord_section(inherited_coords)) if ds.data_vars: sections.append(datavar_section(ds.data_vars)) if ds.attrs: sections.append(attr_section(ds.attrs)) return sections def summarize_datatree_children(children: Mapping[str, DataTree]) -> str: MAX_CHILDREN = OPTIONS["display_max_children"] n_children = len(children) children_html = [] for i, child in enumerate(children.values()): if i < ceil(MAX_CHILDREN / 2) or i >= ceil(n_children - MAX_CHILDREN / 2): is_last = i == (n_children - 1) children_html.append(datatree_child_repr(child, end=is_last)) elif n_children > MAX_CHILDREN and i == ceil(MAX_CHILDREN / 2): children_html.append("
    ...
    ") return "".join( [ "
    ", "".join(children_html), "
    ", ] ) children_section = partial( _mapping_section, name="Groups", details_func=summarize_datatree_children, max_option_name="display_max_children", expand_option_name="display_expand_groups", ) inherited_coord_section = partial( _mapping_section, name="Inherited coordinates", details_func=summarize_coords, max_items_collapse=25, expand_option_name="display_expand_coords", ) def datatree_child_repr(node: DataTree, end: bool = False) -> str: # Wrap DataTree HTML representation with a tee to the left of it. # # Enclosing HTML tag is a
    with :code:`display: inline-grid` style. # # Turns: # [ title ] # | details | # |_____________| # # into (A): # |โ”€ [ title ] # | | details | # | |_____________| # # or (B): # โ””โ”€ [ title ] # | details | # |_____________| end = bool(end) height = "100%" if end is False else "1.2em" # height of line path = escape(node.path) sections = datatree_node_sections(node, root=False) section_items = "".join(f"
  • {s}
  • " for s in sections) # TODO: Can we make the group name clickable to toggle the sections below? # This looks like it would require the input/label pattern used above. html = f"""
    {path}
      {section_items}
    """ return "".join(t.strip() for t in html.split("\n")) def datatree_repr(node: DataTree) -> str: header_components = [ f"
    xarray.{type(node).__name__}
    ", ] if node.name is not None: name = escape(repr(node.name)) header_components.append(f"
    {name}
    ") sections = datatree_node_sections(node, root=True) return _obj_repr(node, header_components, sections) xarray-2025.12.0/xarray/core/groupby.py000066400000000000000000002057671511464676000176630ustar00rootroot00000000000000from __future__ import annotations import copy import functools import itertools import warnings from collections.abc import Callable, Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast import numpy as np import pandas as pd from packaging.version import Version from xarray.computation import ops from xarray.computation.arithmetic import ( DataArrayGroupbyArithmetic, DatasetGroupbyArithmetic, ) from xarray.core import dtypes, duck_array_ops, nputils from xarray.core._aggregations import ( DataArrayGroupByAggregations, DatasetGroupByAggregations, ) from xarray.core.common import ( ImplementsArrayReduce, ImplementsDatasetReduce, _is_numeric_aggregatable_dtype, ) from xarray.core.coordinates import Coordinates, coordinates_from_variable from xarray.core.duck_array_ops import where from xarray.core.formatting import format_array_flat from xarray.core.indexes import ( PandasMultiIndex, filter_indexes_from_coords, ) from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Dims, QuantileMethods, T_DataArray, T_DataWithCoords, T_Xarray, ) from xarray.core.utils import ( FrozenMappingWarningOnValuesAccess, contains_only_chunked_or_numpy, either_dict_or_kwargs, emit_user_level_warning, hashable, is_scalar, maybe_wrap_array, module_available, peek_at, ) from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align, broadcast from xarray.structure.concat import concat from xarray.structure.merge import merge_coords if TYPE_CHECKING: from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( GroupIndex, GroupIndices, GroupInput, GroupKey, T_Chunks, ) from xarray.core.utils import Frozen from xarray.groupers import EncodedGroups, Grouper def check_reduce_dims(reduce_dims, dimensions): if reduce_dims is not ...: if is_scalar(reduce_dims): reduce_dims = [reduce_dims] if any(dim not in dimensions for dim in reduce_dims): raise ValueError( f"cannot reduce over dimensions {reduce_dims!r}. expected either '...' " f"to reduce over all dimensions or one or more of {dimensions!r}. " f"Alternatively, install the `flox` package. 
" ) def _codes_to_group_indices(codes: np.ndarray, N: int) -> GroupIndices: """Converts integer codes for groups to group indices.""" assert codes.ndim == 1 groups: GroupIndices = tuple([] for _ in range(N)) for n, g in enumerate(codes): if g >= 0: groups[g].append(n) return groups def _dummy_copy(xarray_obj): from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if isinstance(xarray_obj, Dataset): res = Dataset( { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.data_vars.items() }, { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.coords.items() if k not in xarray_obj.dims }, xarray_obj.attrs, ) elif isinstance(xarray_obj, DataArray): res = DataArray( dtypes.get_fill_value(xarray_obj.dtype), { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.coords.items() if k not in xarray_obj.dims }, dims=[], name=xarray_obj.name, attrs=xarray_obj.attrs, ) else: # pragma: no cover raise AssertionError return res def _is_one_or_none(obj) -> bool: return obj == 1 or obj is None def _consolidate_slices(slices: list[slice]) -> list[slice]: """Consolidate adjacent slices in a list of slices.""" result: list[slice] = [] last_slice = slice(None) for slice_ in slices: if not isinstance(slice_, slice): raise ValueError(f"list element is not a slice: {slice_!r}") if ( result and last_slice.stop == slice_.start and _is_one_or_none(last_slice.step) and _is_one_or_none(slice_.step) ): last_slice = slice(last_slice.start, slice_.stop, slice_.step) result[-1] = last_slice else: result.append(slice_) last_slice = slice_ return result def _inverse_permutation_indices(positions, N: int | None = None) -> np.ndarray | None: """Like inverse_permutation, but also handles slices. Parameters ---------- positions : list of ndarray or slice If slice objects, all are assumed to be slices. Returns ------- np.ndarray of indices or None, if no permutation is necessary. """ if not positions: return None if isinstance(positions[0], slice): positions = _consolidate_slices(positions) if positions == slice(None): return None positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions] newpositions = nputils.inverse_permutation(np.concatenate(positions), N) return newpositions[newpositions != -1] class _DummyGroup(Generic[T_Xarray]): """Class for keeping track of grouped dimensions without coordinates. Should not be user visible. """ __slots__ = ("coords", "dataarray", "name", "size") def __init__(self, obj: T_Xarray, name: Hashable, coords) -> None: self.name = name self.coords = coords self.size = obj.sizes[name] @property def dims(self) -> tuple[Hashable]: return (self.name,) @property def ndim(self) -> Literal[1]: return 1 @property def values(self) -> range: return range(self.size) @property def data(self) -> np.ndarray: return np.arange(self.size, dtype=int) def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: if copy is False: raise NotImplementedError(f"An array copy is necessary, got {copy = }.") return np.arange(self.size) @property def shape(self) -> tuple[int, ...]: return (self.size,) @property def attrs(self) -> dict: return {} def __getitem__(self, key): if isinstance(key, tuple): (key,) = key return self.values[key] def to_index(self) -> pd.Index: # could be pd.RangeIndex? 
return pd.Index(np.arange(self.size)) def copy(self, deep: bool = True, data: Any = None): raise NotImplementedError def to_dataarray(self) -> DataArray: from xarray.core.dataarray import DataArray return DataArray( data=self.data, dims=(self.name,), coords=self.coords, name=self.name ) def to_array(self) -> DataArray: """Deprecated version of to_dataarray.""" return self.to_dataarray() T_Group = Union["T_DataArray", _DummyGroup] def _ensure_1d( group: T_Group, obj: T_DataWithCoords ) -> tuple[ T_Group, T_DataWithCoords, Hashable | None, list[Hashable], ]: # 1D cases: do nothing if isinstance(group, _DummyGroup) or group.ndim == 1: return group, obj, None, [] from xarray.core.dataarray import DataArray if isinstance(group, DataArray): for dim in set(group.dims) - set(obj.dims): obj = obj.expand_dims(dim) # try to stack the dims of the group into a single dim orig_dims = group.dims stacked_dim = "stacked_" + "_".join(map(str, orig_dims)) # these dimensions get created by the stack operation inserted_dims = [dim for dim in group.dims if dim not in group.coords] # `newgroup` construction is optimized so we don't create an index unnecessarily, # or stack any non-dim coords unnecessarily newgroup = DataArray(group.variable.stack({stacked_dim: orig_dims})) newobj = obj.stack({stacked_dim: orig_dims}) return newgroup, newobj, stacked_dim, inserted_dims raise TypeError(f"group must be DataArray or _DummyGroup, got {type(group)!r}.") @dataclass class ResolvedGrouper(Generic[T_DataWithCoords]): """ Wrapper around a Grouper object. The Grouper object represents an abstract instruction to group an object. The ResolvedGrouper object is a concrete version that contains all the common logic necessary for a GroupBy problem including the intermediates necessary for executing a GroupBy calculation. Specialization to the grouping problem at hand, is accomplished by calling the `factorize` method on the encapsulated Grouper object. This class is private API, while Groupers are public. """ grouper: Grouper group: T_Group obj: T_DataWithCoords eagerly_compute_group: Literal[False] | None = field(repr=False, default=None) # returned by factorize: encoded: EncodedGroups = field(init=False, repr=False) @property def full_index(self) -> pd.Index: return self.encoded.full_index @property def codes(self) -> DataArray: return self.encoded.codes @property def unique_coord(self) -> Variable | _DummyGroup: return self.encoded.unique_coord def __post_init__(self) -> None: # This copy allows the BinGrouper.factorize() method # to update BinGrouper.bins when provided as int, using the output # of pd.cut # We do not want to modify the original object, since the same grouper # might be used multiple times. from xarray.groupers import BinGrouper, UniqueGrouper self.grouper = copy.deepcopy(self.grouper) self.group = _resolve_group(self.obj, self.group) if self.eagerly_compute_group: raise ValueError( f""""Eagerly computing the DataArray you're grouping by ({self.group.name!r}) " has been removed. Please load this array's data manually using `.compute` or `.load`. To intentionally avoid eager loading, either (1) specify `.groupby({self.group.name}=UniqueGrouper(labels=...))` or (2) pass explicit bin edges using ``bins`` or `.groupby({self.group.name}=BinGrouper(bins=...))`; as appropriate.""" ) if self.eagerly_compute_group is not None: emit_user_level_warning( "Passing `eagerly_compute_group` is now deprecated. 
It has no effect.", DeprecationWarning, ) if not isinstance(self.group, _DummyGroup) and is_chunked_array( self.group.variable._data ): # This requires a pass to discover the groups present if isinstance(self.grouper, UniqueGrouper) and self.grouper.labels is None: raise ValueError( "Please pass `labels` to UniqueGrouper when grouping by a chunked array." ) # this requires a pass to compute the bin edges if isinstance(self.grouper, BinGrouper) and isinstance( self.grouper.bins, int ): raise ValueError( "Please pass explicit bin edges to BinGrouper using the ``bins`` kwarg" "when grouping by a chunked array." ) self.encoded = self.grouper.factorize(self.group) @property def name(self) -> Hashable: """Name for the grouped coordinate after reduction.""" # the name has to come from unique_coord because we need `_bins` suffix for BinGrouper (name,) = self.encoded.unique_coord.dims return name @property def size(self) -> int: """Number of groups.""" return len(self) def __len__(self) -> int: """Number of groups.""" return len(self.encoded.full_index) def _parse_group_and_groupers( obj: T_Xarray, group: GroupInput, groupers: dict[str, Grouper], *, eagerly_compute_group: Literal[False] | None, ) -> tuple[ResolvedGrouper, ...]: from xarray.core.dataarray import DataArray from xarray.groupers import Grouper, UniqueGrouper if group is not None and groupers: raise ValueError( "Providing a combination of `group` and **groupers is not supported." ) if group is None and not groupers: raise ValueError("Either `group` or `**groupers` must be provided.") if isinstance(group, np.ndarray | pd.Index): raise TypeError( f"`group` must be a DataArray. Received {type(group).__name__!r} instead" ) if isinstance(group, Grouper): raise TypeError( "Cannot group by a Grouper object. " f"Instead use `.groupby(var_name={type(group).__name__}(...))`. " "You may need to assign the variable you're grouping by as a coordinate using `assign_coords`." ) if isinstance(group, Mapping): grouper_mapping = either_dict_or_kwargs(group, groupers, "groupby") group = None rgroupers: tuple[ResolvedGrouper, ...] if isinstance(group, DataArray | Variable): rgroupers = ( ResolvedGrouper( UniqueGrouper(), group, obj, eagerly_compute_group=eagerly_compute_group ), ) else: if group is not None: if TYPE_CHECKING: assert isinstance(group, str | Sequence) group_iter: Sequence[Hashable] = ( (group,) if isinstance(group, str) else group ) grouper_mapping = {g: UniqueGrouper() for g in group_iter} elif groupers: grouper_mapping = cast("Mapping[Hashable, Grouper]", groupers) rgroupers = tuple( ResolvedGrouper( grouper, group, obj, eagerly_compute_group=eagerly_compute_group ) for group, grouper in grouper_mapping.items() ) return rgroupers def _validate_groupby_squeeze(squeeze: Literal[False]) -> None: # While we don't generally check the type of every arg, passing # multiple dimensions as multiple arguments is common enough, and the # consequences hidden enough (strings evaluate as true) to warrant # checking here. # A future version could make squeeze kwarg only, but would face # backward-compat issues. 
if squeeze is not False: raise TypeError(f"`squeeze` must be False, but {squeeze!r} was supplied.") def _resolve_group( obj: T_DataWithCoords, group: T_Group | Hashable | IndexVariable ) -> T_Group: from xarray.core.dataarray import DataArray error_msg = ( "the group variable's length does not " "match the length of this variable along its " "dimensions" ) newgroup: T_Group if isinstance(group, DataArray): try: align(obj, group, join="exact", copy=False) except ValueError as err: raise ValueError(error_msg) from err newgroup = group.copy(deep=False) newgroup.name = group.name or "group" elif isinstance(group, IndexVariable): # This assumption is built in to _ensure_1d. if group.ndim != 1: raise ValueError( "Grouping by multi-dimensional IndexVariables is not allowed." "Convert to and pass a DataArray instead." ) (group_dim,) = group.dims if len(group) != obj.sizes[group_dim]: raise ValueError(error_msg) newgroup = DataArray(group) else: if not hashable(group): raise TypeError( "`group` must be an xarray.DataArray or the " "name of an xarray variable or dimension. " f"Received {group!r} instead." ) group_da: DataArray = obj[group] if group_da.name not in obj._indexes and group_da.name in obj.dims: # DummyGroups should not appear on groupby results newgroup = _DummyGroup(obj, group_da.name, group_da.coords) else: newgroup = group_da if newgroup.size == 0: raise ValueError(f"{newgroup.name} must not be empty") return newgroup @dataclass class ComposedGrouper: """ Helper class for multi-variable GroupBy. This satisfies the Grouper interface, but is awkward to wrap in ResolvedGrouper. For one, it simply re-infers a new EncodedGroups using known information in existing ResolvedGroupers. So passing in a `group` (hard to define), and `obj` (pointless) is not useful. """ groupers: tuple[ResolvedGrouper, ...] def factorize(self) -> EncodedGroups: from xarray.groupers import EncodedGroups groupers = self.groupers # At this point all arrays have been factorized. codes = tuple(grouper.codes for grouper in groupers) shape = tuple(grouper.size for grouper in groupers) masks = tuple((code == -1) for code in codes) # We broadcast the codes against each other broadcasted_codes = broadcast(*codes) # This fully broadcasted DataArray is used as a template later first_codes = broadcasted_codes[0] # Now we convert to a single variable GroupBy problem _flatcodes = np.ravel_multi_index( tuple(codes.data for codes in broadcasted_codes), shape, mode="wrap" ) # NaNs; as well as values outside the bins are coded by -1 # Restore these after the raveling broadcasted_masks = broadcast(*masks) mask = functools.reduce(np.logical_or, broadcasted_masks) # type: ignore[arg-type] _flatcodes = where(mask.data, -1, _flatcodes) full_index = pd.MultiIndex.from_product( [list(grouper.full_index.values) for grouper in groupers], names=tuple(grouper.name for grouper in groupers), ) if not full_index.is_unique: raise ValueError( "The output index for the GroupBy is non-unique. " "This is a bug in the Grouper provided." ) # This will be unused when grouping by dask arrays, so skip.. if not is_chunked_array(_flatcodes): # Constructing an index from the product is wrong when there are missing groups # (e.g. binning, resampling). Account for that now. 
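# Illustrative sketch of the raveling above, which maps each tuple of
# per-grouper codes to a single flat code (e.g. two groupers with 2 and 3
# groups respectively):
# >>> import numpy as np
# >>> np.ravel_multi_index(([0, 1, 1], [2, 0, 1]), (2, 3), mode="wrap")
# array([2, 3, 4])
# Positions where any grouper produced -1 (NaN or out-of-bin) are reset to -1
# via the broadcast mask, and only codes that actually occur are kept when
# building ``midx`` below.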
midx = full_index[np.sort(pd.unique(_flatcodes[~mask]))] group_indices = _codes_to_group_indices(_flatcodes.ravel(), len(full_index)) else: midx = full_index group_indices = None dim_name = "stacked_" + "_".join(str(grouper.name) for grouper in groupers) coords = Coordinates.from_pandas_multiindex(midx, dim=dim_name) for grouper in groupers: coords.variables[grouper.name].attrs = grouper.group.attrs return EncodedGroups( codes=first_codes.copy(data=_flatcodes), full_index=full_index, group_indices=group_indices, unique_coord=Variable(dims=(dim_name,), data=midx.values), coords=coords, ) class GroupBy(Generic[T_Xarray]): """A object that implements the split-apply-combine pattern. Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over (unique_value, grouped_array) pairs, but the main way to interact with a groupby object are with the `apply` or `reduce` methods. You can also directly call numpy methods like `mean` or `std`. You should create a GroupBy object by using the `DataArray.groupby` or `Dataset.groupby` methods. See Also -------- Dataset.groupby DataArray.groupby """ __slots__ = ( "_by_chunked", "_codes", "_dims", "_group_dim", # cached properties "_groups", "_inserted_dims", "_len", "_obj", # Save unstacked object for flox "_original_obj", "_restore_coord_dims", "_sizes", "_stacked_dim", "encoded", # stack nD vars "group1d", "groupers", ) _obj: T_Xarray groupers: tuple[ResolvedGrouper, ...] _restore_coord_dims: bool _original_obj: T_Xarray _group_indices: GroupIndices _codes: tuple[DataArray, ...] _group_dim: Hashable _by_chunked: bool _groups: dict[GroupKey, GroupIndex] | None _dims: tuple[Hashable, ...] | Frozen[Hashable, int] | None _sizes: Mapping[Hashable, int] | None _len: int # _ensure_1d: group1d: T_Group _stacked_dim: Hashable | None _inserted_dims: list[Hashable] encoded: EncodedGroups def __init__( self, obj: T_Xarray, groupers: tuple[ResolvedGrouper, ...], restore_coord_dims: bool = True, ) -> None: """Create a GroupBy object Parameters ---------- obj : Dataset or DataArray Object to group. grouper : Grouper Grouper object restore_coord_dims : bool, default: True If True, also restore the dimension order of multi-dimensional coordinates. """ self._original_obj = obj self._restore_coord_dims = restore_coord_dims self.groupers = groupers if len(groupers) == 1: (grouper,) = groupers self.encoded = grouper.encoded else: if any( isinstance(obj._indexes.get(grouper.name, None), PandasMultiIndex) for grouper in groupers ): raise NotImplementedError( "Grouping by multiple variables, one of which " "wraps a Pandas MultiIndex, is not supported yet." ) self.encoded = ComposedGrouper(groupers).factorize() # specification for the groupby operation # TODO: handle obj having variables that are not present on any of the groupers # simple broadcasting fails for ExtensionArrays. codes = self.encoded.codes self._by_chunked = is_chunked_array(codes._variable._data) if not self._by_chunked: (self.group1d, self._obj, self._stacked_dim, self._inserted_dims) = ( _ensure_1d(group=codes, obj=obj) ) (self._group_dim,) = self.group1d.dims else: self.group1d = None # This transpose preserves dim order behaviour self._obj = obj.transpose(..., *codes.dims) self._stacked_dim = None self._inserted_dims = [] self._group_dim = None # cached attributes self._groups = None self._dims = None self._sizes = None self._len = len(self.encoded.full_index) @property def sizes(self) -> Mapping[Hashable, int]: """Ordered mapping from dimension names to lengths. Immutable. 
See Also -------- DataArray.sizes Dataset.sizes """ if self._sizes is None: index = self.encoded.group_indices[0] self._sizes = self._obj.isel({self._group_dim: index}).sizes return self._sizes def shuffle_to_chunks(self, chunks: T_Chunks = None) -> T_Xarray: """ Sort or "shuffle" the underlying object. "Shuffle" means the object is sorted so that all group members occur sequentially, in the same chunk. Multiple groups may occur in the same chunk. This method is particularly useful for chunked arrays (e.g. dask, cubed). particularly when you need to map a function that requires all members of a group to be present in a single chunk. For chunked array types, the order of appearance is not guaranteed, but will depend on the input chunking. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or tuple of int, optional How to adjust chunks along dimensions not present in the array being grouped by. Returns ------- DataArrayGroupBy or DatasetGroupBy Examples -------- >>> import dask.array >>> da = xr.DataArray( ... dims="x", ... data=dask.array.arange(10, chunks=3), ... coords={"x": [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]}, ... name="a", ... ) >>> shuffled = da.groupby("x").shuffle_to_chunks() >>> shuffled Size: 80B dask.array Coordinates: * x (x) int64 80B 0 1 1 1 2 2 2 3 3 3 >>> shuffled.groupby("x").quantile(q=0.5).compute() Size: 32B array([9., 3., 4., 5.]) Coordinates: * x (x) int64 32B 0 1 2 3 quantile float64 8B 0.5 See Also -------- dask.dataframe.DataFrame.shuffle dask.array.shuffle """ self._raise_if_by_is_chunked() return self._shuffle_obj(chunks) def _shuffle_obj(self, chunks: T_Chunks) -> T_Xarray: from xarray.core.dataarray import DataArray was_array = isinstance(self._obj, DataArray) as_dataset = self._obj._to_temp_dataset() if was_array else self._obj for grouper in self.groupers: if grouper.name not in as_dataset._variables: as_dataset.coords[grouper.name] = grouper.group shuffled = as_dataset._shuffle( dim=self._group_dim, indices=self.encoded.group_indices, chunks=chunks ) unstacked: Dataset = self._maybe_unstack(shuffled) if was_array: return self._obj._from_temp_dataset(unstacked) else: return unstacked # type: ignore[return-value] def map( self, func: Callable, args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> T_Xarray: raise NotImplementedError() def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> T_Xarray: raise NotImplementedError() def _raise_if_by_is_chunked(self): if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Either load the array in to memory prior to grouping using .load or .compute, " " or explore another way of applying your function, " "potentially using the `flox` package." ) def _raise_if_not_single_group(self): if len(self.groupers) != 1: raise NotImplementedError( "This method is not supported for grouping by multiple variables yet." ) @property def groups(self) -> dict[GroupKey, GroupIndex]: """ Mapping from group labels to indices. The indices can be used to index the underlying object. 
""" # provided to mimic pandas.groupby if self._groups is None: self._groups = dict( zip( self.encoded.unique_coord.data, tuple(g for g in self.encoded.group_indices if g), strict=True, ) ) return self._groups def __getitem__(self, key: GroupKey) -> T_Xarray: """ Get DataArray or Dataset corresponding to a particular group label. """ self._raise_if_by_is_chunked() return self._obj.isel({self._group_dim: self.groups[key]}) def __len__(self) -> int: return self._len def __iter__(self) -> Iterator[tuple[GroupKey, T_Xarray]]: return zip(self.encoded.unique_coord.data, self._iter_grouped(), strict=True) def __repr__(self) -> str: text = ( f"<{self.__class__.__name__}, " f"grouped over {len(self.groupers)} grouper(s)," f" {self._len} groups in total:" ) for grouper in self.groupers: coord = grouper.unique_coord labels = ", ".join(format_array_flat(coord, 30).split()) text += ( f"\n {grouper.name!r}: {type(grouper.grouper).__name__}({grouper.group.name!r}), " f"{coord.size}/{grouper.full_index.size} groups with labels {labels}" ) return text + ">" def _iter_grouped(self) -> Iterator[T_Xarray]: """Iterate over each element in this group""" self._raise_if_by_is_chunked() for indices in self.encoded.group_indices: if indices: yield self._obj.isel({self._group_dim: indices}) def _infer_concat_args(self, applied_example): if self._group_dim in applied_example.dims: coord = self.group1d positions = self.encoded.group_indices else: coord = self.encoded.unique_coord positions = None (dim,) = coord.dims return dim, positions def _binary_op(self, other, f, reflexive=False): from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset g = f if not reflexive else lambda x, y: f(y, x) self._raise_if_not_single_group() (grouper,) = self.groupers obj = self._original_obj name = grouper.name group = grouper.group codes = self.encoded.codes dims = group.dims if isinstance(group, _DummyGroup): group = coord = group.to_dataarray() else: coord = grouper.unique_coord if isinstance(coord, Variable): assert coord.ndim == 1 (coord_dim,) = coord.dims # TODO: explicitly create Index here coord = DataArray(coord, coords={coord_dim: coord.data}) if not isinstance(other, Dataset | DataArray): raise TypeError( "GroupBy objects only support binary ops " "when the other argument is a Dataset or " "DataArray" ) if name not in other.dims: raise ValueError( "incompatible dimensions for a grouped " f"binary operation: the group variable {name!r} " "is not a dimension on the other argument " f"with dimensions {other.dims!r}" ) # Broadcast out scalars for backwards compatibility # TODO: get rid of this when fixing GH2145 for var in other.coords: if other[var].ndim == 0: other[var] = ( other[var].drop_vars(var).expand_dims({name: other.sizes[name]}) ) # need to handle NaNs in group or elements that don't belong to any bins mask = codes == -1 if mask.any(): obj = obj.where(~mask, drop=True) group = group.where(~mask, drop=True) codes = codes.where(~mask, drop=True).astype(int) # if other is dask-backed, that's a hint that the # "expanded" dataset is too big to hold in memory. # this can be the case when `other` was read from disk # and contains our lazy indexing classes # We need to check for dask-backed Datasets # so utils.is_duck_dask_array does not work for this check if obj.chunks and not other.chunks: # TODO: What about datasets with some dask vars, and others not? 
# This handles dims other than `name`` chunks = {k: v for k, v in obj.chunksizes.items() if k in other.dims} # a chunk size of 1 seems reasonable since we expect individual elements of # other to be repeated multiple times across the reduced dimension(s) chunks[name] = 1 other = other.chunk(chunks) # codes are defined for coord, so we align `other` with `coord` # before indexing other, _ = align(other, coord, join="right", copy=False) expanded = other.isel({name: codes}) result = g(obj, expanded) if group.ndim > 1: # backcompat: # TODO: get rid of this when fixing GH2145 for var in set(obj.coords) - set(obj.xindexes): if set(obj[var].dims) < set(group.dims): result[var] = obj[var].reset_coords(drop=True).broadcast_like(group) if isinstance(result, Dataset) and isinstance(obj, Dataset): for var in set(result): for d in dims: if d not in obj[var].dims: result[var] = result[var].transpose(d, ...) return result def _restore_dim_order(self, stacked): raise NotImplementedError def _maybe_reindex(self, combined): """Reindexing is needed in two cases: 1. Our index contained empty groups (e.g., from a resampling or binning). If we reduced on that dimension, we want to restore the full index. 2. We use a MultiIndex for multi-variable GroupBy. The MultiIndex stores each level's labels in sorted order which are then assigned on unstacking. So we need to restore the correct order here. """ has_missing_groups = ( self.encoded.unique_coord.size != self.encoded.full_index.size ) indexers = {} for grouper in self.groupers: index = combined._indexes.get(grouper.name, None) if (has_missing_groups and index is not None) or ( len(self.groupers) > 1 and not isinstance(grouper.full_index, pd.RangeIndex) and not index.index.equals(grouper.full_index) ): indexers[grouper.name] = grouper.full_index if indexers: combined = combined.reindex(**indexers) return combined def _maybe_unstack(self, obj): """This gets called if we are applying on an array with a multidimensional group.""" from xarray.groupers import UniqueGrouper stacked_dim = self._stacked_dim if stacked_dim is not None and stacked_dim in obj.dims: inserted_dims = self._inserted_dims obj = obj.unstack(stacked_dim) for dim in inserted_dims: if dim in obj.coords: del obj.coords[dim] obj._indexes = filter_indexes_from_coords(obj._indexes, set(obj.coords)) elif len(self.groupers) > 1: # TODO: we could clean this up by setting the appropriate `stacked_dim` # and `inserted_dims` # if multiple groupers all share the same single dimension, then # we don't stack/unstack. Do that manually now. 
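# Illustrative case (hypothetical names): ``ds.groupby(["lat", "lon"]).mean()``
# where both groupers act on the same dimension; the combined result carries a
# single MultiIndexed group dimension (built in ComposedGrouper.factorize),
# which is unstacked below into one dimension per grouper.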
dims_to_unstack = self.encoded.unique_coord.dims if all(dim in obj.dims for dim in dims_to_unstack): obj = obj.unstack(*dims_to_unstack) to_drop = [ grouper.name for grouper in self.groupers if isinstance(grouper.group, _DummyGroup) and isinstance(grouper.grouper, UniqueGrouper) ] obj = obj.drop_vars(to_drop) return obj def _flox_reduce( self, dim: Dims, keep_attrs: bool | None = None, **kwargs: Any, ) -> T_Xarray: """Adaptor function that translates our groupby API to that of flox.""" import flox from flox.xarray import xarray_reduce from xarray.core.dataset import Dataset obj = self._original_obj variables = ( {k: v.variable for k, v in obj.data_vars.items()} if isinstance(obj, Dataset) # type: ignore[redundant-expr] # seems to be a mypy bug else obj._coords ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if Version(flox.__version__) < Version("0.9") and not self._by_chunked: # preserve current strategy (approximately) for dask groupby # on older flox versions to prevent surprises. # flox >=0.9 will choose this on its own. kwargs.setdefault("method", "cohorts") midx_grouping_vars: tuple[Hashable, ...] = () for grouper in self.groupers: name = grouper.name maybe_midx = obj._indexes.get(name, None) if isinstance(maybe_midx, PandasMultiIndex): midx_grouping_vars += tuple(maybe_midx.index.names) + (name,) # For datasets, running a numeric-only reduction on non-numeric # variable will just drop it. non_numeric: dict[Hashable, Variable] if kwargs.pop("numeric_only", None): non_numeric = { name: var for name, var in variables.items() if ( not _is_numeric_aggregatable_dtype(var) # this avoids dropping any levels of a MultiIndex, which raises # a warning and name not in midx_grouping_vars and name not in obj.dims ) } else: non_numeric = {} if "min_count" in kwargs: if kwargs["func"] not in ["sum", "prod"]: raise TypeError("Received an unexpected keyword argument 'min_count'") elif kwargs["min_count"] is None: # set explicitly to avoid unnecessarily accumulating count kwargs["min_count"] = 0 parsed_dim: tuple[Hashable, ...] if isinstance(dim, str): parsed_dim = (dim,) elif dim is None: parsed_dim_list = list() # preserve order for dim_ in itertools.chain( *(grouper.codes.dims for grouper in self.groupers) ): if dim_ not in parsed_dim_list: parsed_dim_list.append(dim_) parsed_dim = tuple(parsed_dim_list) elif dim is ...: parsed_dim = tuple(obj.dims) else: parsed_dim = tuple(dim) # Do this so we raise the same error message whether flox is present or not. # Better to control it here than in flox. for grouper in self.groupers: if any( d not in grouper.codes.dims and d not in obj.dims for d in parsed_dim ): raise ValueError(f"cannot reduce over dimensions {dim}.") has_missing_groups = ( self.encoded.unique_coord.size != self.encoded.full_index.size ) if self._by_chunked or has_missing_groups or kwargs.get("min_count", 0) > 0: # Xarray *always* returns np.nan when there are no observations in a group, # We can fake that here by forcing min_count=1 when it is not set. # This handles boolean reductions, and count # See GH8090, GH9398 # Note that `has_missing_groups=False` when `self._by_chunked is True`. # We *choose* to always do the masking, so that behaviour is predictable # in some way. The real solution is to expose fill_value as a kwarg, # and set appropriate defaults :/. 
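# Concrete illustration: with ``func="sum"`` an empty group would otherwise be
# filled with 0 by flox; forcing ``fill_value=np.nan`` and ``min_count=1`` below
# makes it come back as NaN, matching the non-flox code path.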
kwargs.setdefault("fill_value", np.nan) kwargs.setdefault("min_count", 1) # pass RangeIndex as a hint to flox that `by` is already factorized expected_groups = tuple( pd.RangeIndex(len(grouper)) for grouper in self.groupers ) codes = tuple(g.codes for g in self.groupers) result = xarray_reduce( obj.drop_vars(non_numeric.keys()), *codes, dim=parsed_dim, expected_groups=expected_groups, isbin=False, keep_attrs=keep_attrs, **kwargs, ) # we did end up reducing over dimension(s) that are # in the grouped variable group_dims = set(grouper.group.dims) new_coords = [] to_drop = [] if group_dims & set(parsed_dim): for grouper in self.groupers: output_index = grouper.full_index if isinstance(output_index, pd.RangeIndex): # flox always assigns an index so we must drop it here if we don't need it. to_drop.append(grouper.name) continue # TODO: We can't simply use `self.encoded.coords` here because it corresponds to `unique_coord`, # NOT `full_index`. We would need to construct a new Coordinates object, that corresponds to `full_index`. new_coords.append( # Using IndexVariable here ensures we reconstruct PandasMultiIndex with # all associated levels properly. coordinates_from_variable( IndexVariable( dims=grouper.name, data=output_index, attrs=grouper.codes.attrs, ) ) ) result = result.assign_coords( Coordinates._construct_direct(*merge_coords(new_coords)) ).drop_vars(to_drop) # broadcast any non-dim coord variables that don't # share all dimensions with the grouper result_variables = ( result._variables if isinstance(result, Dataset) else result._coords ) to_broadcast: dict[Hashable, Variable] = {} for name, var in variables.items(): dims_set = set(var.dims) if ( dims_set <= set(parsed_dim) and (dims_set & set(result.dims)) and name not in result_variables ): to_broadcast[name] = var for name, var in to_broadcast.items(): if new_dims := tuple(d for d in parsed_dim if d not in var.dims): new_sizes = tuple( result.sizes.get(dim, obj.sizes.get(dim)) for dim in new_dims ) result[name] = var.set_dims( new_dims + var.dims, new_sizes + var.shape ).transpose(..., *result.dims) if not isinstance(result, Dataset): # only restore dimension order for arrays result = self._restore_dim_order(result) return result def fillna(self, value: Any) -> T_Xarray: """Fill missing values in this object by group. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value Used to fill all matching missing values by group. Needs to be of a valid type for the wrapped object's fillna method. Returns ------- same type as the grouped object See Also -------- Dataset.fillna DataArray.fillna """ return ops.fillna(self, value) def quantile( self, q: ArrayLike, dim: Dims = None, *, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> T_Xarray: """Compute the qth quantile over each array in the groups and concatenate them together into a new array. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. Defaults to the grouped dimension. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. 
The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool or None, default: None If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile. In either case a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile DataArray.quantile Examples -------- >>> da = xr.DataArray( ... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]], ... coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]}, ... dims=("x", "y"), ... ) >>> ds = xr.Dataset({"a": da}) >>> da.groupby("x").quantile(0) Size: 64B array([[0.7, 4.2, 0.7, 1.5], [6.5, 7.3, 2.6, 1.9]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 32B 1 1 2 2 quantile float64 8B 0.0 >>> ds.groupby("y").quantile(0, dim=...) Size: 40B Dimensions: (y: 2) Coordinates: * y (y) int64 16B 1 2 quantile float64 8B 0.0 Data variables: a (y) float64 16B 0.7 0.7 >>> da.groupby("x").quantile([0, 0.5, 1]) Size: 192B array([[[0.7 , 1. , 1.3 ], [4.2 , 6.3 , 8.4 ], [0.7 , 5.05, 9.4 ], [1.5 , 4.2 , 6.9 ]], [[6.5 , 6.5 , 6.5 ], [7.3 , 7.3 , 7.3 ], [2.6 , 2.6 , 2.6 ], [1.9 , 1.9 , 1.9 ]]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 32B 1 1 2 2 * quantile (quantile) float64 24B 0.0 0.5 1.0 >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...) Size: 88B Dimensions: (y: 2, quantile: 3) Coordinates: * y (y) int64 16B 1 2 * quantile (quantile) float64 24B 0.0 0.5 1.0 Data variables: a (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4 References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ # Dataset.quantile does this, do it for flox to ensure same output. q = np.asarray(q, dtype=np.float64) if ( method == "linear" and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) and module_available("flox", minversion="0.9.4") ): result = self._flox_reduce( func="quantile", q=q, dim=dim, keep_attrs=keep_attrs, skipna=skipna ) return result else: if dim is None: dim = (self._group_dim,) return self.map( self._obj.__class__.quantile, shortcut=False, q=q, dim=dim or self._group_dim, method=method, keep_attrs=keep_attrs, skipna=skipna, interpolation=interpolation, ) def where(self, cond, other=dtypes.NA) -> T_Xarray: """Return elements from `self` or `other` depending on `cond`. 
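A minimal illustrative sketch (hypothetical names): keep only the values that belong to groups whose mean is positive; ``cond`` here varies over the group labels and is applied group-wise, as in other grouped binary operations::

    good_groups = da.groupby("x").mean() > 0
    da.groupby("x").where(good_groups)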
Parameters ---------- cond : DataArray or Dataset Locations at which to preserve this objects values. dtypes have to be `bool` other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, inserts missing values. Returns ------- same type as the grouped object See Also -------- Dataset.where """ return ops.where_method(self, cond, other) def _first_or_last( self, op: Literal["first" | "last"], skipna: bool | None, keep_attrs: bool | None, ): if all( isinstance(maybe_slice, slice) and (maybe_slice.stop == maybe_slice.start + 1) for maybe_slice in self.encoded.group_indices ): # NB. this is currently only used for reductions along an existing # dimension return self._obj if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if ( module_available("flox", minversion="0.10.0") and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): import flox.xrdtypes result = self._flox_reduce( dim=None, func=op, skipna=skipna, keep_attrs=keep_attrs, fill_value=flox.xrdtypes.NA, ) else: result = self.reduce( getattr(duck_array_ops, op), dim=[self._group_dim], skipna=skipna, keep_attrs=keep_attrs, ) return result def first( self, skipna: bool | None = None, keep_attrs: bool | None = None ) -> T_Xarray: """ Return the first element of each group along the group dimension Parameters ---------- skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. """ return self._first_or_last("first", skipna, keep_attrs) def last( self, skipna: bool | None = None, keep_attrs: bool | None = None ) -> T_Xarray: """ Return the last element of each group along the group dimension Parameters ---------- skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. """ return self._first_or_last("last", skipna, keep_attrs) def assign_coords(self, coords=None, **coords_kwargs): """Assign coordinates by group. See Also -------- Dataset.assign_coords Dataset.swap_dims """ coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords") return self.map(lambda ds: ds.assign_coords(**coords_kwargs)) def _maybe_reorder(xarray_obj, dim, positions, N: int | None): order = _inverse_permutation_indices(positions, N) if order is None or len(order) != xarray_obj.sizes[dim]: return xarray_obj else: return xarray_obj[{dim: order}] class DataArrayGroupByBase(GroupBy["DataArray"], DataArrayGroupbyArithmetic): """GroupBy object specialized to grouping DataArray objects""" __slots__ = () _dims: tuple[Hashable, ...] 
| None @property def dims(self) -> tuple[Hashable, ...]: self._raise_if_by_is_chunked() if self._dims is None: index = self.encoded.group_indices[0] self._dims = self._obj.isel({self._group_dim: index}).dims return self._dims def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata """ self._raise_if_by_is_chunked() var = self._obj.variable for _idx, indices in enumerate(self.encoded.group_indices): if indices: yield var[{self._group_dim: indices}] def _concat_shortcut(self, applied, dim, positions=None): # nb. don't worry too much about maintaining this method -- it does # speed things up, but it's not very interpretable and there are much # faster alternatives (e.g., doing the grouped aggregation in a # compiled language) # TODO: benbovy - explicit indexes: this fast implementation doesn't # create an explicit index for the stacked dim coordinate stacked = Variable.concat(applied, dim, shortcut=True) reordered = _maybe_reorder(stacked, dim, positions, N=self.group1d.size) return self._obj._replace_maybe_drop_dims(reordered) def _restore_dim_order(self, stacked: DataArray) -> DataArray: def lookup_order(dimension): for grouper in self.groupers: if dimension == grouper.name and grouper.group.ndim == 1: (dimension,) = grouper.group.dims if dimension in self._obj.dims: axis = self._obj.get_axis_num(dimension) else: axis = 1e6 # some arbitrarily high value return axis new_order = sorted(stacked.dims, key=lookup_order) stacked = stacked.transpose( *new_order, transpose_coords=self._restore_coord_dims ) return stacked def map( self, func: Callable[..., DataArray], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> DataArray: """Apply a function to each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). *args : tuple, optional Positional arguments passed to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray The result of splitting, applying and combining this array. """ grouped = self._iter_grouped_shortcut() if shortcut else self._iter_grouped() applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped) return self._combine(applied, shortcut=shortcut) def apply(self, func, shortcut=False, args=(), **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataArrayGroupBy.map """ warnings.warn( "GroupBy.apply may be deprecated in the future. 
Using GroupBy.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, shortcut=shortcut, args=args, **kwargs) def _combine(self, applied, shortcut=False): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) dim, positions = self._infer_concat_args(applied_example) if shortcut: combined = self._concat_shortcut(applied, dim, positions) else: combined = concat( applied, dim, data_vars="all", coords="different", compat="equals", join="outer", ) combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size) if isinstance(combined, type(self._obj)): # only restore dimension order for arrays combined = self._restore_dim_order(combined) # assign coord and index when the applied function does not return that coord if dim not in applied_example.dims: combined = combined.assign_coords(self.encoded.coords) combined = self._maybe_unstack(combined) combined = self._maybe_reindex(combined) return combined def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. If None, apply over the groupby dimension, if "..." apply over all dimensions. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' and 'axis' arguments can be supplied. If neither are supplied, then `func` is calculated over all dimension for each group item. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Try installing the `flox` package if you are using one of the standard " "reductions (e.g. `mean`). " ) if dim is None: dim = [self._group_dim] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) def reduce_array(ar: DataArray) -> DataArray: return ar.reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, **kwargs, ) check_reduce_dims(dim, self.dims) return self.map(reduce_array, shortcut=shortcut) class DataArrayGroupBy( DataArrayGroupByBase, DataArrayGroupByAggregations, ImplementsArrayReduce, ): __slots__ = () class DatasetGroupByBase(GroupBy["Dataset"], DatasetGroupbyArithmetic): __slots__ = () _dims: Frozen[Hashable, int] | None @property def dims(self) -> Frozen[Hashable, int]: self._raise_if_by_is_chunked() if self._dims is None: index = self.encoded.group_indices[0] self._dims = self._obj.isel({self._group_dim: index}).dims return FrozenMappingWarningOnValuesAccess(self._dims) def map( self, func: Callable[..., Dataset], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> Dataset: """Apply a function to each Dataset in the group and concatenate them together into a new Dataset. 
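A minimal illustrative sketch (the ``letters`` coordinate is hypothetical), standardizing the variables within each group::

    ds.groupby("letters").map(lambda g: (g - g.mean()) / g.std())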
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the datasets. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped item after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments to pass to `func`. **kwargs Used to call `func(ds, **kwargs)` for each sub-dataset `ar`. Returns ------- applied : Dataset The result of splitting, applying and combining this dataset. """ # ignore shortcut if set (for now) applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped()) return self._combine(applied) def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DatasetGroupBy.map """ warnings.warn( "GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, shortcut=shortcut, args=args, **kwargs) def _combine(self, applied): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) dim, positions = self._infer_concat_args(applied_example) combined = concat( applied, dim, data_vars="all", coords="different", compat="equals", join="outer", ) combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size) # assign coord when the applied function does not return that coord if dim not in applied_example.dims: combined = combined.assign_coords(self.encoded.coords) combined = self._maybe_unstack(combined) combined = self._maybe_reindex(combined) return combined def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> Dataset: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : ..., str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default apply over the groupby dimension, with "..." apply over all dimensions. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' and 'axis' arguments can be supplied. If neither are supplied, then `func` is calculated over all dimension for each group item. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Array with summarized data and the indicated dimension(s) removed. """ if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Try installing the `flox` package if you are using one of the standard " "reductions (e.g. `mean`). 
" ) if dim is None: dim = [self._group_dim] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) def reduce_dataset(ds: Dataset) -> Dataset: return ds.reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, **kwargs, ) check_reduce_dims(dim, self.dims) return self.map(reduce_dataset) def assign(self, **kwargs: Any) -> Dataset: """Assign data variables by group. See Also -------- Dataset.assign """ return self.map(lambda ds: ds.assign(**kwargs)) class DatasetGroupBy( DatasetGroupByBase, DatasetGroupByAggregations, ImplementsDatasetReduce, ): __slots__ = () xarray-2025.12.0/xarray/core/indexes.py000066400000000000000000002337211511464676000176220ustar00rootroot00000000000000from __future__ import annotations import collections.abc import copy import inspect from collections import defaultdict from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload import numpy as np import pandas as pd from xarray.core import formatting, nputils, utils from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( CoordinateTransformIndexingAdapter, IndexSelResult, PandasIndexingAdapter, PandasMultiIndexingAdapter, ) from xarray.core.utils import ( Frozen, emit_user_level_warning, get_valid_numpy_dtype, is_allowed_extension_array_dtype, is_dict_like, is_scalar, ) if TYPE_CHECKING: from xarray.core.types import ErrorOptions, JoinOptions, Self from xarray.core.variable import Variable IndexVars = dict[Any, "Variable"] class Index: """ Base class inherited by all xarray-compatible indexes. Do not use this class directly for creating index objects. Xarray indexes are created exclusively from subclasses of ``Index``, mostly via Xarray's public API like ``Dataset.set_xindex``. Every subclass must at least implement :py:meth:`Index.from_variables`. The (re)implementation of the other methods of this base class is optional but mostly required in order to support operations relying on indexes such as label-based selection or alignment. The ``Index`` API closely follows the :py:meth:`Dataset` and :py:meth:`DataArray` API, e.g., for an index to support ``.sel()`` it needs to implement :py:meth:`Index.sel`, to support ``.stack()`` and ``.unstack()`` it needs to implement :py:meth:`Index.stack` and :py:meth:`Index.unstack`, etc. When a method is not (re)implemented, depending on the case the corresponding operation on a :py:meth:`Dataset` or :py:meth:`DataArray` either will raise a ``NotImplementedError`` or will simply drop/pass/copy the index from/to the result. Do not use this class directly for creating index objects. """ @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> Self: """Create a new index object from one or more coordinate variables. This factory method must be implemented in all subclasses of Index. The coordinate variables may be passed here in an arbitrary number and order and each with arbitrary dimensions. It is the responsibility of the index to check the consistency and validity of these coordinates. Parameters ---------- variables : dict-like Mapping of :py:class:`Variable` objects holding the coordinate labels to index. Returns ------- index : Index A new Index object. 
""" raise NotImplementedError() @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: """Create a new index by concatenating one or more indexes of the same type. Implementation is optional but required in order to support ``concat``. Otherwise it will raise an error if the index needs to be updated during the operation. Parameters ---------- indexes : sequence of Index objects Indexes objects to concatenate together. All objects must be of the same type. dim : Hashable Name of the dimension to concatenate along. positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. Returns ------- index : Index A new Index object. """ raise NotImplementedError() @classmethod def stack(cls, variables: Mapping[Any, Variable], dim: Hashable) -> Self: """Create a new index by stacking coordinate variables into a single new dimension. Implementation is optional but required in order to support ``stack``. Otherwise it will raise an error when trying to pass the Index subclass as argument to :py:meth:`Dataset.stack`. Parameters ---------- variables : dict-like Mapping of :py:class:`Variable` objects to stack together. dim : Hashable Name of the new, stacked dimension. Returns ------- index A new Index object. """ raise NotImplementedError( f"{cls!r} cannot be used for creating an index of stacked coordinates" ) def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]: """Unstack a (multi-)index into multiple (single) indexes. Implementation is optional but required in order to support unstacking the coordinates from which this index has been built. Returns ------- indexes : tuple A 2-length tuple where the 1st item is a dictionary of unstacked Index objects and the 2nd item is a :py:class:`pandas.MultiIndex` object used to unstack unindexed coordinate variables or data variables. """ raise NotImplementedError() def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: """Maybe create new coordinate variables from this index. This method is useful if the index data can be reused as coordinate variable data. It is often the case when the underlying index structure has an array-like interface, like :py:class:`pandas.Index` objects. The variables given as argument (if any) are either returned as-is (default behavior) or can be used to copy their metadata (attributes and encoding) into the new returned coordinate variables. Note: the input variables may or may not have been filtered for this index. Parameters ---------- variables : dict-like, optional Mapping of :py:class:`Variable` objects. Returns ------- index_variables : dict-like Dictionary of :py:class:`Variable` or :py:class:`IndexVariable` objects. """ if variables is not None: # pass through return dict(**variables) else: return {} def should_add_coord_to_array( self, name: Hashable, var: Variable, dims: set[Hashable], ) -> bool: """Define whether or not an index coordinate variable should be added to a new DataArray. This method is called repeatedly for each Variable associated with this index when creating a new DataArray (via its constructor or from a Dataset) or updating an existing one. The variables associated with this index are the ones passed to :py:meth:`Index.from_variables` and/or returned by :py:meth:`Index.create_variables`. 
By default returns ``True`` if the dimensions of the coordinate variable are a subset of the array dimensions and ``False`` otherwise (DataArray model). This default behavior may be overridden in Index subclasses to bypass strict conformance with the DataArray model. This is useful for example to include the (n+1)-dimensional cell boundary coordinate associated with an interval index. Returning ``False`` will either: - raise a :py:class:`CoordinateValidationError` when passing the coordinate directly to a new or an existing DataArray, e.g., via ``DataArray.__init__()`` or ``DataArray.assign_coords()`` - drop the coordinate (and therefore drop the index) when a new DataArray is constructed by indexing a Dataset Parameters ---------- name : Hashable Name of a coordinate variable associated to this index. var : Variable Coordinate variable object. dims: tuple Dimensions of the new DataArray object being created. """ return all(d in dims for d in var.dims) def to_pandas_index(self) -> pd.Index: """Cast this xarray index to a pandas.Index object or raise a ``TypeError`` if this is not supported. This method is used by all xarray operations that still rely on pandas.Index objects. By default it raises a ``TypeError``, unless it is re-implemented in subclasses of Index. """ raise TypeError(f"{self!r} cannot be cast to a pandas.Index object") def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: """Maybe returns a new index from the current index itself indexed by positional indexers. This method should be re-implemented in subclasses of Index if the wrapped index structure supports indexing operations. For example, indexing a ``pandas.Index`` is pretty straightforward as it behaves very much like an array. By contrast, it may be harder doing so for a structure like a kd-tree that differs much from a simple array. If not re-implemented in subclasses of Index, this method returns ``None``, i.e., calling :py:meth:`Dataset.isel` will either drop the index in the resulting dataset or pass it unchanged if its corresponding coordinate(s) are not indexed. Parameters ---------- indexers : dict A dictionary of positional indexers as passed from :py:meth:`Dataset.isel` and where the entries have been filtered for the current index. Returns ------- maybe_index : Index A new Index object or ``None``. """ return None def sel(self, labels: dict[Any, Any]) -> IndexSelResult: """Query the index with arbitrary coordinate label indexers. Implementation is optional but required in order to support label-based selection. Otherwise it will raise an error when trying to call :py:meth:`Dataset.sel` with labels for this index coordinates. Coordinate label indexers can be of many kinds, e.g., scalar, list, tuple, array-like, slice, :py:class:`Variable`, :py:class:`DataArray`, etc. It is the responsibility of the index to handle those indexers properly. Parameters ---------- labels : dict A dictionary of coordinate label indexers passed from :py:meth:`Dataset.sel` and where the entries have been filtered for the current index. Returns ------- sel_results : :py:class:`IndexSelResult` An index query result object that contains dimension positional indexers. It may also contain new indexes, coordinate variables, etc. """ raise NotImplementedError(f"{self!r} doesn't support label-based selection") def join(self, other: Self, how: JoinOptions = "inner") -> Self: """Return a new index from the combination of this index with another index of the same type. 
Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object to combine with this index. join : str, optional Method for joining the two indexes (see :py:func:`~xarray.align`). Returns ------- joined : Index A new Index object. """ raise NotImplementedError( f"{self!r} doesn't support alignment with inner/outer join method" ) def reindex_like(self, other: Self) -> dict[Hashable, Any]: """Query the index with another index of the same type. Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object used to query this index. Returns ------- dim_positional_indexers : dict A dictionary where keys are dimension names and values are positional indexers. """ raise NotImplementedError(f"{self!r} doesn't support re-indexing labels") @overload def equals(self, other: Index) -> bool: ... @overload def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: ... def equals(self, other: Index, **kwargs) -> bool: """Compare this index with another index of the same type. Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object to compare with this object. exclude : frozenset of hashable, optional Dimensions excluded from checking. It is None by default, (i.e., when this method is not called in the context of alignment). For a n-dimensional index this option allows an Index to optionally ignore any dimension in ``exclude`` when comparing ``self`` with ``other``. For a 1-dimensional index this kwarg can be safely ignored, as this method is not called when all of the index's dimensions are also excluded from alignment (note: the index's dimensions correspond to the union of the dimensions of all coordinate variables associated with this index). Returns ------- is_equal : bool ``True`` if the indexes are equal, ``False`` otherwise. """ raise NotImplementedError() def roll(self, shifts: Mapping[Any, int]) -> Self | None: """Roll this index by an offset along one or more dimensions. This method can be re-implemented in subclasses of Index, e.g., when the index can be itself indexed. If not re-implemented, this method returns ``None``, i.e., calling :py:meth:`Dataset.roll` will either drop the index in the resulting dataset or pass it unchanged if its corresponding coordinate(s) are not rolled. Parameters ---------- shifts : mapping of hashable to int, optional A dict with keys matching dimensions and values given by integers to rotate each of the given dimensions, as passed :py:meth:`Dataset.roll`. Returns ------- rolled : Index A new index with rolled data. """ return None def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: """Maybe update the index with new coordinate and dimension names. This method should be re-implemented in subclasses of Index if it has attributes that depend on coordinate or dimension names. By default (if not re-implemented), it returns the index itself. Warning: the input names are not filtered for this method, they may correspond to any variable or dimension of a Dataset or a DataArray. Parameters ---------- name_dict : dict-like Mapping of current variable or coordinate names to the desired names, as passed from :py:meth:`Dataset.rename_vars`. dims_dict : dict-like Mapping of current dimension names to the desired names, as passed from :py:meth:`Dataset.rename_dims`. 
Returns ------- renamed : Index Index with renamed attributes. """ return self def copy(self, deep: bool = True) -> Self: """Return a (deep) copy of this index. Implementation in subclasses of Index is optional. The base class implements the default (deep) copy semantics. Parameters ---------- deep : bool, optional If true (default), a copy of the internal structures (e.g., wrapped index) is returned with the new object. Returns ------- index : Index A new Index object. """ return self._copy(deep=deep) def __copy__(self) -> Self: return self.copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Index: return self._copy(deep=True, memo=memo) def _copy(self, deep: bool = True, memo: dict[int, Any] | None = None) -> Self: cls = self.__class__ copied = cls.__new__(cls) if deep: for k, v in self.__dict__.items(): setattr(copied, k, copy.deepcopy(v, memo)) else: copied.__dict__.update(self.__dict__) return copied def __getitem__(self, indexer: Any) -> Self: raise NotImplementedError() def _repr_inline_(self, max_width: int) -> str: return self.__class__.__name__ def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index: from xarray.coding.cftimeindex import CFTimeIndex if len(index) > 0 and index.dtype == "O" and not isinstance(index, CFTimeIndex): try: return CFTimeIndex(index) except (ImportError, TypeError): return index else: return index def safe_cast_to_index(array: Any) -> pd.Index: """Given an array, safely cast it to a pandas.Index. If it is already a pandas.Index, return it unchanged. Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64, this function will not attempt to do automatic type conversion but will always return an index with dtype=object. """ from xarray.core.dataarray import DataArray from xarray.core.variable import Variable from xarray.namedarray.pycompat import to_numpy if isinstance(array, PandasExtensionArray): array = pd.Index(array.array) if isinstance(array, pd.Index): index = array elif isinstance(array, DataArray | Variable): # returns the original multi-index for pandas.MultiIndex level coordinates index = array._to_index() elif isinstance(array, Index): index = array.to_pandas_index() elif isinstance(array, PandasIndexingAdapter): index = array.array else: kwargs: dict[str, Any] = {} if hasattr(array, "dtype"): if array.dtype.kind == "O": kwargs["dtype"] = "object" elif array.dtype == "float16": emit_user_level_warning( ( "`pandas.Index` does not support the `float16` dtype." " Casting to `float64` for you, but in the future please" " manually cast to either `float32` and `float64`." 
), category=DeprecationWarning, ) kwargs["dtype"] = "float64" index = pd.Index(to_numpy(array), **kwargs) return _maybe_cast_to_cftimeindex(index) def _sanitize_slice_element(x): from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if not isinstance(x, tuple) and len(np.shape(x)) != 0: raise ValueError( f"cannot use non-scalar arrays in a slice for xarray indexing: {x}" ) if isinstance(x, Variable | DataArray): x = x.values if isinstance(x, np.ndarray): x = x[()] return x def _query_slice(index, label, coord_name="", method=None, tolerance=None): if method is not None or tolerance is not None: raise NotImplementedError( "cannot use ``method`` argument if any indexers are slice objects" ) indexer = index.slice_indexer( _sanitize_slice_element(label.start), _sanitize_slice_element(label.stop), _sanitize_slice_element(label.step), ) if not isinstance(indexer, slice): # unlike pandas, in xarray we never want to silently convert a # slice indexer into an array indexer raise KeyError( "cannot represent labeled-based slice indexer for coordinate " f"{coord_name!r} with a slice over integer positions; the index is " "unsorted or non-unique" ) return indexer def _asarray_tuplesafe(values): """ Convert values into a numpy array of at most 1-dimension, while preserving tuples. Adapted from pandas.core.common._asarray_tuplesafe """ if isinstance(values, tuple): result = utils.to_0d_object_array(values) else: result = np.asarray(values) if result.ndim == 2: result = np.empty(len(values), dtype=object) result[:] = values return result def _is_nested_tuple(possible_tuple): return isinstance(possible_tuple, tuple) and any( isinstance(value, tuple | list | slice) for value in possible_tuple ) def normalize_label(value, dtype=None) -> np.ndarray: if getattr(value, "ndim", 1) <= 1: value = _asarray_tuplesafe(value) if dtype is not None and dtype.kind == "f" and value.dtype.kind != "b": # pd.Index built from coordinate with float precision != 64 # see https://github.com/pydata/xarray/pull/3153 for details # bypass coercing dtype for boolean indexers (ignore index) # see https://github.com/pydata/xarray/issues/5727 value = np.asarray(value, dtype=dtype) return value def as_scalar(value: np.ndarray): # see https://github.com/pydata/xarray/pull/4292 for details return value[()] if value.dtype.kind in "mM" else value.item() def get_indexer_nd(index: pd.Index, labels, method=None, tolerance=None) -> np.ndarray: """Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional labels """ flat_labels = np.ravel(labels) if flat_labels.dtype == "float16": flat_labels = flat_labels.astype("float64") flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance) indexer = flat_indexer.reshape(labels.shape) return indexer T_PandasIndex = TypeVar("T_PandasIndex", bound="PandasIndex") class PandasIndex(Index): """Wrap a pandas.Index as an xarray compatible index.""" index: pd.Index dim: Hashable coord_dtype: Any __slots__ = ("coord_dtype", "dim", "index") def __init__( self, array: Any, dim: Hashable, coord_dtype: Any = None, *, fastpath: bool = False, ): if fastpath: index = array else: index = safe_cast_to_index(array) if index.name is None: # make a shallow copy: cheap and because the index name may be updated # here or in other constructors (cannot use pd.Index.rename as this # constructor is also called from PandasMultiIndex) index = index.copy() index.name = dim self.index = index self.dim = dim if coord_dtype is None: if 
is_allowed_extension_array_dtype(index.dtype): cast(pd.api.extensions.ExtensionDtype, index.dtype) coord_dtype = index.dtype else: coord_dtype = get_valid_numpy_dtype(index) self.coord_dtype = coord_dtype def _replace(self, index, dim=None, coord_dtype=None): if dim is None: dim = self.dim if coord_dtype is None: coord_dtype = self.coord_dtype return type(self)(index, dim, coord_dtype, fastpath=True) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> PandasIndex: if len(variables) != 1: raise ValueError( f"PandasIndex only accepts one variable, found {len(variables)} variables" ) name, var = next(iter(variables.items())) if var.ndim == 0: raise ValueError( f"cannot set a PandasIndex from the scalar variable {name!r}, " "only 1-dimensional variables are supported. " f"Note: you might want to use `obj.expand_dims({name!r})` to create a " f"new dimension and turn {name!r} as an indexed dimension coordinate." ) elif var.ndim != 1: raise ValueError( "PandasIndex only accepts a 1-dimensional variable, " f"variable {name!r} has {var.ndim} dimensions" ) dim = var.dims[0] # TODO: (benbovy - explicit indexes): add __index__ to ExplicitlyIndexesNDArrayMixin? # this could be eventually used by Variable.to_index() and would remove the need to perform # the checks below. # preserve wrapped pd.Index (if any) # accessing `.data` can load data from disk, so we only access if needed data = var._data if isinstance(var._data, PandasIndexingAdapter) else var.data # type: ignore[redundant-expr] # multi-index level variable: get level index if isinstance(var._data, PandasMultiIndexingAdapter): level = var._data.level if level is not None: data = var._data.array.get_level_values(level) obj = cls(data, dim, coord_dtype=var.dtype) assert not isinstance(obj.index, pd.MultiIndex) # Rename safely # make a shallow copy: cheap and because the index name may be updated # here or in other constructors (cannot use pd.Index.rename as this # constructor is also called from PandasMultiIndex) obj.index = obj.index.copy() obj.index.name = name return obj @staticmethod def _concat_indexes(indexes, dim, positions=None) -> pd.Index: new_pd_index: pd.Index if not indexes: new_pd_index = pd.Index([]) else: if not all(idx.dim == dim for idx in indexes): dims = ",".join({f"{idx.dim!r}" for idx in indexes}) raise ValueError( f"Cannot concatenate along dimension {dim!r} indexes with " f"dimensions: {dims}" ) pd_indexes = [idx.index for idx in indexes] new_pd_index = pd_indexes[0].append(pd_indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) new_pd_index = new_pd_index.take(indices) return new_pd_index @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: coord_dtype = None else: indexes_coord_dtypes = {idx.coord_dtype for idx in indexes} if len(indexes_coord_dtypes) == 1: coord_dtype = next(iter(indexes_coord_dtypes)) else: coord_dtype = np.result_type(*indexes_coord_dtypes) return cls(new_pd_index, dim=dim, coord_dtype=coord_dtype) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import IndexVariable name = self.index.name attrs: Mapping[Hashable, Any] | None encoding: Mapping[Hashable, Any] | None if variables is not None and name in variables: var = variables[name] attrs = var.attrs encoding = var.encoding 
else: attrs = None encoding = None data = PandasIndexingAdapter(self.index, dtype=self.coord_dtype) var = IndexVariable(self.dim, data, attrs=attrs, encoding=encoding) return {name: var} def to_pandas_index(self) -> pd.Index: return self.index def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> PandasIndex | None: from xarray.core.variable import Variable indxr = indexers[self.dim] if isinstance(indxr, Variable): if indxr.dims != (self.dim,): # can't preserve an index if result has new dimensions return None else: indxr = indxr.data if not isinstance(indxr, slice) and is_scalar(indxr): # scalar indexer: drop index return None return self._replace(self.index[indxr]) # type: ignore[index,unused-ignore] def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method is not None and not isinstance(method, str): raise TypeError("``method`` must be a string") assert len(labels) == 1 coord_name, label = next(iter(labels.items())) if isinstance(label, slice): indexer = _query_slice(self.index, label, coord_name, method, tolerance) elif is_dict_like(label): raise ValueError( "cannot use a dict-like object for selection on " "a dimension that does not have a MultiIndex" ) else: label_array = normalize_label(label, dtype=self.coord_dtype) if label_array.ndim == 0: label_value = as_scalar(label_array) if isinstance(self.index, pd.CategoricalIndex): if method is not None: raise ValueError( "'method' is not supported when indexing using a CategoricalIndex." ) if tolerance is not None: raise ValueError( "'tolerance' is not supported when indexing using a CategoricalIndex." ) indexer = self.index.get_loc(label_value) elif method is not None: indexer = get_indexer_nd(self.index, label_array, method, tolerance) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") else: try: indexer = self.index.get_loc(label_value) except KeyError as e: raise KeyError( f"not all values found in index {coord_name!r}. " "Try setting the `method` keyword argument (example: method='nearest')." 
) from e elif label_array.dtype.kind == "b": indexer = label_array else: indexer = get_indexer_nd(self.index, label_array, method, tolerance) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") # attach dimension names and/or coordinates to positional indexer if isinstance(label, Variable): indexer = Variable(label.dims, indexer) elif isinstance(label, DataArray): indexer = DataArray(indexer, coords=label._coords, dims=label.dims) return IndexSelResult({self.dim: indexer}) def equals(self, other: Index, *, exclude: frozenset[Hashable] | None = None): if not isinstance(other, PandasIndex): return False return self.index.equals(other.index) and self.dim == other.dim def join( self, other: Self, how: str = "inner", ) -> Self: if how == "outer": index = self.index.union(other.index) else: # how = "inner" index = self.index.intersection(other.index) coord_dtype = np.result_type(self.coord_dtype, other.coord_dtype) return type(self)(index, self.dim, coord_dtype=coord_dtype) def reindex_like( self, other: Self, method=None, tolerance=None ) -> dict[Hashable, Any]: if not self.index.is_unique: raise ValueError( f"cannot reindex or align along dimension {self.dim!r} because the " "(pandas) index has duplicate values" ) return {self.dim: get_indexer_nd(self.index, other.index, method, tolerance)} def roll(self, shifts: Mapping[Any, int]) -> PandasIndex: shift = shifts[self.dim] % self.index.shape[0] if shift != 0: new_pd_idx = self.index[-shift:].append(self.index[:-shift]) else: new_pd_idx = self.index[:] return self._replace(new_pd_idx) def rename(self, name_dict, dims_dict): if self.index.name not in name_dict and self.dim not in dims_dict: return self new_name = name_dict.get(self.index.name, self.index.name) index = self.index.rename(new_name) new_dim = dims_dict.get(self.dim, self.dim) return self._replace(index, dim=new_dim) def _copy( self: T_PandasIndex, deep: bool = True, memo: dict[int, Any] | None = None ) -> T_PandasIndex: if deep: # pandas is not using the memo index = self.index.copy(deep=True) else: # index will be copied in constructor index = self.index return self._replace(index) def __getitem__(self, indexer: Any): return self._replace(self.index[indexer]) def __repr__(self): return f"PandasIndex({self.index!r})" def _check_dim_compat(variables: Mapping[Any, Variable], all_dims: str = "equal"): """Check that all multi-index variable candidates are 1-dimensional and either share the same (single) dimension or each have a different dimension. """ if any(var.ndim != 1 for var in variables.values()): raise ValueError("PandasMultiIndex only accepts 1-dimensional variables") dims = {var.dims for var in variables.values()} if all_dims == "equal" and len(dims) > 1: raise ValueError( "unmatched dimensions for multi-index variables " + ", ".join([f"{k!r} {v.dims}" for k, v in variables.items()]) ) if all_dims == "different" and len(dims) < len(variables): raise ValueError( "conflicting dimensions for multi-index product variables " + ", ".join([f"{k!r} {v.dims}" for k, v in variables.items()]) ) T_PDIndex = TypeVar("T_PDIndex", bound=pd.Index) def remove_unused_levels_categories(index: T_PDIndex) -> T_PDIndex: """ Remove unused levels from MultiIndex and unused categories from CategoricalIndex """ if isinstance(index, pd.MultiIndex): new_index = cast(pd.MultiIndex, index.remove_unused_levels()) # if it contains CategoricalIndex, we need to remove unused categories # manually. 
See https://github.com/pandas-dev/pandas/issues/30846 if any(isinstance(lev, pd.CategoricalIndex) for lev in new_index.levels): levels = [] for i, level in enumerate(new_index.levels): if isinstance(level, pd.CategoricalIndex): level = level[new_index.codes[i]].remove_unused_categories() else: level = level[new_index.codes[i]] levels.append(level) # TODO: calling from_array() reorders MultiIndex levels. It would # be best to avoid this, if possible, e.g., by using # MultiIndex.remove_unused_levels() (which does not reorder) on the # part of the MultiIndex that is not categorical, or by fixing this # upstream in pandas. new_index = pd.MultiIndex.from_arrays(levels, names=new_index.names) return cast(T_PDIndex, new_index) if isinstance(index, pd.CategoricalIndex): return index.remove_unused_categories() # type: ignore[attr-defined] return index class PandasMultiIndex(PandasIndex): """Wrap a pandas.MultiIndex as an xarray compatible index.""" index: pd.MultiIndex dim: Hashable coord_dtype: Any level_coords_dtype: dict[Hashable | None, Any] __slots__ = ("coord_dtype", "dim", "index", "level_coords_dtype") def __init__(self, array: Any, dim: Hashable, level_coords_dtype: Any = None): super().__init__(array, dim) # default index level names names = [] for i, idx in enumerate(self.index.levels): name = idx.name or f"{dim}_level_{i}" if name == dim: raise ValueError( f"conflicting multi-index level name {name!r} with dimension {dim!r}" ) names.append(name) self.index.names = names if level_coords_dtype is None: level_coords_dtype = { idx.name: get_valid_numpy_dtype(idx) for idx in self.index.levels } self.level_coords_dtype = level_coords_dtype def _replace(self, index, dim=None, level_coords_dtype=None) -> PandasMultiIndex: if dim is None: dim = self.dim index.name = dim if level_coords_dtype is None: level_coords_dtype = self.level_coords_dtype return type(self)(index, dim, level_coords_dtype) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> PandasMultiIndex: _check_dim_compat(variables) dim = next(iter(variables.values())).dims[0] index = pd.MultiIndex.from_arrays( [var.values for var in variables.values()], names=list(variables.keys()) ) index.name = dim level_coords_dtype = {name: var.dtype for name, var in variables.items()} obj = cls(index, dim, level_coords_dtype=level_coords_dtype) return obj @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: level_coords_dtype = None else: level_coords_dtype = {} for name in indexes[0].level_coords_dtype: level_coords_dtype[name] = np.result_type( *[idx.level_coords_dtype[name] for idx in indexes] ) return cls(new_pd_index, dim=dim, level_coords_dtype=level_coords_dtype) @classmethod def stack( cls, variables: Mapping[Any, Variable], dim: Hashable ) -> PandasMultiIndex: """Create a new Pandas MultiIndex from the product of 1-d variables (levels) along a new dimension. Level variables must have a dimension distinct from each other. Keeps levels the same (doesn't refactorize them) so that it gives back the original labels after a stack/unstack roundtrip. 
""" _check_dim_compat(variables, all_dims="different") level_indexes = [safe_cast_to_index(var) for var in variables.values()] for name, idx in zip(variables, level_indexes, strict=True): if isinstance(idx, pd.MultiIndex): raise ValueError( f"cannot create a multi-index along stacked dimension {dim!r} " f"from variable {name!r} that wraps a multi-index" ) # from_product sorts by default, so we can't use that always # https://github.com/pydata/xarray/issues/980 # https://github.com/pandas-dev/pandas/issues/14672 if all(index.is_monotonic_increasing for index in level_indexes): index = pd.MultiIndex.from_product( level_indexes, sortorder=0, names=list(variables.keys()) ) else: split_labels, levels = zip( *[lev.factorize() for lev in level_indexes], strict=True ) labels_mesh = np.meshgrid(*split_labels, indexing="ij") labels = [x.ravel().tolist() for x in labels_mesh] index = pd.MultiIndex( levels=levels, codes=labels, sortorder=0, names=list(variables.keys()) ) level_coords_dtype = {k: var.dtype for k, var in variables.items()} return cls(index, dim, level_coords_dtype=level_coords_dtype) def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]: clean_index = remove_unused_levels_categories(self.index) if not clean_index.is_unique: raise ValueError( "Cannot unstack MultiIndex containing duplicates. Make sure entries " f"are unique, e.g., by calling ``.drop_duplicates('{self.dim}')``, " "before unstacking." ) new_indexes: dict[Hashable, Index] = {} for name, lev in zip(clean_index.names, clean_index.levels, strict=True): idx = PandasIndex( lev.copy(), name, coord_dtype=self.level_coords_dtype[name] ) new_indexes[name] = idx return new_indexes, clean_index @classmethod def from_variables_maybe_expand( cls, dim: Hashable, current_variables: Mapping[Any, Variable], variables: Mapping[Any, Variable], ) -> tuple[PandasMultiIndex, IndexVars]: """Create a new multi-index maybe by expanding an existing one with new variables as index levels. The index and its corresponding coordinates may be created along a new dimension. 
""" names: list[Hashable] = [] codes: list[Iterable[int]] = [] levels: list[Iterable[Any]] = [] level_variables: dict[Any, Variable] = {} _check_dim_compat({**current_variables, **variables}) if len(current_variables) > 1: # expand from an existing multi-index data = cast( PandasMultiIndexingAdapter, next(iter(current_variables.values()))._data ) current_index = data.array names.extend(current_index.names) codes.extend(current_index.codes) levels.extend(current_index.levels) for name in current_index.names: level_variables[name] = current_variables[name] elif len(current_variables) == 1: # expand from one 1D variable (no multi-index): convert it to an index level var = next(iter(current_variables.values())) new_var_name = f"{dim}_level_0" names.append(new_var_name) cat = pd.Categorical(var.values, ordered=True) codes.append(cat.codes) levels.append(cat.categories) level_variables[new_var_name] = var for name, var in variables.items(): names.append(name) cat = pd.Categorical(var.values, ordered=True) codes.append(cat.codes) levels.append(cat.categories) level_variables[name] = var codes_as_lists = [list(x) for x in codes] levels_as_lists = [list(level) for level in levels] index = pd.MultiIndex(levels=levels_as_lists, codes=codes_as_lists, names=names) level_coords_dtype = {k: var.dtype for k, var in level_variables.items()} obj = cls(index, dim, level_coords_dtype=level_coords_dtype) index_vars = obj.create_variables(level_variables) return obj, index_vars def keep_levels( self, level_variables: Mapping[Any, Variable] ) -> PandasMultiIndex | PandasIndex: """Keep only the provided levels and return a new multi-index with its corresponding coordinates. """ index = self.index.droplevel( [k for k in self.index.names if k not in level_variables] ) if isinstance(index, pd.MultiIndex): level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names} return self._replace(index, level_coords_dtype=level_coords_dtype) else: # backward compatibility: rename the level coordinate to the dimension name return PandasIndex( index.rename(self.dim), self.dim, coord_dtype=self.level_coords_dtype[index.name], ) def reorder_levels( self, level_variables: Mapping[Any, Variable] ) -> PandasMultiIndex: """Re-arrange index levels using input order and return a new multi-index with its corresponding coordinates. """ index = cast(pd.MultiIndex, self.index.reorder_levels(level_variables.keys())) level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names} return self._replace(index, level_coords_dtype=level_coords_dtype) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import IndexVariable if variables is None: variables = {} index_vars: IndexVars = {} for name in (self.dim,) + tuple(self.index.names): if name == self.dim: level = None dtype = None else: level = name dtype = self.level_coords_dtype[name] var = variables.get(name) if var is not None: attrs = var.attrs encoding = var.encoding else: attrs = {} encoding = {} data = PandasMultiIndexingAdapter(self.index, dtype=dtype, level=level) # type: ignore[arg-type] # TODO: are Hashables ok? 
index_vars[name] = IndexVariable( self.dim, data, attrs=attrs, encoding=encoding, fastpath=True, ) return index_vars def sel(self, labels, method=None, tolerance=None) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method is not None or tolerance is not None: raise ValueError( "multi-index does not support ``method`` and ``tolerance``" ) new_index = None scalar_coord_values = {} indexer: int | slice | np.ndarray | Variable | DataArray # label(s) given for multi-index level(s) if all(lbl in self.index.names for lbl in labels): label_values = {} for k, v in labels.items(): label_array = normalize_label(v, dtype=self.level_coords_dtype[k]) try: label_values[k] = as_scalar(label_array) except ValueError as err: # label should be an item not an array-like raise ValueError( "Vectorized selection is not " f"available along coordinate {k!r} (multi-index level)" ) from err has_slice = any(isinstance(v, slice) for v in label_values.values()) if len(label_values) == self.index.nlevels and not has_slice: indexer = self.index.get_loc( tuple(label_values[k] for k in self.index.names) ) else: indexer, new_index = self.index.get_loc_level( tuple(label_values.values()), level=tuple(label_values.keys()) ) scalar_coord_values.update(label_values) # GH2619. Raise a KeyError if nothing is chosen if indexer.dtype.kind == "b" and indexer.sum() == 0: # type: ignore[union-attr] raise KeyError(f"{labels} not found") # assume one label value given for the multi-index "array" (dimension) else: if len(labels) > 1: coord_name = next(iter(set(labels) - set(self.index.names))) raise ValueError( f"cannot provide labels for both coordinate {coord_name!r} (multi-index array) " f"and one or more coordinates among {self.index.names!r} (multi-index levels)" ) coord_name, label = next(iter(labels.items())) if is_dict_like(label): invalid_levels = tuple( name for name in label if name not in self.index.names ) if invalid_levels: raise ValueError( f"multi-index level names {invalid_levels} not found in indexes {tuple(self.index.names)}" ) return self.sel(label) elif isinstance(label, slice): indexer = _query_slice(self.index, label, coord_name) elif isinstance(label, tuple): if _is_nested_tuple(label): indexer = self.index.get_locs(label) elif len(label) == self.index.nlevels: indexer = self.index.get_loc(label) else: levels = [self.index.names[i] for i in range(len(label))] indexer, new_index = self.index.get_loc_level(label, level=levels) scalar_coord_values.update(dict(zip(levels, label, strict=True))) else: label_array = normalize_label(label) if label_array.ndim == 0: label_value = as_scalar(label_array) indexer, new_index = self.index.get_loc_level(label_value, level=0) scalar_coord_values[self.index.names[0]] = label_value elif label_array.dtype.kind == "b": indexer = label_array else: if label_array.ndim > 1: raise ValueError( "Vectorized selection is not available along " f"coordinate {coord_name!r} with a multi-index" ) indexer = get_indexer_nd(self.index, label_array) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") # attach dimension names and/or coordinates to positional indexer if isinstance(label, Variable): indexer = Variable(label.dims, indexer) elif isinstance(label, DataArray): # do not include label-indexer DataArray coordinates that conflict # with the level names of this index coords = { k: v for k, v in label._coords.items() if k not in self.index.names } indexer = DataArray(indexer, coords=coords, 
dims=label.dims) if new_index is not None: if isinstance(new_index, pd.MultiIndex): level_coords_dtype = { k: self.level_coords_dtype[k] for k in new_index.names } new_index = self._replace( new_index, level_coords_dtype=level_coords_dtype ) dims_dict = {} drop_coords = [] else: new_index = PandasIndex( new_index, new_index.name, coord_dtype=self.level_coords_dtype[new_index.name], ) dims_dict = {self.dim: new_index.index.name} drop_coords = [self.dim] # variable(s) attrs and encoding metadata are propagated # when replacing the indexes in the resulting xarray object new_vars = new_index.create_variables() indexes = cast(dict[Any, Index], dict.fromkeys(new_vars, new_index)) # add scalar variable for each dropped level variables = new_vars for name, val in scalar_coord_values.items(): variables[name] = Variable([], val) return IndexSelResult( {self.dim: indexer}, indexes=indexes, variables=variables, drop_indexes=list(scalar_coord_values), drop_coords=drop_coords, rename_dims=dims_dict, ) else: return IndexSelResult({self.dim: indexer}) def join(self, other, how: str = "inner"): if how == "outer": # bug in pandas? need to reset index.name other_index = other.index.copy() other_index.name = None index = self.index.union(other_index) index.name = self.dim else: # how = "inner" index = self.index.intersection(other.index) level_coords_dtype = { k: np.result_type(lvl_dtype, other.level_coords_dtype[k]) for k, lvl_dtype in self.level_coords_dtype.items() } return type(self)(index, self.dim, level_coords_dtype=level_coords_dtype) def rename(self, name_dict, dims_dict): if not set(self.index.names) & set(name_dict) and self.dim not in dims_dict: return self # pandas 1.3.0: could simply do `self.index.rename(names_dict)` new_names = [name_dict.get(k, k) for k in self.index.names] index = self.index.rename(new_names) new_dim = dims_dict.get(self.dim, self.dim) new_level_coords_dtype = dict( zip(new_names, self.level_coords_dtype.values(), strict=True) ) return self._replace( index, dim=new_dim, level_coords_dtype=new_level_coords_dtype ) class CoordinateTransformIndex(Index): """Helper class for creating Xarray indexes based on coordinate transforms. - wraps a :py:class:`CoordinateTransform` instance - takes care of creating the index (lazy) coordinates - supports point-wise label-based selection - supports exact alignment only, by comparing indexes based on their transform (not on their explicit coordinate labels) .. caution:: This API is experimental and subject to change. Please report any bugs or surprising behaviour you encounter. 
""" transform: CoordinateTransform def __init__( self, transform: CoordinateTransform, ): self.transform = transform def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import Variable new_variables = {} for name in self.transform.coord_names: # copy attributes, if any attrs: Mapping[Hashable, Any] | None if variables is not None and name in variables: var = variables[name] attrs = var.attrs else: attrs = None data = CoordinateTransformIndexingAdapter(self.transform, name) new_variables[name] = Variable(self.transform.dims, data, attrs=attrs) return new_variables def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: # TODO: support returning a new index (e.g., possible to re-calculate the # the transform or calculate another transform on a reduced dimension space) return None def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method != "nearest": raise ValueError( "CoordinateTransformIndex only supports selection with method='nearest'" ) labels_set = set(labels) coord_names_set = set(self.transform.coord_names) missing_labels = coord_names_set - labels_set if missing_labels: missing_labels_str = ",".join([f"{name}" for name in missing_labels]) raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.") label0_obj = next(iter(labels.values())) dim_size0 = getattr(label0_obj, "sizes", {}) is_xr_obj = [ isinstance(label, DataArray | Variable) for label in labels.values() ] if not all(is_xr_obj): raise TypeError( "CoordinateTransformIndex only supports advanced (point-wise) indexing " "with either xarray.DataArray or xarray.Variable objects." ) dim_size = [getattr(label, "sizes", {}) for label in labels.values()] if any(ds != dim_size0 for ds in dim_size): raise ValueError( "CoordinateTransformIndex only supports advanced (point-wise) indexing " "with xarray.DataArray or xarray.Variable objects of matching dimensions." ) coord_labels = { name: labels[name].values for name in self.transform.coord_names } dim_positions = self.transform.reverse(coord_labels) results: dict[str, Variable | DataArray] = {} dims0 = tuple(dim_size0) for dim, pos in dim_positions.items(): # TODO: rounding the decimal positions is not always the behavior we expect # (there are different ways to represent implicit intervals) # we should probably make this customizable. 
pos = np.round(pos).astype("int") if isinstance(label0_obj, Variable): results[dim] = Variable(dims0, pos) else: # dataarray results[dim] = DataArray(pos, dims=dims0) return IndexSelResult(results) def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, CoordinateTransformIndex): return False return self.transform.equals(other.transform, exclude=exclude) def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: coord_names = self.transform.coord_names dims = self.transform.dims dim_size = self.transform.dim_size if not set(coord_names) & set(name_dict) and not set(dims) & set(dims_dict): return self new_transform = copy.deepcopy(self.transform) new_transform.coord_names = tuple(name_dict.get(n, n) for n in coord_names) new_transform.dims = tuple(str(dims_dict.get(d, d)) for d in dims) new_transform.dim_size = { str(dims_dict.get(d, d)): v for d, v in dim_size.items() } return type(self)(new_transform) def create_default_index_implicit( dim_variable: Variable, all_variables: Mapping | Iterable[Hashable] | None = None, ) -> tuple[PandasIndex, IndexVars]: """Create a default index from a dimension variable. Create a PandasMultiIndex if the given variable wraps a pandas.MultiIndex, otherwise create a PandasIndex (note that this will become obsolete once we depreciate implicitly passing a pandas.MultiIndex as a coordinate). """ if all_variables is None: all_variables = {} if not isinstance(all_variables, Mapping): all_variables = dict.fromkeys(all_variables) name = dim_variable.dims[0] array = getattr(dim_variable._data, "array", None) index: PandasIndex if isinstance(array, pd.MultiIndex): index = PandasMultiIndex(array, name) index_vars = index.create_variables() # check for conflict between level names and variable names duplicate_names = [k for k in index_vars if k in all_variables and k != name] if duplicate_names: # dirty workaround for an edge case where both the dimension # coordinate and the level coordinates are given for the same # multi-index object => do not raise an error # TODO: remove this check when removing the multi-index dimension coordinate if len(duplicate_names) < len(index.index.names): conflict = True else: duplicate_vars = [all_variables[k] for k in duplicate_names] conflict = any( v is None or not dim_variable.equals(v) for v in duplicate_vars ) if conflict: conflict_str = "\n".join(duplicate_names) raise ValueError( f"conflicting MultiIndex level / variable name(s):\n{conflict_str}" ) else: dim_var = {name: dim_variable} index = PandasIndex.from_variables(dim_var, options={}) index_vars = index.create_variables(dim_var) return index, index_vars # generic type that represents either a pandas or an xarray index T_PandasOrXarrayIndex = TypeVar("T_PandasOrXarrayIndex", Index, pd.Index) class Indexes(collections.abc.Mapping, Generic[T_PandasOrXarrayIndex]): """Immutable proxy for Dataset or DataArray indexes. It is a mapping where keys are coordinate names and values are either pandas or xarray indexes. It also contains the indexed coordinate variables and provides some utility methods. 
""" _index_type: type[Index | pd.Index] _indexes: dict[Any, T_PandasOrXarrayIndex] _variables: dict[Any, Variable] __slots__ = ( "__coord_name_id", "__id_coord_names", "__id_index", "_dims", "_index_type", "_indexes", "_variables", ) def __init__( self, indexes: Mapping[Any, T_PandasOrXarrayIndex] | None = None, variables: Mapping[Any, Variable] | None = None, index_type: type[Index | pd.Index] = Index, ): """Constructor not for public consumption. Parameters ---------- indexes : dict Indexes held by this object. variables : dict Indexed coordinate variables in this object. Entries must match those of `indexes`. index_type : type The type of all indexes, i.e., either :py:class:`xarray.indexes.Index` or :py:class:`pandas.Index`. """ if indexes is None: indexes = {} if variables is None: variables = {} unmatched_keys = set(indexes) ^ set(variables) if unmatched_keys: raise ValueError( f"unmatched keys found in indexes and variables: {unmatched_keys}" ) if any(not isinstance(idx, index_type) for idx in indexes.values()): index_type_str = f"{index_type.__module__}.{index_type.__name__}" raise TypeError( f"values of indexes must all be instances of {index_type_str}" ) self._index_type = index_type self._indexes = dict(**indexes) self._variables = dict(**variables) self._dims: Mapping[Hashable, int] | None = None self.__coord_name_id: dict[Any, int] | None = None self.__id_index: dict[int, T_PandasOrXarrayIndex] | None = None self.__id_coord_names: dict[int, tuple[Hashable, ...]] | None = None @property def _coord_name_id(self) -> dict[Any, int]: if self.__coord_name_id is None: self.__coord_name_id = {k: id(idx) for k, idx in self._indexes.items()} return self.__coord_name_id @property def _id_index(self) -> dict[int, T_PandasOrXarrayIndex]: if self.__id_index is None: self.__id_index = {id(idx): idx for idx in self.get_unique()} return self.__id_index @property def _id_coord_names(self) -> dict[int, tuple[Hashable, ...]]: if self.__id_coord_names is None: id_coord_names: Mapping[int, list[Hashable]] = defaultdict(list) for k, v in self._coord_name_id.items(): id_coord_names[v].append(k) self.__id_coord_names = {k: tuple(v) for k, v in id_coord_names.items()} return self.__id_coord_names @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen(self._variables) @property def dims(self) -> Mapping[Hashable, int]: from xarray.core.variable import calculate_dimensions if self._dims is None: self._dims = calculate_dimensions(self._variables) return Frozen(self._dims) def copy(self) -> Indexes: return type(self)(dict(self._indexes), dict(self._variables)) def get_unique(self) -> list[T_PandasOrXarrayIndex]: """Return a list of unique indexes, preserving order.""" unique_indexes: list[T_PandasOrXarrayIndex] = [] seen: set[int] = set() for index in self._indexes.values(): index_id = id(index) if index_id not in seen: unique_indexes.append(index) seen.add(index_id) return unique_indexes def is_multi(self, key: Hashable) -> bool: """Return True if ``key`` maps to a multi-coordinate index, False otherwise. """ return len(self._id_coord_names[self._coord_name_id[key]]) > 1 def get_all_coords( self, key: Hashable, errors: ErrorOptions = "raise" ) -> dict[Hashable, Variable]: """Return all coordinates having the same index. Parameters ---------- key : hashable Index key. errors : {"raise", "ignore"}, default: "raise" If "raise", raises a ValueError if `key` is not in indexes. If "ignore", an empty tuple is returned instead. 
Returns ------- coords : dict A dictionary of all coordinate variables having the same index. """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if key not in self._indexes: if errors == "raise": raise ValueError(f"no index found for {key!r} coordinate") else: return {} all_coord_names = self._id_coord_names[self._coord_name_id[key]] return {k: self._variables[k] for k in all_coord_names} def get_all_dims( self, key: Hashable, errors: ErrorOptions = "raise" ) -> Mapping[Hashable, int]: """Return all dimensions shared by an index. Parameters ---------- key : hashable Index key. errors : {"raise", "ignore"}, default: "raise" If "raise", raises a ValueError if `key` is not in indexes. If "ignore", an empty tuple is returned instead. Returns ------- dims : dict A dictionary of all dimensions shared by an index. """ from xarray.core.variable import calculate_dimensions return calculate_dimensions(self.get_all_coords(key, errors=errors)) def group_by_index( self, ) -> list[tuple[T_PandasOrXarrayIndex, dict[Hashable, Variable]]]: """Returns a list of unique indexes and their corresponding coordinates.""" index_coords = [] for i, index in self._id_index.items(): coords = {k: self._variables[k] for k in self._id_coord_names[i]} index_coords.append((index, coords)) return index_coords def to_pandas_indexes(self) -> Indexes[pd.Index]: """Returns an immutable proxy for Dataset or DataArray pandas indexes. Raises an error if this proxy contains indexes that cannot be coerced to pandas.Index objects. """ indexes: dict[Hashable, pd.Index] = {} for k, idx in self._indexes.items(): if isinstance(idx, pd.Index): indexes[k] = idx elif isinstance(idx, Index): indexes[k] = idx.to_pandas_index() return Indexes(indexes, self._variables, index_type=pd.Index) def copy_indexes( self, deep: bool = True, memo: dict[int, T_PandasOrXarrayIndex] | None = None ) -> tuple[dict[Hashable, T_PandasOrXarrayIndex], dict[Hashable, Variable]]: """Return a new dictionary with copies of indexes, preserving unique indexes. Parameters ---------- deep : bool, default: True Whether the indexes are deep or shallow copied onto the new object. memo : dict if object id to copied objects or None, optional To prevent infinite recursion deepcopy stores all copied elements in this dict. 
""" new_indexes: dict[Hashable, T_PandasOrXarrayIndex] = {} new_index_vars: dict[Hashable, Variable] = {} xr_idx: Index new_idx: T_PandasOrXarrayIndex for idx, coords in self.group_by_index(): if isinstance(idx, pd.Index): convert_new_idx = True dim = next(iter(coords.values())).dims[0] if isinstance(idx, pd.MultiIndex): xr_idx = PandasMultiIndex(idx, dim) else: xr_idx = PandasIndex(idx, dim) else: convert_new_idx = False xr_idx = idx new_idx = xr_idx._copy(deep=deep, memo=memo) # type: ignore[assignment] idx_vars = xr_idx.create_variables(coords) if convert_new_idx: new_idx = new_idx.index # type: ignore[attr-defined] new_indexes.update(dict.fromkeys(coords, new_idx)) new_index_vars.update(idx_vars) return new_indexes, new_index_vars def __iter__(self) -> Iterator[T_PandasOrXarrayIndex]: return iter(self._indexes) def __len__(self) -> int: return len(self._indexes) def __contains__(self, key) -> bool: return key in self._indexes def __getitem__(self, key) -> T_PandasOrXarrayIndex: return self._indexes[key] def __repr__(self): indexes = formatting._get_indexes_dict(self) return formatting.indexes_repr(indexes) def default_indexes( coords: Mapping[Any, Variable], dims: Iterable ) -> dict[Hashable, Index]: """Default indexes for a Dataset/DataArray. Parameters ---------- coords : Mapping[Any, xarray.Variable] Coordinate variables from which to draw default indexes. dims : iterable Iterable of dimension names. Returns ------- Mapping from indexing keys (levels/dimension names) to indexes used for indexing along that dimension. """ indexes: dict[Hashable, Index] = {} coord_names = set(coords) for name, var in coords.items(): if name in dims and var.ndim == 1: index, index_vars = create_default_index_implicit(var, coords) if set(index_vars) <= coord_names: indexes.update(dict.fromkeys(index_vars, index)) return indexes def _wrap_index_equals( index: Index, ) -> Callable[[Index, frozenset[Hashable]], bool]: # TODO: remove this Index.equals() wrapper (backward compatibility) sig = inspect.signature(index.equals) if len(sig.parameters) == 1: index_cls_name = type(index).__module__ + "." + type(index).__qualname__ emit_user_level_warning( f"the signature ``{index_cls_name}.equals(self, other)`` is deprecated. " f"Please update it to " f"``{index_cls_name}.equals(self, other, *, exclude=None)`` " f"or kindly ask the maintainers of ``{index_cls_name}`` to do it. " "See documentation of xarray.Index.equals() for more info.", FutureWarning, ) exclude_kwarg = False else: exclude_kwarg = True def equals_wrapper(other: Index, exclude: frozenset[Hashable]) -> bool: if exclude_kwarg: return index.equals(other, exclude=exclude) else: return index.equals(other) return equals_wrapper def indexes_equal( index: Index, other_index: Index, variable: Variable, other_variable: Variable, cache: dict[tuple[int, int], bool | None] | None = None, ) -> bool: """Check if two indexes are equal, possibly with cached results. If the two indexes are not of the same type or they do not implement equality, fallback to coordinate labels equality check. 
""" if cache is None: # dummy cache cache = {} key = (id(index), id(other_index)) equal: bool | None = None if key not in cache: if type(index) is type(other_index): try: equal = index.equals(other_index) except NotImplementedError: equal = None else: cache[key] = equal else: equal = None else: equal = cache[key] if equal is None: equal = variable.equals(other_variable) return cast(bool, equal) def indexes_all_equal( elements: Sequence[tuple[Index, dict[Hashable, Variable]]], exclude_dims: frozenset[Hashable], ) -> bool: """Check if indexes are all equal. If they are not of the same type or they do not implement this check, check if their coordinate variables are all equal instead. """ def check_variables(): variables = [e[1] for e in elements] return any( not variables[0][k].equals(other_vars[k]) for other_vars in variables[1:] for k in variables[0] ) indexes = [e[0] for e in elements] same_objects = all(indexes[0] is other_idx for other_idx in indexes[1:]) if same_objects: return True same_type = all(type(indexes[0]) is type(other_idx) for other_idx in indexes[1:]) if same_type: index_equals_func = _wrap_index_equals(indexes[0]) try: not_equal = any( not index_equals_func(other_idx, exclude_dims) for other_idx in indexes[1:] ) except NotImplementedError: not_equal = check_variables() else: not_equal = check_variables() return not not_equal def _apply_indexes_fast(indexes: Indexes[Index], args: Mapping[Any, Any], func: str): # This function avoids the call to indexes.group_by_index # which is really slow when repeatedly iterating through # an array. However, it fails to return the correct ID for # multi-index arrays indexes_fast, coords = indexes._indexes, indexes._variables new_indexes: dict[Hashable, Index] = dict(indexes_fast.items()) new_index_variables: dict[Hashable, Variable] = {} for name, index in indexes_fast.items(): coord = coords[name] if hasattr(coord, "_indexes"): index_vars = {n: coords[n] for n in coord._indexes} else: index_vars = {name: coord} index_dims = {d for var in index_vars.values() for d in var.dims} index_args = {k: v for k, v in args.items() if k in index_dims} if index_args: new_index = getattr(index, func)(index_args) if new_index is not None: new_indexes.update(dict.fromkeys(index_vars, new_index)) new_index_vars = new_index.create_variables(index_vars) new_index_variables.update(new_index_vars) else: for k in index_vars: new_indexes.pop(k, None) return new_indexes, new_index_variables def _apply_indexes( indexes: Indexes[Index], args: Mapping[Any, Any], func: str, ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: new_indexes: dict[Hashable, Index] = dict(indexes.items()) new_index_variables: dict[Hashable, Variable] = {} for index, index_vars in indexes.group_by_index(): index_dims = {d for var in index_vars.values() for d in var.dims} index_args = {k: v for k, v in args.items() if k in index_dims} if index_args: new_index = getattr(index, func)(index_args) if new_index is not None: new_indexes.update(dict.fromkeys(index_vars, new_index)) new_index_vars = new_index.create_variables(index_vars) new_index_variables.update(new_index_vars) else: for k in index_vars: new_indexes.pop(k, None) return new_indexes, new_index_variables def isel_indexes( indexes: Indexes[Index], indexers: Mapping[Any, Any], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: # Fast path function _apply_indexes_fast does not work with multi-coordinate # Xarray indexes (see https://github.com/pydata/xarray/issues/10063). 
# -> call it only in the most common case where all indexes are default # PandasIndex each associated to a single 1-dimensional coordinate. if any(type(idx) is not PandasIndex for idx in indexes._indexes.values()): return _apply_indexes(indexes, indexers, "isel") else: return _apply_indexes_fast(indexes, indexers, "isel") def roll_indexes( indexes: Indexes[Index], shifts: Mapping[Any, int], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: return _apply_indexes(indexes, shifts, "roll") def filter_indexes_from_coords( indexes: Mapping[Any, Index], filtered_coord_names: set, ) -> dict[Hashable, Index]: """Filter index items given a (sub)set of coordinate names. Drop all multi-coordinate related index items for any key missing in the set of coordinate names. """ filtered_indexes: dict[Any, Index] = dict(indexes) index_coord_names: dict[Hashable, set[Hashable]] = defaultdict(set) for name, idx in indexes.items(): index_coord_names[id(idx)].add(name) for idx_coord_names in index_coord_names.values(): if not idx_coord_names <= filtered_coord_names: for k in idx_coord_names: del filtered_indexes[k] return filtered_indexes def assert_no_index_corrupted( indexes: Indexes[Index], coord_names: set[Hashable], action: str = "remove coordinate(s)", ) -> None: """Assert removing coordinates or indexes will not corrupt indexes.""" # An index may be corrupted when the set of its corresponding coordinate name(s) # partially overlaps the set of coordinate names to remove for index, index_coords in indexes.group_by_index(): common_names = set(index_coords) & coord_names if common_names and len(common_names) != len(index_coords): common_names_str = ", ".join(f"{k!r}" for k in common_names) index_names_str = ", ".join(f"{k!r}" for k in index_coords) raise ValueError( f"cannot {action} {common_names_str}, which would corrupt " f"the following index built from coordinates {index_names_str}:\n" f"{index}" ) xarray-2025.12.0/xarray/core/indexing.py000066400000000000000000002337641511464676000177770ustar00rootroot00000000000000from __future__ import annotations import enum import functools import math import operator from collections import Counter, defaultdict from collections.abc import Callable, Hashable, Iterable, Mapping from contextlib import suppress from dataclasses import dataclass, field from datetime import timedelta from typing import TYPE_CHECKING, Any, cast, overload import numpy as np import pandas as pd from numpy.typing import DTypeLike from packaging.version import Version from xarray.core import duck_array_ops from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.nputils import NumpyVIndexAdapter from xarray.core.types import T_Xarray from xarray.core.utils import ( NDArrayMixin, either_dict_or_kwargs, get_valid_numpy_dtype, is_allowed_extension_array, is_allowed_extension_array_dtype, is_duck_array, is_duck_dask_array, is_full_slice, is_scalar, is_valid_numpy_dtype, to_0d_array, ) from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import array_type, integer_types, is_chunked_array if TYPE_CHECKING: from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexes import Index from xarray.core.types import Self from xarray.core.variable import Variable from xarray.namedarray._typing import _Shape, duckarray from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint BasicIndexerType = int | np.integer | slice OuterIndexerType = BasicIndexerType | np.ndarray[Any, np.dtype[np.integer]] 
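# A minimal sketch of the values covered by the two aliases above (example
# values assumed, not part of the module):
#
#   basic_key: BasicIndexerType = slice(2, 8, 2)       # ints, np.integer, slices
#   outer_key: OuterIndexerType = np.array([0, 3, 7])  # additionally 1-d integer arrays
#
# The BasicIndexer/OuterIndexer classes defined later in this module wrap tuples
# of such values, one entry per dimension of the indexed array.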
@dataclass class IndexSelResult: """Index query results. Attributes ---------- dim_indexers: dict A dictionary where keys are array dimensions and values are location-based indexers. indexes: dict, optional New indexes to replace in the resulting DataArray or Dataset. variables : dict, optional New variables to replace in the resulting DataArray or Dataset. drop_coords : list, optional Coordinate(s) to drop in the resulting DataArray or Dataset. drop_indexes : list, optional Index(es) to drop in the resulting DataArray or Dataset. rename_dims : dict, optional A dictionary in the form ``{old_dim: new_dim}`` for dimension(s) to rename in the resulting DataArray or Dataset. """ dim_indexers: dict[Any, Any] indexes: dict[Any, Index] = field(default_factory=dict) variables: dict[Any, Variable] = field(default_factory=dict) drop_coords: list[Hashable] = field(default_factory=list) drop_indexes: list[Hashable] = field(default_factory=list) rename_dims: dict[Any, Hashable] = field(default_factory=dict) def as_tuple(self): """Unlike ``dataclasses.astuple``, return a shallow copy. See https://stackoverflow.com/a/51802661 """ return ( self.dim_indexers, self.indexes, self.variables, self.drop_coords, self.drop_indexes, self.rename_dims, ) def merge_sel_results(results: list[IndexSelResult]) -> IndexSelResult: all_dims_count = Counter([dim for res in results for dim in res.dim_indexers]) duplicate_dims = {k: v for k, v in all_dims_count.items() if v > 1} if duplicate_dims: # TODO: this message is not right when combining indexe(s) queries with # location-based indexing on a dimension with no dimension-coordinate (failback) fmt_dims = [ f"{dim!r}: {count} indexes involved" for dim, count in duplicate_dims.items() ] raise ValueError( "Xarray does not support label-based selection with more than one index " "over the following dimension(s):\n" + "\n".join(fmt_dims) + "\nSuggestion: use a multi-index for each of those dimension(s)." ) dim_indexers = {} indexes = {} variables = {} drop_coords = [] drop_indexes = [] rename_dims = {} for res in results: dim_indexers.update(res.dim_indexers) indexes.update(res.indexes) variables.update(res.variables) drop_coords += res.drop_coords drop_indexes += res.drop_indexes rename_dims.update(res.rename_dims) return IndexSelResult( dim_indexers, indexes, variables, drop_coords, drop_indexes, rename_dims ) def group_indexers_by_index( obj: T_Xarray, indexers: Mapping[Any, Any], options: Mapping[str, Any], ) -> list[tuple[Index, dict[Any, Any]]]: """Returns a list of unique indexes and their corresponding indexers.""" unique_indexes = {} grouped_indexers: Mapping[int | None, dict] = defaultdict(dict) for key, label in indexers.items(): index: Index = obj.xindexes.get(key, None) if index is not None: index_id = id(index) unique_indexes[index_id] = index grouped_indexers[index_id][key] = label elif key in obj.coords: raise KeyError(f"no index found for coordinate {key!r}") elif key not in obj.dims: raise KeyError( f"{key!r} is not a valid dimension or coordinate for " f"{obj.__class__.__name__} with dimensions {obj.dims!r}" ) elif len(options): raise ValueError( f"cannot supply selection options {options!r} for dimension {key!r}" "that has no associated coordinate or index" ) else: # key is a dimension without a "dimension-coordinate" # failback to location-based selection # TODO: depreciate this implicit behavior and suggest using isel instead? 
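# illustrative sketch of this fallback (user-facing example assumed, not from the source):
#   da = xr.DataArray(np.arange(3), dims="y")  # dimension "y" has no coordinate
#   da.sel(y=1)                                # selected positionally, like da.isel(y=1)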
unique_indexes[None] = None grouped_indexers[None][key] = label return [(unique_indexes[k], grouped_indexers[k]) for k in unique_indexes] def map_index_queries( obj: T_Xarray, indexers: Mapping[Any, Any], method=None, tolerance: int | float | Iterable[int | float] | None = None, **indexers_kwargs: Any, ) -> IndexSelResult: """Execute index queries from a DataArray / Dataset and label-based indexers and return the (merged) query results. """ from xarray.core.dataarray import DataArray # TODO benbovy - flexible indexes: remove when custom index options are available if method is None and tolerance is None: options = {} else: options = {"method": method, "tolerance": tolerance} indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "map_index_queries") grouped_indexers = group_indexers_by_index(obj, indexers, options) results = [] for index, labels in grouped_indexers: if index is None: # forward dimension indexers with no index/coordinate results.append(IndexSelResult(labels)) else: results.append(index.sel(labels, **options)) merged = merge_sel_results(results) # drop dimension coordinates found in dimension indexers # (also drop multi-index if any) # (.sel() already ensures alignment) for k, v in merged.dim_indexers.items(): if isinstance(v, DataArray): if k in v._indexes: v = v.reset_index(k) drop_coords = [name for name in v._coords if name in merged.dim_indexers] merged.dim_indexers[k] = v.drop_vars(drop_coords) return merged def expanded_indexer(key, ndim): """Given a key for indexing an ndarray, return an equivalent key which is a tuple with length equal to the number of dimensions. The expansion is done by replacing all `Ellipsis` items with the right number of full slices and then padding the key with full slices so that it reaches the appropriate dimensionality. """ if not isinstance(key, tuple): # numpy treats non-tuple keys equivalent to tuples of length 1 key = (key,) new_key = [] # handling Ellipsis right is a little tricky, see: # https://numpy.org/doc/stable/reference/arrays.indexing.html#advanced-indexing found_ellipsis = False for k in key: if k is Ellipsis: if not found_ellipsis: new_key.extend((ndim + 1 - len(key)) * [slice(None)]) found_ellipsis = True else: new_key.append(slice(None)) else: new_key.append(k) if len(new_key) > ndim: raise IndexError("too many indices") new_key.extend((ndim - len(new_key)) * [slice(None)]) return tuple(new_key) def normalize_slice(sl: slice, size: int) -> slice: """ Ensure that given slice only contains positive start and stop values (stop can be -1 for full-size slices with negative steps, e.g. [-10::-1]) Examples -------- >>> normalize_slice(slice(0, 9), 10) slice(0, 9, 1) >>> normalize_slice(slice(0, -1), 10) slice(0, 9, 1) """ return slice(*sl.indices(size)) def _expand_slice(slice_: slice, size: int) -> np.ndarray[Any, np.dtype[np.integer]]: """ Expand slice to an array containing only positive integers. 
Examples -------- >>> _expand_slice(slice(0, 9), 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8]) >>> _expand_slice(slice(0, -1), 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8]) """ sl = normalize_slice(slice_, size) return np.arange(sl.start, sl.stop, sl.step) def slice_slice(old_slice: slice, applied_slice: slice, size: int) -> slice: """Given a slice and the size of the dimension to which it will be applied, index it with another slice to return a new slice equivalent to applying the slices sequentially """ old_slice = normalize_slice(old_slice, size) size_after_old_slice = len(range(old_slice.start, old_slice.stop, old_slice.step)) if size_after_old_slice == 0: # nothing left after applying first slice return slice(0) applied_slice = normalize_slice(applied_slice, size_after_old_slice) start = old_slice.start + applied_slice.start * old_slice.step if start < 0: # nothing left after applying second slice # (can only happen for old_slice.step < 0, e.g. [10::-1], [20:]) return slice(0) stop = old_slice.start + applied_slice.stop * old_slice.step if stop < 0: stop = None step = old_slice.step * applied_slice.step return slice(start, stop, step) def normalize_array( array: np.ndarray[Any, np.dtype[np.integer]], size: int ) -> np.ndarray[Any, np.dtype[np.integer]]: """ Ensure that the given array only contains positive values. Examples -------- >>> normalize_array(np.array([-1, -2, -3, -4]), 10) array([9, 8, 7, 6]) >>> normalize_array(np.array([-5, 3, 5, -1, 8]), 12) array([ 7, 3, 5, 11, 8]) """ if np.issubdtype(array.dtype, np.unsignedinteger): return array return np.where(array >= 0, array, array + size) def slice_slice_by_array( old_slice: slice, array: np.ndarray[Any, np.dtype[np.integer]], size: int, ) -> np.ndarray[Any, np.dtype[np.integer]]: """Given a slice and the size of the dimension to which it will be applied, index it with an array to return a new array equivalent to applying the slices sequentially Examples -------- >>> slice_slice_by_array(slice(2, 10), np.array([1, 3, 5]), 12) array([3, 5, 7]) >>> slice_slice_by_array(slice(1, None, 2), np.array([1, 3, 7, 8]), 20) array([ 3, 7, 15, 17]) >>> slice_slice_by_array(slice(None, None, -1), np.array([2, 4, 7]), 20) array([17, 15, 12]) """ # to get a concrete slice, limited to the size of the array normalized_slice = normalize_slice(old_slice, size) size_after_slice = len(range(*normalized_slice.indices(size))) normalized_array = normalize_array(array, size_after_slice) new_indexer = normalized_array * normalized_slice.step + normalized_slice.start if np.any(new_indexer >= size): raise IndexError("indices out of bounds") # TODO: more helpful error message return new_indexer def normalize_indexer(indexer, size): if isinstance(indexer, slice): return normalize_slice(indexer, size) elif isinstance(indexer, np.ndarray): return normalize_array(indexer, size) else: if indexer < 0: return size + indexer return indexer def _index_indexer_1d( old_indexer: OuterIndexerType, applied_indexer: OuterIndexerType, size: int, ) -> OuterIndexerType: if is_full_slice(applied_indexer): # shortcut for the usual case return old_indexer if is_full_slice(old_indexer): # shortcut for full slices return normalize_indexer(applied_indexer, size) indexer: OuterIndexerType if isinstance(old_indexer, slice): if isinstance(applied_indexer, slice): indexer = slice_slice(old_indexer, applied_indexer, size) elif isinstance(applied_indexer, integer_types): indexer = range(*old_indexer.indices(size))[applied_indexer] else: indexer = slice_slice_by_array(old_indexer, applied_indexer, 
size) elif isinstance(old_indexer, np.ndarray): indexer = old_indexer[applied_indexer] else: # should be unreachable raise ValueError("cannot index integers. Please open an issue.") return indexer class ExplicitIndexer: """Base class for explicit indexer objects. ExplicitIndexer objects wrap a tuple of values given by their ``tuple`` property. These tuples should always have length equal to the number of dimensions on the indexed array. Do not instantiate ExplicitIndexer objects directly: instead, use one of the sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer. """ __slots__ = ("_key",) def __init__(self, key: tuple[Any, ...]): if type(self) is ExplicitIndexer: raise TypeError("cannot instantiate base ExplicitIndexer objects") self._key = tuple(key) @property def tuple(self) -> tuple[Any, ...]: return self._key def __repr__(self) -> str: return f"{type(self).__name__}({self.tuple})" @overload def as_integer_or_none(value: int) -> int: ... @overload def as_integer_or_none(value: None) -> None: ... def as_integer_or_none(value: int | None) -> int | None: return None if value is None else operator.index(value) def as_integer_slice(value: slice) -> slice: start = as_integer_or_none(value.start) stop = as_integer_or_none(value.stop) step = as_integer_or_none(value.step) return slice(start, stop, step) class IndexCallable: """Provide getitem and setitem syntax for callable objects.""" __slots__ = ("getter", "setter") def __init__( self, getter: Callable[..., Any], setter: Callable[..., Any] | None = None ): self.getter = getter self.setter = setter def __getitem__(self, key: Any) -> Any: return self.getter(key) def __setitem__(self, key: Any, value: Any) -> None: if self.setter is None: raise NotImplementedError( "Setting values is not supported for this indexer." ) self.setter(key, value) class BasicIndexer(ExplicitIndexer): """Tuple for basic indexing. All elements should be int or slice objects. Indexing follows NumPy's rules for basic indexing: each axis is independently sliced and axes indexed with an integer are dropped from the result. """ __slots__ = () def __init__(self, key: tuple[BasicIndexerType, ...]): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] for k in key: if isinstance(k, integer_types): k = int(k) elif isinstance(k, slice): k = as_integer_slice(k) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}" ) new_key.append(k) super().__init__(tuple(new_key)) class OuterIndexer(ExplicitIndexer): """Tuple for outer/orthogonal indexing. All elements should be int, slice or 1-dimensional np.ndarray objects with an integer dtype. Indexing is applied independently along each axis, and axes indexed with an integer are dropped from the result. This type of indexing works like MATLAB/Fortran.
""" __slots__ = () def __init__( self, key: tuple[BasicIndexerType | np.ndarray[Any, np.dtype[np.generic]], ...], ): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] for k in key: if isinstance(k, integer_types) and not isinstance(k, bool): k = int(k) elif isinstance(k, slice): k = as_integer_slice(k) elif is_duck_array(k): if not np.issubdtype(k.dtype, np.integer): raise TypeError( f"invalid indexer array, does not have integer dtype: {k!r}" ) if k.ndim > 1: # type: ignore[union-attr] raise TypeError( f"invalid indexer array for {type(self).__name__}; must be scalar " f"or have 1 dimension: {k!r}" ) k = duck_array_ops.astype(k, np.int64, copy=False) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}, {type(k)}" ) new_key.append(k) super().__init__(tuple(new_key)) class VectorizedIndexer(ExplicitIndexer): """Tuple for vectorized indexing. All elements should be slice or N-dimensional np.ndarray objects with an integer dtype and the same number of dimensions. Indexing follows proposed rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules (including broadcasting) except sliced axes are always moved to the end: https://github.com/numpy/numpy/pull/6256 """ __slots__ = () def __init__(self, key: tuple[slice | np.ndarray[Any, np.dtype[np.generic]], ...]): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] ndim = None for k in key: if isinstance(k, slice): k = as_integer_slice(k) elif is_duck_array(k): if not np.issubdtype(k.dtype, np.integer): raise TypeError( f"invalid indexer array, does not have integer dtype: {k!r}" ) if ndim is None: ndim = k.ndim # type: ignore[union-attr] elif ndim != k.ndim: # type: ignore[union-attr] ndims = [k.ndim for k in key if isinstance(k, np.ndarray)] raise ValueError( "invalid indexer key: ndarray arguments " f"have different numbers of dimensions: {ndims}" ) k = duck_array_ops.astype(k, np.int64, copy=False) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}" ) new_key.append(k) super().__init__(tuple(new_key)) class ExplicitlyIndexed: """Mixin to mark support for Indexer subclasses in indexing.""" __slots__ = () def __array__( self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: # Leave casting to an array up to the underlying array type. 
if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy) else: return np.asarray(self.get_duck_array(), dtype=dtype) def get_duck_array(self): return self.array class ExplicitlyIndexedNDArrayMixin(NDArrayMixin, ExplicitlyIndexed): __slots__ = () def get_duck_array(self): raise NotImplementedError async def async_get_duck_array(self): raise NotImplementedError def _oindex_get(self, indexer: OuterIndexer): raise NotImplementedError( f"{self.__class__.__name__}._oindex_get method should be overridden" ) def _vindex_get(self, indexer: VectorizedIndexer): raise NotImplementedError( f"{self.__class__.__name__}._vindex_get method should be overridden" ) def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: raise NotImplementedError( f"{self.__class__.__name__}._oindex_set method should be overridden" ) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise NotImplementedError( f"{self.__class__.__name__}._vindex_set method should be overridden" ) def _check_and_raise_if_non_basic_indexer(self, indexer: ExplicitIndexer) -> None: if isinstance(indexer, VectorizedIndexer | OuterIndexer): raise TypeError( "Vectorized indexing with vectorized or outer indexers is not supported. " "Please use .vindex and .oindex properties to index the array." ) @property def oindex(self) -> IndexCallable: return IndexCallable(self._oindex_get, self._oindex_set) @property def vindex(self) -> IndexCallable: return IndexCallable(self._vindex_get, self._vindex_set) class IndexingAdapter(ExplicitlyIndexedNDArrayMixin): """Marker class for indexing adapters. These classes translate between Xarray's indexing semantics and the underlying array's indexing semantics. """ def get_duck_array(self): key = BasicIndexer((slice(None),) * self.ndim) return self[key] async def async_get_duck_array(self): """These classes are applied to in-memory arrays, so specific async support isn't needed.""" return self.get_duck_array() class ImplicitToExplicitIndexingAdapter(NDArrayMixin): """Wrap an array, converting tuples into the indicated explicit indexer.""" __slots__ = ("array", "indexer_cls") def __init__(self, array, indexer_cls: type[ExplicitIndexer] = BasicIndexer): self.array = as_indexable(array) self.indexer_cls = indexer_cls def __array__( self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy) else: return np.asarray(self.get_duck_array(), dtype=dtype) def get_duck_array(self): return self.array.get_duck_array() def __getitem__(self, key: Any): key = expanded_indexer(key, self.ndim) indexer = self.indexer_cls(key) result = apply_indexer(self.array, indexer) if isinstance(result, ExplicitlyIndexed): return type(self)(result, self.indexer_cls) else: # Sometimes explicitly indexed arrays return NumPy arrays or # scalars. return result class LazilyIndexedArray(ExplicitlyIndexedNDArrayMixin): """Wrap an array to make basic and outer indexing lazy.""" __slots__ = ("_shape", "array", "key") def __init__(self, array: Any, key: ExplicitIndexer | None = None): """ Parameters ---------- array : array_like Array like object to index. key : ExplicitIndexer, optional Array indexer. If provided, it is assumed to already be in canonical expanded form. 
""" if isinstance(array, type(self)) and key is None: # unwrap key = array.key # type: ignore[has-type, unused-ignore] array = array.array # type: ignore[has-type, unused-ignore] if key is None: key = BasicIndexer((slice(None),) * array.ndim) self.array = as_indexable(array) self.key = key shape: _Shape = () for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, slice): shape += (len(range(*k.indices(size))),) elif isinstance(k, np.ndarray): shape += (k.size,) self._shape = shape def _updated_key(self, new_key: ExplicitIndexer) -> BasicIndexer | OuterIndexer: iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim)) full_key: list[OuterIndexerType] = [] for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, integer_types): full_key.append(k) else: full_key.append(_index_indexer_1d(k, next(iter_new_key), size)) full_key_tuple = tuple(full_key) if all(isinstance(k, integer_types + (slice,)) for k in full_key_tuple): return BasicIndexer(cast(tuple[BasicIndexerType, ...], full_key_tuple)) return OuterIndexer(full_key_tuple) @property def shape(self) -> _Shape: return self._shape def get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = self.array[self.key] else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = array.get_duck_array() return _wrap_numpy_scalars(array) async def async_get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = await self.array.async_getitem(self.key) else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = await array.async_get_duck_array() return _wrap_numpy_scalars(array) def transpose(self, order): return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order) def _oindex_get(self, indexer: OuterIndexer): return type(self)(self.array, self._updated_key(indexer)) def _vindex_get(self, indexer: VectorizedIndexer): array = LazilyVectorizedIndexedArray(self.array, self.key) return array.vindex[indexer] def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(self.array, self._updated_key(indexer)) def _vindex_set(self, key: VectorizedIndexer, value: Any) -> None: raise NotImplementedError( "Lazy item assignment with the vectorized indexer is not yet " "implemented. Load your data first by .load() or compute()." ) def _oindex_set(self, key: OuterIndexer, value: Any) -> None: full_key = self._updated_key(key) self.array.oindex[full_key] = value def __setitem__(self, key: BasicIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(key) full_key = self._updated_key(key) self.array[full_key] = value def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})" # keep an alias to the old name for external backends pydata/xarray#5111 LazilyOuterIndexedArray = LazilyIndexedArray class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin): """Wrap an array to make vectorized indexing lazy.""" __slots__ = ("array", "key") def __init__(self, array: duckarray[Any, Any], key: ExplicitIndexer): """ Parameters ---------- array : array_like Array like object to index. 
key : VectorizedIndexer """ if isinstance(key, BasicIndexer | OuterIndexer): self.key = _outer_to_vectorized_indexer(key, array.shape) elif isinstance(key, VectorizedIndexer): self.key = _arrayize_vectorized_indexer(key, array.shape) self.array = as_indexable(array) @property def shape(self) -> _Shape: return np.broadcast(*self.key.tuple).shape def get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = self.array[self.key] else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = array.get_duck_array() return _wrap_numpy_scalars(array) async def async_get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = await self.array.async_getitem(self.key) else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = await array.async_get_duck_array() return _wrap_numpy_scalars(array) def _updated_key(self, new_key: ExplicitIndexer): return _combine_indexers(self.key, self.shape, new_key) def _oindex_get(self, indexer: OuterIndexer): return type(self)(self.array, self._updated_key(indexer)) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(self.array, self._updated_key(indexer)) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) # If the indexed array becomes a scalar, return LazilyIndexedArray if all(isinstance(ind, integer_types) for ind in indexer.tuple): key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple)) return LazilyIndexedArray(self.array, key) return type(self)(self.array, self._updated_key(indexer)) def transpose(self, order): key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple)) return type(self)(self.array, key) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: raise NotImplementedError( "Lazy item assignment with the vectorized indexer is not yet " "implemented. Load your data first by .load() or compute()." 
) def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})" def _wrap_numpy_scalars(array): """Wrap NumPy scalars in 0d arrays.""" ndim = duck_array_ops.ndim(array) if ndim == 0 and ( isinstance(array, np.generic) or not (is_duck_array(array) or isinstance(array, NDArrayMixin)) ): return np.array(array) elif hasattr(array, "dtype"): return array elif ndim == 0: return np.array(array) else: return array class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin): __slots__ = ("_copied", "array") def __init__(self, array: duckarray[Any, Any]): self.array = as_indexable(array) self._copied = False def _ensure_copied(self): if not self._copied: self.array = as_indexable(np.array(self.array)) self._copied = True def get_duck_array(self): return self.array.get_duck_array() async def async_get_duck_array(self): return await self.array.async_get_duck_array() def _oindex_get(self, indexer: OuterIndexer): return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer])) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(_wrap_numpy_scalars(self.array.vindex[indexer])) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(_wrap_numpy_scalars(self.array[indexer])) def transpose(self, order): return self.array.transpose(order) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self._ensure_copied() self.array.vindex[indexer] = value def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self._ensure_copied() self.array.oindex[indexer] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self._ensure_copied() self.array[indexer] = value def __deepcopy__(self, memo): # CopyOnWriteArray is used to wrap backend array objects, which might # point to files on disk, so we can't rely on the default deepcopy # implementation. return type(self)(self.array) class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin): __slots__ = ("array",) def __init__(self, array): self.array = _wrap_numpy_scalars(as_indexable(array)) def get_duck_array(self): duck_array = self.array.get_duck_array() # ensure the array object is cached in-memory self.array = as_indexable(duck_array) return duck_array async def async_get_duck_array(self): duck_array = await self.array.async_get_duck_array() # ensure the array object is cached in-memory self.array = as_indexable(duck_array) return duck_array def _oindex_get(self, indexer: OuterIndexer): return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer])) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(_wrap_numpy_scalars(self.array.vindex[indexer])) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(_wrap_numpy_scalars(self.array[indexer])) def transpose(self, order): return self.array.transpose(order) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self.array.vindex[indexer] = value def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self.array.oindex[indexer] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer] = value def as_indexable(array): """ This function always returns an ExplicitlyIndexed subclass, so that the vectorized indexing is always possible with the returned object. 
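Examples
--------
Illustrative only; the adapter class depends on the wrapped array type,
and already-wrapped arrays are returned unchanged:

>>> adapter = as_indexable(np.arange(3))
>>> type(adapter).__name__
'NumpyIndexingAdapter'
>>> as_indexable(adapter) is adapter
True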
""" if isinstance(array, ExplicitlyIndexed): return array if isinstance(array, np.ndarray): return NumpyIndexingAdapter(array) if isinstance(array, pd.Index): return PandasIndexingAdapter(array) if is_duck_dask_array(array): return DaskIndexingAdapter(array) if hasattr(array, "__array_namespace__"): return ArrayApiIndexingAdapter(array) if hasattr(array, "__array_function__"): return NdArrayLikeIndexingAdapter(array) raise TypeError(f"Invalid array type: {type(array)}") def _outer_to_vectorized_indexer( indexer: BasicIndexer | OuterIndexer, shape: _Shape ) -> VectorizedIndexer: """Convert an OuterIndexer into an vectorized indexer. Parameters ---------- indexer : Outer/Basic Indexer An indexer to convert. shape : tuple Shape of the array subject to the indexing. Returns ------- VectorizedIndexer Tuple suitable for use to index a NumPy array with vectorized indexing. Each element is an array: broadcasting them together gives the shape of the result. """ key = indexer.tuple n_dim = len([k for k in key if not isinstance(k, integer_types)]) i_dim = 0 new_key = [] for k, size in zip(key, shape, strict=True): if isinstance(k, integer_types): new_key.append(np.array(k).reshape((1,) * n_dim)) else: # np.ndarray or slice if isinstance(k, slice): k = np.arange(*k.indices(size)) assert k.dtype.kind in {"i", "u"} new_shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)] new_key.append(k.reshape(*new_shape)) i_dim += 1 return VectorizedIndexer(tuple(new_key)) def _outer_to_numpy_indexer(indexer: BasicIndexer | OuterIndexer, shape: _Shape): """Convert an OuterIndexer into an indexer for NumPy. Parameters ---------- indexer : Basic/OuterIndexer An indexer to convert. shape : tuple Shape of the array subject to the indexing. Returns ------- tuple Tuple suitable for use to index a NumPy array. """ if len([k for k in indexer.tuple if not isinstance(k, slice)]) <= 1: # If there is only one vector and all others are slice, # it can be safely used in mixed basic/advanced indexing. # Boolean index should already be converted to integer array. return indexer.tuple else: return _outer_to_vectorized_indexer(indexer, shape).tuple def _combine_indexers(old_key, shape: _Shape, new_key) -> VectorizedIndexer: """Combine two indexers. Parameters ---------- old_key : ExplicitIndexer The first indexer for the original array shape : tuple of ints Shape of the original array to be indexed by old_key new_key The second indexer for indexing original[old_key] """ if not isinstance(old_key, VectorizedIndexer): old_key = _outer_to_vectorized_indexer(old_key, shape) if len(old_key.tuple) == 0: return new_key new_shape = np.broadcast(*old_key.tuple).shape if isinstance(new_key, VectorizedIndexer): new_key = _arrayize_vectorized_indexer(new_key, new_shape) else: new_key = _outer_to_vectorized_indexer(new_key, new_shape) return VectorizedIndexer( tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple)) ) @enum.unique class IndexingSupport(enum.Enum): # for backends that support only basic indexer BASIC = 0 # for backends that support basic / outer indexer OUTER = 1 # for backends that support outer indexer including at most 1 vector. OUTER_1VECTOR = 2 # for backends that support full vectorized indexer. VECTORIZED = 3 def explicit_indexing_adapter( key: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport, raw_indexing_method: Callable[..., Any], ) -> Any: """Support explicit indexing by delegating to a raw indexing method. 
Outer and/or vectorized indexers are supported by indexing a second time with a NumPy array. Parameters ---------- key : ExplicitIndexer Explicit indexing object. shape : Tuple[int, ...] Shape of the indexed array. indexing_support : IndexingSupport enum Form of indexing supported by raw_indexing_method. raw_indexing_method : callable Function (like ndarray.__getitem__) that when called with indexing key in the form of a tuple returns an indexed array. Returns ------- Indexing result, in the form of a duck numpy-array. """ raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support) result = raw_indexing_method(raw_key.tuple) if numpy_indices.tuple: # index the loaded duck array indexable = as_indexable(result) result = apply_indexer(indexable, numpy_indices) return result async def async_explicit_indexing_adapter( key: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport, raw_indexing_method: Callable[..., Any], ) -> Any: raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support) result = await raw_indexing_method(raw_key.tuple) if numpy_indices.tuple: # index the loaded duck array indexable = as_indexable(result) result = apply_indexer(indexable, numpy_indices) return result def apply_indexer(indexable, indexer: ExplicitIndexer): """Apply an indexer to an indexable object.""" if isinstance(indexer, VectorizedIndexer): return indexable.vindex[indexer] elif isinstance(indexer, OuterIndexer): return indexable.oindex[indexer] else: return indexable[indexer] def set_with_indexer(indexable, indexer: ExplicitIndexer, value: Any) -> None: """Set values in an indexable object using an indexer.""" if isinstance(indexer, VectorizedIndexer): indexable.vindex[indexer] = value elif isinstance(indexer, OuterIndexer): indexable.oindex[indexer] = value else: indexable[indexer] = value def decompose_indexer( indexer: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport ) -> tuple[ExplicitIndexer, ExplicitIndexer]: if isinstance(indexer, VectorizedIndexer): return _decompose_vectorized_indexer(indexer, shape, indexing_support) if isinstance(indexer, BasicIndexer | OuterIndexer): return _decompose_outer_indexer(indexer, shape, indexing_support) raise TypeError(f"unexpected key type: {indexer}") def _decompose_slice(key: slice, size: int) -> tuple[slice, slice]: """convert a slice to successive two slices. The first slice always has a positive step. >>> _decompose_slice(slice(2, 98, 2), 99) (slice(2, 98, 2), slice(None, None, None)) >>> _decompose_slice(slice(98, 2, -2), 99) (slice(4, 99, 2), slice(None, None, -1)) >>> _decompose_slice(slice(98, 2, -2), 98) (slice(3, 98, 2), slice(None, None, -1)) >>> _decompose_slice(slice(360, None, -10), 361) (slice(0, 361, 10), slice(None, None, -1)) """ start, stop, step = key.indices(size) if step > 0: # If key already has a positive step, use it as is in the backend return key, slice(None) else: # determine stop precisely for step > 1 case # Use the range object to do the calculation # e.g. [98:2:-2] -> [98:3:-2] exact_stop = range(start, stop, step)[-1] return slice(exact_stop, start + 1, -step), slice(None, None, -1) def _decompose_vectorized_indexer( indexer: VectorizedIndexer, shape: _Shape, indexing_support: IndexingSupport, ) -> tuple[ExplicitIndexer, ExplicitIndexer]: """ Decompose vectorized indexer to the successive two indexers, where the first indexer will be used to index backend arrays, while the second one is used to index loaded on-memory np.ndarray. 
Parameters ---------- indexer : VectorizedIndexer indexing_support : one of IndexerSupport entries Returns ------- backend_indexer: OuterIndexer or BasicIndexer np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer) Notes ----- This function is used to realize the vectorized indexing for the backend arrays that only support basic or outer indexing. As an example, let us consider to index a few elements from a backend array with a vectorized indexer ([0, 3, 1], [2, 3, 2]). Even if the backend array only supports outer indexing, it is more efficient to load a subslice of the array than loading the entire array, >>> array = np.arange(36).reshape(6, 6) >>> backend_indexer = OuterIndexer((np.array([0, 1, 3]), np.array([2, 3]))) >>> # load subslice of the array ... array = NumpyIndexingAdapter(array).oindex[backend_indexer] >>> np_indexer = VectorizedIndexer((np.array([0, 2, 1]), np.array([0, 1, 0]))) >>> # vectorized indexing for on-memory np.ndarray. ... NumpyIndexingAdapter(array).vindex[np_indexer] array([ 2, 21, 8]) """ assert isinstance(indexer, VectorizedIndexer) if indexing_support is IndexingSupport.VECTORIZED: return indexer, BasicIndexer(()) backend_indexer_elems: list[slice | np.ndarray[Any, np.dtype[np.generic]]] = [] np_indexer_elems: list[slice | np.ndarray[Any, np.dtype[np.generic]]] = [] # convert negative indices indexer_elems = [ np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k for k, s in zip(indexer.tuple, shape, strict=True) ] for k, s in zip(indexer_elems, shape, strict=True): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, # and then use all of it (slice(None)) for the in-memory portion. bk_slice, np_slice = _decompose_slice(k, s) backend_indexer_elems.append(bk_slice) np_indexer_elems.append(np_slice) else: # If it is a (multidimensional) np.ndarray, just pickup the used # keys without duplication and store them as a 1d-np.ndarray. oind, vind = np.unique(k, return_inverse=True) backend_indexer_elems.append(oind) np_indexer_elems.append(vind.reshape(*k.shape)) backend_indexer = OuterIndexer(tuple(backend_indexer_elems)) np_indexer = VectorizedIndexer(tuple(np_indexer_elems)) if indexing_support is IndexingSupport.OUTER: return backend_indexer, np_indexer # If the backend does not support outer indexing, # backend_indexer (OuterIndexer) is also decomposed. backend_indexer1, np_indexer1 = _decompose_outer_indexer( backend_indexer, shape, indexing_support ) np_indexer = _combine_indexers(np_indexer1, shape, np_indexer) return backend_indexer1, np_indexer def _decompose_outer_indexer( indexer: BasicIndexer | OuterIndexer, shape: _Shape, indexing_support: IndexingSupport, ) -> tuple[ExplicitIndexer, ExplicitIndexer]: """ Decompose outer indexer to the successive two indexers, where the first indexer will be used to index backend arrays, while the second one is used to index the loaded on-memory np.ndarray. Parameters ---------- indexer : OuterIndexer or BasicIndexer indexing_support : One of the entries of IndexingSupport Returns ------- backend_indexer: OuterIndexer or BasicIndexer np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer) Notes ----- This function is used to realize the vectorized indexing for the backend arrays that only support basic or outer indexing. As an example, let us consider to index a few elements from a backend array with an orthogonal indexer ([0, 3, 1], [2, 3, 2]). 
Even if the backend array only supports basic indexing, it is more efficient to load a subslice of the array than loading the entire array, >>> array = np.arange(36).reshape(6, 6) >>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4))) >>> # load subslice of the array ... array = NumpyIndexingAdapter(array)[backend_indexer] >>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0]))) >>> # outer indexing for on-memory np.ndarray. ... NumpyIndexingAdapter(array).oindex[np_indexer] array([[ 2, 3, 2], [14, 15, 14], [ 8, 9, 8]]) """ backend_indexer: list[Any] = [] np_indexer: list[Any] = [] assert isinstance(indexer, OuterIndexer | BasicIndexer) if indexing_support == IndexingSupport.VECTORIZED: for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, bk_slice, np_slice = _decompose_slice(k, s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) else: backend_indexer.append(k) if not is_scalar(k): np_indexer.append(slice(None)) return type(indexer)(tuple(backend_indexer)), BasicIndexer(tuple(np_indexer)) # make indexer positive pos_indexer: list[np.ndarray | int | np.number] = [] for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, np.ndarray): pos_indexer.append(np.where(k < 0, k + s, k)) elif isinstance(k, integer_types) and k < 0: pos_indexer.append(k + s) else: pos_indexer.append(k) indexer_elems = pos_indexer if indexing_support is IndexingSupport.OUTER_1VECTOR: # some backends such as h5py supports only 1 vector in indexers # We choose the most efficient axis gains = [ ( (np.max(k) - np.min(k) + 1.0) / len(np.unique(k)) if isinstance(k, np.ndarray) and k.size != 0 else 0 ) for k in indexer_elems ] array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None for i, (k, s) in enumerate(zip(indexer_elems, shape, strict=False)): if isinstance(k, np.ndarray) and k.size == 0: # empty np.ndarray key is converted to empty slice # see https://github.com/pydata/xarray/issues/10867 backend_indexer.append(slice(0, 0)) elif isinstance(k, np.ndarray) and i != array_index: # np.ndarray key is converted to slice that covers the entire # entries of this key. 
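# For example, a key k = np.array([7, 3, 5]) becomes the backend slice
# slice(3, 8) plus the in-memory indexer k - 3 = array([4, 0, 2]).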
backend_indexer.append(slice(np.min(k), np.max(k) + 1)) np_indexer.append(k - np.min(k)) elif isinstance(k, np.ndarray): # Remove duplicates and sort them in the increasing order pkey, ekey = np.unique(k, return_inverse=True) backend_indexer.append(pkey) np_indexer.append(ekey) elif isinstance(k, integer_types): backend_indexer.append(k) else: # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(cast(slice, k), s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) if indexing_support == IndexingSupport.OUTER: for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, slice): # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(k, s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) elif isinstance(k, integer_types): backend_indexer.append(k) elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all(): backend_indexer.append(k) np_indexer.append(slice(None)) else: # Remove duplicates and sort them in the increasing order oind, vind = np.unique(k, return_inverse=True) backend_indexer.append(oind) np_indexer.append(vind.reshape(*k.shape)) return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) # basic indexer assert indexing_support == IndexingSupport.BASIC for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, np.ndarray): # np.ndarray key is converted to slice that covers the entire # entries of this key. backend_indexer.append(slice(np.min(k), np.max(k) + 1)) np_indexer.append(k - np.min(k)) elif isinstance(k, integer_types): backend_indexer.append(k) else: # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(cast(slice, k), s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) def _posify_indices(indices: Any, size: int) -> np.ndarray: """Convert negative indices by their equivalent positive indices. Note: the resulting indices may still be out of bounds (< 0 or >= size). """ return np.where(indices < 0, size + indices, indices) def _check_bounds(indices: Any, size: int): """Check if the given indices are all within the array boundaries.""" if np.any((indices < 0) | (indices >= size)): raise IndexError("out of bounds index") def _arrayize_outer_indexer(indexer: OuterIndexer, shape) -> OuterIndexer: """Return a similar oindex with after replacing slices by arrays and negative indices by their corresponding positive indices. Also check if array indices are within bounds. 
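For example, with an array of shape ``(5,)`` the outer indexer
``(slice(1, 4),)`` becomes ``(array([1, 2, 3]),)`` and ``(array([-1]),)``
becomes ``(array([4]),)``.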
""" new_key = [] for axis, value in enumerate(indexer.tuple): size = shape[axis] if isinstance(value, slice): value = _expand_slice(value, size) else: value = _posify_indices(value, size) _check_bounds(value, size) new_key.append(value) return OuterIndexer(tuple(new_key)) def _arrayize_vectorized_indexer( indexer: VectorizedIndexer, shape: _Shape ) -> VectorizedIndexer: """Return an identical vindex but slices are replaced by arrays""" slices = [v for v in indexer.tuple if isinstance(v, slice)] if len(slices) == 0: return indexer arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)] n_dim = arrays[0].ndim if len(arrays) > 0 else 0 i_dim = 0 new_key = [] for v, size in zip(indexer.tuple, shape, strict=True): if isinstance(v, np.ndarray): new_key.append(np.reshape(v, v.shape + (1,) * len(slices))) else: # slice shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1) new_key.append(np.arange(*v.indices(size)).reshape(shape)) i_dim += 1 return VectorizedIndexer(tuple(new_key)) def _chunked_array_with_chunks_hint( array, chunks, chunkmanager: ChunkManagerEntrypoint[Any] ): """Create a chunked array using the chunks hint for dimensions of size > 1.""" if len(chunks) < array.ndim: raise ValueError("not enough chunks in hint") new_chunks = [] for chunk, size in zip(chunks, array.shape, strict=False): new_chunks.append(chunk if size > 1 else (1,)) return chunkmanager.from_array(array, new_chunks) # type: ignore[arg-type] def _logical_any(args): return functools.reduce(operator.or_, args) def _masked_result_drop_slice(key, data: duckarray[Any, Any] | None = None): key = (k for k in key if not isinstance(k, slice)) chunks_hint = getattr(data, "chunks", None) new_keys = [] for k in key: if isinstance(k, np.ndarray): if is_chunked_array(data): # type: ignore[arg-type] chunkmanager = get_chunked_array_type(data) new_keys.append( _chunked_array_with_chunks_hint(k, chunks_hint, chunkmanager) ) elif isinstance(data, array_type("sparse")): import sparse new_keys.append(sparse.COO.from_numpy(k)) else: new_keys.append(k) else: new_keys.append(k) mask = _logical_any(k == -1 for k in new_keys) return mask def create_mask( indexer: ExplicitIndexer, shape: _Shape, data: duckarray[Any, Any] | None = None ): """Create a mask for indexing with a fill-value. Parameters ---------- indexer : ExplicitIndexer Indexer with -1 in integer or ndarray value to indicate locations in the result that should be masked. shape : tuple Shape of the array being indexed. data : optional Data for which mask is being created. If data is a dask arrays, its chunks are used as a hint for chunks on the resulting mask. If data is a sparse array, the returned mask is also a sparse array. Returns ------- mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool Same type as data. Has the same shape as the indexing result. 
""" if isinstance(indexer, OuterIndexer): key = _outer_to_vectorized_indexer(indexer, shape).tuple assert not any(isinstance(k, slice) for k in key) mask = _masked_result_drop_slice(key, data) elif isinstance(indexer, VectorizedIndexer): key = indexer.tuple base_mask = _masked_result_drop_slice(key, data) slice_shape = tuple( np.arange(*k.indices(size)).size for k, size in zip(key, shape, strict=False) if isinstance(k, slice) ) expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)] mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape) elif isinstance(indexer, BasicIndexer): mask = any(k == -1 for k in indexer.tuple) else: raise TypeError(f"unexpected key type: {type(indexer)}") return mask def _posify_mask_subindexer( index: np.ndarray[Any, np.dtype[np.generic]], ) -> np.ndarray[Any, np.dtype[np.generic]]: """Convert masked indices in a flat array to the nearest unmasked index. Parameters ---------- index : np.ndarray One dimensional ndarray with dtype=int. Returns ------- np.ndarray One dimensional ndarray with all values equal to -1 replaced by an adjacent non-masked element. """ masked = index == -1 unmasked_locs = np.flatnonzero(~masked) if not unmasked_locs.size: # indexing unmasked_locs is invalid return np.zeros_like(index) masked_locs = np.flatnonzero(masked) prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1) new_index = index.copy() new_index[masked_locs] = index[unmasked_locs[prev_value]] return new_index def posify_mask_indexer(indexer: ExplicitIndexer) -> ExplicitIndexer: """Convert masked values (-1) in an indexer to nearest unmasked values. This routine is useful for dask, where it can be much faster to index adjacent points than arbitrary points from the end of an array. Parameters ---------- indexer : ExplicitIndexer Input indexer. Returns ------- ExplicitIndexer Same type of input, with all values in ndarray keys equal to -1 replaced by an adjacent non-masked element. """ key = tuple( ( _posify_mask_subindexer(k.ravel()).reshape(k.shape) if isinstance(k, np.ndarray) else k ) for k in indexer.tuple ) return type(indexer)(key) def is_fancy_indexer(indexer: Any) -> bool: """Return False if indexer is an int, slice, a 1-dimensional list, or a 0 or 1-dimensional ndarray; in all other cases return True """ if isinstance(indexer, int | slice) and not isinstance(indexer, bool): return False if isinstance(indexer, np.ndarray): return indexer.ndim > 1 if isinstance(indexer, list): return bool(indexer) and not isinstance(indexer[0], int) return True class NumpyIndexingAdapter(IndexingAdapter): """Wrap a NumPy array to use explicit indexing.""" __slots__ = ("array",) def __init__(self, array): # In NumpyIndexingAdapter we only allow to store bare np.ndarray if not isinstance(array, np.ndarray): raise TypeError( "NumpyIndexingAdapter only wraps np.ndarray. " f"Trying to wrap {type(array)}" ) self.array = array def transpose(self, order): return self.array.transpose(order) def _oindex_get(self, indexer: OuterIndexer): key = _outer_to_numpy_indexer(indexer, self.array.shape) return self.array[key] def _vindex_get(self, indexer: VectorizedIndexer): _assert_not_chunked_indexer(indexer.tuple) array = NumpyVIndexAdapter(self.array) return array[indexer.tuple] def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) array = self.array # We want 0d slices rather than scalars. 
This is achieved by # appending an ellipsis (see # https://numpy.org/doc/stable/reference/arrays.indexing.html#detailed-notes). key = indexer.tuple + (Ellipsis,) return array[key] def _safe_setitem(self, array, key: tuple[Any, ...], value: Any) -> None: try: array[key] = value except ValueError as exc: # More informative exception if read-only view if not array.flags.writeable and not array.flags.owndata: raise ValueError( "Assignment destination is a view. " "Do you want to .copy() array first?" ) from exc else: raise exc def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: key = _outer_to_numpy_indexer(indexer, self.array.shape) self._safe_setitem(self.array, key, value) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: array = NumpyVIndexAdapter(self.array) self._safe_setitem(array, indexer.tuple, value) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) array = self.array # We want 0d slices rather than scalars. This is achieved by # appending an ellipsis (see # https://numpy.org/doc/stable/reference/arrays.indexing.html#detailed-notes). key = indexer.tuple + (Ellipsis,) self._safe_setitem(array, key, value) class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter): __slots__ = ("array",) def __init__(self, array): if not hasattr(array, "__array_function__"): raise TypeError( "NdArrayLikeIndexingAdapter must wrap an object that " "implements the __array_function__ protocol" ) self.array = array class ArrayApiIndexingAdapter(IndexingAdapter): """Wrap an array API array to use explicit indexing.""" __slots__ = ("array",) def __init__(self, array): if not hasattr(array, "__array_namespace__"): raise TypeError( "ArrayApiIndexingAdapter must wrap an object that " "implements the __array_namespace__ protocol" ) self.array = array def _oindex_get(self, indexer: OuterIndexer): # manual orthogonal indexing (implemented like DaskIndexingAdapter) key = indexer.tuple value = self.array for axis, subkey in reversed(list(enumerate(key))): value = value[(slice(None),) * axis + (subkey, Ellipsis)] return value def _vindex_get(self, indexer: VectorizedIndexer): raise TypeError("Vectorized indexing is not supported") def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return self.array[indexer.tuple] def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self.array[indexer.tuple] = value def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise TypeError("Vectorized indexing is not supported") def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer.tuple] = value def transpose(self, order): xp = self.array.__array_namespace__() return xp.permute_dims(self.array, order) def _apply_vectorized_indexer_dask_wrapper(indices, coord): from xarray.core.indexing import VectorizedIndexer, apply_indexer, as_indexable return apply_indexer( as_indexable(coord), VectorizedIndexer((indices.squeeze(axis=-1),)) ) def _assert_not_chunked_indexer(idxr: tuple[Any, ...]) -> None: if any(is_chunked_array(i) for i in idxr): raise ValueError( "Cannot index with a chunked array indexer. " "Please chunk the array you are indexing first, " "and drop any indexed dimension coordinate variables. " "Alternatively, call `.compute()` on any chunked arrays in the indexer." 
) class DaskIndexingAdapter(IndexingAdapter): """Wrap a dask array to support explicit indexing.""" __slots__ = ("array",) def __init__(self, array): """This adapter is created in Variable.__getitem__ in Variable._broadcast_indexes. """ self.array = array def _oindex_get(self, indexer: OuterIndexer): key = indexer.tuple try: return self.array[key] except NotImplementedError: # manual orthogonal indexing value = self.array for axis, subkey in reversed(list(enumerate(key))): value = value[(slice(None),) * axis + (subkey,)] return value def _vindex_get(self, indexer: VectorizedIndexer): try: return self.array.vindex[indexer.tuple] except IndexError as e: # TODO: upstream to dask has_dask = any(is_duck_dask_array(i) for i in indexer.tuple) # this only works for "small" 1d coordinate arrays with one chunk # it is intended for idxmin, idxmax, and allows indexing with # the nD array output of argmin, argmax if ( not has_dask or len(indexer.tuple) > 1 or math.prod(self.array.numblocks) > 1 or self.array.ndim > 1 ): raise e (idxr,) = indexer.tuple if idxr.ndim == 0: return self.array[idxr.data] else: import dask.array return dask.array.map_blocks( _apply_vectorized_indexer_dask_wrapper, idxr[..., np.newaxis], self.array, chunks=idxr.chunks, drop_axis=-1, dtype=self.array.dtype, ) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return self.array[indexer.tuple] def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: num_non_slices = sum(0 if isinstance(k, slice) else 1 for k in indexer.tuple) if num_non_slices > 1: raise NotImplementedError( "xarray can't set arrays with multiple array indices to dask yet." ) self.array[indexer.tuple] = value def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self.array.vindex[indexer.tuple] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer.tuple] = value def transpose(self, order): return self.array.transpose(order) class PandasIndexingAdapter(IndexingAdapter): """Wrap a pandas.Index to preserve dtypes and handle explicit indexing.""" __slots__ = ("_dtype", "array") array: pd.Index _dtype: np.dtype | pd.api.extensions.ExtensionDtype def __init__( self, array: pd.Index, dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None, ): from xarray.core.indexes import safe_cast_to_index self.array = safe_cast_to_index(array) if dtype is None: if is_allowed_extension_array(array): cast(pd.api.extensions.ExtensionDtype, array.dtype) self._dtype = array.dtype else: self._dtype = get_valid_numpy_dtype(array) elif is_allowed_extension_array_dtype(dtype): self._dtype = cast(pd.api.extensions.ExtensionDtype, dtype) else: self._dtype = np.dtype(cast(DTypeLike, dtype)) @property def _in_memory(self) -> bool: # prevent costly conversion of a memory-saving pd.RangeIndex into a # large numpy array. 
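# For example, wrapping a pd.RangeIndex reports _in_memory = False, while
# a materialized pd.Index holding the same values reports _in_memory = True.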
return not isinstance(self.array, pd.RangeIndex) @property def dtype(self) -> np.dtype | pd.api.extensions.ExtensionDtype: # type: ignore[override] return self._dtype def _get_numpy_dtype(self, dtype: np.typing.DTypeLike | None = None) -> np.dtype: if dtype is None: if is_valid_numpy_dtype(self.dtype): return cast(np.dtype, self.dtype) else: return get_valid_numpy_dtype(self.array) else: return np.dtype(dtype) def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: dtype = self._get_numpy_dtype(dtype) array = self.array if isinstance(array, pd.PeriodIndex): with suppress(AttributeError): # this might not be public API array = array.astype("object") if Version(np.__version__) >= Version("2.0.0"): return np.asarray(array.values, dtype=dtype, copy=copy) else: return np.asarray(array.values, dtype=dtype) def get_duck_array(self) -> np.ndarray | PandasExtensionArray: # We return an PandasExtensionArray wrapper type that satisfies # duck array protocols. # `NumpyExtensionArray` is excluded if is_allowed_extension_array(self.array): from xarray.core.extension_array import PandasExtensionArray return PandasExtensionArray(self.array.array) return np.asarray(self) @property def shape(self) -> _Shape: return (len(self.array),) def _convert_scalar(self, item) -> np.ndarray: if item is pd.NaT: # work around the impossibility of casting NaT with asarray # note: it probably would be better in general to return # pd.Timestamp rather np.than datetime64 but this is easier # (for now) item = np.datetime64("NaT", "ns") elif isinstance(item, pd.Timedelta): item = item.to_numpy() elif isinstance(item, timedelta): item = np.timedelta64(item) elif isinstance(item, pd.Timestamp): # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668 # numpy fails to convert pd.Timestamp to np.datetime64[ns] item = np.asarray(item.to_datetime64()) elif self.dtype != object: dtype = self._get_numpy_dtype() item = np.asarray(item, dtype=dtype) # as for numpy.ndarray indexing, we always want the result to be # a NumPy array. return to_0d_array(item) def _index_get( self, indexer: ExplicitIndexer, func_name: str ) -> PandasIndexingAdapter | np.ndarray: key = indexer.tuple if len(key) == 1: # unpack key so it can index a pandas.Index object (pandas.Index # objects don't like tuples) (key,) = key # if multidimensional key, convert the index to numpy array and index the latter if getattr(key, "ndim", 0) > 1: indexable = NumpyIndexingAdapter(np.asarray(self)) return getattr(indexable, func_name)(indexer) # otherwise index the pandas index then re-wrap or convert the result result = self.array[key] if isinstance(result, pd.Index): return type(self)(result, dtype=self.dtype) else: return self._convert_scalar(result) def _oindex_get(self, indexer: OuterIndexer) -> PandasIndexingAdapter | np.ndarray: return self._index_get(indexer, "_oindex_get") def _vindex_get( self, indexer: VectorizedIndexer ) -> PandasIndexingAdapter | np.ndarray: _assert_not_chunked_indexer(indexer.tuple) return self._index_get(indexer, "_vindex_get") def __getitem__( self, indexer: ExplicitIndexer ) -> PandasIndexingAdapter | np.ndarray: return self._index_get(indexer, "__getitem__") def transpose(self, order) -> pd.Index: return self.array # self.array should be always one-dimensional def _repr_inline_(self, max_width: int) -> str: # we want to display values in the inline repr for lazy coordinates too # (pd.RangeIndex and pd.MultiIndex). `format_array_flat` prevents loading # the whole array in memory. 
from xarray.core.formatting import format_array_flat return format_array_flat(self, max_width) def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, dtype={self.dtype!r})" def copy(self, deep: bool = True) -> Self: # Not the same as just writing `self.array.copy(deep=deep)`, as # shallow copies of the underlying numpy.ndarrays become deep ones # upon pickling # >>> len(pickle.dumps((self.array, self.array))) # 4000281 # >>> len(pickle.dumps((self.array, self.array.copy(deep=False)))) # 8000341 array = self.array.copy(deep=True) if deep else self.array return type(self)(array, self._dtype) @property def nbytes(self) -> int: if is_allowed_extension_array(self.array): return self.array.nbytes dtype = self._get_numpy_dtype() return dtype.itemsize * len(self.array) class PandasMultiIndexingAdapter(PandasIndexingAdapter): """Handles explicit indexing for a pandas.MultiIndex. This allows creating one instance for each multi-index level while preserving indexing efficiency (memoized + might reuse another instance with the same multi-index). """ __slots__ = ("_dtype", "adapter", "array", "level") array: pd.MultiIndex _dtype: np.dtype | pd.api.extensions.ExtensionDtype level: str | None def __init__( self, array: pd.MultiIndex, dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None, level: str | None = None, ): super().__init__(array, dtype) self.level = level def __array__( self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: dtype = self._get_numpy_dtype(dtype) if self.level is not None: return np.asarray( self.array.get_level_values(self.level).values, dtype=dtype ) else: return super().__array__(dtype, copy=copy) @property def _in_memory(self) -> bool: # The pd.MultiIndex's data is fully in memory, but it has a different # layout than the level and dimension coordinate arrays. Marking this # adapter class as a "lazy" array will prevent costly conversion when, # e.g., formatting the Xarray reprs. return False def _convert_scalar(self, item: Any): if isinstance(item, tuple) and self.level is not None: idx = tuple(self.array.names).index(self.level) item = item[idx] return super()._convert_scalar(item) def _index_get( self, indexer: ExplicitIndexer, func_name: str ) -> PandasIndexingAdapter | np.ndarray: result = super()._index_get(indexer, func_name) if isinstance(result, type(self)): result.level = self.level return result def __repr__(self) -> str: if self.level is None: return super().__repr__() else: props = ( f"(array={self.array!r}, level={self.level!r}, dtype={self.dtype!r})" ) return f"{type(self).__name__}{props}" def _repr_inline_(self, max_width: int) -> str: if self.level is None: return "MultiIndex" else: return super()._repr_inline_(max_width=max_width) def copy(self, deep: bool = True) -> Self: # see PandasIndexingAdapter.copy array = self.array.copy(deep=True) if deep else self.array return type(self)(array, self._dtype, self.level) class CoordinateTransformIndexingAdapter(IndexingAdapter): """Wrap a CoordinateTransform as a lazy coordinate array. Supports explicit indexing (both outer and vectorized). """ _transform: CoordinateTransform _coord_name: Hashable _dims: tuple[str, ...] def __init__( self, transform: CoordinateTransform, coord_name: Hashable, dims: tuple[str, ...] 
| None = None, ): self._transform = transform self._coord_name = coord_name self._dims = dims or transform.dims @property def dtype(self) -> np.dtype: return self._transform.dtype @property def shape(self) -> tuple[int, ...]: return tuple(self._transform.dim_size.values()) @property def _in_memory(self) -> bool: return False def get_duck_array(self) -> np.ndarray: all_coords = self._transform.generate_coords(dims=self._dims) return np.asarray(all_coords[self._coord_name]) def _oindex_get(self, indexer: OuterIndexer): expanded_indexer_ = OuterIndexer(expanded_indexer(indexer.tuple, self.ndim)) array_indexer = _arrayize_outer_indexer(expanded_indexer_, self.shape) positions = np.meshgrid(*array_indexer.tuple, indexing="ij") dim_positions = dict(zip(self._dims, positions, strict=False)) result = self._transform.forward(dim_positions) return np.asarray(result[self._coord_name]).squeeze() def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def _vindex_get(self, indexer: VectorizedIndexer): expanded_indexer_ = VectorizedIndexer( expanded_indexer(indexer.tuple, self.ndim) ) array_indexer = _arrayize_vectorized_indexer(expanded_indexer_, self.shape) dim_positions = {} for i, (dim, pos) in enumerate( zip(self._dims, array_indexer.tuple, strict=False) ): pos = _posify_indices(pos, self.shape[i]) _check_bounds(pos, self.shape[i]) dim_positions[dim] = pos result = self._transform.forward(dim_positions) return np.asarray(result[self._coord_name]) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def __getitem__(self, indexer: ExplicitIndexer): # TODO: make it lazy (i.e., re-calculate and re-wrap the transform) when possible? self._check_and_raise_if_non_basic_indexer(indexer) # also works with basic indexing return self._oindex_get(OuterIndexer(indexer.tuple)) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def transpose(self, order: Iterable[int]) -> Self: new_dims = tuple(self._dims[i] for i in order) return type(self)(self._transform, self._coord_name, new_dims) def __repr__(self: Any) -> str: return f"{type(self).__name__}(transform={self._transform!r})" def _repr_inline_(self, max_width: int) -> str: # we want to display values in the inline repr for this lazy coordinate # `format_array_flat` prevents loading the whole array in memory. 
from xarray.core.formatting import format_array_flat return format_array_flat(self, max_width) xarray-2025.12.0/xarray/core/missing.py000066400000000000000000000673611511464676000176410ustar00rootroot00000000000000from __future__ import annotations import datetime as dt import itertools import warnings from collections import ChainMap from collections.abc import Callable, Generator, Hashable, Sequence from functools import partial from numbers import Number from typing import TYPE_CHECKING, Any, TypeVar, get_args import numpy as np import pandas as pd from xarray.computation.apply_ufunc import apply_ufunc from xarray.core import utils from xarray.core.common import _contains_datetime_like_objects, ones_like from xarray.core.duck_array_ops import ( datetime_to_numeric, push, ravel, reshape, stack, timedelta_to_numeric, transpose, ) from xarray.core.options import _get_keep_attrs from xarray.core.types import Interp1dOptions, InterpnOptions, InterpOptions from xarray.core.utils import OrderedSet, is_scalar from xarray.core.variable import ( Variable, broadcast_variables, ) from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset InterpCallable = Callable[..., np.ndarray] # interpn Interpolator = Callable[..., Callable[..., np.ndarray]] # *Interpolator # interpolator objects return callables that can be evaluated SourceDest = dict[Hashable, tuple[Variable, Variable]] T = TypeVar("T") def _get_nan_block_lengths( obj: Dataset | DataArray | Variable, dim: Hashable, index: Variable ): """ Return an object where each NaN element in 'obj' is replaced by the length of the gap the element is in. """ # make variable so that we get broadcasting for free index = Variable([dim], index) # algorithm from https://github.com/pydata/xarray/pull/3302#discussion_r324707072 arange = ones_like(obj) * index valid = obj.notnull() valid_arange = arange.where(valid) cumulative_nans = valid_arange.ffill(dim=dim).fillna(index[0]) nan_block_lengths = ( cumulative_nans.diff(dim=dim, label="upper") .reindex({dim: obj[dim]}) .where(valid) .bfill(dim=dim) .where(~valid, 0) .fillna(index[-1] - valid_arange.max(dim=[dim])) ) return nan_block_lengths class BaseInterpolator: """Generic interpolator class for normalizing interpolation methods""" cons_kwargs: dict[str, Any] call_kwargs: dict[str, Any] f: Callable method: str def __call__(self, x): return self.f(x, **self.call_kwargs) def __repr__(self): return f"{self.__class__.__name__}: method={self.method}" class NumpyInterpolator(BaseInterpolator): """One-dimensional linear interpolation. 
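For example (illustrative only):

>>> interpolator = NumpyInterpolator(np.array([0.0, 1.0]), np.array([0.0, 10.0]))
>>> interpolator(np.array([0.5]))
array([5.])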
See Also -------- numpy.interp """ def __init__(self, xi, yi, method="linear", fill_value=None, period=None): if method != "linear": raise ValueError("only method `linear` is valid for the NumpyInterpolator") self.method = method self.f = np.interp self.cons_kwargs = {} self.call_kwargs = {"period": period} self._xi = xi self._yi = yi nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j if fill_value is None: self._left = nan self._right = nan elif isinstance(fill_value, Sequence) and len(fill_value) == 2: self._left = fill_value[0] self._right = fill_value[1] elif is_scalar(fill_value): self._left = fill_value self._right = fill_value else: raise ValueError(f"{fill_value} is not a valid fill_value") def __call__(self, x): return self.f( x, self._xi, self._yi, left=self._left, right=self._right, **self.call_kwargs, ) class ScipyInterpolator(BaseInterpolator): """Interpolate a 1-D function using Scipy interp1d See Also -------- scipy.interpolate.interp1d """ def __init__( self, xi, yi, method=None, fill_value=None, assume_sorted=True, copy=False, bounds_error=False, order=None, axis=-1, **kwargs, ): from scipy.interpolate import interp1d if method is None: raise ValueError( "method is a required argument, please supply a " "valid scipy.inter1d method (kind)" ) if method == "polynomial": if order is None: raise ValueError("order is required when method=polynomial") method = order if method == "quintic": method = 5 self.method = method self.cons_kwargs = kwargs self.call_kwargs = {} nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j if fill_value is None and method == "linear": fill_value = nan, nan elif fill_value is None: fill_value = nan self.f = interp1d( xi, yi, kind=self.method, fill_value=fill_value, bounds_error=bounds_error, assume_sorted=assume_sorted, copy=copy, axis=axis, **self.cons_kwargs, ) class SplineInterpolator(BaseInterpolator): """One-dimensional smoothing spline fit to a given set of data points. See Also -------- scipy.interpolate.UnivariateSpline """ def __init__( self, xi, yi, method="spline", fill_value=None, order=3, nu=0, ext=None, **kwargs, ): from scipy.interpolate import UnivariateSpline if method != "spline": raise ValueError("only method `spline` is valid for the SplineInterpolator") self.method = method self.cons_kwargs = kwargs self.call_kwargs = {"nu": nu, "ext": ext} if fill_value is not None: raise ValueError("SplineInterpolator does not support fill_value") self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs) def _apply_over_vars_with_dim(func, self, dim=None, **kwargs): """Wrapper for datasets""" ds = type(self)(coords=self.coords, attrs=self.attrs) for name, var in self.data_vars.items(): if dim in var.dims: ds[name] = func(var, dim=dim, **kwargs) else: ds[name] = var return ds def get_clean_interp_index( arr, dim: Hashable, use_coordinate: Hashable | bool = True, strict: bool = True ): """Return index to use for x values in interpolation or curve fitting. Parameters ---------- arr : DataArray Array to interpolate or fit to a curve. dim : str Name of dimension along which to fit. use_coordinate : str or bool If use_coordinate is True, the coordinate that shares the name of the dimension along which interpolation is being performed will be used as the x values. If False, the x values are set as an equally spaced sequence. strict : bool Whether to raise errors if the index is either non-unique or non-monotonic (default). Returns ------- Variable Numerical values for the x-coordinates. 
Notes ----- If indexing is along the time dimension, datetime coordinates are converted to time deltas with respect to 1970-01-01. """ # Question: If use_coordinate is a string, what role does `dim` play? from xarray.coding.cftimeindex import CFTimeIndex if use_coordinate is False: axis = arr.get_axis_num(dim) return np.arange(arr.shape[axis], dtype=np.float64) if use_coordinate is True: index = arr.get_index(dim) else: # string index = arr.coords[use_coordinate] if index.ndim != 1: raise ValueError( f"Coordinates used for interpolation must be 1D, " f"{use_coordinate} is {index.ndim}D." ) index = index.to_index() # TODO: index.name is None for multiindexes # set name for nice error messages below if isinstance(index, pd.MultiIndex): index.name = dim if strict: if not index.is_monotonic_increasing: raise ValueError(f"Index {index.name!r} must be monotonically increasing") if not index.is_unique: raise ValueError(f"Index {index.name!r} has duplicate values") # Special case for non-standard calendar indexes # Numerical datetime values are defined with respect to 1970-01-01T00:00:00 in units of nanoseconds if isinstance(index, CFTimeIndex | pd.DatetimeIndex): offset = type(index[0])(1970, 1, 1) if isinstance(index, CFTimeIndex): index = index.values index = Variable( data=datetime_to_numeric(index, offset=offset, datetime_unit="ns"), dims=(dim,), ) # raise if index cannot be cast to a float (e.g. MultiIndex) try: index = index.values.astype(np.float64) except (TypeError, ValueError) as err: # pandas raises a TypeError # xarray/numpy raise a ValueError raise TypeError( f"Index {index.name!r} must be castable to float64 to support " f"interpolation or curve fitting, got {type(index).__name__}." ) from err return index def interp_na( self, dim: Hashable | None = None, use_coordinate: bool | str = True, method: InterpOptions = "linear", limit: int | None = None, max_gap: ( int | float | str | pd.Timedelta | np.timedelta64 | dt.timedelta | None ) = None, keep_attrs: bool | None = None, **kwargs, ): """Interpolate values according to different methods.""" from xarray.coding.cftimeindex import CFTimeIndex if dim is None: raise NotImplementedError("dim is a required argument") if limit is not None: valids = _get_valid_fill_mask(self, dim, limit) if max_gap is not None: max_type = type(max_gap).__name__ if not is_scalar(max_gap): raise ValueError("max_gap must be a scalar.") if ( dim in self._indexes and isinstance( self._indexes[dim].to_pandas_index(), pd.DatetimeIndex | CFTimeIndex ) and use_coordinate ): # Convert to float max_gap = timedelta_to_numeric(max_gap) if not use_coordinate and not isinstance(max_gap, Number | np.number): raise TypeError( f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}." 
) # method index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate) interp_class, kwargs = _get_interpolator(method, **kwargs) interpolator = partial(func_interpolate_na, interp_class, **kwargs) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "overflow", RuntimeWarning) warnings.filterwarnings("ignore", "invalid value", RuntimeWarning) arr = apply_ufunc( interpolator, self, index, input_core_dims=[[dim], [dim]], output_core_dims=[[dim]], output_dtypes=[self.dtype], dask="parallelized", vectorize=True, keep_attrs=keep_attrs, ).transpose(*self.dims) if limit is not None: arr = arr.where(valids) if max_gap is not None: if dim not in self.coords: raise NotImplementedError( "max_gap not implemented for unlabeled coordinates yet." ) nan_block_lengths = _get_nan_block_lengths(self, dim, index) arr = arr.where(nan_block_lengths <= max_gap) return arr def func_interpolate_na(interpolator, y, x, **kwargs): """helper function to apply interpolation along 1 dimension""" # reversed arguments are so that attrs are preserved from da, not index # it would be nice if this wasn't necessary, works around: # "ValueError: assignment destination is read-only" in assignment below out = y.copy() nans = pd.isnull(y) nonans = ~nans # fast track for no-nans, all nan but one, and all-nans cases n_nans = nans.sum() if n_nans == 0 or n_nans >= len(y) - 1: return y f = interpolator(x[nonans], y[nonans], **kwargs) out[nans] = f(x[nans]) return out def _bfill(arr, n=None, axis=-1): """inverse of ffill""" arr = np.flip(arr, axis=axis) # fill arr = push(arr, axis=axis, n=n) # reverse back to original return np.flip(arr, axis=axis) def ffill(arr, dim=None, limit=None): """forward fill missing values""" axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc( push, arr, dask="allowed", keep_attrs=True, output_dtypes=[arr.dtype], kwargs=dict(n=_limit, axis=axis), ).transpose(*arr.dims) def bfill(arr, dim=None, limit=None): """backfill missing values""" axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc( _bfill, arr, dask="allowed", keep_attrs=True, output_dtypes=[arr.dtype], kwargs=dict(n=_limit, axis=axis), ).transpose(*arr.dims) def _import_interpolant(interpolant, method): """Import interpolant from scipy.interpolate.""" try: from scipy import interpolate return getattr(interpolate, interpolant) except ImportError as e: raise ImportError(f"Interpolation with method {method} requires scipy.") from e def _get_interpolator( method: InterpOptions, vectorizeable_only: bool = False, **kwargs ): """helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class """ interp_class: Interpolator interp1d_methods = get_args(Interp1dOptions) valid_methods = tuple(vv for v in get_args(InterpOptions) for vv in get_args(v)) # prefer numpy.interp for 1d linear interpolation. This function cannot # take higher dimensional data but scipy.interp1d can. 
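# For example, _get_interpolator("linear") selects NumpyInterpolator, while
# _get_interpolator("cubic") selects ScipyInterpolator with method="cubic".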
if ( method == "linear" and kwargs.get("fill_value") != "extrapolate" and not vectorizeable_only ): kwargs.update(method=method) interp_class = NumpyInterpolator elif method in valid_methods: if method in interp1d_methods: kwargs.update(method=method) interp_class = ScipyInterpolator elif method == "barycentric": kwargs.update(axis=-1) interp_class = _import_interpolant("BarycentricInterpolator", method) elif method in ["krogh", "krog"]: kwargs.update(axis=-1) interp_class = _import_interpolant("KroghInterpolator", method) elif method == "pchip": kwargs.update(axis=-1) # pchip default behavior is to extrapolate kwargs.setdefault("extrapolate", False) interp_class = _import_interpolant("PchipInterpolator", method) elif method == "spline": utils.emit_user_level_warning( "The 1d SplineInterpolator class is performing an incorrect calculation and " "is being deprecated. Please use `method=polynomial` for 1D Spline Interpolation.", PendingDeprecationWarning, ) if vectorizeable_only: raise ValueError(f"{method} is not a vectorizeable interpolator. ") kwargs.update(method=method) interp_class = SplineInterpolator elif method == "akima": kwargs.update(axis=-1) interp_class = _import_interpolant("Akima1DInterpolator", method) elif method == "makima": kwargs.update(method="makima", axis=-1) interp_class = _import_interpolant("Akima1DInterpolator", method) else: raise ValueError(f"{method} is not a valid scipy interpolator") else: raise ValueError(f"{method} is not a valid interpolator") return interp_class, kwargs def _get_interpolator_nd(method, **kwargs): """helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class """ valid_methods = tuple(get_args(InterpnOptions)) if method in valid_methods: kwargs.update(method=method) kwargs.setdefault("bounds_error", False) interp_class = _import_interpolant("interpn", method) else: raise ValueError( f"{method} is not a valid interpolator for interpolating " "over multiple dimensions." ) return interp_class, kwargs def _get_valid_fill_mask(arr, dim, limit): """helper function to determine values that can be filled when limit is not None""" kw = {dim: limit + 1} # we explicitly use construct method to avoid copy. new_dim = utils.get_temp_dimname(arr.dims, "_window") return ( arr.isnull() .rolling(min_periods=1, **kw) .construct(new_dim, fill_value=False) .sum(new_dim, skipna=False) ) <= limit def _localize(obj: T, indexes_coords: SourceDest) -> tuple[T, SourceDest]: """Speed up for linear and nearest neighbor method. Only consider a subspace that is needed for the interpolation """ indexes = {} for dim, [x, new_x] in indexes_coords.items(): if is_chunked_array(new_x._data): continue new_x_loaded = new_x.data minval = np.nanmin(new_x_loaded) maxval = np.nanmax(new_x_loaded) index = x.to_index() imin, imax = index.get_indexer([minval, maxval], method="nearest") indexes[dim] = slice(max(imin - 2, 0), imax + 2) indexes_coords[dim] = (x[indexes[dim]], new_x) return obj.isel(indexes), indexes_coords # type: ignore[attr-defined] def _floatize_x( x: list[Variable], new_x: list[Variable] ) -> tuple[list[Variable], list[Variable]]: """Make x and new_x float. This is particularly useful for datetime dtype. """ for i in range(len(x)): if _contains_datetime_like_objects(x[i]): # Scipy casts coordinates to np.float64, which is not accurate # enough for datetime64 (uses 64bit integer). 
# We assume that the most of the bits are used to represent the # offset (min(x)) and the variation (x - min(x)) can be # represented by float. xmin = x[i].values.min() x[i] = x[i]._to_numeric(offset=xmin, dtype=np.float64) new_x[i] = new_x[i]._to_numeric(offset=xmin, dtype=np.float64) return x, new_x def interp( var: Variable, indexes_coords: SourceDest, method: InterpOptions, **kwargs, ) -> Variable: """Make an interpolation of Variable Parameters ---------- var : Variable indexes_coords Mapping from dimension name to a pair of original and new coordinates. Original coordinates should be sorted in strictly ascending order. Note that all the coordinates should be Variable objects. method : string One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}. For multidimensional interpolation, only {'linear', 'nearest'} can be used. **kwargs keyword arguments to be passed to scipy.interpolate Returns ------- Interpolated Variable See Also -------- DataArray.interp Dataset.interp """ if not indexes_coords: return var.copy() result = var if method in ["linear", "nearest", "slinear"]: # decompose the interpolation into a succession of independent interpolation. iter_indexes_coords = decompose_interp(indexes_coords) else: iter_indexes_coords = (_ for _ in [indexes_coords]) for indep_indexes_coords in iter_indexes_coords: var = result # target dimensions dims = list(indep_indexes_coords) # transpose to make the interpolated axis to the last position broadcast_dims = [d for d in var.dims if d not in dims] original_dims = broadcast_dims + dims result = interpolate_variable( var.transpose(*original_dims), {k: indep_indexes_coords[k] for k in dims}, method=method, kwargs=kwargs, ) # dimension of the output array out_dims: OrderedSet = OrderedSet() for d in var.dims: if d in dims: out_dims.update(indep_indexes_coords[d][1].dims) else: out_dims.add(d) if len(out_dims) > 1: result = result.transpose(*out_dims) return result def interpolate_variable( var: Variable, indexes_coords: SourceDest, *, method: InterpOptions, kwargs: dict[str, Any], ) -> Variable: """core routine that returns the interpolated variable.""" if not indexes_coords: return var.copy() if len(indexes_coords) == 1: func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs) else: func, kwargs = _get_interpolator_nd(method, **kwargs) in_coords, result_coords = zip(*(v for v in indexes_coords.values()), strict=True) # input coordinates along which we are interpolation are core dimensions # the corresponding output coordinates may or may not have the same name, # so `all_in_core_dims` is also `exclude_dims` all_in_core_dims = set(indexes_coords) result_dims = OrderedSet(itertools.chain(*(_.dims for _ in result_coords))) result_sizes = ChainMap(*(_.sizes for _ in result_coords)) # any dimensions on the output that are present on the input, but are not being # interpolated along are dimensions along which we automatically vectorize. # Consider the problem in https://github.com/pydata/xarray/issues/6799#issuecomment-2474126217 # In the following, dimension names are listed out in []. # # da[time, q, lat, lon].interp(q=bar[lat,lon]). Here `lat`, `lon` # are input dimensions, present on the output, but are not the coordinates # we are explicitly interpolating. These are the dimensions along which we vectorize. # `q` is the only input core dimensions, and changes size (disappears) # so it is in exclude_dims. 
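# Illustrative sketch (not part of the library source): walking through the
# example above with var.dims == ("time", "q", "lat", "lon"),
#     >>> result_dims, all_in_core_dims = {"lat", "lon"}, {"q"}
#     >>> sorted((result_dims - all_in_core_dims) & {"time", "q", "lat", "lon"})
#     ['lat', 'lon']
# so apply_ufunc vectorizes over "lat"/"lon" while "q" stays a core dimension.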
vectorize_dims = (result_dims - all_in_core_dims) & set(var.dims) # remove any output broadcast dimensions from the list of core dimensions output_core_dims = tuple(d for d in result_dims if d not in vectorize_dims) input_core_dims = ( # all coordinates on the input that we interpolate along [tuple(indexes_coords)] # the input coordinates are always 1D at the moment, so we just need to list out their names + [tuple(_.dims) for _ in in_coords] # The last set of inputs are the coordinates we are interpolating to. + [ tuple(d for d in coord.dims if d not in vectorize_dims) for coord in result_coords ] ) output_sizes = {k: result_sizes[k] for k in output_core_dims} # scipy.interpolate.interp1d always forces to float. dtype = float if not issubclass(var.dtype.type, np.inexact) else var.dtype result = apply_ufunc( _interpnd, var, *in_coords, *result_coords, input_core_dims=input_core_dims, output_core_dims=[output_core_dims], exclude_dims=all_in_core_dims, dask="parallelized", kwargs=dict( interp_func=func, interp_kwargs=kwargs, # we leave broadcasting up to dask if possible # but we need broadcasted values in _interpnd, so propagate that # context (dimension names), and broadcast there # This would be unnecessary if we could tell apply_ufunc # to insert size-1 broadcast dimensions result_coord_core_dims=input_core_dims[-len(result_coords) :], ), # TODO: deprecate and have the user rechunk themselves dask_gufunc_kwargs=dict(output_sizes=output_sizes, allow_rechunk=True), output_dtypes=[dtype], vectorize=bool(vectorize_dims), keep_attrs=True, ) return result def _interp1d( var: Variable, x_: list[Variable], new_x_: list[Variable], func: Interpolator, kwargs, ) -> np.ndarray: """Core 1D array interpolation routine.""" # x, new_x are tuples of size 1. x, new_x = x_[0], new_x_[0] rslt = func(x.data, var, **kwargs)(ravel(new_x.data)) if new_x.ndim > 1: return reshape(rslt.data, (var.shape[:-1] + new_x.shape)) if new_x.ndim == 0: return rslt[..., -1] return rslt def _interpnd( data: np.ndarray, *coords: np.ndarray, interp_func: Interpolator | InterpCallable, interp_kwargs, result_coord_core_dims: list[tuple[Hashable, ...]], ) -> np.ndarray: """ Core nD array interpolation routine. The first half arrays in `coords` are original coordinates, the other half are destination coordinates. 
""" n_x = len(coords) // 2 ndim = data.ndim nconst = ndim - n_x # Convert everything to Variables, since that makes applying # `_localize` and `_floatize_x` much easier x = [ Variable([f"dim_{nconst + dim}"], _x, fastpath=True) for dim, _x in enumerate(coords[:n_x]) ] new_x = list( broadcast_variables( *( Variable(dims, _x, fastpath=True) for dims, _x in zip(result_coord_core_dims, coords[n_x:], strict=True) ) ) ) var = Variable([f"dim_{dim}" for dim in range(ndim)], data, fastpath=True) if interp_kwargs.get("method") in ["linear", "nearest"]: indexes_coords = { _x.dims[0]: (_x, _new_x) for _x, _new_x in zip(x, new_x, strict=True) } # simple speed up for the local interpolation var, indexes_coords = _localize(var, indexes_coords) x, new_x = tuple( list(_) for _ in zip(*(indexes_coords[d] for d in indexes_coords), strict=True) ) x_list, new_x_list = _floatize_x(x, new_x) if len(x) == 1: # TODO: narrow interp_func to interpolator here return _interp1d(var, x_list, new_x_list, interp_func, interp_kwargs) # type: ignore[arg-type] # move the interpolation axes to the start position data = transpose(var._data, range(-len(x), var.ndim - len(x))) # stack new_x to 1 vector, with reshape xi = stack([ravel(x1.data) for x1 in new_x_list], axis=-1) rslt: np.ndarray = interp_func(x_list, data, xi, **interp_kwargs) # type: ignore[assignment] # move back the interpolation axes to the last position rslt = transpose(rslt, range(-rslt.ndim + 1, 1)) return reshape(rslt, rslt.shape[:-1] + new_x[0].shape) def decompose_interp(indexes_coords: SourceDest) -> Generator[SourceDest, None]: """Decompose the interpolation into a succession of independent interpolation keeping the order""" dest_dims = [ dest[1].dims if dest[1].ndim > 0 else (dim,) for dim, dest in indexes_coords.items() ] partial_dest_dims: list[tuple[Hashable, ...]] = [] partial_indexes_coords: SourceDest = {} for i, index_coords in enumerate(indexes_coords.items()): partial_indexes_coords.update([index_coords]) if i == len(dest_dims) - 1: break partial_dest_dims += [dest_dims[i]] other_dims = dest_dims[i + 1 :] s_partial_dest_dims = {dim for dims in partial_dest_dims for dim in dims} s_other_dims = {dim for dims in other_dims for dim in dims} if not s_partial_dest_dims.intersection(s_other_dims): # this interpolation is orthogonal to the rest yield partial_indexes_coords partial_dest_dims = [] partial_indexes_coords = {} yield partial_indexes_coords xarray-2025.12.0/xarray/core/nputils.py000066400000000000000000000256231511464676000176610ustar00rootroot00000000000000from __future__ import annotations import warnings from collections.abc import Callable import numpy as np import pandas as pd from packaging.version import Version from xarray.compat.array_api_compat import get_array_namespace from xarray.core.utils import is_duck_array, module_available from xarray.namedarray import pycompat # remove once numpy 2.0 is the oldest supported version if module_available("numpy", minversion="2.0.0.dev0"): from numpy.lib.array_utils import ( # type: ignore[import-not-found,unused-ignore] normalize_axis_index, ) else: from numpy.core.multiarray import ( # type: ignore[attr-defined,no-redef,unused-ignore] normalize_axis_index, ) # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning # type: ignore[attr-defined,unused-ignore] except ImportError: from numpy import RankWarning # type: ignore[attr-defined,no-redef,unused-ignore] from xarray.core.options import OPTIONS try: import bottleneck as bn _BOTTLENECK_AVAILABLE 
= True except ImportError: # use numpy methods instead bn = np _BOTTLENECK_AVAILABLE = False def _select_along_axis(values, idx, axis): other_ind = np.ix_(*[np.arange(s) for s in idx.shape]) sl = other_ind[:axis] + (idx,) + other_ind[axis:] return values[sl] def nanfirst(values, axis, keepdims=False): if isinstance(axis, tuple): (axis,) = axis axis = normalize_axis_index(axis, values.ndim) idx_first = np.argmax(~pd.isnull(values), axis=axis) result = _select_along_axis(values, idx_first, axis) if keepdims: return np.expand_dims(result, axis=axis) else: return result def nanlast(values, axis, keepdims=False): if isinstance(axis, tuple): (axis,) = axis axis = normalize_axis_index(axis, values.ndim) rev = (slice(None),) * axis + (slice(None, None, -1),) idx_last = -1 - np.argmax(~pd.isnull(values)[rev], axis=axis) result = _select_along_axis(values, idx_last, axis) if keepdims: return np.expand_dims(result, axis=axis) else: return result def inverse_permutation(indices: np.ndarray, N: int | None = None) -> np.ndarray: """Return indices for an inverse permutation. Parameters ---------- indices : 1D np.ndarray with dtype=int Integer positions to assign elements to. N : int, optional Size of the array Returns ------- inverse_permutation : 1D np.ndarray with dtype=int Integer indices to take from the original array to create the permutation. """ if N is None: N = len(indices) # use intp instead of int64 because of windows :( inverse_permutation = np.full(N, -1, dtype=np.intp) inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp) return inverse_permutation def _ensure_bool_is_ndarray(result, *args): # numpy will sometimes return a scalar value from binary comparisons if it # can't handle the comparison instead of broadcasting, e.g., # In [10]: 1 == np.array(['a', 'b']) # Out[10]: False # This function ensures that the result is the appropriate shape in these # cases if isinstance(result, bool): shape = np.broadcast(*args).shape constructor = np.ones if result else np.zeros result = constructor(shape, dtype=bool) return result def array_eq(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"elementwise comparison failed") return _ensure_bool_is_ndarray(self == other, self, other) def array_ne(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"elementwise comparison failed") return _ensure_bool_is_ndarray(self != other, self, other) def _is_contiguous(positions): """Given a non-empty list, does it consist of contiguous integers?""" previous = positions[0] for current in positions[1:]: if current != previous + 1: return False previous = current return True def _advanced_indexer_subspaces(key): """Indices of the advanced indexes subspaces for mixed indexing and vindex.""" if not isinstance(key, tuple): key = (key,) advanced_index_positions = [ i for i, k in enumerate(key) if not isinstance(k, slice) ] if not advanced_index_positions or not _is_contiguous(advanced_index_positions): # Nothing to reorder: dimensions on the indexing result are already # ordered like vindex. 
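# Illustrative example (not part of the library source): with separated
# (non-contiguous) advanced indexes, NumPy already places the broadcast
# advanced-index dimensions first, e.g.
#     >>> import numpy as np
#     >>> idx = np.array([0, 1])
#     >>> np.arange(24).reshape(2, 3, 4)[idx, :, idx].shape
#     (2, 3)
# which is exactly the vindex ordering, so no axes need to be moved.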
See NumPy's rule for "Combining advanced and # basic indexing": # https://numpy.org/doc/stable/reference/arrays.indexing.html#combining-advanced-and-basic-indexing return (), () non_slices = [k for k in key if not isinstance(k, slice)] broadcasted_shape = np.broadcast_shapes( *[item.shape if is_duck_array(item) else (0,) for item in non_slices] ) ndim = len(broadcasted_shape) mixed_positions = advanced_index_positions[0] + np.arange(ndim) vindex_positions = np.arange(ndim) return mixed_positions, vindex_positions class NumpyVIndexAdapter: """Object that implements indexing like vindex on an np.ndarray. This is a pure Python implementation of (some of) the logic in this NumPy proposal: https://github.com/numpy/numpy/pull/6256 """ def __init__(self, array): self._array = array def __getitem__(self, key): mixed_positions, vindex_positions = _advanced_indexer_subspaces(key) return np.moveaxis(self._array[key], mixed_positions, vindex_positions) def __setitem__(self, key, value): """Value must have dimensionality matching the key.""" mixed_positions, vindex_positions = _advanced_indexer_subspaces(key) self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions) def _create_method(name, npmodule=np) -> Callable: def f(values, axis=None, **kwargs): dtype = kwargs.get("dtype") bn_func = getattr(bn, name, None) xp = get_array_namespace(values) if xp is not np: func = getattr(xp, name, None) if func is not None: return func(values, axis=axis, **kwargs) if ( module_available("numbagg") and OPTIONS["use_numbagg"] and isinstance(values, np.ndarray) # numbagg<0.7.0 uses ddof=1 only, but numpy uses ddof=0 by default and ( pycompat.mod_version("numbagg") >= Version("0.7.0") or ("var" not in name and "std" not in name) or kwargs.get("ddof", 0) == 1 ) # TODO: bool? and values.dtype.kind in "uif" # and values.dtype.isnative and (dtype is None or np.dtype(dtype) == values.dtype) # numbagg.nanquantile only available after 0.8.0 and with linear method and ( name != "nanquantile" or ( pycompat.mod_version("numbagg") >= Version("0.8.0") and kwargs.get("method", "linear") == "linear" ) ) ): import numbagg # type: ignore[import-not-found, unused-ignore] nba_func = getattr(numbagg, name, None) if nba_func is not None: # numbagg does not use dtype kwargs.pop("dtype", None) # prior to 0.7.0, numbagg did not support ddof; we ensure it's limited # to ddof=1 above. 
if pycompat.mod_version("numbagg") < Version("0.7.0"): kwargs.pop("ddof", None) if name == "nanquantile": kwargs["quantiles"] = kwargs.pop("q") kwargs.pop("method", None) return nba_func(values, axis=axis, **kwargs) if ( _BOTTLENECK_AVAILABLE and OPTIONS["use_bottleneck"] and isinstance(values, np.ndarray) and bn_func is not None and not isinstance(axis, tuple) and values.dtype.kind in "uifc" and values.dtype.isnative and (dtype is None or np.dtype(dtype) == values.dtype) ): # bottleneck does not take care dtype, min_count kwargs.pop("dtype", None) result = bn_func(values, axis=axis, **kwargs) # bottleneck returns python scalars for reduction over all axes if isinstance(result, float): result = np.float64(result) else: result = getattr(npmodule, name)(values, axis=axis, **kwargs) return result f.__name__ = name return f def _nanpolyfit_1d(arr, x, rcond=None): out = np.full((x.shape[1] + 1,), np.nan) mask = np.isnan(arr) if not np.all(mask): out[:-1], resid, rank, _ = np.linalg.lstsq(x[~mask, :], arr[~mask], rcond=rcond) out[-1] = resid[0] if resid.size > 0 else np.nan warn_on_deficient_rank(rank, x.shape[1]) return out def warn_on_deficient_rank(rank, order): if rank != order: warnings.warn("Polyfit may be poorly conditioned", RankWarning, stacklevel=2) def least_squares(lhs, rhs, rcond=None, skipna=False): if rhs.ndim > 2: out_shape = rhs.shape rhs = rhs.reshape(rhs.shape[0], -1) else: out_shape = None if skipna: added_dim = rhs.ndim == 1 if added_dim: rhs = rhs.reshape(rhs.shape[0], 1) nan_cols = np.any(np.isnan(rhs), axis=0) out = np.empty((lhs.shape[1] + 1, rhs.shape[1])) if np.any(nan_cols): out[:, nan_cols] = np.apply_along_axis( _nanpolyfit_1d, 0, rhs[:, nan_cols], lhs ) if np.any(~nan_cols): out[:-1, ~nan_cols], resids, rank, _ = np.linalg.lstsq( lhs, rhs[:, ~nan_cols], rcond=rcond ) out[-1, ~nan_cols] = resids if resids.size > 0 else np.nan warn_on_deficient_rank(rank, lhs.shape[1]) coeffs = out[:-1, :] residuals = out[-1, :] if added_dim: coeffs = coeffs.reshape(coeffs.shape[0]) residuals = residuals.reshape(residuals.shape[0]) else: coeffs, residuals, rank, _ = np.linalg.lstsq(lhs, rhs, rcond=rcond) if residuals.size == 0: residuals = coeffs[0] * np.nan warn_on_deficient_rank(rank, lhs.shape[1]) if out_shape is not None: coeffs = coeffs.reshape(-1, *out_shape[1:]) residuals = residuals.reshape(*out_shape[1:]) return coeffs, residuals nanmin = _create_method("nanmin") nanmax = _create_method("nanmax") nanmean = _create_method("nanmean") nanmedian = _create_method("nanmedian") nanvar = _create_method("nanvar") nanstd = _create_method("nanstd") nanprod = _create_method("nanprod") nancumsum = _create_method("nancumsum") nancumprod = _create_method("nancumprod") nanargmin = _create_method("nanargmin") nanargmax = _create_method("nanargmax") nanquantile = _create_method("nanquantile") xarray-2025.12.0/xarray/core/options.py000066400000000000000000000326241511464676000176550ustar00rootroot00000000000000from __future__ import annotations import warnings from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Literal, TypedDict from xarray.core.utils import FrozenDict if TYPE_CHECKING: from matplotlib.colors import Colormap Options = Literal[ "arithmetic_join", "chunk_manager", "cmap_divergent", "cmap_sequential", "display_max_children", "display_max_rows", "display_values_threshold", "display_style", "display_width", "display_expand_attrs", "display_expand_coords", "display_expand_data_vars", "display_expand_data", "display_expand_groups", "display_expand_indexes", 
"display_default_indexes", "enable_cftimeindex", "file_cache_maxsize", "keep_attrs", "netcdf_engine_order", "warn_for_unclosed_files", "use_bottleneck", "use_new_combine_kwarg_defaults", "use_numbagg", "use_opt_einsum", "use_flox", ] class T_Options(TypedDict): arithmetic_broadcast: bool arithmetic_join: Literal["inner", "outer", "left", "right", "exact"] chunk_manager: str cmap_divergent: str | Colormap cmap_sequential: str | Colormap display_max_children: int display_max_rows: int display_values_threshold: int display_style: Literal["text", "html"] display_width: int display_expand_attrs: Literal["default"] | bool display_expand_coords: Literal["default"] | bool display_expand_data_vars: Literal["default"] | bool display_expand_data: Literal["default"] | bool display_expand_groups: Literal["default"] | bool display_expand_indexes: Literal["default"] | bool display_default_indexes: Literal["default"] | bool enable_cftimeindex: bool file_cache_maxsize: int keep_attrs: Literal["default"] | bool netcdf_engine_order: Sequence[Literal["netcdf4", "h5netcdf", "scipy"]] warn_for_unclosed_files: bool use_bottleneck: bool use_flox: bool use_new_combine_kwarg_defaults: bool use_numbagg: bool use_opt_einsum: bool OPTIONS: T_Options = { "arithmetic_broadcast": True, "arithmetic_join": "inner", "chunk_manager": "dask", "cmap_divergent": "RdBu_r", "cmap_sequential": "viridis", "display_max_children": 6, "display_max_rows": 12, "display_values_threshold": 200, "display_style": "html", "display_width": 80, "display_expand_attrs": "default", "display_expand_coords": "default", "display_expand_data_vars": "default", "display_expand_data": "default", "display_expand_groups": "default", "display_expand_indexes": "default", "display_default_indexes": False, "enable_cftimeindex": True, "file_cache_maxsize": 128, "keep_attrs": "default", "netcdf_engine_order": ("netcdf4", "h5netcdf", "scipy"), "warn_for_unclosed_files": False, "use_bottleneck": True, "use_flox": True, "use_new_combine_kwarg_defaults": False, "use_numbagg": True, "use_opt_einsum": True, } _JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"]) _DISPLAY_OPTIONS = frozenset(["text", "html"]) _NETCDF_ENGINES = frozenset(["netcdf4", "h5netcdf", "scipy"]) def _positive_integer(value: Any) -> bool: return isinstance(value, int) and value > 0 _VALIDATORS = { "arithmetic_broadcast": lambda value: isinstance(value, bool), "arithmetic_join": _JOIN_OPTIONS.__contains__, "display_max_children": _positive_integer, "display_max_rows": _positive_integer, "display_values_threshold": _positive_integer, "display_style": _DISPLAY_OPTIONS.__contains__, "display_width": _positive_integer, "display_expand_attrs": lambda choice: choice in [True, False, "default"], "display_expand_coords": lambda choice: choice in [True, False, "default"], "display_expand_data_vars": lambda choice: choice in [True, False, "default"], "display_expand_data": lambda choice: choice in [True, False, "default"], "display_expand_indexes": lambda choice: choice in [True, False, "default"], "display_default_indexes": lambda choice: choice in [True, False, "default"], "enable_cftimeindex": lambda value: isinstance(value, bool), "file_cache_maxsize": _positive_integer, "keep_attrs": lambda choice: choice in [True, False, "default"], "netcdf_engine_order": lambda engines: set(engines) <= _NETCDF_ENGINES, "use_bottleneck": lambda value: isinstance(value, bool), "use_new_combine_kwarg_defaults": lambda value: isinstance(value, bool), "use_numbagg": lambda value: isinstance(value, 
bool), "use_opt_einsum": lambda value: isinstance(value, bool), "use_flox": lambda value: isinstance(value, bool), "warn_for_unclosed_files": lambda value: isinstance(value, bool), } def _set_file_cache_maxsize(value) -> None: from xarray.backends.file_manager import FILE_CACHE FILE_CACHE.maxsize = value def _warn_on_setting_enable_cftimeindex(enable_cftimeindex): warnings.warn( "The enable_cftimeindex option is now a no-op " "and will be removed in a future version of xarray.", FutureWarning, stacklevel=2, ) _SETTERS = { "enable_cftimeindex": _warn_on_setting_enable_cftimeindex, "file_cache_maxsize": _set_file_cache_maxsize, } def _get_boolean_with_default(option: Options, default: bool) -> bool: global_choice = OPTIONS[option] if global_choice == "default": return default elif isinstance(global_choice, bool): return global_choice else: raise ValueError( f"The global option {option} must be one of True, False or 'default'." ) def _get_keep_attrs(default: bool) -> bool: return _get_boolean_with_default("keep_attrs", default) class set_options: """ Set options for xarray in a controlled context. Parameters ---------- arithmetic_join : {"inner", "outer", "left", "right", "exact"}, default: "inner" DataArray/Dataset alignment in binary operations: - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. chunk_manager : str, default: "dask" Chunk manager to use for chunked array computations when multiple options are installed. cmap_divergent : str or matplotlib.colors.Colormap, default: "RdBu_r" Colormap to use for divergent data plots. If string, must be matplotlib built-in colormap. Can also be a Colormap object (e.g. mpl.colormaps["magma"]) cmap_sequential : str or matplotlib.colors.Colormap, default: "viridis" Colormap to use for nondivergent data plots. If string, must be matplotlib built-in colormap. Can also be a Colormap object (e.g. mpl.colormaps["magma"]) display_expand_attrs : {"default", True, False} Whether to expand the attributes section for display of ``DataArray`` or ``Dataset`` objects. Can be * ``True`` : to always expand attrs * ``False`` : to always collapse attrs * ``default`` : to expand unless over a pre-defined limit display_expand_coords : {"default", True, False} Whether to expand the coordinates section for display of ``DataArray`` or ``Dataset`` objects. Can be * ``True`` : to always expand coordinates * ``False`` : to always collapse coordinates * ``default`` : to expand unless over a pre-defined limit display_expand_data : {"default", True, False} Whether to expand the data section for display of ``DataArray`` objects. Can be * ``True`` : to always expand data * ``False`` : to always collapse data * ``default`` : to expand unless over a pre-defined limit display_expand_data_vars : {"default", True, False} Whether to expand the data variables section for display of ``Dataset`` objects. 
Can be * ``True`` : to always expand data variables * ``False`` : to always collapse data variables * ``default`` : to expand unless over a pre-defined limit display_expand_indexes : {"default", True, False} Whether to expand the indexes section for display of ``DataArray`` or ``Dataset``. Can be * ``True`` : to always expand indexes * ``False`` : to always collapse indexes * ``default`` : to expand unless over a pre-defined limit (always collapse for html style) display_max_children : int, default: 6 Maximum number of children to display for each node in a DataTree. display_max_rows : int, default: 12 Maximum display rows. display_values_threshold : int, default: 200 Total number of array elements which trigger summarization rather than full repr for variable data views (numpy arrays). display_style : {"text", "html"}, default: "html" Display style to use in jupyter for xarray objects. display_width : int, default: 80 Maximum display width for ``repr`` on xarray objects. file_cache_maxsize : int, default: 128 Maximum number of open files to hold in xarray's global least-recently-usage cached. This should be smaller than your system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux. keep_attrs : {"default", True, False} Whether to keep attributes on xarray Datasets/dataarrays after operations. Can be * ``True`` : to always keep attrs * ``False`` : to always discard attrs * ``default`` : to use original logic that attrs should only be kept in unambiguous circumstances netcdf_engine_order : sequence, default ['netcdf4', 'h5netcdf', 'scipy'] Preference order of backend engines to use when reading or writing netCDF files with ``open_dataset()`` and ``to_netcdf()`` if ``engine`` is not explicitly specified. May be any permutation or subset of ``['netcdf4', 'h5netcdf', 'scipy']``. use_bottleneck : bool, default: True Whether to use ``bottleneck`` to accelerate 1D reductions and 1D rolling reduction operations. use_flox : bool, default: True Whether to use ``numpy_groupies`` and `flox`` to accelerate groupby and resampling reductions. use_new_combine_kwarg_defaults : bool, default False Whether to use new kwarg default values for combine functions: :py:func:`~xarray.concat`, :py:func:`~xarray.merge`, :py:func:`~xarray.open_mfdataset`. New values are: * ``data_vars``: None * ``coords``: "minimal" * ``compat``: "override" * ``join``: "exact" use_numbagg : bool, default: True Whether to use ``numbagg`` to accelerate reductions. Takes precedence over ``use_bottleneck`` when both are True. use_opt_einsum : bool, default: True Whether to use ``opt_einsum`` to accelerate dot products. warn_for_unclosed_files : bool, default: False Whether or not to issue a warning when unclosed files are deallocated. This is mostly useful for debugging. Examples -------- It is possible to use ``set_options`` either as a context manager: >>> ds = xr.Dataset({"x": np.arange(1000)}) >>> with xr.set_options(display_width=40): ... print(ds) ... Size: 8kB Dimensions: (x: 1000) Coordinates: * x (x) int64 8kB 0 1 ... 
999 Data variables: *empty* Or to set global options: >>> xr.set_options(display_width=80) # doctest: +ELLIPSIS """ def __init__(self, **kwargs): self.old = {} for k, v in kwargs.items(): if k not in OPTIONS: raise ValueError( f"argument name {k!r} is not in the set of valid options {set(OPTIONS)!r}" ) if k in _VALIDATORS and not _VALIDATORS[k](v): if k == "arithmetic_join": expected = f"Expected one of {_JOIN_OPTIONS!r}" elif k == "display_style": expected = f"Expected one of {_DISPLAY_OPTIONS!r}" elif k == "netcdf_engine_order": expected = f"Expected a subset of {sorted(_NETCDF_ENGINES)}" else: expected = "" raise ValueError( f"option {k!r} given an invalid value: {v!r}. " + expected ) self.old[k] = OPTIONS[k] self._apply_update(kwargs) def _apply_update(self, options_dict): for k, v in options_dict.items(): if k in _SETTERS: _SETTERS[k](v) OPTIONS.update(options_dict) def __enter__(self): return def __exit__(self, type, value, traceback): self._apply_update(self.old) def get_options(): """ Get options for xarray. See Also ---------- set_options """ return FrozenDict(OPTIONS) xarray-2025.12.0/xarray/core/parallel.py000066400000000000000000000607461511464676000177640ustar00rootroot00000000000000from __future__ import annotations import collections import itertools import operator from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from typing import TYPE_CHECKING, Any, Literal, TypedDict import numpy as np from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index from xarray.core.utils import is_dask_collection from xarray.core.variable import Variable from xarray.structure.alignment import align from xarray.structure.merge import merge if TYPE_CHECKING: from xarray.core.types import T_Xarray class ExpectedDict(TypedDict): shapes: dict[Hashable, int] coords: set[Hashable] data_vars: set[Hashable] def unzip(iterable): return zip(*iterable, strict=True) def assert_chunks_compatible(a: Dataset, b: Dataset): a = a.unify_chunks() b = b.unify_chunks() for dim in set(a.chunks).intersection(set(b.chunks)): if a.chunks[dim] != b.chunks[dim]: raise ValueError(f"Chunk sizes along dimension {dim!r} are not equal.") def check_result_variables( result: DataArray | Dataset, expected: ExpectedDict, kind: Literal["coords", "data_vars"], ): if kind == "coords": nice_str = "coordinate" elif kind == "data_vars": nice_str = "data" # check that coords and data variables are as expected missing = expected[kind] - set(getattr(result, kind)) if missing: raise ValueError( "Result from applying user function does not contain " f"{nice_str} variables {missing}." ) extra = set(getattr(result, kind)) - expected[kind] if extra: raise ValueError( "Result from applying user function has unexpected " f"{nice_str} variables {extra}." ) def dataset_to_dataarray(obj: Dataset) -> DataArray: if not isinstance(obj, Dataset): raise TypeError(f"Expected Dataset, got {type(obj)}") if len(obj.data_vars) > 1: raise TypeError( "Trying to convert Dataset with more than one data variable to DataArray" ) return next(iter(obj.data_vars.values())) def dataarray_to_dataset(obj: DataArray) -> Dataset: # only using _to_temp_dataset would break # func = lambda x: x.to_dataset() # since that relies on preserving name. 
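# Illustrative note (not part of the library source): an unnamed DataArray
# cannot be converted with ``to_dataset()`` alone (it raises a ValueError,
# since every Dataset variable needs a name), and forcing a name would change
# the round-trip behaviour, e.g.
#     >>> import xarray as xr
#     >>> xr.DataArray([1, 2], dims="x").to_dataset(name="tmp")  # works, but imposes a name
# so unnamed arrays are routed through the private ``_to_temp_dataset`` helper
# below instead.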
if obj.name is None: dataset = obj._to_temp_dataset() else: dataset = obj.to_dataset() return dataset def make_meta(obj): """If obj is a DataArray or Dataset, return a new object of the same type and with the same variables and dtypes, but where all variables have size 0 and numpy backend. If obj is neither a DataArray nor Dataset, return it unaltered. """ if isinstance(obj, DataArray): obj_array = obj obj = dataarray_to_dataset(obj) elif isinstance(obj, Dataset): obj_array = None else: return obj from dask.array.utils import meta_from_array meta = Dataset() for name, variable in obj.variables.items(): meta_obj = meta_from_array(variable.data, ndim=variable.ndim) meta[name] = (variable.dims, meta_obj, variable.attrs) meta.attrs = obj.attrs meta = meta.set_coords(obj.coords) if obj_array is not None: return dataset_to_dataarray(meta) return meta def infer_template( func: Callable[..., T_Xarray], obj: DataArray | Dataset, *args, **kwargs ) -> T_Xarray: """Infer return object by running the function on meta objects.""" meta_args = [make_meta(arg) for arg in (obj,) + args] try: template = func(*meta_args, **kwargs) except Exception as e: raise Exception( "Cannot infer object returned from running user provided function. " "Please supply the 'template' kwarg to map_blocks." ) from e if not isinstance(template, Dataset | DataArray): raise TypeError( "Function must return an xarray DataArray or Dataset. Instead it returned " f"{type(template)}" ) return template def make_dict(x: DataArray | Dataset) -> dict[Hashable, Any]: """Map variable name to numpy(-like) data (Dataset.to_dict() is too complicated). """ if isinstance(x, DataArray): x = x._to_temp_dataset() return {k: v.data for k, v in x.variables.items()} def _get_chunk_slicer(dim: Hashable, chunk_index: Mapping, chunk_bounds: Mapping): if dim in chunk_index: which_chunk = chunk_index[dim] return slice(chunk_bounds[dim][which_chunk], chunk_bounds[dim][which_chunk + 1]) return slice(None) def subset_dataset_to_block( graph: dict, gname: str, dataset: Dataset, input_chunk_bounds, chunk_index ): """ Creates a task that subsets an xarray dataset to a block determined by chunk_index. Block extents are determined by input_chunk_bounds. Also subtasks that subset the constituent variables of a dataset. """ import dask # this will become [[name1, variable1], # [name2, variable2], # ...] 
# which is passed to dict and then to Dataset data_vars = [] coords = [] chunk_tuple = tuple(chunk_index.values()) chunk_dims_set = set(chunk_index) variable: Variable for name, variable in dataset.variables.items(): # make a task that creates tuple of (dims, chunk) if dask.is_dask_collection(variable.data): # get task name for chunk chunk = ( variable.data.name, *tuple(chunk_index[dim] for dim in variable.dims), ) chunk_variable_task = (f"{name}-{gname}-{chunk[0]!r}",) + chunk_tuple graph[chunk_variable_task] = ( tuple, [variable.dims, chunk, variable.attrs], ) else: assert name in dataset.dims or variable.ndim == 0 # non-dask array possibly with dimensions chunked on other variables # index into variable appropriately subsetter = { dim: _get_chunk_slicer(dim, chunk_index, input_chunk_bounds) for dim in variable.dims } if set(variable.dims) < chunk_dims_set: this_var_chunk_tuple = tuple(chunk_index[dim] for dim in variable.dims) else: this_var_chunk_tuple = chunk_tuple chunk_variable_task = ( f"{name}-{gname}-{dask.base.tokenize(subsetter)}", ) + this_var_chunk_tuple # We are including a dimension coordinate, # minimize duplication by not copying it in the graph for every chunk. if variable.ndim == 0 or chunk_variable_task not in graph: subset = variable.isel(subsetter) graph[chunk_variable_task] = ( tuple, [subset.dims, subset._data, subset.attrs], ) # this task creates dict mapping variable name to above tuple if name in dataset._coord_names: coords.append([name, chunk_variable_task]) else: data_vars.append([name, chunk_variable_task]) return (Dataset, (dict, data_vars), (dict, coords), dataset.attrs) def map_blocks( func: Callable[..., T_Xarray], obj: DataArray | Dataset, args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """Apply a function to each block of a DataArray or Dataset. .. warning:: This function is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a DataArray or Dataset as its first parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_obj, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. obj : DataArray, Dataset Passed to the function as its first argument, one block at a time. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like ``obj`` but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- obj : same as obj A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. 
Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in ``obj`` is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks xarray.DataArray.map_blocks Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... ).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() Size: 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( ... calculate_anomaly, ... kwargs={"groupby_type": "time.year"}, ... template=array, ... ) # doctest: +ELLIPSIS Size: 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array """ def _wrapper( func: Callable, args: list, kwargs: dict, arg_is_array: Iterable[bool], expected: ExpectedDict, expected_indexes: dict[Hashable, Index], ): """ Wrapper function that receives datasets in args; converts to dataarrays when necessary; passes these to the user function `func` and checks returned objects for expected shapes/sizes/etc. """ converted_args = [ dataset_to_dataarray(arg) if is_array else arg for is_array, arg in zip(arg_is_array, args, strict=True) ] result = func(*converted_args, **kwargs) merged_coordinates = merge( [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)], join="exact", compat="override", ).coords # check all dims are present missing_dimensions = set(expected["shapes"]) - set(result.sizes) if missing_dimensions: raise ValueError( f"Dimensions {missing_dimensions} missing on returned object." ) # check that index lengths and values are as expected for name, index in result._indexes.items(): if ( name in expected["shapes"] and result.sizes[name] != expected["shapes"][name] ): raise ValueError( f"Received dimension {name!r} of length {result.sizes[name]}. " f"Expected length {expected['shapes'][name]}." 
) # ChainMap wants MutableMapping, but xindexes is Mapping merged_indexes = collections.ChainMap( expected_indexes, merged_coordinates.xindexes, # type: ignore[arg-type] ) expected_index = merged_indexes.get(name, None) if expected_index is not None and not index.equals(expected_index): raise ValueError( f"Expected index {name!r} to be {expected_index!r}. Received {index!r} instead." ) # check that all expected variables were returned check_result_variables(result, expected, "coords") if isinstance(result, Dataset): check_result_variables(result, expected, "data_vars") return make_dict(result) if template is not None and not isinstance(template, DataArray | Dataset): raise TypeError( f"template must be a DataArray or Dataset. Received {type(template).__name__} instead." ) if not isinstance(args, Sequence): raise TypeError("args must be a sequence (for example, a list or tuple).") if kwargs is None: kwargs = {} elif not isinstance(kwargs, Mapping): raise TypeError("kwargs must be a mapping (for example, a dict)") for value in kwargs.values(): if is_dask_collection(value): raise TypeError( "Cannot pass dask collections in kwargs yet. Please compute or " "load values before passing to map_blocks." ) if not is_dask_collection(obj): return func(obj, *args, **kwargs) try: import dask import dask.array from dask.base import tokenize from dask.highlevelgraph import HighLevelGraph except ImportError: pass all_args = [obj] + list(args) is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args] is_array = [isinstance(arg, DataArray) for arg in all_args] # there should be a better way to group this. partition? xarray_indices, xarray_objs = unzip( (index, arg) for index, arg in enumerate(all_args) if is_xarray[index] ) others = [ (index, arg) for index, arg in enumerate(all_args) if not is_xarray[index] ] # all xarray objects must be aligned. This is consistent with apply_ufunc. aligned = align(*xarray_objs, join="exact") xarray_objs = tuple( dataarray_to_dataset(arg) if isinstance(arg, DataArray) else arg for arg in aligned ) # rechunk any numpy variables appropriately xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs) merged_coordinates = merge( [arg.coords for arg in aligned], join="exact", compat="override", ).coords _, npargs = unzip( sorted( list(zip(xarray_indices, xarray_objs, strict=True)) + others, key=lambda x: x[0], ) ) # check that chunk sizes are compatible input_chunks = dict(npargs[0].chunks) for arg in xarray_objs[1:]: assert_chunks_compatible(npargs[0], arg) input_chunks.update(arg.chunks) coordinates: Coordinates if template is None: # infer template by providing zero-shaped arrays template = infer_template(func, aligned[0], *args, **kwargs) template_coords = set(template.coords) preserved_coord_vars = template_coords & set(merged_coordinates) new_coord_vars = template_coords - set(merged_coordinates) preserved_coords = merged_coordinates.to_dataset()[preserved_coord_vars] # preserved_coords contains all coordinates variables that share a dimension # with any index variable in preserved_indexes # Drop any unneeded vars in a second pass, this is required for e.g. # if the mapped function were to drop a non-dimension coordinate variable. 
preserved_coords = preserved_coords.drop_vars( tuple(k for k in preserved_coords.variables if k not in template_coords) ) coordinates = merge( (preserved_coords, template.coords.to_dataset()[new_coord_vars]), # FIXME: this should be join="exact", but breaks a test join="outer", compat="override", ).coords output_chunks: Mapping[Hashable, tuple[int, ...]] = { dim: input_chunks[dim] for dim in template.dims if dim in input_chunks } else: # template xarray object has been provided with proper sizes and chunk shapes coordinates = template.coords output_chunks = template.chunksizes if not output_chunks: raise ValueError( "Provided template has no dask arrays. " " Please construct a template with appropriately chunked dask arrays." ) new_indexes = set(template.xindexes) - set(merged_coordinates) modified_indexes = set( name for name, xindex in coordinates.xindexes.items() if not xindex.equals(merged_coordinates.xindexes.get(name, None)) ) for dim in output_chunks: if dim in input_chunks and len(input_chunks[dim]) != len(output_chunks[dim]): raise ValueError( "map_blocks requires that one block of the input maps to one block of output. " f"Expected number of output chunks along dimension {dim!r} to be {len(input_chunks[dim])}. " f"Received {len(output_chunks[dim])} instead. Please provide template if not provided, or " "fix the provided template." ) if isinstance(template, DataArray): result_is_array = True template_name = template.name template = template._to_temp_dataset() elif isinstance(template, Dataset): result_is_array = False else: raise TypeError( f"func output must be DataArray or Dataset; got {type(template)}" ) # We're building a new HighLevelGraph hlg. We'll have one new layer # for each variable in the dataset, which is the result of the # func applied to the values. 
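# Illustrative sketch (not part of the library source): for a single output
# variable "a" split into two chunks along one dimension, the layers built
# below end up looking roughly like
#     { (gname, 0): (_wrapper, func, <blocked args>, ...),
#       (gname, 1): (_wrapper, func, <blocked args>, ...) }
# plus one extraction layer per output variable, e.g.
#     { ("a-" + gname, 0): (operator.getitem, (gname, 0), "a"),
#       ("a-" + gname, 1): (operator.getitem, (gname, 1), "a") }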
graph: dict[Any, Any] = {} new_layers: collections.defaultdict[str, dict[Any, Any]] = collections.defaultdict( dict ) gname = f"{dask.utils.funcname(func)}-{dask.base.tokenize(npargs[0], args, kwargs)}" # map dims to list of chunk indexes ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()} # mapping from chunk index to slice bounds input_chunk_bounds = { dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items() } output_chunk_bounds = { dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in output_chunks.items() } computed_variables = set(template.variables) - set(coordinates.indexes) # iterate over all possible chunk combinations for chunk_tuple in itertools.product(*ichunk.values()): # mapping from dimension name to chunk index chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True)) blocked_args = [ ( subset_dataset_to_block( graph, gname, arg, input_chunk_bounds, chunk_index ) if isxr else arg ) for isxr, arg in zip(is_xarray, npargs, strict=True) ] # only include new or modified indexes to minimize duplication of data indexes = { dim: coordinates.xindexes[dim][ _get_chunk_slicer(dim, chunk_index, output_chunk_bounds) ] for dim in (new_indexes | modified_indexes) } tokenized_indexes: dict[Hashable, str] = {} for k, v in indexes.items(): tokenized_v = tokenize(v) graph[f"{k}-coordinate-{tokenized_v}"] = v tokenized_indexes[k] = f"{k}-coordinate-{tokenized_v}" # raise nice error messages in _wrapper expected: ExpectedDict = { # input chunk 0 along a dimension maps to output chunk 0 along the same dimension # even if length of dimension is changed by the applied function "shapes": { k: output_chunks[k][v] for k, v in chunk_index.items() if k in output_chunks }, "data_vars": set(template.data_vars.keys()), "coords": set(template.coords.keys()), } from_wrapper = (gname,) + chunk_tuple graph[from_wrapper] = ( _wrapper, func, blocked_args, kwargs, is_array, expected, (dict, [[k, v] for k, v in tokenized_indexes.items()]), ) # mapping from variable name to dask graph key var_key_map: dict[Hashable, str] = {} for name in computed_variables: variable = template.variables[name] gname_l = f"{name}-{gname}" var_key_map[name] = gname_l # unchunked dimensions in the input have one chunk in the result # output can have new dimensions with exactly one chunk key: tuple[Any, ...] = (gname_l,) + tuple( chunk_index.get(dim, 0) for dim in variable.dims ) # We're adding multiple new layers to the graph: # The first new layer is the result of the computation on # the array. # Then we add one layer per variable, which extracts the # result for that variable, and depends on just the first new # layer. new_layers[gname_l][key] = (operator.getitem, from_wrapper, name) hlg = HighLevelGraph.from_collections( gname, graph, dependencies=[arg for arg in npargs if dask.is_dask_collection(arg)], ) # This adds in the getitems for each variable in the dataset. 
hlg = HighLevelGraph( {**hlg.layers, **new_layers}, dependencies={ **hlg.dependencies, **{name: {gname} for name in new_layers.keys()}, }, ) result = Dataset(coords=coordinates, attrs=template.attrs) for index in result._indexes: result[index].attrs = template[index].attrs result[index].encoding = template[index].encoding for name, gname_l in var_key_map.items(): dims = template[name].dims var_chunks = [] for dim in dims: if dim in output_chunks: var_chunks.append(output_chunks[dim]) elif dim in result._indexes: var_chunks.append((result.sizes[dim],)) elif dim in template.dims: # new unindexed dimension var_chunks.append((template.sizes[dim],)) data = dask.array.Array( hlg, name=gname_l, chunks=var_chunks, dtype=template[name].dtype ) result[name] = (dims, data, template[name].attrs) result[name].encoding = template[name].encoding result = result.set_coords(template._coord_names) if result_is_array: da = dataset_to_dataarray(result) da.name = template_name return da # type: ignore[return-value] return result # type: ignore[return-value] xarray-2025.12.0/xarray/core/resample.py000066400000000000000000000434061511464676000177720ustar00rootroot00000000000000from __future__ import annotations import warnings from collections.abc import Callable, Hashable, Iterable, Sequence from typing import TYPE_CHECKING, Any, Literal from xarray.core._aggregations import ( DataArrayResampleAggregations, DatasetResampleAggregations, ) from xarray.core.groupby import DataArrayGroupByBase, DatasetGroupByBase, GroupBy from xarray.core.types import Dims, InterpOptions, T_Xarray if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import T_Chunks from xarray.groupers import RESAMPLE_DIM class Resample(GroupBy[T_Xarray]): """An object that extends the `GroupBy` object with additional logic for handling specialized re-sampling operations. You should create a `Resample` object by using the `DataArray.resample` or `Dataset.resample` methods. The dimension along re-sampling See Also -------- DataArray.resample Dataset.resample """ def __init__( self, *args, dim: Hashable | None = None, resample_dim: Hashable | None = None, **kwargs, ) -> None: if dim == resample_dim: raise ValueError( f"Proxy resampling dimension ('{resample_dim}') " f"cannot have the same name as actual dimension ('{dim}')!" ) self._dim = dim super().__init__(*args, **kwargs) def _flox_reduce( self, dim: Dims, keep_attrs: bool | None = None, **kwargs, ) -> T_Xarray: result: T_Xarray = ( super() ._flox_reduce(dim=dim, keep_attrs=keep_attrs, **kwargs) .rename({RESAMPLE_DIM: self._group_dim}) # type: ignore[assignment] ) return result def shuffle_to_chunks(self, chunks: T_Chunks = None): """ Sort or "shuffle" the underlying object. "Shuffle" means the object is sorted so that all group members occur sequentially, in the same chunk. Multiple groups may occur in the same chunk. This method is particularly useful for chunked arrays (e.g. dask, cubed). particularly when you need to map a function that requires all members of a group to be present in a single chunk. For chunked array types, the order of appearance is not guaranteed, but will depend on the input chunking. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or tuple of int, optional How to adjust chunks along dimensions not present in the array being grouped by. Returns ------- DataArrayGroupBy or DatasetGroupBy Examples -------- >>> import dask.array >>> da = xr.DataArray( ... 
dims="time", ... data=dask.array.arange(10, chunks=1), ... coords={"time": xr.date_range("2001-01-01", freq="12h", periods=10)}, ... name="a", ... ) >>> shuffled = da.resample(time="2D").shuffle_to_chunks() >>> shuffled Size: 80B dask.array Coordinates: * time (time) datetime64[ns] 80B 2001-01-01 ... 2001-01-05T12:00:00 See Also -------- dask.dataframe.DataFrame.shuffle dask.array.shuffle """ (_grouper,) = self.groupers return self._shuffle_obj(chunks).drop_vars(RESAMPLE_DIM) def _first_or_last( self, op: Literal["first", "last"], skipna: bool | None, keep_attrs: bool | None ) -> T_Xarray: from xarray.core.dataset import Dataset result = super()._first_or_last(op=op, skipna=skipna, keep_attrs=keep_attrs) if isinstance(result, Dataset): # Can't do this in the base class because group_dim is RESAMPLE_DIM # which is not present in the original object for var in result.data_vars: result._variables[var] = result._variables[var].transpose( *self._obj._variables[var].dims ) return result def _drop_coords(self) -> T_Xarray: """Drop non-dimension coordinates along the resampled dimension.""" obj = self._obj for k, v in obj.coords.items(): if k != self._dim and self._dim in v.dims: obj = obj.drop_vars([k]) return obj def pad(self, tolerance: float | Iterable[float] | str | None = None) -> T_Xarray: """Forward fill new values at up-sampled frequency. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. Returns ------- padded : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="pad", tolerance=tolerance ) ffill = pad def backfill( self, tolerance: float | Iterable[float] | str | None = None ) -> T_Xarray: """Backward fill new values at up-sampled frequency. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. Returns ------- backfilled : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="backfill", tolerance=tolerance ) bfill = backfill def nearest( self, tolerance: float | Iterable[float] | str | None = None ) -> T_Xarray: """Take new values from nearest original coordinate to up-sampled frequency coordinates. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. 
Returns ------- upsampled : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="nearest", tolerance=tolerance ) def interpolate(self, kind: InterpOptions = "linear", **kwargs) -> T_Xarray: """Interpolate up-sampled data using the original data as knots. Parameters ---------- kind : {"linear", "nearest", "zero", "slinear", \ "quadratic", "cubic", "polynomial"}, default: "linear" The method used to interpolate. The method should be supported by the scipy interpolator: - ``interp1d``: {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial"} - ``interpn``: {"linear", "nearest"} If ``"polynomial"`` is passed, the ``order`` keyword argument must also be provided. Returns ------- interpolated : DataArray or Dataset See Also -------- DataArray.interp Dataset.interp scipy.interpolate.interp1d """ return self._interpolate(kind=kind, **kwargs) def _interpolate(self, kind="linear", **kwargs) -> T_Xarray: """Apply scipy.interpolate.interp1d along resampling dimension.""" obj = self._drop_coords() (grouper,) = self.groupers kwargs.setdefault("bounds_error", False) return obj.interp( coords={self._dim: grouper.full_index}, assume_sorted=True, method=kind, kwargs=kwargs, ) class DataArrayResample( Resample["DataArray"], DataArrayGroupByBase, DataArrayResampleAggregations ): """DataArrayGroupBy object specialized to time resampling operations over a specified dimension """ def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data and the indicated dimension(s) removed. """ return super().reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, shortcut=shortcut, **kwargs, ) def map( self, func: Callable[..., Any], args: tuple[Any, ...] = (), shortcut: bool | None = False, **kwargs: Any, ) -> DataArray: """Apply a function to each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each array. 
shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). args : tuple, optional Positional arguments passed on to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray The result of splitting, applying and combining this array. """ # TODO: the argument order for Resample doesn't match that for its parent, # GroupBy combined = super().map(func, shortcut=shortcut, args=args, **kwargs) # If the aggregation function didn't drop the original resampling # dimension, then we need to do so before we can rename the proxy # dimension we used. if self._dim in combined.coords: combined = combined.drop_vars([self._dim]) if RESAMPLE_DIM in combined.dims: combined = combined.rename({RESAMPLE_DIM: self._dim}) return combined def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataArrayResample.map """ warnings.warn( "Resample.apply may be deprecated in the future. Using Resample.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func=func, shortcut=shortcut, args=args, **kwargs) def asfreq(self) -> DataArray: """Return values of original object at the new up-sampling frequency; essentially a re-index with new times set to NaN. Returns ------- resampled : DataArray """ self._obj = self._drop_coords() return self.mean(None if self._dim is None else [self._dim]) class DatasetResample( Resample["Dataset"], DatasetGroupByBase, DatasetResampleAggregations ): """DatasetGroupBy object specialized to resampling a specified dimension""" def map( self, func: Callable[..., Any], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> Dataset: """Apply a function over each Dataset in the groups generated for resampling and concatenate them together into a new Dataset. `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the datasets. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped item after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments passed on to `func`. **kwargs Used to call `func(ds, **kwargs)` for each sub-dataset `ar`. Returns ------- applied : Dataset The result of splitting, applying and combining this dataset. """ # ignore shortcut if set (for now) applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped()) combined = self._combine(applied) # If the aggregation function didn't drop the original resampling # dimension, then we need to do so before we can rename the proxy # dimension we used. 
if self._dim in combined.coords: combined = combined.drop_vars(self._dim) if RESAMPLE_DIM in combined.dims: combined = combined.rename({RESAMPLE_DIM: self._dim}) return combined def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataSetResample.map """ warnings.warn( "Resample.apply may be deprecated in the future. Using Resample.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func=func, shortcut=shortcut, args=args, **kwargs) def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> Dataset: """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Array with summarized data and the indicated dimension(s) removed. """ return super().reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, shortcut=shortcut, **kwargs, ) def asfreq(self) -> Dataset: """Return values of original object at the new up-sampling frequency; essentially a re-index with new times set to NaN. Returns ------- resampled : Dataset """ self._obj = self._drop_coords() return self.mean(None if self._dim is None else [self._dim]) xarray-2025.12.0/xarray/core/resample_cftime.py000066400000000000000000000460041511464676000213160ustar00rootroot00000000000000"""Resampling for CFTimeIndex. Does not support non-integer freq.""" # The mechanisms for resampling CFTimeIndex was copied and adapted from # the source code defined in pandas.core.resample # # For reference, here is a copy of the pandas copyright notice: # # BSD 3-Clause License # # Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc. # and PyData Development Team # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import datetime import typing import numpy as np import pandas as pd from xarray.coding.cftime_offsets import ( CFTIME_TICKS, BaseCFTimeOffset, MonthEnd, QuarterEnd, Tick, YearEnd, date_range, normalize_date, to_offset, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.types import SideOptions from xarray.core.utils import emit_user_level_warning if typing.TYPE_CHECKING: from xarray.core.types import CFTimeDatetime, ResampleCompatible class CFTimeGrouper: """This is a simple container for the grouping parameters that implements a single method, the only one required for resampling in xarray. It cannot be used in a call to groupby like a pandas.Grouper object can.""" freq: BaseCFTimeOffset closed: SideOptions label: SideOptions loffset: str | datetime.timedelta | BaseCFTimeOffset | None origin: str | CFTimeDatetime offset: datetime.timedelta | None def __init__( self, freq: ResampleCompatible | BaseCFTimeOffset, closed: SideOptions | None = None, label: SideOptions | None = None, origin: str | CFTimeDatetime = "start_day", offset: str | datetime.timedelta | BaseCFTimeOffset | None = None, ): self.freq = to_offset(freq) self.origin = origin if not isinstance(self.freq, CFTIME_TICKS): if offset is not None: message = ( "The 'offset' keyword does not take effect when " "resampling with a 'freq' that is not Tick-like (h, m, s, " "ms, us)" ) emit_user_level_warning(message, category=RuntimeWarning) if origin != "start_day": message = ( "The 'origin' keyword does not take effect when " "resampling with a 'freq' that is not Tick-like (h, m, s, " "ms, us)" ) emit_user_level_warning(message, category=RuntimeWarning) if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd) or self.origin in [ "end", "end_day", ]: # The backward resample sets ``closed`` to ``'right'`` by default # since the last value should be considered as the edge point for # the last bin. When origin in "end" or "end_day", the value for a # specific ``cftime.datetime`` index stands for the resample result # from the current ``cftime.datetime`` minus ``freq`` to the current # ``cftime.datetime`` with a right close. if closed is None: self.closed = "right" else: self.closed = closed if label is None: self.label = "right" else: self.label = label else: if closed is None: self.closed = "left" else: self.closed = closed if label is None: self.label = "left" else: self.label = label if offset is not None: try: self.offset = _convert_offset_to_timedelta(offset) except (ValueError, TypeError) as error: raise ValueError( f"offset must be a datetime.timedelta object or an offset string " f"that can be converted to a timedelta. Got {type(offset)} instead." ) from error else: self.offset = None def first_items(self, index: CFTimeIndex): """Meant to reproduce the results of the following grouper = pandas.Grouper(...) first_items = pd.Series(np.arange(len(index)), index).groupby(grouper).first() with index being a CFTimeIndex instead of a DatetimeIndex. 
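# Illustrative sketch (not part of the upstream source): the pandas behaviour
# that ``first_items`` is meant to mirror, shown with a plain DatetimeIndex so
# it can be run without cftime. The index and frequency are example assumptions.
import numpy as np
import pandas as pd

index = pd.date_range("2000-01-01", periods=6, freq="12h")
grouper = pd.Grouper(freq="1D")
first_items = pd.Series(np.arange(len(index)), index).groupby(grouper).first()
# ``first_items`` maps each daily bin label to the integer position of the
# first element of ``index`` that falls inside that bin, which is what this
# method computes for a CFTimeIndex.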
""" datetime_bins, labels = _get_time_bins( index, self.freq, self.closed, self.label, self.origin, self.offset ) # check binner fits data if index[0] < datetime_bins[0]: raise ValueError("Value falls before first bin") if index[-1] > datetime_bins[-1]: raise ValueError("Value falls after last bin") integer_bins = np.searchsorted(index, datetime_bins, side=self.closed) counts = np.diff(integer_bins) codes = np.repeat(np.arange(len(labels)), counts) first_items = pd.Series(integer_bins[:-1], labels, copy=False) # Mask duplicate values with NaNs, preserving the last values non_duplicate = ~first_items.duplicated("last") return first_items.where(non_duplicate), codes def _get_time_bins( index: CFTimeIndex, freq: BaseCFTimeOffset, closed: SideOptions, label: SideOptions, origin: str | CFTimeDatetime, offset: datetime.timedelta | None, ): """Obtain the bins and their respective labels for resampling operations. Parameters ---------- index : CFTimeIndex Index object to be resampled (e.g., CFTimeIndex named 'time'). freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency (e.g., 'MS', '2D', 'H', or '3T' with coding.cftime_offsets.to_offset() applied to it). closed : 'left' or 'right' Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M' and 'A', which have a default of 'right'. label : 'left' or 'right' Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M' and 'A', which have a default of 'right'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- datetime_bins : CFTimeIndex Defines the edge of resampling bins by which original index values will be grouped into. labels : CFTimeIndex Define what the user actually sees the bins labeled as. """ if not isinstance(index, CFTimeIndex): raise TypeError( "index must be a CFTimeIndex, but got " f"an instance of {type(index).__name__!r}" ) if len(index) == 0: datetime_bins = labels = CFTimeIndex(data=[], name=index.name) return datetime_bins, labels first, last = _get_range_edges( index.min(), index.max(), freq, closed=closed, origin=origin, offset=offset ) datetime_bins = labels = date_range( freq=freq, start=first, end=last, name=index.name, use_cftime=True ) datetime_bins, labels = _adjust_bin_edges( datetime_bins, freq, closed, index, labels ) labels = labels[1:] if label == "right" else labels[:-1] # TODO: when CFTimeIndex supports missing values, if the reference index # contains missing values, insert the appropriate NaN value at the # beginning of the datetime_bins and labels indexes. 
return datetime_bins, labels def _adjust_bin_edges( datetime_bins: CFTimeIndex, freq: BaseCFTimeOffset, closed: SideOptions, index: CFTimeIndex, labels: CFTimeIndex, ) -> tuple[CFTimeIndex, CFTimeIndex]: """This is required for determining the bin edges resampling with month end, quarter end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00, 2000-02-01 12:00:00], dtype='object') Without this adjustment, _get_time_bins with month-end frequency will return the following index for the bin edges (default closed='right' and label='right' in this case): CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') If 2000-01-31 is used as a bound for a bin, the value on 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the month of January. To account for this, pandas adds a day minus one worth of microseconds to the bin edges generated by cftime range, so that we do bin the value at noon on January 31st in the January bin. This results in an index with bin edges like the following: CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59, 2000-02-29 23:59:59], dtype='object') The labels are still: CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') """ if isinstance(freq, MonthEnd | QuarterEnd | YearEnd): if closed == "right": datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1) if datetime_bins[-2] > index.max(): datetime_bins = datetime_bins[:-1] labels = labels[:-1] return datetime_bins, labels def _get_range_edges( first: CFTimeDatetime, last: CFTimeDatetime, freq: BaseCFTimeOffset, closed: SideOptions = "left", origin: str | CFTimeDatetime = "start_day", offset: datetime.timedelta | None = None, ): """Get the correct starting and ending datetimes for the resampled CFTimeIndex range. Parameters ---------- first : cftime.datetime Uncorrected starting datetime object for resampled CFTimeIndex range. Usually the min of the original CFTimeIndex. last : cftime.datetime Uncorrected ending datetime object for resampled CFTimeIndex range. Usually the max of the original CFTimeIndex. freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency. Contains information on offset type (e.g. Day or 'D') and offset magnitude (e.g., n = 3). closed : 'left' or 'right' Which side of bin interval is closed. Defaults to 'left'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- first : cftime.datetime Corrected starting datetime object for resampled CFTimeIndex range. last : cftime.datetime Corrected ending datetime object for resampled CFTimeIndex range. 
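# Illustrative sketch (not part of the upstream source): the edge correction
# applied for a non-Tick frequency (here month-start). The input dates are
# example assumptions; ``_get_range_edges`` is the private helper documented
# in the docstring above.
import cftime

from xarray.coding.cftime_offsets import to_offset
from xarray.core.resample_cftime import _get_range_edges  # the helper defined here

first = cftime.DatetimeGregorian(2000, 1, 15, 6)
last = cftime.DatetimeGregorian(2000, 3, 2, 18)
start, end = _get_range_edges(first, last, to_offset("MS"), closed="left")
# ``start`` is normalized and rolled back to 2000-01-01 00:00:00, and ``end``
# is pushed one period past the data to 2000-04-01 00:00:00, so that every
# value in the original index falls inside a bin.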
""" if isinstance(freq, Tick): first, last = _adjust_dates_anchored( first, last, freq, closed=closed, origin=origin, offset=offset ) return first, last else: first = normalize_date(first) last = normalize_date(last) first = freq.rollback(first) if closed == "left" else first - freq last = last + freq return first, last def _adjust_dates_anchored( first: CFTimeDatetime, last: CFTimeDatetime, freq: Tick, closed: SideOptions = "right", origin: str | CFTimeDatetime = "start_day", offset: datetime.timedelta | None = None, ): """First and last offsets should be calculated from the start day to fix an error cause by resampling across multiple days when a one day period is not a multiple of the frequency. See https://github.com/pandas-dev/pandas/issues/8683 Parameters ---------- first : cftime.datetime A datetime object representing the start of a CFTimeIndex range. last : cftime.datetime A datetime object representing the end of a CFTimeIndex range. freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency. Contains information on offset type (e.g. Day or 'D') and offset magnitude (e.g., n = 3). closed : 'left' or 'right' Which side of bin interval is closed. Defaults to 'right'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- fresult : cftime.datetime A datetime object representing the start of a date range that has been adjusted to fix resampling errors. lresult : cftime.datetime A datetime object representing the end of a date range that has been adjusted to fix resampling errors. """ import cftime if origin == "start_day": origin_date = normalize_date(first) elif origin == "start": origin_date = first elif origin == "epoch": origin_date = type(first)(1970, 1, 1) elif origin in ["end", "end_day"]: origin_last = last if origin == "end" else _ceil_via_cftimeindex(last, "D") sub_freq_times = (origin_last - first) // freq.as_timedelta() if closed == "left": sub_freq_times += 1 first = origin_last - sub_freq_times * freq origin_date = first elif isinstance(origin, cftime.datetime): origin_date = origin else: raise ValueError( f"origin must be one of {{'epoch', 'start_day', 'start', 'end', 'end_day'}} " f"or a cftime.datetime object. Got {origin}." 
) if offset is not None: origin_date = origin_date + offset foffset = (first - origin_date) % freq.as_timedelta() loffset = (last - origin_date) % freq.as_timedelta() if closed == "right": if foffset.total_seconds() > 0: fresult = first - foffset else: fresult = first - freq.as_timedelta() if loffset.total_seconds() > 0: lresult = last + (freq.as_timedelta() - loffset) else: lresult = last else: if foffset.total_seconds() > 0: fresult = first - foffset else: fresult = first if loffset.total_seconds() > 0: lresult = last + (freq.as_timedelta() - loffset) else: lresult = last + freq return fresult, lresult def exact_cftime_datetime_difference(a: CFTimeDatetime, b: CFTimeDatetime): """Exact computation of b - a Assumes: a = a_0 + a_m b = b_0 + b_m Here a_0, and b_0 represent the input dates rounded down to the nearest second, and a_m, and b_m represent the remaining microseconds associated with date a and date b. We can then express the value of b - a as: b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m By construction, we know that b_0 - a_0 must be a round number of seconds. Therefore we can take the result of b_0 - a_0 using ordinary cftime.datetime arithmetic and round to the nearest second. b_m - a_m is the remainder, in microseconds, and we can simply add this to the rounded timedelta. Parameters ---------- a : cftime.datetime Input datetime b : cftime.datetime Input datetime Returns ------- datetime.timedelta """ seconds = b.replace(microsecond=0) - a.replace(microsecond=0) seconds = round(seconds.total_seconds()) microseconds = b.microsecond - a.microsecond return datetime.timedelta(seconds=seconds, microseconds=microseconds) def _convert_offset_to_timedelta( offset: datetime.timedelta | str | BaseCFTimeOffset, ) -> datetime.timedelta: if isinstance(offset, datetime.timedelta): return offset if isinstance(offset, str | Tick): timedelta_cftime_offset = to_offset(offset) if isinstance(timedelta_cftime_offset, Tick): return timedelta_cftime_offset.as_timedelta() raise TypeError(f"Expected timedelta, str or Tick, got {type(offset)}") def _ceil_via_cftimeindex(date: CFTimeDatetime, freq: str | BaseCFTimeOffset): index = CFTimeIndex([date]) return index.ceil(freq).item() xarray-2025.12.0/xarray/core/treenode.py000066400000000000000000000713071511464676000177700ustar00rootroot00000000000000from __future__ import annotations import collections import sys from collections.abc import Iterator, Mapping from pathlib import PurePosixPath from typing import TYPE_CHECKING, Any, TypeVar from xarray.core.types import Self from xarray.core.utils import Frozen, is_dict_like if TYPE_CHECKING: from xarray.core.dataarray import DataArray class InvalidTreeError(Exception): """Raised when user attempts to create an invalid tree in some way.""" class NotFoundInTreeError(ValueError): """Raised when operation can't be completed because one node is not part of the expected tree.""" class NodePath(PurePosixPath): """Represents a path from one node to another within a tree.""" def __init__(self, *pathsegments): if sys.version_info >= (3, 12): super().__init__(*pathsegments) else: super().__new__(PurePosixPath, *pathsegments) if self.drive: raise ValueError("NodePaths cannot have drives") if self.root not in ["/", ""]: raise ValueError( 'Root of NodePath can only be either "/" or "", with "" meaning the path is relative.' ) # TODO should we also forbid suffixes to avoid node names with dots in them? 
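# Illustrative sketch (not part of the upstream source): NodePath is a
# PurePosixPath subclass, so familiar pure-path operations apply to tree
# paths. The paths used are example assumptions; the commented values follow
# from PurePosixPath semantics.
from xarray.core.treenode import NodePath  # the class defined here

p = NodePath("/group/subgroup")
p.parts                     # ('/', 'group', 'subgroup')
p.name                      # 'subgroup'
NodePath("a/b").root        # '' -- an empty root marks a relative path
NodePath("a/b").absolute()  # NodePath('/a/b'), via the method defined below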
def absolute(self) -> Self: """Convert into an absolute path.""" return type(self)("/", *self.parts) class TreeNode: """ Base class representing a node of a tree, with methods for traversing and altering the tree. This class stores no data, it has only parents and children attributes, and various methods. Stores child nodes in a dict, ensuring that equality checks between trees and order of child nodes is preserved (since python 3.7). Nodes themselves are intrinsically unnamed (do not possess a ._name attribute), but if the node has a parent you can find the key it is stored under via the .name property. The .parent attribute is read-only: to replace the parent using public API you must set this node as the child of a new parent using `new_parent.children[name] = child_node`, or to instead detach from the current parent use `child_node.orphan()`. This class is intended to be subclassed by DataTree, which will overwrite some of the inherited behaviour, in particular to make names an inherent attribute, and allow setting parents directly. The intention is to mirror the class structure of xarray.Variable & xarray.DataArray, where Variable is unnamed but DataArray is (optionally) named. Also allows access to any other node in the tree via unix-like paths, including upwards referencing via '../'. (This class is heavily inspired by the anytree library's NodeMixin class.) """ _parent: Self | None _children: dict[str, Self] def __init__(self, children: Mapping[str, Self] | None = None): """Create a parentless node.""" self._parent = None self._children = {} if children: # shallow copy to avoid modifying arguments in-place (see GH issue #9196) self.children = {name: child.copy() for name, child in children.items()} @property def parent(self) -> Self | None: """Parent of this node.""" return self._parent @parent.setter def parent(self, new_parent: Self) -> None: raise AttributeError( "Cannot set parent attribute directly, you must modify the children of the other node instead using dict-like syntax" ) def _set_parent( self, new_parent: Self | None, child_name: str | None = None ) -> None: # TODO is it possible to refactor in a way that removes this private method? if new_parent is not None and not isinstance(new_parent, TreeNode): raise TypeError( "Parent nodes must be of type DataTree or None, " f"not type {type(new_parent)}" ) old_parent = self._parent if new_parent is not old_parent: self._check_loop(new_parent) self._detach(old_parent) self._attach(new_parent, child_name) def _check_loop(self, new_parent: Self | None) -> None: """Checks that assignment of this new parent will not create a cycle.""" if new_parent is not None: if new_parent is self: raise InvalidTreeError( f"Cannot set parent, as node {self} cannot be a parent of itself." ) if self._is_descendant_of(new_parent): raise InvalidTreeError( "Cannot set parent, as intended parent is already a descendant of this node." 
) def _is_descendant_of(self, node: Self) -> bool: return any(n is self for n in node.parents) def _detach(self, parent: Self | None) -> None: if parent is not None: self._pre_detach(parent) parents_children = parent.children parent._children = { name: child for name, child in parents_children.items() if child is not self } self._parent = None self._post_detach(parent) def _attach(self, parent: Self | None, child_name: str | None = None) -> None: if parent is not None: if child_name is None: raise ValueError( "To directly set parent, child needs a name, but child is unnamed" ) self._pre_attach(parent, child_name) parentchildren = parent._children assert not any(child is self for child in parentchildren), ( "Tree is corrupt." ) parentchildren[child_name] = self self._parent = parent self._post_attach(parent, child_name) else: self._parent = None def orphan(self) -> None: """Detach this node from its parent.""" self._set_parent(new_parent=None) @property def children(self) -> Mapping[str, Self]: """Child nodes of this node, stored under a mapping via their names.""" return Frozen(self._children) @children.setter def children(self, children: Mapping[str, Self]) -> None: self._check_children(children) children = {**children} old_children = self.children del self.children try: self._pre_attach_children(children) for name, child in children.items(): child._set_parent(new_parent=self, child_name=name) self._post_attach_children(children) assert len(self.children) == len(children) except Exception: # if something goes wrong then revert to previous children self.children = old_children raise @children.deleter def children(self) -> None: # TODO this just detaches all the children, it doesn't actually delete them... children = self.children self._pre_detach_children(children) for child in self.children.values(): child.orphan() assert len(self.children) == 0 self._post_detach_children(children) @staticmethod def _check_children(children: Mapping[str, TreeNode]) -> None: """Check children for correct types and for any duplicates.""" if not is_dict_like(children): raise TypeError( "children must be a dict-like mapping from names to node objects" ) seen = set() for name, child in children.items(): if not isinstance(child, TreeNode): raise TypeError( f"Cannot add object {name}. It is of type {type(child)}, " "but can only add children of type DataTree" ) childid = id(child) if childid not in seen: seen.add(childid) else: raise InvalidTreeError( f"Cannot add same node {name} multiple times as different children." ) def __repr__(self) -> str: return f"TreeNode(children={dict(self._children)})" def _pre_detach_children(self, children: Mapping[str, Self]) -> None: """Method call before detaching `children`.""" pass def _post_detach_children(self, children: Mapping[str, Self]) -> None: """Method call after detaching `children`.""" pass def _pre_attach_children(self, children: Mapping[str, Self]) -> None: """Method call before attaching `children`.""" pass def _post_attach_children(self, children: Mapping[str, Self]) -> None: """Method call after attaching `children`.""" pass def copy(self, *, inherit: bool = True, deep: bool = False) -> Self: """ Returns a copy of this subtree. Copies this node and all child nodes. If `deep=True`, a deep copy is made of each of the component variables. Otherwise, a shallow copy of each of the component variable is made, so that the underlying memory region of the new datatree is the same as in the original datatree. 
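# Illustrative sketch (not part of the upstream source): shallow versus deep
# copies of a subtree, using the public DataTree API that builds on this
# method. The tree contents are example assumptions.
import numpy as np
import xarray as xr

tree = xr.DataTree.from_dict({"/a": xr.Dataset({"x": ("y", np.arange(3.0))})})
shallow = tree.copy()        # new node objects, but array data is shared
deep = tree.copy(deep=True)  # array data is copied into new memory as well
detached = tree["a"].copy(inherit=False)  # do not carry over coordinates inherited from the root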
Parameters ---------- inherit : bool Whether inherited coordinates defined on parents of this node should also be copied onto the new tree. Only relevant if the `parent` of this node is not yet, and "Inherited coordinates" appear in its repr. deep : bool Whether each component variable is loaded into memory and copied onto the new object. Default is False. Returns ------- object : DataTree New object with dimensions, attributes, coordinates, name, encoding, and data of this node and all child nodes copied from original. See Also -------- xarray.Dataset.copy pandas.DataFrame.copy """ return self._copy_subtree(inherit=inherit, deep=deep) def _copy_subtree( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy entire subtree recursively.""" new_tree = self._copy_node(inherit=inherit, deep=deep, memo=memo) for name, child in self.children.items(): # TODO use `.children[name] = ...` once #9477 is implemented new_tree._set( name, child._copy_subtree(inherit=False, deep=deep, memo=memo) ) return new_tree def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree""" new_empty_node = type(self)() return new_empty_node def __copy__(self) -> Self: return self._copy_subtree(inherit=True, deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy_subtree(inherit=True, deep=True, memo=memo) def _iter_parents(self) -> Iterator[Self]: """Iterate up the tree, starting from the current node's parent.""" node: Self | None = self.parent while node is not None: yield node node = node.parent def iter_lineage(self) -> tuple[Self, ...]: """Iterate up the tree, starting from the current node.""" from warnings import warn warn( "`iter_lineage` has been deprecated, and in the future will raise an error." "Please use `parents` from now on.", DeprecationWarning, stacklevel=2, ) return (self, *self.parents) @property def lineage(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the closest.""" from warnings import warn warn( "`lineage` has been deprecated, and in the future will raise an error." "Please use `parents` from now on.", DeprecationWarning, stacklevel=2, ) return self.iter_lineage() @property def parents(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the closest.""" return tuple(self._iter_parents()) @property def ancestors(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the most distant.""" from warnings import warn warn( "`ancestors` has been deprecated, and in the future will raise an error." "Please use `parents`. Example: `tuple(reversed(node.parents))`", DeprecationWarning, stacklevel=2, ) return (*reversed(self.parents), self) @property def root(self) -> Self: """Root node of the tree""" node = self while node.parent is not None: node = node.parent return node @property def is_root(self) -> bool: """Whether this node is the tree root.""" return self.parent is None @property def is_leaf(self) -> bool: """ Whether this node is a leaf node. Leaf nodes are defined as nodes which have no children. """ return self.children == {} @property def leaves(self) -> tuple[Self, ...]: """ All leaf nodes. Leaf nodes are defined as nodes which have no children. """ return tuple(node for node in self.subtree if node.is_leaf) @property def siblings(self) -> dict[str, Self]: """ Nodes with the same parent as this node. 
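# Illustrative sketch (not part of the upstream source): the tree-navigation
# properties defined on this class (``siblings``, ``subtree``, ``level``,
# ``depth``, ``width``), demonstrated on a small public DataTree. The node
# names are example assumptions.
import xarray as xr

tree = xr.DataTree.from_dict({"/a": None, "/b": None, "/b/c": None})
list(tree.subtree)           # breadth-first: root, 'a', 'b', then 'c'
tree.children["a"].siblings  # {'b': ...} -- same parent, excluding itself
tree.children["b"].level     # 1 (number of parents above the node)
tree.depth                   # 2 (deepest level in the tree, here node 'c')
tree.children["b"].width     # 2 (nodes at level 1: 'a' and 'b')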
""" if self.parent: return { name: child for name, child in self.parent.children.items() if child is not self } else: return {} @property def subtree(self) -> Iterator[Self]: """ Iterate over all nodes in this tree, including both self and all descendants. Iterates breadth-first. See Also -------- DataTree.subtree_with_keys DataTree.descendants group_subtrees """ # https://en.wikipedia.org/wiki/Breadth-first_search#Pseudocode queue = collections.deque([self]) while queue: node = queue.popleft() yield node queue.extend(node.children.values()) @property def subtree_with_keys(self) -> Iterator[tuple[str, Self]]: """ Iterate over relative paths and node pairs for all nodes in this tree. Iterates breadth-first. See Also -------- DataTree.subtree DataTree.descendants group_subtrees """ queue = collections.deque([(NodePath(), self)]) while queue: path, node = queue.popleft() yield str(path), node queue.extend((path / name, child) for name, child in node.children.items()) @property def descendants(self) -> tuple[Self, ...]: """ Child nodes and all their child nodes. Returned in depth-first order. See Also -------- DataTree.subtree """ all_nodes = tuple(self.subtree) _this_node, *descendants = all_nodes return tuple(descendants) @property def level(self) -> int: """ Level of this node. Level means number of parent nodes above this node before reaching the root. The root node is at level 0. Returns ------- level : int See Also -------- depth width """ return len(self.parents) @property def depth(self) -> int: """ Maximum level of this tree. Measured from the root, which has a depth of 0. Returns ------- depth : int See Also -------- level width """ return max(node.level for node in self.root.subtree) @property def width(self) -> int: """ Number of nodes at this level in the tree. Includes number of immediate siblings, but also "cousins" in other branches and so-on. Returns ------- depth : int See Also -------- level depth """ return len([node for node in self.root.subtree if node.level == self.level]) def _pre_detach(self, parent: Self) -> None: """Method call before detaching from `parent`.""" pass def _post_detach(self, parent: Self) -> None: """Method call after detaching from `parent`.""" pass def _pre_attach(self, parent: Self, name: str) -> None: """Method call before attaching to `parent`.""" pass def _post_attach(self, parent: Self, name: str) -> None: """Method call after attaching to `parent`.""" pass def get(self, key: str, default: Self | None = None) -> Self | None: """ Return the child node with the specified key. Only looks for the node within the immediate children of this node, not in other nodes of the tree. """ if key in self.children: return self.children[key] else: return default # TODO `._walk` method to be called by both `_get_item` and `_set_item` def _get_item(self, path: str | NodePath) -> Self | DataArray: """ Returns the object lying at the given path. Raises a KeyError if there is no object at the given path. 
""" if isinstance(path, str): path = NodePath(path) if path.root: current_node = self.root _root, *parts = list(path.parts) else: current_node = self parts = list(path.parts) for part in parts: if part == "..": if current_node.parent is None: raise KeyError(f"Could not find node at {path}") else: current_node = current_node.parent elif part in ("", "."): pass else: child = current_node.get(part) if child is None: raise KeyError(f"Could not find node at {path}") current_node = child return current_node def _set(self, key: str, val: Any) -> None: """ Set the child node with the specified key to value. Counterpart to the public .get method, and also only works on the immediate node, not other nodes in the tree. """ new_children = {**self.children, key: val} self.children = new_children def _set_item( self, path: str | NodePath, item: Any, new_nodes_along_path: bool = False, allow_overwrite: bool = True, ) -> None: """ Set a new item in the tree, overwriting anything already present at that path. The given value either forms a new node of the tree or overwrites an existing item at that location. Parameters ---------- path item new_nodes_along_path : bool If true, then if necessary new nodes will be created along the given path, until the tree can reach the specified location. allow_overwrite : bool Whether or not to overwrite any existing node at the location given by path. Raises ------ KeyError If node cannot be reached, and new_nodes_along_path=False. Or if a node already exists at the specified path, and allow_overwrite=False. """ if isinstance(path, str): path = NodePath(path) if not path.name: raise ValueError("Can't set an item under a path which has no name") if path.root: # absolute path current_node = self.root _root, *parts, name = path.parts else: # relative path current_node = self *parts, name = path.parts if parts: # Walk to location of new node, creating intermediate node objects as we go if necessary for part in parts: if part == "..": if current_node.parent is None: # We can't create a parent if `new_nodes_along_path=True` as we wouldn't know what to name it raise KeyError(f"Could not reach node at path {path}") else: current_node = current_node.parent elif part in ("", "."): pass elif part in current_node.children: current_node = current_node.children[part] elif new_nodes_along_path: # Want child classes (i.e. DataTree) to populate tree with their own types new_node = type(self)() current_node._set(part, new_node) current_node = current_node.children[part] else: raise KeyError(f"Could not reach node at path {path}") if name in current_node.children: # Deal with anything already existing at this location if allow_overwrite: current_node._set(name, item) else: raise KeyError(f"Already a node object at path {path}") else: current_node._set(name, item) def __delitem__(self, key: str) -> None: """Remove a child node from this tree object.""" if key in self.children: child = self._children[key] del self._children[key] child.orphan() else: raise KeyError(key) def same_tree(self, other: Self) -> bool: """True if other node is in the same tree as this node.""" return self.root is other.root AnyNamedNode = TypeVar("AnyNamedNode", bound="NamedNode") def _validate_name(name: str | None) -> None: if name is not None: if not isinstance(name, str): raise TypeError("node name must be a string or None") if "/" in name: raise ValueError("node names cannot contain forward slashes") class NamedNode(TreeNode): """ A TreeNode which knows its own name. 
Implements path-like relationships to other nodes in its tree. """ _name: str | None def __init__( self, name: str | None = None, children: Mapping[str, Self] | None = None, ): super().__init__(children=children) _validate_name(name) self._name = name @property def name(self) -> str | None: """The name of this node.""" return self._name @name.setter def name(self, name: str | None) -> None: if self.parent is not None: raise ValueError( "cannot set the name of a node which already has a parent. " "Consider creating a detached copy of this node via .copy() " "on the parent node." ) _validate_name(name) self._name = name def __repr__(self, level=0): repr_value = "\t" * level + self.__str__() + "\n" for child in self.children: repr_value += self.get(child).__repr__(level + 1) return repr_value def __str__(self) -> str: name_repr = repr(self.name) if self.name is not None else "" return f"NamedNode({name_repr})" def _post_attach(self, parent: Self, name: str) -> None: """Ensures child has name attribute corresponding to key under which it has been stored.""" _validate_name(name) # is this check redundant? self._name = name def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree""" new_node = super()._copy_node(inherit=inherit, deep=deep, memo=memo) new_node._name = self.name return new_node @property def path(self) -> str: """Return the file-like path from the root to this node.""" if self.is_root: return "/" else: _root, *ancestors = tuple(reversed(self.parents)) # don't include name of root because (a) root might not have a name & (b) we want path relative to root. names = [*(node.name for node in ancestors), self.name] return "/" + "/".join(names) # type: ignore[arg-type] def relative_to(self, other: Self) -> str: """ Compute the relative path from this node to node `other`. If other is not in this tree, or it's otherwise impossible, raise a ValueError. """ if not self.same_tree(other): raise NotFoundInTreeError( "Cannot find relative path because nodes do not lie within the same tree" ) this_path = NodePath(self.path) if any(other.path == parent.path for parent in (self, *self.parents)): return str(this_path.relative_to(other.path)) else: common_ancestor = self.find_common_ancestor(other) path_to_common_ancestor = other._path_to_ancestor(common_ancestor) return str( path_to_common_ancestor / this_path.relative_to(common_ancestor.path) ) def find_common_ancestor(self, other: Self) -> Self: """ Find the first common ancestor of two nodes in the same tree. Raise ValueError if they are not in the same tree. """ if self is other: return self other_paths = [op.path for op in other.parents] for parent in (self, *self.parents): if parent.path in other_paths: return parent raise NotFoundInTreeError( "Cannot find common ancestor because nodes do not lie within the same tree" ) def _path_to_ancestor(self, ancestor: Self) -> NodePath: """Return the relative path from this node to the given ancestor node""" if not self.same_tree(ancestor): raise NotFoundInTreeError( "Cannot find relative path to ancestor because nodes do not lie within the same tree" ) if ancestor.path not in [a.path for a in (self, *self.parents)]: raise NotFoundInTreeError( "Cannot find relative path to ancestor because given node is not an ancestor of this node" ) parents_paths = [parent.path for parent in (self, *self.parents)] generation_gap = list(parents_paths).index(ancestor.path) path_upwards = "../" * generation_gap if generation_gap > 0 else "." 
return NodePath(path_upwards) class TreeIsomorphismError(ValueError): """Error raised if two tree objects do not share the same node structure.""" def group_subtrees( *trees: AnyNamedNode, ) -> Iterator[tuple[str, tuple[AnyNamedNode, ...]]]: """Iterate over subtrees grouped by relative paths in breadth-first order. `group_subtrees` allows for applying operations over all nodes of a collection of DataTree objects with nodes matched by their relative paths. Example usage:: outputs = {} for path, (node_a, node_b) in group_subtrees(tree_a, tree_b): outputs[path] = f(node_a, node_b) tree_out = DataTree.from_dict(outputs) Parameters ---------- *trees : Tree Trees to iterate over. Yields ------ A tuple of the relative path and corresponding nodes for each subtree in the inputs. Raises ------ TreeIsomorphismError If trees are not isomorphic, i.e., they have different structures. See also -------- DataTree.subtree DataTree.subtree_with_keys """ if not trees: raise TypeError("must pass at least one tree object") # https://en.wikipedia.org/wiki/Breadth-first_search#Pseudocode queue = collections.deque([(NodePath(), trees)]) while queue: path, active_nodes = queue.popleft() # yield before raising an error, in case the caller chooses to exit # iteration early yield str(path), active_nodes first_node = active_nodes[0] if any( sibling.children.keys() != first_node.children.keys() for sibling in active_nodes[1:] ): path_str = "root node" if not path.parts else f"node {str(path)!r}" child_summary = " vs ".join( str(list(node.children)) for node in active_nodes ) raise TreeIsomorphismError( f"children at {path_str} do not match: {child_summary}" ) for name in first_node.children: child_nodes = tuple(node.children[name] for node in active_nodes) queue.append((path / name, child_nodes)) def zip_subtrees( *trees: AnyNamedNode, ) -> Iterator[tuple[AnyNamedNode, ...]]: """Zip together subtrees aligned by relative path.""" for _, nodes in group_subtrees(*trees): yield nodes xarray-2025.12.0/xarray/core/types.py000066400000000000000000000272021511464676000173220ustar00rootroot00000000000000from __future__ import annotations import datetime from collections.abc import Callable, Collection, Hashable, Iterator, Mapping, Sequence from types import EllipsisType from typing import ( TYPE_CHECKING, Any, Literal, Protocol, Self, SupportsIndex, TypeAlias, TypeVar, Union, overload, runtime_checkable, ) import numpy as np import pandas as pd from numpy._typing import _SupportsDType from numpy.typing import ArrayLike if TYPE_CHECKING: from xarray.backends.common import BackendEntrypoint from xarray.core.common import AbstractArray, DataWithCoords from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.indexes import Index, Indexes from xarray.core.utils import Frozen from xarray.core.variable import IndexVariable, Variable from xarray.groupers import Grouper, Resampler from xarray.structure.alignment import Aligner GroupInput: TypeAlias = ( str | DataArray | IndexVariable | Sequence[Hashable] | Mapping[Any, Grouper] | None ) try: from dask.array import Array as DaskArray except ImportError: DaskArray = np.ndarray # type: ignore[misc, assignment, unused-ignore] try: from cubed import Array as CubedArray except ImportError: CubedArray = np.ndarray try: from zarr import Array as ZarrArray from zarr import Group as ZarrGroup except ImportError: ZarrArray = np.ndarray # type: ignore[misc, 
assignment, unused-ignore] ZarrGroup = Any # type: ignore[misc, assignment, unused-ignore] try: # this is V3 only from zarr.storage import StoreLike as ZarrStoreLike except ImportError: ZarrStoreLike = Any # type: ignore[misc, assignment, unused-ignore] # Anything that can be coerced to a shape tuple _ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] _DTypeLikeNested = Any # TODO: wait for support for recursive types # Xarray requires a Mapping[Hashable, dtype] in many places which # conflicts with numpys own DTypeLike (with dtypes for fields). # https://numpy.org/devdocs/reference/typing.html#numpy.typing.DTypeLike # This is a copy of this DTypeLike that allows only non-Mapping dtypes. DTypeLikeSave = Union[ np.dtype[Any], # default data type (float64) None, # array-scalar types and generic types type[Any], # character codes, type strings or comma-separated fields, e.g., 'float64' str, # (flexible_dtype, itemsize) tuple[_DTypeLikeNested, int], # (fixed_dtype, shape) tuple[_DTypeLikeNested, _ShapeLike], # (base_dtype, new_dtype) tuple[_DTypeLikeNested, _DTypeLikeNested], # because numpy does the same? list[Any], # anything with a dtype attribute _SupportsDType[np.dtype[Any]], ] else: DTypeLikeSave: Any = None # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases try: from cftime import datetime as CFTimeDatetime except ImportError: CFTimeDatetime = np.datetime64 DatetimeLike: TypeAlias = ( pd.Timestamp | datetime.datetime | np.datetime64 | CFTimeDatetime ) class Alignable(Protocol): """Represents any Xarray type that supports alignment. It may be ``Dataset``, ``DataArray`` or ``Coordinates``. This protocol class is needed since those types do not all have a common base class. """ @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: ... @property def sizes(self) -> Mapping[Hashable, int]: ... @property def xindexes(self) -> Indexes[Index]: ... def _reindex_callback( self, aligner: Any, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: ... def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, ) -> Self: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[Hashable]: ... def copy( self, deep: bool = False, ) -> Self: ... T_Alignable = TypeVar("T_Alignable", bound="Alignable") T_Aligner = TypeVar("T_Aligner", bound="Aligner") T_Backend = TypeVar("T_Backend", bound="BackendEntrypoint") T_Dataset = TypeVar("T_Dataset", bound="Dataset") T_DataArray = TypeVar("T_DataArray", bound="DataArray") T_Variable = TypeVar("T_Variable", bound="Variable") T_Coordinates = TypeVar("T_Coordinates", bound="Coordinates") T_Array = TypeVar("T_Array", bound="AbstractArray") T_Index = TypeVar("T_Index", bound="Index") # `T_Xarray` is a type variable that can be either "DataArray" or "Dataset". When used # in a function definition, all inputs and outputs annotated with `T_Xarray` must be of # the same concrete type, either "DataArray" or "Dataset". This is generally preferred # over `T_DataArrayOrSet`, given the type system can determine the exact type. T_Xarray = TypeVar("T_Xarray", "DataArray", "Dataset") # `T_DataArrayOrSet` is a type variable that is bounded to either "DataArray" or # "Dataset". Use it for functions that might return either type, but where the exact # type cannot be determined statically using the type system. 
T_DataArrayOrSet = TypeVar("T_DataArrayOrSet", bound=Union["Dataset", "DataArray"]) # For working directly with `DataWithCoords`. It will only allow using methods defined # on `DataWithCoords`. T_DataWithCoords = TypeVar("T_DataWithCoords", bound="DataWithCoords") # Temporary placeholder for indicating an array api compliant type. # hopefully in the future we can narrow this down more: T_DuckArray = TypeVar("T_DuckArray", bound=Any, covariant=True) # noqa: PLC0105 # For typing pandas extension arrays. T_ExtensionArray = TypeVar("T_ExtensionArray", bound=pd.api.extensions.ExtensionArray) ScalarOrArray = Union["ArrayLike", np.generic] VarCompatible = Union["Variable", "ScalarOrArray"] DaCompatible = Union["DataArray", "VarCompatible"] DsCompatible = Union["Dataset", "DaCompatible"] DtCompatible = Union["DataTree", "DsCompatible"] GroupByCompatible = Union["Dataset", "DataArray"] # Don't change to Hashable | Collection[Hashable] # Read: https://github.com/pydata/xarray/issues/6142 Dims = Union[str, Collection[Hashable], EllipsisType, None] # FYI in some cases we don't allow `None`, which this doesn't take account of. # FYI the `str` is for a size string, e.g. "16MB", supported by dask. T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None # noqa: PYI051 T_ChunkDimFreq: TypeAlias = Union["Resampler", T_ChunkDim] T_ChunksFreq: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDimFreq] # We allow the tuple form of this (though arguably we could transition to named dims only) T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim] T_NormalizedChunks = tuple[tuple[int, ...], ...] DataVars = Mapping[Any, Any] ErrorOptions = Literal["raise", "ignore"] ErrorOptionsWithWarn = Literal["raise", "warn", "ignore"] CompatOptions = Literal[ "identical", "equals", "broadcast_equals", "no_conflicts", "override", "minimal" ] ConcatOptions = Literal["all", "minimal", "different"] CombineAttrsOptions = Union[ Literal["drop", "identical", "no_conflicts", "drop_conflicts", "override"], Callable[..., Any], ] JoinOptions = Literal["outer", "inner", "left", "right", "exact", "override"] Interp1dOptions = Literal[ "linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial", ] InterpolantOptions = Literal[ "barycentric", "krogh", "pchip", "spline", "akima", "makima" ] InterpnOptions = Literal["linear", "nearest", "slinear", "cubic", "quintic", "pchip"] InterpOptions = Union[Interp1dOptions, InterpolantOptions, InterpnOptions] DatetimeUnitOptions = ( Literal["W", "D", "h", "m", "s", "ms", "us", "ฮผs", "ns", "ps", "fs", "as"] | None ) NPDatetimeUnitOptions = Literal["D", "h", "m", "s", "ms", "us", "ns"] PDDatetimeUnitOptions = Literal["s", "ms", "us", "ns"] QueryEngineOptions = Literal["python", "numexpr"] | None QueryParserOptions = Literal["pandas", "python"] ReindexMethodOptions = Literal["nearest", "pad", "ffill", "backfill", "bfill"] | None PadModeOptions = Literal[ "constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect", "symmetric", "wrap", ] T_PadConstantValues = float | tuple[float, float] T_VarPadConstantValues = T_PadConstantValues | Mapping[Any, T_PadConstantValues] T_DatasetPadConstantValues = ( T_VarPadConstantValues | Mapping[Any, T_VarPadConstantValues] ) PadReflectOptions = Literal["even", "odd"] | None CFCalendar = Literal[ "standard", "gregorian", "proleptic_gregorian", "noleap", "365_day", "360_day", "julian", "all_leap", "366_day", ] CoarsenBoundaryOptions = Literal["exact", "trim", "pad"] SideOptions = Literal["left", 
"right"] InclusiveOptions = Literal["both", "neither", "left", "right"] ScaleOptions = Literal["linear", "symlog", "log", "logit"] | None HueStyleOptions = Literal["continuous", "discrete"] | None AspectOptions = Union[Literal["auto", "equal"], float, None] ExtendOptions = Literal["neither", "both", "min", "max"] | None _T_co = TypeVar("_T_co", covariant=True) class NestedSequence(Protocol[_T_co]): def __len__(self, /) -> int: ... @overload def __getitem__(self, index: int, /) -> _T_co | NestedSequence[_T_co]: ... @overload def __getitem__(self, index: slice, /) -> NestedSequence[_T_co]: ... def __iter__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ... def __reversed__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ... _T = TypeVar("_T") NestedDict = dict[str, "NestedDict[_T] | _T"] AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True) # this is shamelessly stolen from pandas._typing @runtime_checkable class BaseBuffer(Protocol): @property def mode(self) -> str: # for _get_filepath_or_buffer ... def seek(self, offset: int, whence: int = ..., /) -> int: # with one argument: gzip.GzipFile, bz2.BZ2File # with two arguments: zip.ZipFile, read_sas ... def seekable(self) -> bool: # for bz2.BZ2File ... def tell(self) -> int: # for zip.ZipFile, read_stata, to_stata ... @runtime_checkable class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, n: int = ..., /) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... QuantileMethods = Literal[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", "interpolated_inverted_cdf", "hazen", "weibull", "linear", "median_unbiased", "normal_unbiased", "lower", "higher", "midpoint", "nearest", ] NetcdfWriteModes = Literal["w", "a"] ZarrWriteModes = Literal["w", "w-", "a", "a-", "r+", "r"] GroupKey = Any GroupIndex = Union[slice, list[int]] GroupIndices = tuple[GroupIndex, ...] Bins = Union[ int, Sequence[int], Sequence[float], Sequence[pd.Timestamp], np.ndarray, pd.Index ] ResampleCompatible: TypeAlias = str | datetime.timedelta | pd.Timedelta | pd.DateOffset class Closable(Protocol): def close(self) -> None: ... class Lock(Protocol): def acquire(self, *args, **kwargs) -> Any: ... def release(self) -> None: ... def __enter__(self) -> Any: ... def __exit__(self, *args, **kwargs) -> None: ... xarray-2025.12.0/xarray/core/utils.py000066400000000000000000001230331511464676000173150ustar00rootroot00000000000000"""Internal utilities; not for external use""" # Some functions in this module are derived from functions in pandas. For # reference, here is a copy of the pandas copyright notice: # BSD 3-Clause License # Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2011-2022, Open source contributors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import contextlib import difflib import functools import importlib import inspect import io import itertools import math import os import re import sys import warnings from collections.abc import ( Callable, Collection, Container, Hashable, ItemsView, Iterable, Iterator, KeysView, Mapping, MutableMapping, MutableSet, Sequence, ValuesView, ) from collections.abc import ( Set as AbstractSet, ) from enum import Enum from pathlib import Path from types import EllipsisType, ModuleType from typing import ( TYPE_CHECKING, Any, Generic, Literal, TypeGuard, TypeVar, cast, overload, ) import numpy as np import pandas as pd from xarray.namedarray.utils import ( # noqa: F401 ReprObject, drop_missing_dims, either_dict_or_kwargs, infix_dims, is_dask_collection, is_dict_like, is_duck_array, is_duck_dask_array, module_available, to_0d_object_array, ) if TYPE_CHECKING: from xarray.core.types import Dims, ErrorOptionsWithWarn, NestedDict K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") def is_allowed_extension_array_dtype(dtype: Any): return pd.api.types.is_extension_array_dtype(dtype) and not isinstance( # noqa: TID251 dtype, pd.StringDtype ) def is_allowed_extension_array(array: Any) -> bool: return ( hasattr(array, "dtype") and is_allowed_extension_array_dtype(array.dtype) and not isinstance(array, pd.arrays.NumpyExtensionArray) # type: ignore[attr-defined] ) def alias_message(old_name: str, new_name: str) -> str: return f"{old_name} has been deprecated. Use {new_name} instead." def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None: warnings.warn( alias_message(old_name, new_name), FutureWarning, stacklevel=stacklevel ) def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]: assert isinstance(old_name, str) @functools.wraps(obj) def wrapper(*args, **kwargs): alias_warning(old_name, obj.__name__) return obj(*args, **kwargs) wrapper.__doc__ = alias_message(old_name, obj.__name__) return wrapper def did_you_mean( word: Hashable, possibilities: Iterable[Hashable], *, n: int = 10 ) -> str: """ Suggest a few correct words based on a list of possibilities Parameters ---------- word : Hashable Word to compare to a list of possibilities. possibilities : Iterable of Hashable The iterable of Hashable that contains the correct values. n : int, default: 10 Maximum number of suggestions to show. Examples -------- >>> did_you_mean("bluch", ("blech", "gray_r", 1, None, (2, 56))) "Did you mean one of ('blech',)?" >>> did_you_mean("none", ("blech", "gray_r", 1, None, (2, 56))) 'Did you mean one of (None,)?' 
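    If nothing is close enough, an empty string is returned (the default
    ``msg``), so the hint can be appended to an error message
    unconditionally. For example:

    >>> did_you_mean("zzz", ("blech", "gray_r", 1, None, (2, 56)))
    ''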
See also -------- https://en.wikipedia.org/wiki/String_metric """ # Convert all values to string, get_close_matches doesn't handle all hashables: possibilities_str: dict[str, Hashable] = {str(k): k for k in possibilities} msg = "" if len( best_str := difflib.get_close_matches( str(word), list(possibilities_str.keys()), n=n ) ): best = tuple(possibilities_str[k] for k in best_str) msg = f"Did you mean one of {best}?" return msg def get_valid_numpy_dtype(array: np.ndarray | pd.Index) -> np.dtype: """Return a numpy compatible dtype from either a numpy array or a pandas.Index. Used for wrapping a pandas.Index as an xarray.Variable. """ if isinstance(array, pd.PeriodIndex): return np.dtype("O") if hasattr(array, "categories"): # category isn't a real numpy dtype dtype = array.categories.dtype if not is_valid_numpy_dtype(dtype): dtype = np.dtype("O") return dtype if not is_valid_numpy_dtype(array.dtype): return np.dtype("O") return array.dtype # type: ignore[return-value] def maybe_coerce_to_str(index, original_coords): """maybe coerce a pandas Index back to a nunpy array of type str pd.Index uses object-dtype to store str - try to avoid this for coords """ from xarray.core import dtypes try: result_type = dtypes.result_type(*original_coords) except TypeError: pass else: if result_type.kind in "SU": index = np.asarray(index, dtype=result_type.type) return index def maybe_wrap_array(original, new_array): """Wrap a transformed array with __array_wrap__ if it can be done safely. This lets us treat arbitrary functions that take and return ndarray objects like ufuncs, as long as they return an array with the same shape. """ # in case func lost array's metadata if isinstance(new_array, np.ndarray) and new_array.shape == original.shape: return original.__array_wrap__(new_array) else: return new_array def equivalent(first: T, second: T) -> bool: """Compare two objects for equivalence (identity or equality), using array_equiv if either object is an ndarray. If both objects are lists, equivalent is sequentially called on all the elements. Returns False for any comparison that doesn't return a boolean, making this function safer to use with objects that have non-standard __eq__ implementations. """ # TODO: refactor to avoid circular import from xarray.core import duck_array_ops if first is second: return True if isinstance(first, np.ndarray) or isinstance(second, np.ndarray): return duck_array_ops.array_equiv(first, second) if isinstance(first, list) or isinstance(second, list): return list_equiv(first, second) # type: ignore[arg-type] # Check for NaN equivalence early (before equality comparison) # This handles both Python float NaN and NumPy scalar NaN (issue #10833) if pd.isnull(first) and pd.isnull(second): # type: ignore[call-overload] return True # For non-array/list types, use == but require boolean result result = first == second if not isinstance(result, bool): # Accept numpy bool scalars as well if isinstance(result, np.bool_): return bool(result) # Reject any other non-boolean type (Dataset, Series, custom objects, etc.) 
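        # Returning False avoids evaluating the truthiness of such results,
        # which for array-like or pandas objects may raise or be misleading.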
return False return result def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool: if len(first) != len(second): return False return all(itertools.starmap(equivalent, zip(first, second, strict=True))) def peek_at(iterable: Iterable[T]) -> tuple[T, Iterator[T]]: """Returns the first value from iterable, as well as a new iterator with the same content as the original iterable """ gen = iter(iterable) peek = next(gen) return peek, itertools.chain([peek], gen) def update_safety_check( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> None: """Check the safety of updating one dictionary with another. Raises ValueError if dictionaries have non-compatible values for any key, where compatibility is determined by identity (they are the same item) or the `compat` function. Parameters ---------- first_dict, second_dict : dict-like All items in the second dictionary are checked against for conflicts against items in the first dictionary. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. """ for k, v in second_dict.items(): if k in first_dict and not compat(v, first_dict[k]): raise ValueError( "unsafe to merge dictionaries without " f"overriding values; conflicting key {k!r}" ) def remove_incompatible_items( first_dict: MutableMapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> None: """Remove incompatible items from the first dictionary in-place. Items are retained if their keys are found in both dictionaries and the values are compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. """ for k in list(first_dict): if k not in second_dict or not compat(first_dict[k], second_dict[k]): del first_dict[k] def flat_items( nested: Mapping[str, NestedDict[T] | T], prefix: str | None = None, separator: str = "/", ) -> Iterable[tuple[str, T]]: """Yields flat items from a nested dictionary of dicts. Notes: - Only dict subclasses are flattened. - Duplicate items are not removed. These should be checked separately. """ for key, value in nested.items(): key = prefix + separator + key if prefix is not None else key if isinstance(value, dict): yield from flat_items(value, key, separator) else: yield key, value def is_full_slice(value: Any) -> bool: return isinstance(value, slice) and value == slice(None) def is_list_like(value: Any) -> TypeGuard[list | tuple]: return isinstance(value, list | tuple) def _is_scalar(value, include_0d): from xarray.core.variable import NON_NUMPY_SUPPORTED_ARRAY_TYPES if include_0d: include_0d = getattr(value, "ndim", None) == 0 return ( include_0d or isinstance(value, str | bytes) or not ( isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES) or hasattr(value, "__array_function__") or hasattr(value, "__array_namespace__") ) ) def is_scalar(value: Any, include_0d: bool = True) -> TypeGuard[Hashable]: """Whether to treat a value as a scalar. 
Any non-iterable, string, or 0-D array """ return _is_scalar(value, include_0d) def is_valid_numpy_dtype(dtype: Any) -> bool: try: np.dtype(dtype) except (TypeError, ValueError): return False else: return True def to_0d_array(value: Any) -> np.ndarray: """Given a value, wrap it in a 0-D numpy.ndarray.""" if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0): return np.array(value) else: return to_0d_object_array(value) def dict_equiv( first: Mapping[K, V], second: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> bool: """Test equivalence of two dict-like objects. If any of the values are numpy arrays, compare them correctly. Parameters ---------- first, second : dict-like Dictionaries to compare for equality compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- equals : bool True if the dictionaries are equal """ for k in first: if k not in second or not compat(first[k], second[k]): return False return all(k in first for k in second) def compat_dict_intersection( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> MutableMapping[K, V]: """Return the intersection of two dictionaries as a new dictionary. Items are retained if their keys are found in both dictionaries and the values are compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- intersection : dict Intersection of the contents. """ new_dict = dict(first_dict) remove_incompatible_items(new_dict, second_dict, compat) return new_dict def compat_dict_union( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> MutableMapping[K, V]: """Return the union of two dictionaries as a new dictionary. An exception is raised if any keys are found in both dictionaries and the values are not compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- union : dict union of the contents. """ new_dict = dict(first_dict) update_safety_check(first_dict, second_dict, compat) new_dict.update(second_dict) return new_dict class Frozen(Mapping[K, V]): """Wrapper around an object implementing the mapping interface to make it immutable. If you really want to modify the mapping, the mutable version is saved under the `mapping` attribute. """ __slots__ = ("mapping",) def __init__(self, mapping: Mapping[K, V]): self.mapping = mapping def __getitem__(self, key: K) -> V: return self.mapping[key] def __iter__(self) -> Iterator[K]: return iter(self.mapping) def __len__(self) -> int: return len(self.mapping) def __contains__(self, key: object) -> bool: return key in self.mapping def __repr__(self) -> str: return f"{type(self).__name__}({self.mapping!r})" def FrozenDict(*args, **kwargs) -> Frozen: return Frozen(dict(*args, **kwargs)) class FrozenMappingWarningOnValuesAccess(Frozen[K, V]): """ Class which behaves like a Mapping but warns if the values are accessed. Temporary object to aid in deprecation cycle of `Dataset.dims` (see GH issue #8496). 
`Dataset.dims` is being changed from returning a mapping of dimension names to lengths to just returning a frozen set of dimension names (to increase consistency with `DataArray.dims`). This class retains backwards compatibility but raises a warning only if the return value of ds.dims is used like a dictionary (i.e. it doesn't raise a warning if used in a way that would also be valid for a FrozenSet, e.g. iteration). """ __slots__ = ("mapping",) def _warn(self) -> None: emit_user_level_warning( "The return type of `Dataset.dims` will be changed to return a set of dimension names in future, " "in order to be more consistent with `DataArray.dims`. To access a mapping from dimension names to lengths, " "please use `Dataset.sizes`.", FutureWarning, ) def __getitem__(self, key: K) -> V: self._warn() return super().__getitem__(key) @overload def get(self, key: K, /) -> V | None: ... @overload def get(self, key: K, /, default: V | T) -> V | T: ... def get(self, key: K, default: T | None = None) -> V | T | None: self._warn() return super().get(key, default) def keys(self) -> KeysView[K]: self._warn() return super().keys() def items(self) -> ItemsView[K, V]: self._warn() return super().items() def values(self) -> ValuesView[V]: self._warn() return super().values() class FilteredMapping(Mapping[K, V]): """Implements the Mapping interface. Uses the wrapped mapping for item lookup and a separate wrapped keys collection for iteration. Can be used to construct a mapping object from another dict-like object without eagerly accessing its items or when a mapping object is expected but only iteration over keys is actually used. Note: keys should be a subset of mapping, but FilteredMapping does not validate consistency of the provided `keys` and `mapping`. It is the caller's responsibility to ensure that they are suitable for the task at hand. """ __slots__ = ("keys_", "mapping") def __init__(self, keys: Collection[K], mapping: Mapping[K, V]): self.keys_ = keys # .keys is already a property on Mapping self.mapping = mapping def __getitem__(self, key: K) -> V: if key not in self.keys_: raise KeyError(key) return self.mapping[key] def __iter__(self) -> Iterator[K]: return iter(self.keys_) def __len__(self) -> int: return len(self.keys_) def __repr__(self) -> str: return f"{type(self).__name__}(keys={self.keys_!r}, mapping={self.mapping!r})" class OrderedSet(MutableSet[T]): """A simple ordered set. The API matches the builtin set, but it preserves insertion order of elements, like a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive. """ _d: dict[T, None] __slots__ = ("_d",) def __init__(self, values: Iterable[T] | None = None): self._d = {} if values is not None: self.update(values) # Required methods for MutableSet def __contains__(self, value: Hashable) -> bool: return value in self._d def __iter__(self) -> Iterator[T]: return iter(self._d) def __len__(self) -> int: return len(self._d) def add(self, value: T) -> None: self._d[value] = None def discard(self, value: T) -> None: del self._d[value] # Additional methods def update(self, values: Iterable[T]) -> None: self._d.update(dict.fromkeys(values)) def __repr__(self) -> str: return f"{type(self).__name__}({list(self)!r})" class NdimSizeLenMixin: """Mixin class that extends a class that defines a ``shape`` property to one that also defines ``ndim``, ``size`` and ``__len__``. """ __slots__ = () @property def ndim(self: Any) -> int: """ Number of array dimensions. 
See Also -------- numpy.ndarray.ndim """ return len(self.shape) @property def size(self: Any) -> int: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the arrayโ€™s dimensions. See Also -------- numpy.ndarray.size """ return math.prod(self.shape) def __len__(self: Any) -> int: try: return self.shape[0] except IndexError as err: raise TypeError("len() of unsized object") from err class NDArrayMixin(NdimSizeLenMixin): """Mixin class for making wrappers of N-dimensional arrays that conform to the ndarray interface required for the data argument to Variable objects. A subclass should set the `array` property and override one or more of `dtype`, `shape` and `__getitem__`. """ __slots__ = () @property def dtype(self: Any) -> np.dtype: return self.array.dtype @property def shape(self: Any) -> tuple[int, ...]: return self.array.shape def __getitem__(self: Any, key): return self.array[key] def __repr__(self: Any) -> str: return f"{type(self).__name__}(array={self.array!r})" @contextlib.contextmanager def close_on_error(f): """Context manager to ensure that a file opened by xarray is closed if an exception is raised before the user sees the file object. """ try: yield except Exception: f.close() raise def is_remote_uri(path: str) -> bool: """Finds URLs of the form protocol:// or protocol:: This also matches for http[s]://, which were the only remote URLs supported in <=v0.16.2. """ return bool(re.search(r"^[a-zA-Z][a-zA-Z0-9]*(\://|\:\:)", path)) def strip_uri_params(uri: str) -> str: """Strip query parameters and fragments from a URI. This is useful for extracting the file extension from URLs that contain query parameters (e.g., OPeNDAP constraint expressions). Parameters ---------- uri : str The URI to strip Returns ------- str The URI without query parameters (?) or fragments (#) Examples -------- >>> strip_uri_params("http://example.com/file.nc?var=temp&time=0") 'http://example.com/file.nc' >>> strip_uri_params("http://example.com/file.nc#section") 'http://example.com/file.nc' >>> strip_uri_params("/local/path/file.nc") '/local/path/file.nc' """ from urllib.parse import urlsplit, urlunsplit # Use urlsplit to properly parse the URI # This handles both absolute URLs and relative paths parsed = urlsplit(uri) # Reconstruct without query and fragment using urlunsplit return urlunsplit((parsed.scheme, parsed.netloc, parsed.path, "", "")) def read_magic_number_from_file(filename_or_obj, count=8) -> bytes: # check byte header to determine file type if not isinstance(filename_or_obj, io.IOBase): raise TypeError(f"cannot read the magic number from {type(filename_or_obj)}") if filename_or_obj.tell() != 0: filename_or_obj.seek(0) magic_number = filename_or_obj.read(count) filename_or_obj.seek(0) return magic_number def try_read_magic_number_from_path(pathlike, count=8) -> bytes | None: if isinstance(pathlike, str) or hasattr(pathlike, "__fspath__"): path = os.fspath(pathlike) try: with open(path, "rb") as f: return read_magic_number_from_file(f, count) except (FileNotFoundError, IsADirectoryError, TypeError): pass return None def try_read_magic_number_from_file_or_path(filename_or_obj, count=8) -> bytes | None: magic_number = try_read_magic_number_from_path(filename_or_obj, count) if magic_number is None: with contextlib.suppress(TypeError): magic_number = read_magic_number_from_file(filename_or_obj, count) return magic_number def is_uniform_spaced(arr, **kwargs) -> bool: """Return True if values of an array are uniformly spaced and sorted. 
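    The values are coerced to float and the smallest and largest successive
    differences are compared with ``np.isclose``, so the tolerance can be
    loosened via its keyword arguments. For example:

    >>> is_uniform_spaced([0, 1, 2.001], atol=0.1)
    True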
>>> is_uniform_spaced(range(5)) True >>> is_uniform_spaced([-4, 0, 100]) False kwargs are additional arguments to ``np.isclose`` """ arr = np.array(arr, dtype=float) diffs = np.diff(arr) return bool(np.isclose(diffs.min(), diffs.max(), **kwargs)) def hashable(v: Any) -> TypeGuard[Hashable]: """Determine whether `v` can be hashed.""" try: hash(v) except TypeError: return False return True def iterable(v: Any) -> TypeGuard[Iterable[Any]]: """Determine whether `v` is iterable.""" try: iter(v) except TypeError: return False return True def iterable_of_hashable(v: Any) -> TypeGuard[Iterable[Hashable]]: """Determine whether `v` is an Iterable of Hashables.""" try: it = iter(v) except TypeError: return False return all(hashable(elm) for elm in it) def decode_numpy_dict_values(attrs: Mapping[K, V]) -> dict[K, V]: """Convert attribute values from numpy objects to native Python objects, for use in to_dict """ attrs = dict(attrs) for k, v in attrs.items(): if isinstance(v, np.ndarray): attrs[k] = cast(V, v.tolist()) elif isinstance(v, np.generic): attrs[k] = v.item() return attrs def ensure_us_time_resolution(val): """Convert val out of numpy time, for use in to_dict. Needed because of numpy bug GH#7619""" if np.issubdtype(val.dtype, np.datetime64): val = val.astype("datetime64[us]") elif np.issubdtype(val.dtype, np.timedelta64): val = val.astype("timedelta64[us]") return val class HiddenKeyDict(MutableMapping[K, V]): """Acts like a normal dictionary, but hides certain keys.""" __slots__ = ("_data", "_hidden_keys") # ``__init__`` method required to create instance from class. def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]): self._data = data self._hidden_keys = frozenset(hidden_keys) def _raise_if_hidden(self, key: K) -> None: if key in self._hidden_keys: raise KeyError(f"Key `{key!r}` is hidden.") # The next five methods are requirements of the ABC. def __setitem__(self, key: K, value: V) -> None: self._raise_if_hidden(key) self._data[key] = value def __getitem__(self, key: K) -> V: self._raise_if_hidden(key) return self._data[key] def __delitem__(self, key: K) -> None: self._raise_if_hidden(key) del self._data[key] def __iter__(self) -> Iterator[K]: for k in self._data: if k not in self._hidden_keys: yield k def __len__(self) -> int: num_hidden = len(self._hidden_keys & self._data.keys()) return len(self._data) - num_hidden def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable: """Get a new dimension name based on new_dim, that is not used in dims. If the same name exists, we add an underscore(s) in the head. Example1: dims: ['a', 'b', 'c'] new_dim: ['_rolling'] -> ['_rolling'] Example2: dims: ['a', 'b', 'c', '_rolling'] new_dim: ['_rolling'] -> ['__rolling'] """ while new_dim in dims: new_dim = "_" + str(new_dim) return new_dim def drop_dims_from_indexers( indexers: Mapping[Any, Any], dims: Iterable[Hashable] | Mapping[Any, int], missing_dims: ErrorOptionsWithWarn, ) -> Mapping[Hashable, Any]: """Depending on the setting of missing_dims, drop any dimensions from indexers that are not present in dims. Parameters ---------- indexers : dict dims : sequence missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": invalid = indexers.keys() - set(dims) if invalid: raise ValueError( f"Dimensions {invalid} do not exist. 
Expected one or more of {dims}" ) return indexers elif missing_dims == "warn": # don't modify input indexers = dict(indexers) invalid = indexers.keys() - set(dims) if invalid: warnings.warn( f"Dimensions {invalid} do not exist. Expected one or more of {dims}", stacklevel=2, ) for key in invalid: indexers.pop(key) return indexers elif missing_dims == "ignore": return {key: val for key, val in indexers.items() if key in dims} else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" ) @overload def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> tuple[Hashable, ...]: ... @overload def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[False], ) -> tuple[Hashable, ...] | EllipsisType | None: ... def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: bool = True, ) -> tuple[Hashable, ...] | EllipsisType | None: """Parse one or more dimensions. A single dimension must be always a str, multiple dimensions can be Hashables. This supports e.g. using a tuple as a dimension. If you supply e.g. a set of dimensions the order cannot be conserved, but for sequences it will be. Parameters ---------- dim : str, Iterable of Hashable, "..." or None Dimension(s) to parse. all_dims : tuple of Hashable All possible dimensions. check_exists: bool, default: True if True, check if dim is a subset of all_dims. replace_none : bool, default: True If True, return all_dims if dim is None or "...". Returns ------- parsed_dims : tuple of Hashable Input dimensions as a tuple. """ if dim is None or dim is ...: if replace_none: return all_dims return dim if isinstance(dim, str): dim = (dim,) if check_exists: _check_dims(set(dim), set(all_dims)) return tuple(dim) @overload def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> set[Hashable]: ... @overload def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: Literal[False], ) -> set[Hashable] | EllipsisType | None: ... def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: bool = True, ) -> set[Hashable] | EllipsisType | None: """Like parse_dims_as_tuple, but returning a set instead of a tuple.""" # TODO: Consider removing parse_dims_as_tuple? if dim is None or dim is ...: if replace_none: return all_dims return dim if isinstance(dim, str): dim = {dim} dim = set(dim) if check_exists: _check_dims(dim, all_dims) return dim @overload def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> tuple[Hashable, ...]: ... @overload def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[False], ) -> tuple[Hashable, ...] | EllipsisType | None: ... def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: bool = True, ) -> tuple[Hashable, ...] | EllipsisType | None: """Parse one or more dimensions. A single dimension must be always a str, multiple dimensions can be Hashables. This supports e.g. using a tuple as a dimension. An ellipsis ("...") in a sequence of dimensions will be replaced with all remaining dimensions. This only makes sense when the input is a sequence and not e.g. a set. 
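    For example, ``("z", ...)`` with ``all_dims=("x", "y", "z")`` expands to
    ``("z", "x", "y")``: the ellipsis is replaced by the remaining dimensions
    in their original order.

    >>> parse_ordered_dims(("z", ...), all_dims=("x", "y", "z"))
    ('z', 'x', 'y')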
Parameters ---------- dim : str, Sequence of Hashable or "...", "..." or None Dimension(s) to parse. If "..." appears in a Sequence it always gets replaced with all remaining dims all_dims : tuple of Hashable All possible dimensions. check_exists: bool, default: True if True, check if dim is a subset of all_dims. replace_none : bool, default: True If True, return all_dims if dim is None. Returns ------- parsed_dims : tuple of Hashable Input dimensions as a tuple. """ if dim is not None and dim is not ... and not isinstance(dim, str) and ... in dim: dims_set: set[Hashable | EllipsisType] = set(dim) all_dims_set = set(all_dims) if check_exists: _check_dims(dims_set, all_dims_set) if len(all_dims_set) != len(all_dims): raise ValueError("Cannot use ellipsis with repeated dims") dims = tuple(dim) if dims.count(...) > 1: raise ValueError("More than one ellipsis supplied") other_dims = tuple(d for d in all_dims if d not in dims_set) idx = dims.index(...) return dims[:idx] + other_dims + dims[idx + 1 :] else: # mypy cannot resolve that the sequence cannot contain "..." return parse_dims_as_tuple( # type: ignore[call-overload] dim=dim, all_dims=all_dims, check_exists=check_exists, replace_none=replace_none, ) def _check_dims(dim: AbstractSet[Hashable], all_dims: AbstractSet[Hashable]) -> None: wrong_dims = (dim - all_dims) - {...} if wrong_dims: wrong_dims_str = ", ".join(f"'{d}'" for d in wrong_dims) raise ValueError( f"Dimension(s) {wrong_dims_str} do not exist. Expected one or more of {all_dims}" ) _Accessor = TypeVar("_Accessor") class UncachedAccessor(Generic[_Accessor]): """Acts like a property, but on both classes and class instances This class is necessary because some tools (e.g. pydoc and sphinx) inspect classes for which property returns itself and not the accessor. """ def __init__(self, accessor: type[_Accessor]) -> None: self._accessor = accessor @overload def __get__(self, obj: None, cls) -> type[_Accessor]: ... @overload def __get__(self, obj: object, cls) -> _Accessor: ... def __get__(self, obj: object | None, cls) -> type[_Accessor] | _Accessor: if obj is None: return self._accessor return self._accessor(obj) # type: ignore[call-arg] # assume it is a valid accessor! # Singleton type, as per https://github.com/python/typing/pull/240 class Default(Enum): token = 0 _default = Default.token def iterate_nested(nested_list): for item in nested_list: if isinstance(item, list): yield from iterate_nested(item) else: yield item def contains_only_chunked_or_numpy(obj) -> bool: """Returns True if xarray object contains only numpy arrays or chunked arrays (i.e. pure dask or cubed). Expects obj to be Dataset or DataArray""" from xarray.core.dataarray import DataArray from xarray.core.indexing import ExplicitlyIndexed from xarray.namedarray.pycompat import is_chunked_array if isinstance(obj, DataArray): obj = obj._to_temp_dataset() return all( isinstance(var._data, ExplicitlyIndexed | np.ndarray) or is_chunked_array(var._data) for var in obj._variables.values() ) def find_stack_level(test_mode=False) -> int: """Find the first place in the stack that is not inside xarray or the Python standard library. This is unless the code emanates from a test, in which case we would prefer to see the xarray source. This function is taken from pandas and modified to exclude standard library paths. Parameters ---------- test_mode : bool Flag used for testing purposes to switch off the detection of test directories in the stack trace. 
Returns ------- stacklevel : int First level in the stack that is not part of xarray or the Python standard library. """ import xarray as xr pkg_dir = Path(xr.__file__).parent test_dir = pkg_dir / "tests" std_lib_init = sys.modules["os"].__file__ # Mostly to appease mypy; I don't think this can happen... if std_lib_init is None: return 0 std_lib_dir = Path(std_lib_init).parent frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if ( fname.startswith(str(pkg_dir)) and (not fname.startswith(str(test_dir)) or test_mode) ) or ( fname.startswith(str(std_lib_dir)) and "site-packages" not in fname and "dist-packages" not in fname ): frame = frame.f_back n += 1 else: break return n def emit_user_level_warning(message, category=None) -> None: """Emit a warning at the user level by inspecting the stack trace.""" stacklevel = find_stack_level() return warnings.warn(message, category=category, stacklevel=stacklevel) def consolidate_dask_from_array_kwargs( from_array_kwargs: dict[Any, Any], name: str | None = None, lock: bool | None = None, inline_array: bool | None = None, ) -> dict[Any, Any]: """ Merge dask-specific kwargs with arbitrary from_array_kwargs dict. Temporary function, to be deleted once explicitly passing dask-specific kwargs to .chunk() is deprecated. """ from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="name", passed_kwarg_value=name, default=None, err_msg_dict_name="from_array_kwargs", ) from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="lock", passed_kwarg_value=lock, default=False, err_msg_dict_name="from_array_kwargs", ) from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="inline_array", passed_kwarg_value=inline_array, default=False, err_msg_dict_name="from_array_kwargs", ) return from_array_kwargs def _resolve_doubly_passed_kwarg( kwargs_dict: dict[Any, Any], kwarg_name: str, passed_kwarg_value: str | bool | None, default: bool | None, err_msg_dict_name: str, ) -> dict[Any, Any]: # if in kwargs_dict but not passed explicitly then just pass kwargs_dict through unaltered if kwarg_name in kwargs_dict and passed_kwarg_value is None: pass # if passed explicitly but not in kwargs_dict then use that elif kwarg_name not in kwargs_dict and passed_kwarg_value is not None: kwargs_dict[kwarg_name] = passed_kwarg_value # if in neither then use default elif kwarg_name not in kwargs_dict and passed_kwarg_value is None: kwargs_dict[kwarg_name] = default # if in both then raise else: raise ValueError( f"argument {kwarg_name} cannot be passed both as a keyword argument and within " f"the {err_msg_dict_name} dictionary" ) return kwargs_dict def attempt_import(module: str) -> ModuleType: """Import an optional dependency, and raise an informative error on failure. Parameters ---------- module : str Module to import. For example, ``'zarr'`` or ``'matplotlib.pyplot'``. Returns ------- module : ModuleType The Imported module. Raises ------ ImportError If the module could not be imported. Notes ----- Static type checkers will not be able to infer the type of the returned module, so it is recommended to precede this function with a direct import of the module, guarded by an ``if TYPE_CHECKING`` block, to preserve type checker functionality. See the examples section below for a demonstration. Examples -------- >>> from xarray.core.utils import attempt_import >>> if TYPE_CHECKING: ... import zarr ... else: ... zarr = attempt_import("zarr") ... 
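    A dotted module name is resolved to its parent package when building the
    error message, so e.g. ``attempt_import("zarr.storage")`` reports that
    ``zarr`` is required.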
""" install_mapping = dict(nc_time_axis="nc-time-axis") package_purpose = dict( zarr="for working with Zarr stores", cftime="for working with non-standard calendars", matplotlib="for plotting", hypothesis="for the `xarray.testing.strategies` submodule", ) package_name = module.split(".", maxsplit=1)[0] # e.g. "zarr" from "zarr.storage" install_name = install_mapping.get(package_name, package_name) reason = package_purpose.get(package_name, "") try: return importlib.import_module(module) except ImportError as e: raise ImportError( f"The {install_name} package is required {reason}" " but could not be imported." " Please install it with your package manager (e.g. conda or pip)." ) from e _DEFAULT_NAME = ReprObject("") def result_name(objects: Iterable[Any]) -> Any: # use the same naming heuristics as pandas: # https://github.com/blaze/blaze/issues/458#issuecomment-51936356 names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects} names.discard(_DEFAULT_NAME) if len(names) == 1: (name,) = names else: name = None return name def _get_func_args(func, param_names): """Use `inspect.signature` to try accessing `func` args. Otherwise, ensure they are provided by user. """ try: func_args = inspect.signature(func).parameters except ValueError as err: func_args = {} if not param_names: raise ValueError( "Unable to inspect `func` signature, and `param_names` was not provided." ) from err if param_names: params = param_names else: params = list(func_args)[1:] if any( (p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values() ): raise ValueError( "`param_names` must be provided because `func` takes variable length arguments." ) return params, func_args xarray-2025.12.0/xarray/core/variable.py000066400000000000000000003445031511464676000177510ustar00rootroot00000000000000from __future__ import annotations import copy import itertools import math import numbers import warnings from collections.abc import Callable, Hashable, Mapping, Sequence from functools import partial from types import EllipsisType from typing import TYPE_CHECKING, Any, NoReturn, cast import numpy as np import pandas as pd from numpy.typing import ArrayLike from packaging.version import Version import xarray as xr # only for Dataset and DataArray from xarray.compat.array_api_compat import to_like_array from xarray.computation import ops from xarray.computation.arithmetic import VariableArithmetic from xarray.core import common, dtypes, duck_array_ops, indexing, nputils, utils from xarray.core.common import AbstractArray from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( BasicIndexer, CoordinateTransformIndexingAdapter, OuterIndexer, PandasIndexingAdapter, VectorizedIndexer, as_indexable, ) from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.utils import ( OrderedSet, _default, consolidate_dask_from_array_kwargs, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, emit_user_level_warning, ensure_us_time_resolution, infix_dims, is_allowed_extension_array, is_dict_like, is_duck_array, is_duck_dask_array, maybe_coerce_to_str, ) from xarray.namedarray.core import NamedArray, _raise_if_any_duplicate_dimensions from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import ( async_to_duck_array, integer_types, is_0d_dask_array, is_chunked_array, to_duck_array, ) from xarray.namedarray.utils import module_available from xarray.util.deprecation_helpers import _deprecate_positional_args, 
deprecate_dims NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( indexing.ExplicitlyIndexed, pd.Index, pd.api.extensions.ExtensionArray, PandasExtensionArray, ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) UNSUPPORTED_EXTENSION_ARRAY_TYPES = ( pd.arrays.DatetimeArray, pd.arrays.TimedeltaArray, pd.arrays.NumpyExtensionArray, # type: ignore[attr-defined] ) if TYPE_CHECKING: from xarray.core.types import ( Dims, ErrorOptionsWithWarn, PadModeOptions, PadReflectOptions, QuantileMethods, Self, T_Chunks, T_DuckArray, T_VarPadConstantValues, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint class MissingDimensionsError(ValueError): """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def as_variable( obj: T_DuckArray | Any, name=None, auto_convert: bool = True ) -> Variable | IndexVariable: """Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. auto_convert : bool, optional For internal use only! If True, convert a "dimension" variable into an IndexVariable object (deprecated). Returns ------- var : Variable The newly created variable. """ from xarray.core.dataarray import DataArray # TODO: consider extending this method to automatically handle Iris and if isinstance(obj, DataArray): # extract the primary Variable from DataArrays obj = obj.variable if isinstance(obj, Variable): obj = obj.copy(deep=False) elif isinstance(obj, tuple): try: dims_, data_, *attrs = obj except ValueError as err: raise ValueError( f"Tuple {obj} is not in the form (dims, data[, attrs])" ) from err if isinstance(data_, DataArray): raise TypeError( f"Variable {name!r}: Using a DataArray object to construct a variable is" " ambiguous, please extract the data using the .data property." ) try: obj = Variable(dims_, data_, *attrs) except (TypeError, ValueError) as error: raise error.__class__( f"Variable {name!r}: Could not convert tuple of form " f"(dims, data[, attrs, encoding]): {obj} to Variable." ) from error elif utils.is_scalar(obj): obj = Variable([], obj) elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None: obj = Variable(obj.name, obj) elif isinstance(obj, set | dict): raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}") elif name is not None: data: T_DuckArray = as_compatible_data(obj) if data.ndim != 1: raise MissingDimensionsError( f"cannot set variable {name!r} with {data.ndim!r}-dimensional data " "without explicit dimension names. Pass a tuple of " "(dims, data) instead." 
) obj = Variable(name, data, fastpath=True) else: raise TypeError( f"Variable {name!r}: unable to convert object into a variable without an " f"explicit list of dimensions: {obj!r}" ) if auto_convert and name is not None and name in obj.dims and obj.ndim == 1: # automatically convert the Variable into an Index emit_user_level_warning( f"variable {name!r} with name matching its dimension will not be " "automatically converted into an `IndexVariable` object in the future.", FutureWarning, ) obj = obj.to_index_variable() return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexingAdapter and LazilyIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexingAdapter(data) if isinstance(data, UNSUPPORTED_EXTENSION_ARRAY_TYPES): return data.to_numpy() if isinstance( data, pd.api.extensions.ExtensionArray ) and is_allowed_extension_array(data): return PandasExtensionArray(data) return data def _possibly_convert_objects(values): """Convert object arrays into datetime64 and timedelta64 according to the pandas convention. For backwards compat, as of 3.0.0 pandas, object dtype inputs are cast to strings by `pandas.Series` but we output them as object dtype with the input metadata preserved as well. * datetime.datetime * datetime.timedelta * pd.Timestamp * pd.Timedelta """ as_series = pd.Series(values.ravel(), copy=False) result = np.asarray(as_series).reshape(values.shape) if not result.flags.writeable: # GH8843, pandas copy-on-write mode creates read-only arrays by default try: result.flags.writeable = True except ValueError: result = result.copy() # For why we need this behavior: https://github.com/pandas-dev/pandas/issues/61938 # Object datatype inputs that are strings # will be converted to strings by `pandas.Series`, and as of 3.0.0, lose # `dtype.metadata`. If the roundtrip back to numpy in this function yields an # object array again, the dtype.metadata will be preserved. if ( result.dtype.kind == "O" and values.dtype.kind == "O" and Version(pd.__version__) >= Version("3.0.0dev0") ): result.dtype = values.dtype return result def as_compatible_data( data: T_DuckArray | ArrayLike, fastpath: bool = False ) -> T_DuckArray: """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. """ if fastpath and getattr(data, "ndim", None) is not None: return cast("T_DuckArray", data) from xarray.core.dataarray import DataArray # TODO: do this uwrapping in the Variable/NamedArray constructor instead. if isinstance(data, Variable): return cast("T_DuckArray", data._data) # TODO: do this uwrapping in the DataArray constructor instead. 
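    # Only the DataArray's underlying duck array is used here; its
    # coordinates, name and attributes are not carried over by this function.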
if isinstance(data, DataArray): return cast("T_DuckArray", data._variable._data) def convert_non_numpy_type(data): return cast("T_DuckArray", _maybe_wrap_data(data)) if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return convert_non_numpy_type(data) if isinstance(data, tuple): data = utils.to_0d_object_array(data) # we don't want nested self-described arrays if isinstance(data, pd.Series | pd.DataFrame): if ( isinstance(data, pd.Series) and is_allowed_extension_array(data.array) # Some datetime types are not allowed as well as backing Variable types and not isinstance(data.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES) ): pandas_data = data.array else: pandas_data = data.values # type: ignore[assignment] if isinstance(pandas_data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return convert_non_numpy_type(pandas_data) else: data = pandas_data if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): _dtype, fill_value = dtypes.maybe_promote(data.dtype) data = duck_array_ops.where_method(data, ~mask, fill_value) else: data = np.asarray(data) if isinstance(data, np.matrix): data = np.asarray(data) # immediately return array-like types except `numpy.ndarray` and `numpy` scalars # compare types with `is` instead of `isinstance` to allow `numpy.ndarray` subclasses is_numpy = type(data) is np.ndarray or isinstance(data, np.generic) if not is_numpy and ( hasattr(data, "__array_function__") or hasattr(data, "__array_namespace__") ): return cast("T_DuckArray", data) # anything left will be converted to `numpy.ndarray`, including `numpy` scalars data = np.asarray(data) if data.dtype.kind in "OMm": data = _possibly_convert_objects(data) return _maybe_wrap_data(data) def _as_array_or_item(data): """Return the given values as a numpy array, or as an individual item if it's a 0d datetime64 or timedelta64 array. Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. This function mostly exists because 0-dimensional ndarrays with dtype=datetime64 are broken :( https://github.com/numpy/numpy/issues/4337 https://github.com/numpy/numpy/issues/7619 TODO: remove this (replace with np.asarray) once these issues are fixed """ data = np.asarray(data) if data.ndim == 0: kind = data.dtype.kind if kind in "mM": unit, _ = np.datetime_data(data.dtype) if kind == "M": data = np.datetime64(data, unit) elif kind == "m": data = np.timedelta64(data, unit) return data class Variable(NamedArray, AbstractArray, VariableArithmetic): """A netcdf-like variable consisting of dimensions, data and attributes which describe a single Array. A single Variable object is not fully described outside the context of its parent Dataset (if you want such a fully described object, use a DataArray instead). The main functional difference between Variables and numpy arrays is that numerical operations on Variables implement array broadcasting by dimension name. For example, adding an Variable with dimensions `('time',)` to another Variable with dimensions `('space',)` results in a new Variable with dimensions `('time', 'space')`. Furthermore, numpy reduce operations like ``mean`` or ``sum`` are overwritten to take a "dimension" argument instead of an "axis". Variables are light-weight objects used as the building block for datasets. They are more primitive objects, so operations with them provide marginally higher performance than using DataArrays. 
However, manipulating data in the form of a Dataset or DataArray should almost always be preferred, because they can use more complete metadata in context of coordinate labels. """ __slots__ = ("_attrs", "_data", "_dims", "_encoding") def __init__( self, dims, data: T_DuckArray | ArrayLike, attrs=None, encoding=None, fastpath=False, ): """ Parameters ---------- dims : str or sequence of str Name(s) of the the data dimension(s). Must be either a string (only for 1D data) or a sequence of strings with length equal to the number of dimensions. data : array_like Data array which supports numpy-like data access. attrs : dict_like or None, optional Attributes to assign to the new variable. If None (default), an empty attribute dictionary is initialized. (see FAQ, :ref:`approach to metadata`) encoding : dict_like or None, optional Dictionary specifying how to encode this array's data into a serialized format like netCDF4. Currently used keys (for netCDF) include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'. Well-behaved code to serialize a Variable should ignore unrecognized encoding items. """ super().__init__( dims=dims, data=as_compatible_data(data, fastpath=fastpath), attrs=attrs ) self._encoding: dict[Any, Any] | None = None if encoding is not None: self.encoding = encoding def _new( self, dims=_default, data=_default, attrs=_default, ): dims_ = copy.copy(self._dims) if dims is _default else dims if attrs is _default: attrs_ = None if self._attrs is None else self._attrs.copy() else: attrs_ = attrs if data is _default: return type(self)(dims_, copy.copy(self._data), attrs_) else: cls_ = type(self) return cls_(dims_, data, attrs_) @property def _in_memory(self) -> bool: if isinstance( self._data, PandasIndexingAdapter | CoordinateTransformIndexingAdapter ): return self._data._in_memory return isinstance( self._data, np.ndarray | np.number | PandasExtensionArray, ) or ( isinstance(self._data, indexing.MemoryCachedArray) and isinstance(self._data.array, indexing.NumpyIndexingAdapter) ) @property def data(self): """ The Variable's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. See Also -------- Variable.to_numpy Variable.as_numpy Variable.values """ if isinstance(self._data, PandasExtensionArray): duck_array = self._data.array elif isinstance(self._data, indexing.ExplicitlyIndexed): duck_array = self._data.get_duck_array() elif is_duck_array(self._data): duck_array = self._data else: duck_array = self.values if isinstance(duck_array, PandasExtensionArray): # even though PandasExtensionArray is a duck array, # we should not return the PandasExtensionArray wrapper, # and instead return the underlying data. return duck_array.array return duck_array @data.setter # type: ignore[override,unused-ignore] def data(self, data: T_DuckArray | ArrayLike) -> None: data = as_compatible_data(data) self._check_shape(data) self._data = data def astype( self, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> Self: """ Copy of the Variable object, with data cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. โ€˜Cโ€™ means C order, โ€˜Fโ€™ means Fortran order, โ€˜Aโ€™ means โ€˜Fโ€™ order if all the arrays are Fortran contiguous, โ€˜Cโ€™ order otherwise, and โ€˜Kโ€™ means as close to the order the array elements appear in memory as possible. 
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See Also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from xarray.computation.apply_ufunc import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def _dask_finalize(self, results, array_func, *args, **kwargs): data = array_func(results, *args, **kwargs) return Variable(self._dims, data, attrs=self._attrs, encoding=self._encoding) @property def values(self) -> np.ndarray: """The variable's data as a numpy.ndarray""" return _as_array_or_item(self._data) @values.setter def values(self, values): self.data = values def to_base_variable(self) -> Variable: """Return this variable as a base xarray.Variable""" return Variable( self._dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_variable = utils.alias(to_base_variable, "to_variable") def to_index_variable(self) -> IndexVariable: """Return this variable as an xarray.IndexVariable""" return IndexVariable( self._dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_coord = utils.alias(to_index_variable, "to_coord") def _to_index(self) -> pd.Index: return self.to_index_variable()._to_index() def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index""" return self.to_index_variable().to_index() def to_dict( self, data: bool | str = "list", encoding: bool = False ) -> dict[str, Any]: """Dictionary representation of variable.""" item: dict[str, Any] = { "dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs), } if data is not False: if data in [True, "list"]: item["data"] = ensure_us_time_resolution(self.to_numpy()).tolist() elif data == "array": item["data"] = ensure_us_time_resolution(self.data) else: msg = 'data argument must be bool, "list", or "array"' raise ValueError(msg) else: item.update({"dtype": str(self.dtype), "shape": self.shape}) if encoding: item["encoding"] = dict(self.encoding) return item def _item_key_to_tuple(self, key): if is_dict_like(key): return tuple(key.get(dim, slice(None)) for dim in self.dims) else: return key def _broadcast_indexes(self, key): """Prepare an indexing key for an 
indexing operation. Parameters ---------- key : int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. Returns ------- dims : tuple Dimension of the resultant variable. indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. new_order : Optional[Sequence[int]] Optional reordering to do on the result of indexing. If not None, the first len(new_order) indexing should be moved to these positions. """ key = self._item_key_to_tuple(key) # key is a tuple # key is a tuple of full size key = indexing.expanded_indexer(key, self.ndim) # Convert a scalar Variable to a 0d-array key = tuple( k.data if isinstance(k, Variable) and k.ndim == 0 else k for k in key ) # Convert a 0d numpy arrays to an integer # dask 0d arrays are passed through key = tuple( k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key ) if all( (isinstance(k, BASIC_INDEXING_TYPES) and not isinstance(k, bool)) for k in key ): return self._broadcast_indexes_basic(key) self._validate_indexers(key) # Detect it can be mapped as an outer indexer # If all key is unlabeled, or # key can be mapped as an OuterIndexer. if all(not isinstance(k, Variable) for k in key): return self._broadcast_indexes_outer(key) # If all key is 1-dimensional and there are no duplicate labels, # key can be mapped as an OuterIndexer. dims = [] for k, d in zip(key, self.dims, strict=True): if isinstance(k, Variable): if len(k.dims) > 1: return self._broadcast_indexes_vectorized(key) dims.append(k.dims[0]) elif not isinstance(k, integer_types): dims.append(d) if len(set(dims)) == len(dims): return self._broadcast_indexes_outer(key) return self._broadcast_indexes_vectorized(key) def _broadcast_indexes_basic(self, key): dims = tuple( dim for k, dim in zip(key, self.dims, strict=True) if not isinstance(k, integer_types) ) return dims, BasicIndexer(key), None def _validate_indexers(self, key): """Make sanity checks""" for dim, k in zip(self.dims, key, strict=True): if not isinstance(k, BASIC_INDEXING_TYPES): if not isinstance(k, Variable): if not is_duck_array(k): k = np.asarray(k) if k.ndim > 1: raise IndexError( "Unlabeled multi-dimensional array cannot be " f"used for indexing: {k}" ) if k.dtype.kind == "b": if self.shape[self.get_axis_num(dim)] != len(k): raise IndexError( f"Boolean array size {len(k):d} is used to index array " f"with shape {self.shape}." ) if k.ndim > 1: raise IndexError( f"{k.ndim}-dimensional boolean indexing is not supported. " ) if is_duck_dask_array(k.data): raise KeyError( "Indexing with a boolean dask array is not allowed. " "This will result in a dask array of unknown shape. " "Such arrays are unsupported by Xarray." "Please compute the indexer first using .compute()" ) if getattr(k, "dims", (dim,)) != (dim,): raise IndexError( "Boolean indexer should be unlabeled or on the " "same dimension to the indexed array. Indexer is " f"on {k.dims} but the target dimension is {dim}." 
) def _broadcast_indexes_outer(self, key): # drop dim if k is integer or if k is a 0d dask array dims = tuple( k.dims[0] if isinstance(k, Variable) else dim for k, dim in zip(key, self.dims, strict=True) if (not isinstance(k, integer_types) and not is_0d_dask_array(k)) ) new_key = [] for k in key: if isinstance(k, Variable): k = k.data if not isinstance(k, BASIC_INDEXING_TYPES): if not is_duck_array(k): k = np.asarray(k) if k.size == 0: # Slice by empty list; numpy could not infer the dtype k = k.astype(int) elif k.dtype.kind == "b": (k,) = np.nonzero(k) new_key.append(k) return dims, OuterIndexer(tuple(new_key)), None def _broadcast_indexes_vectorized(self, key): variables = [] out_dims_set = OrderedSet() for dim, value in zip(self.dims, key, strict=True): if isinstance(value, slice): out_dims_set.add(dim) else: variable = ( value if isinstance(value, Variable) else as_variable(value, name=dim, auto_convert=False) ) if variable.dims == (dim,): variable = variable.to_index_variable() if variable.dtype.kind == "b": # boolean indexing case (variable,) = variable._nonzero() variables.append(variable) out_dims_set.update(variable.dims) variable_dims = set() for variable in variables: variable_dims.update(variable.dims) slices = [] for i, (dim, value) in enumerate(zip(self.dims, key, strict=True)): if isinstance(value, slice): if dim in variable_dims: # We only convert slice objects to variables if they share # a dimension with at least one other variable. Otherwise, # we can equivalently leave them as slices aknd transpose # the result. This is significantly faster/more efficient # for most array backends. values = np.arange(*value.indices(self.sizes[dim])) variables.insert(i - len(slices), Variable((dim,), values)) else: slices.append((i, value)) try: variables = _broadcast_compat_variables(*variables) except ValueError as err: raise IndexError(f"Dimensions of indexers mismatch: {key}") from err out_key = [variable.data for variable in variables] out_dims = tuple(out_dims_set) slice_positions = set() for i, value in slices: out_key.insert(i, value) new_position = out_dims.index(self.dims[i]) slice_positions.add(new_position) if slice_positions: new_order = [i for i in range(len(out_dims)) if i not in slice_positions] else: new_order = None return out_dims, VectorizedIndexer(tuple(out_key)), new_order def __getitem__(self, key) -> Self: """Return a new Variable object whose contents are consistent with getting the provided key from the underlying data. NB. __getitem__ and __setitem__ implement xarray-style indexing, where if keys are unlabeled arrays, we index the array orthogonally with them. If keys are labeled array (such as Variables), they are broadcasted with our usual scheme and then the array is indexed with the broadcasted key, like numpy's fancy indexing. If you really want to do indexing like `x[x > 0]`, manipulate the numpy array `x.values` directly. """ dims, indexer, new_order = self._broadcast_indexes(key) indexable = as_indexable(self._data) data = indexing.apply_indexer(indexable, indexer) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def _finalize_indexing_result(self, dims, data) -> Self: """Used by IndexVariable to return IndexVariable objects when possible.""" return self._replace(dims=dims, data=data) def _getitem_with_mask(self, key, fill_value=dtypes.NA): """Index this Variable with -1 remapped to fill_value.""" # TODO(shoyer): expose this method in public API somewhere (isel?) 
and # use it for reindex. # TODO(shoyer): add a sanity check that all other integers are # non-negative # TODO(shoyer): add an optimization, remapping -1 to an adjacent value # that is actually indexed rather than mapping it to the last value # along each axis. if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(self.dtype) dims, indexer, new_order = self._broadcast_indexes(key) if self.size: if is_duck_dask_array(self._data): # dask's indexing is faster this way; also vindex does not # support negative indices yet: # https://github.com/dask/dask/pull/2967 actual_indexer = indexing.posify_mask_indexer(indexer) else: actual_indexer = indexer indexable = as_indexable(self._data) data = indexing.apply_indexer(indexable, actual_indexer) mask = indexing.create_mask(indexer, self.shape, data) # we need to invert the mask in order to pass data first. This helps # pint to choose the correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed mask = to_like_array(mask, data) data = duck_array_ops.where( duck_array_ops.logical_not(mask), data, fill_value ) else: # array cannot be indexed along dimensions of size 0, so just # build the mask directly instead. mask = indexing.create_mask(indexer, self.shape) data = duck_array_ops.broadcast_to(fill_value, getattr(mask, "shape", ())) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def __setitem__(self, key, value): """__setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. """ dims, index_tuple, new_order = self._broadcast_indexes(key) if not isinstance(value, Variable): value = as_compatible_data(value) if value.ndim > len(dims): raise ValueError( f"shape mismatch: value array of shape {value.shape} could not be " f"broadcast to indexing result with {len(dims)} dimensions" ) if value.ndim == 0: value = Variable((), value) else: value = Variable(dims[-value.ndim :], value) # broadcast to become assignable value = value.set_dims(dims).data if new_order: value = duck_array_ops.asarray(value) value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)] value = duck_array_ops.moveaxis(value, new_order, range(len(new_order))) indexable = as_indexable(self._data) indexing.set_with_indexer(indexable, index_tuple, value) @property def encoding(self) -> dict[Any, Any]: """Dictionary of encodings on this variable.""" if self._encoding is None: encoding: dict[Any, Any] = {} self._encoding = encoding return self._encoding @encoding.setter def encoding(self, value): try: self._encoding = dict(value) except ValueError as err: raise ValueError("encoding must be castable to a dictionary") from err def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new Variable without encoding.""" return self._replace(encoding={}) def _copy( self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None, memo: dict[int, Any] | None = None, ) -> Self: if data is None: data_old = self._data if not isinstance(data_old, indexing.MemoryCachedArray): ndata = data_old else: # don't share caching between copies # TODO: MemoryCachedArray doesn't match the array api: ndata = indexing.MemoryCachedArray(data_old.array) # type: ignore[assignment] if deep: ndata = copy.deepcopy(ndata, memo) else: ndata = as_compatible_data(data) if 
self.shape != ndata.shape: raise ValueError( f"Data shape {ndata.shape} must match shape of object {self.shape}" ) attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs) encoding = ( copy.deepcopy(self._encoding, memo) if deep else copy.copy(self._encoding) ) # note: dims is already an immutable tuple return self._replace(data=ndata, attrs=attrs, encoding=encoding) def _replace( self, dims=_default, data=_default, attrs=_default, encoding=_default, ) -> Self: if dims is _default: dims = copy.copy(self._dims) if data is _default: data = copy.copy(self._data) if attrs is _default: attrs = copy.copy(self._attrs) if encoding is _default: encoding = copy.copy(self._encoding) return type(self)(dims, data, attrs, encoding, fastpath=True) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this variable. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original variable is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable Same object but with lazy data as an in-memory array. See Also -------- dask.array.compute Variable.compute Variable.load_async DataArray.load Dataset.load """ self._data = to_duck_array(self._data, **kwargs) return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this variable. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original variable is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable Same object but with lazy data as an in-memory array. See Also -------- dask.array.compute Variable.load Variable.compute DataArray.load_async Dataset.load_async """ self._data = await async_to_duck_array(self._data, **kwargs) return self def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new variable. Data will be computed and/or loaded from disk or a remote source. The original variable is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable New object with the data as an in-memory array. 
See Also -------- dask.array.compute Variable.load Variable.load_async DataArray.compute Dataset.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _shuffle( self, indices: list[list[int]], dim: Hashable, chunks: T_Chunks ) -> Self: # TODO (dcherian): consider making this public API array = self._data if is_chunked_array(array): chunkmanager = get_chunked_array_type(array) return self._replace( data=chunkmanager.shuffle( array, indexer=indices, axis=self.get_axis_num(dim), chunks=chunks, ) ) else: return self.isel({dim: np.concatenate(indices)}) def isel( self, indexers: Mapping[Any, Any] | None = None, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) key = tuple(indexers.get(dim, slice(None)) for dim in self.dims) return self[key] def squeeze(self, dim=None): """Return a new object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze """ dims = common.get_squeeze_dims(self, dim) return self.isel(dict.fromkeys(dims, 0)) def _shift_one_dim(self, dim, count, fill_value=dtypes.NA): axis = self.get_axis_num(dim) if count > 0: keep = slice(None, -count) elif count < 0: keep = slice(-count, None) else: keep = slice(None) trimmed_data = self[(slice(None),) * axis + (keep,)].data if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype width = min(abs(count), self.shape[axis]) dim_pad = (width, 0) if count >= 0 else (0, width) pads = [(0, 0) if d != dim else dim_pad for d in self.dims] data = duck_array_ops.pad( duck_array_ops.astype(trimmed_data, dtype), pads, mode="constant", constant_values=fill_value, ) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return self._replace(data=data) def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): """ Return a new Variable with shifted data. Parameters ---------- shifts : mapping of the form {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. 
fill_value : scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but shifted data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") result = self for dim, count in shifts.items(): result = result._shift_one_dim(dim, count, fill_value=fill_value) return result def _pad_options_dim_to_index( self, pad_option: Mapping[Any, int | float | tuple[int, int] | tuple[float, float]], fill_with_shape=False, ): # change number values to a tuple of two of those values for k, v in pad_option.items(): if isinstance(v, numbers.Number): pad_option[k] = (v, v) if fill_with_shape: return [ pad_option.get(d, (n, n)) for d, n in zip(self.dims, self.shape, strict=True) ] return [pad_option.get(d, (0, 0)) for d in self.dims] def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: T_VarPadConstantValues | None = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ): """ Return a new Variable with padded data. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : str, default: "constant" See numpy / Dask docs stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. constant_values : scalar, tuple or mapping of hashable to scalar or tuple Used in 'constant'. The values to set the padded values for each axis. end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {"even", "odd"}, optional Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. Returns ------- padded : Variable Variable with the same dimensions and attributes but padded data. 
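        Examples
        --------
        A minimal, illustrative example (values are arbitrary); it pads one
        element before and two after along ``x`` with a constant:

        >>> var = Variable(("x",), [1, 2, 3])
        >>> padded = var.pad(x=(1, 2), constant_values=0)  # hypothetical sizes
        >>> padded.values
        array([0, 1, 2, 3, 0, 0])
        >>> padded.shape
        (6,)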
""" pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") # change default behaviour of pad with mode constant if mode == "constant" and ( constant_values is None or constant_values is dtypes.NA ): dtype, constant_values = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype # create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty if isinstance(stat_length, dict): stat_length = self._pad_options_dim_to_index( stat_length, fill_with_shape=True ) if isinstance(constant_values, dict): constant_values = self._pad_options_dim_to_index(constant_values) if isinstance(end_values, dict): end_values = self._pad_options_dim_to_index(end_values) # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]: stat_length = [(n, n) for n in self.shape] # type: ignore[assignment] pad_width_by_index = self._pad_options_dim_to_index(pad_width) # create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty pad_option_kwargs: dict[str, Any] = {} if stat_length is not None: pad_option_kwargs["stat_length"] = stat_length if constant_values is not None: pad_option_kwargs["constant_values"] = constant_values if end_values is not None: pad_option_kwargs["end_values"] = end_values if reflect_type is not None: pad_option_kwargs["reflect_type"] = reflect_type array = duck_array_ops.pad( duck_array_ops.astype(self.data, dtype, copy=False), pad_width_by_index, mode=mode, **pad_option_kwargs, ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) attrs = self._attrs if keep_attrs else None return type(self)(self.dims, array, attrs=attrs) def _roll_one_dim(self, dim, count): axis = self.get_axis_num(dim) count %= self.shape[axis] if count != 0: indices = [slice(-count, None), slice(None, -count)] else: indices = [slice(None)] arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices] data = duck_array_ops.concatenate(arrays, axis) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return self._replace(data=data) def roll(self, shifts=None, **shifts_kwargs): """ Return a new Variable with rolld data. Parameters ---------- shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but rolled data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll") result = self for dim, count in shifts.items(): result = result._roll_one_dim(dim, count) return result @deprecate_dims def transpose( self, *dim: Hashable | EllipsisType, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new Variable object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Variable: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- This operation returns a view of this variable's data. It is lazy for dask-backed Variables but not for numpy-backed Variables. See Also -------- numpy.transpose """ if len(dim) == 0: dim = self.dims[::-1] else: dim = tuple(infix_dims(dim, self.dims, missing_dims)) if len(dim) < 2 or dim == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) axes = self.get_axis_num(dim) data = as_indexable(self._data).transpose(axes) return self._replace(dims=dim, data=data) @property def T(self) -> Self: return self.transpose() @deprecate_dims def set_dims(self, dim, shape=None): """Return a new variable with given set of dimensions. This method might be used to attach new dimension(s) to variable. When possible, this operation does not copy this variable's data. Parameters ---------- dim : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable """ if isinstance(dim, str): dim = [dim] if shape is None and is_dict_like(dim): shape = tuple(dim.values()) missing_dims = set(self.dims) - set(dim) if missing_dims: raise ValueError( f"new dimensions {dim!r} must be a superset of " f"existing dimensions {self.dims!r}" ) self_dims = set(self.dims) expanded_dims = tuple(d for d in dim if d not in self_dims) + self.dims if self.dims == expanded_dims: # don't use broadcast_to unless necessary so the result remains # writeable if possible expanded_data = self._data elif shape is None or all( s == 1 for s, e in zip(shape, dim, strict=True) if e not in self_dims ): # "Trivial" broadcasting, i.e. simply inserting a new dimension # This is typically easier for duck arrays to implement # than the full "broadcast_to" semantics indexer = (None,) * (len(expanded_dims) - self.ndim) + (...,) # TODO: switch this to ._data once we teach ExplicitlyIndexed arrays to handle indexers with None. 
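            # Illustration (hypothetical dims): with self.dims == ("x",) and
            # expanded_dims == ("y", "x"), indexer is (None, Ellipsis), so
            # self.data[indexer] simply prepends a length-1 "y" axis.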
expanded_data = self.data[indexer] else: # elif shape is not None: dims_map = dict(zip(dim, shape, strict=True)) tmp_shape = tuple(dims_map[d] for d in expanded_dims) expanded_data = duck_array_ops.broadcast_to(self._data, tmp_shape) expanded_var = Variable( expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True ) return expanded_var.transpose(*dim) def _stack_once(self, dim: list[Hashable], new_dim: Hashable): if not set(dim) <= set(self.dims): raise ValueError(f"invalid existing dimensions: {dim}") if new_dim in self.dims: raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if len(dim) == 0: # don't stack return self.copy(deep=False) other_dims = [d for d in self.dims if d not in dim] dim_order = other_dims + list(dim) reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + (-1,) new_data = duck_array_ops.reshape(reordered.data, new_shape) new_dims = reordered.dims[: len(other_dims)] + (new_dim,) return type(self)( new_dims, new_data, self._attrs, self._encoding, fastpath=True ) @partial(deprecate_dims, old_name="dimensions") def stack(self, dim=None, **dim_kwargs): """ Stack any number of existing dim into a single new dimension. New dim will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dim : mapping of hashable to tuple of hashable Mapping of form new_name=(dim1, dim2, ...) describing the names of new dim, and the existing dim that they replace. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : Variable Variable with the same attributes but stacked data. See Also -------- Variable.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "stack") result = self for new_dim, dims in dim.items(): result = result._stack_once(dims, new_dim) return result def _unstack_once_full(self, dim: Mapping[Any, int], old_dim: Hashable) -> Self: """ Unstacks the variable without needing an index. Unlike `_unstack_once`, this function requires the existing dimension to contain the full product of the new dimensions. """ new_dim_names = tuple(dim.keys()) new_dim_sizes = tuple(dim.values()) if old_dim not in self.dims: raise ValueError(f"invalid existing dimension: {old_dim}") if set(new_dim_names).intersection(self.dims): raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if math.prod(new_dim_sizes) != self.sizes[old_dim]: raise ValueError( "the product of the new dimension sizes must " "equal the size of the old dimension" ) other_dims = [d for d in self.dims if d != old_dim] dim_order = other_dims + [old_dim] reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes new_data = duck_array_ops.reshape(reordered.data, new_shape) new_dims = reordered.dims[: len(other_dims)] + new_dim_names return type(self)( new_dims, new_data, self._attrs, self._encoding, fastpath=True ) def _unstack_once( self, index: pd.MultiIndex, dim: Hashable, fill_value=dtypes.NA, sparse: bool = False, ) -> Variable: """ Unstacks this variable given an index to unstack and the name of the dimension to which the index refers. 
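        Examples
        --------
        A small, illustrative sketch (index and values are arbitrary, and
        ``pandas`` is imported explicitly for the example):

        >>> import pandas as pd
        >>> idx = pd.MultiIndex.from_product(
        ...     [["a", "b"], [0, 1]], names=["letter", "num"]
        ... )
        >>> v = Variable(("z",), [1, 2, 3, 4])
        >>> unstacked = v._unstack_once(idx, dim="z")
        >>> unstacked.dims
        ('letter', 'num')
        >>> unstacked.shape
        (2, 2)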
""" reordered = self.transpose(..., dim) new_dim_sizes = [lev.size for lev in index.levels] new_dim_names = index.names indexer = index.codes # Potentially we could replace `len(other_dims)` with just `-1` other_dims = [d for d in self.dims if d != dim] new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes) new_dims = reordered.dims[: len(other_dims)] + tuple(new_dim_names) create_template: Callable if fill_value is dtypes.NA: is_missing_values = math.prod(new_shape) > math.prod(self.shape) if is_missing_values: dtype, fill_value = dtypes.maybe_promote(self.dtype) create_template = partial( duck_array_ops.full_like, fill_value=fill_value ) else: dtype = self.dtype fill_value = dtypes.get_fill_value(dtype) create_template = duck_array_ops.empty_like else: dtype = self.dtype create_template = partial(duck_array_ops.full_like, fill_value=fill_value) if sparse: # unstacking a dense multitindexed array to a sparse array from sparse import COO codes = zip(*index.codes, strict=True) if reordered.ndim == 1: indexes = codes else: sizes = itertools.product(*[range(s) for s in reordered.shape[:-1]]) tuple_indexes = itertools.product(sizes, codes) indexes = (list(itertools.chain(*x)) for x in tuple_indexes) # type: ignore[assignment] data = COO( coords=np.array(list(indexes)).T, data=self.data.astype(dtype).ravel(), fill_value=fill_value, shape=new_shape, sorted=index.is_monotonic_increasing, ) else: data = create_template(self.data, shape=new_shape, dtype=dtype) # Indexer is a list of lists of locations. Each list is the locations # on the new dimension. This is robust to the data being sparse; in that # case the destinations will be NaN / zero. data[(..., *indexer)] = reordered return self.to_base_variable()._replace(dims=new_dims, data=data) @partial(deprecate_dims, old_name="dimensions") def unstack(self, dim=None, **dim_kwargs) -> Variable: """ Unstack an existing dimension into multiple new dimensions. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Note that unlike ``DataArray.unstack`` and ``Dataset.unstack``, this method requires the existing dimension to contain the full product of the new dimensions. Parameters ---------- dim : mapping of hashable to mapping of hashable to int Mapping of the form old_dim={dim1: size1, ...} describing the names of existing dimensions, and the new dimensions and sizes that they map to. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- unstacked : Variable Variable with the same attributes but unstacked data. See Also -------- Variable.stack DataArray.unstack Dataset.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "unstack") result = self for old_dim, dims in dim.items(): result = result._unstack_once_full(dims, old_dim) return result def fillna(self, value): return ops.fillna(self, value) def where(self, cond, other=dtypes.NA): return ops.where_method(self, cond, other) def clip(self, min=None, max=None): """ Return an array whose values are limited to ``[min, max]``. At least one of max or min must be given. Refer to `numpy.clip` for full documentation. 
See Also -------- numpy.clip : equivalent function """ from xarray.computation.apply_ufunc import apply_ufunc xp = duck_array_ops.get_array_namespace(self.data) return apply_ufunc(xp.clip, self, min, max, dask="allowed") def reduce( # type: ignore[override] self, func: Callable[..., Any], dim: Dims = None, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs, ) -> Variable: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or Sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True (default), the variable's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ keep_attrs_ = ( _get_keep_attrs(default=True) if keep_attrs is None else keep_attrs ) # Note that the call order for Variable.mean is # Variable.mean -> NamedArray.mean -> Variable.reduce # -> NamedArray.reduce result = super().reduce( func=func, dim=dim, axis=axis, keepdims=keepdims, **kwargs ) # return Variable always to support IndexVariable return Variable( result.dims, result._data, attrs=result._attrs if keep_attrs_ else None ) @classmethod def concat( cls, variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"}, default: "override" String indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. 
- "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ from xarray.structure.merge import merge_attrs if not isinstance(dim, str): (dim,) = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] first_var_dims = first_var.dims arrays = [v._data for v in variables] if dim in first_var_dims: axis = first_var.get_axis_num(dim) dims = first_var_dims data = duck_array_ops.concatenate(arrays, axis=axis) if positions is not None: # TODO: deprecate this option -- we don't need it for groupby # any more. indices = nputils.inverse_permutation(np.concatenate(positions)) data = duck_array_ops.take(data, indices, axis=axis) else: axis = 0 dims = (dim,) + first_var_dims data = duck_array_ops.stack(arrays, axis=axis) attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) encoding = dict(first_var.encoding) if not shortcut: for var in variables: if var.dims != first_var_dims: raise ValueError( f"Variable has dimensions {tuple(var.dims)} but first Variable has dimensions {tuple(first_var_dims)}" ) return cls(dims, data, attrs, encoding, fastpath=True) def equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). """ other = getattr(other, "variable", other) try: return self.dims == other.dims and ( self._data is other._data or equiv(self.data, other.data) ) except (TypeError, AttributeError): return False def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. """ try: self, other = broadcast_variables(self, other) except (ValueError, AttributeError): return False return self.equals(other, equiv=equiv) def identical(self, other, equiv=duck_array_ops.array_equiv): """Like equals, but also checks attributes.""" try: return utils.dict_equiv(self.attrs, other.attrs) and self.equals( other, equiv=equiv ) except (TypeError, AttributeError): return False def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv): """True if the intersection of two Variable's non-null data is equal; otherwise false. Variables can thus still be equal if there are locations where either, or both, contain NaN values. """ return self.broadcast_equals(other, equiv=equiv) def quantile( self, q: ArrayLike, dim: str | Sequence[Hashable] | None = None, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. 
method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile DataArray.quantile References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ from xarray.computation.apply_ufunc import apply_ufunc if interpolation is not None: warnings.warn( "The `interpolation` argument to quantile was renamed to `method`.", FutureWarning, stacklevel=2, ) if method != "linear": raise TypeError("Cannot pass interpolation and method keywords!") method = interpolation if skipna or (skipna is None and self.dtype.kind in "cfO"): _quantile_func = nputils.nanquantile else: _quantile_func = duck_array_ops.quantile if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) scalar = utils.is_scalar(q) q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if dim is None: dim = self.dims if utils.is_scalar(dim): dim = [dim] xp = duck_array_ops.get_array_namespace(self.data) def _wrapper(npa, **kwargs): # move quantile axis to end. required for apply_ufunc return xp.moveaxis(_quantile_func(npa, **kwargs), 0, -1) # jax requires hashable axis = tuple(range(-1, -1 * len(dim) - 1, -1)) kwargs = {"q": q, "axis": axis, "method": method} result = apply_ufunc( _wrapper, self, input_core_dims=[dim], exclude_dims=set(dim), output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="allowed" if module_available("dask", "2024.11.0") else "parallelized", kwargs=kwargs, ) # for backward compatibility result = result.transpose("quantile", ...) if scalar: result = result.squeeze("quantile") if keep_attrs: result.attrs = self._attrs return result def rank(self, dim, pct=False): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. 
NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. Returns ------- ranked : Variable See Also -------- Dataset.rank, DataArray.rank """ # This could / should arguably be implemented at the DataArray & Dataset level if not OPTIONS["use_bottleneck"]: raise RuntimeError( "rank requires bottleneck to be enabled." " Call `xr.set_options(use_bottleneck=True)` to enable it." ) import bottleneck as bn func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata ranked = xr.apply_ufunc( func, self, input_core_dims=[[dim]], output_core_dims=[[dim]], dask="parallelized", kwargs=dict(axis=-1), ).transpose(*self.dims) if pct: count = self.notnull().sum(dim) ranked /= count return ranked @_deprecate_positional_args("v2024.11.0") def rolling_window( self, dim, window, window_dim, *, center=False, fill_value=dtypes.NA, **kwargs, ): """ Make a rolling_window along dim and add a new_dim to the last place. Parameters ---------- dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. window : int Window size of the rolling For nd-rolling, should be list of integers. window_dim : str New name of the window dimension. For nd-rolling, should be list of strings. center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. fill_value value to be filled. **kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. Returns ------- Variable that is a view of the original array with a added dimension of size w. The return dim: self.dims + (window_dim, ) The return shape: self.shape + (window, ) See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Examples -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) Size: 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) """ if fill_value is dtypes.NA: # np.nan is passed dtype, fill_value = dtypes.maybe_promote(self.dtype) var = duck_array_ops.astype(self, dtype, copy=False) else: dtype = self.dtype var = self if utils.is_scalar(dim): for name, arg in zip( ["window", "window_dim", "center"], [window, window_dim, center], strict=True, ): if not utils.is_scalar(arg): raise ValueError( f"Expected {name}={arg!r} to be a scalar like 'dim'." ) dim = (dim,) # dim is now a list nroll = len(dim) if utils.is_scalar(window): window = [window] * nroll if utils.is_scalar(window_dim): window_dim = [window_dim] * nroll if utils.is_scalar(center): center = [center] * nroll if ( len(dim) != len(window) or len(dim) != len(window_dim) or len(dim) != len(center) ): raise ValueError( "'dim', 'window', 'window_dim', and 'center' must be the same length. " f"Received dim={dim!r}, window={window!r}, window_dim={window_dim!r}," f" and center={center!r}." 
) pads = {} for d, win, cent in zip(dim, window, center, strict=True): if cent: start = win // 2 # 10 -> 5, 9 -> 4 end = win - 1 - start pads[d] = (start, end) else: pads[d] = (win - 1, 0) padded = var.pad(pads, mode="constant", constant_values=fill_value) axis = self.get_axis_num(dim) new_dims = self.dims + tuple(window_dim) return Variable( new_dims, duck_array_ops.sliding_window_view( padded.data, window_shape=window, axis=axis, **kwargs ), ) def coarsen( self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs ): """ Apply reduction function. """ windows = {k: v for k, v in windows.items() if k in self.dims} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if keep_attrs: _attrs = self.attrs else: _attrs = None if not windows: return self._replace(attrs=_attrs) reshaped, axes = self.coarsen_reshape(windows, boundary, side) if isinstance(func, str): name = func func = getattr(duck_array_ops, name, None) if func is None: raise NameError(f"{name} is not a valid method.") return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs) def coarsen_reshape(self, windows, boundary, side): """ Construct a reshaped-array for coarsen """ if not is_dict_like(boundary): boundary = dict.fromkeys(windows.keys(), boundary) if not is_dict_like(side): side = dict.fromkeys(windows.keys(), side) # remove unrelated dimensions boundary = {k: v for k, v in boundary.items() if k in windows} side = {k: v for k, v in side.items() if k in windows} for d, window in windows.items(): if window <= 0: raise ValueError( f"window must be > 0. Given {window} for dimension {d}" ) variable = self pad_widths = {} for d, window in windows.items(): # trim or pad the object size = variable.shape[self._get_axis_num(d)] n = int(size / window) if boundary[d] == "exact": if n * window != size: raise ValueError( f"Could not coarsen a dimension of size {size} with " f"window {window} and boundary='exact'. Try a different 'boundary' option." ) elif boundary[d] == "trim": if side[d] == "left": variable = variable.isel({d: slice(0, window * n)}) else: excess = size - window * n variable = variable.isel({d: slice(excess, None)}) elif boundary[d] == "pad": # pad pad = window * n - size if pad < 0: pad += window elif pad == 0: continue pad_widths[d] = (0, pad) if side[d] == "left" else (pad, 0) else: raise TypeError( f"{boundary[d]} is invalid for boundary. Valid option is 'exact', " "'trim' and 'pad'" ) if pad_widths: variable = variable.pad(pad_widths, mode="constant") shape = [] axes = [] axis_count = 0 for i, d in enumerate(variable.dims): if d in windows: size = variable.shape[i] shape.extend((int(size / windows[d]), windows[d])) axis_count += 1 axes.append(i + axis_count) else: shape.append(variable.shape[i]) return duck_array_ops.reshape(variable.data, shape), tuple(axes) def isnull(self, keep_attrs: bool | None = None): """Test each value in the array for whether it is a missing value. Returns ------- isnull : Variable Same type and shape as object, but the dtype of the data is bool. 
See Also -------- pandas.isnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var Size: 24B array([ 1., nan, 3.]) >>> var.isnull() Size: 3B array([False, True, False]) """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool | None = None): """Test each value in the array for whether it is not a missing value. Returns ------- notnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var Size: 24B array([ 1., nan, 3.]) >>> var.notnull() Size: 3B array([ True, False, True]) """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) @property def imag(self) -> Variable: """ The imaginary part of the variable. See Also -------- numpy.ndarray.imag """ return self._new(data=self.data.imag) @property def real(self) -> Variable: """ The real part of the variable. See Also -------- numpy.ndarray.real """ return self._new(data=self.data.real) def __array_wrap__(self, obj, context=None, return_scalar=False): return Variable(self.dims, obj) def _unary_op(self, f, *args, **kwargs): keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with np.errstate(all="ignore"): result = self.__array_wrap__(f(self.data, *args, **kwargs)) if keep_attrs: result.attrs = self.attrs return result def _binary_op(self, other, f, reflexive=False): if isinstance(other, xr.DataTree | xr.DataArray | xr.Dataset): return NotImplemented if reflexive and issubclass(type(self), type(other)): other_data, self_data, dims = _broadcast_compat_data(other, self) else: self_data, other_data, dims = _broadcast_compat_data(self, other) keep_attrs = _get_keep_attrs(default=True) if keep_attrs: # Combine attributes from both operands, dropping conflicts from xarray.structure.merge import merge_attrs # Access attrs property to normalize None to {} due to property side effect self_attrs = self.attrs other_attrs = getattr(other, "attrs", {}) attrs = merge_attrs([self_attrs, other_attrs], "drop_conflicts") else: attrs = None with np.errstate(all="ignore"): new_data = ( f(self_data, other_data) if not reflexive else f(other_data, self_data) ) result = Variable(dims, new_data, attrs=attrs) return result def _inplace_binary_op(self, other, f): if isinstance(other, xr.Dataset): raise TypeError("cannot add a Dataset to a Variable in-place") self_data, other_data, dims = _broadcast_compat_data(self, other) if dims != self.dims: raise ValueError("dimensions cannot change for in-place operations") with np.errstate(all="ignore"): self.values = f(self_data, other_data) return self def _to_numeric(self, offset=None, datetime_unit=None, dtype=float): """A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric """ numeric_array = duck_array_ops.datetime_to_numeric( self.data, offset, datetime_unit, dtype ) return type(self)(self.dims, numeric_array, self._attrs) def _unravel_argminmax( self, argminmax: str, dim: Dims, axis: int | None, keep_attrs: bool | None, skipna: bool | None, ) -> Variable | dict[Hashable, Variable]: """Apply argmin or argmax over one or more dimensions, 
returning the result as a dict of DataArray that can be passed directly to isel. """ if dim is None and axis is None: warnings.warn( "Behaviour of argmin/argmax with neither dim nor axis argument will " "change to return a dict of indices of each dimension. To get a " "single, flat index, please use np.argmin(da.data) or " "np.argmax(da.data) instead of da.argmin() or da.argmax().", DeprecationWarning, stacklevel=3, ) argminmax_func = getattr(duck_array_ops, argminmax) if dim is ...: # In future, should do this also when (dim is None and axis is None) dim = self.dims if ( dim is None or axis is not None or not isinstance(dim, Sequence) or isinstance(dim, str) ): # Return int index if single dimension is passed, and is not part of a # sequence return self.reduce( argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna ) # Get a name for the new dimension that does not conflict with any existing # dimension newdimname = "_unravel_argminmax_dim_0" count = 1 while newdimname in self.dims: newdimname = f"_unravel_argminmax_dim_{count}" count += 1 stacked = self.stack({newdimname: dim}) result_dims = stacked.dims[:-1] reduce_shape = tuple(self.sizes[d] for d in dim) result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna) result_unravelled_indices = duck_array_ops.unravel_index( result_flat_indices.data, reduce_shape ) result = { d: Variable(dims=result_dims, data=i) for d, i in zip(dim, result_unravelled_indices, strict=True) } if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if keep_attrs: for v in result.values(): v.attrs = self.attrs return result def argmin( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Variable | dict[Hashable, Variable]: """Index or indices of the minimum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See Also -------- DataArray.argmin, DataArray.idxmin """ return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna) def argmax( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Variable | dict[Hashable, Variable]: """Index or indices of the maximum of the Variable over one or more dimensions. 
If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See Also -------- DataArray.argmax, DataArray.idxmax """ return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna) def _as_sparse(self, sparse_format=_default, fill_value=_default) -> Variable: """ Use sparse-array as backend. """ from xarray.namedarray._typing import _default as _default_named if sparse_format is _default: sparse_format = _default_named if fill_value is _default: fill_value = _default_named out = super()._as_sparse(sparse_format, fill_value) return cast("Variable", out) def _to_dense(self) -> Variable: """ Change backend from sparse to np.array. """ out = super()._to_dense() return cast("Variable", out) def chunk( # type: ignore[override] self, chunks: T_Chunks = {}, # noqa: B006 # even though it's technically unsafe, it is being used intentionally here (#4667) name: str | None = None, lock: bool | None = None, inline_array: bool | None = None, chunked_array_type: str | ChunkManagerEntrypoint[Any] | None = None, from_array_kwargs: Any = None, **chunks_kwargs: Any, ) -> Self: """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name : str, optional Used to generate the name for this array in the internal dask graph. Does not need not be unique. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. inline_array : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system. Experimental API that should not be relied upon. 
from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.Variable See Also -------- Variable.chunks Variable.chunksizes xarray.unify_chunks dask.array.from_array """ if from_array_kwargs is None: from_array_kwargs = {} # TODO deprecate passing these dask-specific arguments explicitly. In future just pass everything via from_array_kwargs _from_array_kwargs = consolidate_dask_from_array_kwargs( from_array_kwargs, name=name, lock=lock, inline_array=inline_array, ) return super().chunk( chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=_from_array_kwargs, **chunks_kwargs, ) class IndexVariable(Variable): """Wrapper for accommodating a pandas.Index in an xarray.Variable. IndexVariable preserve loaded values in the form of a pandas.Index instead of a NumPy array. Hence, their values are immutable and must always be one- dimensional. They also have a name property, which is the name of their sole dimension unless another name is given. """ __slots__ = () # TODO: PandasIndexingAdapter doesn't match the array api: _data: PandasIndexingAdapter # type: ignore[assignment] def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): super().__init__(dims, data, attrs, encoding, fastpath) if self.ndim != 1: raise ValueError(f"{type(self).__name__} objects must be 1-dimensional") # Unlike in Variable, always eagerly load values into memory if not isinstance(self._data, PandasIndexingAdapter): self._data = PandasIndexingAdapter(self._data) def __dask_tokenize__(self) -> object: from dask.base import normalize_token # Don't waste time converting pd.Index to np.ndarray return normalize_token( (type(self), self._dims, self._data.array, self._attrs or None) ) def load(self): # data is already loaded into memory for IndexVariable return self async def load_async(self): # data is already loaded into memory for IndexVariable return self # https://github.com/python/mypy/issues/1465 @Variable.data.setter # type: ignore[attr-defined] def data(self, data): raise ValueError( f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) @Variable.values.setter # type: ignore[attr-defined] def values(self, values): raise ValueError( f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) def chunk( self, chunks={}, # noqa: B006 # even though it's unsafe, it is being used intentionally here (#4667) name=None, lock=False, inline_array=False, chunked_array_type=None, from_array_kwargs=None, ): # Dummy - do not chunk. This method is invoked e.g. 
by Dataset.chunk() return self.copy(deep=False) def _as_sparse(self, sparse_format=_default, fill_value=_default): # Dummy return self.copy(deep=False) def _to_dense(self): # Dummy return self.copy(deep=False) def _finalize_indexing_result(self, dims, data): if getattr(data, "ndim", 0) != 1: # returns Variable rather than IndexVariable if multi-dimensional return Variable(dims, data, self._attrs, self._encoding) else: return self._replace(dims=dims, data=data) def __setitem__(self, key, value): raise TypeError(f"{type(self).__name__} values cannot be modified") @classmethod def concat( cls, variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. """ from xarray.structure.merge import merge_attrs if not isinstance(dim, str): (dim,) = dim.dims variables = list(variables) first_var = variables[0] if any(not isinstance(v, cls) for v in variables): raise TypeError( "IndexVariable.concat requires that all input " "variables be IndexVariable objects" ) indexes = [v._data.array for v in variables] if not indexes: data = [] else: data = indexes[0].append(indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) data = data.take(indices) # keep as str if possible as pandas.Index uses object (converts to numpy array) data = maybe_coerce_to_str(data, variables) attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError("inconsistent dimensions") return cls(first_var.dims, data, attrs) def copy(self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None): """Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: True Deep is ignored when data is given. Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. 
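Examples
--------
A minimal sketch with hypothetical values, copying with and without
replacement data:

>>> var = IndexVariable("x", [1, 2, 3])
>>> shallow = var.copy(deep=False)
>>> replaced = var.copy(data=[10, 20, 30])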
""" if data is None: ndata = self._data if deep: ndata = copy.deepcopy(ndata, None) else: ndata = as_compatible_data(data) if self.shape != ndata.shape: raise ValueError( f"Data shape {ndata.shape} must match shape of object {self.shape}" ) attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs) encoding = copy.deepcopy(self._encoding) if deep else copy.copy(self._encoding) return self._replace(data=ndata, attrs=attrs, encoding=encoding) def equals(self, other, equiv=None): # if equiv is specified, super up if equiv is not None: return super().equals(other, equiv) # otherwise use the native index equals, rather than looking at _data other = getattr(other, "variable", other) try: return self.dims == other.dims and self._data_equals(other) except (TypeError, AttributeError): return False def _data_equals(self, other): return self._to_index().equals(other._to_index()) def to_index_variable(self) -> IndexVariable: """Return this variable as an xarray.IndexVariable""" return self.copy(deep=False) to_coord = utils.alias(to_index_variable, "to_coord") def _to_index(self) -> pd.Index: # n.b. creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable. # n.b.2. this method returns the multi-index instance for # a pandas multi-index level variable. assert self.ndim == 1 index = self._data.array if isinstance(index, pd.MultiIndex): # set default names for multi-index unnamed levels so that # we can safely rename dimension / coordinate later valid_level_names = [ name or f"{self.dims[0]}_level_{i}" for i, name in enumerate(index.names) ] index = index.set_names(valid_level_names) else: index = index.set_names(self.name) return index def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index""" index = self._to_index() level = getattr(self._data, "level", None) if level is not None: # return multi-index level converted to a single index return index.get_level_values(level) else: return index @property def level_names(self) -> list[Hashable | None] | None: """Return MultiIndex level names or None if this IndexVariable has no MultiIndex. """ index = self.to_index() if isinstance(index, pd.MultiIndex): return list(index.names) else: return None def get_level_variable(self, level): """Return a new IndexVariable from a given MultiIndex level.""" if self.level_names is None: raise ValueError(f"IndexVariable {self.name!r} has no MultiIndex") index = self.to_index() return type(self)(self.dims, index.get_level_values(level)) @property def name(self) -> Hashable: return self.dims[0] @name.setter def name(self, value) -> NoReturn: raise AttributeError("cannot modify name of IndexVariable in-place") def _inplace_binary_op(self, other, f): raise TypeError( "Values of an IndexVariable are immutable and can not be modified inplace" ) def _unified_dims(variables): # validate dimensions all_dims = {} for var in variables: var_dims = var.dims _raise_if_any_duplicate_dimensions(var_dims, err_context="Broadcasting") for d, s in zip(var_dims, var.shape, strict=True): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: raise ValueError( "operands cannot be broadcast together " f"with mismatched lengths for dimension {d!r}: {(all_dims[d], s)}" ) return all_dims def _broadcast_compat_variables(*variables): """Create broadcast compatible variables, with the same dimensions. Unlike the result of broadcast_variables(), some variables may have dimensions of size 1 instead of the size of the broadcast dimension. 
""" dims = tuple(_unified_dims(variables)) return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables) def broadcast_variables(*variables: Variable) -> tuple[Variable, ...]: """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple( var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables ) def _broadcast_compat_data(self, other): if not OPTIONS["arithmetic_broadcast"] and ( (isinstance(other, Variable) and self.dims != other.dims) or (is_duck_array(other) and self.ndim != other.ndim) ): raise ValueError( "Broadcasting is necessary but automatic broadcasting is disabled via " "global option `'arithmetic_broadcast'`. " "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting." ) if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]): # `other` satisfies the necessary Variable API for broadcast_variables new_self, new_other = _broadcast_compat_variables(self, other) self_data = new_self.data other_data = new_other.data dims = new_self.dims else: # rely on numpy broadcasting rules self_data = self.data other_data = other dims = self.dims return self_data, other_data, dims def concat( variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"}, default: "override" String indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. 
""" variables = list(variables) if all(isinstance(v, IndexVariable) for v in variables): return IndexVariable.concat(variables, dim, positions, shortcut, combine_attrs) else: return Variable.concat(variables, dim, positions, shortcut, combine_attrs) def calculate_dimensions(variables: Mapping[Any, Variable]) -> dict[Hashable, int]: """Calculate the dimensions corresponding to a set of variables. Returns dictionary mapping from dimension names to sizes. Raises ValueError if any of the dimension sizes conflict. """ dims: dict[Hashable, int] = {} last_used = {} scalar_vars = {k for k, v in variables.items() if not v.dims} for k, var in variables.items(): for dim, size in zip(var.dims, var.shape, strict=True): if dim in scalar_vars: raise ValueError( f"dimension {dim!r} already exists as a scalar variable" ) if dim not in dims: dims[dim] = size last_used[dim] = k elif dims[dim] != size: raise ValueError( f"conflicting sizes for dimension {dim!r}: " f"length {size} on {k!r} and length {dims[dim]} on {last_used!r}" ) return dims xarray-2025.12.0/xarray/groupers.py000066400000000000000000001154421511464676000171000ustar00rootroot00000000000000""" This module provides Grouper objects that encapsulate the "factorization" process - conversion of value we are grouping by to integer codes (one per group). """ from __future__ import annotations import datetime import functools import itertools import operator from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import Hashable, Mapping, Sequence from dataclasses import dataclass, field from itertools import chain, pairwise from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np import pandas as pd from numpy.typing import ArrayLike from xarray.coding.cftime_offsets import BaseCFTimeOffset, _new_to_legacy_freq from xarray.coding.cftimeindex import CFTimeIndex from xarray.compat.toolzcompat import sliding_window from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.common import ( _contains_cftime_datetimes, _contains_datetime_like_objects, ) from xarray.core.coordinates import Coordinates, coordinates_from_variable from xarray.core.dataarray import DataArray from xarray.core.duck_array_ops import array_all, isnull from xarray.core.formatting import first_n_items from xarray.core.groupby import T_Group, _DummyGroup from xarray.core.indexes import safe_cast_to_index from xarray.core.resample_cftime import CFTimeGrouper from xarray.core.types import ( Bins, DatetimeLike, GroupIndices, ResampleCompatible, Self, SideOptions, ) from xarray.core.variable import Variable from xarray.namedarray.pycompat import is_chunked_array __all__ = [ "BinGrouper", "EncodedGroups", "Grouper", "Resampler", "SeasonGrouper", "SeasonResampler", "TimeResampler", "UniqueGrouper", ] RESAMPLE_DIM = "__resample_dim__" @dataclass(init=False) class EncodedGroups: """ Dataclass for storing intermediate values for GroupBy operation. Returned by the ``factorize`` method on Grouper objects. Attributes ---------- codes : DataArray Same shape as the DataArray to group by. Values consist of a unique integer code for each group. full_index : pd.Index Pandas Index for the group coordinate containing unique group labels. This can differ from ``unique_coord`` in the case of resampling and binning, where certain groups in the output need not be present in the input. group_indices : tuple of int or slice or list of int, optional List of indices of array elements belonging to each group. Inferred if not provided. 
unique_coord : Variable, optional Unique group values present in dataset. Inferred if not provided """ codes: DataArray full_index: pd.Index group_indices: GroupIndices = field(init=False, repr=False) unique_coord: Variable | _DummyGroup = field(init=False, repr=False) coords: Coordinates = field(init=False, repr=False) def __init__( self, codes: DataArray, full_index: pd.Index, group_indices: GroupIndices | None = None, unique_coord: Variable | _DummyGroup | None = None, coords: Coordinates | None = None, ): from xarray.core.groupby import _codes_to_group_indices assert isinstance(codes, DataArray) if codes.name is None: raise ValueError("Please set a name on the array you are grouping by.") self.codes = codes assert isinstance(full_index, pd.Index) self.full_index = full_index if group_indices is None: if not is_chunked_array(codes.data): self.group_indices = tuple( g for g in _codes_to_group_indices( codes.data.ravel(), len(full_index) ) if g ) else: # We will not use this when grouping by a chunked array self.group_indices = tuple() else: self.group_indices = group_indices if unique_coord is None: unique_codes = np.sort(pd.unique(codes.data)) # Skip the -1 sentinel unique_codes = unique_codes[unique_codes >= 0] unique_values = full_index[unique_codes] self.unique_coord = Variable( dims=codes.name, data=unique_values, attrs=codes.attrs ) else: self.unique_coord = unique_coord if coords is None: assert not isinstance(self.unique_coord, _DummyGroup) self.coords = coordinates_from_variable(self.unique_coord) else: self.coords = coords class Grouper(ABC): """Abstract base class for Grouper objects that allow specializing GroupBy instructions.""" @abstractmethod def factorize(self, group: T_Group) -> EncodedGroups: """ Creates intermediates necessary for GroupBy. Parameters ---------- group : DataArray DataArray we are grouping by. Returns ------- EncodedGroups """ pass @abstractmethod def reset(self) -> Self: """ Creates a new version of this Grouper clearing any caches. """ pass class Resampler(Grouper): """ Abstract base class for Grouper objects that allow specializing resampling-type GroupBy instructions. Currently only used for TimeResampler, but could be used for SpaceResampler in the future. """ def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this resampler. This method should be implemented by subclasses to provide appropriate chunking behavior for their specific resampling strategy. Parameters ---------- variable : Variable The variable being chunked. dim : Hashable The name of the dimension being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. """ raise NotImplementedError("Subclasses must implement compute_chunks method") @dataclass class UniqueGrouper(Grouper): """ Grouper object for grouping by a categorical variable. Parameters ---------- labels: array-like, optional Group labels to aggregate on. This is required when grouping by a chunked array type (e.g. dask or cubed) since it is used to construct the coordinate on the output. Grouped operations will only be run on the specified group labels. Any group that is not present in ``labels`` will be ignored. 
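Examples
--------
A minimal sketch with hypothetical data, grouping by the unique values of a
non-dimension coordinate:

>>> import xarray as xr
>>> da = xr.DataArray(
...     [1, 2, 3, 4],
...     dims="x",
...     coords={"labels": ("x", ["a", "b", "a", "b"])},
... )
>>> grouped = da.groupby(labels=UniqueGrouper())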
""" _group_as_index: pd.Index | None = field(default=None, repr=False, init=False) labels: ArrayLike | None = field(default=None) @property def group_as_index(self) -> pd.Index: """Caches the group DataArray as a pandas Index.""" if self._group_as_index is None: if self.group.ndim == 1: self._group_as_index = self.group.to_index() else: self._group_as_index = pd.Index(np.array(self.group).ravel()) return self._group_as_index def reset(self) -> Self: return type(self)() def factorize(self, group: T_Group) -> EncodedGroups: self.group = group if is_chunked_array(group.data) and self.labels is None: raise ValueError( "When grouping by a dask array, `labels` must be passed using " "a UniqueGrouper object." ) if self.labels is not None: return self._factorize_given_labels(group) index = self.group_as_index is_unique_and_monotonic = isinstance(self.group, _DummyGroup) or ( index.is_unique and (index.is_monotonic_increasing or index.is_monotonic_decreasing) ) is_dimension = self.group.dims == (self.group.name,) can_squeeze = is_dimension and is_unique_and_monotonic if can_squeeze: return self._factorize_dummy() else: return self._factorize_unique() def _factorize_given_labels(self, group: T_Group) -> EncodedGroups: codes = apply_ufunc( _factorize_given_labels, group, kwargs={"labels": self.labels}, dask="parallelized", output_dtypes=[np.int64], keep_attrs=True, ) return EncodedGroups( codes=codes, full_index=pd.Index(self.labels), # type: ignore[arg-type] unique_coord=Variable( dims=codes.name, data=self.labels, attrs=self.group.attrs, ), ) def _factorize_unique(self) -> EncodedGroups: # look through group to find the unique values sort = not isinstance(self.group_as_index, pd.MultiIndex) unique_values, codes_ = unique_value_groups(self.group_as_index, sort=sort) if array_all(codes_ == -1): raise ValueError( "Failed to group data. Are you grouping by a variable that is all NaN?" ) codes = self.group.copy(data=codes_.reshape(self.group.shape), deep=False) unique_coord = Variable( dims=codes.name, data=unique_values, attrs=self.group.attrs ) full_index = ( unique_values if isinstance(unique_values, pd.MultiIndex) else pd.Index(unique_values) ) return EncodedGroups( codes=codes, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) def _factorize_dummy(self) -> EncodedGroups: size = self.group.size # no need to factorize # use slices to do views instead of fancy indexing # equivalent to: group_indices = group_indices.reshape(-1, 1) group_indices: GroupIndices = tuple(slice(i, i + 1) for i in range(size)) size_range = np.arange(size) full_index: pd.Index unique_coord: _DummyGroup | Variable if isinstance(self.group, _DummyGroup): codes = self.group.to_dataarray().copy(data=size_range) unique_coord = self.group full_index = pd.RangeIndex(self.group.size) coords = Coordinates() else: codes = self.group.copy(data=size_range, deep=False) unique_coord = self.group.variable.to_base_variable() full_index = self.group_as_index if isinstance(full_index, pd.MultiIndex): coords = Coordinates.from_pandas_multiindex( full_index, dim=self.group.name ) else: if TYPE_CHECKING: assert isinstance(unique_coord, Variable) coords = coordinates_from_variable(unique_coord) return EncodedGroups( codes=codes, group_indices=group_indices, full_index=full_index, unique_coord=unique_coord, coords=coords, ) @dataclass class BinGrouper(Grouper): """ Grouper object for binning numeric data. Attributes ---------- bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. 
* int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or False, default None Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. If True, raises an error. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. """ bins: Bins # The rest are copied from pandas right: bool = True labels: Any = None precision: int = 3 include_lowest: bool = False duplicates: Literal["raise", "drop"] = "raise" def reset(self) -> Self: return type(self)( bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, ) def __post_init__(self) -> None: if array_all(isnull(self.bins)): raise ValueError("All bin edges are NaN.") def _cut(self, data): return pd.cut( np.asarray(data).ravel(), bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, retbins=True, ) def _pandas_cut_wrapper(self, data, **kwargs): binned, bins = self._cut(data) if isinstance(self.bins, int): # we are running eagerly, update self.bins with actual edges instead self.bins = bins return binned.codes.reshape(data.shape) def factorize(self, group: T_Group) -> EncodedGroups: if isinstance(group, _DummyGroup): group = DataArray(group.data, dims=group.dims, name=group.name) by_is_chunked = is_chunked_array(group.data) if isinstance(self.bins, int) and by_is_chunked: raise ValueError( f"Bin edges must be provided when grouping by chunked arrays. 
Received {self.bins=!r} instead" ) codes = apply_ufunc( self._pandas_cut_wrapper, group, dask="parallelized", keep_attrs=True, output_dtypes=[np.int64], ) if not by_is_chunked and array_all(codes == -1): raise ValueError( f"None of the data falls within bins with edges {self.bins!r}" ) new_dim_name = f"{group.name}_bins" codes.name = new_dim_name # This seems silly, but it lets us have Pandas handle the complexity # of `labels`, `precision`, and `include_lowest`, even when group is a chunked array # Pandas ignores labels when IntervalIndex is passed if self.labels is None or not isinstance(self.bins, pd.IntervalIndex): dummy, _ = self._cut(np.array([0]).astype(group.dtype)) full_index = dummy.categories else: full_index = pd.Index(self.labels) if not by_is_chunked: uniques = np.sort(pd.unique(codes.data.ravel())) unique_values = full_index[uniques[uniques != -1]] else: unique_values = full_index unique_coord = Variable( dims=new_dim_name, data=unique_values, attrs=group.attrs ) return EncodedGroups( codes=codes, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) @dataclass(repr=False) class TimeResampler(Resampler): """ Grouper object specialized to resampling the time coordinate. Attributes ---------- freq : str, datetime.timedelta, pandas.Timestamp, or pandas.DateOffset Frequency to resample to. See `Pandas frequency aliases `_ for a list of possible values. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pandas.Timestamp, datetime.datetime, numpy.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. 
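Examples
--------
A minimal sketch with hypothetical daily data, resampled to month-start
frequency:

>>> import numpy as np
>>> import pandas as pd
>>> import xarray as xr
>>> time = pd.date_range("2001-01-01", periods=120, freq="D")
>>> da = xr.DataArray(np.arange(120), dims="time", coords={"time": time})
>>> monthly = da.resample(time=TimeResampler(freq="MS")).mean()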
""" freq: ResampleCompatible closed: SideOptions | None = field(default=None) label: SideOptions | None = field(default=None) origin: str | DatetimeLike = field(default="start_day") offset: pd.Timedelta | datetime.timedelta | str | None = field(default=None) index_grouper: CFTimeGrouper | pd.Grouper = field(init=False, repr=False) group_as_index: pd.Index = field(init=False, repr=False) def reset(self) -> Self: return type(self)( freq=self.freq, closed=self.closed, label=self.label, origin=self.origin, offset=self.offset, ) def _init_properties(self, group: T_Group) -> None: group_as_index = safe_cast_to_index(group) offset = self.offset if not group_as_index.is_monotonic_increasing: # TODO: sort instead of raising an error raise ValueError("Index must be monotonic for resampling") if isinstance(group_as_index, CFTimeIndex): self.index_grouper = CFTimeGrouper( freq=self.freq, closed=self.closed, label=self.label, origin=self.origin, offset=offset, ) else: if isinstance(self.freq, BaseCFTimeOffset): raise ValueError( "'BaseCFTimeOffset' resample frequencies are only supported " "when resampling a 'CFTimeIndex'" ) self.index_grouper = pd.Grouper( # TODO remove once requiring pandas >= 2.2 freq=_new_to_legacy_freq(self.freq), closed=self.closed, label=self.label, origin=self.origin, offset=offset, ) self.group_as_index = group_as_index def _get_index_and_items(self) -> tuple[pd.Index, pd.Series, np.ndarray]: first_items, codes = self.first_items() full_index = first_items.index if first_items.isnull().any(): first_items = first_items.dropna() full_index = full_index.rename("__resample_dim__") return full_index, first_items, codes def first_items(self) -> tuple[pd.Series, np.ndarray]: if isinstance(self.index_grouper, CFTimeGrouper): return self.index_grouper.first_items( cast(CFTimeIndex, self.group_as_index) ) else: s = pd.Series(np.arange(self.group_as_index.size), self.group_as_index) grouped = s.groupby(self.index_grouper) first_items = grouped.first() counts = grouped.count() # This way we generate codes for the final output index: full_index. # So for _flox_reduce we avoid one reindex and copy by avoiding # _maybe_reindex codes = np.repeat(np.arange(len(first_items)), counts) return first_items, codes def factorize(self, group: T_Group) -> EncodedGroups: self._init_properties(group) full_index, first_items, codes_ = self._get_index_and_items() sbins = first_items.values.astype(np.int64) group_indices: GroupIndices = tuple( list(itertools.starmap(slice, pairwise(sbins))) + [slice(sbins[-1], None)] ) unique_coord = Variable( dims=group.name, data=first_items.index, attrs=group.attrs ) codes = group.copy(data=codes_.reshape(group.shape), deep=False) return EncodedGroups( codes=codes, group_indices=group_indices, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this time resampler. This method is used during chunking operations to determine appropriate chunk sizes for the given variable when using this resampler. Parameters ---------- name : Hashable The name of the dimension being chunked. variable : Variable The variable being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. """ if not _contains_datetime_like_objects(variable): raise ValueError( f"Computing chunks with {type(self)!r} only supported for datetime variables. " f"Received variable with dtype {variable.dtype!r} instead." 
) chunks = ( DataArray( np.ones(variable.shape, dtype=int), dims=(dim,), coords={dim: variable}, ) .resample({dim: self}) .sum() ) # When bins (binning) or time periods are missing (resampling) # we can end up with NaNs. Drop them. if chunks.dtype.kind == "f": chunks = chunks.dropna(dim).astype(int) chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist()) return chunks_tuple def _factorize_given_labels(data: np.ndarray, labels: np.ndarray) -> np.ndarray: # Copied from flox sorter = np.argsort(labels) is_sorted = array_all(sorter == np.arange(sorter.size)) codes = np.searchsorted(labels, data, sorter=sorter) mask = ~np.isin(data, labels) | isnull(data) | (codes == len(labels)) # codes is the index in to the sorted array. # if we didn't want sorting, unsort it back if not is_sorted: codes[codes == len(labels)] = -1 codes = sorter[(codes,)] codes[mask] = -1 return codes def unique_value_groups( ar, sort: bool = True ) -> tuple[np.ndarray | pd.Index, np.ndarray]: """Group an array by its unique values. Parameters ---------- ar : array-like Input array. This will be flattened if it is not already 1-D. sort : bool, default: True Whether or not to sort unique values. Returns ------- values : np.ndarray Sorted, unique values as returned by `np.unique`. indices : list of lists of int Each element provides the integer indices in `ar` with values given by the corresponding value in `unique_values`. """ inverse, values = pd.factorize(ar, sort=sort) if isinstance(values, pd.MultiIndex): values.names = ar.names return values, inverse def season_to_month_tuple(seasons: Sequence[str]) -> tuple[tuple[int, ...], ...]: """ >>> season_to_month_tuple(["DJF", "MAM", "JJA", "SON"]) ((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)) >>> season_to_month_tuple(["DJFM", "MAMJ", "JJAS", "SOND"]) ((12, 1, 2, 3), (3, 4, 5, 6), (6, 7, 8, 9), (9, 10, 11, 12)) >>> season_to_month_tuple(["DJFM", "SOND"]) ((12, 1, 2, 3), (9, 10, 11, 12)) """ initials = "JFMAMJJASOND" starts = { "".join(s): i + 1 for s, i in zip(sliding_window(2, initials + "J"), range(12), strict=True) } result: list[tuple[int, ...]] = [] for i, season in enumerate(seasons): if len(season) == 1: if i < len(seasons) - 1: suffix = seasons[i + 1][0] else: suffix = seasons[0][0] else: suffix = season[1] start = starts[season[0] + suffix] month_append = [] for i in range(len(season[1:])): elem = start + i + 1 month_append.append(elem - 12 * (elem > 12)) result.append((start,) + tuple(month_append)) return tuple(result) def inds_to_season_string(asints: tuple[tuple[int, ...], ...]) -> tuple[str, ...]: inits = "JFMAMJJASOND" return tuple("".join([inits[i_ - 1] for i_ in t]) for t in asints) def is_sorted_periodic(lst): """Used to verify that seasons provided to SeasonResampler are in order.""" n = len(lst) # Find the wraparound point where the list decreases wrap_point = -1 for i in range(1, n): if lst[i] < lst[i - 1]: wrap_point = i break # If no wraparound point is found, the list is already sorted if wrap_point == -1: return True # Check if both parts around the wrap point are sorted for i in range(1, wrap_point): if lst[i] < lst[i - 1]: return False for i in range(wrap_point + 1, n): if lst[i] < lst[i - 1]: return False # Check wraparound condition return lst[-1] <= lst[0] @dataclass(kw_only=True, frozen=True) class SeasonsGroup: seasons: tuple[str, ...] # tuple[integer months] corresponding to each season inds: tuple[tuple[int, ...], ...] 
# integer code for each season, this is not simply range(len(seasons)) # when the seasons have overlaps codes: Sequence[int] def find_independent_seasons(seasons: Sequence[str]) -> Sequence[SeasonsGroup]: """ Iterates though a list of seasons e.g. ["DJF", "FMA", ...], and splits that into multiple sequences of non-overlapping seasons. >>> find_independent_seasons( ... ["DJF", "FMA", "AMJ", "JJA", "ASO", "OND"] ... ) # doctest: +NORMALIZE_WHITESPACE [SeasonsGroup(seasons=('DJF', 'AMJ', 'ASO'), inds=((12, 1, 2), (4, 5, 6), (8, 9, 10)), codes=[0, 2, 4]), SeasonsGroup(seasons=('FMA', 'JJA', 'OND'), inds=((2, 3, 4), (6, 7, 8), (10, 11, 12)), codes=[1, 3, 5])] >>> find_independent_seasons(["DJF", "MAM", "JJA", "SON"]) [SeasonsGroup(seasons=('DJF', 'MAM', 'JJA', 'SON'), inds=((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)), codes=[0, 1, 2, 3])] """ season_inds = season_to_month_tuple(seasons) grouped = defaultdict(list) codes = defaultdict(list) seen: set[tuple[int, ...]] = set() # This is quadratic, but the number of seasons is at most 12 for i, current in enumerate(season_inds): # Start with a group if current not in seen: grouped[i].append(current) codes[i].append(i) seen.add(current) # Loop through remaining groups, and look for overlaps for j, second in enumerate(season_inds[i:]): if not (set(chain(*grouped[i])) & set(second)) and second not in seen: grouped[i].append(second) codes[i].append(j + i) seen.add(second) if len(seen) == len(seasons): break # found all non-overlapping groups for this row start over grouped_ints = tuple(tuple(idx) for idx in grouped.values() if idx) return [ SeasonsGroup(seasons=inds_to_season_string(inds), inds=inds, codes=codes) for inds, codes in zip(grouped_ints, codes.values(), strict=False) ] @dataclass class SeasonGrouper(Grouper): """Allows grouping using a custom definition of seasons. Parameters ---------- seasons: sequence of str List of strings representing seasons. E.g. ``"JF"`` or ``"JJA"`` etc. Overlapping seasons are allowed (e.g. ``["DJFM", "MAMJ", "JJAS", "SOND"]``) Examples -------- >>> SeasonGrouper(["JF", "MAM", "JJAS", "OND"]) SeasonGrouper(seasons=['JF', 'MAM', 'JJAS', 'OND']) The ordering is preserved >>> SeasonGrouper(["MAM", "JJAS", "OND", "JF"]) SeasonGrouper(seasons=['MAM', 'JJAS', 'OND', 'JF']) Overlapping seasons are allowed >>> SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"]) SeasonGrouper(seasons=['DJFM', 'MAMJ', 'JJAS', 'SOND']) """ seasons: Sequence[str] # drop_incomplete: bool = field(default=True) # TODO def factorize(self, group: T_Group) -> EncodedGroups: if TYPE_CHECKING: assert not isinstance(group, _DummyGroup) if not _contains_datetime_like_objects(group.variable): raise ValueError( "SeasonGrouper can only be used to group by datetime-like arrays." ) months = group.dt.month.data seasons_groups = find_independent_seasons(self.seasons) codes_ = np.full((len(seasons_groups),) + group.shape, -1, dtype=np.int8) group_indices: list[list[int]] = [[]] * len(self.seasons) for axis_index, seasgroup in enumerate(seasons_groups): for season_tuple, code in zip( seasgroup.inds, seasgroup.codes, strict=False ): mask = np.isin(months, season_tuple) codes_[axis_index, mask] = code (indices,) = mask.nonzero() group_indices[code] = indices.tolist() if np.all(codes_ == -1): raise ValueError( "Failed to group data. Are you grouping by a variable that is all NaN?" 
) needs_dummy_dim = len(seasons_groups) > 1 codes = DataArray( dims=(("__season_dim__",) if needs_dummy_dim else tuple()) + group.dims, data=codes_ if needs_dummy_dim else codes_.squeeze(), attrs=group.attrs, name="season", ) unique_coord = Variable("season", self.seasons, attrs=group.attrs) full_index = pd.Index(self.seasons) return EncodedGroups( codes=codes, group_indices=tuple(group_indices), unique_coord=unique_coord, full_index=full_index, ) def reset(self) -> Self: return type(self)(self.seasons) @dataclass class SeasonResampler(Resampler): """Allows grouping using a custom definition of seasons. Parameters ---------- seasons: Sequence[str] An ordered list of seasons. drop_incomplete: bool Whether to drop seasons that are not completely included in the data. For example, if a time series starts in Jan-2001, and seasons includes `"DJF"` then observations from Jan-2001, and Feb-2001 are ignored in the grouping since Dec-2000 isn't present. Examples -------- >>> SeasonResampler(["JF", "MAM", "JJAS", "OND"]) SeasonResampler(seasons=['JF', 'MAM', 'JJAS', 'OND'], drop_incomplete=True) >>> SeasonResampler(["DJFM", "AM", "JJA", "SON"]) SeasonResampler(seasons=['DJFM', 'AM', 'JJA', 'SON'], drop_incomplete=True) """ seasons: Sequence[str] drop_incomplete: bool = field(default=True, kw_only=True) season_inds: Sequence[Sequence[int]] = field(init=False, repr=False) season_tuples: Mapping[str, Sequence[int]] = field(init=False, repr=False) def __post_init__(self): self.season_inds = season_to_month_tuple(self.seasons) all_inds = functools.reduce(operator.add, self.season_inds) if len(all_inds) > len(set(all_inds)): raise ValueError( f"Overlapping seasons are not allowed. Received {self.seasons!r}" ) self.season_tuples = dict(zip(self.seasons, self.season_inds, strict=True)) if not is_sorted_periodic(list(itertools.chain(*self.season_inds))): raise ValueError( "Resampling is only supported with sorted seasons. " f"Provided seasons {self.seasons!r} are not sorted." ) def factorize(self, group: T_Group) -> EncodedGroups: if group.ndim != 1: raise ValueError( "SeasonResampler can only be used to resample by 1D arrays." ) if not isinstance(group, DataArray) or not _contains_datetime_like_objects( group.variable ): raise ValueError( "SeasonResampler can only be used to group by datetime-like DataArrays." ) seasons = self.seasons season_inds = self.season_inds season_tuples = self.season_tuples nstr = max(len(s) for s in seasons) year = group.dt.year.astype(int) month = group.dt.month.astype(int) season_label = np.full(group.shape, "", dtype=f"U{nstr}") # offset years for seasons with December and January for season_str, season_ind in zip(seasons, season_inds, strict=True): season_label[month.isin(season_ind)] = season_str if "DJ" in season_str: after_dec = season_ind[season_str.index("D") + 1 :] # important: this is assuming non-overlapping seasons year[month.isin(after_dec)] -= 1 # Allow users to skip one or more months? # present_seasons is a mask that is True for months that are requested in the output present_seasons = season_label != "" if present_seasons.all(): # avoid copies if we can. 
present_seasons = slice(None) frame = pd.DataFrame( data={ "index": np.arange(group[present_seasons].size), "month": month[present_seasons], }, index=pd.MultiIndex.from_arrays( [year.data[present_seasons], season_label[present_seasons]], names=["year", "season"], ), ) agged = ( frame["index"] .groupby(["year", "season"], sort=False) .agg(["first", "count"]) ) first_items = agged["first"] counts = agged["count"] index_class: type[CFTimeIndex | pd.DatetimeIndex] if _contains_cftime_datetimes(group.data): index_class = CFTimeIndex datetime_class = type(first_n_items(group.data, 1).item()) else: index_class = pd.DatetimeIndex datetime_class = datetime.datetime # these are the seasons that are present unique_coord = index_class( [ datetime_class(year=year, month=season_tuples[season][0], day=1) for year, season in first_items.index ] ) # This sorted call is a hack. It's hard to figure out how # to start the iteration for arbitrary season ordering # for example "DJF" as first entry or last entry # So we construct the largest possible index and slice it to the # range present in the data. complete_index = index_class( sorted( [ datetime_class(year=y, month=m, day=1) for y, m in itertools.product( range(year[0].item(), year[-1].item() + 1), [s[0] for s in season_inds], ) ] ) ) # all years and seasons def get_label(year, season): month, *_ = season_tuples[season] return f"{year}-{month:02d}-01" unique_codes = np.arange(len(unique_coord)) valid_season_mask = season_label != "" first_valid_season, last_valid_season = season_label[valid_season_mask][[0, -1]] first_year, last_year = year.data[[0, -1]] if self.drop_incomplete: if month.data[valid_season_mask][0] != season_tuples[first_valid_season][0]: if "DJ" in first_valid_season: first_year += 1 first_valid_season = seasons[ (seasons.index(first_valid_season) + 1) % len(seasons) ] unique_codes -= 1 if ( month.data[valid_season_mask][-1] != season_tuples[last_valid_season][-1] ): last_valid_season = seasons[seasons.index(last_valid_season) - 1] if "DJ" in last_valid_season: last_year -= 1 unique_codes[-1] = -1 first_label = get_label(first_year, first_valid_season) last_label = get_label(last_year, last_valid_season) slicer = complete_index.slice_indexer(first_label, last_label) full_index = complete_index[slicer] final_codes = np.full(group.data.size, -1) final_codes[present_seasons] = np.repeat(unique_codes, counts) codes = group.copy(data=final_codes, deep=False) return EncodedGroups(codes=codes, full_index=full_index) def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this season resampler. This method is used during chunking operations to determine appropriate chunk sizes for the given variable when using this resampler. Parameters ---------- name : Hashable The name of the dimension being chunked. variable : Variable The variable being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. """ if not _contains_datetime_like_objects(variable): raise ValueError( f"Computing chunks with {type(self)!r} only supported for datetime variables. " f"Received variable with dtype {variable.dtype!r} instead." ) if len("".join(self.seasons)) != 12: raise ValueError( "Cannot rechunk with a SeasonResampler that does not cover all 12 months. " f"Received `seasons={self.seasons!r}`." 
) # Create a temporary resampler that ignores drop_incomplete for chunking # This prevents data from being silently dropped during chunking resampler_for_chunking = type(self)(seasons=self.seasons, drop_incomplete=False) chunks = ( DataArray( np.ones(variable.shape, dtype=int), dims=(dim,), coords={dim: variable}, ) .resample({dim: resampler_for_chunking}) .sum() ) # When bins (binning) or time periods are missing (resampling) # we can end up with NaNs. Drop them. if chunks.dtype.kind == "f": chunks = chunks.dropna(dim).astype(int) chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist()) return chunks_tuple def reset(self) -> Self: return type(self)(seasons=self.seasons, drop_incomplete=self.drop_incomplete) xarray-2025.12.0/xarray/indexes/000077500000000000000000000000001511464676000163105ustar00rootroot00000000000000xarray-2025.12.0/xarray/indexes/__init__.py000066400000000000000000000011141511464676000204160ustar00rootroot00000000000000"""Xarray index objects for label-based selection and alignment of Dataset / DataArray objects. """ from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.indexes import ( CoordinateTransformIndex, Index, PandasIndex, PandasMultiIndex, ) from xarray.indexes.nd_point_index import NDPointIndex, TreeAdapter from xarray.indexes.range_index import RangeIndex __all__ = [ "CoordinateTransform", "CoordinateTransformIndex", "Index", "NDPointIndex", "PandasIndex", "PandasMultiIndex", "RangeIndex", "TreeAdapter", ] xarray-2025.12.0/xarray/indexes/nd_point_index.py000066400000000000000000000320771511464676000216740ustar00rootroot00000000000000from __future__ import annotations import abc from collections.abc import Hashable, Iterable, Mapping from typing import TYPE_CHECKING, Any, Generic, TypeVar import numpy as np from xarray.core.dataarray import DataArray from xarray.core.indexes import Index from xarray.core.indexing import IndexSelResult from xarray.core.utils import is_scalar from xarray.core.variable import Variable from xarray.structure.alignment import broadcast if TYPE_CHECKING: from scipy.spatial import KDTree from xarray.core.types import Self class TreeAdapter(abc.ABC): """Lightweight adapter abstract class for plugging in 3rd-party structures like :py:class:`scipy.spatial.KDTree` or :py:class:`sklearn.neighbors.KDTree` into :py:class:`~xarray.indexes.NDPointIndex`. """ @abc.abstractmethod def __init__(self, points: np.ndarray, *, options: Mapping[str, Any]): """ Parameters ---------- points : ndarray of shape (n_points, n_coordinates) Two-dimensional array of points/samples (rows) and their corresponding coordinate labels (columns) to index. """ ... @abc.abstractmethod def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """Query points. Parameters ---------- points: ndarray of shape (n_points, n_coordinates) Two-dimensional array of points/samples (rows) and their corresponding coordinate labels (columns) to query. Returns ------- distances : ndarray of shape (n_points) Distances to the nearest neighbors. indices : ndarray of shape (n_points) Indices of the nearest neighbors in the array of the indexed points. """ ... def equals(self, other: Self) -> bool: """Check equality with another TreeAdapter of the same kind. Parameters ---------- other : The other TreeAdapter object to compare with this object. 
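Returns
-------
bool
    True if the two adapters are considered equal (for example, if they
    index the same points), False otherwise.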
""" raise NotImplementedError class ScipyKDTreeAdapter(TreeAdapter): """:py:class:`scipy.spatial.KDTree` adapter for :py:class:`~xarray.indexes.NDPointIndex`.""" _kdtree: KDTree def __init__(self, points: np.ndarray, options: Mapping[str, Any]): from scipy.spatial import KDTree self._kdtree = KDTree(points, **options) def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]: return self._kdtree.query(points) # type: ignore[return-value,unused-ignore] def equals(self, other: Self) -> bool: return np.array_equal(self._kdtree.data, other._kdtree.data) def get_points(coords: Iterable[Variable | Any]) -> np.ndarray: """Re-arrange data from a sequence of xarray coordinate variables or labels into a 2-d array of shape (n_points, n_coordinates). """ data = [c.values if isinstance(c, Variable | DataArray) else c for c in coords] return np.stack([np.ravel(d) for d in data]).T T_TreeAdapter = TypeVar("T_TreeAdapter", bound=TreeAdapter) class NDPointIndex(Index, Generic[T_TreeAdapter]): """Xarray index for irregular, n-dimensional data. This index may be associated with a set of coordinate variables representing the arbitrary location of data points in an n-dimensional space. All coordinates must have the same shape and dimensions. The number of associated coordinate variables must correspond to the number of dimensions of the space. This index supports label-based selection (nearest neighbor lookup). It also has limited support for alignment. By default, this index relies on :py:class:`scipy.spatial.KDTree` for fast lookup. Do not use :py:meth:`~xarray.indexes.NDPointIndex.__init__` directly. Instead use :py:meth:`xarray.Dataset.set_xindex` or :py:meth:`xarray.DataArray.set_xindex` to create and set the index from existing coordinates (see the example below). Examples -------- An example using a dataset with 2-dimensional coordinates. >>> xx = [[1.0, 2.0], [3.0, 0.0]] >>> yy = [[11.0, 21.0], [29.0, 9.0]] >>> ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}) >>> ds Size: 64B Dimensions: (y: 2, x: 2) Coordinates: xx (y, x) float64 32B 1.0 2.0 3.0 0.0 yy (y, x) float64 32B 11.0 21.0 29.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* Creation of an NDPointIndex from the "xx" and "yy" coordinate variables: >>> ds = ds.set_xindex(("xx", "yy"), xr.indexes.NDPointIndex) >>> ds Size: 64B Dimensions: (y: 2, x: 2) Coordinates: * xx (y, x) float64 32B 1.0 2.0 3.0 0.0 * yy (y, x) float64 32B 11.0 21.0 29.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* Indexes: โ”Œ xx NDPointIndex (ScipyKDTreeAdapter) โ”” yy Point-wise (nearest-neighbor) data selection using Xarray's advanced indexing, i.e., using arbitrary dimension(s) for the Variable objects passed as labels: >>> ds.sel( ... xx=xr.Variable("points", [1.9, 0.1]), ... yy=xr.Variable("points", [13.0, 8.0]), ... method="nearest", ... 
) Size: 32B Dimensions: (points: 2) Coordinates: xx (points) float64 16B 1.0 0.0 yy (points) float64 16B 11.0 9.0 Dimensions without coordinates: points Data variables: *empty* Data selection with scalar labels: >>> ds.sel(xx=1.9, yy=13.0, method="nearest") Size: 16B Dimensions: () Coordinates: xx float64 8B 1.0 yy float64 8B 11.0 Data variables: *empty* Data selection with broadcasting the input labels: >>> ds.sel(xx=1.9, yy=xr.Variable("points", [13.0, 8.0]), method="nearest") Size: 32B Dimensions: (points: 2) Coordinates: xx (points) float64 16B 1.0 0.0 yy (points) float64 16B 11.0 9.0 Dimensions without coordinates: points Data variables: *empty* >>> da = xr.DataArray( ... [[45.1, 53.3], [65.4, 78.2]], ... coords={"u": [1.9, 0.1], "v": [13.0, 8.0]}, ... dims=("u", "v"), ... ) >>> ds.sel(xx=da.u, yy=da.v, method="nearest") Size: 64B Dimensions: (u: 2, v: 2) Coordinates: xx (u, v) float64 32B 1.0 0.0 1.0 0.0 yy (u, v) float64 32B 11.0 9.0 11.0 9.0 Dimensions without coordinates: u, v Data variables: *empty* Data selection with array-like labels (implicit dimensions): >>> ds.sel(xx=[[1.9], [0.1]], yy=[[13.0], [8.0]], method="nearest") Size: 32B Dimensions: (y: 2, x: 1) Coordinates: xx (y, x) float64 16B 1.0 0.0 yy (y, x) float64 16B 11.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* """ _tree_obj: T_TreeAdapter _coord_names: tuple[Hashable, ...] _dims: tuple[Hashable, ...] _shape: tuple[int, ...] def __init__( self, tree_obj: T_TreeAdapter, *, coord_names: tuple[Hashable, ...], dims: tuple[Hashable, ...], shape: tuple[int, ...], ): # this constructor is "private" assert isinstance(tree_obj, TreeAdapter) self._tree_obj = tree_obj assert len(coord_names) == len(dims) == len(shape) self._coord_names = coord_names self._dims = dims self._shape = shape @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> Self: if len({var.dims for var in variables.values()}) > 1: var_names = ",".join(vn for vn in variables) raise ValueError( f"variables {var_names} must all have the same dimensions and the same shape" ) var0 = next(iter(variables.values())) if len(variables) != len(var0.dims): raise ValueError( f"the number of variables {len(variables)} doesn't match " f"the number of dimensions {len(var0.dims)}" ) opts = dict(options) tree_adapter_cls: type[T_TreeAdapter] = opts.pop("tree_adapter_cls", None) if tree_adapter_cls is None: tree_adapter_cls = ScipyKDTreeAdapter points = get_points(variables.values()) return cls( tree_adapter_cls(points, options=opts), coord_names=tuple(variables), dims=var0.dims, shape=var0.shape, ) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> dict[Any, Variable]: if variables is not None: for var in variables.values(): # maybe re-sync variable dimensions with the index object # returned by NDPointIndex.rename() if var.dims != self._dims: var.dims = self._dims return dict(**variables) else: return {} def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, NDPointIndex): return False if type(self._tree_obj) is not type(other._tree_obj): return False return self._tree_obj.equals(other._tree_obj) def _get_dim_indexers( self, indices: np.ndarray, label_dims: tuple[Hashable, ...], label_shape: tuple[int, ...], ) -> dict[Hashable, Variable]: """Returns dimension indexers based on the query results (indices) and the original label dimensions and shape. 1. Unravel the flat indices returned from the query 2. 
Reshape the unraveled indices according to indexers shapes 3. Wrap the indices in xarray.Variable objects. """ dim_indexers = {} u_indices = list(np.unravel_index(indices.ravel(), self._shape)) for dim, ind in zip(self._dims, u_indices, strict=False): dim_indexers[dim] = Variable(label_dims, ind.reshape(label_shape)) return dim_indexers def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: if method != "nearest": raise ValueError( "NDPointIndex only supports selection with method='nearest'" ) missing_labels = set(self._coord_names) - set(labels) if missing_labels: missing_labels_str = ",".join([f"{name}" for name in missing_labels]) raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.") # maybe convert labels into xarray DataArray objects xr_labels: dict[Any, DataArray] = {} for name, lbl in labels.items(): if isinstance(lbl, DataArray): xr_labels[name] = lbl elif isinstance(lbl, Variable): xr_labels[name] = DataArray(lbl) elif is_scalar(lbl): xr_labels[name] = DataArray(lbl, dims=()) elif np.asarray(lbl).ndim == len(self._dims): xr_labels[name] = DataArray(lbl, dims=self._dims) else: raise ValueError( "invalid label value. NDPointIndex only supports advanced (point-wise) indexing " "with the following label value kinds:\n" "- xarray.DataArray or xarray.Variable objects\n" "- scalar values\n" "- unlabelled array-like objects with the same number of dimensions " f"than the {self._coord_names} coordinate variables ({len(self._dims)})" ) # broadcast xarray labels against one another and determine labels shape and dimensions broadcasted = broadcast(*xr_labels.values()) label_dims = broadcasted[0].dims label_shape = broadcasted[0].shape xr_labels = dict(zip(xr_labels, broadcasted, strict=True)) # get and return dimension indexers points = get_points(xr_labels[name] for name in self._coord_names) _, indices = self._tree_obj.query(points) dim_indexers = self._get_dim_indexers(indices, label_dims, label_shape) return IndexSelResult(dim_indexers=dim_indexers) def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: if not set(self._coord_names) & set(name_dict) and not set(self._dims) & set( dims_dict ): return self new_coord_names = tuple(name_dict.get(n, n) for n in self._coord_names) new_dims = tuple(dims_dict.get(d, d) for d in self._dims) return type(self)( self._tree_obj, coord_names=new_coord_names, dims=new_dims, shape=self._shape, ) def _repr_inline_(self, max_width: int) -> str: tree_obj_type = self._tree_obj.__class__.__name__ return f"{self.__class__.__name__} ({tree_obj_type})" xarray-2025.12.0/xarray/indexes/range_index.py000066400000000000000000000325661511464676000211610ustar00rootroot00000000000000import math from collections.abc import Hashable, Mapping from typing import Any import numpy as np import pandas as pd from xarray.core import duck_array_ops from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.dataarray import DataArray from xarray.core.indexes import CoordinateTransformIndex, Index, PandasIndex from xarray.core.indexing import IndexSelResult from xarray.core.variable import Variable class RangeCoordinateTransform(CoordinateTransform): """1-dimensional coordinate transform representing a simple bounded interval with evenly spaced, floating-point values. 
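A minimal sketch with hypothetical values, mapping positions to labels and
back:

>>> transform = RangeCoordinateTransform(0.0, 1.0, 5, "x", "x")
>>> labels = transform.forward({"x": np.arange(5)})["x"]  # 0.0, 0.2, ..., 0.8
>>> positions = transform.reverse({"x": labels})["x"]  # 0.0, 1.0, ..., 4.0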
""" start: float stop: float _step: float | None __slots__ = ("_step", "start", "stop") def __init__( self, start: float, stop: float, size: int, coord_name: Hashable, dim: str, dtype: Any = None, ): if dtype is None: dtype = np.dtype(np.float64) super().__init__([coord_name], {dim: size}, dtype=dtype) self.start = start self.stop = stop self._step = None # Will be calculated by property @property def coord_name(self) -> Hashable: return self.coord_names[0] @property def dim(self) -> str: return self.dims[0] @property def size(self) -> int: return self.dim_size[self.dim] @property def step(self) -> float: if self._step is not None: return self._step if self.size > 0: return (self.stop - self.start) / self.size else: # For empty arrays, default to 1.0 return 1.0 def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]: positions = dim_positions[self.dim] labels = self.start + positions * self.step return {self.coord_name: labels} def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]: labels = coord_labels[self.coord_name] positions = (labels - self.start) / self.step return {self.dim: positions} def equals( self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, RangeCoordinateTransform): return False return ( self.start == other.start and self.stop == other.stop and self.size == other.size ) def slice(self, sl: slice) -> "RangeCoordinateTransform": new_range = range(self.size)[sl] new_size = len(new_range) new_start = self.start + new_range.start * self.step new_stop = self.start + new_range.stop * self.step result = type(self)( new_start, new_stop, new_size, self.coord_name, self.dim, dtype=self.dtype, ) if new_size == 0: # For empty slices, preserve step from parent result._step = self.step return result class RangeIndex(CoordinateTransformIndex): """Xarray index implementing a simple bounded 1-dimension interval with evenly spaced, monotonic floating-point values. This index is memory-saving, i.e., the values of its associated coordinate variable are not materialized in memory. Do not use :py:meth:`~xarray.indexes.RangeIndex.__init__` directly. Instead use :py:meth:`~xarray.indexes.RangeIndex.arange` or :py:meth:`~xarray.indexes.RangeIndex.linspace`, which are similar to :py:func:`numpy.arange` and :py:func:`numpy.linspace`. In the case of a monotonic integer range, it is better using a :py:class:`~xarray.indexes.PandasIndex` that wraps a :py:class:`pandas.RangeIndex`. """ transform: RangeCoordinateTransform def __init__(self, transform: RangeCoordinateTransform): super().__init__(transform) @classmethod def arange( cls, start: float | None = None, stop: float | None = None, step: float | None = None, *, coord_name: Hashable | None = None, dim: str, dtype: Any = None, ) -> "RangeIndex": """Create a new RangeIndex from given start, stop and step values. ``RangeIndex.arange`` can be called with a varying number of positional arguments: - ``RangeIndex.arange(stop)``: the index is within the half-open interval [0, stop) (in other words, the interval including start but excluding stop). - ``RangeIndex.arange(start, stop)``: the index is within the half-open interval [start, stop). - ``RangeIndex.arange(start, stop, step)``: the index is within the half-open interval [start, stop), with spacing between values given by step. .. note:: When using a non-integer step, such as 0.1, it is often better to use :py:meth:`~xarray.indexes.RangeIndex.linspace`. .. 
note:: ``RangeIndex.arange(start=4.0)`` returns a range index in the [0.0, 4.0) interval, i.e., ``start`` is interpreted as ``stop`` even when it is given as a unique keyword argument. Parameters ---------- start : float, optional Start of interval. The interval includes this value. The default start value is 0. If ``stop`` is not given, the value given here is interpreted as the end of the interval. stop : float End of interval. In general the interval does not include this value, except floating point round-off affects the size of the dimension. step : float, optional Spacing between values (default: 1.0). coord_name : Hashable, optional Name of the (lazy) coordinate variable that will be created and associated with the new index. If ``None``, the coordinate is named as the dimension name. dim : str Dimension name. dtype : dtype, optional The dtype of the coordinate variable (default: float64). Examples -------- >>> from xarray.indexes import RangeIndex >>> index = RangeIndex.arange(0.0, 1.0, 0.2, dim="x") >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) >>> ds Size: 40B Dimensions: (x: 5) Coordinates: * x (x) float64 40B 0.0 0.2 0.4 0.6 0.8 Data variables: *empty* Indexes: x RangeIndex (start=0, stop=1, step=0.2) """ if stop is None: if start is None: raise TypeError("RangeIndex.arange() requires stop to be specified") else: stop = start start = None if start is None: start = 0.0 if step is None: step = 1.0 if coord_name is None: coord_name = dim size = math.ceil((stop - start) / step) transform = RangeCoordinateTransform( start, stop, size, coord_name, dim, dtype=dtype ) return cls(transform) @classmethod def linspace( cls, start: float, stop: float, num: int = 50, endpoint: bool = True, *, coord_name: Hashable | None = None, dim: str, dtype: Any = None, ) -> "RangeIndex": """Create a new RangeIndex from given start / stop values and number of values. Parameters ---------- start : float Start of interval. The interval includes this value. stop : float, optional End of interval. The interval includes this value if ``endpoint=True``. num : float, optional Number of values in the interval, i.e., dimension size (default: 50). endpoint : bool, optional If True (default), the ``stop`` value is included in the interval. coord_name : Hashable, optional Name of the (lazy) coordinate variable that will be created and associated with the new index. If ``None``, the coordinate is named as the dimension name. dim : str Dimension name. dtype : dtype, optional The dtype of the coordinate variable (default: float64). Examples -------- >>> from xarray.indexes import RangeIndex >>> index = RangeIndex.linspace(0.0, 1.0, 5, dim="x") >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) >>> ds Size: 40B Dimensions: (x: 5) Coordinates: * x (x) float64 40B 0.0 0.25 0.5 0.75 1.0 Data variables: *empty* Indexes: x RangeIndex (start=0, stop=1.25, step=0.25) """ if coord_name is None: coord_name = dim if endpoint: stop += (stop - start) / (num - 1) transform = RangeCoordinateTransform( start, stop, num, coord_name, dim, dtype=dtype ) return cls(transform) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> "RangeIndex": raise NotImplementedError( "cannot create a new RangeIndex from an existing coordinate. 
Use instead " "either `RangeIndex.arange()` or `RangeIndex.linspace()` together with " "`Coordinates.from_xindex()`" ) @property def start(self) -> float: """Returns the start of the interval (the interval includes this value).""" return self.transform.start @property def stop(self) -> float: """Returns the end of the interval (the interval does not include this value).""" return self.transform.stop @property def step(self) -> float: """Returns the spacing between values.""" return self.transform.step @property def coord_name(self) -> Hashable: return self.transform.coord_names[0] @property def dim(self) -> str: return self.transform.dims[0] @property def size(self) -> int: return self.transform.dim_size[self.dim] def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: idxer = indexers[self.dim] if isinstance(idxer, slice): return RangeIndex(self.transform.slice(idxer)) elif (isinstance(idxer, Variable) and idxer.ndim > 1) or duck_array_ops.ndim( idxer ) == 0: return None else: values = self.transform.forward({self.dim: np.asarray(idxer)})[ self.coord_name ] if isinstance(idxer, Variable): new_dim = idxer.dims[0] else: new_dim = self.dim pd_index = pd.Index(values, name=self.coord_name) return PandasIndex(pd_index, new_dim, coord_dtype=values.dtype) def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: label = labels[self.dim] if method != "nearest": raise ValueError("RangeIndex only supports selection with method='nearest'") # TODO: for RangeIndex it might not be too hard to support tolerance if tolerance is not None: raise ValueError( "RangeIndex doesn't support selection with a given tolerance value yet" ) if isinstance(label, slice): if label.step is None: # continuous interval slice indexing (preserves the index) positions = self.transform.reverse( {self.coord_name: np.array([label.start, label.stop])} ) pos = np.round(positions[self.dim]).astype("int") new_start = max(pos[0], 0) new_stop = min(pos[1], self.size) return IndexSelResult({self.dim: slice(new_start, new_stop)}) else: # otherwise convert to basic (array) indexing label = np.arange(label.start, label.stop, label.step) # support basic indexing (in the 1D case basic vs. 
vectorized indexing # are pretty much similar) unwrap_xr = False if not isinstance(label, Variable | DataArray): # basic indexing -> either scalar or 1-d array try: var = Variable("_", label) except ValueError: var = Variable((), label) labels = {self.dim: var} unwrap_xr = True result = super().sel(labels, method=method, tolerance=tolerance) if unwrap_xr: dim_indexers = {self.dim: result.dim_indexers[self.dim].values} result = IndexSelResult(dim_indexers) return result def to_pandas_index(self) -> pd.Index: values = self.transform.generate_coords() return pd.Index(values[self.dim]) def _repr_inline_(self, max_width) -> str: params_fmt = ( f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}" ) return f"{self.__class__.__name__} ({params_fmt})" def __repr__(self) -> str: params_fmt = ( f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}, " f"size={self.size}, coord_name={self.coord_name!r}, dim={self.dim!r}" ) return f"{self.__class__.__name__} ({params_fmt})" xarray-2025.12.0/xarray/namedarray/000077500000000000000000000000001511464676000167745ustar00rootroot00000000000000xarray-2025.12.0/xarray/namedarray/__init__.py000066400000000000000000000000001511464676000210730ustar00rootroot00000000000000xarray-2025.12.0/xarray/namedarray/_aggregations.py000066400000000000000000000727731511464676000221770ustar00rootroot00000000000000"""Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. from __future__ import annotations from collections.abc import Callable, Sequence from typing import Any from xarray.core import duck_array_ops from xarray.core.types import Dims, Self class NamedArrayAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.count() Size: 8B array(5) """ return self.reduce( duck_array_ops.count, dim=dim, **kwargs, ) def all( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray( ... "x", np.array([True, True, True, True, True, False], dtype=bool) ... ) >>> na Size: 6B array([ True, True, True, True, True, False]) >>> na.all() Size: 1B array(False) """ return self.reduce( duck_array_ops.array_all, dim=dim, **kwargs, ) def any( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray( ... "x", np.array([True, True, True, True, True, False], dtype=bool) ... ) >>> na Size: 6B array([ True, True, True, True, True, False]) >>> na.any() Size: 1B array(True) """ return self.reduce( duck_array_ops.array_any, dim=dim, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.max() Size: 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. 
>>> na.max(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.min() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.min(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.mean() Size: 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> na.mean(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. 
``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.prod() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.prod(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> na.prod(skipna=True, min_count=2) Size: 8B array(0.) """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. 
Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.sum() Size: 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> na.sum(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> na.sum(skipna=True, min_count=2) Size: 8B array(8.) """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.std() Size: 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> na.std(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.std(skipna=True, ddof=1) Size: 8B array(1.14017543) """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 โ€œDelta Degrees of Freedomโ€: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : NamedArray New NamedArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.var() Size: 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> na.var(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.var(skipna=True, ddof=1) Size: 8B array(1.3) """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.median() Size: 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> na.median(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : NamedArray New NamedArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumsum NamedArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumsum() Size: 48B array([1., 3., 6., 6., 8., 8.]) Use ``skipna`` to control whether NaNs are ignored. >>> na.cumsum(skipna=False) Size: 48B array([ 1., 3., 6., 6., 8., nan]) """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumprod NamedArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumprod() Size: 48B array([1., 2., 6., 0., 0., 0.]) Use ``skipna`` to control whether NaNs are ignored. 
>>> na.cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 0., nan]) """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, **kwargs, ) xarray-2025.12.0/xarray/namedarray/_array_api.py000066400000000000000000000135231511464676000214600ustar00rootroot00000000000000from __future__ import annotations from types import ModuleType from typing import Any import numpy as np from xarray.namedarray._typing import ( Default, _arrayapi, _Axes, _Axis, _default, _Dim, _DType, _ScalarType, _ShapeType, _SupportsImag, _SupportsReal, ) from xarray.namedarray.core import NamedArray def _get_data_namespace(x: NamedArray[Any, Any]) -> ModuleType: if isinstance(x._data, _arrayapi): return x._data.__array_namespace__() return np # %% Creation Functions def astype( x: NamedArray[_ShapeType, Any], dtype: _DType, /, *, copy: bool = True ) -> NamedArray[_ShapeType, _DType]: """ Copies an array to a specified data type irrespective of Type Promotion Rules rules. Parameters ---------- x : NamedArray Array to cast. dtype : _DType Desired data type. copy : bool, optional Specifies whether to copy an array when the specified dtype matches the data type of the input array x. If True, a newly allocated array must always be returned. If False and the specified dtype matches the data type of the input array, the input array must be returned; otherwise, a newly allocated array must be returned. Default: True. Returns ------- out : NamedArray An array having the specified data type. The returned array must have the same shape as x. Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.5, 2.5])) >>> narr Size: 16B array([1.5, 2.5]) >>> astype(narr, np.dtype(np.int32)) Size: 8B array([1, 2], dtype=int32) """ if isinstance(x._data, _arrayapi): xp = x._data.__array_namespace__() return x._new(data=xp.astype(x._data, dtype, copy=copy)) # np.astype doesn't exist yet: return x._new(data=x._data.astype(dtype, copy=copy)) # type: ignore[attr-defined] # %% Elementwise Functions def imag( x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the imaginary component of a complex number for each element x_i of the input array x. Parameters ---------- x : NamedArray Input array. Should have a complex floating-point data type. Returns ------- out : NamedArray An array containing the element-wise results. The returned array must have a floating-point data type with the same floating-point precision as x (e.g., if x is complex64, the returned array must have the floating-point data type float32). Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> imag(narr) Size: 16B array([2., 4.]) """ xp = _get_data_namespace(x) out = x._new(data=xp.imag(x._data)) return out def real( x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the real component of a complex number for each element x_i of the input array x. Parameters ---------- x : NamedArray Input array. Should have a complex floating-point data type. Returns ------- out : NamedArray An array containing the element-wise results. The returned array must have a floating-point data type with the same floating-point precision as x (e.g., if x is complex64, the returned array must have the floating-point data type float32). 
Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> real(narr) Size: 16B array([1., 2.]) """ xp = _get_data_namespace(x) out = x._new(data=xp.real(x._data)) return out # %% Manipulation functions def expand_dims( x: NamedArray[Any, _DType], /, *, dim: _Dim | Default = _default, axis: _Axis = 0, ) -> NamedArray[Any, _DType]: """ Expands the shape of an array by inserting a new dimension of size one at the position specified by dims. Parameters ---------- x : Array to expand. dim : Dimension name. New dimension will be stored in the axis position. axis : (Not recommended) Axis position (zero-based). Default is 0. Returns ------- out : An expanded output array having the same data type as x. Examples -------- >>> x = NamedArray(("x", "y"), np.asarray([[1.0, 2.0], [3.0, 4.0]])) >>> expand_dims(x) Size: 32B array([[[1., 2.], [3., 4.]]]) >>> expand_dims(x, dim="z") Size: 32B array([[[1., 2.], [3., 4.]]]) """ xp = _get_data_namespace(x) dims = x.dims if dim is _default: dim = f"dim_{len(dims)}" d = list(dims) d.insert(axis, dim) out = x._new(dims=tuple(d), data=xp.expand_dims(x._data, axis=axis)) return out def permute_dims(x: NamedArray[Any, _DType], axes: _Axes) -> NamedArray[Any, _DType]: """ Permutes the dimensions of an array. Parameters ---------- x : Array to permute. axes : Permutation of the dimensions of x. Returns ------- out : An array with permuted dimensions. The returned array must have the same data type as x. """ dims = x.dims new_dims = tuple(dims[i] for i in axes) if isinstance(x._data, _arrayapi): xp = _get_data_namespace(x) out = x._new(dims=new_dims, data=xp.permute_dims(x._data, axes)) else: out = x._new(dims=new_dims, data=x._data.transpose(axes)) # type: ignore[attr-defined] return out xarray-2025.12.0/xarray/namedarray/_typing.py000066400000000000000000000176131511464676000210270ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from enum import Enum from types import EllipsisType, ModuleType from typing import ( TYPE_CHECKING, Any, Final, Literal, Protocol, SupportsIndex, TypeVar, Union, overload, runtime_checkable, ) import numpy as np try: from typing import TypeAlias except ImportError: if TYPE_CHECKING: raise else: Self: Any = None # Singleton type, as per https://github.com/python/typing/pull/240 class Default(Enum): token: Final = 0 _default = Default.token # https://stackoverflow.com/questions/74633074/how-to-type-hint-a-generic-numpy-array _T_co = TypeVar("_T_co", covariant=True) _dtype = np.dtype _DType = TypeVar("_DType", bound=np.dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` _ScalarType = TypeVar("_ScalarType", bound=np.generic) _ScalarType_co = TypeVar("_ScalarType_co", bound=np.generic, covariant=True) # A protocol for anything with the dtype attribute @runtime_checkable class _SupportsDType(Protocol[_DType_co]): @property def dtype(self) -> _DType_co: ... _DTypeLike = Union[ np.dtype[_ScalarType], type[_ScalarType], _SupportsDType[np.dtype[_ScalarType]], ] # For unknown shapes Dask uses np.nan, array_api uses None: _IntOrUnknown = int _Shape = tuple[_IntOrUnknown, ...] _ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] _ShapeType = TypeVar("_ShapeType", bound=Any) _ShapeType_co = TypeVar("_ShapeType_co", bound=Any, covariant=True) _Axis = int _Axes = tuple[_Axis, ...] 
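# --- Illustrative sketch (editor's addition, not part of xarray) -------------
# ``_Axis`` / ``_Axes`` above describe positional axis arguments as either a
# single integer or a tuple of integers.  A hypothetical helper showing how
# such an argument can be normalized to the tuple form:
def _example_normalize_axes(axis: _Axis | _Axes) -> _Axes:
    # Wrap a bare integer axis into the canonical tuple form.
    return (axis,) if isinstance(axis, int) else axis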
_AxisLike = Union[_Axis, _Axes] _Chunks = tuple[_Shape, ...] _NormalizedChunks = tuple[tuple[int, ...], ...] # FYI in some cases we don't allow `None`, which this doesn't take account of. # # FYI the `str` is for a size string, e.g. "16MB", supported by dask. T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None # noqa: PYI051 # We allow the tuple form of this (though arguably we could transition to named dims only) T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim] _Dim = Hashable _Dims = tuple[_Dim, ...] _DimsLike = Union[str, Iterable[_Dim]] # https://data-apis.org/array-api/latest/API_specification/indexing.html # TODO: np.array_api was bugged and didn't allow (None,), but should! # https://github.com/numpy/numpy/pull/25022 # https://github.com/data-apis/array-api/pull/674 _IndexKey = Union[int, slice, EllipsisType] _IndexKeys = tuple[_IndexKey, ...] # tuple[Union[_IndexKey, None], ...] _IndexKeyLike = Union[_IndexKey, _IndexKeys] _AttrsLike = Union[Mapping[Any, Any], None] class _SupportsReal(Protocol[_T_co]): @property def real(self) -> _T_co: ... class _SupportsImag(Protocol[_T_co]): @property def imag(self) -> _T_co: ... @runtime_checkable class _array(Protocol[_ShapeType_co, _DType_co]): """ Minimal duck array named array uses. Corresponds to np.ndarray. """ @property def shape(self) -> _Shape: ... @property def dtype(self) -> _DType_co: ... @runtime_checkable class _arrayfunction( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Duck array supporting NEP 18. Corresponds to np.ndarray. """ @overload def __getitem__( self, key: _arrayfunction[Any, Any] | tuple[_arrayfunction[Any, Any], ...], / ) -> _arrayfunction[Any, _DType_co]: ... @overload def __getitem__(self, key: _IndexKeyLike, /) -> Any: ... def __getitem__( self, key: ( _IndexKeyLike | _arrayfunction[Any, Any] | tuple[_arrayfunction[Any, Any], ...] ), /, ) -> _arrayfunction[Any, _DType_co] | Any: ... @overload def __array__( self, dtype: None = ..., /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType_co]: ... @overload def __array__( self, dtype: _DType, /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType]: ... def __array__( self, dtype: _DType | None = ..., /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType] | np.ndarray[Any, _DType_co]: ... # TODO: Should return the same subclass but with a new dtype generic. # https://github.com/python/typing/issues/548 def __array_ufunc__( self, ufunc: Any, method: Any, *inputs: Any, **kwargs: Any, ) -> Any: ... # TODO: Should return the same subclass but with a new dtype generic. # https://github.com/python/typing/issues/548 def __array_function__( self, func: Callable[..., Any], types: Iterable[type], args: Iterable[Any], kwargs: Mapping[str, Any], ) -> Any: ... @property def imag(self) -> _arrayfunction[_ShapeType_co, Any]: ... @property def real(self) -> _arrayfunction[_ShapeType_co, Any]: ... @runtime_checkable class _arrayapi(_array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]): """ Duck array supporting NEP 47. Corresponds to np.ndarray. """ def __getitem__( self, key: ( _IndexKeyLike | Any ), # TODO: Any should be _arrayapi[Any, _dtype[np.integer]] /, ) -> _arrayapi[Any, Any]: ... def __array_namespace__(self) -> ModuleType: ... 
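# --- Illustrative sketch (editor's addition, not part of xarray) -------------
# The protocols above are ``runtime_checkable``, so plain ``isinstance`` checks
# can be used to sniff duck-array capabilities at runtime.  The helper name is
# hypothetical and only demonstrates the intended use:
def _example_is_nep18_duck_array(x: object) -> bool:
    # True for anything exposing ``shape``, ``dtype`` and the NEP 18 hooks
    # (``__array_function__``, ``__array_ufunc__``, ...), e.g. a plain
    # ``numpy.ndarray``.
    return isinstance(x, _arrayfunction)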
# NamedArray can most likely use both __array_function__ and __array_namespace__: _arrayfunction_or_api = (_arrayfunction, _arrayapi) duckarray = Union[ _arrayfunction[_ShapeType_co, _DType_co], _arrayapi[_ShapeType_co, _DType_co] ] # Corresponds to np.typing.NDArray: DuckArray = _arrayfunction[Any, np.dtype[_ScalarType_co]] @runtime_checkable class _chunkedarray( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Minimal chunked duck array. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... @runtime_checkable class _chunkedarrayfunction( _arrayfunction[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Chunked duck array supporting NEP 18. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... @runtime_checkable class _chunkedarrayapi( _arrayapi[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Chunked duck array supporting NEP 47. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... # NamedArray can most likely use both __array_function__ and __array_namespace__: _chunkedarrayfunction_or_api = (_chunkedarrayfunction, _chunkedarrayapi) chunkedduckarray = Union[ _chunkedarrayfunction[_ShapeType_co, _DType_co], _chunkedarrayapi[_ShapeType_co, _DType_co], ] @runtime_checkable class _sparsearray( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Minimal sparse duck array. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... @runtime_checkable class _sparsearrayfunction( _arrayfunction[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Sparse duck array supporting NEP 18. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... @runtime_checkable class _sparsearrayapi( _arrayapi[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Sparse duck array supporting NEP 47. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... 
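# --- Illustrative sketch (editor's addition, not part of xarray) -------------
# Code that needs dense values can sniff for the minimal sparse protocol above
# and densify explicitly, passing everything else through unchanged.  The
# helper name is hypothetical:
def _example_to_dense(x: Any) -> Any:
    # ``_sparsearray`` only guarantees a ``todense()`` returning a numpy array.
    return x.todense() if isinstance(x, _sparsearray) else x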
# NamedArray can most likely use both __array_function__ and __array_namespace__: _sparsearrayfunction_or_api = (_sparsearrayfunction, _sparsearrayapi) sparseduckarray = Union[ _sparsearrayfunction[_ShapeType_co, _DType_co], _sparsearrayapi[_ShapeType_co, _DType_co], ] ErrorOptions = Literal["raise", "ignore"] ErrorOptionsWithWarn = Literal["raise", "warn", "ignore"] xarray-2025.12.0/xarray/namedarray/core.py000066400000000000000000001161251511464676000203040ustar00rootroot00000000000000from __future__ import annotations import copy import math import warnings from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from itertools import starmap from types import EllipsisType from typing import ( TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast, overload, ) import numpy as np # TODO: get rid of this after migrating this class to array API from xarray.core import dtypes, formatting, formatting_html from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, OuterIndexer, ) from xarray.namedarray._aggregations import NamedArrayAggregations from xarray.namedarray._typing import ( ErrorOptionsWithWarn, _arrayapi, _arrayfunction_or_api, _chunkedarray, _default, _dtype, _DType_co, _ScalarType_co, _ShapeType_co, _sparsearrayfunction_or_api, _SupportsImag, _SupportsReal, ) from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.namedarray.pycompat import to_numpy from xarray.namedarray.utils import ( either_dict_or_kwargs, infix_dims, is_dict_like, is_duck_dask_array, to_0d_object_array, ) if TYPE_CHECKING: from numpy.typing import ArrayLike, NDArray from xarray.core.types import Dims, T_Chunks from xarray.namedarray._typing import ( Default, _AttrsLike, _Chunks, _Dim, _Dims, _DimsLike, _DType, _IntOrUnknown, _ScalarType, _Shape, _ShapeType, duckarray, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint try: from dask.typing import ( Graph, NestedKeys, PostComputeCallable, PostPersistCallable, SchedulerGetCallable, ) except ImportError: Graph: Any # type: ignore[no-redef] NestedKeys: Any # type: ignore[no-redef] SchedulerGetCallable: Any # type: ignore[no-redef] PostComputeCallable: Any # type: ignore[no-redef] PostPersistCallable: Any # type: ignore[no-redef] from typing import Self T_NamedArray = TypeVar("T_NamedArray", bound="_NamedArray[Any]") T_NamedArrayInteger = TypeVar( "T_NamedArrayInteger", bound="_NamedArray[np.integer[Any]]" ) @overload def _new( x: NamedArray[Any, _DType_co], dims: _DimsLike | Default = ..., data: duckarray[_ShapeType, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def _new( x: NamedArray[_ShapeType_co, _DType_co], dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType_co, _DType_co]: ... def _new( x: NamedArray[Any, _DType_co], dims: _DimsLike | Default = _default, data: duckarray[_ShapeType, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> NamedArray[_ShapeType, _DType] | NamedArray[Any, _DType_co]: """ Create a new array with new typing information. Parameters ---------- x : NamedArray Array to create a new array from dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. 
attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ dims_ = copy.copy(x._dims) if dims is _default else dims attrs_: Mapping[Any, Any] | None if attrs is _default: attrs_ = None if x._attrs is None else x._attrs.copy() else: attrs_ = attrs if data is _default: return type(x)(dims_, copy.copy(x._data), attrs_) else: cls_ = cast("type[NamedArray[_ShapeType, _DType]]", type(x)) return cls_(dims_, data, attrs_) @overload def from_array( dims: _DimsLike, data: duckarray[_ShapeType, _DType], attrs: _AttrsLike = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def from_array( dims: _DimsLike, data: ArrayLike, attrs: _AttrsLike = ..., ) -> NamedArray[Any, Any]: ... def from_array( dims: _DimsLike, data: duckarray[_ShapeType, _DType] | ArrayLike, attrs: _AttrsLike = None, ) -> NamedArray[_ShapeType, _DType] | NamedArray[Any, Any]: """ Create a Named array from an array-like object. Parameters ---------- dims : str or iterable of str Name(s) of the dimension(s). data : T_DuckArray or ArrayLike The actual data that populates the array. Should match the shape specified by `dims`. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Default is None, meaning no attributes will be stored. """ if isinstance(data, NamedArray): raise TypeError( "Array is already a Named array. Use 'data.data' to retrieve the data array" ) # TODO: dask.array.ma.MaskedArray also exists, better way? if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) # type: ignore[no-untyped-call] if mask.any(): # TODO: requires refactoring/vendoring xarray.core.dtypes and # xarray.core.duck_array_ops raise NotImplementedError("MaskedArray is not supported yet") return NamedArray(dims, data, attrs) if isinstance(data, _arrayfunction_or_api) and not isinstance(data, np.generic): return NamedArray(dims, data, attrs) if isinstance(data, tuple): return NamedArray(dims, to_0d_object_array(data), attrs) # validate whether the data is valid data types. return NamedArray(dims, np.asarray(data), attrs) class NamedArray(NamedArrayAggregations, Generic[_ShapeType_co, _DType_co]): """ A wrapper around duck arrays with named dimensions and attributes which describe a single Array. Numeric operations on this object implement array broadcasting and dimension alignment based on dimension names, rather than axis order. Parameters ---------- dims : str or iterable of hashable Name(s) of the dimension(s). data : array-like or duck-array The actual data that populates the array. Should match the shape specified by `dims`. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Default is None, meaning no attributes will be stored. Raises ------ ValueError If the `dims` length does not match the number of data dimensions (ndim). Examples -------- >>> data = np.array([1.5, 2, 3], dtype=float) >>> narr = NamedArray(("x",), data, {"units": "m"}) # TODO: Better name than narr? 
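    For illustration, the named dimensions, per-dimension sizes and attributes
    can then be inspected via the corresponding properties:

    >>> narr.dims
    ('x',)
    >>> narr.sizes
    {'x': 3}
    >>> narr.attrs
    {'units': 'm'}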
""" __slots__ = ("_attrs", "_data", "_dims") _data: duckarray[Any, _DType_co] _dims: _Dims _attrs: dict[Any, Any] | None def __init__( self, dims: _DimsLike, data: duckarray[Any, _DType_co], attrs: _AttrsLike = None, ): self._data = data self._dims = self._parse_dimensions(dims) self._attrs = dict(attrs) if attrs else None def __init_subclass__(cls, **kwargs: Any) -> None: if NamedArray in cls.__bases__ and (cls._new == NamedArray._new): # Type hinting does not work for subclasses unless _new is # overridden with the correct class. raise TypeError( "Subclasses of `NamedArray` must override the `_new` method." ) super().__init_subclass__(**kwargs) @overload def _new( self, dims: _DimsLike | Default = ..., data: duckarray[_ShapeType, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def _new( self, dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType_co, _DType_co]: ... def _new( self, dims: _DimsLike | Default = _default, data: duckarray[Any, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> NamedArray[_ShapeType, _DType] | NamedArray[_ShapeType_co, _DType_co]: """ Create a new array with new typing information. _new has to be reimplemented each time NamedArray is subclassed, otherwise type hints will not be correct. The same is likely true for methods that relied on _new. Parameters ---------- dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ return _new(self, dims, data, attrs) def _replace( self, dims: _DimsLike | Default = _default, data: duckarray[_ShapeType_co, _DType_co] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> Self: """ Create a new array with the same typing information. The types for each argument cannot change, use self._new if that is a risk. Parameters ---------- dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ return cast("Self", self._new(dims, data, attrs)) def _copy( self, deep: bool = True, data: duckarray[_ShapeType_co, _DType_co] | None = None, memo: dict[int, Any] | None = None, ) -> Self: if data is None: ndata = self._data if deep: ndata = copy.deepcopy(ndata, memo=memo) else: ndata = data self._check_shape(ndata) attrs = ( copy.deepcopy(self._attrs, memo=memo) if deep else copy.copy(self._attrs) ) return self._replace(data=ndata, attrs=attrs) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) def copy( self, deep: bool = True, data: duckarray[_ShapeType_co, _DType_co] | None = None, ) -> Self: """Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. 
Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: True Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored. Returns ------- object : NamedArray New object with dimensions, attributes, and optionally data copied from original. """ return self._copy(deep=deep, data=data) @property def ndim(self) -> int: """ Number of array dimensions. See Also -------- numpy.ndarray.ndim """ return len(self.shape) @property def size(self) -> _IntOrUnknown: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the arrayโ€™s dimensions. See Also -------- numpy.ndarray.size """ return math.prod(self.shape) def __len__(self) -> _IntOrUnknown: try: return self.shape[0] except Exception as exc: raise TypeError("len() of unsized object") from exc @property def dtype(self) -> _DType_co: """ Data-type of the arrayโ€™s elements. See Also -------- ndarray.dtype numpy.dtype """ return self._data.dtype @property def shape(self) -> _Shape: """ Get the shape of the array. Returns ------- shape : tuple of ints Tuple of array dimensions. See Also -------- numpy.ndarray.shape """ return self._data.shape @property def nbytes(self) -> _IntOrUnknown: """ Total bytes consumed by the elements of the data array. If the underlying data array does not include ``nbytes``, estimates the bytes consumed based on the ``size`` and ``dtype``. """ from xarray.namedarray._array_api import _get_data_namespace if hasattr(self._data, "nbytes"): return self._data.nbytes # type: ignore[no-any-return] if hasattr(self.dtype, "itemsize"): itemsize = self.dtype.itemsize elif isinstance(self._data, _arrayapi): xp = _get_data_namespace(self) if xp.isdtype(self.dtype, "bool"): itemsize = 1 elif xp.isdtype(self.dtype, "integral"): itemsize = xp.iinfo(self.dtype).bits // 8 else: itemsize = xp.finfo(self.dtype).bits // 8 else: raise TypeError( "cannot compute the number of bytes (no array API nor nbytes / itemsize)" ) return self.size * itemsize @property def dims(self) -> _Dims: """Tuple of dimension names with which this NamedArray is associated.""" return self._dims @dims.setter def dims(self, value: _DimsLike) -> None: self._dims = self._parse_dimensions(value) def _parse_dimensions(self, dims: _DimsLike) -> _Dims: dims = (dims,) if isinstance(dims, str) else tuple(dims) if len(dims) != self.ndim: raise ValueError( f"dimensions {dims} must have the same length as the " f"number of data dimensions, ndim={self.ndim}" ) if len(set(dims)) < len(dims): repeated_dims = {d for d in dims if dims.count(d) > 1} warnings.warn( f"Duplicate dimension names present: dimensions {repeated_dims} appear more than once in dims={dims}. " "We do not yet support duplicate dimension names, but we do allow initial construction of the object. " "We recommend you rename the dims immediately to become distinct, as most xarray functionality is likely to fail silently if you do not. " "To rename the dimensions you will need to set the ``.dims`` attribute of each variable, ``e.g. 
var.dims=('x0', 'x1')``.", UserWarning, stacklevel=2, ) return dims @property def attrs(self) -> dict[Any, Any]: """Dictionary of local attributes on this NamedArray.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) if value else None def _check_shape(self, new_data: duckarray[Any, _DType_co]) -> None: if new_data.shape != self.shape: raise ValueError( f"replacement data must match the {self.__class__.__name__}'s shape. " f"replacement data has shape {new_data.shape}; {self.__class__.__name__} has shape {self.shape}" ) @property def data(self) -> duckarray[Any, _DType_co]: """ The NamedArray's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. """ return self._data @data.setter def data(self, data: duckarray[Any, _DType_co]) -> None: self._check_shape(data) self._data = data @property def imag( self: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] ) -> NamedArray[_ShapeType, _dtype[_ScalarType]]: """ The imaginary part of the array. See Also -------- numpy.ndarray.imag """ if isinstance(self._data, _arrayapi): from xarray.namedarray._array_api import imag return imag(self) return self._new(data=self._data.imag) @property def real( self: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] ) -> NamedArray[_ShapeType, _dtype[_ScalarType]]: """ The real part of the array. See Also -------- numpy.ndarray.real """ if isinstance(self._data, _arrayapi): from xarray.namedarray._array_api import real return real(self) return self._new(data=self._data.real) def __dask_tokenize__(self) -> object: # Use v.data, instead of v._data, in order to cope with the wrappers # around NetCDF and the like from dask.base import normalize_token return normalize_token((type(self), self._dims, self.data, self._attrs or None)) def __dask_graph__(self) -> Graph | None: if is_duck_dask_array(self._data): return self._data.__dask_graph__() else: # TODO: Should this method just raise instead? 
# raise NotImplementedError("Method requires self.data to be a dask array") return None def __dask_keys__(self) -> NestedKeys: if is_duck_dask_array(self._data): return self._data.__dask_keys__() else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_layers__(self) -> Sequence[str]: if is_duck_dask_array(self._data): return self._data.__dask_layers__() else: raise AttributeError("Method requires self.data to be a dask array.") @property def __dask_optimize__( self, ) -> Callable[..., dict[Any, Any]]: if is_duck_dask_array(self._data): return self._data.__dask_optimize__ # type: ignore[no-any-return] else: raise AttributeError("Method requires self.data to be a dask array.") @property def __dask_scheduler__(self) -> SchedulerGetCallable: if is_duck_dask_array(self._data): return self._data.__dask_scheduler__ else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_postcompute__( self, ) -> tuple[PostComputeCallable, tuple[Any, ...]]: if is_duck_dask_array(self._data): array_func, array_args = self._data.__dask_postcompute__() # type: ignore[no-untyped-call] return self._dask_finalize, (array_func,) + array_args else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_postpersist__( self, ) -> tuple[ Callable[ [Graph, PostPersistCallable[Any], Any, Any], Self, ], tuple[Any, ...], ]: if is_duck_dask_array(self._data): a: tuple[PostPersistCallable[Any], tuple[Any, ...]] a = self._data.__dask_postpersist__() # type: ignore[no-untyped-call] array_func, array_args = a return self._dask_finalize, (array_func,) + array_args else: raise AttributeError("Method requires self.data to be a dask array.") def _dask_finalize( self, results: Graph, array_func: PostPersistCallable[Any], *args: Any, **kwargs: Any, ) -> Self: data = array_func(results, *args, **kwargs) return type(self)(self._dims, data, attrs=self._attrs) @overload def get_axis_num(self, dim: str) -> int: ... # type: ignore [overload-overlap] @overload def get_axis_num(self, dim: Iterable[Hashable]) -> tuple[int, ...]: ... @overload def get_axis_num(self, dim: Hashable) -> int: ... def get_axis_num(self, dim: Hashable | Iterable[Hashable]) -> int | tuple[int, ...]: """Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions. """ if not isinstance(dim, str) and isinstance(dim, Iterable): return tuple(self._get_axis_num(d) for d in dim) else: return self._get_axis_num(dim) def _get_axis_num(self: Any, dim: Hashable) -> int: _raise_if_any_duplicate_dimensions(self.dims) try: return self.dims.index(dim) # type: ignore[no-any-return] except ValueError as err: raise ValueError( f"{dim!r} not found in array dimensions {self.dims!r}" ) from err @property def chunks(self) -> _Chunks | None: """ Tuple of block lengths for this NamedArray's data, in order of dimensions, or None if the underlying data is not a dask array. See Also -------- NamedArray.chunk NamedArray.chunksizes xarray.unify_chunks """ data = self._data if isinstance(data, _chunkedarray): return data.chunks else: return None @property def chunksizes( self, ) -> Mapping[_Dim, _Shape]: """ Mapping from dimension names to block lengths for this NamedArray's data. If this NamedArray does not contain chunked arrays, the mapping will be empty. 
Cannot be modified directly, but can be modified by calling .chunk(). Differs from NamedArray.chunks because it returns a mapping of dimensions to chunk shapes instead of a tuple of chunk shapes. See Also -------- NamedArray.chunk NamedArray.chunks xarray.unify_chunks """ data = self._data if isinstance(data, _chunkedarray): return dict(zip(self.dims, data.chunks, strict=True)) else: return {} @property def sizes(self) -> dict[_Dim, _IntOrUnknown]: """Ordered mapping from dimension names to lengths.""" return dict(zip(self.dims, self.shape, strict=True)) def chunk( self, chunks: T_Chunks = {}, # noqa: B006 # even though it's unsafe, it is being used intentionally here (#4667) chunked_array_type: str | ChunkManagerEntrypoint[Any] | None = None, from_array_kwargs: Any = None, **chunks_kwargs: Any, ) -> Self: """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.Variable See Also -------- Variable.chunks Variable.chunksizes xarray.unify_chunks dask.array.from_array """ if from_array_kwargs is None: from_array_kwargs = {} if chunks is None: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=FutureWarning, stacklevel=2, ) chunks = {} if isinstance(chunks, float | str | int | tuple | list): # TODO we shouldn't assume here that other chunkmanagers can handle these types # TODO should we call normalize_chunks here? pass # dask.array.from_array can handle these directly else: chunks = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") if is_dict_like(chunks): # This method of iteration allows for duplicated dimension names, GH8579 chunks = { dim_number: chunks[dim] for dim_number, dim in enumerate(self.dims) if dim in chunks } chunkmanager = guess_chunkmanager(chunked_array_type) data_old = self._data if chunkmanager.is_chunked_array(data_old): data_chunked = chunkmanager.rechunk(data_old, chunks) # type: ignore[arg-type] else: ndata: duckarray[Any, Any] if not isinstance(data_old, ExplicitlyIndexed): ndata = data_old else: # Unambiguously handle array storage backends (like NetCDF4 and h5py) # that can't handle general array indexing. 
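# --- Editor's illustrative note (an addition, not part of the upstream file): assuming
# dask is installed, ``.chunk`` above coerces the wrapped data to a chunked array, after
# which ``.chunks`` / ``.chunksizes`` report the block structure.
#     >>> chunked = xr.NamedArray(("x", "y"), np.zeros((4, 6))).chunk(x=2, y=3)
#     >>> chunked.chunks      # -> ((2, 2), (3, 3))
#     >>> chunked.chunksizes  # -> {'x': (2, 2), 'y': (3, 3)}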
For example, in netCDF4 you # can do "outer" indexing along two dimensions independent, which works # differently from how NumPy handles it. # da.from_array works by using lazy indexing with a tuple of slices. # Using OuterIndexer is a pragmatic choice: dask does not yet handle # different indexing types in an explicit way: # https://github.com/dask/dask/issues/2883 ndata = ImplicitToExplicitIndexingAdapter(data_old, OuterIndexer) # type: ignore[assignment] if is_dict_like(chunks): chunks = tuple(starmap(chunks.get, enumerate(ndata.shape))) data_chunked = chunkmanager.from_array(ndata, chunks, **from_array_kwargs) # type: ignore[arg-type] return self._replace(data=data_chunked) def to_numpy(self) -> np.ndarray[Any, Any]: """Coerces wrapped data to numpy and returns a numpy.ndarray""" # TODO an entrypoint so array libraries can choose coercion method? return to_numpy(self._data) def as_numpy(self) -> Self: """Coerces wrapped data into a numpy array, returning a Variable.""" return self._replace(data=self.to_numpy()) def reduce( self, func: Callable[..., Any], dim: Dims = None, axis: int | Sequence[int] | None = None, keepdims: bool = False, **kwargs: Any, ) -> NamedArray[Any, Any]: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or Sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. 
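Editor's illustrative sketch (an addition, not upstream text) of ``reduce``, assuming
``import numpy as np`` and ``import xarray as xr`` as in the other examples in this
module:

    >>> arr = xr.NamedArray(("x", "y"), np.arange(6).reshape(2, 3))
    >>> arr.reduce(np.sum, dim="x").dims  # -> ('y',)
    >>> arr.reduce(np.sum).dims  # -> (), reduced over every dimension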
""" if dim == ...: dim = None if dim is not None and axis is not None: raise ValueError("cannot supply both 'axis' and 'dim' arguments") if dim is not None: axis = self.get_axis_num(dim) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) if axis is not None: if isinstance(axis, tuple) and len(axis) == 1: # unpack axis for the benefit of functions # like np.argmin which can't handle tuple arguments axis = axis[0] data = func(self.data, axis=axis, **kwargs) else: data = func(self.data, **kwargs) if getattr(data, "shape", ()) == self.shape: dims = self.dims else: removed_axes: Iterable[int] if axis is None: removed_axes = range(self.ndim) else: removed_axes = np.atleast_1d(axis) % self.ndim if keepdims: # Insert np.newaxis for removed dims slices = tuple( np.newaxis if i in removed_axes else slice(None, None) for i in range(self.ndim) ) if getattr(data, "shape", None) is None: # Reduce has produced a scalar value, not an array-like data = np.asanyarray(data)[slices] else: data = data[slices] dims = self.dims else: dims = tuple( adim for n, adim in enumerate(self.dims) if n not in removed_axes ) # Return NamedArray to handle IndexVariable when data is nD return from_array(dims, data, attrs=self._attrs) def _nonzero(self: T_NamedArrayInteger) -> tuple[T_NamedArrayInteger, ...]: """Equivalent numpy's nonzero but returns a tuple of NamedArrays.""" # TODO: we should replace dask's native nonzero # after https://github.com/dask/dask/issues/1076 is implemented. # TODO: cast to ndarray and back to T_DuckArray is a workaround nonzeros = np.nonzero(cast("NDArray[np.integer[Any]]", self.data)) _attrs = self.attrs return tuple( cast("T_NamedArrayInteger", self._new((dim,), nz, _attrs)) for nz, dim in zip(nonzeros, self.dims, strict=True) ) def __repr__(self) -> str: return formatting.array_repr(self) def _repr_html_(self) -> str: return formatting_html.array_repr(self) def _as_sparse( self, sparse_format: Literal["coo"] | Default = _default, fill_value: ArrayLike | Default = _default, ) -> NamedArray[Any, _DType_co]: """ Use sparse-array as backend. """ import sparse from xarray.namedarray._array_api import astype # TODO: what to do if dask-backended? if fill_value is _default: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = dtypes.result_type(self.dtype, fill_value) if sparse_format is _default: sparse_format = "coo" try: as_sparse = getattr(sparse, f"as_{sparse_format.lower()}") except AttributeError as exc: raise ValueError(f"{sparse_format} is not a valid sparse format") from exc data = as_sparse(astype(self, dtype).data, fill_value=fill_value) return self._new(data=data) def _to_dense(self) -> NamedArray[Any, _DType_co]: """ Change backend from sparse to np.array. """ if isinstance(self._data, _sparsearrayfunction_or_api): data_dense: np.ndarray[Any, _DType_co] = self._data.todense() return self._new(data=data_dense) else: raise TypeError("self.data is not a sparse array") def permute_dims( self, *dim: Iterable[_Dim] | EllipsisType, missing_dims: ErrorOptionsWithWarn = "raise", ) -> NamedArray[Any, _DType_co]: """Return a new object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the order of the dimensions. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the NamedArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- NamedArray The returned NamedArray has permuted dimensions and data with the same attributes as the original. See Also -------- numpy.transpose """ from xarray.namedarray._array_api import permute_dims if not dim: dims = self.dims[::-1] else: dims = tuple(infix_dims(dim, self.dims, missing_dims)) # type: ignore[arg-type] if len(dims) < 2 or dims == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) axes = self.get_axis_num(dims) assert isinstance(axes, tuple) return permute_dims(self, axes) @property def T(self) -> NamedArray[Any, _DType_co]: """Return a new object with transposed dimensions.""" if self.ndim != 2: raise ValueError( f"x.T requires x to have 2 dimensions, got {self.ndim}. Use x.permute_dims() to permute dimensions." ) return self.permute_dims() def broadcast_to( self, dim: Mapping[_Dim, int] | None = None, **dim_kwargs: Any ) -> NamedArray[Any, _DType_co]: """ Broadcast the NamedArray to a new shape. New dimensions are not allowed. This method allows for the expansion of the array's dimensions to a specified shape. It handles both positional and keyword arguments for specifying the dimensions to broadcast. An error is raised if new dimensions are attempted to be added. Parameters ---------- dim : dict, str, sequence of str, optional Dimensions to broadcast the array to. If a dict, keys are dimension names and values are the new sizes. If a string or sequence of strings, existing dimensions are matched with a size of 1. **dim_kwargs : Any Additional dimensions specified as keyword arguments. Each keyword argument specifies the name of an existing dimension and its size. Returns ------- NamedArray A new NamedArray with the broadcasted dimensions. Examples -------- >>> data = np.asarray([[1.0, 2.0], [3.0, 4.0]]) >>> array = xr.NamedArray(("x", "y"), data) >>> array.sizes {'x': 2, 'y': 2} >>> broadcasted = array.broadcast_to(x=2, y=2) >>> broadcasted.sizes {'x': 2, 'y': 2} """ from xarray.core import duck_array_ops combined_dims = either_dict_or_kwargs(dim, dim_kwargs, "broadcast_to") # Check that no new dimensions are added if new_dims := set(combined_dims) - set(self.dims): raise ValueError( f"Cannot add new dimensions: {new_dims}. Only existing dimensions are allowed. " "Use `expand_dims` method to add new dimensions." ) # Create a dictionary of the current dimensions and their sizes current_shape = self.sizes # Update the current shape with the new dimensions, keeping the order of the original dimensions broadcast_shape = {d: current_shape.get(d, 1) for d in self.dims} broadcast_shape |= combined_dims # Ensure the dimensions are in the correct order ordered_dims = list(broadcast_shape.keys()) ordered_shape = tuple(broadcast_shape[d] for d in ordered_dims) data = duck_array_ops.broadcast_to(self._data, ordered_shape) # type: ignore[no-untyped-call] # TODO: use array-api-compat function return self._new(data=data, dims=ordered_dims) def expand_dims( self, dim: _Dim | Default = _default, ) -> NamedArray[Any, _DType_co]: """ Expand the dimensions of the NamedArray. This method adds new dimensions to the object. The new dimensions are added at the beginning of the array. 
Parameters ---------- dim : Hashable, optional Dimension name to expand the array to. This dimension will be added at the beginning of the array. Returns ------- NamedArray A new NamedArray with expanded dimensions. Examples -------- >>> data = np.asarray([[1.0, 2.0], [3.0, 4.0]]) >>> array = xr.NamedArray(("x", "y"), data) # expand dimensions by specifying a new dimension name >>> expanded = array.expand_dims(dim="z") >>> expanded.dims ('z', 'x', 'y') """ from xarray.namedarray._array_api import expand_dims return expand_dims(self, dim=dim) _NamedArray = NamedArray[Any, np.dtype[_ScalarType_co]] def _raise_if_any_duplicate_dimensions( dims: _Dims, err_context: str = "This function" ) -> None: if len(set(dims)) < len(dims): repeated_dims = {d for d in dims if dims.count(d) > 1} raise ValueError( f"{err_context} cannot handle duplicate dimensions, but dimensions {repeated_dims} appear more than once on this object's dims: {dims}" ) xarray-2025.12.0/xarray/namedarray/daskmanager.py000066400000000000000000000177731511464676000216420ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Iterable, Sequence from typing import TYPE_CHECKING, Any import numpy as np from xarray.core.indexing import ImplicitToExplicitIndexingAdapter from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint, T_ChunkedArray from xarray.namedarray.utils import is_duck_dask_array, module_available if TYPE_CHECKING: from xarray.namedarray._typing import ( T_Chunks, _DType_co, _NormalizedChunks, duckarray, ) try: from dask.array import Array as DaskArray except ImportError: DaskArray = np.ndarray[Any, Any] dask_available = module_available("dask") class DaskManager(ChunkManagerEntrypoint["DaskArray"]): array_cls: type[DaskArray] available: bool = dask_available def __init__(self) -> None: # TODO can we replace this with a class attribute instead? from dask.array import Array self.array_cls = Array def is_chunked_array(self, data: duckarray[Any, Any]) -> bool: return is_duck_dask_array(data) def chunks(self, data: Any) -> _NormalizedChunks: return data.chunks # type: ignore[no-any-return] def normalize_chunks( self, chunks: T_Chunks | _NormalizedChunks, shape: tuple[int, ...] | None = None, limit: int | None = None, dtype: _DType_co | None = None, previous_chunks: _NormalizedChunks | None = None, ) -> Any: """Called by open_dataset""" from dask.array.core import normalize_chunks return normalize_chunks( chunks, shape=shape, limit=limit, dtype=dtype, previous_chunks=previous_chunks, ) # type: ignore[no-untyped-call] def from_array( self, data: Any, chunks: T_Chunks | _NormalizedChunks, **kwargs: Any ) -> DaskArray | Any: import dask.array as da if isinstance(data, ImplicitToExplicitIndexingAdapter): # lazily loaded backend array classes should use NumPy array operations. 
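# --- Editor's illustrative note (an addition, not part of the upstream file): a minimal
# sketch of using this manager directly; it assumes dask is installed and
# ``import numpy as np``.
#     >>> from xarray.namedarray.daskmanager import DaskManager
#     >>> dm = DaskManager()
#     >>> darr = dm.from_array(np.arange(10), chunks=5)
#     >>> dm.is_chunked_array(darr)  # -> True
#     >>> dm.chunks(darr)  # -> ((5, 5),)
#     >>> dm.compute(darr)  # -> a 1-tuple holding the computed numpy array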
kwargs["meta"] = np.ndarray return da.from_array( data, chunks, **kwargs, ) # type: ignore[no-untyped-call] def compute( self, *data: Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: from dask.array import compute return compute(*data, **kwargs) # type: ignore[no-untyped-call, no-any-return] def persist(self, *data: Any, **kwargs: Any) -> tuple[DaskArray | Any, ...]: from dask import persist return persist(*data, **kwargs) # type: ignore[no-untyped-call, no-any-return] @property def array_api(self) -> Any: from dask import array as da return da def reduction( self, arr: T_ChunkedArray, func: Callable[..., Any], combine_func: Callable[..., Any] | None = None, aggregate_func: Callable[..., Any] | None = None, axis: int | Sequence[int] | None = None, dtype: _DType_co | None = None, keepdims: bool = False, ) -> DaskArray | Any: from dask.array import reduction return reduction( arr, chunk=func, combine=combine_func, aggregate=aggregate_func, axis=axis, dtype=dtype, keepdims=keepdims, ) # type: ignore[no-untyped-call] def scan( self, func: Callable[..., Any], binop: Callable[..., Any], ident: float, arr: T_ChunkedArray, axis: int | None = None, dtype: _DType_co | None = None, **kwargs: Any, ) -> DaskArray | Any: from dask.array.reductions import cumreduction return cumreduction( func, binop, ident, arr, axis=axis, dtype=dtype, **kwargs, ) # type: ignore[no-untyped-call] def apply_gufunc( self, func: Callable[..., Any], signature: str, *args: Any, axes: Sequence[tuple[int, ...]] | None = None, axis: int | None = None, keepdims: bool = False, output_dtypes: Sequence[_DType_co] | None = None, output_sizes: dict[str, int] | None = None, vectorize: bool | None = None, allow_rechunk: bool = False, meta: tuple[np.ndarray[Any, _DType_co], ...] | None = None, **kwargs: Any, ) -> Any: from dask.array.gufunc import apply_gufunc return apply_gufunc( func, signature, *args, axes=axes, axis=axis, keepdims=keepdims, output_dtypes=output_dtypes, output_sizes=output_sizes, vectorize=vectorize, allow_rechunk=allow_rechunk, meta=meta, **kwargs, ) # type: ignore[no-untyped-call] def map_blocks( self, func: Callable[..., Any], *args: Any, dtype: _DType_co | None = None, chunks: tuple[int, ...] | None = None, drop_axis: int | Sequence[int] | None = None, new_axis: int | Sequence[int] | None = None, **kwargs: Any, ) -> Any: from dask.array import map_blocks # pass through name, meta, token as kwargs return map_blocks( func, *args, dtype=dtype, chunks=chunks, drop_axis=drop_axis, new_axis=new_axis, **kwargs, ) # type: ignore[no-untyped-call] def blockwise( self, func: Callable[..., Any], out_ind: Iterable[Any], *args: Any, # can't type this as mypy assumes args are all same type, but dask blockwise args alternate types name: str | None = None, token: Any | None = None, dtype: _DType_co | None = None, adjust_chunks: dict[Any, Callable[..., Any]] | None = None, new_axes: dict[Any, int] | None = None, align_arrays: bool = True, concatenate: bool | None = None, meta: tuple[np.ndarray[Any, _DType_co], ...] 
| None = None, **kwargs: Any, ) -> DaskArray | Any: from dask.array import blockwise return blockwise( func, out_ind, *args, name=name, token=token, dtype=dtype, adjust_chunks=adjust_chunks, new_axes=new_axes, align_arrays=align_arrays, concatenate=concatenate, meta=meta, **kwargs, ) # type: ignore[no-untyped-call] def unify_chunks( self, *args: Any, # can't type this as mypy assumes args are all same type, but dask unify_chunks args alternate types **kwargs: Any, ) -> tuple[dict[str, _NormalizedChunks], list[DaskArray]]: from dask.array.core import unify_chunks return unify_chunks(*args, **kwargs) # type: ignore[no-any-return, no-untyped-call] def store( self, sources: Any | Sequence[Any], targets: Any, **kwargs: Any, ) -> Any: from dask.array import store return store( sources=sources, targets=targets, **kwargs, ) def shuffle( self, x: DaskArray, indexer: list[list[int]], axis: int, chunks: T_Chunks ) -> DaskArray: import dask.array if not module_available("dask", minversion="2024.08.1"): raise ValueError( "This method is very inefficient on dask<2024.08.1. Please upgrade." ) if chunks is None: chunks = "auto" if chunks != "auto": raise NotImplementedError("Only chunks='auto' is supported at present.") return dask.array.shuffle(x, indexer, axis, chunks="auto") def get_auto_chunk_size(self) -> int: from dask import config as dask_config from dask.utils import parse_bytes return parse_bytes(dask_config.get("array.chunk-size")) xarray-2025.12.0/xarray/namedarray/dtypes.py000066400000000000000000000127241511464676000206640ustar00rootroot00000000000000from __future__ import annotations import functools from typing import Any, Literal, TypeGuard import numpy as np from xarray.namedarray import utils # Use as a sentinel value to indicate a dtype appropriate NA value. NA = utils.ReprObject("") @functools.total_ordering class AlwaysGreaterThan: def __gt__(self, other: object) -> Literal[True]: return True def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) @functools.total_ordering class AlwaysLessThan: def __lt__(self, other: object) -> Literal[True]: return True def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) # Equivalence to np.inf (-np.inf) for object-type INF = AlwaysGreaterThan() NINF = AlwaysLessThan() # Pairs of types that, if both found, should be promoted to object dtype # instead of following NumPy's own type-promotion rules. These type promotion # rules match pandas instead. For reference, see the NumPy type hierarchy: # https://numpy.org/doc/stable/reference/arrays.scalars.html PROMOTE_TO_OBJECT: tuple[tuple[type[np.generic], type[np.generic]], ...] = ( (np.number, np.character), # numpy promotes to character (np.bool_, np.character), # numpy promotes to character (np.bytes_, np.str_), # numpy promotes to unicode ) def maybe_promote(dtype: np.dtype[np.generic]) -> tuple[np.dtype[np.generic], Any]: """Simpler equivalent of pandas.core.common._maybe_promote Parameters ---------- dtype : np.dtype Returns ------- dtype : Promoted dtype that can hold missing values. fill_value : Valid missing value for the promoted dtype. """ # N.B. 
these casting rules should match pandas dtype_: np.typing.DTypeLike fill_value: Any if np.issubdtype(dtype, np.floating): dtype_ = dtype fill_value = np.nan elif np.issubdtype(dtype, np.timedelta64): # See https://github.com/numpy/numpy/issues/10685 # np.timedelta64 is a subclass of np.integer # Check np.timedelta64 before np.integer fill_value = np.timedelta64("NaT") dtype_ = dtype elif np.issubdtype(dtype, np.integer): dtype_ = np.float32 if dtype.itemsize <= 2 else np.float64 fill_value = np.nan elif np.issubdtype(dtype, np.complexfloating): dtype_ = dtype fill_value = np.nan + np.nan * 1j elif np.issubdtype(dtype, np.datetime64): dtype_ = dtype fill_value = np.datetime64("NaT") else: dtype_ = np.object_ fill_value = np.nan dtype_out = np.dtype(dtype_) fill_value = dtype_out.type(fill_value) return dtype_out, fill_value NAT_TYPES = {np.datetime64("NaT").dtype, np.timedelta64("NaT").dtype} def get_fill_value(dtype: np.dtype[np.generic]) -> Any: """Return an appropriate fill value for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : Missing value corresponding to this dtype. """ _, fill_value = maybe_promote(dtype) return fill_value def get_pos_infinity( dtype: np.dtype[np.generic], max_for_int: bool = False ) -> float | complex | AlwaysGreaterThan: """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype max_for_int : bool Return np.iinfo(dtype).max instead of np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if issubclass(dtype.type, np.floating): return np.inf if issubclass(dtype.type, np.integer): return np.iinfo(dtype.type).max if max_for_int else np.inf if issubclass(dtype.type, np.complexfloating): return np.inf + 1j * np.inf return INF def get_neg_infinity( dtype: np.dtype[np.generic], min_for_int: bool = False ) -> float | complex | AlwaysLessThan: """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype min_for_int : bool Return np.iinfo(dtype).min instead of -np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if issubclass(dtype.type, np.floating): return -np.inf if issubclass(dtype.type, np.integer): return np.iinfo(dtype.type).min if min_for_int else -np.inf if issubclass(dtype.type, np.complexfloating): return -np.inf - 1j * np.inf return NINF def is_datetime_like( dtype: np.dtype[np.generic], ) -> TypeGuard[np.datetime64 | np.timedelta64]: """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) def result_type( *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike | None, ) -> np.dtype[np.generic]: """Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy and dask arrays. Returns ------- numpy.dtype for the result. 
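Editor's illustrative sketch (an addition, not upstream text) of the pandas-style
promotion implemented in this module, assuming ``import numpy as np``:

    >>> from xarray.namedarray.dtypes import maybe_promote, result_type
    >>> maybe_promote(np.dtype("int16"))  # -> (dtype('float32'), nan)
    >>> maybe_promote(np.dtype("int64"))  # -> (dtype('float64'), nan)
    >>> result_type(np.array([1]), np.array(["a"]))  # -> dtype('O'), not a string dtype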
""" types = {np.result_type(t).type for t in arrays_and_dtypes} for left, right in PROMOTE_TO_OBJECT: if any(issubclass(t, left) for t in types) and any( issubclass(t, right) for t in types ): return np.dtype(object) return np.result_type(*arrays_and_dtypes) xarray-2025.12.0/xarray/namedarray/parallelcompat.py000066400000000000000000000667271511464676000223700ustar00rootroot00000000000000""" The code in this module is an experiment in going from N=1 to N=2 parallel computing frameworks in xarray. It could later be used as the basis for a public interface allowing any N frameworks to interoperate with xarray, but for now it is just a private experiment. """ from __future__ import annotations import functools from abc import ABC, abstractmethod from collections.abc import Callable, Iterable, Sequence from importlib.metadata import EntryPoint, entry_points from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar import numpy as np from xarray.core.options import OPTIONS from xarray.core.utils import emit_user_level_warning from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: from xarray.namedarray._typing import ( T_Chunks, _Chunks, _DType, _DType_co, _NormalizedChunks, _ShapeType, duckarray, ) class ChunkedArrayMixinProtocol(Protocol): def rechunk(self, chunks: Any, **kwargs: Any) -> Any: ... @property def dtype(self) -> np.dtype[Any]: ... @property def chunks(self) -> _NormalizedChunks: ... def compute( self, *data: Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: ... T_ChunkedArray = TypeVar("T_ChunkedArray", bound=ChunkedArrayMixinProtocol) KNOWN_CHUNKMANAGERS = { "dask": "dask", "cubed": "cubed-xarray", "arkouda": "arkouda-xarray", } @functools.lru_cache(maxsize=1) def list_chunkmanagers() -> dict[str, ChunkManagerEntrypoint[Any]]: """ Return a dictionary of available chunk managers and their ChunkManagerEntrypoint subclass objects. Returns ------- chunkmanagers : dict Dictionary whose values are registered ChunkManagerEntrypoint subclass instances, and whose values are the strings under which they are registered. """ entrypoints = entry_points(group="xarray.chunkmanagers") return load_chunkmanagers(entrypoints) def load_chunkmanagers( entrypoints: Sequence[EntryPoint], ) -> dict[str, ChunkManagerEntrypoint[Any]]: """Load entrypoints and instantiate chunkmanagers only once.""" loaded_entrypoints = {} for entrypoint in entrypoints: try: loaded_entrypoints[entrypoint.name] = entrypoint.load() except ModuleNotFoundError as e: emit_user_level_warning( f"Failed to load chunk manager entrypoint {entrypoint.name} due to {e}. Skipping.", ) available_chunkmanagers = { name: chunkmanager() for name, chunkmanager in loaded_entrypoints.items() if chunkmanager.available } return available_chunkmanagers def guess_chunkmanager( manager: str | ChunkManagerEntrypoint[Any] | None, ) -> ChunkManagerEntrypoint[Any]: """ Get namespace of chunk-handling methods, guessing from what's available. If the name of a specific ChunkManager is given (e.g. "dask"), then use that. Else use whatever is installed, defaulting to dask if there are multiple options. 
""" available_chunkmanagers = list_chunkmanagers() if manager is None: if len(available_chunkmanagers) == 1: # use the only option available manager = next(iter(available_chunkmanagers.keys())) else: # use the one in options (default dask) manager = OPTIONS["chunk_manager"] if isinstance(manager, str): if manager not in available_chunkmanagers and manager in KNOWN_CHUNKMANAGERS: raise ImportError( f"chunk manager {manager!r} is not available." f" Please make sure {KNOWN_CHUNKMANAGERS[manager]!r} is installed" " and importable." ) elif len(available_chunkmanagers) == 0: raise ImportError( "no chunk managers available. Try installing `dask` or another package" " that provides a chunk manager." ) elif manager not in available_chunkmanagers: raise ValueError( f"unrecognized chunk manager {manager!r} - must be one of the installed" f" chunk managers: {list(available_chunkmanagers)}" ) return available_chunkmanagers[manager] elif isinstance(manager, ChunkManagerEntrypoint): # already a valid ChunkManager so just pass through return manager else: raise TypeError( "manager must be a string or instance of ChunkManagerEntrypoint," f" but received type {type(manager)}" ) def get_chunked_array_type(*args: Any) -> ChunkManagerEntrypoint[Any]: """ Detects which parallel backend should be used for given set of arrays. Also checks that all arrays are of same chunking type (i.e. not a mix of cubed and dask). """ # TODO this list is probably redundant with something inside xarray.apply_ufunc ALLOWED_NON_CHUNKED_TYPES = {int, float, np.ndarray} chunked_arrays = [ a for a in args if is_chunked_array(a) and type(a) not in ALLOWED_NON_CHUNKED_TYPES ] # Asserts all arrays are the same type (or numpy etc.) chunked_array_types = {type(a) for a in chunked_arrays} if len(chunked_array_types) > 1: raise TypeError( f"Mixing chunked array types is not supported, but received multiple types: {chunked_array_types}" ) elif len(chunked_array_types) == 0: raise TypeError("Expected a chunked array but none were found") # iterate over defined chunk managers, seeing if each recognises this array type chunked_arr = chunked_arrays[0] chunkmanagers = list_chunkmanagers() selected = [ chunkmanager for chunkmanager in chunkmanagers.values() if chunkmanager.is_chunked_array(chunked_arr) ] if not selected: raise TypeError( f"Could not find a Chunk Manager which recognises type {type(chunked_arr)}" ) elif len(selected) >= 2: raise TypeError(f"Multiple ChunkManagers recognise type {type(chunked_arr)}") else: return selected[0] class ChunkManagerEntrypoint(ABC, Generic[T_ChunkedArray]): """ Interface between a particular parallel computing framework and xarray. This abstract base class must be subclassed by libraries implementing chunked array types, and registered via the ``chunkmanagers`` entrypoint. Abstract methods on this class must be implemented, whereas non-abstract methods are only required in order to enable a subset of xarray functionality, and by default will raise a ``NotImplementedError`` if called. Attributes ---------- array_cls Type of the array class this parallel computing framework provides. Parallel frameworks need to provide an array class that supports the array API standard. This attribute is used for array instance type checking at runtime. 
""" array_cls: type[T_ChunkedArray] available: bool = True @abstractmethod def __init__(self) -> None: """Used to set the array_cls attribute at import time.""" raise NotImplementedError() def is_chunked_array(self, data: duckarray[Any, Any]) -> bool: """ Check if the given object is an instance of this type of chunked array. Compares against the type stored in the array_cls attribute by default. Parameters ---------- data : Any Returns ------- is_chunked : bool See Also -------- dask.is_dask_collection """ return isinstance(data, self.array_cls) @abstractmethod def chunks(self, data: T_ChunkedArray) -> _NormalizedChunks: """ Return the current chunks of the given array. Returns chunks explicitly as a tuple of tuple of ints. Used internally by xarray objects' .chunks and .chunksizes properties. Parameters ---------- data : chunked array Returns ------- chunks : tuple[tuple[int, ...], ...] See Also -------- dask.array.Array.chunks cubed.Array.chunks """ raise NotImplementedError() @abstractmethod def normalize_chunks( self, chunks: _Chunks | _NormalizedChunks, shape: _ShapeType | None = None, limit: int | None = None, dtype: _DType | None = None, previous_chunks: _NormalizedChunks | None = None, ) -> _NormalizedChunks: """ Normalize given chunking pattern into an explicit tuple of tuples representation. Exposed primarily because different chunking backends may want to make different decisions about how to automatically chunk along dimensions not given explicitly in the input chunks. Called internally by xarray.open_dataset. Parameters ---------- chunks : tuple, int, dict, or string The chunks to be normalized. shape : Tuple[int] The shape of the array limit : int (optional) The maximum block size to target in bytes, if freedom is given to choose dtype : np.dtype previous_chunks : Tuple[Tuple[int]], optional Chunks from a previous array that we should use for inspiration when rechunking dimensions automatically. See Also -------- dask.array.core.normalize_chunks """ raise NotImplementedError() @abstractmethod def from_array( self, data: duckarray[Any, Any], chunks: _Chunks, **kwargs: Any ) -> T_ChunkedArray: """ Create a chunked array from a non-chunked numpy-like array. Generally input should have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing. Called when the .chunk method is called on an xarray object that is not already chunked. Also called within open_dataset (when chunks is not None) to create a chunked array from an xarray lazily indexed array. Parameters ---------- data : array_like chunks : int, tuple How to chunk the array. See Also -------- dask.array.from_array cubed.from_array """ raise NotImplementedError() def rechunk( self, data: T_ChunkedArray, chunks: _NormalizedChunks | tuple[int, ...] | _Chunks, **kwargs: Any, ) -> Any: """ Changes the chunking pattern of the given array. Called when the .chunk method is called on an xarray object that is already chunked. Parameters ---------- data : dask array Array to be rechunked. chunks : int, tuple, dict or str, optional The new block dimensions to create. -1 indicates the full size of the corresponding dimension. Default is "auto" which automatically determines chunk sizes. 
Returns ------- chunked array See Also -------- dask.array.Array.rechunk cubed.Array.rechunk """ from xarray.core.common import _contains_cftime_datetimes from xarray.namedarray.utils import _get_chunk if _contains_cftime_datetimes(data): chunks2 = _get_chunk(data, chunks, self, preferred_chunks={}) # type: ignore[arg-type] else: chunks2 = chunks # type: ignore[assignment] return data.rechunk(chunks2, **kwargs) @abstractmethod def compute( self, *data: T_ChunkedArray | Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: """ Computes one or more chunked arrays, returning them as eager numpy arrays. Called anytime something needs to computed, including multiple arrays at once. Used by `.compute`, `.persist`, `.values`. Parameters ---------- *data : object Any number of objects. If an object is an instance of the chunked array type, it is computed and the in-memory result returned as a numpy array. All other types should be passed through unchanged. Returns ------- objs The input, but with all chunked arrays now computed. See Also -------- dask.compute cubed.compute """ raise NotImplementedError() def shuffle( self, x: T_ChunkedArray, indexer: list[list[int]], axis: int, chunks: T_Chunks ) -> T_ChunkedArray: raise NotImplementedError() def persist( self, *data: T_ChunkedArray | Any, **kwargs: Any ) -> tuple[T_ChunkedArray | Any, ...]: """ Persist one or more chunked arrays in memory. Parameters ---------- *data : object Any number of objects. If an object is an instance of the chunked array type, it is persisted as a chunked array in memory. All other types should be passed through unchanged. Returns ------- objs The input, but with all chunked arrays now persisted in memory. See Also -------- dask.persist """ raise NotImplementedError() @property def array_api(self) -> Any: """ Return the array_api namespace following the python array API standard. See https://data-apis.org/array-api/latest/ . Currently used to access the array API function ``full_like``, which is called within the xarray constructors ``xarray.full_like``, ``xarray.ones_like``, ``xarray.zeros_like``, etc. See Also -------- dask.array cubed.array_api """ raise NotImplementedError() def reduction( self, arr: T_ChunkedArray, func: Callable[..., Any], combine_func: Callable[..., Any] | None = None, aggregate_func: Callable[..., Any] | None = None, axis: int | Sequence[int] | None = None, dtype: _DType_co | None = None, keepdims: bool = False, ) -> T_ChunkedArray: """ A general version of array reductions along one or more axes. Used inside some reductions like nanfirst, which is used by ``groupby.first``. Parameters ---------- arr : chunked array Data to be reduced along one or more axes. func : Callable(x_chunk, axis, keepdims) First function to be executed when resolving the dask graph. This function is applied in parallel to all original chunks of x. See below for function parameters. combine_func : Callable(x_chunk, axis, keepdims), optional Function used for intermediate recursive aggregation (see split_every below). If omitted, it defaults to aggregate_func. aggregate_func : Callable(x_chunk, axis, keepdims) Last function to be executed, producing the final output. It is always invoked, even when the reduced Array counts a single chunk along the reduced axes. axis : int or sequence of ints, optional Axis or axes to aggregate upon. If omitted, aggregate along all axes. dtype : np.dtype data type of output. This argument was previously optional, but leaving as ``None`` will now raise an exception. 
keepdims : boolean, optional Whether the reduction function should preserve the reduced axes, leaving them at size ``output_size``, or remove them. Returns ------- chunked array See Also -------- dask.array.reduction cubed.core.reduction """ raise NotImplementedError() def scan( self, func: Callable[..., Any], binop: Callable[..., Any], ident: float, arr: T_ChunkedArray, axis: int | None = None, dtype: _DType_co | None = None, **kwargs: Any, ) -> T_ChunkedArray: """ General version of a 1D scan, also known as a cumulative array reduction. Used in ``ffill`` and ``bfill`` in xarray. Parameters ---------- func: callable Cumulative function like np.cumsum or np.cumprod binop: callable Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul`` ident: Number Associated identity like ``np.cumsum->0`` or ``np.cumprod->1`` arr: dask Array axis: int, optional dtype: dtype Returns ------- Chunked array See also -------- dask.array.cumreduction """ raise NotImplementedError() @abstractmethod def apply_gufunc( self, func: Callable[..., Any], signature: str, *args: Any, axes: Sequence[tuple[int, ...]] | None = None, keepdims: bool = False, output_dtypes: Sequence[_DType_co] | None = None, vectorize: bool | None = None, **kwargs: Any, ) -> Any: """ Apply a generalized ufunc or similar python function to arrays. ``signature`` determines if the function consumes or produces core dimensions. The remaining dimensions in given input arrays (``*args``) are considered loop dimensions and are required to broadcast naturally against each other. In other terms, this function is like ``np.vectorize``, but for the blocks of chunked arrays. If the function itself shall also be vectorized use ``vectorize=True`` for convenience. Called inside ``xarray.apply_ufunc``, which is called internally for most xarray operations. Therefore this method must be implemented for the vast majority of xarray computations to be supported. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on input arrays (``*args``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, ``output_core_dims`` has to be set as well. signature: string Specifies what core dimensions are consumed and produced by ``func``. According to the specification of numpy.gufunc signature [2]_ *args : numeric Input arrays or scalars to the callable function. axes: List of tuples, optional, keyword only A list of tuples with indices of axes a generalized ufunc should operate on. For instance, for a signature of ``"(i,j),(j,k)->(i,k)"`` appropriate for matrix multiplication, the base elements are two-dimensional matrices and these are taken to be stored in the two last axes of each argument. The corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``. For simplicity, for generalized ufuncs that operate on 1-dimensional arrays (vectors), a single integer is accepted instead of a single-element tuple, and for generalized ufuncs for which all outputs are scalars, the output tuples can be omitted. keepdims: bool, optional, keyword only If this is set to True, axes which are reduced over will be left in the result as a dimension with size one, so that the result will broadcast correctly against the inputs. 
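Editor's illustrative sketch (an addition, not upstream text) of an elementwise
``apply_gufunc`` call routed through the dask-backed manager (requires dask and assumes
``import numpy as np``):

    >>> from xarray.namedarray.parallelcompat import guess_chunkmanager
    >>> dm = guess_chunkmanager("dask")
    >>> x = dm.from_array(np.arange(4), chunks=2)
    >>> dm.apply_gufunc(np.square, "()->()", x).compute()  # -> array([0, 1, 4, 9])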
This option can only be used for generalized ufuncs that operate on inputs that all have the same number of core dimensions and with outputs that have no core dimensions , i.e., with signatures like ``"(i),(i)->()"`` or ``"(m,m)->()"``. If used, the location of the dimensions in the output can be controlled with axes and axis. output_dtypes : Optional, dtype or list of dtypes, keyword only Valid numpy dtype specification or list thereof. If not given, a call of ``func`` with a small set of data is performed in order to try to automatically determine the output dtypes. vectorize: bool, keyword only If set to ``True``, ``np.vectorize`` is applied to ``func`` for convenience. Defaults to ``False``. **kwargs : dict Extra keyword arguments to pass to `func` Returns ------- Single chunked array or tuple of chunked arrays See Also -------- dask.array.gufunc.apply_gufunc cubed.apply_gufunc References ---------- .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html """ raise NotImplementedError() def map_blocks( self, func: Callable[..., Any], *args: Any, dtype: _DType_co | None = None, chunks: tuple[int, ...] | None = None, drop_axis: int | Sequence[int] | None = None, new_axis: int | Sequence[int] | None = None, **kwargs: Any, ) -> Any: """ Map a function across all blocks of a chunked array. Called in elementwise operations, but notably not (currently) called within xarray.map_blocks. Parameters ---------- func : callable Function to apply to every block in the array. If ``func`` accepts ``block_info=`` or ``block_id=`` as keyword arguments, these will be passed dictionaries containing information about input and output chunks/arrays during computation. See examples for details. args : dask arrays or other objects dtype : np.dtype, optional The ``dtype`` of the output array. It is recommended to provide this. If not provided, will be inferred by applying the function to a small set of fake data. chunks : tuple, optional Chunk shape of resulting blocks if the function does not preserve shape. If not provided, the resulting array is assumed to have the same block structure as the first input array. drop_axis : number or iterable, optional Dimensions lost by the function. new_axis : number or iterable, optional New dimensions created by the function. Note that these are applied after ``drop_axis`` (if present). **kwargs : Other keyword arguments to pass to function. Values must be constants (not dask.arrays) See Also -------- dask.array.map_blocks cubed.map_blocks """ raise NotImplementedError() def blockwise( self, func: Callable[..., Any], out_ind: Iterable[Any], *args: Any, # can't type this as mypy assumes args are all same type, but dask blockwise args alternate types adjust_chunks: dict[Any, Callable[..., Any]] | None = None, new_axes: dict[Any, int] | None = None, align_arrays: bool = True, **kwargs: Any, ) -> Any: """ Tensor operation: Generalized inner and outer products. A broad class of blocked algorithms and patterns can be specified with a concise multi-index notation. The ``blockwise`` function applies an in-memory function across multiple blocks of multiple inputs in a variety of ways. Many chunked array operations are special cases of blockwise including elementwise, broadcasting, reductions, tensordot, and transpose. Currently only called explicitly in xarray when performing multidimensional interpolation. 
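Editor's illustrative sketch (an addition, not upstream text) of ``map_blocks`` defined
above, again through the dask-backed manager and assuming ``import numpy as np``:

    >>> from xarray.namedarray.parallelcompat import guess_chunkmanager
    >>> dm = guess_chunkmanager("dask")
    >>> x = dm.from_array(np.arange(4), chunks=2)
    >>> dm.map_blocks(lambda block: block + 1, x).compute()  # -> array([1, 2, 3, 4])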
Parameters ---------- func : callable Function to apply to individual tuples of blocks out_ind : iterable Block pattern of the output, something like 'ijk' or (1, 2, 3) *args : sequence of Array, index pairs You may also pass literal arguments, accompanied by None index e.g. (x, 'ij', y, 'jk', z, 'i', some_literal, None) **kwargs : dict Extra keyword arguments to pass to function adjust_chunks : dict Dictionary mapping index to function to be applied to chunk sizes new_axes : dict, keyword only New indexes and their dimension lengths align_arrays: bool Whether or not to align chunks along equally sized dimensions when multiple arrays are provided. This allows for larger chunks in some arrays to be broken into smaller ones that match chunk sizes in other arrays such that they are compatible for block function mapping. If this is false, then an error will be thrown if arrays do not already have the same number of blocks in each dimension. See Also -------- dask.array.blockwise cubed.core.blockwise """ raise NotImplementedError() def unify_chunks( self, *args: Any, # can't type this as mypy assumes args are all same type, but dask unify_chunks args alternate types **kwargs: Any, ) -> tuple[dict[str, _NormalizedChunks], list[T_ChunkedArray]]: """ Unify chunks across a sequence of arrays. Called by xarray.unify_chunks. Parameters ---------- *args: sequence of Array, index pairs Sequence like (x, 'ij', y, 'jk', z, 'i') See Also -------- dask.array.core.unify_chunks cubed.core.unify_chunks """ raise NotImplementedError() def store( self, sources: T_ChunkedArray | Sequence[T_ChunkedArray], targets: Any, **kwargs: dict[str, Any], ) -> Any: """ Store chunked arrays in array-like objects, overwriting data in target. This stores chunked arrays into object that supports numpy-style setitem indexing (e.g. a Zarr Store). Allows storing values chunk by chunk so that it does not have to fill up memory. For best performance you likely want to align the block size of the storage target with the block size of your array. Used when writing to any registered xarray I/O backend. Parameters ---------- sources: Array or collection of Arrays targets: array-like or collection of array-likes These should support setitem syntax ``target[10:20] = ...``. If sources is a single item, targets must be a single item; if sources is a collection of arrays, targets must be a matching collection. kwargs: Parameters passed to compute/persist (only used if compute=True) See Also -------- dask.array.store cubed.store """ raise NotImplementedError() def get_auto_chunk_size( self, ) -> int: """ Get the default chunk size for a variable. This is used to determine the chunk size when opening a dataset with ``chunks="auto"`` or when rechunking an array with ``chunks="auto"``. Parameters ---------- target_chunksize : int, optional The target chunk size in bytes. If not provided, a default value is used. Returns ------- chunk_size : int The chunk size in bytes. 
""" raise NotImplementedError( "For 'auto' rechunking of cftime arrays, get_auto_chunk_size must be implemented by the chunk manager" ) xarray-2025.12.0/xarray/namedarray/pycompat.py000066400000000000000000000126341511464676000212100ustar00rootroot00000000000000from __future__ import annotations from importlib import import_module from types import ModuleType from typing import TYPE_CHECKING, Any, Literal import numpy as np from packaging.version import Version from xarray.core.utils import is_scalar from xarray.namedarray.utils import is_duck_array, is_duck_dask_array integer_types = (int, np.integer) if TYPE_CHECKING: ModType = Literal["dask", "pint", "cupy", "sparse", "cubed", "numbagg"] DuckArrayTypes = tuple[type[Any], ...] # TODO: improve this? maybe Generic from xarray.namedarray._typing import _DType, _ShapeType, duckarray class DuckArrayModule: """ Solely for internal isinstance and version checks. Motivated by having to only import pint when required (as pint currently imports xarray) https://github.com/pydata/xarray/pull/5561#discussion_r664815718 """ module: ModuleType | None version: Version type: DuckArrayTypes available: bool def __init__(self, mod: ModType) -> None: duck_array_module: ModuleType | None duck_array_version: Version duck_array_type: DuckArrayTypes try: duck_array_module = import_module(mod) duck_array_version = Version(duck_array_module.__version__) if mod == "dask": duck_array_type = (import_module("dask.array").Array,) elif mod == "pint": duck_array_type = (duck_array_module.Quantity,) elif mod == "cupy": duck_array_type = (duck_array_module.ndarray,) elif mod == "sparse": duck_array_type = (duck_array_module.SparseArray,) elif mod == "cubed": duck_array_type = (duck_array_module.Array,) # Not a duck array module, but using this system regardless, to get lazy imports elif mod == "numbagg": duck_array_type = () else: raise NotImplementedError except (ImportError, AttributeError): # pragma: no cover duck_array_module = None duck_array_version = Version("0.0.0") duck_array_type = () self.module = duck_array_module self.version = duck_array_version self.type = duck_array_type self.available = duck_array_module is not None _cached_duck_array_modules: dict[ModType, DuckArrayModule] = {} def _get_cached_duck_array_module(mod: ModType) -> DuckArrayModule: if mod not in _cached_duck_array_modules: duckmod = DuckArrayModule(mod) _cached_duck_array_modules[mod] = duckmod return duckmod else: return _cached_duck_array_modules[mod] def array_type(mod: ModType) -> DuckArrayTypes: """Quick wrapper to get the array class of the module.""" return _get_cached_duck_array_module(mod).type def mod_version(mod: ModType) -> Version: """Quick wrapper to get the version of the module.""" return _get_cached_duck_array_module(mod).version def is_chunked_array(x: duckarray[Any, Any]) -> bool: return is_duck_dask_array(x) or (is_duck_array(x) and hasattr(x, "chunks")) def is_0d_dask_array(x: duckarray[Any, Any]) -> bool: return is_duck_dask_array(x) and is_scalar(x) def to_numpy( data: duckarray[Any, Any], **kwargs: dict[str, Any] ) -> np.ndarray[Any, np.dtype[Any]]: from xarray.core.indexing import ExplicitlyIndexed from xarray.namedarray.parallelcompat import get_chunked_array_type try: # for tests only at the moment return data.to_numpy() # type: ignore[no-any-return,union-attr] except AttributeError: pass if isinstance(data, ExplicitlyIndexed): data = data.get_duck_array() # type: ignore[no-untyped-call] # TODO first attempt to call .to_numpy() once some libraries implement it if 
is_chunked_array(data): chunkmanager = get_chunked_array_type(data) data, *_ = chunkmanager.compute(data, **kwargs) if isinstance(data, array_type("cupy")): data = data.get() # pint has to be imported dynamically as pint imports xarray if isinstance(data, array_type("pint")): data = data.magnitude if isinstance(data, array_type("sparse")): data = data.todense() data = np.asarray(data) return data def to_duck_array(data: Any, **kwargs: dict[str, Any]) -> duckarray[_ShapeType, _DType]: from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, ) from xarray.namedarray.parallelcompat import get_chunked_array_type if is_chunked_array(data): chunkmanager = get_chunked_array_type(data) loaded_data, *_ = chunkmanager.compute(data, **kwargs) # type: ignore[var-annotated] return loaded_data if isinstance(data, ExplicitlyIndexed | ImplicitToExplicitIndexingAdapter): return data.get_duck_array() # type: ignore[no-untyped-call, no-any-return] elif is_duck_array(data): return data else: return np.asarray(data) # type: ignore[return-value] async def async_to_duck_array( data: Any, **kwargs: dict[str, Any] ) -> duckarray[_ShapeType, _DType]: from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, ) if isinstance(data, ExplicitlyIndexed | ImplicitToExplicitIndexingAdapter): return await data.async_get_duck_array() # type: ignore[union-attr, no-any-return] else: return to_duck_array(data, **kwargs) xarray-2025.12.0/xarray/namedarray/utils.py000066400000000000000000000243651511464676000205200ustar00rootroot00000000000000from __future__ import annotations import importlib import itertools import sys import warnings from collections.abc import Hashable, Iterable, Iterator, Mapping from functools import lru_cache from numbers import Number from typing import TYPE_CHECKING, Any, TypeVar, cast import numpy as np from packaging.version import Version from xarray.namedarray._typing import ErrorOptionsWithWarn, _DimsLike if TYPE_CHECKING: from typing import TypeGuard from numpy.typing import NDArray try: from dask.array.core import Array as DaskArray from dask.typing import DaskCollection except ImportError: DaskArray = NDArray # type: ignore[assignment, misc] DaskCollection: Any = NDArray # type: ignore[no-redef] from xarray.core.types import T_ChunkDim from xarray.namedarray._typing import DuckArray, _Dim, duckarray from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") @lru_cache def module_available(module: str, minversion: str | None = None) -> bool: """Checks whether a module is installed without importing it. Use this for a lightweight check and lazy imports. Parameters ---------- module : str Name of the module. minversion : str, optional Minimum version of the module Returns ------- available : bool Whether the module is installed. 
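Editor's illustrative sketch (an addition, not upstream text); the second call only
returns True when a sufficiently new dask happens to be installed:

    >>> from xarray.namedarray.utils import module_available
    >>> module_available("numpy")  # -> True
    >>> module_available("dask", minversion="2024.08.1")  # -> True or False, version dependent
    >>> module_available("definitely_not_installed")  # -> False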
""" if importlib.util.find_spec(module) is None: return False if minversion is not None: version = importlib.metadata.version(module) return Version(version) >= Version(minversion) return True def is_dask_collection(x: object) -> TypeGuard[DaskCollection]: if module_available("dask"): from dask.base import is_dask_collection # use is_dask_collection function instead of dask.typing.DaskCollection # see https://github.com/pydata/xarray/pull/8241#discussion_r1476276023 return is_dask_collection(x) return False def is_duck_array(value: Any) -> TypeGuard[duckarray[Any, Any]]: # TODO: replace is_duck_array with runtime checks via _arrayfunction_or_api protocol on # python 3.12 and higher (see https://github.com/pydata/xarray/issues/8696#issuecomment-1924588981) if isinstance(value, np.ndarray): return True return ( hasattr(value, "ndim") and hasattr(value, "shape") and hasattr(value, "dtype") and ( (hasattr(value, "__array_function__") and hasattr(value, "__array_ufunc__")) or hasattr(value, "__array_namespace__") ) ) def is_duck_dask_array(x: duckarray[Any, Any]) -> TypeGuard[DaskArray]: return is_duck_array(x) and is_dask_collection(x) def to_0d_object_array( value: object, ) -> NDArray[np.object_]: """Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.""" result = np.empty((), dtype=object) result[()] = value return result def is_dict_like(value: Any) -> TypeGuard[Mapping[Any, Any]]: return hasattr(value, "keys") and hasattr(value, "__getitem__") def drop_missing_dims( supplied_dims: Iterable[_Dim], dims: Iterable[_Dim], missing_dims: ErrorOptionsWithWarn, ) -> _DimsLike: """Depending on the setting of missing_dims, drop any dimensions from supplied_dims that are not present in dims. Parameters ---------- supplied_dims : Iterable of Hashable dims : Iterable of Hashable missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": supplied_dims_set = {val for val in supplied_dims if val is not ...} if invalid := supplied_dims_set - set(dims): raise ValueError( f"Dimensions {invalid} do not exist. Expected one or more of {dims}" ) return supplied_dims elif missing_dims == "warn": if invalid := set(supplied_dims) - set(dims): warnings.warn( f"Dimensions {invalid} do not exist. Expected one or more of {dims}", stacklevel=2, ) return [val for val in supplied_dims if val in dims or val is ...] elif missing_dims == "ignore": return [val for val in supplied_dims if val in dims or val is ...] else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" ) def infix_dims( dims_supplied: Iterable[_Dim], dims_all: Iterable[_Dim], missing_dims: ErrorOptionsWithWarn = "raise", ) -> Iterator[_Dim]: """ Resolves a supplied list containing an ellipsis representing other items, to a generator with the 'realized' list of all items """ if ... in dims_supplied: dims_all_list = list(dims_all) if len(set(dims_all)) != len(dims_all_list): raise ValueError("Cannot use ellipsis with repeated dims") if list(dims_supplied).count(...) 
> 1: raise ValueError("More than one ellipsis supplied") other_dims = [d for d in dims_all if d not in dims_supplied] existing_dims = drop_missing_dims(dims_supplied, dims_all, missing_dims) for d in existing_dims: if d is ...: yield from other_dims else: yield d else: existing_dims = drop_missing_dims(dims_supplied, dims_all, missing_dims) if set(existing_dims) ^ set(dims_all): raise ValueError( f"{dims_supplied} must be a permuted list of {dims_all}, unless `...` is included" ) yield from existing_dims def either_dict_or_kwargs( pos_kwargs: Mapping[Any, T] | None, kw_kwargs: Mapping[str, T], func_name: str, ) -> Mapping[Hashable, T]: if pos_kwargs is None or pos_kwargs == {}: # Need an explicit cast to appease mypy due to invariance; see # https://github.com/python/mypy/issues/6228 return cast(Mapping[Hashable, T], kw_kwargs) if not is_dict_like(pos_kwargs): raise ValueError(f"the first argument to .{func_name} must be a dictionary") if kw_kwargs: raise ValueError( f"cannot specify both keyword and positional arguments to .{func_name}" ) return pos_kwargs def _get_chunk( # type: ignore[no-untyped-def] data: DuckArray[Any], chunks, chunkmanager: ChunkManagerEntrypoint[Any], *, preferred_chunks, dims=None, ) -> Mapping[Any, T_ChunkDim]: """ Return map from each dim to chunk sizes, accounting for backend's preferred chunks. """ from xarray.core.common import _contains_cftime_datetimes from xarray.core.utils import emit_user_level_warning from xarray.structure.chunks import _get_breaks_cached dims = chunks.keys() if dims is None else dims shape = data.shape # Determine the explicit requested chunks. preferred_chunk_shape = tuple( itertools.starmap(preferred_chunks.get, zip(dims, shape, strict=True)) ) if isinstance(chunks, Number) or (chunks == "auto"): chunks = dict.fromkeys(dims, chunks) chunk_shape = tuple( chunks.get(dim, None) or preferred_chunk_sizes for dim, preferred_chunk_sizes in zip(dims, preferred_chunk_shape, strict=True) ) limit: int | None if _contains_cftime_datetimes(data): limit, dtype = fake_target_chunksize(data, chunkmanager.get_auto_chunk_size()) else: limit = None dtype = data.dtype chunk_shape = chunkmanager.normalize_chunks( chunk_shape, shape=shape, dtype=dtype, limit=limit, previous_chunks=preferred_chunk_shape, ) # Warn where requested chunks break preferred chunks, provided that the variable # contains data. if data.size: # type: ignore[unused-ignore,attr-defined] # DuckArray protocol doesn't include 'size' - should it? for dim, size, chunk_sizes in zip(dims, shape, chunk_shape, strict=True): if preferred_chunk_sizes := preferred_chunks.get(dim): disagreement = _get_breaks_cached( size=size, chunk_sizes=chunk_sizes, preferred_chunk_sizes=preferred_chunk_sizes, ) if disagreement: emit_user_level_warning( "The specified chunks separate the stored chunks along " f'dimension "{dim}" starting at index {disagreement}. This could ' "degrade performance. Instead, consider rechunking after loading.", ) return dict(zip(dims, chunk_shape, strict=True)) def fake_target_chunksize( data: DuckArray[Any], limit: int, ) -> tuple[int, np.dtype[Any]]: """ The `normalize_chunks` algorithm takes a size `limit` in bytes, but will not work for object dtypes. So we rescale the `limit` to an appropriate one based on `float64` dtype, and pass that to `normalize_chunks`. Arguments --------- data : Variable or ChunkedArray The data for which we want to determine chunk sizes. limit : int The target chunk size in bytes. Passed to the chunk manager's `normalize_chunks` method. 
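    Notes
    -----
    A rough worked example of the rescaling, assuming ``sys.getsizeof`` reports
    about 64 bytes for a single cftime object: a requested ``limit`` of
    134_217_728 bytes (128 MiB) becomes ``int(134_217_728 * 8 / 64)`` =
    16_777_216 bytes (16 MiB), i.e. the float64-equivalent budget that is then
    handed to ``normalize_chunks``.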
""" # Short circuit for non-object dtypes from xarray.core.common import _contains_cftime_datetimes if not _contains_cftime_datetimes(data): return limit, data.dtype from xarray.core.formatting import first_n_items output_dtype = np.dtype(np.float64) nbytes_approx: int = sys.getsizeof(first_n_items(data, 1)) # type: ignore[no-untyped-call] f64_nbytes = output_dtype.itemsize limit = int(limit * (f64_nbytes / nbytes_approx)) return limit, output_dtype class ReprObject: """Object that prints as the given value, for use with sentinel values.""" __slots__ = ("_value",) _value: str def __init__(self, value: str): self._value = value def __repr__(self) -> str: return self._value def __eq__(self, other: ReprObject | Any) -> bool: # TODO: What type can other be? ArrayLike? return self._value == other._value if isinstance(other, ReprObject) else False def __hash__(self) -> int: return hash((type(self), self._value)) def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token((type(self), self._value)) xarray-2025.12.0/xarray/plot/000077500000000000000000000000001511464676000156275ustar00rootroot00000000000000xarray-2025.12.0/xarray/plot/__init__.py000066400000000000000000000010761511464676000177440ustar00rootroot00000000000000""" Use this module directly: import xarray.plot as xplt Or use the methods on a DataArray or Dataset: DataArray.plot._____ Dataset.plot._____ """ from xarray.plot.dataarray_plot import ( contour, contourf, hist, imshow, line, pcolormesh, plot, step, surface, ) from xarray.plot.dataset_plot import scatter from xarray.plot.facetgrid import FacetGrid __all__ = [ "FacetGrid", "contour", "contourf", "hist", "imshow", "line", "pcolormesh", "plot", "scatter", "step", "surface", ] xarray-2025.12.0/xarray/plot/accessor.py000066400000000000000000001243151511464676000200110ustar00rootroot00000000000000from __future__ import annotations import functools from collections.abc import Hashable, Iterable from typing import TYPE_CHECKING, Any, Literal, NoReturn, overload import numpy as np # Accessor methods have the same name as plotting methods, so we need a different namespace from xarray.plot import dataarray_plot, dataset_plot if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.collections import LineCollection, PathCollection, QuadMesh from matplotlib.colors import Normalize from matplotlib.container import BarContainer from matplotlib.contour import QuadContourSet from matplotlib.image import AxesImage from matplotlib.patches import Polygon from matplotlib.quiver import Quiver from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import AspectOptions, HueStyleOptions, ScaleOptions from xarray.plot.facetgrid import FacetGrid class DataArrayPlotAccessor: """ Enables use of xarray.plot functions as attributes on a DataArray. 
For example, DataArray.plot.imshow """ _da: DataArray __slots__ = ("_da",) __doc__ = dataarray_plot.plot.__doc__ def __init__(self, darray: DataArray) -> None: self._da = darray # Should return Any such that the user does not run into problems # with the many possible return values @functools.wraps(dataarray_plot.plot, assigned=("__doc__", "__annotations__")) def __call__(self, **kwargs) -> Any: return dataarray_plot.plot(self._da, **kwargs) @functools.wraps(dataarray_plot.hist) def hist( self, *args, **kwargs ) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]: return dataarray_plot.hist(self._da, *args, **kwargs) @overload def line( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D]: ... @overload def line( self, *args: Any, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def line( self, *args: Any, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.line, assigned=("__doc__",)) def line(self, *args, **kwargs) -> list[Line3D] | FacetGrid[DataArray]: return dataarray_plot.line(self._da, *args, **kwargs) @overload def step( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive **kwargs: Any, ) -> list[Line3D]: ... @overload def step( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def step( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.step, assigned=("__doc__",)) def step(self, *args, **kwargs) -> list[Line3D] | FacetGrid[DataArray]: return dataarray_plot.step(self._da, *args, **kwargs) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> PathCollection: ... @overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> FacetGrid[DataArray]: ... 
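# Illustrative usage sketch (assumed toy data; not part of the accessor
# implementation). Each accessor method simply forwards to the matching
# function in xarray.plot.dataarray_plot, and ``da.plot()`` dispatches on
# dimensionality (1D -> line, 2D -> pcolormesh, otherwise hist):
#
#     import xarray as xr
#     da = xr.DataArray([0.0, 1.0, 4.0], dims="time")  # assumed toy data
#     da.plot()                  # 1D -> same as da.plot.line()
#     xr.plot.line(da)           # module-level equivalent of the accessor call
#     da.plot.step(where="mid")  # step variant, forwarded the same way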
@overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.scatter, assigned=("__doc__",)) def scatter(self, *args, **kwargs) -> PathCollection | FacetGrid[DataArray]: return dataarray_plot.scatter(self._da, *args, **kwargs) @overload def imshow( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> AxesImage: ... @overload def imshow( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def imshow( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.imshow, assigned=("__doc__",)) def imshow(self, *args, **kwargs) -> AxesImage | FacetGrid[DataArray]: return dataarray_plot.imshow(self._da, *args, **kwargs) @overload def contour( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contour( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def contour( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.contour, assigned=("__doc__",)) def contour(self, *args, **kwargs) -> QuadContourSet | FacetGrid[DataArray]: return dataarray_plot.contour(self._da, *args, **kwargs) @overload def contourf( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contourf( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def contourf( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... @functools.wraps(dataarray_plot.contourf, assigned=("__doc__",)) def contourf(self, *args, **kwargs) -> QuadContourSet | FacetGrid[DataArray]: return dataarray_plot.contourf(self._da, *args, **kwargs) @overload def pcolormesh( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadMesh: ... @overload def pcolormesh( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def pcolormesh( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.pcolormesh, assigned=("__doc__",)) def pcolormesh(self, *args, **kwargs) -> QuadMesh | FacetGrid[DataArray]: return dataarray_plot.pcolormesh(self._da, *args, **kwargs) @overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Poly3DCollection: ... @overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... 
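# Illustrative usage sketch (assumed toy data). The 2D methods above
# (imshow, contour, contourf, pcolormesh, surface) all follow the same
# overload pattern: without ``row``/``col`` they return a single matplotlib
# artist, while passing either one returns a FacetGrid of small multiples:
#
#     import numpy as np
#     import xarray as xr
#     da = xr.DataArray(np.random.rand(3, 4, 5), dims=("time", "y", "x"))
#     da.isel(time=0).plot.pcolormesh(x="x", y="y")  # -> QuadMesh
#     da.plot.pcolormesh(x="x", y="y", col="time")   # -> FacetGrid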
@overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... @functools.wraps(dataarray_plot.surface, assigned=("__doc__",)) def surface(self, *args, **kwargs) -> Poly3DCollection: return dataarray_plot.surface(self._da, *args, **kwargs) class DatasetPlotAccessor: """ Enables use of xarray.plot functions as attributes on a Dataset. For example, Dataset.plot.scatter """ _ds: Dataset __slots__ = ("_ds",) def __init__(self, dataset: Dataset) -> None: self._ds = dataset def __call__(self, *args, **kwargs) -> NoReturn: raise ValueError( "Dataset.plot cannot be called directly. Use " "an explicit plot method, e.g. ds.plot.scatter(...)" ) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> PathCollection: ... 
@overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.scatter, assigned=("__doc__",)) def scatter(self, *args, **kwargs) -> PathCollection | FacetGrid[Dataset]: return dataset_plot.scatter(self._ds, *args, **kwargs) @overload def quiver( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> Quiver: ... 
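# Illustrative usage sketch (hypothetical variable names). Unlike
# DataArray.plot, Dataset.plot cannot be called directly; an explicit method
# is chosen and the data variables for each role are named:
#
#     ds.plot.scatter(x="A", y="B", hue="w")               # -> PathCollection
#     ds.plot.quiver(x="x", y="y", u="u", v="v")           # -> Quiver
#     ds.plot.quiver(x="x", y="y", u="u", v="v", col="t")  # -> FacetGrid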
@overload def quiver( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def quiver( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.quiver, assigned=("__doc__",)) def quiver(self, *args, **kwargs) -> Quiver | FacetGrid[Dataset]: return dataset_plot.quiver(self._ds, *args, **kwargs) @overload def streamplot( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> LineCollection: ... 
@overload def streamplot( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def streamplot( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.streamplot, assigned=("__doc__",)) def streamplot(self, *args, **kwargs) -> LineCollection | FacetGrid[Dataset]: return dataset_plot.streamplot(self._ds, *args, **kwargs) xarray-2025.12.0/xarray/plot/dataarray_plot.py000066400000000000000000002511421511464676000212140ustar00rootroot00000000000000from __future__ import annotations import functools import warnings from collections.abc import Callable, Hashable, Iterable, MutableMapping from typing import TYPE_CHECKING, Any, Literal, Union, cast, overload import numpy as np import pandas as pd from xarray.core.utils import attempt_import from xarray.plot.facetgrid import _easy_facetgrid from xarray.plot.utils import ( _LINEWIDTH_RANGE, _MARKERSIZE_RANGE, _add_colorbar, _add_legend, _assert_valid_xy, _determine_guide, _ensure_plottable, _guess_coords_to_plot, _infer_interval_breaks, _infer_xy_labels, _Normalize, _process_cmap_cbar_kwargs, _rescale_imshow_rgb, _resolve_intervals_1dplot, _resolve_intervals_2dplot, _set_concise_date, _update_axes, get_axis, label_from_attrs, ) from xarray.structure.alignment import broadcast from xarray.structure.concat import concat if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.collections import PathCollection, QuadMesh from matplotlib.colors import Colormap, Normalize from matplotlib.container import BarContainer from matplotlib.contour import QuadContourSet from matplotlib.image import AxesImage from matplotlib.patches import Polygon from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.types import ( AspectOptions, ExtendOptions, HueStyleOptions, ScaleOptions, T_DataArray, ) from xarray.plot.facetgrid import FacetGrid _styles: dict[str, Any] = { # Add a white border 
to make it easier seeing overlapping markers: "scatter.edgecolors": "w", } def _infer_line_data( darray: DataArray, x: Hashable | None, y: Hashable | None, hue: Hashable | None ) -> tuple[DataArray, DataArray, DataArray | None, str]: ndims = len(darray.dims) if x is not None and y is not None: raise ValueError("Cannot specify both x and y kwargs for line plots.") if x is not None: _assert_valid_xy(darray, x, "x") if y is not None: _assert_valid_xy(darray, y, "y") if ndims == 1: huename = None hueplt = None huelabel = "" if x is not None: xplt = darray[x] yplt = darray elif y is not None: xplt = darray yplt = darray[y] else: # Both x & y are None dim = darray.dims[0] xplt = darray[dim] yplt = darray else: if x is None and y is None and hue is None: raise ValueError("For 2D inputs, please specify either hue, x or y.") if y is None: if hue is not None: _assert_valid_xy(darray, hue, "hue") xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue) xplt = darray[xname] if xplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] yplt = darray.transpose(otherdim, huename, transpose_coords=False) xplt = xplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (xdim,) = darray[xname].dims (huedim,) = darray[huename].dims yplt = darray.transpose(xdim, huedim) else: yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue) yplt = darray[yname] if yplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] xplt = darray.transpose(otherdim, huename, transpose_coords=False) yplt = yplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (ydim,) = darray[yname].dims (huedim,) = darray[huename].dims xplt = darray.transpose(ydim, huedim) huelabel = label_from_attrs(darray[huename]) hueplt = darray[huename] return xplt, yplt, hueplt, huelabel def _prepare_plot1d_data( darray: T_DataArray, coords_to_plot: MutableMapping[str, Hashable], plotfunc_name: str | None = None, _is_facetgrid: bool = False, ) -> dict[str, T_DataArray]: """ Prepare data for usage with plt.scatter. Parameters ---------- darray : T_DataArray Base DataArray. coords_to_plot : MutableMapping[str, Hashable] Coords that will be plotted. plotfunc_name : str | None Name of the plotting function that will be used. Returns ------- plts : dict[str, T_DataArray] Dict of DataArrays that will be sent to matplotlib. Examples -------- >>> # Make sure int coords are plotted: >>> a = xr.DataArray( ... data=[1, 2], ... coords={1: ("x", [0, 1], {"units": "s"})}, ... dims=("x",), ... name="a", ... ) >>> plts = xr.plot.dataarray_plot._prepare_plot1d_data( ... a, coords_to_plot={"x": 1, "z": None, "hue": None, "size": None} ... ) >>> # Check which coords to plot: >>> print({k: v.name for k, v in plts.items()}) {'y': 'a', 'x': 1} """ # If there are more than 1 dimension in the array than stack all the # dimensions so the plotter can plot anything: if darray.ndim > 1: # When stacking dims the lines will continue connecting. 
For floats # this can be solved by adding a nan element in between the flattening # points: dims_T = [] if np.issubdtype(darray.dtype, np.floating): for v in ["z", "x"]: dim = coords_to_plot.get(v, None) if (dim is not None) and (dim in darray.dims): darray_nan = np.nan * darray.isel({dim: -1}) darray = concat( [darray, darray_nan], dim=dim, coords="minimal", compat="override", join="exact", ) dims_T.append(coords_to_plot[v]) # Lines should never connect to the same coordinate when stacked, # transpose to avoid this as much as possible: darray = darray.transpose(..., *dims_T) # Array is now ready to be stacked: darray = darray.stack(_stacked_dim=darray.dims) # Broadcast together all the chosen variables: plts = dict(y=darray) plts.update( {k: darray.coords[v] for k, v in coords_to_plot.items() if v is not None} ) plts = dict(zip(plts.keys(), broadcast(*(plts.values())), strict=True)) return plts # return type is Any due to the many different possibilities def plot( darray: DataArray, *, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, hue: Hashable | None = None, subplot_kws: dict[str, Any] | None = None, **kwargs: Any, ) -> Any: """ Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`. Calls xarray plotting function based on the dimensions of the squeezed DataArray. =============== =========================== Dimensions Plotting function =============== =========================== 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Parameters ---------- darray : DataArray row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int or None, optional Use together with ``col`` to wrap faceted plots. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size``, ``figsize`` and facets. hue : Hashable or None, optional If passed, make faceted line plots with hue on this dimension name. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). **kwargs : optional Additional keyword arguments for Matplotlib. See Also -------- xarray.DataArray.squeeze """ darray = darray.squeeze( d for d, s in darray.sizes.items() if s == 1 and d not in (row, col, hue) ).compute() plot_dims = set(darray.dims) plot_dims.discard(row) plot_dims.discard(col) plot_dims.discard(hue) ndims = len(plot_dims) plotfunc: Callable if ndims == 0 or darray.size == 0: raise TypeError("No numeric data to plot.") if ndims in (1, 2): if row or col: kwargs["subplot_kws"] = subplot_kws kwargs["row"] = row kwargs["col"] = col kwargs["col_wrap"] = col_wrap if ndims == 1: plotfunc = line kwargs["hue"] = hue elif ndims == 2: if hue: plotfunc = line kwargs["hue"] = hue else: plotfunc = pcolormesh kwargs["subplot_kws"] = subplot_kws else: if row or col or hue: raise ValueError( "Only 1d and 2d plots are supported for facets in xarray. " "See the package `Seaborn` for more options." 
) plotfunc = hist kwargs["ax"] = ax return plotfunc(darray, **kwargs) @overload def line( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D]: ... @overload def line( darray: T_DataArray, *args: Any, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def line( darray: T_DataArray, *args: Any, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... # This function signature should not change so that it can use # matplotlib format strings def line( darray: T_DataArray, *args: Any, row: Hashable | None = None, col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D] | FacetGrid[T_DataArray]: """ Line plot of DataArray values. Wraps :py:func:`matplotlib:matplotlib.pyplot.plot`. Parameters ---------- darray : DataArray Either 1D or 2D. If 2D, one of ``hue``, ``x`` or ``y`` must be provided. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. 
aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axes on which to plot. By default, the current is used. Mutually exclusive with ``size`` and ``figsize``. hue : Hashable, optional Dimension or coordinate for which you want multiple lines plotted. If plotting against a 2D coordinate, ``hue`` must be a dimension. x, y : Hashable, optional Dimension, coordinate or multi-index level for *x*, *y* axis. Only one of these may be specified. The other will be used for values from the DataArray on which this plot method is called. xincrease : bool or None, optional Should the values on the *x* axis be increasing from left to right? if ``None``, use the default for the Matplotlib function. yincrease : bool or None, optional Should the values on the *y* axis be increasing from top to bottom? if ``None``, use the default for the Matplotlib function. xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional Specifies scaling for the *x*- and *y*-axis, respectively. xticks, yticks : array-like, optional Specify tick locations for *x*- and *y*-axis. xlim, ylim : tuple[float, float], optional Specify *x*- and *y*-axis limits. add_legend : bool, default: True Add legend with *y* axis coordinates (2D inputs only). *args, **kwargs : optional Additional arguments to :py:func:`matplotlib:matplotlib.pyplot.plot`. Returns ------- primitive : list of Line3D or FacetGrid When either col or row is given, returns a FacetGrid, otherwise a list of matplotlib Line3D objects. """ # Handle facetgrids first if row or col: allargs = locals().copy() allargs.update(allargs.pop("kwargs")) allargs.pop("darray") return _easy_facetgrid(darray, line, kind="line", **allargs) ndims = len(darray.dims) if ndims == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") if ndims > 2: raise ValueError( "Line plots are for 1- or 2-dimensional DataArrays. " f"Passed DataArray has {ndims} " "dimensions" ) # The allargs dict passed to _easy_facetgrid above contains args if args == (): args = kwargs.pop("args", ()) else: assert "args" not in kwargs ax = get_axis(figsize, size, aspect, ax) xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue) # Remove pd.Intervals if contained in xplt.values and/or yplt.values. 
xplt_val, yplt_val, x_suffix, y_suffix, kwargs = _resolve_intervals_1dplot( xplt.to_numpy(), yplt.to_numpy(), kwargs ) xlabel = label_from_attrs(xplt, extra=x_suffix) ylabel = label_from_attrs(yplt, extra=y_suffix) _ensure_plottable(xplt_val, yplt_val) primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs) if _labels: if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.set_title(darray._title_for_slice()) if darray.ndim == 2 and add_legend: assert hueplt is not None ax.legend(handles=primitive, labels=list(hueplt.to_numpy()), title=hue_label) if isinstance(xplt.dtype, np.dtype) and np.issubdtype(xplt.dtype, np.datetime64): # type: ignore[redundant-expr] _set_concise_date(ax, axis="x") _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return primitive @overload def step( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive **kwargs: Any, ) -> list[Line3D]: ... @overload def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid **kwargs: Any, ) -> FacetGrid[DataArray]: ... def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable | None = None, **kwargs: Any, ) -> list[Line3D] | FacetGrid[DataArray]: """ Step plot of DataArray values. Similar to :py:func:`matplotlib:matplotlib.pyplot.step`. Parameters ---------- where : {'pre', 'post', 'mid'}, default: 'pre' Define where the steps should be placed: - ``'pre'``: The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. - ``'post'``: The y value is continued constantly to the right from every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - ``'mid'``: Steps occur half-way between the *x* positions. Note that this parameter is ignored if one coordinate consists of :py:class:`pandas.Interval` values, e.g. as a result of :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. drawstyle, ds : str or None, optional Additional drawstyle. Only use one of drawstyle and ds. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. *args, **kwargs : optional Additional arguments for :py:func:`xarray.plot.line`. Returns ------- primitive : list of Line3D or FacetGrid When either col or row is given, returns a FacetGrid, otherwise a list of matplotlib Line3D objects. 
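    Examples
    --------
    Illustrative sketch only (requires matplotlib; output not shown):

    >>> import xarray as xr
    >>> da = xr.DataArray([1, 2, 3], dims="x")
    >>> xr.plot.step(da, where="mid")  # doctest: +SKIP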
""" if where not in {"pre", "post", "mid"}: raise ValueError("'where' argument to step must be 'pre', 'post' or 'mid'") if ds is not None: if drawstyle is None: drawstyle = ds else: raise TypeError("ds and drawstyle are mutually exclusive") if drawstyle is None: drawstyle = "" drawstyle = "steps-" + where + drawstyle return line(darray, *args, drawstyle=drawstyle, col=col, row=row, **kwargs) def hist( darray: DataArray, *args: Any, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, **kwargs: Any, ) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]: """ Histogram of DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.hist`. Plots *N*-dimensional arrays by first flattening the array. Parameters ---------- darray : DataArray Can have any number of dimensions. figsize : Iterable of float, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size`` and ``figsize``. xincrease : bool or None, optional Should the values on the *x* axis be increasing from left to right? if ``None``, use the default for the Matplotlib function. yincrease : bool or None, optional Should the values on the *y* axis be increasing from top to bottom? if ``None``, use the default for the Matplotlib function. xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional Specifies scaling for the *x*- and *y*-axis, respectively. xticks, yticks : array-like, optional Specify tick locations for *x*- and *y*-axis. xlim, ylim : tuple[float, float], optional Specify *x*- and *y*-axis limits. **kwargs : optional Additional keyword arguments to :py:func:`matplotlib:matplotlib.pyplot.hist`. """ assert len(args) == 0 if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") ax = get_axis(figsize, size, aspect, ax) no_nan_arr = np.ravel(darray.to_numpy()) no_nan = no_nan_arr[pd.notnull(no_nan_arr)] n, bins, patches = cast( tuple[np.ndarray, np.ndarray, Union["BarContainer", "Polygon"]], ax.hist(no_nan, **kwargs), ) ax.set_title(darray._title_for_slice()) ax.set_xlabel(label_from_attrs(darray)) _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return n, bins, patches def _plot1d(plotfunc): """Decorator for common 1d plotting logic.""" commondoc = """ Parameters ---------- darray : DataArray Must be 2 dimensional, unless creating faceted plots. x : Hashable or None, optional Coordinate for x axis. If None use darray.dims[1]. y : Hashable or None, optional Coordinate for y axis. If None use darray.dims[0]. z : Hashable or None, optional If specified plot 3D and use this coordinate for *z* axis. hue : Hashable or None, optional Dimension or coordinate for which you want multiple lines plotted. 
markersize: Hashable or None, optional scatter only. Variable by which to vary size of scattered points. linewidth: Hashable or None, optional Variable by which to vary linewidth. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots ax : matplotlib axes object, optional If None, uses the current axis. Not applicable when using facets. figsize : Iterable[float] or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. xincrease : bool or None, default: True Should the values on the x axes be increasing from left to right? if None, use the default for the matplotlib function. yincrease : bool or None, default: True Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. add_legend : bool or None, optional If True use xarray metadata to add a legend. add_colorbar : bool or None, optional If True add a colorbar. add_labels : bool or None, optional If True use xarray metadata to label axes add_title : bool or None, optional If True use xarray metadata to add a title subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies to FacetGrid plotting. xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the x-axes. yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the y-axes. xticks : ArrayLike or None, optional Specify tick locations for x-axes. yticks : ArrayLike or None, optional Specify tick locations for y-axes. xlim : tuple[float, float] or None, optional Specify x-axes limits. ylim : tuple[float, float] or None, optional Specify y-axes limits. cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. Either a Matplotlib colormap name or object. If not provided, this will be either ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette, ``levels`` must also be specified. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. 
norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. **kwargs : optional Additional arguments to wrapped matplotlib function Returns ------- artist : The same type of primitive artist that the wrapped matplotlib function returns """ # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> Any: # All 1d plots in xarray share this function signature. # Method signature below should be consistent. if TYPE_CHECKING: import matplotlib.pyplot as plt else: plt = attempt_import("matplotlib.pyplot") if subplot_kws is None: subplot_kws = dict() # Handle facetgrids first if row or col: if z is not None: subplot_kws.update(projection="3d") allargs = locals().copy() allargs.update(allargs.pop("kwargs")) allargs.pop("darray") allargs.pop("plt") allargs["plotfunc"] = globals()[plotfunc.__name__] return _easy_facetgrid(darray, kind="plot1d", **allargs) if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") # The allargs dict passed to _easy_facetgrid above contains args if args == (): args = kwargs.pop("args", ()) if args: assert "args" not in kwargs # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: assert z is None z = args[2] if len(args) > 3: assert hue is None hue = args[3] if len(args) > 4: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args if hue_style is not None: # TODO: Not used since 2022.10. Deprecated since 2023.07. 
warnings.warn( ( "hue_style is no longer used for plot1d plots " "and the argument will eventually be removed. " "Convert numbers to string for a discrete hue " "and use add_legend or add_colorbar to control which guide to display." ), DeprecationWarning, stacklevel=2, ) _is_facetgrid = kwargs.pop("_is_facetgrid", False) if plotfunc.__name__ == "scatter": size_ = kwargs.pop("_size", markersize) size_r = _MARKERSIZE_RANGE # Remove any nulls, .where(m, drop=True) doesn't work when m is # a dask array, so load the array to memory. # It will have to be loaded to memory at some point anyway: darray = darray.compute() darray = darray.where(darray.notnull(), drop=True) else: size_ = kwargs.pop("_size", linewidth) size_r = _LINEWIDTH_RANGE # Get data to plot: coords_to_plot: MutableMapping[str, Hashable | None] = dict( x=x, z=z, hue=hue, size=size_ ) if not _is_facetgrid: # Guess what coords to use if some of the values in coords_to_plot are None: coords_to_plot = _guess_coords_to_plot(darray, coords_to_plot, kwargs) plts = _prepare_plot1d_data(darray, coords_to_plot, plotfunc.__name__) xplt = plts.pop("x", None) yplt = plts.pop("y", None) zplt = plts.pop("z", None) kwargs.update(zplt=zplt) hueplt = plts.pop("hue", None) sizeplt = plts.pop("size", None) # Handle size and hue: hueplt_norm = _Normalize(data=hueplt) kwargs.update(hueplt=hueplt_norm.values) sizeplt_norm = _Normalize( data=sizeplt, width=size_r, _is_facetgrid=_is_facetgrid ) kwargs.update(sizeplt=sizeplt_norm.values) cmap_params_subset = kwargs.pop("cmap_params_subset", {}) cbar_kwargs = kwargs.pop("cbar_kwargs", {}) if hueplt_norm.data is not None: if not hueplt_norm.data_is_numeric: # Map hue values back to its original value: cbar_kwargs.update(format=hueplt_norm.format, ticks=hueplt_norm.ticks) levels = kwargs.get("levels", hueplt_norm.levels) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, cast("DataArray", hueplt_norm.values).data, **locals(), ) # subset that can be passed to scatter, hist2d if not cmap_params_subset: ckw = {vv: cmap_params[vv] for vv in ("vmin", "vmax", "norm", "cmap")} cmap_params_subset.update(**ckw) with plt.rc_context(_styles): # type: ignore[arg-type, unused-ignore] if z is not None: import mpl_toolkits if ax is None: subplot_kws.update(projection="3d") ax = get_axis(figsize, size, aspect, ax, **subplot_kws) assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) # Using 30, 30 minimizes rotation of the plot. 
Making it easier to # build on your intuition from 2D plots: ax.view_init(azim=30, elev=30, vertical_axis="y") else: ax = get_axis(figsize, size, aspect, ax, **subplot_kws) primitive = plotfunc( xplt, yplt, ax=ax, add_labels=add_labels, **cmap_params_subset, **kwargs, ) if np.any(np.asarray(add_labels)) and add_title: ax.set_title(darray._title_for_slice()) add_colorbar_, add_legend_ = _determine_guide( hueplt_norm, sizeplt_norm, add_colorbar, add_legend, plotfunc_name=plotfunc.__name__, ) if add_colorbar_: if "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(hueplt_norm.data) _add_colorbar( primitive, ax, kwargs.get("cbar_ax"), cbar_kwargs, cmap_params ) if add_legend_: if plotfunc.__name__ in ["scatter", "line"]: _add_legend( ( hueplt_norm if add_legend or not add_colorbar_ else _Normalize(None) ), sizeplt_norm, primitive, legend_ax=ax, plotfunc=plotfunc.__name__, ) else: hueplt_norm_values: list[np.ndarray | None] if hueplt_norm.data is not None: hueplt_norm_values = list(hueplt_norm.data.to_numpy()) else: hueplt_norm_values = [hueplt_norm.data] if plotfunc.__name__ == "hist": ax.legend( handles=primitive[-1], labels=hueplt_norm_values, title=label_from_attrs(hueplt_norm.data), ) else: ax.legend( handles=primitive, labels=hueplt_norm_values, title=label_from_attrs(hueplt_norm.data), ) _update_axes( ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim ) return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc def _add_labels( add_labels: bool | Iterable[bool], darrays: Iterable[DataArray | None], suffixes: Iterable[str], ax: Axes, ) -> None: """Set x, y, z labels.""" add_labels = [add_labels] * 3 if isinstance(add_labels, bool) else add_labels axes: tuple[Literal["x", "y", "z"], ...] = ("x", "y", "z") for axis, add_label, darray, suffix in zip( axes, add_labels, darrays, suffixes, strict=True ): if darray is None: continue if add_label: label = label_from_attrs(darray, extra=suffix) if label is not None: getattr(ax, f"set_{axis}label")(label) if np.issubdtype(darray.dtype, np.datetime64): _set_concise_date(ax, axis=axis) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> PathCollection: ... 
@overload def scatter( darray: T_DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> FacetGrid[T_DataArray]: ... @overload def scatter( darray: T_DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> FacetGrid[T_DataArray]: ... @_plot1d def scatter( xplt: DataArray | None, yplt: DataArray | None, ax: Axes, add_labels: bool | Iterable[bool] = True, **kwargs, ) -> PathCollection: """Scatter variables against each other. Wraps :py:func:`matplotlib:matplotlib.pyplot.scatter`. 
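A minimal illustrative call (added example, not from the original docstring;
it assumes matplotlib is installed and uses synthetic data)::

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.random.randn(50),
        dims="time",
        coords={"time": np.arange(50), "temp": ("time", np.random.randn(50))},
    )
    # color the points by the auxiliary "temp" coordinate
    da.plot.scatter(x="time", hue="temp")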
""" if "u" in kwargs or "v" in kwargs: raise ValueError("u, v are not allowed in scatter plots.") zplt: DataArray | None = kwargs.pop("zplt", None) hueplt: DataArray | None = kwargs.pop("hueplt", None) sizeplt: DataArray | None = kwargs.pop("sizeplt", None) if hueplt is not None: kwargs.update(c=hueplt.to_numpy().ravel()) if sizeplt is not None: kwargs.update(s=sizeplt.to_numpy().ravel()) plts_or_none = (xplt, yplt, zplt) _add_labels(add_labels, plts_or_none, ("", "", ""), ax) xplt_np = None if xplt is None else xplt.to_numpy().ravel() yplt_np = None if yplt is None else yplt.to_numpy().ravel() zplt_np = None if zplt is None else zplt.to_numpy().ravel() plts_np = tuple(p for p in (xplt_np, yplt_np, zplt_np) if p is not None) if len(plts_np) == 3: import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) return ax.scatter(xplt_np, yplt_np, zplt_np, **kwargs) if len(plts_np) == 2: return ax.scatter(plts_np[0], plts_np[1], **kwargs) raise ValueError("At least two variables required for a scatter plot.") def _plot2d(plotfunc): """Decorator for common 2d plotting logic.""" commondoc = """ Parameters ---------- darray : DataArray Must be two-dimensional, unless creating faceted plots. x : Hashable or None, optional Coordinate for *x* axis. If ``None``, use ``darray.dims[1]``. y : Hashable or None, optional Coordinate for *y* axis. If ``None``, use ``darray.dims[0]``. figsize : Iterable or float or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size`` and ``figsize``. row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots. xincrease : None, True, or False, optional Should the values on the *x* axis be increasing from left to right? If ``None``, use the default for the Matplotlib function. yincrease : None, True, or False, optional Should the values on the *y* axis be increasing from top to bottom? If ``None``, use the default for the Matplotlib function. add_colorbar : bool, optional Add colorbar to axes. add_labels : bool, optional Use xarray metadata to label axes. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. 
cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. If not provided, this will be either be ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette and the plot type is not ``'contour'`` or ``'contourf'``, ``levels`` must also be specified. center : float or False, optional The value at which to center the colormap. Passing this value implies use of a diverging colormap. Setting it to ``False`` prevents use of a diverging colormap. robust : bool, optional If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. infer_intervals : bool, optional Only applies to pcolormesh. If ``True``, the coordinate intervals are passed to pcolormesh. If ``False``, the original coordinates are used (this can be useful for certain map projections). The default is to always infer intervals, unless the mesh is irregular and plotted on a map projection. colors : str or array-like of color-like, optional A single color or a sequence of colors. If the plot type is not ``'contour'`` or ``'contourf'``, the ``levels`` argument is required. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots. Only used for 2D and faceted plots. (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). cbar_ax : matplotlib axes object, optional Axes in which to draw the colorbar. cbar_kwargs : dict, optional Dictionary of keyword arguments to pass to the colorbar (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`). xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the x-axes. yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the y-axes. xticks : ArrayLike or None, optional Specify tick locations for x-axes. yticks : ArrayLike or None, optional Specify tick locations for y-axes. xlim : tuple[float, float] or None, optional Specify x-axes limits. ylim : tuple[float, float] or None, optional Specify y-axes limits. norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. **kwargs : optional Additional keyword arguments to wrapped Matplotlib function. Returns ------- artist : The same type of primitive artist that the wrapped Matplotlib function returns. 
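Examples
--------
A minimal sketch (added for illustration; it applies to any of the wrapped
2-D plotting methods and is shown here with ``pcolormesh``, assuming
matplotlib is installed and synthetic data):

>>> import numpy as np
>>> import xarray as xr
>>> da = xr.DataArray(np.random.rand(4, 5), dims=("y", "x"))
>>> da.plot.pcolormesh(robust=True, cmap="viridis")  # doctest: +SKIP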
""" # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Any: # All 2d plots in xarray share this function signature. if args: # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args # Decide on a default for the colorbar before facetgrids if add_colorbar is None: add_colorbar = True if plotfunc.__name__ == "contour" or ( plotfunc.__name__ == "surface" and cmap is None ): add_colorbar = False imshow_rgb = plotfunc.__name__ == "imshow" and darray.ndim == ( 3 + (row is not None) + (col is not None) ) if imshow_rgb: # Don't add a colorbar when showing an image with explicit colors add_colorbar = False # Matplotlib does not support normalising RGB data, so do it here. # See eg. https://github.com/matplotlib/matplotlib/pull/10220 if robust or vmax is not None or vmin is not None: darray = _rescale_imshow_rgb(darray.as_numpy(), vmin, vmax, robust) vmin, vmax, robust = None, None, False if subplot_kws is None: subplot_kws = dict() if plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid"): if ax is None: # TODO: Importing Axes3D is no longer necessary in matplotlib >= 3.2. 
# Remove when minimum requirement of matplotlib is 3.2: from mpl_toolkits.mplot3d import Axes3D # delete so it does not end up in locals() del Axes3D # Need to create a "3d" Axes instance for surface plots subplot_kws["projection"] = "3d" # In facet grids, shared axis labels don't make sense for surface plots sharex = False sharey = False # Handle facetgrids first if row or col: allargs = locals().copy() del allargs["darray"] del allargs["imshow_rgb"] allargs.update(allargs.pop("kwargs")) # Need the decorated plotting function allargs["plotfunc"] = globals()[plotfunc.__name__] return _easy_facetgrid(darray, kind="dataarray", **allargs) if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") if ( plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid") and ax is not None ): import mpl_toolkits if not isinstance(ax, mpl_toolkits.mplot3d.Axes3D): raise ValueError( "If ax is passed to surface(), it must be created with " 'projection="3d"' ) rgb = kwargs.pop("rgb", None) if rgb is not None and plotfunc.__name__ != "imshow": raise ValueError('The "rgb" keyword is only valid for imshow()') elif rgb is not None and not imshow_rgb: raise ValueError( 'The "rgb" keyword is only valid for imshow()' "with a three-dimensional array (per facet)" ) xlab, ylab = _infer_xy_labels( darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb ) xval = darray[xlab] yval = darray[ylab] if xval.ndim > 1 or yval.ndim > 1 or plotfunc.__name__ == "surface": # Passing 2d coordinate values, need to ensure they are transposed the same # way as darray. # Also surface plots always need 2d coordinates xval = xval.broadcast_like(darray) yval = yval.broadcast_like(darray) dims = darray.dims else: dims = (yval.dims[0], xval.dims[0]) # May need to transpose for correct x, y labels # xlab may be the name of a coord, we have to check for dim names if imshow_rgb: # For RGB[A] images, matplotlib requires the color dimension # to be last. In Xarray the order should be unimportant, so # we transpose to (y, x, color) to make this work. yx_dims = (ylab, xlab) dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims) if dims != darray.dims: darray = darray.transpose(*dims, transpose_coords=True) # better to pass the ndarrays directly to plotting functions xvalnp = xval.to_numpy() yvalnp = yval.to_numpy() # Pass the data as a masked ndarray too zval = darray.to_masked_array(copy=False) # Replace pd.Intervals if contained in xval or yval. xplt, xlab_extra = _resolve_intervals_2dplot(xvalnp, plotfunc.__name__) yplt, ylab_extra = _resolve_intervals_2dplot(yvalnp, plotfunc.__name__) _ensure_plottable(xplt, yplt, zval) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, zval.data, **locals(), _is_facetgrid=kwargs.pop("_is_facetgrid", False), ) if "contour" in plotfunc.__name__: # extend is a keyword argument only for contour and contourf, but # passing it to the colorbar is sufficient for imshow and # pcolormesh kwargs["extend"] = cmap_params["extend"] kwargs["levels"] = cmap_params["levels"] # if colors == a single color, matplotlib draws dashed negative # contours. 
we lose this feature if we pass cmap and not colors if colors is not None: cmap_params["cmap"] = None kwargs["colors"] = colors if "pcolormesh" == plotfunc.__name__: kwargs["infer_intervals"] = infer_intervals kwargs["xscale"] = xscale kwargs["yscale"] = yscale if "imshow" == plotfunc.__name__ and isinstance(aspect, str): # forbid usage of mpl strings raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray") ax = get_axis(figsize, size, aspect, ax, **subplot_kws) primitive = plotfunc( xplt, yplt, zval, ax=ax, cmap=cmap_params["cmap"], vmin=cmap_params["vmin"], vmax=cmap_params["vmax"], norm=cmap_params["norm"], **kwargs, ) # Label the plot with metadata if add_labels: ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra)) ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra)) ax.set_title(darray._title_for_slice()) if plotfunc.__name__ == "surface": import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) ax.set_zlabel(label_from_attrs(darray)) if add_colorbar: if add_labels and "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(darray) cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params) elif cbar_ax is not None or cbar_kwargs: # inform the user about keywords which aren't used raise ValueError( "cbar_ax and cbar_kwargs can't be used with add_colorbar=False." ) # origin kwarg overrides yincrease if "origin" in kwargs: yincrease = None _update_axes( ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim ) if np.issubdtype(xplt.dtype, np.datetime64): _set_concise_date(ax, "x") return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc @overload def imshow( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> AxesImage: ... 
@overload def imshow( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def imshow( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def imshow( x: np.ndarray, y: np.ndarray, z: np.ma.core.MaskedArray, ax: Axes, **kwargs: Any ) -> AxesImage: """ Image plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.imshow`. While other plot methods require the DataArray to be strictly two-dimensional, ``imshow`` also accepts a 3D array where some dimension can be interpreted as RGB or RGBA color channels and allows this dimension to be specified via the kwarg ``rgb=``. Unlike :py:func:`matplotlib:matplotlib.pyplot.imshow`, which ignores ``vmin``/``vmax`` for RGB(A) data, xarray *will* use ``vmin`` and ``vmax`` for RGB(A) data by applying a single scaling factor and offset to all bands. Passing ``robust=True`` infers ``vmin`` and ``vmax`` :ref:`in the usual way `. Additionally the y-axis is not inverted by default, you can restore the matplotlib behavior by setting `yincrease=False`. .. note:: This function needs uniformly spaced coordinates to properly label the axes. Call :py:meth:`DataArray.plot` to check. The pixels are centered on the coordinates. For example, if the coordinate value is 3.2, then the pixels for those coordinates will be centered on 3.2. 
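A minimal sketch (added for illustration, not from the original docstring;
it assumes matplotlib is installed and uses synthetic data)::

    import numpy as np
    import xarray as xr

    # plain 2-D image
    xr.DataArray(np.random.rand(4, 5), dims=("y", "x")).plot.imshow()

    # 3-D array: the size-3 "band" dimension is used as the RGB channels
    xr.DataArray(np.random.rand(4, 5, 3), dims=("y", "x", "band")).plot.imshow(rgb="band")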
""" if x.ndim != 1 or y.ndim != 1: raise ValueError( "imshow requires 1D coordinates, try using pcolormesh or contour(f)" ) def _center_pixels(x): """Center the pixels on the coordinates.""" if np.issubdtype(x.dtype, str): # When using strings as inputs imshow converts it to # integers. Choose extent values which puts the indices in # in the center of the pixels: return 0 - 0.5, len(x) - 0.5 try: # Center the pixels assuming uniform spacing: xstep = 0.5 * (x[1] - x[0]) except IndexError: # Arbitrary default value, similar to matplotlib behaviour: xstep = 0.1 return x[0] - xstep, x[-1] + xstep # Center the pixels: left, right = _center_pixels(x) top, bottom = _center_pixels(y) defaults: dict[str, Any] = {"origin": "upper", "interpolation": "nearest"} if not hasattr(ax, "projection"): # not for cartopy geoaxes defaults["aspect"] = "auto" # Allow user to override these defaults defaults.update(kwargs) if defaults["origin"] == "upper": defaults["extent"] = [left, right, bottom, top] else: defaults["extent"] = [left, right, top, bottom] if z.ndim == 3: # matplotlib imshow uses black for missing data, but Xarray makes # missing data transparent. We therefore add an alpha channel if # there isn't one, and set it to transparent where data is masked. if z.shape[-1] == 3: safe_dtype = np.promote_types(z.dtype, np.uint8) alpha = np.ma.ones(z.shape[:2] + (1,), dtype=safe_dtype) if np.issubdtype(z.dtype, np.integer): alpha[:] = 255 z = np.ma.concatenate((z, alpha), axis=2) else: z = z.copy() z[np.any(z.mask, axis=-1), -1] = 0 primitive = ax.imshow(z, **defaults) # If x or y are strings the ticklabels have been replaced with # integer indices. Replace them back to strings: for axis, v in [("x", x), ("y", y)]: if np.issubdtype(v.dtype, str): getattr(ax, f"set_{axis}ticks")(np.arange(len(v))) getattr(ax, f"set_{axis}ticklabels")(v) return primitive @overload def contour( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... 
@overload def contour( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def contour( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def contour( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> QuadContourSet: """ Contour plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.contour`. 
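A minimal sketch (added for illustration, not from the original docstring;
it assumes matplotlib is installed and uses synthetic data)::

    import numpy as np
    import xarray as xr

    x = np.linspace(0, 2 * np.pi, 50)
    da = xr.DataArray(np.sin(x)[:, np.newaxis] * np.cos(x), dims=("y", "x"))
    da.plot.contour(levels=7, colors="k")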
""" primitive = ax.contour(x, y, z, **kwargs) return primitive @overload def contourf( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contourf( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def contourf( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... 
@_plot2d def contourf( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> QuadContourSet: """ Filled contour plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.contourf`. """ primitive = ax.contourf(x, y, z, **kwargs) return primitive @overload def pcolormesh( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadMesh: ... @overload def pcolormesh( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... 
@overload def pcolormesh( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def pcolormesh( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, xscale: ScaleOptions | None = None, yscale: ScaleOptions | None = None, infer_intervals=None, **kwargs: Any, ) -> QuadMesh: """ Pseudocolor plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.pcolormesh`. """ # decide on a default for infer_intervals (GH781) x = np.asarray(x) if infer_intervals is None: if hasattr(ax, "projection"): if len(x.shape) == 1: infer_intervals = True else: infer_intervals = False else: infer_intervals = True if any(np.issubdtype(k.dtype, str) for k in (x, y)): # do not infer intervals if any axis contains str ticks, see #6775 infer_intervals = False if infer_intervals and ( (np.shape(x)[0] == np.shape(z)[1]) or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])) ): if x.ndim == 1: x = _infer_interval_breaks(x, check_monotonic=True, scale=xscale) else: # we have to infer the intervals on both axes x = _infer_interval_breaks(x, axis=1, scale=xscale) x = _infer_interval_breaks(x, axis=0, scale=xscale) if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]): if y.ndim == 1: y = _infer_interval_breaks(y, check_monotonic=True, scale=yscale) else: # we have to infer the intervals on both axes y = _infer_interval_breaks(y, axis=1, scale=yscale) y = _infer_interval_breaks(y, axis=0, scale=yscale) ax.grid(False) primitive = ax.pcolormesh(x, y, z, **kwargs) # by default, pcolormesh picks "round" values for bounds # this results in ugly looking plots with lots of surrounding whitespace if not hasattr(ax, "projection") and x.ndim == 1 and y.ndim == 1: # not a cartopy geoaxis ax.set_xlim(x[0], x[-1]) ax.set_ylim(y[0], y[-1]) return primitive @overload def surface( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = 
None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Poly3DCollection: ... @overload def surface( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def surface( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def surface( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> Poly3DCollection: """ Surface plot of 2D DataArray. Wraps :py:meth:`matplotlib:mpl_toolkits.mplot3d.axes3d.Axes3D.plot_surface`. 
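A minimal sketch (added for illustration, not from the original docstring;
it assumes matplotlib is installed and uses synthetic data)::

    import numpy as np
    import xarray as xr

    x = np.linspace(-2, 2, 40)
    da = xr.DataArray(np.exp(-(x[:, np.newaxis] ** 2 + x**2)), dims=("y", "x"))
    # a 3-D Axes is created automatically when no ``ax`` is passed
    da.plot.surface(cmap="viridis")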
""" import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) primitive = ax.plot_surface(x, y, z, **kwargs) return primitive xarray-2025.12.0/xarray/plot/dataset_plot.py000066400000000000000000000742261511464676000206770ustar00rootroot00000000000000from __future__ import annotations import functools import inspect import warnings from collections.abc import Callable, Hashable, Iterable from typing import TYPE_CHECKING, Any, TypeVar, overload from xarray.plot import dataarray_plot from xarray.plot.facetgrid import _easy_facetgrid from xarray.plot.utils import ( _add_colorbar, _get_nice_quiver_magnitude, _infer_meta_data, _process_cmap_cbar_kwargs, get_axis, ) from xarray.structure.alignment import broadcast if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.collections import LineCollection, PathCollection from matplotlib.colors import Colormap, Normalize from matplotlib.quiver import Quiver from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( AspectOptions, ExtendOptions, HueStyleOptions, ScaleOptions, ) from xarray.plot.facetgrid import FacetGrid def _dsplot(plotfunc): commondoc = """ Parameters ---------- ds : Dataset x : Hashable or None, optional Variable name for x-axis. y : Hashable or None, optional Variable name for y-axis. u : Hashable or None, optional Variable name for the *u* velocity (in *x* direction). quiver/streamplot plots only. v : Hashable or None, optional Variable name for the *v* velocity (in *y* direction). quiver/streamplot plots only. hue: Hashable or None, optional Variable by which to color scatter points or arrows. hue_style: {'continuous', 'discrete'} or None, optional How to use the ``hue`` variable: - ``'continuous'`` -- continuous color scale (default for numeric ``hue`` variables) - ``'discrete'`` -- a color for each unique value, using the default color cycle (default for non-numeric ``hue`` variables) row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots. ax : matplotlib axes object or None, optional If ``None``, use the current axes. Not applicable when using facets. figsize : Iterable[float] or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. sharex : bool or None, optional If True all subplots share the same x-axis. sharey : bool or None, optional If True all subplots share the same y-axis. add_guide: bool or None, optional Add a guide that depends on ``hue_style``: - ``'continuous'`` -- build a colorbar - ``'discrete'`` -- build a legend subplot_kws : dict or None, optional Dictionary of keyword arguments for Matplotlib subplots (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). Only applies to FacetGrid plotting. cbar_kwargs : dict, optional Dictionary of keyword arguments to pass to the colorbar (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`). cbar_ax : matplotlib axes object, optional Axes in which to draw the colorbar. 
cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. Either a Matplotlib colormap name or object. If not provided, this will be either ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette, ``levels`` must also be specified. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. infer_intervals: bool | None If True the intervals are inferred. center : float, optional The value at which to center the colormap. Passing this value implies use of a diverging colormap. Setting it to ``False`` prevents use of a diverging colormap. robust : bool, optional If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. colors : str or array-like of color-like, optional A single color or a list of colors. The ``levels`` argument is required. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. **kwargs : optional Additional keyword arguments to wrapped Matplotlib function. 
""" # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, sharex: bool = True, sharey: bool = True, add_guide: bool | None = None, subplot_kws: dict[str, Any] | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> Any: if args: # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: assert u is None u = args[2] if len(args) > 3: assert v is None v = args[3] if len(args) > 4: assert hue is None hue = args[4] if len(args) > 5: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args _is_facetgrid = kwargs.pop("_is_facetgrid", False) if _is_facetgrid: # facetgrid call meta_data = kwargs.pop("meta_data") else: meta_data = _infer_meta_data( ds, x, y, hue, hue_style, add_guide, funcname=plotfunc.__name__ ) hue_style = meta_data["hue_style"] # handle facetgrids first if col or row: allargs = locals().copy() allargs["plotfunc"] = globals()[plotfunc.__name__] allargs["data"] = ds # remove kwargs to avoid passing the information twice for arg in ["meta_data", "kwargs", "ds"]: del allargs[arg] return _easy_facetgrid(kind="dataset", **allargs, **kwargs) figsize = kwargs.pop("figsize", None) ax = get_axis(figsize, size, aspect, ax) if hue_style == "continuous" and hue is not None: if _is_facetgrid: cbar_kwargs = meta_data["cbar_kwargs"] cmap_params = meta_data["cmap_params"] else: cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, ds[hue].values, **locals() ) # subset that can be passed to scatter, hist2d cmap_params_subset = { vv: cmap_params[vv] for vv in ["vmin", "vmax", "norm", "cmap"] } else: cmap_params_subset = {} if (u is not None or v is not None) and plotfunc.__name__ not in ( "quiver", "streamplot", ): raise ValueError("u, v are only allowed for quiver or streamplot plots.") primitive = plotfunc( ds=ds, x=x, y=y, ax=ax, u=u, v=v, hue=hue, hue_style=hue_style, cmap_params=cmap_params_subset, **kwargs, ) if _is_facetgrid: # if this was called from Facetgrid.map_dataset, return primitive # finish here. 
Else, make labels if meta_data.get("xlabel", None): ax.set_xlabel(meta_data.get("xlabel")) if meta_data.get("ylabel", None): ax.set_ylabel(meta_data.get("ylabel")) if meta_data["add_legend"]: ax.legend(handles=primitive, title=meta_data.get("hue_label", None)) if meta_data["add_colorbar"]: cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs if "label" not in cbar_kwargs: cbar_kwargs["label"] = meta_data.get("hue_label", None) _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params) if meta_data["add_quiverkey"]: magnitude = _get_nice_quiver_magnitude(ds[u], ds[v]) units = ds[u].attrs.get("units", "") ax.quiverkey( primitive, X=0.85, Y=0.9, U=magnitude, label=f"{magnitude}\n{units}", labelpos="E", coordinates="figure", ) if plotfunc.__name__ in ("quiver", "streamplot"): title = ds[u]._title_for_slice() else: title = ds[x]._title_for_slice() ax.set_title(title) return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc @overload def quiver( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> Quiver: ... @overload def quiver( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... 
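# Illustrative note (not part of the library source): the ``quiver`` overloads
# surrounding this note encode the module-wide faceting convention: with both
# ``row`` and ``col`` left as ``None`` the bare matplotlib ``Quiver`` artist is
# returned, while passing either one yields a ``FacetGrid[Dataset]``. A
# minimal, hypothetical usage sketch (the dataset contents are assumptions):
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {
#             "u": (("y", "x"), np.random.rand(4, 5)),
#             "v": (("y", "x"), np.random.rand(4, 5)),
#         },
#         coords={"x": np.arange(5), "y": np.arange(4)},
#     )
#     ds.plot.quiver(x="x", y="y", u="u", v="v")  # -> Quiver
#     # With a faceting dimension (e.g. an assumed "time" dim) a FacetGrid is
#     # returned instead; note that faceted quiver plots require ``scale``:
#     # ds.plot.quiver(x="x", y="y", u="u", v="v", col="time", scale=50)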
@overload def quiver( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @_dsplot def quiver( ds: Dataset, x: Hashable, y: Hashable, ax: Axes, u: Hashable, v: Hashable, **kwargs: Any, ) -> Quiver: """Quiver plot of Dataset variables. Wraps :py:func:`matplotlib:matplotlib.pyplot.quiver`. """ import matplotlib as mpl if x is None or y is None or u is None or v is None: raise ValueError("Must specify x, y, u, v for quiver plots.") dx, dy, du, dv = broadcast(ds[x], ds[y], ds[u], ds[v]) args = [dx.values, dy.values, du.values, dv.values] hue = kwargs.pop("hue") cmap_params = kwargs.pop("cmap_params") if hue: args.append(ds[hue].values) # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params if not cmap_params["norm"]: cmap_params["norm"] = mpl.colors.Normalize( cmap_params.pop("vmin"), cmap_params.pop("vmax") ) kwargs.pop("hue_style") kwargs.setdefault("pivot", "middle") hdl = ax.quiver(*args, **kwargs, **cmap_params) return hdl @overload def streamplot( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> LineCollection: ... 
@overload def streamplot( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def streamplot( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @_dsplot def streamplot( ds: Dataset, x: Hashable, y: Hashable, ax: Axes, u: Hashable, v: Hashable, **kwargs: Any, ) -> LineCollection: """Plot streamlines of Dataset variables. Wraps :py:func:`matplotlib:matplotlib.pyplot.streamplot`. """ import matplotlib as mpl if x is None or y is None or u is None or v is None: raise ValueError("Must specify x, y, u, v for streamplot plots.") # Matplotlib's streamplot has strong restrictions on what x and y can be, so need to # get arrays transposed the 'right' way around. 'x' cannot vary within 'rows', so # the dimension of x must be the second dimension. 'y' cannot vary with 'columns' so # the dimension of y must be the first dimension. If x and y are both 2d, assume the # user has got them right already. 
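# Hypothetical illustration of the orientation rule described above (the
# dimension names "lat" and "lon" are assumptions, not taken from the caller):
# if ``x`` varies along "lon" and ``y`` varies along "lat", the arrays handed
# to matplotlib must be shaped ("lat", "lon"), i.e. the dimension of ``y``
# first and the dimension of ``x`` second, which is exactly what the
# transposes below enforce.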
xdim = ds[x].dims[0] if len(ds[x].dims) == 1 else None ydim = ds[y].dims[0] if len(ds[y].dims) == 1 else None if xdim is not None and ydim is None: ydims = set(ds[y].dims) - {xdim} if len(ydims) == 1: ydim = next(iter(ydims)) if ydim is not None and xdim is None: xdims = set(ds[x].dims) - {ydim} if len(xdims) == 1: xdim = next(iter(xdims)) dx, dy, du, dv = broadcast(ds[x], ds[y], ds[u], ds[v]) if xdim is not None and ydim is not None: # Need to ensure the arrays are transposed correctly dx = dx.transpose(ydim, xdim) dy = dy.transpose(ydim, xdim) du = du.transpose(ydim, xdim) dv = dv.transpose(ydim, xdim) hue = kwargs.pop("hue") cmap_params = kwargs.pop("cmap_params") if hue: if xdim is not None and ydim is not None: ds[hue] = ds[hue].transpose(ydim, xdim) kwargs["color"] = ds[hue].values # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params if not cmap_params["norm"]: cmap_params["norm"] = mpl.colors.Normalize( cmap_params.pop("vmin"), cmap_params.pop("vmax") ) kwargs.pop("hue_style") hdl = ax.streamplot( dx.values, dy.values, du.values, dv.values, **kwargs, **cmap_params ) # Return .lines so colorbar creation works properly return hdl.lines F = TypeVar("F", bound=Callable) def _update_doc_to_dataset(dataarray_plotfunc: Callable) -> Callable[[F], F]: """ Add a common docstring by reusing the DataArray one. TODO: Reduce code duplication. * The goal is to reduce code duplication by moving all Dataset specific plots to the DataArray side and use this thin wrapper to handle the conversion between Dataset and DataArray. * Improve docstring handling, maybe reword the DataArray versions to explain Datasets better. Parameters ---------- dataarray_plotfunc : Callable Function that returns a finished plot primitive. """ # Build on the original docstring da_doc = dataarray_plotfunc.__doc__ if da_doc is None: raise NotImplementedError("DataArray plot method requires a docstring") da_str = """ Parameters ---------- darray : DataArray """ ds_str = """ The `y` DataArray will be used as base, any other variables are added as coords. Parameters ---------- ds : Dataset """ # TODO: improve this? 
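# Concretely: when the DataArray docstring follows the shared template, the
# "darray : DataArray" parameter block is swapped for the "ds : Dataset" block
# and any remaining "darray" mentions are renamed to "ds"; otherwise the
# DataArray docstring is reused unchanged.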
if da_str in da_doc: ds_doc = da_doc.replace(da_str, ds_str).replace("darray", "ds") else: ds_doc = da_doc @functools.wraps(dataarray_plotfunc) def wrapper(dataset_plotfunc: F) -> F: dataset_plotfunc.__doc__ = ds_doc return dataset_plotfunc return wrapper # type: ignore[return-value] def _normalize_args( plotmethod: str, args: tuple[Any, ...], kwargs: dict[str, Any] ) -> dict[str, Any]: from xarray.core.dataarray import DataArray # Determine positional arguments keyword by inspecting the # signature of the plotmethod: locals_ = dict( inspect.signature(getattr(DataArray().plot, plotmethod)) .bind(*args, **kwargs) .arguments.items() ) locals_.update(locals_.pop("kwargs", {})) return locals_ def _temp_dataarray(ds: Dataset, y: Hashable, locals_: dict[str, Any]) -> DataArray: """Create a temporary datarray with extra coords.""" from xarray.core.dataarray import DataArray coords = dict(ds[y].coords) dims = set(ds[y].dims) # Add extra coords to the DataArray from valid kwargs, if using all # kwargs there is a risk that we add unnecessary dataarrays as # coords straining RAM further for example: # ds.both and extend="both" would add ds.both to the coords: valid_coord_kwargs = {"x", "z", "markersize", "hue", "row", "col", "u", "v"} coord_kwargs = locals_.keys() & valid_coord_kwargs for k in coord_kwargs: key = locals_[k] darray = ds.get(key) if darray is not None: coords[key] = darray dims.update(darray.dims) # Trim dataset from unnecessary dims: ds_trimmed = ds.drop_dims(ds.sizes.keys() - dims) # TODO: Use ds.dims in the future # The dataarray has to include all the dims. Broadcast to that shape # and add the additional coords: _y = ds[y].broadcast_like(ds_trimmed) return DataArray(_y, coords=coords) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> PathCollection: ... 
@overload def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
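# Illustrative note (not part of the library source): as with ``quiver`` and
# ``streamplot`` above, the overloads encode the return-type convention, where
# omitting ``row`` and ``col`` yields the bare ``PathCollection`` and faceting
# yields a ``FacetGrid[DataArray]``. A minimal, hypothetical sketch (the
# dataset contents are assumptions):
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"A": ("t", np.random.rand(10)), "B": ("t", np.random.rand(10))},
#         coords={"t": np.arange(10)},
#     )
#     ds.plot.scatter(x="A", y="B")  # -> PathCollection
#     # ds.plot.scatter(x="A", y="B", hue="t")  # extra variables map to hue etc.
#
# The implementation below builds a temporary DataArray from ``y`` via
# ``_temp_dataarray`` and defers to ``DataArray.plot.scatter``.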
@_update_doc_to_dataset(dataarray_plot.scatter) def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> PathCollection | FacetGrid[DataArray]: """Scatter plot Dataset data variables against each other.""" locals_ = locals() del locals_["ds"] locals_.update(locals_.pop("kwargs", {})) da = _temp_dataarray(ds, y, locals_) return da.plot.scatter(*locals_.pop("args", ()), **locals_) xarray-2025.12.0/xarray/plot/facetgrid.py000066400000000000000000001121341511464676000201330ustar00rootroot00000000000000from __future__ import annotations import functools import itertools import warnings from collections.abc import Callable, Hashable, Iterable, MutableMapping from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast import numpy as np from xarray.core.formatting import format_item from xarray.core.types import HueStyleOptions, T_DataArrayOrSet from xarray.plot.utils import ( _LINEWIDTH_RANGE, _MARKERSIZE_RANGE, _add_legend, _determine_guide, _get_nice_quiver_magnitude, _guess_coords_to_plot, _infer_xy_labels, _Normalize, _parse_size, _process_cmap_cbar_kwargs, label_from_attrs, ) if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.cm import ScalarMappable from matplotlib.colorbar import Colorbar from matplotlib.figure import Figure from matplotlib.legend import Legend from matplotlib.quiver import QuiverKey from matplotlib.text import Annotation from xarray.core.dataarray import DataArray # Overrides axes.labelsize, xtick.major.size, ytick.major.size # from mpl.rcParams _FONTSIZE = "small" # For major ticks on x, y axes _NTICKS = 5 def _nicetitle(coord, value, maxchar, template): """ Put coord, value in template and truncate at maxchar """ prettyvalue = format_item(value, quote_strings=False) title = template.format(coord=coord, value=prettyvalue) if len(title) > maxchar: title = title[: (maxchar - 3)] + "..." return title T_FacetGrid = TypeVar("T_FacetGrid", bound="FacetGrid") class FacetGrid(Generic[T_DataArrayOrSet]): """ Initialize the Matplotlib figure and FacetGrid object. The :class:`FacetGrid` is an object that links a xarray DataArray to a Matplotlib figure with a particular structure. In particular, :class:`FacetGrid` is used to draw plots with multiple axes, where each axes shows the same relationship conditioned on different levels of some dimension. It's possible to condition on up to two variables by assigning variables to the rows and columns of the grid. 
The general approach to plotting here is called "small multiples", where the same kind of plot is repeated multiple times, and the specific use of small multiples to display the same relationship conditioned on one or more other variables is often called a "trellis plot". The basic workflow is to initialize the :class:`FacetGrid` object with the DataArray and the variable names that are used to structure the grid. Then plotting functions can be applied to each subset by calling :meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`. Attributes ---------- axs : ndarray of matplotlib.axes.Axes Array containing axes in corresponding position, as returned from :py:func:`matplotlib.pyplot.subplots`. col_labels : list of matplotlib.text.Annotation Column titles. row_labels : list of matplotlib.text.Annotation Row titles. fig : matplotlib.figure.Figure The figure containing all the axes. name_dicts : ndarray of dict Array containing dictionaries mapping coordinate names to values. ``None`` is used as a sentinel value for axes that should remain empty, i.e., sometimes the rightmost grid positions in the bottom row. """ data: T_DataArrayOrSet name_dicts: np.ndarray fig: Figure axs: np.ndarray row_names: list[np.ndarray] col_names: list[np.ndarray] figlegend: Legend | None quiverkey: QuiverKey | None cbar: Colorbar | None _single_group: bool | Hashable _nrow: int _row_var: Hashable | None _ncol: int _col_var: Hashable | None _col_wrap: int | None row_labels: list[Annotation | None] col_labels: list[Annotation | None] _x_var: None _y_var: None _hue_var: DataArray | None _cmap_extend: Any | None _mappables: list[ScalarMappable] _finalized: bool def __init__( self, data: T_DataArrayOrSet, col: Hashable | None = None, row: Hashable | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, figsize: Iterable[float] | None = None, aspect: float = 1, size: float = 3, subplot_kws: dict[str, Any] | None = None, ) -> None: """ Parameters ---------- data : DataArray or Dataset DataArray or Dataset to be plotted. row, col : str Dimension names that define subsets of the data, which will be drawn on separate facets in the grid. col_wrap : int, optional "Wrap" the grid the for the column variable after this number of columns, adding rows if ``col_wrap`` is less than the number of facets. sharex : bool, optional If true, the facets will share *x* axes. sharey : bool, optional If true, the facets will share *y* axes. figsize : Iterable of float or None, optional A tuple (width, height) of the figure in inches. If set, overrides ``size`` and ``aspect``. aspect : scalar, default: 1 Aspect ratio of each facet, so that ``aspect * size`` gives the width of each facet in inches. size : scalar, default: 3 Height (in inches) of each facet. See also: ``aspect``. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots (:py:func:`matplotlib.pyplot.subplots`). """ import matplotlib.pyplot as plt # Handle corner case of nonunique coordinates rep_col = col is not None and not data[col].to_index().is_unique rep_row = row is not None and not data[row].to_index().is_unique if rep_col or rep_row: raise ValueError( "Coordinates used for faceting cannot " "contain repeated (nonunique) values." 
) # single_group is the grouping variable, if there is exactly one single_group: bool | Hashable if col and row: single_group = False nrow = len(data[row]) ncol = len(data[col]) nfacet = nrow * ncol if col_wrap is not None: warnings.warn( "Ignoring col_wrap since both col and row were passed", stacklevel=2 ) elif row and not col: single_group = row elif not row and col: single_group = col else: raise ValueError("Pass a coordinate name as an argument for row or col") # Compute grid shape if single_group: nfacet = len(data[single_group]) if col: # idea - could add heuristic for nice shapes like 3x4 ncol = nfacet if row: ncol = 1 if col_wrap is not None: # Overrides previous settings ncol = col_wrap nrow = int(np.ceil(nfacet / ncol)) # Set the subplot kwargs subplot_kws = {} if subplot_kws is None else subplot_kws if figsize is None: # Calculate the base figure size with extra horizontal space for a # colorbar cbar_space = 1 figsize = (ncol * size * aspect + cbar_space, nrow * size) fig, axs = plt.subplots( nrow, ncol, sharex=sharex, sharey=sharey, squeeze=False, figsize=figsize, subplot_kw=subplot_kws, ) # Set up the lists of names for the row and column facet variables col_names = list(data[col].to_numpy()) if col else [] row_names = list(data[row].to_numpy()) if row else [] if single_group: full: list[dict[Hashable, Any] | None] = [ {single_group: x} for x in data[single_group].to_numpy() ] empty: list[dict[Hashable, Any] | None] = [ None for x in range(nrow * ncol - len(full)) ] name_dict_list = full + empty else: rowcols = itertools.product(row_names, col_names) name_dict_list = [{row: r, col: c} for r, c in rowcols] name_dicts = np.array(name_dict_list).reshape(nrow, ncol) # Set up the class attributes # --------------------------- # First the public API self.data = data self.name_dicts = name_dicts self.fig = fig self.axs = axs self.row_names = row_names self.col_names = col_names # guides self.figlegend = None self.quiverkey = None self.cbar = None # Next the private variables self._single_group = single_group self._nrow = nrow self._row_var = row self._ncol = ncol self._col_var = col self._col_wrap = col_wrap self.row_labels = [None] * nrow self.col_labels = [None] * ncol self._x_var = None self._y_var = None self._hue_var = None self._cmap_extend = None self._mappables = [] self._finalized = False @property def axes(self) -> np.ndarray: warnings.warn( ( "self.axes is deprecated since 2022.11 in order to align with " "matplotlibs plt.subplots, use self.axs instead." ), DeprecationWarning, stacklevel=2, ) return self.axs @axes.setter def axes(self, axs: np.ndarray) -> None: warnings.warn( ( "self.axes is deprecated since 2022.11 in order to align with " "matplotlibs plt.subplots, use self.axs instead." ), DeprecationWarning, stacklevel=2, ) self.axs = axs @property def _left_axes(self) -> np.ndarray: return self.axs[:, 0] @property def _bottom_axes(self) -> np.ndarray: return self.axs[-1, :] def map_dataarray( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, **kwargs: Any, ) -> T_FacetGrid: """ Apply a plotting function to a 2d facet's subset of the data. 
This is more convenient and less general than ``FacetGrid.map`` Parameters ---------- func : callable A plotting function with the same signature as a 2d xarray plotting method such as `xarray.plot.imshow` x, y : string Names of the coordinates to plot on x, y axes **kwargs additional keyword arguments to func Returns ------- self : FacetGrid object """ if kwargs.get("cbar_ax") is not None: raise ValueError("cbar_ax not supported by FacetGrid.") cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, self.data.to_numpy(), **kwargs ) self._cmap_extend = cmap_params.get("extend") # Order is important func_kwargs = { k: v for k, v in kwargs.items() if k not in {"cmap", "colors", "cbar_kwargs", "levels"} } func_kwargs.update(cmap_params) # to avoid redundant calling, colorbar and labelling is instead handled # by `_finalize_grid` at the end func_kwargs["add_colorbar"] = False if func.__name__ != "surface": func_kwargs["add_labels"] = False # Get x, y labels for the first subplot x, y = _infer_xy_labels( darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, imshow=func.__name__ == "imshow", rgb=kwargs.get("rgb"), ) for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] mappable = func( subset, x=x, y=y, ax=ax, **func_kwargs, _is_facetgrid=True ) self._mappables.append(mappable) xlabel = label_from_attrs(self.data[x]) ylabel = label_from_attrs(self.data[y]) self._finalize_grid(xlabel, ylabel) if kwargs.get("add_colorbar", True): self.add_colorbar(**cbar_kwargs) return self def map_plot1d( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, *, z: Hashable | None = None, hue: Hashable | None = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, **kwargs: Any, ) -> T_FacetGrid: """ Apply a plotting function to a 1d facet's subset of the data. This is more convenient and less general than ``FacetGrid.map`` Parameters ---------- func : A plotting function with the same signature as a 1d xarray plotting method such as `xarray.plot.scatter` x, y : Names of the coordinates to plot on x, y axes **kwargs additional keyword arguments to func Returns ------- self : FacetGrid object """ # Copy data to allow converting categoricals to integers and storing # them in self.data. It is not possible to copy in the init # unfortunately as there are tests that relies on self.data being # mutable (test_names_appear_somewhere()). Maybe something to deprecate # not sure how much that is used outside these tests. self.data = self.data.copy() if kwargs.get("cbar_ax") is not None: raise ValueError("cbar_ax not supported by FacetGrid.") if func.__name__ == "scatter": size_ = kwargs.pop("_size", markersize) size_r = _MARKERSIZE_RANGE else: size_ = kwargs.pop("_size", linewidth) size_r = _LINEWIDTH_RANGE # Guess what coords to use if some of the values in coords_to_plot are None: coords_to_plot: MutableMapping[str, Hashable | None] = dict( x=x, z=z, hue=hue, size=size_ ) coords_to_plot = _guess_coords_to_plot(self.data, coords_to_plot, kwargs) # Handle hues: hue = coords_to_plot["hue"] hueplt = self.data.coords[hue] if hue else None # TODO: _infer_line_data2 ? hueplt_norm = _Normalize(hueplt) self._hue_var = hueplt cbar_kwargs = kwargs.pop("cbar_kwargs", {}) if hueplt_norm.data is not None: if not hueplt_norm.data_is_numeric: # TODO: Ticks seems a little too hardcoded, since it will always # show all the values. 
But maybe it's ok, since plotting hundreds # of categorical data isn't that meaningful anyway. cbar_kwargs.update(format=hueplt_norm.format, ticks=hueplt_norm.ticks) kwargs.update(levels=hueplt_norm.levels) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, cast("DataArray", hueplt_norm.values).data, cbar_kwargs=cbar_kwargs, **kwargs, ) self._cmap_extend = cmap_params.get("extend") else: cmap_params = {} # Handle sizes: size_ = coords_to_plot["size"] sizeplt = self.data.coords[size_] if size_ else None sizeplt_norm = _Normalize(data=sizeplt, width=size_r) if sizeplt_norm.data is not None: self.data[size_] = sizeplt_norm.values # Add kwargs that are sent to the plotting function, # order is important ??? func_kwargs = { k: v for k, v in kwargs.items() if k not in {"cmap", "colors", "cbar_kwargs", "levels"} } func_kwargs.update(cmap_params) # Annotations will be handled later, skip those parts in the plotfunc: func_kwargs["add_colorbar"] = False func_kwargs["add_legend"] = False func_kwargs["add_title"] = False add_labels_ = np.zeros(self.axs.shape + (3,), dtype=bool) if kwargs.get("z") is not None: # 3d plots looks better with all labels. 3d plots can't sharex either so it # is easy to get lost while rotating the plots: add_labels_[:] = True else: # Subplots should have labels on the left and bottom edges only: add_labels_[-1, :, 0] = True # x add_labels_[:, 0, 1] = True # y # add_labels_[:, :, 2] = True # z # Set up the lists of names for the row and column facet variables: if self._single_group: full = tuple( {self._single_group: x} for x in range(self.data[self._single_group].size) ) empty = tuple(None for x in range(self._nrow * self._ncol - len(full))) name_d = full + empty else: rowcols = itertools.product( range(self.data[self._row_var].size), range(self.data[self._col_var].size), ) name_d = tuple({self._row_var: r, self._col_var: c} for r, c in rowcols) name_dicts = np.array(name_d).reshape(self._nrow, self._ncol) # Plot the data for each subplot: for add_lbls, d, ax in zip( add_labels_.reshape((self.axs.size, -1)), name_dicts.flat, self.axs.flat, strict=True, ): func_kwargs["add_labels"] = add_lbls # None is the sentinel value if d is not None: subset = self.data.isel(d) mappable = func( subset, x=x, y=y, ax=ax, hue=hue, _size=size_, **func_kwargs, _is_facetgrid=True, ) self._mappables.append(mappable) # Add titles and some touch ups: self._finalize_grid() self._set_lims() add_colorbar, add_legend = _determine_guide( hueplt_norm, sizeplt_norm, kwargs.get("add_colorbar"), kwargs.get("add_legend"), # kwargs.get("add_guide", None), # kwargs.get("hue_style", None), ) if add_legend: use_legend_elements = func.__name__ != "hist" if use_legend_elements: self.add_legend( use_legend_elements=use_legend_elements, hueplt_norm=hueplt_norm if not add_colorbar else _Normalize(None), sizeplt_norm=sizeplt_norm, primitive=self._mappables, legend_ax=self.fig, plotfunc=func.__name__, ) else: self.add_legend(use_legend_elements=use_legend_elements) if add_colorbar: # Colorbar is after legend so it correctly fits the plot: if "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(hueplt_norm.data) self.add_colorbar(**cbar_kwargs) return self def map_dataarray_line( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, hue: Hashable | None, add_legend: bool = True, _labels=None, **kwargs: Any, ) -> T_FacetGrid: from xarray.plot.dataarray_plot import _infer_line_data for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value 
if d is not None: subset = self.data.loc[d] mappable = func( subset, x=x, y=y, ax=ax, hue=hue, add_legend=False, _labels=False, **kwargs, ) self._mappables.append(mappable) xplt, yplt, hueplt, huelabel = _infer_line_data( darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue ) xlabel = label_from_attrs(xplt) ylabel = label_from_attrs(yplt) self._hue_var = hueplt self._finalize_grid(xlabel, ylabel) if add_legend and hueplt is not None and huelabel is not None: self.add_legend(label=huelabel) return self def map_dataset( self: T_FacetGrid, func: Callable, x: Hashable | None = None, y: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, add_guide: bool | None = None, **kwargs: Any, ) -> T_FacetGrid: from xarray.plot.dataset_plot import _infer_meta_data kwargs["add_guide"] = False if kwargs.get("markersize"): kwargs["size_mapping"] = _parse_size( self.data[kwargs["markersize"]], kwargs.pop("size_norm", None) ) meta_data = _infer_meta_data( self.data, x, y, hue, hue_style, add_guide, funcname=func.__name__ ) kwargs["meta_data"] = meta_data if hue and meta_data["hue_style"] == "continuous": cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, self.data[hue].to_numpy(), **kwargs ) kwargs["meta_data"]["cmap_params"] = cmap_params kwargs["meta_data"]["cbar_kwargs"] = cbar_kwargs kwargs["_is_facetgrid"] = True if func.__name__ == "quiver" and "scale" not in kwargs: raise ValueError("Please provide scale.") # TODO: come up with an algorithm for reasonable scale choice for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] maybe_mappable = func( ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs ) # TODO: this is needed to get legends to work. 
# but maybe_mappable is a list in that case :/ self._mappables.append(maybe_mappable) self._finalize_grid(meta_data["xlabel"], meta_data["ylabel"]) if hue: hue_label = meta_data.pop("hue_label", None) self._hue_label = hue_label if meta_data["add_legend"]: self._hue_var = meta_data["hue"] self.add_legend(label=hue_label) elif meta_data["add_colorbar"]: self.add_colorbar(label=hue_label, **cbar_kwargs) if meta_data["add_quiverkey"]: self.add_quiverkey(kwargs["u"], kwargs["v"]) return self def _finalize_grid(self, *axlabels: Hashable) -> None: """Finalize the annotations and layout.""" if not self._finalized: self.set_axis_labels(*axlabels) self.set_titles() self.fig.tight_layout() for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is None: ax.set_visible(False) self._finalized = True def _adjust_fig_for_guide(self, guide) -> None: # Draw the plot to set the bounding boxes correctly if hasattr(self.fig.canvas, "get_renderer"): renderer = self.fig.canvas.get_renderer() else: raise RuntimeError("MPL backend has no renderer") self.fig.draw(renderer) # Calculate and set the new width of the figure so the legend fits guide_width = guide.get_window_extent(renderer).width / self.fig.dpi figure_width = self.fig.get_figwidth() total_width = figure_width + guide_width self.fig.set_figwidth(total_width) # Draw the plot again to get the new transformations self.fig.draw(renderer) # Now calculate how much space we need on the right side guide_width = guide.get_window_extent(renderer).width / self.fig.dpi space_needed = guide_width / total_width + 0.02 # margin = .01 # _space_needed = margin + space_needed right = 1 - space_needed # Place the subplot axes to give space for the legend self.fig.subplots_adjust(right=right) def add_legend( self, *, label: str | None = None, use_legend_elements: bool = False, **kwargs: Any, ) -> None: if use_legend_elements: self.figlegend = _add_legend(**kwargs) else: assert self._hue_var is not None self.figlegend = self.fig.legend( handles=self._mappables[-1], labels=list(self._hue_var.to_numpy()), title=label if label is not None else label_from_attrs(self._hue_var), loc=kwargs.pop("loc", "center right"), **kwargs, ) self._adjust_fig_for_guide(self.figlegend) def add_colorbar(self, **kwargs: Any) -> None: """Draw a colorbar.""" kwargs = kwargs.copy() if self._cmap_extend is not None: kwargs.setdefault("extend", self._cmap_extend) # dont pass extend as kwarg if it is in the mappable if hasattr(self._mappables[-1], "extend"): kwargs.pop("extend", None) if "label" not in kwargs: from xarray import DataArray assert isinstance(self.data, DataArray) kwargs.setdefault("label", label_from_attrs(self.data)) self.cbar = self.fig.colorbar( self._mappables[-1], ax=list(self.axs.flat), **kwargs ) def add_quiverkey(self, u: Hashable, v: Hashable, **kwargs: Any) -> None: kwargs = kwargs.copy() magnitude = _get_nice_quiver_magnitude(self.data[u], self.data[v]) units = self.data[u].attrs.get("units", "") self.quiverkey = self.axs.flat[-1].quiverkey( self._mappables[-1], X=0.8, Y=0.9, U=magnitude, label=f"{magnitude}\n{units}", labelpos="E", coordinates="figure", ) # TODO: does not work because self.quiverkey.get_window_extent(renderer) = 0 # https://github.com/matplotlib/matplotlib/issues/18530 # self._adjust_fig_for_guide(self.quiverkey.text) def _get_largest_lims(self) -> dict[str, tuple[float, float]]: """ Get largest limits in the facetgrid. 
Returns ------- lims_largest : dict[str, tuple[float, float]] Dictionary with the largest limits along each axis. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w") >>> round(fg._get_largest_lims()["x"][0], 3) np.float64(-0.334) """ lims_largest: dict[str, tuple[float, float]] = dict( x=(np.inf, -np.inf), y=(np.inf, -np.inf), z=(np.inf, -np.inf) ) for axis in ("x", "y", "z"): # Find the plot with the largest xlim values: lower, upper = lims_largest[axis] for ax in self.axs.flat: get_lim: Callable[[], tuple[float, float]] | None = getattr( ax, f"get_{axis}lim", None ) if get_lim: lower_new, upper_new = get_lim() lower, upper = (min(lower, lower_new), max(upper, upper_new)) lims_largest[axis] = (lower, upper) return lims_largest def _set_lims( self, x: tuple[float, float] | None = None, y: tuple[float, float] | None = None, z: tuple[float, float] | None = None, ) -> None: """ Set the same limits for all the subplots in the facetgrid. Parameters ---------- x : tuple[float, float] or None, optional x axis limits. y : tuple[float, float] or None, optional y axis limits. z : tuple[float, float] or None, optional z axis limits. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w") >>> fg._set_lims(x=(-0.3, 0.3), y=(0, 2), z=(0, 4)) >>> fg.axs[0, 0].get_xlim(), fg.axs[0, 0].get_ylim() ((np.float64(-0.3), np.float64(0.3)), (np.float64(0.0), np.float64(2.0))) """ lims_largest = self._get_largest_lims() # Set limits: for ax in self.axs.flat: for (axis, data_limit), parameter_limit in zip( lims_largest.items(), (x, y, z), strict=True ): set_lim = getattr(ax, f"set_{axis}lim", None) if set_lim: set_lim(data_limit if parameter_limit is None else parameter_limit) def set_axis_labels(self, *axlabels: Hashable) -> None: """Set axis labels on the left column and bottom row of the grid.""" from xarray.core.dataarray import DataArray for var, axis in zip(axlabels, ["x", "y", "z"], strict=False): if var is not None: if isinstance(var, DataArray): getattr(self, f"set_{axis}labels")(label_from_attrs(var)) else: getattr(self, f"set_{axis}labels")(str(var)) def _set_labels( self, axis: str, axes: Iterable, label: str | None = None, **kwargs ) -> None: if label is None: label = label_from_attrs(self.data[getattr(self, f"_{axis}_var")]) for ax in axes: getattr(ax, f"set_{axis}label")(label, **kwargs) def set_xlabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the x axis on the bottom row of the grid.""" self._set_labels("x", self._bottom_axes, label, **kwargs) def set_ylabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the y axis on the left column of the grid.""" self._set_labels("y", self._left_axes, label, **kwargs) def set_zlabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the z axis.""" self._set_labels("z", self._left_axes, label, **kwargs) def set_titles( self, template: str = "{coord} = {value}", maxchar: int = 30, size=None, **kwargs, ) -> None: """ Draw titles either above each facet or on the grid margins. 
Parameters ---------- template : str, default: "{coord} = {value}" Template for plot titles containing {coord} and {value} maxchar : int, default: 30 Truncate titles at maxchar **kwargs : keyword args additional arguments to matplotlib.text Returns ------- self: FacetGrid object """ import matplotlib as mpl if size is None: size = mpl.rcParams["axes.labelsize"] nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template) if self._single_group: for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # Only label the ones with data if d is not None: coord, value = list(d.items()).pop() title = nicetitle(coord, value) ax.set_title(title, size=size, **kwargs) else: # The row titles on the right edge of the grid for index, (ax, row_name, handle) in enumerate( zip(self.axs[:, -1], self.row_names, self.row_labels, strict=True) ): title = nicetitle(coord=self._row_var, value=row_name) if not handle: self.row_labels[index] = ax.annotate( title, xy=(1.02, 0.5), xycoords="axes fraction", rotation=270, ha="left", va="center", **kwargs, ) else: handle.set_text(title) handle.update(kwargs) # The column titles on the top row for index, (ax, col_name, handle) in enumerate( zip(self.axs[0, :], self.col_names, self.col_labels, strict=True) ): title = nicetitle(coord=self._col_var, value=col_name) if not handle: self.col_labels[index] = ax.set_title(title, size=size, **kwargs) else: handle.set_text(title) handle.update(kwargs) def set_ticks( self, max_xticks: int = _NTICKS, max_yticks: int = _NTICKS, fontsize: str | int = _FONTSIZE, ) -> None: """ Set and control tick behavior. Parameters ---------- max_xticks, max_yticks : int, optional Maximum number of labeled ticks to plot on x, y axes fontsize : string or int Font size as used by matplotlib text Returns ------- self : FacetGrid object """ from matplotlib.ticker import MaxNLocator # Both are necessary x_major_locator = MaxNLocator(nbins=max_xticks) y_major_locator = MaxNLocator(nbins=max_yticks) for ax in self.axs.flat: ax.xaxis.set_major_locator(x_major_locator) ax.yaxis.set_major_locator(y_major_locator) for tick in itertools.chain( ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks() ): tick.label1.set_fontsize(fontsize) def map( self: T_FacetGrid, func: Callable, *args: Hashable, **kwargs: Any ) -> T_FacetGrid: """ Apply a plotting function to each facet's subset of the data. Parameters ---------- func : callable A plotting function that takes data and keyword arguments. It must plot to the currently active matplotlib Axes and take a `color` keyword argument. If faceting on the `hue` dimension, it must also take a `label` keyword argument. *args : Hashable Column names in self.data that identify variables with data to plot. The data for each variable is passed to `func` in the order the variables are specified in the call. **kwargs : keyword arguments All keyword arguments are passed to the plotting function. Returns ------- self : FacetGrid object """ import matplotlib.pyplot as plt for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is not None: data = self.data.loc[namedict] plt.sca(ax) innerargs = [data[a].to_numpy() for a in args] maybe_mappable = func(*innerargs, **kwargs) # TODO: better way to verify that an artist is mappable? 
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522 if maybe_mappable and hasattr(maybe_mappable, "autoscale_None"): self._mappables.append(maybe_mappable) self._finalize_grid(*args[:2]) return self def _easy_facetgrid( data: T_DataArrayOrSet, plotfunc: Callable, kind: Literal["line", "dataarray", "dataset", "plot1d"], x: Hashable | None = None, y: Hashable | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: float | None = None, size: float | None = None, subplot_kws: dict[str, Any] | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArrayOrSet]: """ Convenience method to call xarray.plot.FacetGrid from 2d plotting methods kwargs are the arguments to 2d plotting method """ if ax is not None: raise ValueError("Can't use axes when making faceted plots.") if aspect is None: aspect = 1 if size is None: size = 3 elif figsize is not None: raise ValueError("cannot provide both `figsize` and `size` arguments") if kwargs.get("z") is not None: # 3d plots doesn't support sharex, sharey, reset to mpl defaults: sharex = False sharey = False g = FacetGrid( data=data, col=col, row=row, col_wrap=col_wrap, sharex=sharex, sharey=sharey, figsize=figsize, aspect=aspect, size=size, subplot_kws=subplot_kws, ) if kind == "line": return g.map_dataarray_line(plotfunc, x, y, **kwargs) if kind == "dataarray": return g.map_dataarray(plotfunc, x, y, **kwargs) if kind == "plot1d": return g.map_plot1d(plotfunc, x, y, **kwargs) if kind == "dataset": return g.map_dataset(plotfunc, x, y, **kwargs) raise ValueError( f"kind must be one of `line`, `dataarray`, `dataset` or `plot1d`, got {kind}" ) xarray-2025.12.0/xarray/plot/utils.py000066400000000000000000001660771511464676000173620ustar00rootroot00000000000000from __future__ import annotations import itertools import textwrap import warnings from collections.abc import ( Callable, Hashable, Iterable, Mapping, MutableMapping, Sequence, ) from datetime import date, datetime from inspect import getfullargspec from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np import pandas as pd from xarray.core.indexes import PandasMultiIndex from xarray.core.options import OPTIONS from xarray.core.utils import ( attempt_import, is_scalar, module_available, ) from xarray.namedarray.pycompat import DuckArrayModule nc_time_axis_available = module_available("nc_time_axis") try: import cftime except ImportError: cftime = None if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.colors import Normalize from matplotlib.ticker import FuncFormatter from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import AspectOptions, ScaleOptions try: import matplotlib.pyplot as plt except ImportError: plt: Any = None # type: ignore[no-redef] ROBUST_PERCENTILE = 2.0 # copied from seaborn _MARKERSIZE_RANGE = (18.0, 36.0, 72.0) _LINEWIDTH_RANGE = (1.5, 1.5, 6.0) def _determine_extend(calc_data, vmin, vmax): extend_min = calc_data.min() < vmin extend_max = calc_data.max() > vmax if extend_min and extend_max: return "both" elif extend_min: return "min" elif extend_max: return "max" else: return "neither" def _build_discrete_cmap(cmap, levels, extend, filled): """ Build a discrete colormap and normalization of the data. 
""" import matplotlib as mpl if len(levels) == 1: levels = [levels[0], levels[0]] if not filled: # non-filled contour plots extend = "max" if extend == "both": ext_n = 2 elif extend in ["min", "max"]: ext_n = 1 else: ext_n = 0 n_colors = len(levels) + ext_n - 1 pal = _color_palette(cmap, n_colors) new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend) # copy the old cmap name, for easier testing new_cmap.name = getattr(cmap, "name", cmap) # copy colors to use for bad, under, and over values in case they have been # set to non-default values if isinstance(cmap, mpl.colors.Colormap): bad = cmap(np.nan) # Only update under and over if they were explicitly changed by the user # (i.e. are different from the lowest or highest values in cmap). Otherwise # leave unchanged so new_cmap uses its default values (its own lowest and # highest values). under = cmap(-np.inf) if under == cmap(0): under = None over = cmap(np.inf) if over == cmap(cmap.N - 1): over = None new_cmap = new_cmap.with_extremes(bad=bad, under=under, over=over) return new_cmap, cnorm def _color_palette(cmap, n_colors): import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap colors_i = np.linspace(0, 1.0, n_colors) if isinstance(cmap, list | tuple): # expand or truncate the list of colors to n_colors cmap = list(itertools.islice(itertools.cycle(cmap), n_colors)) cmap = ListedColormap(cmap) pal = cmap(colors_i) elif isinstance(cmap, str): # we have some sort of named palette try: # is this a matplotlib cmap? cmap = plt.get_cmap(cmap) pal = cmap(colors_i) except ValueError: # ValueError happens when mpl doesn't like a colormap, try seaborn try: from seaborn import color_palette pal = color_palette(cmap, n_colors=n_colors) except (ValueError, ImportError): # or maybe we just got a single color as a string cmap = ListedColormap([cmap] * n_colors) pal = cmap(colors_i) else: # cmap better be a LinearSegmentedColormap (e.g. viridis) pal = cmap(colors_i) return pal # _determine_cmap_params is adapted from Seaborn: # https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158 # Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE. def _determine_cmap_params( plot_data, vmin=None, vmax=None, cmap=None, center=None, robust=False, extend=None, levels=None, filled=True, norm=None, _is_facetgrid=False, ): """ Use some heuristics to set good defaults for colorbar and range. 
Parameters ---------- plot_data : Numpy array Doesn't handle xarray objects Returns ------- cmap_params : dict Use depends on the type of the plotting function """ if TYPE_CHECKING: import matplotlib as mpl else: mpl = attempt_import("matplotlib") if isinstance(levels, Iterable): levels = sorted(levels) calc_data = np.ravel(plot_data[np.isfinite(plot_data)]) # Handle all-NaN input data gracefully if calc_data.size == 0: # Arbitrary default for when all values are NaN calc_data = np.array(0.0) # Setting center=False prevents a divergent cmap possibly_divergent = center is not False # Set center to 0 so math below makes sense but remember its state center_is_none = False if center is None: center = 0 center_is_none = True # Setting both vmin and vmax prevents a divergent cmap if (vmin is not None) and (vmax is not None): possibly_divergent = False # Setting vmin or vmax implies linspaced levels user_minmax = (vmin is not None) or (vmax is not None) # vlim might be computed below vlim = None # save state; needed later vmin_was_none = vmin is None vmax_was_none = vmax is None if vmin is None: if robust: vmin = np.percentile(calc_data, ROBUST_PERCENTILE) else: vmin = calc_data.min() elif possibly_divergent: vlim = abs(vmin - center) if vmax is None: if robust: vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE) else: vmax = calc_data.max() elif possibly_divergent: vlim = abs(vmax - center) if possibly_divergent: levels_are_divergent = ( isinstance(levels, Iterable) and levels[0] * levels[-1] < 0 ) # kwargs not specific about divergent or not: infer defaults from data divergent = (vmin < 0 < vmax) or not center_is_none or levels_are_divergent else: divergent = False # A divergent map should be symmetric around the center value if divergent: if vlim is None: vlim = max(abs(vmin - center), abs(vmax - center)) vmin, vmax = -vlim, vlim # Now add in the centering value and set the limits vmin += center vmax += center # now check norm and harmonize with vmin, vmax if norm is not None: if norm.vmin is None: norm.vmin = vmin else: if not vmin_was_none and vmin != norm.vmin: raise ValueError("Cannot supply vmin and a norm with a different vmin.") vmin = norm.vmin if norm.vmax is None: norm.vmax = vmax else: if not vmax_was_none and vmax != norm.vmax: raise ValueError("Cannot supply vmax and a norm with a different vmax.") vmax = norm.vmax # if BoundaryNorm, then set levels if isinstance(norm, mpl.colors.BoundaryNorm): levels = norm.boundaries # Choose default colormaps if not provided if cmap is None: if divergent: cmap = OPTIONS["cmap_divergent"] else: cmap = OPTIONS["cmap_sequential"] # Handle discrete levels if levels is not None: if is_scalar(levels): if user_minmax: levels = np.linspace(vmin, vmax, levels) elif levels == 1: levels = np.asarray([(vmin + vmax) / 2]) else: # N in MaxNLocator refers to bins, not ticks ticker = mpl.ticker.MaxNLocator(levels - 1) levels = ticker.tick_values(vmin, vmax) vmin, vmax = levels[0], levels[-1] # GH3734 if vmin == vmax: vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax) if extend is None: extend = _determine_extend(calc_data, vmin, vmax) if (levels is not None) and (not isinstance(norm, mpl.colors.BoundaryNorm)): cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled) norm = newnorm if norm is None else norm # vmin & vmax needs to be None if norm is passed # TODO: always return a norm with vmin and vmax if norm is not None: vmin = None vmax = None return dict( vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm ) 
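# Minimal executable sketch of how the heuristics above play out. This helper
# is illustrative only: it is not part of xarray's API, is never called by the
# library, and assumes matplotlib is importable (as the rest of this module
# does).
def _example_cmap_params_sketch() -> None:
    # Data spanning zero is inferred as diverging: vmin/vmax become symmetric
    # about the (default) center of 0 and the divergent colormap option is used.
    diverging = _determine_cmap_params(np.array([-3.0, -1.0, 2.0, 3.0]))
    assert diverging["vmin"] == -diverging["vmax"]

    # Strictly positive data stays sequential and simply spans the data range,
    # with the sequential colormap option ("viridis" by default).
    sequential = _determine_cmap_params(np.array([1.0, 2.0, 5.0]))
    assert sequential["vmin"] == 1.0
    assert sequential["vmax"] == 5.0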
def _infer_xy_labels_3d( darray: DataArray | Dataset, x: Hashable | None, y: Hashable | None, rgb: Hashable | None, ) -> tuple[Hashable, Hashable]: """ Determine x and y labels for showing RGB images. Attempts to infer which dimension is RGB/RGBA by size and order of dims. """ assert rgb is None or rgb != x assert rgb is None or rgb != y # Start by detecting and reporting invalid combinations of arguments assert darray.ndim == 3 not_none = [a for a in (x, y, rgb) if a is not None] if len(set(not_none)) < len(not_none): raise ValueError( "Dimension names must be None or unique strings, but imshow was " f"passed x={x!r}, y={y!r}, and rgb={rgb!r}." ) for label in not_none: if label not in darray.dims: raise ValueError(f"{label!r} is not a dimension") # Then calculate rgb dimension if certain and check validity could_be_color = [ label for label in darray.dims if darray[label].size in (3, 4) and label not in (x, y) ] if rgb is None and not could_be_color: raise ValueError( "A 3-dimensional array was passed to imshow(), but there is no " "dimension that could be color. At least one dimension must be " "of size 3 (RGB) or 4 (RGBA), and not given as x or y." ) if rgb is None and len(could_be_color) == 1: rgb = could_be_color[0] if rgb is not None and darray[rgb].size not in (3, 4): raise ValueError( f"Cannot interpret dim {rgb!r} of size {darray[rgb].size} as RGB or RGBA." ) # If rgb dimension is still unknown, there must be two or three dimensions # in could_be_color. We therefore warn, and use a heuristic to break ties. if rgb is None: assert len(could_be_color) in (2, 3) rgb = could_be_color[-1] warnings.warn( "Several dimensions of this array could be colors. Xarray " f"will use the last possible dimension ({rgb!r}) to match " "matplotlib.pyplot.imshow. You can pass names of x, y, " "and/or rgb dimensions to override this guess.", stacklevel=2, ) assert rgb is not None # Finally, we pick out the red slice and delegate to the 2D version: return _infer_xy_labels(darray.isel({rgb: 0}), x, y) def _infer_xy_labels( darray: DataArray | Dataset, x: Hashable | None, y: Hashable | None, imshow: bool = False, rgb: Hashable | None = None, ) -> tuple[Hashable, Hashable]: """ Determine x and y labels. For use in _plot2d darray must be a 2 dimensional data array, or 3d for imshow only. """ if (x is not None) and (x == y): raise ValueError("x and y cannot be equal.") if imshow and darray.ndim == 3: return _infer_xy_labels_3d(darray, x, y, rgb) if x is None and y is None: if darray.ndim != 2: raise ValueError("DataArray must be 2d") y, x = darray.dims elif x is None: _assert_valid_xy(darray, y, "y") x = darray.dims[0] if y == darray.dims[1] else darray.dims[1] elif y is None: _assert_valid_xy(darray, x, "x") y = darray.dims[0] if x == darray.dims[1] else darray.dims[1] else: _assert_valid_xy(darray, x, "x") _assert_valid_xy(darray, y, "y") if darray._indexes.get(x, 1) is darray._indexes.get(y, 2) and isinstance( darray._indexes[x], PandasMultiIndex ): raise ValueError("x and y cannot be levels of the same MultiIndex") return x, y # TODO: Can by used to more than x or y, rename? 
def _assert_valid_xy( darray: DataArray | Dataset, xy: Hashable | None, name: str ) -> None: """ make sure x and y passed to plotting functions are valid """ # MultiIndex cannot be plotted; no point in allowing them here multiindex_dims = { idx.dim for idx in darray.xindexes.get_unique() if isinstance(idx, PandasMultiIndex) } valid_xy = (set(darray.dims) | set(darray.coords)) - multiindex_dims if (xy is not None) and (xy not in valid_xy): valid_xy_str = "', '".join(sorted(str(v) for v in valid_xy)) raise ValueError( f"{name} must be one of None, '{valid_xy_str}'. Received '{xy}' instead." ) def get_axis( figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, **subplot_kws: Any, ) -> Axes: if TYPE_CHECKING: import matplotlib as mpl import matplotlib.pyplot as plt else: mpl = attempt_import("matplotlib") plt = attempt_import("matplotlib.pyplot") if figsize is not None: if ax is not None: raise ValueError("cannot provide both `figsize` and `ax` arguments") if size is not None: raise ValueError("cannot provide both `figsize` and `size` arguments") _, ax = plt.subplots(figsize=figsize, subplot_kw=subplot_kws) return ax if size is not None: if ax is not None: raise ValueError("cannot provide both `size` and `ax` arguments") if aspect is None or aspect == "auto": width, height = mpl.rcParams["figure.figsize"] faspect = width / height elif aspect == "equal": faspect = 1 else: faspect = aspect figsize = (size * faspect, size) _, ax = plt.subplots(figsize=figsize, subplot_kw=subplot_kws) return ax if aspect is not None: raise ValueError("cannot provide `aspect` argument without `size`") if subplot_kws and ax is not None: raise ValueError("cannot use subplot_kws with existing ax") if ax is None: ax = _maybe_gca(**subplot_kws) return ax def _maybe_gca(**subplot_kws: Any) -> Axes: import matplotlib.pyplot as plt # can call gcf unconditionally: either it exists or would be created by plt.axes f = plt.gcf() # only call gca if an active axes exists if f.axes: # can not pass kwargs to active axes return plt.gca() return plt.axes(**subplot_kws) def _get_units_from_attrs(da: DataArray) -> str: """Extracts and formats the unit/units from their attributes.""" pint_array_type = DuckArrayModule("pint").type units = " [{}]" if isinstance(da.data, pint_array_type): return units.format(str(da.data.units)) if "units" in da.attrs: return units.format(da.attrs["units"]) if "unit" in da.attrs: return units.format(da.attrs["unit"]) return "" def label_from_attrs(da: DataArray | None, extra: str = "") -> str: """Makes informative labels if variable metadata (attrs) follows CF conventions.""" if da is None: return "" name: str = "{}" if "long_name" in da.attrs: name = name.format(da.attrs["long_name"]) elif "standard_name" in da.attrs: name = name.format(da.attrs["standard_name"]) elif da.name is not None: name = name.format(da.name) else: name = "" units = _get_units_from_attrs(da) # Treat `name` differently if it's a latex sequence if name.startswith("$") and (name.count("$") % 2 == 0): return "$\n$".join( textwrap.wrap(name + extra + units, 60, break_long_words=False) ) else: return "\n".join(textwrap.wrap(name + extra + units, 30)) def _interval_to_mid_points(array: Iterable[pd.Interval]) -> np.ndarray: """ Helper function which returns an array with the Intervals' mid points. 
""" return np.array([x.mid for x in array]) def _interval_to_bound_points(array: Sequence[pd.Interval]) -> np.ndarray: """ Helper function which returns an array with the Intervals' boundaries. """ array_boundaries = np.array([x.left for x in array]) array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right]))) return array_boundaries def _interval_to_double_bound_points( xarray: Iterable[pd.Interval], yarray: Iterable ) -> tuple[np.ndarray, np.ndarray]: """ Helper function to deal with a xarray consisting of pd.Intervals. Each interval is replaced with both boundaries. I.e. the length of xarray doubles. yarray is modified so it matches the new shape of xarray. """ xarray1 = np.array([x.left for x in xarray]) xarray2 = np.array([x.right for x in xarray]) xarray_out = np.array( list(itertools.chain.from_iterable(zip(xarray1, xarray2, strict=True))) ) yarray_out = np.array( list(itertools.chain.from_iterable(zip(yarray, yarray, strict=True))) ) return xarray_out, yarray_out def _resolve_intervals_1dplot( xval: np.ndarray, yval: np.ndarray, kwargs: dict ) -> tuple[np.ndarray, np.ndarray, str, str, dict]: """ Helper function to replace the values of x and/or y coordinate arrays containing pd.Interval with their mid-points or - for step plots - double points which double the length. """ x_suffix = "" y_suffix = "" # Is it a step plot? (see matplotlib.Axes.step) if kwargs.get("drawstyle", "").startswith("steps-"): remove_drawstyle = False # Convert intervals to double points x_is_interval = _valid_other_type(xval, pd.Interval) y_is_interval = _valid_other_type(yval, pd.Interval) if x_is_interval and y_is_interval: raise TypeError("Can't step plot intervals against intervals.") elif x_is_interval: xval, yval = _interval_to_double_bound_points(xval, yval) remove_drawstyle = True elif y_is_interval: yval, xval = _interval_to_double_bound_points(yval, xval) remove_drawstyle = True # Remove steps-* to be sure that matplotlib is not confused if remove_drawstyle: del kwargs["drawstyle"] # Is it another kind of plot? else: # Convert intervals to mid points and adjust labels if _valid_other_type(xval, pd.Interval): xval = _interval_to_mid_points(xval) x_suffix = "_center" if _valid_other_type(yval, pd.Interval): yval = _interval_to_mid_points(yval) y_suffix = "_center" # return converted arguments return xval, yval, x_suffix, y_suffix, kwargs def _resolve_intervals_2dplot(val, func_name): """ Helper function to replace the values of a coordinate array containing pd.Interval with their mid-points or - for pcolormesh - boundaries which increases length by 1. """ label_extra = "" if _valid_other_type(val, pd.Interval): if func_name == "pcolormesh": val = _interval_to_bound_points(val) else: val = _interval_to_mid_points(val) label_extra = "_center" return val, label_extra def _valid_other_type( x: ArrayLike, types: type[object] | tuple[type[object], ...] ) -> bool: """ Do all elements of x have a type from types? """ return all(isinstance(el, types) for el in np.ravel(x)) def _valid_numpy_subdtype(x, numpy_types): """ Is any dtype from numpy_types superior to the dtype of x? """ # If any of the types given in numpy_types is understood as numpy.generic, # all possible x will be considered valid. This is probably unwanted. for t in numpy_types: assert not np.issubdtype(np.generic, t) return any(np.issubdtype(x.dtype, t) for t in numpy_types) def _ensure_plottable(*args) -> None: """ Raise exception if there is anything in args that can't be plotted on an axis by matplotlib. 
""" numpy_types: tuple[type[object], ...] = ( np.floating, np.integer, np.timedelta64, np.datetime64, np.bool_, np.str_, ) other_types: tuple[type[object], ...] = (datetime, date) cftime_datetime_types: tuple[type[object], ...] = ( () if cftime is None else (cftime.datetime,) ) other_types += cftime_datetime_types for x in args: if not ( _valid_numpy_subdtype(np.asarray(x), numpy_types) or _valid_other_type(np.asarray(x), other_types) ): raise TypeError( "Plotting requires coordinates to be numeric, boolean, " "or dates of type numpy.datetime64, " "datetime.datetime, cftime.datetime or " f"pandas.Interval. Received data of type {np.asarray(x).dtype} instead." ) if _valid_other_type(np.asarray(x), cftime_datetime_types): if nc_time_axis_available: # Register cftime datetypes to matplotlib.units.registry, # otherwise matplotlib will raise an error: import nc_time_axis # noqa: F401 else: raise ImportError( "Plotting of arrays of cftime.datetime " "objects or arrays indexed by " "cftime.datetime objects requires the " "optional `nc-time-axis` (v1.2.0 or later) " "package." ) def _is_numeric(arr): numpy_types = [np.floating, np.integer] return _valid_numpy_subdtype(arr, numpy_types) def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params): cbar_kwargs.setdefault("extend", cmap_params["extend"]) if cbar_ax is None: cbar_kwargs.setdefault("ax", ax) else: cbar_kwargs.setdefault("cax", cbar_ax) # dont pass extend as kwarg if it is in the mappable if hasattr(primitive, "extend"): cbar_kwargs.pop("extend") fig = ax.get_figure() cbar = fig.colorbar(primitive, **cbar_kwargs) return cbar def _rescale_imshow_rgb(darray, vmin, vmax, robust): assert robust or vmin is not None or vmax is not None # Calculate vmin and vmax automatically for `robust=True` if robust: if vmax is None: vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE) if vmin is None: vmin = np.nanpercentile(darray, ROBUST_PERCENTILE) # If not robust and one bound is None, calculate the default other bound # and check that an interval between them exists. elif vmax is None: vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1 if vmax < vmin: raise ValueError( f"vmin={vmin!r} is less than the default vmax ({vmax!r}) - you must supply " "a vmax > vmin in this case." ) elif vmin is None: vmin = 0 if vmin > vmax: raise ValueError( f"vmax={vmax!r} is less than the default vmin (0) - you must supply " "a vmin < vmax in this case." ) # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float # to avoid precision loss, integer over/underflow, etc with extreme inputs. # After scaling, downcast to 32-bit float. This substantially reduces # memory usage after we hand `darray` off to matplotlib. darray = ((darray.astype("f8") - vmin) / (vmax - vmin)).astype("f4") return np.minimum(np.maximum(darray, 0), 1) def _update_axes( ax: Axes, xincrease: bool | None, yincrease: bool | None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, ) -> None: """ Update axes with provided parameters """ if xincrease is None: pass elif (xincrease and ax.xaxis_inverted()) or ( not xincrease and not ax.xaxis_inverted() ): ax.invert_xaxis() if yincrease is None: pass elif (yincrease and ax.yaxis_inverted()) or ( not yincrease and not ax.yaxis_inverted() ): ax.invert_yaxis() # The default xscale, yscale needs to be None. 
# If we set a scale it resets the axes formatters, # This means that set_xscale('linear') on a datetime axis # will remove the date labels. So only set the scale when explicitly # asked to. https://github.com/matplotlib/matplotlib/issues/8740 if xscale is not None: ax.set_xscale(xscale) if yscale is not None: ax.set_yscale(yscale) if xticks is not None: ax.set_xticks(xticks) if yticks is not None: ax.set_yticks(yticks) if xlim is not None: ax.set_xlim(xlim) if ylim is not None: ax.set_ylim(ylim) def _is_monotonic(coord, axis=0): """ >>> _is_monotonic(np.array([0, 1, 2])) np.True_ >>> _is_monotonic(np.array([2, 1, 0])) np.True_ >>> _is_monotonic(np.array([0, 2, 1])) np.False_ """ if coord.shape[axis] < 3: return True else: n = coord.shape[axis] delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take( np.arange(0, n - 1), axis=axis ) delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take( np.arange(0, n - 1), axis=axis ) return np.all(delta_pos) or np.all(delta_neg) def _infer_interval_breaks(coord, axis=0, scale=None, check_monotonic=False): """ >>> _infer_interval_breaks(np.arange(5)) array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]) >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1) array([[-0.5, 0.5, 1.5], [ 2.5, 3.5, 4.5]]) >>> _infer_interval_breaks(np.logspace(-2, 2, 5), scale="log") array([3.16227766e-03, 3.16227766e-02, 3.16227766e-01, 3.16227766e+00, 3.16227766e+01, 3.16227766e+02]) """ coord = np.asarray(coord) if check_monotonic and not _is_monotonic(coord, axis=axis): raise ValueError( "The input coordinate is not sorted in increasing " f"order along axis {axis}. This can lead to unexpected " "results. Consider calling the `sortby` method on " "the input DataArray. To plot data with categorical " "axes, consider using the `heatmap` function from " "the `seaborn` statistical plotting library." ) # If logscale, compute the intervals in the logarithmic space if scale == "log": if (coord <= 0).any(): raise ValueError( "Found negative or zero value in coordinates. " "Coordinates must be positive on logscale plots." 
) coord = np.log10(coord) deltas = 0.5 * np.diff(coord, axis=axis) if deltas.size == 0: deltas = np.array(0.0) first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis) last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis) trim_last = tuple( slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim) ) interval_breaks = np.concatenate( [first, coord[trim_last] + deltas, last], axis=axis ) if scale == "log": # Recovert the intervals into the linear space return np.power(10, interval_breaks) return interval_breaks def _process_cmap_cbar_kwargs( func, data, cmap=None, colors=None, cbar_kwargs: Iterable[tuple[str, Any]] | Mapping[str, Any] | None = None, levels=None, _is_facetgrid=False, **kwargs, ) -> tuple[dict[str, Any], dict[str, Any]]: """ Parameters ---------- func : plotting function data : ndarray, Data values Returns ------- cmap_params : dict cbar_kwargs : dict """ if func.__name__ == "surface": # Leave user to specify cmap settings for surface plots kwargs["cmap"] = cmap return { k: kwargs.get(k) for k in ["vmin", "vmax", "cmap", "extend", "levels", "norm"] }, {} cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs) # colors is mutually exclusive with cmap if cmap and colors: raise ValueError("Can't specify both cmap and colors.") # colors is only valid when levels is supplied or the plot is of type # contour or contourf if colors and (("contour" not in func.__name__) and (levels is None)): raise ValueError("Can only specify colors with contour or levels") # we should not be getting a list of colors in cmap anymore # is there a better way to do this test? if isinstance(cmap, list | tuple): raise ValueError( "Specifying a list of colors in cmap is deprecated. " "Use colors keyword instead." ) cmap_kwargs = { "plot_data": data, "levels": levels, "cmap": colors or cmap, "filled": func.__name__ != "contour", } cmap_args = getfullargspec(_determine_cmap_params).args cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs) if not _is_facetgrid: cmap_params = _determine_cmap_params(**cmap_kwargs) else: cmap_params = { k: cmap_kwargs[k] for k in ["vmin", "vmax", "cmap", "extend", "levels", "norm"] } return cmap_params, cbar_kwargs def _get_nice_quiver_magnitude(u, v): import matplotlib as mpl ticker = mpl.ticker.MaxNLocator(3) mean = np.mean(np.hypot(u.to_numpy(), v.to_numpy())) magnitude = ticker.tick_values(0, mean)[-2] return magnitude # Copied from matplotlib, tweaked so func can return strings. # https://github.com/matplotlib/matplotlib/issues/19555 def legend_elements( self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs ): """ Create legend handles and labels for a PathCollection. Each legend handle is a `.Line2D` representing the Path that was drawn, and each label is a string what each Path represents. This is useful for obtaining a legend for a `~.Axes.scatter` plot; e.g.:: scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3]) plt.legend(*scatter.legend_elements()) creates three legend elements, one for each color with the numerical values passed to *c* as the labels. Also see the :ref:`automatedlegendcreation` example. Parameters ---------- prop : {"colors", "sizes"}, default: "colors" If "colors", the legend handles will show the different colors of the collection. If "sizes", the legend will show the different sizes. To set both, use *kwargs* to directly edit the `.Line2D` properties. num : int, None, "auto" (default), array-like, or `~.ticker.Locator` Target number of elements to create. 
If None, use all unique elements of the mappable array. If an integer, target to use *num* elements in the normed range. If *"auto"*, try to determine which option better suits the nature of the data. The number of created elements may slightly deviate from *num* due to a `~.ticker.Locator` being used to find useful locations. If a list or array, use exactly those elements for the legend. Finally, a `~.ticker.Locator` can be provided. fmt : str, `~matplotlib.ticker.Formatter`, or None (default) The format or formatter to use for the labels. If a string must be a valid input for a `~.StrMethodFormatter`. If None (the default), use a `~.ScalarFormatter`. func : function, default: ``lambda x: x`` Function to calculate the labels. Often the size (or color) argument to `~.Axes.scatter` will have been pre-processed by the user using a function ``s = f(x)`` to make the markers visible; e.g. ``size = np.log10(x)``. Providing the inverse of this function here allows that pre-processing to be inverted, so that the legend labels have the correct values; e.g. ``func = lambda x: 10**x``. **kwargs Allowed keyword arguments are *color* and *size*. E.g. it may be useful to set the color of the markers if *prop="sizes"* is used; similarly to set the size of the markers if *prop="colors"* is used. Any further parameters are passed onto the `.Line2D` instance. This may be useful to e.g. specify a different *markeredgecolor* or *alpha* for the legend handles. Returns ------- handles : list of `.Line2D` Visual representation of each element of the legend. labels : list of str The string labels for elements of the legend. """ import matplotlib as mpl mlines = mpl.lines handles = [] labels = [] if prop == "colors": arr = self.get_array() if arr is None: warnings.warn( "Collection without array used. Make sure to " "specify the values to be colormapped via the " "`c` argument.", stacklevel=2, ) return handles, labels _size = kwargs.pop("size", mpl.rcParams["lines.markersize"]) def _get_color_and_size(value): return self.cmap(self.norm(value)), _size elif prop == "sizes": if isinstance(self, mpl.collections.LineCollection): arr = self.get_linewidths() else: arr = self.get_sizes() _color = kwargs.pop("color", "k") def _get_color_and_size(value): return _color, np.sqrt(value) else: raise ValueError( "Valid values for `prop` are 'colors' or " f"'sizes'. You supplied '{prop}' instead." 
) # Get the unique values and their labels: values = np.unique(arr) label_values = np.asarray(func(values)) label_values_are_numeric = np.issubdtype(label_values.dtype, np.number) # Handle the label format: if fmt is None and label_values_are_numeric: fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True) elif fmt is None and not label_values_are_numeric: fmt = mpl.ticker.StrMethodFormatter("{x}") elif isinstance(fmt, str): fmt = mpl.ticker.StrMethodFormatter(fmt) fmt.create_dummy_axis() if num == "auto": num = 9 if len(values) <= num: num = None if label_values_are_numeric: label_values_min = label_values.min() label_values_max = label_values.max() fmt.axis.set_view_interval(label_values_min, label_values_max) fmt.axis.set_data_interval(label_values_min, label_values_max) if num is not None: # Labels are numerical but larger than the target # number of elements, reduce to target using matplotlibs # ticker classes: if isinstance(num, mpl.ticker.Locator): loc = num elif np.iterable(num): loc = mpl.ticker.FixedLocator(num) else: num = int(num) loc = mpl.ticker.MaxNLocator( nbins=num, min_n_ticks=num - 1, steps=[1, 2, 2.5, 3, 5, 6, 8, 10] ) # Get nicely spaced label_values: label_values = loc.tick_values(label_values_min, label_values_max) # Remove extrapolated label_values: cond = (label_values >= label_values_min) & ( label_values <= label_values_max ) label_values = label_values[cond] # Get the corresponding values by creating a linear interpolant # with small step size: values_interp = np.linspace(values.min(), values.max(), 256) label_values_interp = func(values_interp) ix = np.argsort(label_values_interp) values = np.interp(label_values, label_values_interp[ix], values_interp[ix]) elif num is not None and not label_values_are_numeric: # Labels are not numerical so modifying label_values is not # possible, instead filter the array with nicely distributed # indexes: if type(num) is int: loc = mpl.ticker.LinearLocator(num) else: raise ValueError("`num` only supports integers for non-numeric labels.") ind = loc.tick_values(0, len(label_values) - 1).astype(int) label_values = label_values[ind] values = values[ind] # Some formatters requires set_locs: if hasattr(fmt, "set_locs"): fmt.set_locs(label_values) # Default settings for handles, add or override with kwargs: kw = dict(markeredgewidth=self.get_linewidths()[0], alpha=self.get_alpha()) kw.update(kwargs) for val, lab in zip(values, label_values, strict=True): color, size = _get_color_and_size(val) if isinstance(self, mpl.collections.PathCollection): kw.update(linestyle="", marker=self.get_paths()[0], markersize=size) elif isinstance(self, mpl.collections.LineCollection): kw.update(linestyle=self.get_linestyle()[0], linewidth=size) h = mlines.Line2D([0], [0], color=color, **kw) handles.append(h) labels.append(fmt(lab)) return handles, labels def _legend_add_subtitle(handles, labels, text): """Add a subtitle to legend handles.""" import matplotlib.pyplot as plt if text and len(handles) > 1: # Create a blank handle that's not visible, the # invisibility will be used to discern which are subtitles # or not: blank_handle = plt.Line2D([], [], label=text) blank_handle.set_visible(False) # Subtitles are shown first: handles = [blank_handle] + handles labels = [text] + labels return handles, labels def _adjust_legend_subtitles(legend): """Make invisible-handle "subtitles" entries look more like titles.""" import matplotlib.pyplot as plt # Legend title not in rcParams until 3.0 font_size = plt.rcParams.get("legend.title_fontsize", 
None) hpackers = legend.findobj(plt.matplotlib.offsetbox.VPacker)[0].get_children() hpackers = [v for v in hpackers if isinstance(v, plt.matplotlib.offsetbox.HPacker)] for hpack in hpackers: areas = hpack.get_children() if len(areas) < 2: continue draw_area, text_area = areas handles = draw_area.get_children() # Assume that all artists that are not visible are # subtitles: if not all(artist.get_visible() for artist in handles): # Remove the dummy marker which will bring the text # more to the center: draw_area.set_width(0) for text in text_area.get_children(): if font_size is not None: # The sutbtitles should have the same font size # as normal legend titles: text.set_size(font_size) def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname): dvars = set(ds.variables.keys()) error_msg = f" must be one of ({', '.join(sorted(str(v) for v in dvars))})" if x not in dvars: raise ValueError(f"Expected 'x' {error_msg}. Received {x} instead.") if y not in dvars: raise ValueError(f"Expected 'y' {error_msg}. Received {y} instead.") if hue is not None and hue not in dvars: raise ValueError(f"Expected 'hue' {error_msg}. Received {hue} instead.") if hue: hue_is_numeric = _is_numeric(ds[hue].values) if hue_style is None: hue_style = "continuous" if hue_is_numeric else "discrete" if not hue_is_numeric and (hue_style == "continuous"): raise ValueError( f"Cannot create a colorbar for a non numeric coordinate: {hue}" ) if add_guide is None or add_guide is True: add_colorbar = hue_style == "continuous" add_legend = hue_style == "discrete" else: add_colorbar = False add_legend = False else: if add_guide is True and funcname not in ("quiver", "streamplot"): raise ValueError("Cannot set add_guide when hue is None.") add_legend = False add_colorbar = False if (add_guide or add_guide is None) and funcname == "quiver": add_quiverkey = True if hue: add_colorbar = True if not hue_style: hue_style = "continuous" elif hue_style != "continuous": raise ValueError( "hue_style must be 'continuous' or None for .plot.quiver or " ".plot.streamplot" ) else: add_quiverkey = False if (add_guide or add_guide is None) and funcname == "streamplot" and hue: add_colorbar = True if not hue_style: hue_style = "continuous" elif hue_style != "continuous": raise ValueError( "hue_style must be 'continuous' or None for .plot.quiver or " ".plot.streamplot" ) if hue_style is not None and hue_style not in ["discrete", "continuous"]: raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.") if hue: hue_label = label_from_attrs(ds[hue]) hue = ds[hue] else: hue_label = None hue = None return { "add_colorbar": add_colorbar, "add_legend": add_legend, "add_quiverkey": add_quiverkey, "hue_label": hue_label, "hue_style": hue_style, "xlabel": label_from_attrs(ds[x]), "ylabel": label_from_attrs(ds[y]), "hue": hue, } @overload def _parse_size( data: None, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> None: ... @overload def _parse_size( data: DataArray, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> pd.Series: ... 
# copied from seaborn def _parse_size( data: DataArray | None, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> pd.Series | None: import matplotlib as mpl if data is None: return None flatdata = data.values.flatten() if not _is_numeric(flatdata): levels = np.unique(flatdata) numbers = np.arange(1, 1 + len(levels))[::-1] else: levels = numbers = np.sort(np.unique(flatdata)) min_width, _default_width, max_width = _MARKERSIZE_RANGE # width_range = min_width, max_width if norm is None: norm = mpl.colors.Normalize() elif isinstance(norm, tuple): norm = mpl.colors.Normalize(*norm) elif not isinstance(norm, mpl.colors.Normalize): err = "``size_norm`` must be None, tuple, or Normalize object." raise ValueError(err) assert isinstance(norm, mpl.colors.Normalize) norm.clip = True if not norm.scaled(): norm(np.asarray(numbers)) # limits = norm.vmin, norm.vmax scl = norm(numbers) widths = np.asarray(min_width + scl * (max_width - min_width)) if scl.mask.any(): widths[scl.mask] = 0 sizes = dict(zip(levels, widths, strict=True)) return pd.Series(sizes) class _Normalize(Sequence): """ Normalize numerical or categorical values to numerical values. The class includes helper methods that simplifies transforming to and from normalized values. Parameters ---------- data : DataArray DataArray to normalize. width : Sequence of three numbers, optional Normalize the data to these (min, default, max) values. The default is None. """ _data: DataArray | None _data_unique: np.ndarray _data_unique_index: np.ndarray _data_unique_inverse: np.ndarray _data_is_numeric: bool _width: tuple[float, float, float] | None __slots__ = ( "_data", "_data_is_numeric", "_data_unique", "_data_unique_index", "_data_unique_inverse", "_width", ) def __init__( self, data: DataArray | None, width: tuple[float, float, float] | None = None, _is_facetgrid: bool = False, ) -> None: self._data = data self._width = width if not _is_facetgrid else None pint_array_type = DuckArrayModule("pint").type to_unique = ( data.to_numpy() # type: ignore[union-attr] if isinstance(data if data is None else data.data, pint_array_type) else data ) data_unique, data_unique_inverse = np.unique(to_unique, return_inverse=True) # type: ignore[call-overload] self._data_unique = data_unique self._data_unique_index = np.arange(0, data_unique.size) self._data_unique_inverse = data_unique_inverse self._data_is_numeric = False if data is None else _is_numeric(data) def __repr__(self) -> str: with np.printoptions(precision=4, suppress=True, threshold=5): return ( f"<_Normalize(data, width={self._width})>\n" f"{self._data_unique} -> {self._values_unique}" ) def __len__(self) -> int: return len(self._data_unique) def __getitem__(self, key): return self._data_unique[key] @property def data(self) -> DataArray | None: return self._data @property def data_is_numeric(self) -> bool: """ Check if data is numeric. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).data_is_numeric False >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a).data_is_numeric True >>> # TODO: Datetime should be numeric right? >>> a = xr.DataArray(pd.date_range("2000-1-1", periods=4)) >>> _Normalize(a).data_is_numeric False # TODO: Timedelta should be numeric right? >>> a = xr.DataArray(pd.timedelta_range("-1D", periods=4, freq="D")) >>> _Normalize(a).data_is_numeric True """ return self._data_is_numeric @overload def _calc_widths(self, y: np.ndarray) -> np.ndarray: ... @overload def _calc_widths(self, y: DataArray) -> DataArray: ... 
def _calc_widths(self, y: np.ndarray | DataArray) -> np.ndarray | DataArray: """ Normalize the values so they're in between self._width. """ if self._width is None: return y xmin, xdefault, xmax = self._width diff_maxy_miny = np.max(y) - np.min(y) if diff_maxy_miny == 0: # Use default width if y is constant: widths = xdefault + 0 * y else: # Normalize in between xmin and xmax: k = (y - np.min(y)) / diff_maxy_miny widths = xmin + k * (xmax - xmin) return widths @overload def _indexes_centered(self, x: np.ndarray) -> np.ndarray: ... @overload def _indexes_centered(self, x: DataArray) -> DataArray: ... def _indexes_centered(self, x: np.ndarray | DataArray) -> np.ndarray | DataArray: """ Offset indexes to make sure they are in the center of self.levels. ["a", "b", "c"] -> [1, 3, 5] """ return x * 2 + 1 @property def values(self) -> DataArray | None: """ Return a normalized number array for the unique levels. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).values <xarray.DataArray (dim_0: 5)> Size: 40B array([3, 1, 1, 3, 5]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 5)> Size: 40B array([45., 18., 18., 45., 72.]) Dimensions without coordinates: dim_0 >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a).values <xarray.DataArray (dim_0: 6)> Size: 48B array([0.5, 0. , 0. , 0.5, 2. , 3. ]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 6)> Size: 48B array([27., 18., 18., 27., 54., 72.]) Dimensions without coordinates: dim_0 >>> _Normalize(a * 0, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 6)> Size: 48B array([36., 36., 36., 36., 36., 36.]) Dimensions without coordinates: dim_0 """ if self.data is None: return None val: DataArray if self.data_is_numeric: val = self.data else: arr = self._indexes_centered(self._data_unique_inverse) val = self.data.copy(data=arr.reshape(self.data.shape)) return self._calc_widths(val) @property def _values_unique(self) -> np.ndarray | None: """ Return unique values. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a)._values_unique array([1, 3, 5]) >>> _Normalize(a, width=(18, 36, 72))._values_unique array([18., 45., 72.]) >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a)._values_unique array([0. , 0.5, 2. , 3. ]) >>> _Normalize(a, width=(18, 36, 72))._values_unique array([18., 27., 54., 72.]) """ if self.data is None: return None val: np.ndarray if self.data_is_numeric: val = self._data_unique else: val = self._indexes_centered(self._data_unique_index) return self._calc_widths(val) @property def ticks(self) -> np.ndarray | None: """ Return ticks for plt.colorbar if the data is not numeric. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).ticks array([1, 3, 5]) """ val: np.ndarray | None if self.data_is_numeric: val = None else: val = self._indexes_centered(self._data_unique_index) return val @property def levels(self) -> np.ndarray: """ Return discrete levels that will evenly bound self.values. ["a", "b", "c"] -> [0, 2, 4, 6] Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).levels array([0, 2, 4, 6]) """ return ( np.append(self._data_unique_index, np.max(self._data_unique_index) + 1) * 2 ) @property def _lookup(self) -> pd.Series: if self._values_unique is None: raise ValueError("self.data can't be None.") return pd.Series(dict(zip(self._values_unique, self._data_unique, strict=True))) def _lookup_arr(self, x) -> np.ndarray: # Use reindex to be less sensitive to float errors. reindex only # works with sorted index.
# Return as numpy array since legend_elements # seems to require that: return self._lookup.sort_index().reindex(x, method="nearest").to_numpy() @property def format(self) -> FuncFormatter: """ Return a FuncFormatter that maps self.values elements back to the original value as a string. Useful with plt.colorbar. Examples -------- >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> aa = _Normalize(a, width=(0, 0.5, 1)) >>> aa._lookup 0.000000 0.0 0.166667 0.5 0.666667 2.0 1.000000 3.0 dtype: float64 >>> aa.format(1) '3.0' """ import matplotlib.pyplot as plt def _func(x: Any, pos: Any | None = None): return f"{self._lookup_arr([x])[0]}" return plt.FuncFormatter(_func) @property def func(self) -> Callable[[Any, Any | None], Any]: """ Return a lambda function that maps self.values elements back to the original value as a numpy array. Useful with ax.legend_elements. Examples -------- >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> aa = _Normalize(a, width=(0, 0.5, 1)) >>> aa._lookup 0.000000 0.0 0.166667 0.5 0.666667 2.0 1.000000 3.0 dtype: float64 >>> aa.func([0.16, 1]) array([0.5, 3. ]) """ def _func(x: Any, pos: Any | None = None): return self._lookup_arr(x) return _func def _determine_guide( hueplt_norm: _Normalize, sizeplt_norm: _Normalize, add_colorbar: bool | None = None, add_legend: bool | None = None, plotfunc_name: str | None = None, ) -> tuple[bool, bool]: if plotfunc_name == "hist": return False, False if (add_colorbar) and hueplt_norm.data is None: raise KeyError("Cannot create a colorbar when hue is None.") if add_colorbar is None: if hueplt_norm.data is not None: add_colorbar = True else: add_colorbar = False if add_legend and hueplt_norm.data is None and sizeplt_norm.data is None: raise KeyError("Cannot create a legend when hue and markersize is None.") if add_legend is None: if ( not add_colorbar and (hueplt_norm.data is not None and hueplt_norm.data_is_numeric is False) ) or sizeplt_norm.data is not None: add_legend = True else: add_legend = False return add_colorbar, add_legend def _add_legend( hueplt_norm: _Normalize, sizeplt_norm: _Normalize, primitive, legend_ax, plotfunc: str, ): primitive = primitive if isinstance(primitive, list) else [primitive] handles, labels = [], [] for huesizeplt, prop in [ (hueplt_norm, "colors"), (sizeplt_norm, "sizes"), ]: if huesizeplt.data is not None: # Get legend handles and labels that displays the # values correctly. Order might be different because # legend_elements uses np.unique instead of pd.unique, # FacetGrid.add_legend might have troubles with this: hdl, lbl = [], [] for p in primitive: hdl_, lbl_ = legend_elements(p, prop, num="auto", func=huesizeplt.func) hdl += hdl_ lbl += lbl_ # Only save unique values: u, ind = np.unique(lbl, return_index=True) ind = np.argsort(ind) lbl = cast(list, u[ind].tolist()) hdl = cast(list, np.array(hdl)[ind].tolist()) # Add a subtitle: hdl, lbl = _legend_add_subtitle(hdl, lbl, label_from_attrs(huesizeplt.data)) handles += hdl labels += lbl legend = legend_ax.legend(handles, labels, framealpha=0.5) _adjust_legend_subtitles(legend) return legend def _guess_coords_to_plot( darray: DataArray, coords_to_plot: MutableMapping[str, Hashable | None], kwargs: dict, default_guess: tuple[str, ...] = ("x",), # TODO: Can this be normalized, plt.cbook.normalize_kwargs? ignore_guess_kwargs: tuple[tuple[str, ...], ...] 
= ((),), ) -> MutableMapping[str, Hashable]: """ Guess what coords to plot if some of the values in coords_to_plot are None which happens when the user has not defined all available ways of visualizing the data. Parameters ---------- darray : DataArray The DataArray to check for available coords. coords_to_plot : MutableMapping[str, Hashable] Coords defined by the user to plot. kwargs : dict Extra kwargs that will be sent to matplotlib. default_guess : Iterable[str], optional Default values and order to retrieve dims if values in dims_plot is missing, default: ("x", "hue", "size"). ignore_guess_kwargs : tuple[tuple[str, ...], ...] Matplotlib arguments to ignore. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> # Only guess x by default: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={}, ... ) {'x': 'x', 'z': None, 'hue': None, 'size': None} >>> # Guess all plot dims with other default values: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'x', 'z': None, 'hue': 'y', 'size': 'z'} >>> # Don't guess ´size´, since the matplotlib kwarg ´s´ has been defined: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={"s": 5}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'x', 'z': None, 'hue': 'y', 'size': None} >>> # Prioritize ´size´ over ´s´: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": "x"}, ... kwargs={"s": 5}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'y', 'z': None, 'hue': 'z', 'size': 'x'} """ coords_to_plot_exist = {k: v for k, v in coords_to_plot.items() if v is not None} available_coords = tuple( k for k in darray.coords.keys() if k not in coords_to_plot_exist.values() ) # If dims_plot[k] isn't defined then fill with one of the available dims, unless # one of related mpl kwargs has been used. This should have similar behaviour as # * plt.plot(x, y) -> Multiple lines with different colors if y is 2d. # * plt.plot(x, y, color="red") -> Multiple red lines if y is 2d. for k, dim, ign_kws in zip( default_guess, available_coords, ignore_guess_kwargs, strict=False ): if coords_to_plot.get(k, None) is None and all( kwargs.get(ign_kw) is None for ign_kw in ign_kws ): coords_to_plot[k] = dim for k, dim in coords_to_plot.items(): _assert_valid_xy(darray, dim, k) return coords_to_plot def _set_concise_date(ax: Axes, axis: Literal["x", "y", "z"] = "x") -> None: """ Use ConciseDateFormatter which is meant to improve the strings chosen for the ticklabels, and to minimize the strings used in those tick labels as much as possible. https://matplotlib.org/stable/gallery/ticks/date_concise_formatter.html Parameters ---------- ax : Axes Figure axes. axis : Literal["x", "y", "z"], optional Which axis to make concise. The default is "x".
""" import matplotlib.dates as mdates locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) _axis = getattr(ax, f"{axis}axis") _axis.set_major_locator(locator) _axis.set_major_formatter(formatter) xarray-2025.12.0/xarray/py.typed000066400000000000000000000000001511464676000163360ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/000077500000000000000000000000001511464676000161405ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/__init__.py000066400000000000000000000000001511464676000202370ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/css/000077500000000000000000000000001511464676000167305ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/css/__init__.py000066400000000000000000000000001511464676000210270ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/css/style.css000066400000000000000000000207361511464676000206120ustar00rootroot00000000000000/* CSS stylesheet for displaying xarray objects in notebooks */ :root { --xr-font-color0: var( --jp-content-font-color0, var(--pst-color-text-base rgba(0, 0, 0, 1)) ); --xr-font-color2: var( --jp-content-font-color2, var(--pst-color-text-base, rgba(0, 0, 0, 0.54)) ); --xr-font-color3: var( --jp-content-font-color3, var(--pst-color-text-base, rgba(0, 0, 0, 0.38)) ); --xr-border-color: var( --jp-border-color2, hsl(from var(--pst-color-on-background, white) h s calc(l - 10)) ); --xr-disabled-color: var( --jp-layout-color3, hsl(from var(--pst-color-on-background, white) h s calc(l - 40)) ); --xr-background-color: var( --jp-layout-color0, var(--pst-color-on-background, white) ); --xr-background-color-row-even: var( --jp-layout-color1, hsl(from var(--pst-color-on-background, white) h s calc(l - 5)) ); --xr-background-color-row-odd: var( --jp-layout-color2, hsl(from var(--pst-color-on-background, white) h s calc(l - 15)) ); } html[theme="dark"], html[data-theme="dark"], body[data-theme="dark"], body.vscode-dark { --xr-font-color0: var( --jp-content-font-color0, var(--pst-color-text-base, rgba(255, 255, 255, 1)) ); --xr-font-color2: var( --jp-content-font-color2, var(--pst-color-text-base, rgba(255, 255, 255, 0.54)) ); --xr-font-color3: var( --jp-content-font-color3, var(--pst-color-text-base, rgba(255, 255, 255, 0.38)) ); --xr-border-color: var( --jp-border-color2, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 10)) ); --xr-disabled-color: var( --jp-layout-color3, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 40)) ); --xr-background-color: var( --jp-layout-color0, var(--pst-color-on-background, #111111) ); --xr-background-color-row-even: var( --jp-layout-color1, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 5)) ); --xr-background-color-row-odd: var( --jp-layout-color2, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 15)) ); } .xr-wrap { display: block !important; min-width: 300px; max-width: 700px; line-height: 1.6; } .xr-text-repr-fallback { /* fallback to plain text repr when CSS is not injected (untrusted notebook) */ display: none; } .xr-header { padding-top: 6px; padding-bottom: 6px; margin-bottom: 4px; border-bottom: solid 1px var(--xr-border-color); } .xr-header > div, .xr-header > ul { display: inline; margin-top: 0; margin-bottom: 0; } .xr-obj-type, .xr-obj-name, .xr-group-name { margin-left: 2px; margin-right: 10px; } .xr-group-name::before { content: "๐Ÿ“"; padding-right: 0.3em; } .xr-group-name, .xr-obj-type { color: var(--xr-font-color2); } .xr-sections { padding-left: 0 !important; display: grid; 
grid-template-columns: 150px auto auto 1fr 0 20px 0 20px; margin-block-start: 0; margin-block-end: 0; } .xr-section-item { display: contents; } .xr-section-item input { display: inline-block; opacity: 0; height: 0; margin: 0; } .xr-section-item input + label { color: var(--xr-disabled-color); border: 2px solid transparent !important; } .xr-section-item input:enabled + label { cursor: pointer; color: var(--xr-font-color2); } .xr-section-item input:focus + label { border: 2px solid var(--xr-font-color0) !important; } .xr-section-item input:enabled + label:hover { color: var(--xr-font-color0); } .xr-section-summary { grid-column: 1; color: var(--xr-font-color2); font-weight: 500; } .xr-section-summary > span { display: inline-block; padding-left: 0.5em; } .xr-section-summary-in:disabled + label { color: var(--xr-font-color2); } .xr-section-summary-in + label:before { display: inline-block; content: "โ–บ"; font-size: 11px; width: 15px; text-align: center; } .xr-section-summary-in:disabled + label:before { color: var(--xr-disabled-color); } .xr-section-summary-in:checked + label:before { content: "โ–ผ"; } .xr-section-summary-in:checked + label > span { display: none; } .xr-section-summary, .xr-section-inline-details { padding-top: 4px; } .xr-section-inline-details { grid-column: 2 / -1; } .xr-section-details { display: none; grid-column: 1 / -1; margin-top: 4px; margin-bottom: 5px; } .xr-section-summary-in:checked ~ .xr-section-details { display: contents; } .xr-group-box { display: inline-grid; grid-template-columns: 0px 20px auto; width: 100%; } .xr-group-box-vline { grid-column-start: 1; border-right: 0.2em solid; border-color: var(--xr-border-color); width: 0px; } .xr-group-box-hline { grid-column-start: 2; grid-row-start: 1; height: 1em; width: 20px; border-bottom: 0.2em solid; border-color: var(--xr-border-color); } .xr-group-box-contents { grid-column-start: 3; } .xr-array-wrap { grid-column: 1 / -1; display: grid; grid-template-columns: 20px auto; } .xr-array-wrap > label { grid-column: 1; vertical-align: top; } .xr-preview { color: var(--xr-font-color3); } .xr-array-preview, .xr-array-data { padding: 0 5px !important; grid-column: 2; } .xr-array-data, .xr-array-in:checked ~ .xr-array-preview { display: none; } .xr-array-in:checked ~ .xr-array-data, .xr-array-preview { display: inline-block; } .xr-dim-list { display: inline-block !important; list-style: none; padding: 0 !important; margin: 0; } .xr-dim-list li { display: inline-block; padding: 0; margin: 0; } .xr-dim-list:before { content: "("; } .xr-dim-list:after { content: ")"; } .xr-dim-list li:not(:last-child):after { content: ","; padding-right: 5px; } .xr-has-index { font-weight: bold; } .xr-var-list, .xr-var-item { display: contents; } .xr-var-item > div, .xr-var-item label, .xr-var-item > .xr-var-name span { background-color: var(--xr-background-color-row-even); border-color: var(--xr-background-color-row-odd); margin-bottom: 0; padding-top: 2px; } .xr-var-item > .xr-var-name:hover span { padding-right: 5px; } .xr-var-list > li:nth-child(odd) > div, .xr-var-list > li:nth-child(odd) > label, .xr-var-list > li:nth-child(odd) > .xr-var-name span { background-color: var(--xr-background-color-row-odd); border-color: var(--xr-background-color-row-even); } .xr-var-name { grid-column: 1; } .xr-var-dims { grid-column: 2; } .xr-var-dtype { grid-column: 3; text-align: right; color: var(--xr-font-color2); } .xr-var-preview { grid-column: 4; } .xr-index-preview { grid-column: 2 / 5; color: var(--xr-font-color2); } .xr-var-name, 
.xr-var-dims, .xr-var-dtype, .xr-preview, .xr-attrs dt { white-space: nowrap; overflow: hidden; text-overflow: ellipsis; padding-right: 10px; } .xr-var-name:hover, .xr-var-dims:hover, .xr-var-dtype:hover, .xr-attrs dt:hover { overflow: visible; width: auto; z-index: 1; } .xr-var-attrs, .xr-var-data, .xr-index-data { display: none; border-top: 2px dotted var(--xr-background-color); padding-bottom: 20px !important; padding-top: 10px !important; } .xr-var-attrs-in + label, .xr-var-data-in + label, .xr-index-data-in + label { padding: 0 1px; } .xr-var-attrs-in:checked ~ .xr-var-attrs, .xr-var-data-in:checked ~ .xr-var-data, .xr-index-data-in:checked ~ .xr-index-data { display: block; } .xr-var-data > table { float: right; } .xr-var-data > pre, .xr-index-data > pre, .xr-var-data > table > tbody > tr { background-color: transparent !important; } .xr-var-name span, .xr-var-data, .xr-index-name div, .xr-index-data, .xr-attrs { padding-left: 25px !important; } .xr-attrs, .xr-var-attrs, .xr-var-data, .xr-index-data { grid-column: 1 / -1; } dl.xr-attrs { padding: 0; margin: 0; display: grid; grid-template-columns: 125px auto; } .xr-attrs dt, .xr-attrs dd { padding: 0; margin: 0; float: left; padding-right: 10px; width: auto; } .xr-attrs dt { font-weight: normal; grid-column: 1; } .xr-attrs dt:hover span { display: inline-block; background: var(--xr-background-color); padding-right: 10px; } .xr-attrs dd { grid-column: 2; white-space: pre-wrap; word-break: break-all; } .xr-icon-database, .xr-icon-file-text2, .xr-no-icon { display: inline-block; vertical-align: middle; width: 1em; height: 1.5em !important; stroke-width: 0; stroke: currentColor; fill: currentColor; } .xr-var-attrs-in:checked + label > .xr-icon-file-text2, .xr-var-data-in:checked + label > .xr-icon-database, .xr-index-data-in:checked + label > .xr-icon-database { color: var(--xr-font-color0); filter: drop-shadow(1px 1px 5px var(--xr-font-color2)); stroke-width: 0.8px; } xarray-2025.12.0/xarray/static/html/000077500000000000000000000000001511464676000171045ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/html/__init__.py000066400000000000000000000000001511464676000212030ustar00rootroot00000000000000xarray-2025.12.0/xarray/static/html/icons-svg-inline.html000066400000000000000000000024771511464676000231700ustar00rootroot00000000000000 xarray-2025.12.0/xarray/structure/000077500000000000000000000000001511464676000167115ustar00rootroot00000000000000xarray-2025.12.0/xarray/structure/__init__.py000066400000000000000000000000001511464676000210100ustar00rootroot00000000000000xarray-2025.12.0/xarray/structure/alignment.py000066400000000000000000001303241511464676000212440ustar00rootroot00000000000000from __future__ import annotations import functools import operator from collections import defaultdict from collections.abc import Callable, Hashable, Iterable, Mapping from contextlib import suppress from itertools import starmap from typing import TYPE_CHECKING, Any, Final, Generic, TypeVar, get_args, overload import numpy as np import pandas as pd from xarray.core import dtypes from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, indexes_all_equal, safe_cast_to_index, ) from xarray.core.types import JoinOptions, T_Alignable from xarray.core.utils import emit_user_level_warning, is_dict_like, is_full_slice from xarray.core.variable import Variable, as_compatible_data, calculate_dimensions from xarray.util.deprecation_helpers import CombineKwargDefault if TYPE_CHECKING: from xarray.core.dataarray import 
DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( Alignable, T_DataArray, T_Dataset, T_DuckArray, ) class AlignmentError(ValueError): """Error class for alignment failures due to incompatible arguments.""" def reindex_variables( variables: Mapping[Any, Variable], dim_pos_indexers: Mapping[Any, Any], copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, ) -> dict[Hashable, Variable]: """Conform a dictionary of variables onto a new set of variables reindexed with dimension positional indexers and possibly filled with missing values. Not public API. """ new_variables = {} dim_sizes = calculate_dimensions(variables) masked_dims = set() unchanged_dims = set() for dim, indxr in dim_pos_indexers.items(): # Negative values in dim_pos_indexers mean values missing in the new index # See ``Index.reindex_like``. if (indxr < 0).any(): masked_dims.add(dim) elif np.array_equal(indxr, np.arange(dim_sizes.get(dim, 0))): unchanged_dims.add(dim) for name, var in variables.items(): if isinstance(fill_value, dict): fill_value_ = fill_value.get(name, dtypes.NA) else: fill_value_ = fill_value if sparse: var = var._as_sparse(fill_value=fill_value_) indxr = tuple( slice(None) if d in unchanged_dims else dim_pos_indexers.get(d, slice(None)) for d in var.dims ) needs_masking = any(d in masked_dims for d in var.dims) if needs_masking: new_var = var._getitem_with_mask(indxr, fill_value=fill_value_) elif all(is_full_slice(k) for k in indxr): # no reindexing necessary # here we need to manually deal with copying data, since # we neither created a new ndarray nor used fancy indexing new_var = var.copy(deep=copy) else: new_var = var[indxr] new_variables[name] = new_var return new_variables def _normalize_indexes( indexes: Mapping[Any, Any | T_DuckArray], ) -> Indexes: """Normalize the indexes/indexers given for re-indexing or alignment. Wrap any arbitrary array or `pandas.Index` as an Xarray `PandasIndex` associated with its corresponding dimension coordinate variable. """ xr_indexes: dict[Hashable, Index] = {} xr_variables: dict[Hashable, Variable] if isinstance(indexes, Indexes): xr_variables = dict(indexes.variables) else: xr_variables = {} for k, idx in indexes.items(): if not isinstance(idx, Index): if getattr(idx, "dims", (k,)) != (k,): raise AlignmentError( f"Indexer has dimensions {idx.dims} that are different " f"from that to be indexed along '{k}'" ) data: T_DuckArray = as_compatible_data(idx) pd_idx = safe_cast_to_index(data) if pd_idx.name != k: pd_idx = pd_idx.copy() pd_idx.name = k if isinstance(pd_idx, pd.MultiIndex): idx = PandasMultiIndex(pd_idx, k) else: idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype) xr_variables.update(idx.create_variables()) xr_indexes[k] = idx return Indexes(xr_indexes, xr_variables) CoordNamesAndDims = tuple[tuple[Hashable, tuple[Hashable, ...]], ...] MatchingIndexKey = tuple[CoordNamesAndDims, type[Index]] IndexesToAlign = dict[MatchingIndexKey, Index] IndexVarsToAlign = dict[MatchingIndexKey, dict[Hashable, Variable]] class Aligner(Generic[T_Alignable]): """Implements all the complex logic for the re-indexing and alignment of Xarray objects. For internal use only, not public API. Usage: aligner = Aligner(*objects, **kwargs) aligner.align() aligned_objects = aligner.results """ objects: tuple[T_Alignable, ...] results: tuple[T_Alignable, ...] objects_matching_index_vars: tuple[ dict[MatchingIndexKey, dict[Hashable, Variable]], ... 
] join: JoinOptions | CombineKwargDefault exclude_dims: frozenset[Hashable] exclude_vars: frozenset[Hashable] copy: bool fill_value: Any sparse: bool indexes: dict[MatchingIndexKey, Index] index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] all_indexes: dict[MatchingIndexKey, list[Index]] all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]] aligned_indexes: dict[MatchingIndexKey, Index] aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] reindex: dict[MatchingIndexKey, bool] keep_original_indexes: set[MatchingIndexKey] reindex_kwargs: dict[str, Any] unindexed_dim_sizes: dict[Hashable, set] new_indexes: Indexes[Index] def __init__( self, objects: Iterable[T_Alignable], join: JoinOptions | CombineKwargDefault = "inner", indexes: Mapping[Any, Any] | None = None, exclude_dims: str | Iterable[Hashable] = frozenset(), exclude_vars: Iterable[Hashable] = frozenset(), method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, ): self.objects = tuple(objects) self.objects_matching_indexes: tuple[Any, ...] = () self.objects_matching_index_vars = () if not isinstance(join, CombineKwargDefault) and join not in get_args( JoinOptions ): raise ValueError(f"invalid value for join: {join}") self.join = join self.copy = copy self.fill_value = fill_value self.sparse = sparse if method is None and tolerance is None: self.reindex_kwargs = {} else: self.reindex_kwargs = {"method": method, "tolerance": tolerance} if isinstance(exclude_dims, str): exclude_dims = [exclude_dims] self.exclude_dims = frozenset(exclude_dims) self.exclude_vars = frozenset(exclude_vars) if indexes is None: indexes = {} self.indexes, self.index_vars = self._collect_indexes( _normalize_indexes(indexes) ) self.all_indexes = {} self.all_index_vars = {} self.unindexed_dim_sizes = {} self.aligned_indexes = {} self.aligned_index_vars = {} self.reindex = {} self.keep_original_indexes = set() self.results = tuple() def _collect_indexes( self, indexes: Indexes ) -> tuple[IndexesToAlign, IndexVarsToAlign]: """Collect input and/or object indexes for alignment. Return new dictionaries of xarray Index objects and coordinate variables, whose keys are used to later retrieve all the indexes to compare with each other (based on the name and dimensions of their associated coordinate variables as well as the Index type). """ collected_indexes = {} collected_index_vars = {} for idx, idx_vars in indexes.group_by_index(): idx_coord_names_and_dims = [] idx_all_dims: set[Hashable] = set() for name, var in idx_vars.items(): dims = var.dims idx_coord_names_and_dims.append((name, dims)) idx_all_dims.update(dims) key: MatchingIndexKey = (tuple(idx_coord_names_and_dims), type(idx)) if idx_all_dims: exclude_dims = idx_all_dims & self.exclude_dims if exclude_dims == idx_all_dims: # Do not collect an index if all the dimensions it uses are # also excluded from the alignment continue elif exclude_dims: # If the dimensions used by index partially overlap with the dimensions # excluded from alignment, it is possible to check index equality along # non-excluded dimensions only. However, in this case each of the aligned # objects must retain (a copy of) their original index. Re-indexing and # overriding the index are not supported. 
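# A hypothetical illustration of the partial-overlap case above (for exposition
# only): an index spanning dims ("x", "y") when only "y" is listed in
# exclude_dims. Equality can still be checked along "x", but each object keeps
# its original index (see keep_original_indexes) instead of being re-indexed.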
if self.join == "override": excl_dims_str = ", ".join(str(d) for d in exclude_dims) incl_dims_str = ", ".join( str(d) for d in idx_all_dims - exclude_dims ) raise AlignmentError( f"cannot exclude dimension(s) {excl_dims_str} from alignment " "with `join='override` because these are used by an index " f"together with non-excluded dimensions {incl_dims_str}" "(cannot safely override the index)." ) else: self.keep_original_indexes.add(key) collected_indexes[key] = idx collected_index_vars[key] = idx_vars return collected_indexes, collected_index_vars def find_matching_indexes(self) -> None: all_indexes: dict[MatchingIndexKey, list[Index]] all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]] all_indexes_dim_sizes: dict[MatchingIndexKey, dict[Hashable, set]] objects_matching_indexes: list[dict[MatchingIndexKey, Index]] objects_matching_index_vars: list[ dict[MatchingIndexKey, dict[Hashable, Variable]] ] all_indexes = defaultdict(list) all_index_vars = defaultdict(list) all_indexes_dim_sizes = defaultdict(lambda: defaultdict(set)) objects_matching_indexes = [] objects_matching_index_vars = [] for obj in self.objects: obj_indexes, obj_index_vars = self._collect_indexes(obj.xindexes) objects_matching_indexes.append(obj_indexes) objects_matching_index_vars.append(obj_index_vars) for key, idx in obj_indexes.items(): all_indexes[key].append(idx) for key, index_vars in obj_index_vars.items(): all_index_vars[key].append(index_vars) for dim, size in calculate_dimensions(index_vars).items(): all_indexes_dim_sizes[key][dim].add(size) self.objects_matching_indexes = tuple(objects_matching_indexes) self.objects_matching_index_vars = tuple(objects_matching_index_vars) self.all_indexes = all_indexes self.all_index_vars = all_index_vars if self.join == "override": for dim_sizes in all_indexes_dim_sizes.values(): for dim, sizes in dim_sizes.items(): if len(sizes) > 1: raise AlignmentError( "cannot align objects with join='override' with matching indexes " f"along dimension {dim!r} that don't have the same size" ) def find_matching_unindexed_dims(self) -> None: unindexed_dim_sizes = defaultdict(set) for obj in self.objects: for dim in obj.dims: if dim not in self.exclude_dims and dim not in obj.xindexes.dims: unindexed_dim_sizes[dim].add(obj.sizes[dim]) self.unindexed_dim_sizes = unindexed_dim_sizes def _need_reindex(self, dim, cmp_indexes) -> bool: """Whether or not we need to reindex variables for a set of matching indexes. We don't reindex when all matching indexes are equal for two reasons: - It's faster for the usual case (already aligned objects). - It ensures it's possible to do operations that don't require alignment on indexes with duplicate values (which cannot be reindexed with pandas). This is useful, e.g., for overwriting such duplicate indexes. 
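        Illustrative example (added note, hypothetical objects): when all matching
        ``x`` indexes are equal, no reindexing happens, even if ``x`` contains
        duplicate labels; when the ``x`` indexes differ, a reindex is always
        required.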
""" if not indexes_all_equal(cmp_indexes, self.exclude_dims): # always reindex when matching indexes are not equal return True unindexed_dims_sizes = {} for d in dim: if d in self.unindexed_dim_sizes: sizes = self.unindexed_dim_sizes[d] if len(sizes) > 1: # reindex if different sizes are found for unindexed dims return True else: unindexed_dims_sizes[d] = next(iter(sizes)) if unindexed_dims_sizes: indexed_dims_sizes = {} for cmp in cmp_indexes: index_vars = cmp[1] for var in index_vars.values(): indexed_dims_sizes.update(var.sizes) for d, size in unindexed_dims_sizes.items(): if indexed_dims_sizes.get(d, -1) != size: # reindex if unindexed dimension size doesn't match return True return False def _get_index_joiner(self, index_cls) -> Callable: if self.join in ["outer", "inner"]: return functools.partial( functools.reduce, functools.partial(index_cls.join, how=self.join), ) elif self.join == "left": return operator.itemgetter(0) elif self.join == "right": return operator.itemgetter(-1) elif self.join == "override": # We rewrite all indexes and then use join='left' return operator.itemgetter(0) else: # join='exact' return dummy lambda (error is raised) return lambda _: None def align_indexes(self) -> None: """Compute all aligned indexes and their corresponding coordinate variables.""" aligned_indexes: dict[MatchingIndexKey, Index] = {} aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] = {} reindex: dict[MatchingIndexKey, bool] = {} new_indexes: dict[Hashable, Index] = {} new_index_vars: dict[Hashable, Variable] = {} def update_dicts( key: MatchingIndexKey, idx: Index, idx_vars: dict[Hashable, Variable], need_reindex: bool, ): reindex[key] = need_reindex aligned_indexes[key] = idx aligned_index_vars[key] = idx_vars for name, var in idx_vars.items(): if name in new_indexes: other_idx = new_indexes[name] other_var = new_index_vars[name] raise AlignmentError( f"cannot align objects on coordinate {name!r} because of conflicting indexes\n" f"first index: {idx!r}\nsecond index: {other_idx!r}\n" f"first variable: {var!r}\nsecond variable: {other_var!r}\n" ) new_indexes[name] = idx new_index_vars[name] = var for key, matching_indexes in self.all_indexes.items(): matching_index_vars = self.all_index_vars[key] dims = {d for coord in matching_index_vars[0].values() for d in coord.dims} index_cls = key[1] if self.join == "override": joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] need_reindex = False elif key in self.indexes: joined_index = self.indexes[key] joined_index_vars = self.index_vars[key] cmp_indexes = list( zip( [joined_index] + matching_indexes, [joined_index_vars] + matching_index_vars, strict=True, ) ) need_reindex = self._need_reindex(dims, cmp_indexes) else: if len(matching_indexes) > 1: need_reindex = self._need_reindex( dims, list(zip(matching_indexes, matching_index_vars, strict=True)), ) else: need_reindex = False if need_reindex: if ( isinstance(self.join, CombineKwargDefault) and self.join != "exact" ): emit_user_level_warning( self.join.warning_message( "This change will result in the following ValueError: " "cannot be aligned with join='exact' because " "index/labels/sizes are not equal along " "these coordinates (dimensions): " + ", ".join( f"{name!r} {dims!r}" for name, dims in key[0] ), recommend_set_options=False, ), FutureWarning, ) if self.join == "exact": raise AlignmentError( "cannot align objects with join='exact' where " "index/labels/sizes are not equal along " "these coordinates (dimensions): " + ", ".join(f"{name!r} 
{dims!r}" for name, dims in key[0]) + ( self.join.error_message() if isinstance(self.join, CombineKwargDefault) else "" ) ) joiner = self._get_index_joiner(index_cls) joined_index = joiner(matching_indexes) if self.join == "left": joined_index_vars = matching_index_vars[0] elif self.join == "right": joined_index_vars = matching_index_vars[-1] else: joined_index_vars = joined_index.create_variables() else: joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] update_dicts(key, joined_index, joined_index_vars, need_reindex) # Explicitly provided indexes that are not found in objects to align # may relate to unindexed dimensions so we add them too for key, idx in self.indexes.items(): if key not in aligned_indexes: index_vars = self.index_vars[key] update_dicts(key, idx, index_vars, False) self.aligned_indexes = aligned_indexes self.aligned_index_vars = aligned_index_vars self.reindex = reindex self.new_indexes = Indexes(new_indexes, new_index_vars) def assert_unindexed_dim_sizes_equal(self) -> None: for dim, sizes in self.unindexed_dim_sizes.items(): index_size = self.new_indexes.dims.get(dim) if index_size is not None: sizes.add(index_size) add_err_msg = ( f" (note: an index is found along that dimension " f"with size={index_size!r})" ) else: add_err_msg = "" if len(sizes) > 1: raise AlignmentError( f"cannot reindex or align along dimension {dim!r} " f"because of conflicting dimension sizes: {sizes!r}" + add_err_msg ) def override_indexes(self) -> None: objects = list(self.objects) for i, obj in enumerate(objects[1:]): new_indexes = {} new_variables = {} matching_indexes = self.objects_matching_indexes[i + 1] for key, aligned_idx in self.aligned_indexes.items(): obj_idx = matching_indexes.get(key) if obj_idx is not None: for name, var in self.aligned_index_vars[key].items(): new_indexes[name] = aligned_idx new_variables[name] = var.copy(deep=self.copy) objects[i + 1] = obj._overwrite_indexes(new_indexes, new_variables) self.results = tuple(objects) def _get_dim_pos_indexers( self, matching_indexes: dict[MatchingIndexKey, Index], ) -> dict[Hashable, Any]: dim_pos_indexers: dict[Hashable, Any] = {} dim_index: dict[Hashable, Index] = {} for key, aligned_idx in self.aligned_indexes.items(): obj_idx = matching_indexes.get(key) if obj_idx is not None and self.reindex[key]: indexers = obj_idx.reindex_like(aligned_idx, **self.reindex_kwargs) for dim, idxer in indexers.items(): if dim in self.exclude_dims: raise AlignmentError( f"cannot reindex or align along dimension {dim!r} because " "it is explicitly excluded from alignment. 
This is likely caused by " "wrong results returned by the `reindex_like` method of this index:\n" f"{obj_idx!r}" ) if dim in dim_pos_indexers and not np.array_equal( idxer, dim_pos_indexers[dim] ): raise AlignmentError( f"cannot reindex or align along dimension {dim!r} because " "of conflicting re-indexers returned by multiple indexes\n" f"first index: {obj_idx!r}\nsecond index: {dim_index[dim]!r}\n" ) dim_pos_indexers[dim] = idxer dim_index[dim] = obj_idx return dim_pos_indexers def _get_indexes_and_vars( self, obj: T_Alignable, matching_indexes: dict[MatchingIndexKey, Index], matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: new_indexes = {} new_variables = {} for key, aligned_idx in self.aligned_indexes.items(): aligned_idx_vars = self.aligned_index_vars[key] obj_idx = matching_indexes.get(key) obj_idx_vars = matching_index_vars.get(key) if obj_idx is None: # add the aligned index if it relates to unindexed dimensions in obj dims = {d for var in aligned_idx_vars.values() for d in var.dims} if dims <= set(obj.dims): obj_idx = aligned_idx if obj_idx is not None: # TODO: always copy object's index when no re-indexing is required? # (instead of assigning the aligned index) # (need performance assessment) if key in self.keep_original_indexes: assert self.reindex[key] is False new_idx = obj_idx.copy(deep=self.copy) new_idx_vars = new_idx.create_variables(obj_idx_vars) else: new_idx = aligned_idx new_idx_vars = { k: v.copy(deep=self.copy) for k, v in aligned_idx_vars.items() } new_indexes.update(dict.fromkeys(new_idx_vars, new_idx)) new_variables.update(new_idx_vars) return new_indexes, new_variables def _reindex_one( self, obj: T_Alignable, matching_indexes: dict[MatchingIndexKey, Index], matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]], ) -> T_Alignable: new_indexes, new_variables = self._get_indexes_and_vars( obj, matching_indexes, matching_index_vars ) dim_pos_indexers = self._get_dim_pos_indexers(matching_indexes) return obj._reindex_callback( self, dim_pos_indexers, new_variables, new_indexes, self.fill_value, self.exclude_dims, self.exclude_vars, ) def reindex_all(self) -> None: self.results = tuple( starmap( self._reindex_one, zip( self.objects, self.objects_matching_indexes, self.objects_matching_index_vars, strict=True, ), ) ) def align(self) -> None: if not self.indexes and len(self.objects) == 1: # fast path for the trivial case (obj,) = self.objects self.results = (obj.copy(deep=self.copy),) return self.find_matching_indexes() self.find_matching_unindexed_dims() self.align_indexes() self.assert_unindexed_dim_sizes_equal() if self.join == "override": self.override_indexes() elif self.join == "exact" and not self.copy: self.results = self.objects else: self.reindex_all() T_Obj1 = TypeVar("T_Obj1", bound="Alignable") T_Obj2 = TypeVar("T_Obj2", bound="Alignable") T_Obj3 = TypeVar("T_Obj3", bound="Alignable") T_Obj4 = TypeVar("T_Obj4", bound="Alignable") T_Obj5 = TypeVar("T_Obj5", bound="Alignable") @overload def align( obj1: T_Obj1, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2]: ... 
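# Illustrative sketch (commented out, added for exposition; ``da1``/``da2`` are
# hypothetical objects): the public ``align`` function defined below simply drives
# the internal ``Aligner`` class, roughly as its docstring describes:
#
#     da1 = DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
#     da2 = DataArray([4, 5], dims="x", coords={"x": [1, 2]})
#     aligner = Aligner((da1, da2), join="inner")
#     aligner.align()
#     aligned_da1, aligned_da2 = aligner.results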
@overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, obj5: T_Obj5, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ... @overload def align( *objects: T_Alignable, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Alignable, ...]: ... def align( *objects: T_Alignable, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Alignable, ...]: """ Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes. Array from the aligned objects are suitable as input to mathematical operators, because along each dimension they have the same index and size. Missing values (if ``join != 'inner'``) are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- *objects : Dataset or DataArray Objects to align. join : {"outer", "inner", "left", "right", "exact", "override"}, optional Method for joining the indexes of the passed objects along each dimension: - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. copy : bool, default: True If ``copy=True``, data in the return values is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, new xarray objects are always returned. indexes : dict-like, optional Any indexes explicitly provided with the `indexes` argument should be used in preference to the aligned indexes. exclude : str, iterable of hashable or None, optional Dimensions that must be excluded from alignment fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. Returns ------- aligned : tuple of DataArray or Dataset Tuple of objects with the same type as `*objects` with aligned coordinates. Raises ------ AlignmentError If any dimensions without labels on the arguments have different sizes, or a different size than the size of the aligned dimension labels. Examples -------- >>> x = xr.DataArray( ... [[25, 35], [10, 24]], ... dims=("lat", "lon"), ... 
coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ... ) >>> y = xr.DataArray( ... [[20, 5], [7, 13]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]}, ... ) >>> x Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> y Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y) >>> a Size: 16B array([[25, 35]]) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 16B array([[20, 5]]) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="outer") >>> a Size: 48B array([[25., 35.], [10., 24.], [nan, nan]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 48B array([[20., 5.], [nan, nan], [ 7., 13.]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="outer", fill_value=-999) >>> a Size: 48B array([[ 25, 35], [ 10, 24], [-999, -999]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 48B array([[ 20, 5], [-999, -999], [ 7, 13]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="left") >>> a Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20., 5.], [nan, nan]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="right") >>> a Size: 32B array([[25., 35.], [nan, nan]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="exact") Traceback (most recent call last): ... xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' ... >>> a, b = xr.align(x, y, join="override") >>> a Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 """ aligner = Aligner( objects, join=join, copy=copy, indexes=indexes, exclude_dims=exclude, fill_value=fill_value, ) aligner.align() return aligner.results def deep_align( objects: Iterable[Any], join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), raise_on_invalid: bool = True, fill_value=dtypes.NA, ) -> list[Any]: """Align objects for merging, recursing into dictionary values. This function is not public API. 
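    Illustrative usage (added note, not a doctest; ``ds`` and ``da`` are
    hypothetical objects): ``deep_align([ds, {"a": da}])`` aligns ``ds`` and ``da``
    together and returns ``[ds_aligned, {"a": da_aligned}]``, while dictionary
    values that are not alignable are passed through unchanged.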
""" from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if indexes is None: indexes = {} def is_alignable(obj): return isinstance(obj, Coordinates | DataArray | Dataset) positions: list[int] = [] keys: list[type[object] | Hashable] = [] out: list[Any] = [] targets: list[Alignable] = [] no_key: Final = object() not_replaced: Final = object() for position, variables in enumerate(objects): if is_alignable(variables): positions.append(position) keys.append(no_key) targets.append(variables) out.append(not_replaced) elif is_dict_like(variables): current_out = {} for k, v in variables.items(): if is_alignable(v) and k not in indexes: # Skip variables in indexes for alignment, because these # should to be overwritten instead: # https://github.com/pydata/xarray/issues/725 # https://github.com/pydata/xarray/issues/3377 # TODO(shoyer): doing this here feels super-hacky -- can we # move it explicitly into merge instead? positions.append(position) keys.append(k) targets.append(v) current_out[k] = not_replaced else: current_out[k] = v out.append(current_out) elif raise_on_invalid: raise ValueError( "object to align is neither an xarray.Dataset, " f"an xarray.DataArray nor a dictionary: {variables!r}" ) else: out.append(variables) aligned = align( *targets, join=join, copy=copy, indexes=indexes, exclude=exclude, fill_value=fill_value, ) for position, key, aligned_obj in zip(positions, keys, aligned, strict=True): if key is no_key: out[position] = aligned_obj else: out[position][key] = aligned_obj return out def reindex( obj: T_Alignable, indexers: Mapping[Any, Any], method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, exclude_vars: Iterable[Hashable] = frozenset(), ) -> T_Alignable: """Re-index either a Dataset or a DataArray. Not public API. """ # TODO: (benbovy - explicit indexes): uncomment? # --> from reindex docstrings: "any mismatched dimension is simply ignored" # bad_keys = [k for k in indexers if k not in obj._indexes and k not in obj.dims] # if bad_keys: # raise ValueError( # f"indexer keys {bad_keys} do not correspond to any indexed coordinate " # "or unindexed dimension in the object to reindex" # ) aligner = Aligner( (obj,), indexes=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, sparse=sparse, exclude_vars=exclude_vars, ) aligner.align() return aligner.results[0] def reindex_like( obj: T_Alignable, other: Dataset | DataArray, method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, ) -> T_Alignable: """Re-index either a Dataset or a DataArray like another Dataset/DataArray. Not public API. """ if not other._indexes: # This check is not performed in Aligner. 
for dim in other.dims: if dim in obj.dims: other_size = other.sizes[dim] obj_size = obj.sizes[dim] if other_size != obj_size: raise ValueError( "different size for unlabeled " f"dimension on argument {dim!r}: {other_size!r} vs {obj_size!r}" ) return reindex( obj, indexers=other.xindexes, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def _get_broadcast_dims_map_common_coords(args, exclude): common_coords = {} dims_map = {} for arg in args: for dim in arg.dims: if dim not in common_coords and dim not in exclude: dims_map[dim] = arg.sizes[dim] if dim in arg._indexes: common_coords.update(arg.xindexes.get_all_coords(dim)) return dims_map, common_coords def _broadcast_helper( arg: T_Alignable, exclude, dims_map, common_coords ) -> T_Alignable: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset def _set_dims(var): # Add excluded dims to a copy of dims_map var_dims_map = dims_map.copy() for dim in exclude: with suppress(ValueError): # ignore dim not in var.dims var_dims_map[dim] = var.shape[var.dims.index(dim)] return var.set_dims(var_dims_map) def _broadcast_array(array: T_DataArray) -> T_DataArray: data = _set_dims(array.variable) coords = dict(array.coords) coords.update(common_coords) return array.__class__( data, coords, data.dims, name=array.name, attrs=array.attrs ) def _broadcast_dataset(ds: T_Dataset) -> T_Dataset: data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars} coords = dict(ds.coords) coords.update(common_coords) return ds.__class__(data_vars, coords, ds.attrs) # remove casts once https://github.com/python/mypy/issues/12800 is resolved if isinstance(arg, DataArray): return _broadcast_array(arg) # type: ignore[return-value,unused-ignore] elif isinstance(arg, Dataset): return _broadcast_dataset(arg) # type: ignore[return-value,unused-ignore] else: raise ValueError("all input must be Dataset or DataArray objects") @overload def broadcast( obj1: T_Obj1, /, *, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Obj1]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, /, *, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Obj1, T_Obj2]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, obj5: T_Obj5, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ... @overload def broadcast( *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Alignable, ...]: ... def broadcast( *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Alignable, ...]: """Explicitly broadcast any number of DataArray or Dataset objects against one another. xarray objects automatically broadcast against each other in arithmetic operations, so this function should not be necessary for normal use. If no change is needed, the input data is returned to the output without being copied. Parameters ---------- *args : DataArray or Dataset Arrays to broadcast against each other. 
exclude : str, iterable of hashable or None, optional Dimensions that must not be broadcasted Returns ------- broadcast : tuple of DataArray or tuple of Dataset The same data as the input arrays, but with additional dimensions inserted so that all data arrays have the same dimensions and shape. Examples -------- Broadcast two data arrays against one another to fill out their dimensions: >>> a = xr.DataArray([1, 2, 3], dims="x") >>> b = xr.DataArray([5, 6], dims="y") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: x >>> b Size: 16B array([5, 6]) Dimensions without coordinates: y >>> a2, b2 = xr.broadcast(a, b) >>> a2 Size: 48B array([[1, 1], [2, 2], [3, 3]]) Dimensions without coordinates: x, y >>> b2 Size: 48B array([[5, 6], [5, 6], [5, 6]]) Dimensions without coordinates: x, y Fill out the dimensions of all data variables in a dataset: >>> ds = xr.Dataset({"a": a, "b": b}) >>> (ds2,) = xr.broadcast(ds) # use tuple unpacking to extract one dataset >>> ds2 Size: 96B Dimensions: (x: 3, y: 2) Dimensions without coordinates: x, y Data variables: a (x, y) int64 48B 1 1 2 2 3 3 b (x, y) int64 48B 5 6 5 6 5 6 """ if exclude is None: exclude = set() args = align(*args, join="outer", copy=False, exclude=exclude) dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) result = [_broadcast_helper(arg, exclude, dims_map, common_coords) for arg in args] return tuple(result) xarray-2025.12.0/xarray/structure/chunks.py000066400000000000000000000142111511464676000205550ustar00rootroot00000000000000""" Functions for handling chunked arrays. """ from __future__ import annotations import itertools from collections.abc import Hashable, Mapping from functools import lru_cache from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, overload from xarray.core import utils from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import ( ChunkManagerEntrypoint, get_chunked_array_type, guess_chunkmanager, ) if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import T_ChunkDim from xarray.core.variable import Variable MissingCoreDimOptions = Literal["raise", "copy", "drop"] @lru_cache(maxsize=512) def _get_breaks_cached( *, size: int, chunk_sizes: tuple[int, ...], preferred_chunk_sizes: int | tuple[int, ...], ) -> int | None: if isinstance(preferred_chunk_sizes, int) and preferred_chunk_sizes == 1: # short-circuit for the trivial case return None # Determine the stop indices of the preferred chunks, but omit the last stop # (equal to the dim size). In particular, assume that when a sequence # expresses the preferred chunks, the sequence sums to the size. preferred_stops = ( range(preferred_chunk_sizes, size, preferred_chunk_sizes) if isinstance(preferred_chunk_sizes, int) else set(itertools.accumulate(preferred_chunk_sizes[:-1])) ) # Gather any stop indices of the specified chunks that are not a stop index # of a preferred chunk. Again, omit the last stop, assuming that it equals # the dim size. 
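    # Worked example (added comment, illustrative values): with size=10,
    # chunk_sizes=(3, 3, 4) and preferred_chunk_sizes=5, the preferred stops are
    # {5} while the actual stops are 3 and 6, so the first disagreeing stop
    # returned below is 3.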
actual_stops = itertools.accumulate(chunk_sizes[:-1]) # This copy is required for parallel iteration actual_stops_2 = itertools.accumulate(chunk_sizes[:-1]) disagrees = itertools.compress( actual_stops_2, (a not in preferred_stops for a in actual_stops) ) try: return next(disagrees) except StopIteration: return None def _maybe_chunk( name: Hashable, var: Variable, chunks: Mapping[Any, T_ChunkDim] | None, token=None, lock=None, name_prefix: str = "xarray-", overwrite_encoded_chunks: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, ) -> Variable: from xarray.namedarray.daskmanager import DaskManager if chunks is not None: chunks = {dim: chunks[dim] for dim in var.dims if dim in chunks} if var.ndim: chunked_array_type = guess_chunkmanager( chunked_array_type ) # coerce string to ChunkManagerEntrypoint type if isinstance(chunked_array_type, DaskManager): from dask.base import tokenize # when rechunking by different amounts, make sure dask names change # by providing chunks as an input to tokenize. # subtle bugs result otherwise. see GH3350 # we use str() for speed, and use the name for the final array name on the next line token2 = tokenize(token or var._data, str(chunks)) name2 = f"{name_prefix}{name}-{token2}" from_array_kwargs = utils.consolidate_dask_from_array_kwargs( from_array_kwargs, name=name2, lock=lock, inline_array=inline_array, ) var = var.chunk( chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) if overwrite_encoded_chunks and var.chunks is not None: var.encoding["chunks"] = tuple(x[0] for x in var.chunks) return var else: return var _T = TypeVar("_T", bound=Union["Dataset", "DataArray"]) _U = TypeVar("_U", bound=Union["Dataset", "DataArray"]) _V = TypeVar("_V", bound=Union["Dataset", "DataArray"]) @overload def unify_chunks(obj: _T, /) -> tuple[_T]: ... @overload def unify_chunks(obj1: _T, obj2: _U, /) -> tuple[_T, _U]: ... @overload def unify_chunks(obj1: _T, obj2: _U, obj3: _V, /) -> tuple[_T, _U, _V]: ... @overload def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, ...]: ... def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, ...]: """ Given any number of Dataset and/or DataArray objects, returns new objects with unified chunk size along all chunked dimensions. 
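    Illustrative example (added note, not a doctest): if one Dataset is chunked
    with ``{"x": 5}`` and another with ``{"x": 10}`` along a shared dimension
    ``x``, the returned objects share a consistent (unified) chunking along ``x``.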
Returns ------- unified (DataArray or Dataset) โ€“ Tuple of objects with the same type as *objects with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ from xarray.core.dataarray import DataArray # Convert all objects to datasets datasets = [ obj._to_temp_dataset() if isinstance(obj, DataArray) else obj.copy() for obj in objects ] # Get arguments to pass into dask.array.core.unify_chunks unify_chunks_args = [] sizes: dict[Hashable, int] = {} for ds in datasets: for v in ds._variables.values(): if v.chunks is not None: # Check that sizes match across different datasets for dim, size in v.sizes.items(): try: if sizes[dim] != size: raise ValueError( f"Dimension {dim!r} size mismatch: {sizes[dim]} != {size}" ) except KeyError: sizes[dim] = size unify_chunks_args += [v._data, v._dims] # No dask arrays: Return inputs if not unify_chunks_args: return objects chunkmanager = get_chunked_array_type(*list(unify_chunks_args)) _, chunked_data = chunkmanager.unify_chunks(*unify_chunks_args) chunked_data_iter = iter(chunked_data) out: list[Dataset | DataArray] = [] for obj, ds in zip(objects, datasets, strict=True): for k, v in ds._variables.items(): if v.chunks is not None: ds._variables[k] = v.copy(data=next(chunked_data_iter)) out.append(obj._from_temp_dataset(ds) if isinstance(obj, DataArray) else ds) return tuple(out) xarray-2025.12.0/xarray/structure/combine.py000066400000000000000000001240521511464676000207030ustar00rootroot00000000000000from __future__ import annotations from collections import Counter, defaultdict from collections.abc import Callable, Hashable, Iterable, Iterator, Sequence from typing import TYPE_CHECKING, Literal, TypeAlias, TypeVar, cast, overload import pandas as pd from xarray.core import dtypes from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.utils import iterate_nested from xarray.structure.alignment import AlignmentError from xarray.structure.concat import concat from xarray.structure.merge import merge from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.types import ( CombineAttrsOptions, CompatOptions, JoinOptions, NestedSequence, ) T = TypeVar("T") def _infer_concat_order_from_positions( datasets: NestedSequence[T], ) -> dict[tuple[int, ...], T]: return dict(_infer_tile_ids_from_nested_list(datasets, ())) def _infer_tile_ids_from_nested_list( entry: NestedSequence[T], current_pos: tuple[int, ...] ) -> Iterator[tuple[tuple[int, ...], T]]: """ Given a list of lists (of lists...) of objects, returns an iterator which returns a tuple containing the index of each object in the nested list structure as the key, and the object. This can then be called by the dict constructor to create a dictionary of the objects organised by their position in the original nested list. Recursively traverses the given structure, while keeping track of the current position. Should work for any type of object which isn't a list. Parameters ---------- entry : list[list[obj, obj, ...], ...] List of lists of arbitrary depth, containing objects in the order they are to be concatenated. 
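        For example (added note, hypothetical objects ``a``-``d``): the nested
        input ``[[a, b], [c, d]]`` yields the pairs ``((0, 0), a)``,
        ``((0, 1), b)``, ``((1, 0), c)`` and ``((1, 1), d)``.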
Returns ------- combined_tile_ids : dict[tuple(int, ...), obj] """ if not isinstance(entry, str) and isinstance(entry, Sequence): for i, item in enumerate(entry): yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,)) else: yield current_pos, cast(T, entry) def _ensure_same_types(series, dim): if series.dtype == object: types = set(series.map(type)) if len(types) > 1: try: import cftime cftimes = any(issubclass(t, cftime.datetime) for t in types) except ImportError: cftimes = False types = ", ".join(t.__name__ for t in types) error_msg = ( f"Cannot combine along dimension '{dim}' with mixed types." f" Found: {types}." ) if cftimes: error_msg = ( f"{error_msg} If importing data directly from a file then " f"setting `use_cftime=True` may fix this issue." ) raise TypeError(error_msg) def _infer_concat_order_from_coords(datasets: list[Dataset] | list[DataTree]): concat_dims = [] tile_ids: list[tuple[int, ...]] = [() for ds in datasets] # All datasets have same variables because they've been grouped as such ds0 = datasets[0] for dim in ds0.dims: # Check if dim is a coordinate dimension if dim in ds0: # Need to read coordinate values to do ordering indexes: list[pd.Index] = [] for ds in datasets: index = ds._indexes.get(dim) if index is None: error_msg = ( f"Every dimension requires a corresponding 1D coordinate " f"and index for inferring concatenation order but the " f"coordinate '{dim}' has no corresponding index" ) raise ValueError(error_msg) # TODO (benbovy, flexible indexes): support flexible indexes? indexes.append(index.to_pandas_index()) # If dimension coordinate values are same on every dataset then # should be leaving this dimension alone (it's just a "bystander") if not all(index.equals(indexes[0]) for index in indexes[1:]): # Infer order datasets should be arranged in along this dim concat_dims.append(dim) if all(index.is_monotonic_increasing for index in indexes): ascending = True elif all(index.is_monotonic_decreasing for index in indexes): ascending = False else: raise ValueError( f"Coordinate variable {dim} is neither " "monotonically increasing nor " "monotonically decreasing on all datasets" ) # Assume that any two datasets whose coord along dim starts # with the same value have the same coord values throughout. if any(index.size == 0 for index in indexes): raise ValueError("Cannot handle size zero dimensions") first_items = pd.Index([index[0] for index in indexes]) series = first_items.to_series() # ensure series does not contain mixed types, e.g. 
cftime calendars _ensure_same_types(series, dim) # Sort datasets along dim # We want rank but with identical elements given identical # position indices - they should be concatenated along another # dimension, not along this one rank = series.rank( method="dense", ascending=ascending, numeric_only=False ) order = (rank.astype(int).values - 1).tolist() # Append positions along extra dimension to structure which # encodes the multi-dimensional concatenation order tile_ids = [ tile_id + (position,) for tile_id, position in zip(tile_ids, order, strict=True) ] if len(datasets) > 1 and not concat_dims: if any(isinstance(data, DataTree) for data in datasets): raise ValueError( "Did not find any dimension coordinates at root nodes " "to order the DataTree objects for concatenation" ) else: raise ValueError( "Could not find any dimension coordinates to use to " "order the Dataset objects for concatenation" ) combined_ids = dict(zip(tile_ids, datasets, strict=True)) return combined_ids, concat_dims def _check_dimension_depth_tile_ids(combined_tile_ids): """ Check all tuples are the same length, i.e. check that all lists are nested to the same depth. """ tile_ids = combined_tile_ids.keys() nesting_depths = [len(tile_id) for tile_id in tile_ids] if not nesting_depths: nesting_depths = [0] if set(nesting_depths) != {nesting_depths[0]}: raise ValueError( "The supplied objects do not form a hypercube because" " sub-lists do not have consistent depths" ) # return these just to be reused in _check_shape_tile_ids return tile_ids, nesting_depths def _check_shape_tile_ids(combined_tile_ids): """Check all lists along one dimension are same length.""" tile_ids, nesting_depths = _check_dimension_depth_tile_ids(combined_tile_ids) for dim in range(nesting_depths[0]): indices_along_dim = [tile_id[dim] for tile_id in tile_ids] occurrences = Counter(indices_along_dim) if len(set(occurrences.values())) != 1: raise ValueError( "The supplied objects do not form a hypercube " "because sub-lists do not have consistent " f"lengths along dimension {dim}" ) def _combine_nd( combined_ids, concat_dims, data_vars, coords, compat: CompatOptions | CombineKwargDefault, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Combines an N-dimensional structure of datasets into one by applying a series of either concat and merge operations along each dimension. No checks are performed on the consistency of the datasets, concat_dims or tile_IDs, because it is assumed that this has already been done. Parameters ---------- combined_ids : Dict[Tuple[int, ...]], xarray.Dataset | xarray.DataTree] Structure containing all datasets to be concatenated with "tile_IDs" as keys, which specify position within the desired final combined result. concat_dims : sequence of str The dimensions along which the datasets should be concatenated. Must be in order, and the length must match the length of the tuples used as keys in combined_ids. If the string is a dimension name then concat along that dimension, if it is None then merge. Returns ------- combined_ds : xarray.Dataset | xarray.DataTree """ example_tile_id = next(iter(combined_ids.keys())) n_dims = len(example_tile_id) if len(concat_dims) != n_dims: raise ValueError( f"concat_dims has length {len(concat_dims)} but the datasets " f"passed are nested in a {n_dims}-dimensional structure" ) # Each iteration of this loop reduces the length of the tile_ids tuples # by one. 
It always combines along the first dimension, removing the first # element of the tuple for concat_dim in concat_dims: combined_ids = _combine_all_along_first_dim( combined_ids, dim=concat_dim, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) (combined_ds,) = combined_ids.values() return combined_ds def _combine_all_along_first_dim( combined_ids, dim, data_vars, coords, compat: CompatOptions | CombineKwargDefault, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): # Group into lines of datasets which must be combined along dim grouped = groupby_defaultdict(list(combined_ids.items()), key=_new_tile_id) # Combine all of these datasets along dim new_combined_ids = {} for new_id, group in grouped: combined_ids = dict(sorted(group)) datasets = combined_ids.values() new_combined_ids[new_id] = _combine_1d( datasets, concat_dim=dim, compat=compat, data_vars=data_vars, coords=coords, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) return new_combined_ids def _combine_1d( datasets, concat_dim, compat: CompatOptions | CombineKwargDefault, data_vars, coords, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Applies either concat or merge to 1D list of datasets depending on value of concat_dim """ if concat_dim is not None: try: combined = concat( datasets, dim=concat_dim, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) except ValueError as err: if "encountered unexpected variable" in str(err): raise ValueError( "These objects cannot be combined using only " "xarray.combine_nested, instead either use " "xarray.combine_by_coords, or do it manually " "with xarray.concat, xarray.merge and " "xarray.align" ) from err else: raise else: try: combined = merge( datasets, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) except AlignmentError as e: e.add_note( "If you are intending to concatenate datasets, please specify the concatenation dimension explicitly. " "Using merge to concatenate is quite inefficient." ) raise e return combined def _new_tile_id(single_id_ds_pair): tile_id, _ds = single_id_ds_pair return tile_id[1:] def _nested_combine( datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): if len(datasets) == 0: return Dataset() # Arrange datasets for concatenation # Use information from the shape of the user input if not ids: # Determine tile_IDs by structure of input in N-D # (i.e. ordering in list-of-lists) combined_ids = _infer_concat_order_from_positions(datasets) else: # Already sorted so just use the ids already passed combined_ids = dict(zip(ids, datasets, strict=True)) # Check that the inferred shape is combinable _check_shape_tile_ids(combined_ids) # Apply series of concatenate or merge operations along each dimension combined = _combine_nd( combined_ids, concat_dims=concat_dims, compat=compat, data_vars=data_vars, coords=coords, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) return combined # Define types for arbitrarily-nested list of lists. # Mypy doesn't seem to handle overloads properly with recursive types, so we # explicitly expand the first handful of levels of recursion. 
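# Illustrative example (added comment, hypothetical datasets): a 2 x 2 grid passed
# to ``combine_nested`` as ``[[ds00, ds01], [ds10, ds11]]`` matches
# ``Sequence[Sequence[DatasetLike]]`` in the aliases below.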
DatasetLike: TypeAlias = DataArray | Dataset DatasetHyperCube: TypeAlias = ( DatasetLike | Sequence[DatasetLike] | Sequence[Sequence[DatasetLike]] | Sequence[Sequence[Sequence[DatasetLike]]] | Sequence[Sequence[Sequence[Sequence[DatasetLike]]]] ) DataTreeHyperCube: TypeAlias = ( DataTree | Sequence[DataTree] | Sequence[Sequence[DataTree]] | Sequence[Sequence[Sequence[DataTree]]] | Sequence[Sequence[Sequence[Sequence[DataTree]]]] ) @overload def combine_nested( datasets: DatasetHyperCube, concat_dim: str | DataArray | list[str] | Sequence[str | DataArray | pd.Index | None] | None, compat: str | CombineKwargDefault = ..., data_vars: str | CombineKwargDefault = ..., coords: str | CombineKwargDefault = ..., fill_value: object = ..., join: JoinOptions | CombineKwargDefault = ..., combine_attrs: CombineAttrsOptions = ..., ) -> Dataset: ... @overload def combine_nested( datasets: DataTreeHyperCube, concat_dim: str | DataArray | list[str] | Sequence[str | DataArray | pd.Index | None] | None, compat: str | CombineKwargDefault = ..., data_vars: str | CombineKwargDefault = ..., coords: str | CombineKwargDefault = ..., fill_value: object = ..., join: JoinOptions | CombineKwargDefault = ..., combine_attrs: CombineAttrsOptions = ..., ) -> DataTree: ... def combine_nested( datasets: DatasetHyperCube | DataTreeHyperCube, concat_dim: str | DataArray | list[str] | Sequence[str | DataArray | pd.Index | None] | None, compat: str | CombineKwargDefault = _COMPAT_DEFAULT, data_vars: str | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: str | CombineKwargDefault = _COORDS_DEFAULT, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "drop", ) -> Dataset | DataTree: """ Explicitly combine an N-dimensional grid of datasets into one by using a succession of concat and merge operations along each dimension of the grid. Does not sort the supplied datasets under any circumstances, so the datasets must be passed in the order you wish them to be concatenated. It does align coordinates, but different variables on datasets can cause it to fail under some scenarios. In complex cases, you may need to clean up your data and use concat/merge explicitly. To concatenate along multiple dimensions the datasets must be passed as a nested list-of-lists, with a depth equal to the length of ``concat_dims``. ``combine_nested`` will concatenate along the top-level list first. Useful for combining datasets from a set of nested directories, or for collecting the output of a simulation parallelized along multiple dimensions. Parameters ---------- datasets : list or nested list of Dataset, DataArray or DataTree Dataset objects to combine. If concatenation or merging along more than one dimension is desired, then datasets must be supplied in a nested list-of-lists. concat_dim : str, or list of str, DataArray, Index or None Dimensions along which to concatenate variables, as used by :py:func:`xarray.concat`. Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation and merge instead along a particular dimension. The position of ``None`` in the list specifies the dimension of the nested-list input along which to merge. Must be the same length as the depth of the list passed to ``datasets``. 
compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential merge conflicts: - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``, and ``"minimal"`` if ``dim`` is present in any of ``objs``. * list of dims: The listed data variables will be concatenated, in addition to the "minimal" data variables. coords : {"minimal", "different", "all" or list of str}, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. If concatenating over a dimension _not_ present in any of the objects, then all data variables will be concatenated along that new dimension. * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of Hashable: The listed coordinate variables will be concatenated, in addition to the "minimal" coordinates. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "drop" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. 
- "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- combined : xarray.Dataset or xarray.DataTree Examples -------- A common task is collecting data from a parallelized simulation in which each process wrote out to a separate file. A domain which was decomposed into 4 parts, 2 each along both the x and y axes, requires organising the datasets into a doubly-nested list, e.g: >>> x1y1 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> x1y1 Size: 64B Dimensions: (x: 2, y: 2) Dimensions without coordinates: x, y Data variables: temperature (x, y) float64 32B 1.764 0.4002 0.9787 2.241 precipitation (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514 >>> x1y2 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> x2y1 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> x2y2 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"]) >>> combined Size: 256B Dimensions: (x: 4, y: 4) Dimensions without coordinates: x, y Data variables: temperature (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872 precipitation (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782 ``combine_nested`` can also be used to explicitly merge datasets with different variables. For example if we have 4 datasets, which are divided along two times, and contain two different variables, we can pass ``None`` to ``concat_dim`` to specify the dimension of the nested list over which we wish to use ``merge`` instead of ``concat``: >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t1temp Size: 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: temperature (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23 >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> t1precip Size: 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: precipitation (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42 >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None]) >>> combined Size: 160B Dimensions: (t: 10) Dimensions without coordinates: t Data variables: temperature (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253 precipitation (t) float64 80B 1.202 -0.3873 -0.3023 ... 
-0.8955 0.3869 See also -------- concat merge """ any_datasets = any(isinstance(obj, Dataset) for obj in iterate_nested(datasets)) any_unnamed_arrays = any( isinstance(obj, DataArray) and obj.name is None for obj in iterate_nested(datasets) ) if any_datasets and any_unnamed_arrays: raise ValueError("Can't combine datasets with unnamed arrays.") any_datatrees = any(isinstance(obj, DataTree) for obj in iterate_nested(datasets)) all_datatrees = all(isinstance(obj, DataTree) for obj in iterate_nested(datasets)) if any_datatrees and not all_datatrees: raise ValueError("Can't combine a mix of DataTree and non-DataTree objects.") concat_dims = ( [concat_dim] if isinstance(concat_dim, str | DataArray) or concat_dim is None else concat_dim ) # The IDs argument tells _nested_combine that datasets aren't yet sorted return _nested_combine( datasets, concat_dims=concat_dims, compat=compat, data_vars=data_vars, coords=coords, ids=False, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) def vars_as_keys(ds): return tuple(sorted(ds)) K = TypeVar("K", bound=Hashable) def groupby_defaultdict( iter: list[T], key: Callable[[T], K], ) -> Iterator[tuple[K, Iterator[T]]]: """replacement for itertools.groupby""" idx = defaultdict(list) for i, obj in enumerate(iter): idx[key(obj)].append(i) for k, ix in idx.items(): yield k, (iter[i] for i in ix) def _combine_single_variable_hypercube( datasets, fill_value, data_vars, coords, compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Attempt to combine a list of Datasets into a hypercube using their coordinates. All provided Datasets must belong to a single variable, ie. must be assigned the same variable name. This precondition is not checked by this function, so the caller is assumed to know what it's doing. This function is NOT part of the public API. """ if len(datasets) == 0: raise ValueError( "At least one Dataset is required to resolve variable names " "for combined hypercube." ) combined_ids, concat_dims = _infer_concat_order_from_coords(list(datasets)) if fill_value is None: # check that datasets form complete hypercube _check_shape_tile_ids(combined_ids) else: # check only that all datasets have same dimension depth for these # vars _check_dimension_depth_tile_ids(combined_ids) # Concatenate along all of concat_dims one by one to create single ds concatenated = _combine_nd( combined_ids, concat_dims=concat_dims, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) # Check the overall coordinates are monotonically increasing for dim in concat_dims: indexes = concatenated.indexes.get(dim) if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing): raise ValueError( "Resulting object does not have monotonic" f" global indexes along dimension {dim}" ) return concatenated def combine_by_coords( data_objects: Iterable[Dataset | DataArray] = [], compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, data_vars: Literal["all", "minimal", "different"] | None | list[str] | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: str | CombineKwargDefault = _COORDS_DEFAULT, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "no_conflicts", ) -> Dataset | DataArray: """ Attempt to auto-magically combine the given datasets (or data arrays) into one by using dimension coordinates. 
This function attempts to combine a group of datasets along any number of dimensions into a single entity by inspecting coords and metadata and using a combination of concat and merge. Will attempt to order the datasets such that the values in their dimension coordinates are monotonic along all dimensions. If it cannot determine the order in which to concatenate the datasets, it will raise a ValueError. Non-coordinate dimensions will be ignored, as will any coordinate dimensions which do not vary between each dataset. Aligns coordinates, but different variables on datasets can cause it to fail under some scenarios. In complex cases, you may need to clean up your data and use concat/merge explicitly (also see `combine_nested`). Works well if, for example, you have N years of data and M data variables, and each combination of a distinct time period and set of data variables is saved as its own dataset. Also useful for if you have a simulation which is parallelized in multiple dimensions, but has global coordinates saved in each file specifying the positions of points within the global domain. Parameters ---------- data_objects : Iterable of Datasets or DataArrays Data objects to combine. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: - "minimal": Only data variables in which the dimension already appears are included. - "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - "all": All data variables will be concatenated. - list of str: The listed data variables will be concatenated, in addition to the "minimal" data variables. If objects are DataArrays, `data_vars` must be "all". coords : {"minimal", "different", "all"} or list of str, optional As per the "data_vars" kwarg, but for coordinate variables. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. If None, raises a ValueError if the passed Datasets do not create a complete hypercube. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. 
Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "no_conflicts" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- combined : xarray.Dataset or xarray.DataArray Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a DataArray will be returned. See also -------- concat merge combine_nested Examples -------- Combining two datasets using their common dimension coordinates. Notice they are concatenated based on the values in their dimension coordinates, not on their position in the list passed to `combine_by_coords`. >>> x1 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [0, 1], "x": [10, 20, 30]}, ... ) >>> x2 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [2, 3], "x": [10, 20, 30]}, ... ) >>> x3 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [2, 3], "x": [40, 50, 60]}, ... ) >>> x1 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 0 1 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92 precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289 >>> x2 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65 precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805 >>> x3 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 * x (x) int64 24B 40 50 60 Data variables: temperature (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293 precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176 >>> xr.combine_by_coords([x2, x1]) Size: 248B Dimensions: (y: 4, x: 3) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65 precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805 >>> xr.combine_by_coords([x3, x1], join="outer") Size: 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293 precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x3, x1], join="override") Size: 256B Dimensions: (y: 2, x: 6) Coordinates: * y (y) int64 16B 0 1 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 96B 10.98 14.3 12.06 ... 
18.89 10.44 8.293 precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x1, x2, x3], join="outer") Size: 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293 precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 You can also combine DataArray objects, but the behaviour will differ depending on whether or not the DataArrays are named. If all DataArrays are named then they will be promoted to Datasets before combining, and then the resultant Dataset will be returned, e.g. >>> named_da1 = xr.DataArray( ... name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x" ... ) >>> named_da1 Size: 16B array([1., 2.]) Coordinates: * x (x) int64 16B 0 1 >>> named_da2 = xr.DataArray( ... name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x" ... ) >>> named_da2 Size: 16B array([3., 4.]) Coordinates: * x (x) int64 16B 2 3 >>> xr.combine_by_coords([named_da1, named_da2]) Size: 64B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: a (x) float64 32B 1.0 2.0 3.0 4.0 If all the DataArrays are unnamed, a single DataArray will be returned, e.g. >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") >>> xr.combine_by_coords([unnamed_da1, unnamed_da2]) Size: 32B array([1., 2., 3., 4.]) Coordinates: * x (x) int64 32B 0 1 2 3 Finally, if you attempt to combine a mix of unnamed DataArrays with either named DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation). """ if any(isinstance(data_object, DataTree) for data_object in data_objects): raise NotImplementedError( "combine_by_coords() does not yet support DataTree objects." ) if not data_objects: return Dataset() objs_are_unnamed_dataarrays = [ isinstance(data_object, DataArray) and data_object.name is None for data_object in data_objects ] if any(objs_are_unnamed_dataarrays): if all(objs_are_unnamed_dataarrays): # Combine into a single larger DataArray temp_datasets = [ unnamed_dataarray._to_temp_dataset() for unnamed_dataarray in data_objects ] combined_temp_dataset = _combine_single_variable_hypercube( temp_datasets, fill_value=fill_value, data_vars=data_vars, coords=coords, compat=compat, join=join, combine_attrs=combine_attrs, ) return DataArray()._from_temp_dataset(combined_temp_dataset) else: # Must be a mix of unnamed dataarrays with either named dataarrays or with datasets # Can't combine these as we wouldn't know whether to merge or concatenate the arrays raise ValueError( "Can't automatically combine unnamed DataArrays with named DataArrays or Datasets." 
) else: # Promote any named DataArrays to single-variable Datasets to simplify combining data_objects = [ obj.to_dataset() if isinstance(obj, DataArray) else obj for obj in data_objects ] # Group by data vars grouped_by_vars = groupby_defaultdict(data_objects, key=vars_as_keys) # Perform the multidimensional combine on each group of data variables # before merging back together concatenated_grouped_by_data_vars = tuple( _combine_single_variable_hypercube( tuple(datasets_with_same_vars), fill_value=fill_value, data_vars=data_vars, coords=coords, compat=compat, join=join, combine_attrs=combine_attrs, ) for vars, datasets_with_same_vars in grouped_by_vars ) return merge( concatenated_grouped_by_data_vars, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) xarray-2025.12.0/xarray/structure/concat.py000066400000000000000000001145201511464676000205350ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Hashable, Iterable from typing import TYPE_CHECKING, Any, Literal, Union, overload import numpy as np import pandas as pd from xarray.core import dtypes, utils from xarray.core.coordinates import Coordinates from xarray.core.duck_array_ops import lazy_array_equiv from xarray.core.indexes import Index, PandasIndex from xarray.core.types import T_DataArray, T_Dataset, T_Variable from xarray.core.utils import emit_user_level_warning from xarray.core.variable import Variable from xarray.core.variable import concat as concat_vars from xarray.structure.alignment import align, reindex_variables from xarray.structure.merge import ( _VALID_COMPAT, collect_variables_and_indexes, merge_attrs, merge_collected, ) from xarray.util.deprecation_helpers import ( _COMPAT_CONCAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.datatree import DataTree from xarray.core.types import ( CombineAttrsOptions, CompatOptions, ConcatOptions, JoinOptions, ) T_DataVars = Union[ConcatOptions, Iterable[Hashable], None] @overload def concat( objs: Iterable[DataTree], dim: Hashable | T_Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions: Iterable[Iterable[int]] | None = None, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ) -> DataTree: ... # TODO: replace dim: Any by 1D array_likes @overload def concat( objs: Iterable[T_Dataset], dim: Hashable | T_Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions: Iterable[Iterable[int]] | None = None, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ) -> T_Dataset: ... 
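# Editorial note (illustrative sketch, not part of the upstream module): the
# overloads above and below describe the same runtime behaviour. ``concat``
# peeks at the first object and dispatches to the DataTree, DataArray or
# Dataset implementation defined later in this file. A minimal usage sketch,
# assuming ``xarray`` and ``numpy`` are importable as ``xr`` and ``np``:
#
#     da = xr.DataArray(np.arange(3), dims="x", coords={"x": [0, 1, 2]})
#     stacked = xr.concat([da, da + 10], dim="t")  # "t" is a new dimension
#     # stacked.dims == ("t", "x"), with sizes 2 and 3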
@overload def concat( objs: Iterable[T_DataArray], dim: Hashable | T_Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions: Iterable[Iterable[int]] | None = None, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ) -> T_DataArray: ... def concat( objs, dim, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions=None, fill_value=dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ): """Concatenate xarray objects along a new or existing dimension. Parameters ---------- objs : sequence of DataArray, Dataset or DataTree xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. dim : Hashable or Variable or DataArray or pandas.Index Name of the dimension to concatenate along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. If dimension is provided as a Variable, DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. data_vars : {"minimal", "different", "all", None} or list of Hashable, optional These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``, and ``"minimal"`` if ``dim`` is present in any of ``objs``. * list of dims: The listed data variables will be concatenated, in addition to the "minimal" data variables. If objects are DataArrays, data_vars must be "all". coords : {"minimal", "different", "all"} or list of Hashable, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of Hashable: The listed coordinate variables will be concatenated, in addition to the "minimal" coordinates. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. 
- "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. create_index_for_new_dim : bool, default: True Whether to create a new ``PandasIndex`` object when the objects being concatenated contain scalar variables named ``dim``. Returns ------- concatenated : type of objs See also -------- merge Examples -------- >>> da = xr.DataArray( ... np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])] ... ) >>> da Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "x", coords="minimal") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim", coords="all") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * y (y) int64 24B 10 20 30 x (new_dim) >> xr.concat( ... [da.isel(x=0), da.isel(x=1)], ... pd.Index([-90, -100], name="new_dim"), ... coords="all", ... 
) Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * new_dim (new_dim) int64 16B -90 -100 * y (y) int64 24B 10 20 30 x (new_dim) >> ds = xr.Dataset(coords={"x": 0}) >>> xr.concat([ds, ds], dim="x") Size: 16B Dimensions: (x: 2) Coordinates: * x (x) int64 16B 0 0 Data variables: *empty* >>> xr.concat([ds, ds], dim="x").indexes Indexes: x Index([0, 0], dtype='int64', name='x') >>> xr.concat([ds, ds], dim="x", create_index_for_new_dim=False).indexes Indexes: *empty* """ # TODO: add ignore_index arguments copied from pandas.concat # TODO: support concatenating scalar coordinates even if the concatenated # dimension already exists from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree try: first_obj, objs = utils.peek_at(objs) except StopIteration as err: raise ValueError("must supply at least one object to concatenate") from err if not isinstance(compat, CombineKwargDefault) and compat not in set( _VALID_COMPAT ) - {"minimal"}: raise ValueError( f"compat={compat!r} invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'" ) if isinstance(first_obj, DataTree): return _datatree_concat( objs, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) elif isinstance(first_obj, DataArray): return _dataarray_concat( objs, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) elif isinstance(first_obj, Dataset): return _dataset_concat( objs, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) else: raise TypeError( "can only concatenate xarray Dataset and DataArray " f"objects, got {type(first_obj)}" ) def _calc_concat_dim_index( dim_or_data: Hashable | Any, ) -> tuple[Hashable, PandasIndex | None]: """Infer the dimension name and 1d index / coordinate variable (if appropriate) for concatenating along the new dimension. """ from xarray.core.dataarray import DataArray dim: Hashable | None if utils.hashable(dim_or_data): dim = dim_or_data index = None else: if not isinstance(dim_or_data, DataArray | Variable): dim = getattr(dim_or_data, "name", None) if dim is None: dim = "concat_dim" else: (dim,) = dim_or_data.dims coord_dtype = getattr(dim_or_data, "dtype", None) index = PandasIndex(dim_or_data, dim, coord_dtype=coord_dtype) return dim, index def _calc_concat_over( datasets: list[T_Dataset], dim: Hashable, all_dims: set[Hashable], data_vars: T_DataVars | Iterable[Hashable] | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, ) -> tuple[set[Hashable], dict[Hashable, bool], list[int], set[Hashable]]: """ Determine which dataset variables need to be concatenated in the result, """ # variables to be concatenated concat_over = set() # variables checked for equality equals: dict[Hashable, bool] = {} # skip merging these variables. # if concatenating over a dimension 'x' that is associated with an index over 2 variables, # 'x' and 'y', then we assert join="equals" on `y` and don't need to merge it. 
# that assertion happens in the align step prior to this function being called skip_merge: set[Hashable] = set() if dim in all_dims: concat_over_existing_dim = True concat_over.add(dim) else: concat_over_existing_dim = False if data_vars == "minimal" and coords == "minimal" and not concat_over_existing_dim: raise ValueError( "Cannot specify both data_vars='minimal' and coords='minimal' when " "concatenating over a new dimension." ) if data_vars is None or ( isinstance(data_vars, CombineKwargDefault) and data_vars._value is None ): data_vars = "minimal" if concat_over_existing_dim else "all" concat_dim_lengths = [] for ds in datasets: if concat_over_existing_dim and dim not in ds.dims and dim in ds: ds = ds.set_coords(dim) concat_over.update(k for k, v in ds.variables.items() if dim in v.dims) for _, idx_vars in ds.xindexes.group_by_index(): if any(dim in v.dims for v in idx_vars.values()): skip_merge.update(idx_vars.keys()) concat_dim_lengths.append(ds.sizes.get(dim, 1)) def process_subset_opt( opt: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, subset: Literal["coords", "data_vars"], ) -> None: original = set(concat_over) compat_str = ( compat._value if isinstance(compat, CombineKwargDefault) else compat ) assert compat_str is not None if isinstance(opt, str | CombineKwargDefault): if opt == "different": if isinstance(compat, CombineKwargDefault) and compat != "override": if not isinstance(opt, CombineKwargDefault): emit_user_level_warning( compat.warning_message( "This change will result in the following ValueError: " f"Cannot specify both {subset}='different' and compat='override'.", recommend_set_options=False, ), FutureWarning, ) if compat == "override": raise ValueError( f"Cannot specify both {subset}='different' and compat='override'." + ( compat.error_message() if isinstance(compat, CombineKwargDefault) else "" ) ) # all nonindexes that are not the same in each dataset for k in getattr(datasets[0], subset): if k not in concat_over: equal = None variables = [ ds.variables[k] for ds in datasets if k in ds.variables ] if len(variables) == 1: # coords="different" doesn't make sense when only one object # contains a particular variable. break elif len(variables) != len(datasets) and opt == "different": raise ValueError( f"{k!r} not present in all datasets and coords='different'. " f"Either add {k!r} to datasets where it is missing or " "specify coords='minimal'." ) # first check without comparing values i.e. no computes for var in variables[1:]: equal = getattr(variables[0], compat_str)( var, equiv=lazy_array_equiv ) if equal is not True: # exit early if we know these are not equal or that # equality cannot be determined i.e. one or all of # the variables wraps a numpy array break if equal is False: concat_over.add(k) elif equal is None: # Compare the variable of all datasets vs. the one # of the first dataset. Perform the minimum amount of # loads in order to avoid multiple loads from disk # while keeping the RAM footprint low. v_lhs = datasets[0].variables[k].load() # We'll need to know later on if variables are equal. 
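                            # Descriptive note: the computed right-hand-side variables are
                            # collected in ``computed`` so they can be written back into the
                            # datasets further below, avoiding a second load/compute of the
                            # same data later in the concat pipeline.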
computed = [] for ds_rhs in datasets[1:]: v_rhs = ds_rhs.variables[k].compute() computed.append(v_rhs) if not getattr(v_lhs, compat_str)(v_rhs): concat_over.add(k) equals[k] = False # computed variables are not to be re-computed # again in the future for ds, v in zip( datasets[1:], computed, strict=False ): ds.variables[k].data = v.data break else: equal = True if TYPE_CHECKING: assert equal is not None equals[k] = equal elif opt == "all": concat_over.update( set().union( *[set(getattr(d, subset)) - set(d.dims) for d in datasets] ) ) elif opt == "minimal": pass else: raise ValueError(f"unexpected value for {subset}: {opt}") if ( isinstance(opt, CombineKwargDefault) and opt._value is not None and original != concat_over and concat_over_existing_dim ): warnings.append( opt.warning_message( "This is likely to lead to different results when multiple datasets " "have matching variables with overlapping values.", ) ) else: valid_vars = tuple(getattr(datasets[0], subset)) invalid_vars = [k for k in opt if k not in valid_vars] if invalid_vars: if subset == "coords": raise ValueError( f"the variables {invalid_vars} in coords are not " f"found in the coordinates of the first dataset {valid_vars}" ) else: # note: data_vars are not listed in the error message here, # because there may be lots of them raise ValueError( f"the variables {invalid_vars} in data_vars are not " f"found in the data variables of the first dataset" ) concat_over.update(opt) warnings: list[str] = [] process_subset_opt(data_vars, "data_vars") process_subset_opt(coords, "coords") for warning in warnings: emit_user_level_warning(warning, FutureWarning) return concat_over, equals, concat_dim_lengths, skip_merge # determine dimensional coordinate names and a dict mapping name to DataArray def _parse_datasets( datasets: list[T_Dataset], ) -> tuple[ set[Hashable], dict[Hashable, Variable], dict[Hashable, int], set[Hashable], set[Hashable], list[Hashable], ]: dims: set[Hashable] = set() all_coord_names: set[Hashable] = set() data_vars: set[Hashable] = set() # list of data_vars dim_coords: dict[Hashable, Variable] = {} # maps dim name to variable dims_sizes: dict[Hashable, int] = {} # shared dimension sizes to expand variables variables_order: dict[Hashable, Variable] = {} # variables in order of appearance for ds in datasets: dims_sizes.update(ds.sizes) all_coord_names.update(ds.coords) data_vars.update(ds.data_vars) variables_order.update(ds.variables) # preserves ordering of dimensions for dim in ds.dims: if dim in dims: continue if dim in ds.coords and dim not in dim_coords: dim_coords[dim] = ds.coords[dim].variable dims = dims | set(ds.dims) return ( dims, dim_coords, dims_sizes, all_coord_names, data_vars, list(variables_order), ) def _dataset_concat( datasets: Iterable[T_Dataset], dim: Hashable | T_Variable | T_DataArray | pd.Index, data_vars: T_DataVars | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, positions: Iterable[Iterable[int]] | None, fill_value: Any, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, create_index_for_new_dim: bool, *, preexisting_dim: bool = False, ) -> T_Dataset: """ Concatenate a sequence of datasets along a new or existing dimension """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset datasets = list(datasets) if not all(isinstance(dataset, Dataset) for dataset in datasets): raise TypeError( "The elements in the input list need to be either all 'Dataset's 
or all 'DataArray's" ) dim_var: Variable | None if isinstance(dim, DataArray): dim_var = dim.variable elif isinstance(dim, Variable): dim_var = dim else: dim_var = None dim_name, index = _calc_concat_dim_index(dim) # Make sure we're working on a copy (we'll be loading variables) datasets = [ds.copy() for ds in datasets] datasets = list( align( *datasets, join=join, copy=False, exclude=[dim_name], fill_value=fill_value ) ) all_dims, dim_coords, dims_sizes, coord_names, data_names, vars_order = ( _parse_datasets(datasets) ) if preexisting_dim: # When concatenating DataTree objects, a dimension may be pre-existing # because it exists elsewhere on the trees, even if it does not exist # on the dataset objects at this node. all_dims.add(dim_name) indexed_dim_names = set(dim_coords) both_data_and_coords = coord_names & data_names if both_data_and_coords: raise ValueError( f"{both_data_and_coords!r} is a coordinate in some datasets but not others." ) # we don't want the concat dimension in the result dataset yet dim_coords.pop(dim_name, None) dims_sizes.pop(dim_name, None) # case where concat dimension is a coordinate or data_var but not a dimension if ( dim_name in coord_names or dim_name in data_names ) and dim_name not in indexed_dim_names: datasets = [ ds.expand_dims(dim_name, create_index_for_new_dim=create_index_for_new_dim) for ds in datasets ] all_dims.add(dim_name) # This isn't being used any more, but keeping it up to date # just in case we decide to use it later. indexed_dim_names.add(dim_name) # determine which variables to concatenate concat_over, equals, concat_dim_lengths, skip_merge = _calc_concat_over( datasets, dim_name, all_dims, data_vars, coords, compat ) # determine which variables to merge, and then merge them according to compat variables_to_merge = (coord_names | data_names) - concat_over - skip_merge result_vars = {} result_indexes = {} if variables_to_merge: grouped = { k: v for k, v in collect_variables_and_indexes(datasets).items() if k in variables_to_merge } merged_vars, merged_indexes = merge_collected( grouped, compat=compat, equals=equals ) result_vars.update(merged_vars) result_indexes.update(merged_indexes) result_vars.update(dim_coords) # assign attrs and encoding from first dataset result_attrs = merge_attrs([ds.attrs for ds in datasets], combine_attrs) result_encoding = datasets[0].encoding # check that global attributes are fixed across all datasets if necessary if compat == "identical": for ds in datasets[1:]: if not utils.dict_equiv(ds.attrs, result_attrs): raise ValueError("Dataset global attributes not equal.") # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables def ensure_common_dims(vars, concat_dim_lengths): # ensure each variable with the given name shares the same # dimensions and the same shape for all of them except along the # concat dimension common_dims = tuple(utils.OrderedSet(d for v in vars for d in v.dims)) if dim_name not in common_dims: common_dims = (dim_name,) + common_dims for var, dim_len in zip(vars, concat_dim_lengths, strict=True): if var.dims != common_dims: common_shape = tuple(dims_sizes.get(d, dim_len) for d in common_dims) var = var.set_dims(common_dims, common_shape) yield var # get the indexes to concatenate together, create a PandasIndex # for any scalar coordinate variable found with ``name`` matching ``dim``. # TODO: depreciate concat a mix of scalar and dimensional indexed coordinates? 
# TODO: (benbovy - explicit indexes): check index types and/or coordinates # of all datasets? def get_indexes(name): for ds in datasets: if name in ds._indexes: yield ds._indexes[name] elif name == dim_name: var = ds._variables[name] if not var.dims: data = var.set_dims(dim_name).values if create_index_for_new_dim: yield PandasIndex(data, dim_name, coord_dtype=var.dtype) # create concatenation index, needed for later reindexing # use np.cumulative_sum(concat_dim_lengths, include_initial=True) when we support numpy>=2 file_start_indexes = np.append(0, np.cumsum(concat_dim_lengths)) concat_index_size = file_start_indexes[-1] variable_index_mask = np.ones(concat_index_size, dtype=bool) variable_reindexer = None # stack up each variable and/or index to fill-out the dataset (in order) # n.b. this loop preserves variable order, needed for groupby. ndatasets = len(datasets) for name in vars_order: if name in concat_over and name not in result_indexes: variables = [] # Initialize the mask to all True then set False if any name is missing in # the datasets: variable_index_mask.fill(True) var_concat_dim_length = [] for i, ds in enumerate(datasets): if name in ds.variables: variables.append(ds[name].variable) var_concat_dim_length.append(concat_dim_lengths[i]) else: # raise if coordinate not in all datasets if name in coord_names: raise ValueError( f"coordinate {name!r} not present in all datasets." ) # Mask out the indexes without the name: start = file_start_indexes[i] end = file_start_indexes[i + 1] variable_index_mask[slice(start, end)] = False vars = ensure_common_dims(variables, var_concat_dim_length) # Try to concatenate the indexes, concatenate the variables when no index # is found on all datasets. indexes: list[Index] = list(get_indexes(name)) if indexes: if len(indexes) < ndatasets: raise ValueError( f"{name!r} must have either an index or no index in all datasets, " f"found {len(indexes)}/{len(datasets)} datasets with an index." ) combined_idx = indexes[0].concat(indexes, dim_name, positions) if name in datasets[0]._indexes: idx_vars = datasets[0].xindexes.get_all_coords(name) else: # index created from a scalar coordinate idx_vars = {name: datasets[0][name].variable} result_indexes.update(dict.fromkeys(idx_vars, combined_idx)) combined_idx_vars = combined_idx.create_variables(idx_vars) for k, v in combined_idx_vars.items(): v.attrs = merge_attrs( [ds.variables[k].attrs for ds in datasets], combine_attrs=combine_attrs, ) result_vars[k] = v else: combined_var = concat_vars( vars, dim_name, positions, combine_attrs=combine_attrs ) # reindex if variable is not present in all datasets if not variable_index_mask.all(): if variable_reindexer is None: # allocate only once variable_reindexer = np.empty( concat_index_size, # cannot use uint since we need -1 as a sentinel for reindexing dtype=np.min_scalar_type(-concat_index_size), ) np.cumsum(variable_index_mask, out=variable_reindexer) # variable_index_mask is boolean, so the first element is 1. # offset by 1 to start at 0. variable_reindexer -= 1 variable_reindexer[~variable_index_mask] = -1 combined_var = reindex_variables( variables={name: combined_var}, dim_pos_indexers={dim_name: variable_reindexer}, fill_value=fill_value, )[name] result_vars[name] = combined_var elif name in result_vars: # preserves original variable order result_vars[name] = result_vars.pop(name) absent_coord_names = coord_names - set(result_vars) if absent_coord_names: raise ValueError( f"Variables {absent_coord_names!r} are coordinates in some datasets but not others." 
) result_data_vars = {} coord_vars = {} for name, result_var in result_vars.items(): if name in coord_names: coord_vars[name] = result_var else: result_data_vars[name] = result_var if index is not None: if dim_var is not None: index_vars = index.create_variables({dim_name: dim_var}) else: index_vars = index.create_variables() coord_vars[dim_name] = index_vars[dim_name] result_indexes[dim_name] = index coords_obj = Coordinates(coord_vars, indexes=result_indexes) result = type(datasets[0])(result_data_vars, coords=coords_obj, attrs=result_attrs) result.encoding = result_encoding return result def _dataarray_concat( arrays: Iterable[T_DataArray], dim: Hashable | T_Variable | T_DataArray | pd.Index, data_vars: T_DataVars | Iterable[Hashable] | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, positions: Iterable[Iterable[int]] | None, fill_value: object, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, create_index_for_new_dim: bool, ) -> T_DataArray: from xarray.core.dataarray import DataArray arrays = list(arrays) if not all(isinstance(array, DataArray) for array in arrays): raise TypeError( "The elements in the input list need to be either all 'Dataset's or all 'DataArray's" ) # Allow passing `all` or `None` even though we always use `data_vars='all'` # when passing off to `_dataset_concat`. if not isinstance(data_vars, CombineKwargDefault) and data_vars not in [ "all", None, ]: raise ValueError( "data_vars is not a valid argument when concatenating DataArray objects" ) datasets = [] for n, arr in enumerate(arrays): if n == 0: name = arr.name elif name != arr.name: if compat == "identical": raise ValueError("array names not identical") else: arr = arr.rename(name) datasets.append(arr._to_temp_dataset()) ds = _dataset_concat( datasets, dim=dim, data_vars="all", coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) merged_attrs = merge_attrs([da.attrs for da in arrays], combine_attrs) result = arrays[0]._from_temp_dataset(ds, name) result.attrs = merged_attrs return result def _datatree_concat( objs: Iterable[DataTree], dim: Hashable | Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | Iterable[Hashable] | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, positions: Iterable[Iterable[int]] | None, fill_value: Any, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, create_index_for_new_dim: bool, ) -> DataTree: """ Concatenate a sequence of datatrees along a new or existing dimension """ from xarray.core.datatree import DataTree from xarray.core.treenode import TreeIsomorphismError, group_subtrees dim_name, _ = _calc_concat_dim_index(dim) objs = list(objs) if not all(isinstance(obj, DataTree) for obj in objs): raise TypeError("All objects to concatenate must be DataTree objects") if compat == "identical": if any(obj.name != objs[0].name for obj in objs[1:]): raise ValueError("DataTree names not identical") dim_in_tree = any(dim_name in node.dims for node in objs[0].subtree) results = {} try: for path, nodes in group_subtrees(*objs): datasets_to_concat = [node.to_dataset() for node in nodes] results[path] = _dataset_concat( datasets_to_concat, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, 
combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, preexisting_dim=dim_in_tree, ) except TreeIsomorphismError as e: raise ValueError("All trees must be isomorphic to be concatenated") from e return DataTree.from_dict(results, name=objs[0].name) xarray-2025.12.0/xarray/structure/merge.py000066400000000000000000001310561511464676000203700ustar00rootroot00000000000000from __future__ import annotations from collections import defaultdict from collections.abc import Hashable, Iterable, Mapping, Sequence from collections.abc import Set as AbstractSet from typing import TYPE_CHECKING, Any, NamedTuple, Union, cast, overload import pandas as pd from xarray.core import dtypes from xarray.core.duck_array_ops import lazy_array_equiv from xarray.core.indexes import ( Index, create_default_index_implicit, filter_indexes_from_coords, indexes_equal, ) from xarray.core.utils import ( Frozen, compat_dict_union, dict_equiv, emit_user_level_warning, equivalent, ) from xarray.core.variable import ( IndexVariable, Variable, as_variable, calculate_dimensions, ) from xarray.structure.alignment import deep_align from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ( CombineAttrsOptions, CompatOptions, DataVars, JoinOptions, ) DimsLike = Union[Hashable, Sequence[Hashable]] ArrayLike = Any VariableLike = Union[ ArrayLike, tuple[DimsLike, ArrayLike], tuple[DimsLike, ArrayLike, Mapping], tuple[DimsLike, ArrayLike, Mapping, Mapping], ] XarrayValue = Union[DataArray, Variable, VariableLike] DatasetLike = Union[Dataset, Coordinates, Mapping[Any, XarrayValue]] CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame] CoercibleMapping = Union[Dataset, Mapping[Any, CoercibleValue]] PANDAS_TYPES = (pd.Series, pd.DataFrame) _VALID_COMPAT = Frozen( { "identical": 0, "equals": 1, "broadcast_equals": 2, "minimal": 3, "no_conflicts": 4, "override": 5, } ) class Context: """object carrying the information of a call""" def __init__(self, func): self.func = func def broadcast_dimension_size(variables: list[Variable]) -> dict[Hashable, int]: """Extract dimension sizes from a dictionary of variables. Raises ValueError if any dimensions have different sizes. """ dims: dict[Hashable, int] = {} for var in variables: for dim, size in zip(var.dims, var.shape, strict=True): if dim in dims and size != dims[dim]: raise ValueError(f"index {dim!r} not aligned") dims[dim] = size return dims class MergeError(ValueError): """Error class for merge failures due to incompatible arguments.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def unique_variable( name: Hashable, variables: list[Variable], compat: CompatOptions | CombineKwargDefault = "broadcast_equals", equals: bool | None = None, ) -> tuple[bool | None, Variable]: """Return the unique variable from a list of variables or raise MergeError. Parameters ---------- name : hashable Name for this variable. variables : list of Variable List of Variable objects, all of which go by the same name in different inputs. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Type of equality check to use. 
equals : None or bool, optional corresponding to result of compat test Returns ------- Variable to use in the result. Raises ------ MergeError: if any of the variables are not equal. """ out = variables[0] if len(variables) == 1 or compat == "override": return equals, out combine_method = None if compat == "minimal": compat = "broadcast_equals" if compat == "broadcast_equals": dim_lengths = broadcast_dimension_size(variables) out = out.set_dims(dim_lengths) if compat == "no_conflicts": combine_method = "fillna" # we return the lazy equals, so we can warn about behaviour changes lazy_equals = equals if equals is None: compat_str = ( compat._value if isinstance(compat, CombineKwargDefault) else compat ) assert compat_str is not None # first check without comparing values i.e. no computes for var in variables[1:]: equals = getattr(out, compat_str)(var, equiv=lazy_array_equiv) if equals is not True: break lazy_equals = equals if equals is None: # now compare values with minimum number of computes out = out.compute() for var in variables[1:]: equals = getattr(out, compat_str)(var) if not equals: break if not equals: raise MergeError( f"conflicting values for variable {name!r} on objects to be combined. " "You can skip this check by specifying compat='override'." ) if combine_method: for var in variables[1:]: out = getattr(out, combine_method)(var) return lazy_equals, out def _assert_compat_valid(compat): if not isinstance(compat, CombineKwargDefault) and compat not in _VALID_COMPAT: raise ValueError(f"compat={compat!r} invalid: must be {set(_VALID_COMPAT)}") MergeElement = tuple[Variable, Index | None] def _assert_prioritized_valid( grouped: dict[Hashable, list[MergeElement]], prioritized: Mapping[Any, MergeElement], ) -> None: """Make sure that elements given in prioritized will not corrupt any index given in grouped. """ prioritized_names = set(prioritized) grouped_by_index: dict[int, list[Hashable]] = defaultdict(list) indexes: dict[int, Index] = {} for name, elements_list in grouped.items(): for _, index in elements_list: if index is not None: grouped_by_index[id(index)].append(name) indexes[id(index)] = index # An index may be corrupted when the set of its corresponding coordinate name(s) # partially overlaps the set of names given in prioritized for index_id, index_coord_names in grouped_by_index.items(): index_names = set(index_coord_names) common_names = index_names & prioritized_names if common_names and len(common_names) != len(index_names): common_names_str = ", ".join(f"{k!r}" for k in common_names) index_names_str = ", ".join(f"{k!r}" for k in index_coord_names) raise ValueError( f"cannot set or update variable(s) {common_names_str}, which would corrupt " f"the following index built from coordinates {index_names_str}:\n" f"{indexes[index_id]!r}" ) def merge_collected( grouped: dict[Any, list[MergeElement]], prioritized: Mapping[Any, MergeElement] | None = None, compat: CompatOptions | CombineKwargDefault = "minimal", combine_attrs: CombineAttrsOptions = "override", equals: dict[Any, bool] | None = None, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge dicts of variables, while resolving conflicts appropriately. Parameters ---------- grouped : mapping prioritized : mapping compat : str Type of equality check to use when checking for conflicts. 
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. equals : mapping, optional corresponding to result of compat test Returns ------- Dict with keys taken by the union of keys on list_of_mappings, and Variable values corresponding to those that should be found on the merged result. """ if prioritized is None: prioritized = {} if equals is None: equals = {} _assert_compat_valid(compat) _assert_prioritized_valid(grouped, prioritized) merged_vars: dict[Hashable, Variable] = {} merged_indexes: dict[Hashable, Index] = {} index_cmp_cache: dict[tuple[int, int], bool | None] = {} for name, elements_list in grouped.items(): if name in prioritized: variable, index = prioritized[name] merged_vars[name] = variable if index is not None: merged_indexes[name] = index else: attrs: dict[Any, Any] = {} indexed_elements = [ (variable, index) for variable, index in elements_list if index is not None ] if indexed_elements: # TODO(shoyer): consider adjusting this logic. Are we really # OK throwing away variable without an index in favor of # indexed variables, without even checking if values match? variable, index = indexed_elements[0] for other_var, other_index in indexed_elements[1:]: if not indexes_equal( index, other_index, variable, other_var, index_cmp_cache ): raise MergeError( f"conflicting values/indexes on objects to be combined for coordinate {name!r}\n" f"first index: {index!r}\nsecond index: {other_index!r}\n" f"first variable: {variable!r}\nsecond variable: {other_var!r}\n" ) if compat == "identical": for other_variable, _ in indexed_elements[1:]: if not dict_equiv(variable.attrs, other_variable.attrs): raise MergeError( "conflicting attribute values on combined " f"variable {name!r}:\nfirst value: {variable.attrs!r}\nsecond value: {other_variable.attrs!r}" ) attrs = merge_attrs( [var.attrs for var, _ in indexed_elements], combine_attrs=combine_attrs, ) merged_vars[name] = variable merged_indexes[name] = index else: variables = [variable for variable, _ in elements_list] try: equals_this_var, merged_vars[name] = unique_variable( name, variables, compat, equals.get(name) ) # This is very likely to result in false positives, but there is no way # to tell if the output will change without computing. 
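                # Note: this warning only fires when ``compat`` was left at the
                # library default (a ``CombineKwargDefault`` resolving to
                # "no_conflicts"), more than one variable was merged, and the
                # lazy comparison could not establish equality; passing
                # ``compat`` explicitly avoids the warning.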
if ( isinstance(compat, CombineKwargDefault) and compat == "no_conflicts" and len(variables) > 1 and not equals_this_var ): emit_user_level_warning( compat.warning_message( "This is likely to lead to different results when " "combining overlapping variables with the same name.", ), FutureWarning, ) except MergeError: if compat != "minimal": # we need more than "minimal" compatibility (for which # we drop conflicting coordinates) raise if name in merged_vars: attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) if name in merged_vars and (merged_vars[name].attrs or attrs): # Ensure that assigning attrs does not affect the original input variable. merged_vars[name] = merged_vars[name].copy(deep=False) merged_vars[name].attrs = attrs return merged_vars, merged_indexes def collect_variables_and_indexes( list_of_mappings: Iterable[DatasetLike], indexes: Mapping[Any, Any] | None = None, ) -> dict[Hashable, list[MergeElement]]: """Collect variables and indexes from list of mappings of xarray objects. Mappings can be Dataset or Coordinates objects, in which case both variables and indexes are extracted from it. It can also have values of one of the following types: - an xarray.Variable - a tuple `(dims, data[, attrs[, encoding]])` that can be converted in an xarray.Variable - or an xarray.DataArray If a mapping of indexes is given, those indexes are assigned to all variables with a matching key/name. For dimension variables with no matching index, a default (pandas) index is assigned. DataArray indexes that don't match mapping keys are also extracted. """ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if indexes is None: indexes = {} grouped: dict[Hashable, list[MergeElement]] = defaultdict(list) def append(name, variable, index): grouped[name].append((variable, index)) def append_all(variables, indexes): for name, variable in variables.items(): append(name, variable, indexes.get(name)) for mapping in list_of_mappings: if isinstance(mapping, Coordinates | Dataset): append_all(mapping.variables, mapping.xindexes) continue for name, variable in mapping.items(): if isinstance(variable, DataArray): coords_ = variable._coords.copy() # use private API for speed indexes_ = dict(variable._indexes) # explicitly overwritten variables should take precedence coords_.pop(name, None) indexes_.pop(name, None) append_all(coords_, indexes_) variable = as_variable(variable, name=name, auto_convert=False) if name in indexes: append(name, variable, indexes[name]) elif variable.dims == (name,): idx, idx_vars = create_default_index_implicit(variable) append_all(idx_vars, dict.fromkeys(idx_vars, idx)) else: append(name, variable, None) return grouped def collect_from_coordinates( list_of_coords: list[Coordinates], ) -> dict[Hashable, list[MergeElement]]: """Collect variables and indexes to be merged from Coordinate objects.""" grouped: dict[Hashable, list[MergeElement]] = defaultdict(list) for coords in list_of_coords: variables = coords.variables indexes = coords.xindexes for name, variable in variables.items(): grouped[name].append((variable, indexes.get(name))) return grouped def merge_coordinates_without_align( objects: list[Coordinates], prioritized: Mapping[Any, MergeElement] | None = None, exclude_dims: AbstractSet = frozenset(), combine_attrs: CombineAttrsOptions = "override", ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge variables/indexes from coordinates without automatic 
alignments. This function is used for merging coordinate from pre-existing xarray objects. """ collected = collect_from_coordinates(objects) if exclude_dims: filtered: dict[Hashable, list[MergeElement]] = {} for name, elements in collected.items(): new_elements = [ (variable, index) for variable, index in elements if exclude_dims.isdisjoint(variable.dims) ] if new_elements: filtered[name] = new_elements else: filtered = collected # TODO: indexes should probably be filtered in collected elements # before merging them merged_coords, merged_indexes = merge_collected( filtered, prioritized, combine_attrs=combine_attrs ) merged_indexes = filter_indexes_from_coords(merged_indexes, set(merged_coords)) return merged_coords, merged_indexes def determine_coords( list_of_mappings: Iterable[DatasetLike], ) -> tuple[set[Hashable], set[Hashable]]: """Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_mappings : list of dict or list of Dataset Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names. """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset coord_names: set[Hashable] = set() noncoord_names: set[Hashable] = set() for mapping in list_of_mappings: if isinstance(mapping, Dataset): coord_names.update(mapping.coords) noncoord_names.update(mapping.data_vars) else: for name, var in mapping.items(): if isinstance(var, DataArray): coords = set(var._coords) # use private API for speed # explicitly overwritten variables should take precedence coords.discard(name) coord_names.update(coords) return coord_names, noncoord_names def coerce_pandas_values(objects: Iterable[CoercibleMapping]) -> list[DatasetLike]: """Convert pandas values found in a list of labeled objects. Parameters ---------- objects : list of Dataset or mapping The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. Returns ------- List of Dataset or dictionary objects. Any inputs or values in the inputs that were pandas objects have been converted into native xarray objects. """ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset out: list[DatasetLike] = [] for obj in objects: variables: DatasetLike if isinstance(obj, Dataset | Coordinates): variables = obj else: variables = {} if isinstance(obj, PANDAS_TYPES): obj = dict(obj.items()) for k, v in obj.items(): if isinstance(v, PANDAS_TYPES): v = DataArray(v) variables[k] = v out.append(variables) return out def _get_priority_vars_and_indexes( objects: Sequence[DatasetLike], priority_arg: int | None, compat: CompatOptions | CombineKwargDefault = "equals", ) -> dict[Hashable, MergeElement]: """Extract the priority variable from a list of mappings. We need this method because in some cases the priority argument itself might have conflicting values (e.g., if it is a dict with two DataArray values with conflicting coordinate values). Parameters ---------- objects : sequence of dict-like of Variable Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. 
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset Returns ------- A dictionary of variables and associated indexes (if any) to prioritize. """ if priority_arg is None: return {} collected = collect_variables_and_indexes([objects[priority_arg]]) variables, indexes = merge_collected(collected, compat=compat) grouped: dict[Hashable, MergeElement] = {} for name, variable in variables.items(): grouped[name] = (variable, indexes.get(name)) return grouped def merge_coords( objects: Iterable[CoercibleMapping], compat: CompatOptions = "minimal", join: JoinOptions = "outer", priority_arg: int | None = None, indexes: Mapping[Any, Index] | None = None, fill_value: object = dtypes.NA, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge coordinate variables. See merge_core below for argument descriptions. This works similarly to merge_core, except everything we don't worry about whether variables are coordinates or not. """ _assert_compat_valid(compat) coerced = coerce_pandas_values(objects) aligned = deep_align( coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value ) collected = collect_variables_and_indexes(aligned, indexes=indexes) prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat) variables, out_indexes = merge_collected(collected, prioritized, compat=compat) return variables, out_indexes def equivalent_attrs(a: Any, b: Any) -> bool: """Check if two attribute values are equivalent. Returns False if the comparison raises ValueError or TypeError. This handles cases like numpy arrays with ambiguous truth values and xarray Datasets which can't be directly converted to numpy arrays. Since equivalent() now handles non-boolean returns by returning False, this wrapper mainly catches exceptions from comparisons that can't be evaluated at all. """ try: return equivalent(a, b) except (ValueError, TypeError): # These exceptions indicate the comparison is truly ambiguous # (e.g., nested numpy arrays that would raise "ambiguous truth value") return False def merge_attrs(variable_attrs, combine_attrs, context=None): """Combine attributes from different variables according to combine_attrs""" if not variable_attrs: # no attributes to merge return None if callable(combine_attrs): return combine_attrs(variable_attrs, context=context) elif combine_attrs == "drop": return {} elif combine_attrs == "override": return dict(variable_attrs[0]) elif combine_attrs == "no_conflicts": result = dict(variable_attrs[0]) for attrs in variable_attrs[1:]: try: result = compat_dict_union(result, attrs) except ValueError as e: raise MergeError( "combine_attrs='no_conflicts', but some values are not " f"the same. 
Merging {result} with {attrs}" ) from e return result elif combine_attrs == "drop_conflicts": result = {} dropped_keys = set() for attrs in variable_attrs: for key, value in attrs.items(): if key in dropped_keys: continue if key not in result: result[key] = value elif not equivalent_attrs(result[key], value): del result[key] dropped_keys.add(key) return result elif combine_attrs == "identical": result = dict(variable_attrs[0]) for attrs in variable_attrs[1:]: if not dict_equiv(result, attrs): raise MergeError( f"combine_attrs='identical', but attrs differ. First is {result} " f", other is {attrs}." ) return result else: raise ValueError(f"Unrecognised value for combine_attrs={combine_attrs}") class _MergeResult(NamedTuple): variables: dict[Hashable, Variable] coord_names: set[Hashable] dims: dict[Hashable, int] indexes: dict[Hashable, Index] attrs: dict[Hashable, Any] def merge_core( objects: Iterable[CoercibleMapping], compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions = "override", priority_arg: int | None = None, explicit_coords: Iterable[Hashable] | None = None, indexes: Mapping[Any, Any] | None = None, fill_value: object = dtypes.NA, skip_align_args: list[int] | None = None, ) -> _MergeResult: """Core logic for merging labeled objects. This is not public API. Parameters ---------- objects : list of mapping All values must be convertible to labeled arrays. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Compatibility checks to use when merging variables. join : {"outer", "inner", "left", "right"}, optional How to combine objects with different indexes. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" How to combine attributes of objects priority_arg : int, optional Optional argument in `objects` that takes precedence over the others. explicit_coords : set, optional An explicit list of variables from `objects` that are coordinates. indexes : dict, optional Dictionary with values given by xarray.Index objects or anything that may be cast to pandas.Index objects. fill_value : scalar, optional Value to use for newly missing values skip_align_args : list of int, optional Optional arguments in `objects` that are not included in alignment. Returns ------- variables : dict Dictionary of Variable objects. coord_names : set Set of coordinate names. dims : dict Dictionary mapping from dimension names to sizes. 
attrs : dict Dictionary of attributes """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset _assert_compat_valid(compat) objects = list(objects) if skip_align_args is None: skip_align_args = [] skip_align_objs = [(pos, objects.pop(pos)) for pos in skip_align_args] coerced = coerce_pandas_values(objects) aligned = deep_align( coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value, ) for pos, obj in skip_align_objs: aligned.insert(pos, obj) collected = collect_variables_and_indexes(aligned, indexes=indexes) prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat) variables, out_indexes = merge_collected( collected, prioritized, compat=compat, combine_attrs=combine_attrs, ) dims = calculate_dimensions(variables) coord_names, noncoord_names = determine_coords(coerced) if compat == "minimal": # coordinates may be dropped in merged results coord_names.intersection_update(variables) if explicit_coords is not None: coord_names.update(explicit_coords) for dim in dims: if dim in variables: coord_names.add(dim) ambiguous_coords = coord_names.intersection(noncoord_names) if ambiguous_coords: raise MergeError( "unable to determine if these variables should be " f"coordinates or not in the merged result: {ambiguous_coords}" ) attrs = merge_attrs( [var.attrs for var in coerced if isinstance(var, Dataset | DataArray)], combine_attrs, ) return _MergeResult(variables, coord_names, dims, out_indexes, attrs) def merge_trees( trees: Sequence[DataTree], compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, fill_value: object = dtypes.NA, combine_attrs: CombineAttrsOptions = "override", ) -> DataTree: """Merge specialized to DataTree objects.""" from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.datatree_mapping import add_path_context_to_errors if fill_value is not dtypes.NA: # fill_value support dicts, which probably should be mapped to sub-groups? raise NotImplementedError( "fill_value is not yet supported for DataTree objects in merge" ) node_lists: defaultdict[str, list[DataTree]] = defaultdict(list) for tree in trees: for key, node in tree.subtree_with_keys: node_lists[key].append(node) root_datasets = [node.dataset for node in node_lists.pop(".")] with add_path_context_to_errors("."): root_ds = merge( root_datasets, compat=compat, join=join, combine_attrs=combine_attrs ) result = DataTree(dataset=root_ds) def level(kv): # all trees with the same path have the same level _, trees = kv return trees[0].level for key, nodes in sorted(node_lists.items(), key=level): # Merge datasets, including inherited indexes to ensure alignment. datasets = [node.dataset for node in nodes] with add_path_context_to_errors(key): merge_result = merge_core( datasets, compat=compat, join=join, combine_attrs=combine_attrs, ) merged_ds = Dataset._construct_direct(**merge_result._asdict()) result[key] = DataTree(dataset=merged_ds) return result @overload def merge( objects: Iterable[DataTree], compat: CompatOptions | CombineKwargDefault = ..., join: JoinOptions | CombineKwargDefault = ..., fill_value: object = ..., combine_attrs: CombineAttrsOptions = ..., ) -> DataTree: ... @overload def merge( objects: Iterable[DataArray | Dataset | Coordinates | dict], compat: CompatOptions | CombineKwargDefault = ..., join: JoinOptions | CombineKwargDefault = ..., fill_value: object = ..., combine_attrs: CombineAttrsOptions = ..., ) -> Dataset: ... 
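# Illustrative sketch (not part of the library): how the overloads above map onto
# typical usage. Merging Dataset/DataArray/Coordinates/dict inputs returns a
# Dataset, while a list of DataTree objects returns a DataTree. The object names
# used here ("ds1", "ds2") are arbitrary examples.
#
#     import xarray as xr
#
#     ds1 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [10, 20]})
#     ds2 = xr.Dataset({"b": ("x", [3, 4])}, coords={"x": [10, 20]})
#     merged = xr.merge([ds1, ds2], join="outer", compat="no_conflicts")
#     assert set(merged.data_vars) == {"a", "b"}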
def merge( objects: Iterable[DataTree | DataArray | Dataset | Coordinates | dict], compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, fill_value: object = dtypes.NA, combine_attrs: CombineAttrsOptions = "override", ) -> DataTree | Dataset: """Merge any number of xarray objects into a single Dataset as variables. Parameters ---------- objects : iterable of DataArray, Dataset, DataTree or dict Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \ "override", "minimal"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts: - "identical": all values, dimensions and attributes must be the same. - "equals": all values and dimensions must be the same. - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset - "minimal": drop conflicting coordinates join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer" String indicating how to combine differing indexes in objects. - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- Dataset or DataTree Objects with combined variables from the inputs. If any inputs are a DataTree, this will also be a DataTree. Otherwise it will be a Dataset. Examples -------- >>> x = xr.DataArray( ... [[1.0, 2.0], [3.0, 5.0]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ... name="var1", ... ) >>> y = xr.DataArray( ... [[5.0, 6.0], [7.0, 8.0]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]}, ... name="var2", ... ) >>> z = xr.DataArray( ... [[0.0, 3.0], [4.0, 9.0]], ... dims=("time", "lon"), ... coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]}, ... name="var3", ... 
) >>> x Size: 32B array([[1., 2.], [3., 5.]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> y Size: 32B array([[5., 6.], [7., 8.]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 150.0 >>> z Size: 32B array([[0., 3.], [4., 9.]]) Coordinates: * time (time) float64 16B 30.0 60.0 * lon (lon) float64 16B 100.0 150.0 >>> xr.merge([x, y, z], join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="identical", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", join="outer", fill_value=-999.0) Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0 var2 (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 
7.0 -999.0 8.0 var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0 >>> xr.merge([x, y, z], join="override") Size: 144B Dimensions: (lat: 2, lon: 2, time: 2) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 32B 1.0 2.0 3.0 5.0 var2 (lat, lon) float64 32B 5.0 6.0 7.0 8.0 var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0 >>> xr.merge([x, y, z], join="inner") Size: 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 8B 100.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 8B 1.0 var2 (lat, lon) float64 8B 5.0 var3 (time, lon) float64 16B 0.0 4.0 >>> xr.merge([x, y, z], compat="identical", join="inner") Size: 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 8B 100.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 8B 1.0 var2 (lat, lon) float64 8B 5.0 var3 (time, lon) float64 16B 0.0 4.0 >>> xr.merge([x, y, z], compat="broadcast_equals", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], join="exact") Traceback (most recent call last): ... xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' where ... Raises ------ xarray.MergeError If any variables with the same name have conflicting values. See also -------- concat combine_nested combine_by_coords """ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree objects = list(objects) if any(isinstance(obj, DataTree) for obj in objects): if not all(isinstance(obj, DataTree) for obj in objects): raise TypeError( "merge does not support mixed type arguments when one argument " f"is a DataTree: {objects}" ) trees = cast(list[DataTree], objects) return merge_trees( trees, compat=compat, join=join, combine_attrs=combine_attrs, fill_value=fill_value, ) dict_like_objects = [] for obj in objects: if not isinstance(obj, DataArray | Dataset | Coordinates | dict): raise TypeError( "objects must be an iterable containing only DataTree(s), " f"Dataset(s), DataArray(s), and dictionaries: {objects}" ) if isinstance(obj, DataArray): obj = obj.to_dataset(promote_attrs=True) elif isinstance(obj, Coordinates): obj = obj.to_dataset() dict_like_objects.append(obj) merge_result = merge_core( dict_like_objects, compat=compat, join=join, combine_attrs=combine_attrs, fill_value=fill_value, ) return Dataset._construct_direct(**merge_result._asdict()) def dataset_merge_method( dataset: Dataset, other: CoercibleMapping, overwrite_vars: Hashable | Iterable[Hashable], compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, fill_value: Any, combine_attrs: CombineAttrsOptions, ) -> _MergeResult: """Guts of the Dataset.merge method.""" # we are locked into supporting overwrite_vars for the Dataset.merge # method due for backwards compatibility # TODO: consider deprecating it? 
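    # Illustrative note (hedged sketch, not executed): through the public
    # ``Dataset.merge`` method, ``overwrite_vars`` names variables in ``other``
    # that should replace same-named variables in ``dataset`` without a
    # compatibility check, e.g.
    #
    #     ds.merge(other, overwrite_vars="temperature")
    #
    # The variable name "temperature" is only an example. Below, ``other`` is
    # split into overwriting / non-overwriting parts and the overwriting part is
    # given priority via ``priority_arg``.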
if not isinstance(overwrite_vars, str) and isinstance(overwrite_vars, Iterable): overwrite_vars = set(overwrite_vars) else: overwrite_vars = {overwrite_vars} if not overwrite_vars: objs = [dataset, other] priority_arg = None elif overwrite_vars == set(other): objs = [dataset, other] priority_arg = 1 else: other_overwrite: dict[Hashable, CoercibleValue] = {} other_no_overwrite: dict[Hashable, CoercibleValue] = {} for k, v in other.items(): if k in overwrite_vars: other_overwrite[k] = v else: other_no_overwrite[k] = v objs = [dataset, other_no_overwrite, other_overwrite] priority_arg = 2 return merge_core( objs, compat=compat, join=join, priority_arg=priority_arg, fill_value=fill_value, combine_attrs=combine_attrs, ) def dataset_update_method(dataset: Dataset, other: CoercibleMapping) -> _MergeResult: """Guts of the Dataset.update method. This drops a duplicated coordinates from `other` if `other` is not an `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068, GH2180). """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if not isinstance(other, Dataset): other = dict(other) for key, value in other.items(): if isinstance(value, DataArray): # drop conflicting coordinates coord_names = [ c for c in value.coords if c not in value.dims and c in dataset.coords ] if coord_names: value = value.drop_vars(coord_names) if isinstance(value.variable, IndexVariable): variable = value.variable.to_base_variable() value = value._replace(variable=variable) other[key] = value return merge_core( [dataset, other], compat="broadcast_equals", join="outer", priority_arg=1, indexes=dataset.xindexes, combine_attrs="override", ) def merge_data_and_coords(data_vars: DataVars, coords) -> _MergeResult: """Used in Dataset.__init__.""" from xarray.core.coordinates import Coordinates, create_coords_with_default_indexes if isinstance(coords, Coordinates): coords = coords.copy() else: coords = create_coords_with_default_indexes(coords, data_vars) # exclude coords from alignment (all variables in a Coordinates object should # already be aligned together) and use coordinates' indexes to align data_vars return merge_core( [data_vars, coords], compat="broadcast_equals", join="outer", combine_attrs="override", explicit_coords=tuple(coords), indexes=coords.xindexes, priority_arg=1, skip_align_args=[1], ) xarray-2025.12.0/xarray/testing/000077500000000000000000000000001511464676000163265ustar00rootroot00000000000000xarray-2025.12.0/xarray/testing/__init__.py000066400000000000000000000011551511464676000204410ustar00rootroot00000000000000from xarray.testing.assertions import ( # noqa: F401 _assert_dataarray_invariants, _assert_dataset_invariants, _assert_indexes_invariants_checks, _assert_internal_invariants, _assert_variable_invariants, _data_allclose_or_equiv, assert_allclose, assert_chunks_equal, assert_duckarray_allclose, assert_duckarray_equal, assert_equal, assert_identical, assert_isomorphic, ) __all__ = [ "assert_allclose", "assert_chunks_equal", "assert_duckarray_allclose", "assert_duckarray_equal", "assert_equal", "assert_identical", "assert_isomorphic", ] xarray-2025.12.0/xarray/testing/assertions.py000066400000000000000000000436041511464676000211010ustar00rootroot00000000000000"""Testing functions exposed to the user API""" import functools import warnings from collections.abc import Hashable from typing import Any import numpy as np import pandas as pd from xarray.core import duck_array_ops, formatting, utils from xarray.core.coordinates import Coordinates from 
xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.datatree_mapping import map_over_datasets from xarray.core.formatting import diff_datatree_repr from xarray.core.indexes import Index, PandasIndex, PandasMultiIndex, default_indexes from xarray.core.variable import IndexVariable, Variable def ensure_warnings(func): # sometimes tests elevate warnings to errors # -> make sure that does not happen in the assert_* functions @functools.wraps(func) def wrapper(*args, **kwargs): __tracebackhide__ = True with warnings.catch_warnings(): # only remove filters that would "error" warnings.filters = [f for f in warnings.filters if f[0] != "error"] return func(*args, **kwargs) return wrapper def _decode_string_data(data): if data.dtype.kind == "S": return np.char.decode(data, "utf-8", "replace") return data def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True): if any(arr.dtype.kind == "S" for arr in [arr1, arr2]) and decode_bytes: arr1 = _decode_string_data(arr1) arr2 = _decode_string_data(arr2) exact_dtypes = ["M", "m", "O", "S", "U"] if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]): return duck_array_ops.array_equiv(arr1, arr2) else: return duck_array_ops.allclose_or_equiv(arr1, arr2, rtol=rtol, atol=atol) @ensure_warnings def assert_isomorphic(a: DataTree, b: DataTree): """ Two DataTrees are considered isomorphic if the set of paths to their descendent nodes are the same. Nothing about the data or attrs in each node is checked. Isomorphism is a necessary condition for two trees to be used in a nodewise binary operation, such as tree1 + tree2. Parameters ---------- a : DataTree The first object to compare. b : DataTree The second object to compare. See Also -------- DataTree.isomorphic assert_equal assert_identical """ __tracebackhide__ = True assert isinstance(a, type(b)) if isinstance(a, DataTree): assert a.isomorphic(b), diff_datatree_repr(a, b, "isomorphic") else: raise TypeError(f"{type(a)} not of type DataTree") def maybe_transpose_dims(a, b, check_dim_order: bool): """Helper for assert_equal/allclose/identical""" __tracebackhide__ = True def _maybe_transpose_dims(a, b): if not isinstance(a, Variable | DataArray | Dataset): return b if set(a.dims) == set(b.dims): # Ensure transpose won't fail if a dimension is missing # If this is the case, the difference will be caught by the caller return b.transpose(*a.dims) return b if check_dim_order: return b if isinstance(a, DataTree): return map_over_datasets(_maybe_transpose_dims, a, b) return _maybe_transpose_dims(a, b) @ensure_warnings def assert_equal(a, b, check_dim_order: bool = True): """Like :py:func:`numpy.testing.assert_array_equal`, but for xarray objects. Raises an AssertionError if two objects are not equal. This will match data values, dimensions and coordinates, but not names or attributes (except for Dataset objects for which the variable names must match). Arrays with NaN in the same location are considered equal. For DataTree objects, assert_equal is mapped over all Datasets on each node, with the DataTrees being equal if both are isomorphic and the corresponding Datasets at each node are themselves equal. Parameters ---------- a : xarray.Dataset, xarray.DataArray, xarray.Variable, xarray.Coordinates or xarray.core.datatree.DataTree. The first object to compare. b : xarray.Dataset, xarray.DataArray, xarray.Variable, xarray.Coordinates or xarray.core.datatree.DataTree. The second object to compare. 
check_dim_order : bool, optional, default is True Whether dimensions must be in the same order. See Also -------- assert_identical, assert_allclose, Dataset.equals, DataArray.equals numpy.testing.assert_array_equal """ __tracebackhide__ = True assert type(a) is type(b) or ( isinstance(a, Coordinates) and isinstance(b, Coordinates) ) b = maybe_transpose_dims(a, b, check_dim_order) if isinstance(a, Variable | DataArray): assert a.equals(b), formatting.diff_array_repr(a, b, "equals") elif isinstance(a, Dataset): assert a.equals(b), formatting.diff_dataset_repr(a, b, "equals") elif isinstance(a, Coordinates): assert a.equals(b), formatting.diff_coords_repr(a, b, "equals") elif isinstance(a, DataTree): assert a.equals(b), diff_datatree_repr(a, b, "equals") else: raise TypeError(f"{type(a)} not supported by assertion comparison") @ensure_warnings def assert_identical(a, b): """Like :py:func:`xarray.testing.assert_equal`, but also matches the objects' names and attributes. Raises an AssertionError if two objects are not identical. For DataTree objects, assert_identical is mapped over all Datasets on each node, with the DataTrees being identical if both are isomorphic and the corresponding Datasets at each node are themselves identical. Parameters ---------- a : xarray.Dataset, xarray.DataArray, xarray.Variable or xarray.Coordinates The first object to compare. b : xarray.Dataset, xarray.DataArray, xarray.Variable or xarray.Coordinates The second object to compare. See Also -------- assert_equal, assert_allclose, Dataset.equals, DataArray.equals """ __tracebackhide__ = True assert type(a) is type(b) or ( isinstance(a, Coordinates) and isinstance(b, Coordinates) ) if isinstance(a, Variable): assert a.identical(b), formatting.diff_array_repr(a, b, "identical") elif isinstance(a, DataArray): assert a.name == b.name, ( f"DataArray names are different. L: {a.name}, R: {b.name}" ) assert a.identical(b), formatting.diff_array_repr(a, b, "identical") elif isinstance(a, Dataset | Variable): assert a.identical(b), formatting.diff_dataset_repr(a, b, "identical") elif isinstance(a, Coordinates): assert a.identical(b), formatting.diff_coords_repr(a, b, "identical") elif isinstance(a, DataTree): assert a.identical(b), diff_datatree_repr(a, b, "identical") else: raise TypeError(f"{type(a)} not supported by assertion comparison") @ensure_warnings def assert_allclose( a, b, rtol=1e-05, atol=1e-08, decode_bytes=True, check_dim_order: bool = True ): """Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects. Raises an AssertionError if two objects are not equal up to desired tolerance. Parameters ---------- a : xarray.Dataset, xarray.DataArray or xarray.Variable The first object to compare. b : xarray.Dataset, xarray.DataArray or xarray.Variable The second object to compare. rtol : float, optional Relative tolerance. atol : float, optional Absolute tolerance. decode_bytes : bool, optional Whether byte dtypes should be decoded to strings as UTF-8 or not. This is useful for testing serialization methods on Python 3 that return saved strings as bytes. check_dim_order : bool, optional, default is True Whether dimensions must be in the same order. 
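    Examples
    --------
    Illustrative usage (the tolerance shown is arbitrary)::

        import numpy as np
        import xarray as xr
        from xarray.testing import assert_allclose

        a = xr.DataArray(np.array([0.1, 0.2]), dims="x")
        b = xr.DataArray(np.array([0.1, 0.2 + 1e-9]), dims="x")
        assert_allclose(a, b, rtol=1e-6)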
See Also -------- assert_identical, assert_equal, numpy.testing.assert_allclose """ __tracebackhide__ = True assert type(a) is type(b) b = maybe_transpose_dims(a, b, check_dim_order) equiv = functools.partial( _data_allclose_or_equiv, rtol=rtol, atol=atol, decode_bytes=decode_bytes ) equiv.__name__ = "allclose" # type: ignore[attr-defined] def compat_variable(a, b): a = getattr(a, "variable", a) b = getattr(b, "variable", b) return a.dims == b.dims and (a._data is b._data or equiv(a.data, b.data)) def compat_node(a, b): return a.ds._coord_names == b.ds._coord_names and utils.dict_equiv( a.variables, b.variables, compat=compat_variable ) if isinstance(a, Variable): allclose = compat_variable(a, b) assert allclose, formatting.diff_array_repr(a, b, compat=equiv) elif isinstance(a, DataArray): allclose = utils.dict_equiv( a.coords, b.coords, compat=compat_variable ) and compat_variable(a.variable, b.variable) assert allclose, formatting.diff_array_repr(a, b, compat=equiv) elif isinstance(a, Dataset): allclose = a._coord_names == b._coord_names and utils.dict_equiv( a.variables, b.variables, compat=compat_variable ) assert allclose, formatting.diff_dataset_repr(a, b, compat=equiv) elif isinstance(a, Coordinates): allclose = utils.dict_equiv(a.variables, b.variables, compat=compat_variable) assert allclose, formatting.diff_coords_repr(a, b, compat=equiv) elif isinstance(a, DataTree): allclose = utils.dict_equiv( dict(a.subtree_with_keys), dict(b.subtree_with_keys), compat=compat_node ) assert allclose, formatting.diff_datatree_repr(a, b, compat=equiv) else: raise TypeError(f"{type(a)} not supported by assertion comparison") def _format_message(x, y, err_msg, verbose): diff = x - y abs_diff = max(abs(diff)) rel_diff = "not implemented" n_diff = np.count_nonzero(diff) n_total = diff.size fraction = f"{n_diff} / {n_total}" percentage = float(n_diff / n_total * 100) parts = [ "Arrays are not equal", err_msg, f"Mismatched elements: {fraction} ({percentage:.0f}%)", f"Max absolute difference: {abs_diff}", f"Max relative difference: {rel_diff}", ] if verbose: parts += [ f" x: {x!r}", f" y: {y!r}", ] return "\n".join(parts) @ensure_warnings def assert_duckarray_allclose( actual, desired, rtol=1e-07, atol=0, err_msg="", verbose=True ): """Like `np.testing.assert_allclose`, but for duckarrays.""" __tracebackhide__ = True allclose = duck_array_ops.allclose_or_equiv(actual, desired, rtol=rtol, atol=atol) assert allclose, _format_message(actual, desired, err_msg=err_msg, verbose=verbose) @ensure_warnings def assert_duckarray_equal(x, y, err_msg="", verbose=True): """Like `np.testing.assert_array_equal`, but for duckarrays""" __tracebackhide__ = True if not utils.is_duck_array(x) and not utils.is_scalar(x): x = np.asarray(x) if not utils.is_duck_array(y) and not utils.is_scalar(y): y = np.asarray(y) if (utils.is_duck_array(x) and utils.is_scalar(y)) or ( utils.is_scalar(x) and utils.is_duck_array(y) ): equiv = duck_array_ops.array_all(x == y) else: equiv = duck_array_ops.array_equiv(x, y) assert equiv, _format_message(x, y, err_msg=err_msg, verbose=verbose) def assert_chunks_equal(a, b): """ Assert that chunksizes along chunked dimensions are equal. Parameters ---------- a : xarray.Dataset or xarray.DataArray The first object to compare. b : xarray.Dataset or xarray.DataArray The second object to compare. 
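    Examples
    --------
    Illustrative usage (requires dask for ``.chunk``)::

        import xarray as xr
        from xarray.testing import assert_chunks_equal

        a = xr.DataArray(range(6), dims="x").chunk({"x": 3})
        b = xr.DataArray(range(6), dims="x").chunk({"x": 3})
        assert_chunks_equal(a, b)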
""" if isinstance(a, DataArray) != isinstance(b, DataArray): raise TypeError("a and b have mismatched types") left = a.unify_chunks() right = b.unify_chunks() assert left.chunks == right.chunks def _assert_indexes_invariants_checks( indexes, possible_coord_variables, dims, check_default=True ): assert isinstance(indexes, dict), indexes assert all(isinstance(v, Index) for v in indexes.values()), { k: type(v) for k, v in indexes.items() } if check_default: index_vars = { k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable) } assert indexes.keys() <= index_vars, (set(indexes), index_vars) assert all( k in index_vars for k, v in possible_coord_variables.items() if v.dims == (k,) ), {k: type(v) for k, v in possible_coord_variables.items()} assert not any( isinstance(v, IndexVariable) for k, v in possible_coord_variables.items() if k not in indexes.keys() ), {k: type(v) for k, v in possible_coord_variables.items()} # check pandas index wrappers vs. coordinate data adapters for k, index in indexes.items(): if isinstance(index, PandasIndex): pd_index = index.index var = possible_coord_variables[k] assert (index.dim,) == var.dims, (pd_index, var) if k == index.dim: # skip multi-index levels here (checked below) assert index.coord_dtype == var.dtype, (index.coord_dtype, var.dtype) assert isinstance(var._data.array, pd.Index), var._data.array # TODO: check identity instead of equality? assert pd_index.equals(var._data.array), (pd_index, var) if isinstance(index, PandasMultiIndex): pd_index = index.index for name in index.index.names: assert name in possible_coord_variables, (pd_index, index_vars) var = possible_coord_variables[name] assert (index.dim,) == var.dims, (pd_index, var) assert index.level_coords_dtype[name] == var.dtype, ( index.level_coords_dtype[name], var.dtype, ) assert isinstance(var._data.array, pd.MultiIndex), var._data.array assert pd_index.equals(var._data.array), (pd_index, var) # check all all levels are in `indexes` assert name in indexes, (name, set(indexes)) # index identity is used to find unique indexes in `indexes` assert index is indexes[name], (pd_index, indexes[name].index) if check_default: defaults = default_indexes(possible_coord_variables, dims) assert indexes.keys() == defaults.keys(), (set(indexes), set(defaults)) assert all(v.equals(defaults[k]) for k, v in indexes.items()), ( indexes, defaults, ) def _assert_variable_invariants( var: Variable | Any, name: Hashable = None, ) -> None: if name is None: name_or_empty: tuple = () else: name_or_empty = (name,) assert isinstance(var, Variable), {name: type(var)} assert isinstance(var._dims, tuple), name_or_empty + (var._dims,) assert len(var._dims) == len(var._data.shape), name_or_empty + ( var._dims, var._data.shape, ) assert isinstance(var._encoding, type(None) | dict), name_or_empty + ( var._encoding, ) assert isinstance(var._attrs, type(None) | dict), name_or_empty + (var._attrs,) def _assert_dataarray_invariants(da: DataArray, check_default_indexes: bool): _assert_variable_invariants(da._variable) assert isinstance(da._coords, dict), da._coords if check_default_indexes: assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), ( da.dims, {k: v.dims for k, v in da._coords.items()}, ) for k, v in da._coords.items(): _assert_variable_invariants(v, k) assert da._indexes is not None _assert_indexes_invariants_checks( da._indexes, da._coords, da.dims, check_default=check_default_indexes ) def _assert_dataset_invariants(ds: Dataset, check_default_indexes: bool): assert 
isinstance(ds._variables, dict), type(ds._variables) for k, v in ds._variables.items(): _assert_variable_invariants(v, k) assert isinstance(ds._coord_names, set), ds._coord_names assert ds._coord_names <= ds._variables.keys(), ( ds._coord_names, set(ds._variables), ) assert type(ds._dims) is dict, ds._dims assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims var_dims: set[Hashable] = set() for v in ds._variables.values(): var_dims.update(v.dims) assert ds._dims.keys() == var_dims, (set(ds._dims), var_dims) assert all( ds._dims[k] == v.sizes[k] for v in ds._variables.values() for k in v.sizes ), (ds._dims, {k: v.sizes for k, v in ds._variables.items()}) assert ds._indexes is not None _assert_indexes_invariants_checks( ds._indexes, ds._variables, ds._dims, check_default=check_default_indexes ) assert isinstance(ds._encoding, type(None) | dict) assert isinstance(ds._attrs, type(None) | dict) def _assert_internal_invariants( xarray_obj: DataArray | Dataset | Variable, check_default_indexes: bool ): """Validate that an xarray object satisfies its own internal invariants. This exists for the benefit of xarray's own test suite, but may be useful in external projects if they (ill-advisedly) create objects using xarray's private APIs. """ if isinstance(xarray_obj, Variable): _assert_variable_invariants(xarray_obj) elif isinstance(xarray_obj, DataArray): _assert_dataarray_invariants( xarray_obj, check_default_indexes=check_default_indexes ) elif isinstance(xarray_obj, Dataset): _assert_dataset_invariants( xarray_obj, check_default_indexes=check_default_indexes ) elif isinstance(xarray_obj, Coordinates): _assert_dataset_invariants( xarray_obj.to_dataset(), check_default_indexes=check_default_indexes ) else: raise TypeError( f"{type(xarray_obj)} is not a supported type for xarray invariant checks" ) xarray-2025.12.0/xarray/testing/strategies.py000066400000000000000000000432161511464676000210600ustar00rootroot00000000000000import datetime import warnings from collections.abc import Hashable, Iterable, Mapping, Sequence from typing import TYPE_CHECKING, Any, Protocol, overload import hypothesis.extra.numpy as npst import numpy as np from hypothesis.errors import InvalidArgument import xarray as xr from xarray.core.types import T_DuckArray from xarray.core.utils import attempt_import, module_available if TYPE_CHECKING: from xarray.core.types import _DTypeLikeNested, _ShapeLike if TYPE_CHECKING: import hypothesis.strategies as st else: st = attempt_import("hypothesis.strategies") __all__ = [ "attrs", "cftime_datetimes", "datetimes", "dimension_names", "dimension_sizes", "names", "pandas_index_dtypes", "supported_dtypes", "unique_subset_of", "variables", ] class ArrayStrategyFn(Protocol[T_DuckArray]): def __call__( self, *, shape: "_ShapeLike", dtype: "_DTypeLikeNested", ) -> st.SearchStrategy[T_DuckArray]: ... def supported_dtypes() -> st.SearchStrategy[np.dtype]: """ Generates only those numpy dtypes which xarray can handle. Use instead of hypothesis.extra.numpy.scalar_dtypes in order to exclude weirder dtypes such as unicode, byte_string, array, or nested dtypes. Also excludes datetimes, which dodges bugs with pandas non-nanosecond datetime overflows. Checks only native endianness. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ # TODO should this be exposed publicly? # We should at least decide what the set of numpy dtypes that xarray officially supports is. 
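    # Illustrative sketch (assumption: hypothesis is installed; not executed here):
    # this strategy is usually fed into other strategies in a property-based test,
    # e.g.
    #
    #     from hypothesis import given
    #     import xarray.testing.strategies as xrst
    #
    #     @given(xrst.variables(dtype=xrst.supported_dtypes()))
    #     def test_copy_roundtrip(var):
    #         assert var.identical(var.copy())
    #
    # The test name ``test_copy_roundtrip`` is just an example.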
return ( npst.integer_dtypes(endianness="=") | npst.unsigned_integer_dtypes(endianness="=") | npst.floating_dtypes(endianness="=") | npst.complex_number_dtypes(endianness="=") # | npst.datetime64_dtypes() # | npst.timedelta64_dtypes() # | npst.unicode_string_dtypes() ) def pandas_index_dtypes() -> st.SearchStrategy[np.dtype]: """ Dtypes supported by pandas indexes. Restrict datetime64 and timedelta64 to ns frequency till Xarray relaxes that. """ return ( npst.integer_dtypes(endianness="=", sizes=(32, 64)) | npst.unsigned_integer_dtypes(endianness="=", sizes=(32, 64)) | npst.floating_dtypes(endianness="=", sizes=(32, 64)) # TODO: unset max_period | npst.datetime64_dtypes(endianness="=", max_period="ns") # TODO: set max_period="D" | npst.timedelta64_dtypes(endianness="=", max_period="ns") | npst.unicode_string_dtypes(endianness="=") ) def datetimes() -> st.SearchStrategy: """ Generates datetime objects including both standard library datetimes and cftime datetimes. Returns standard library datetime.datetime objects, and if cftime is available, also includes cftime datetime objects from various calendars. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ strategy = st.datetimes() if module_available("cftime"): strategy = strategy | cftime_datetimes() return strategy # TODO Generalize to all valid unicode characters once formatting bugs in xarray's reprs are fixed + docs can handle it. _readable_characters = st.characters( categories=["L", "N"], max_codepoint=0x017F ) # only use characters within the "Latin Extended-A" subset of unicode def names() -> st.SearchStrategy[str]: """ Generates arbitrary string names for dimensions / variables. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ return st.text( _readable_characters, min_size=1, max_size=5, ) def dimension_names( *, name_strategy=None, min_dims: int = 0, max_dims: int = 3, ) -> st.SearchStrategy[list[Hashable]]: """ Generates an arbitrary list of valid dimension names. Requires the hypothesis package to be installed. Parameters ---------- name_strategy Strategy for making names. Useful if we need to share this. min_dims Minimum number of dimensions in generated list. max_dims Maximum number of dimensions in generated list. """ if name_strategy is None: name_strategy = names() return st.lists( elements=name_strategy, min_size=min_dims, max_size=max_dims, unique=True, ) def dimension_sizes( *, dim_names: st.SearchStrategy[Hashable] = names(), # noqa: B008 min_dims: int = 0, max_dims: int = 3, min_side: int = 1, max_side: int | None = None, ) -> st.SearchStrategy[Mapping[Hashable, int]]: """ Generates an arbitrary mapping from dimension names to lengths. Requires the hypothesis package to be installed. Parameters ---------- dim_names: strategy generating strings, optional Strategy for generating dimension names. Defaults to the `names` strategy. min_dims: int, optional Minimum number of dimensions in generated list. Default is 1. max_dims: int, optional Maximum number of dimensions in generated list. Default is 3. min_side: int, optional Minimum size of a dimension. Default is 1. max_side: int, optional Minimum size of a dimension. Default is `min_length` + 5. 
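    Examples
    --------
    Example output is illustrative only:

    >>> dimension_sizes().example()  # doctest: +SKIP
    {'x': 2, 'y': 1}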
See Also -------- :ref:`testing.hypothesis`_ """ if max_side is None: max_side = min_side + 3 return st.dictionaries( keys=dim_names, values=st.integers(min_value=min_side, max_value=max_side), min_size=min_dims, max_size=max_dims, ) _readable_strings = st.text( _readable_characters, max_size=5, ) _attr_keys = _readable_strings _small_arrays = npst.arrays( shape=npst.array_shapes( max_side=2, max_dims=2, ), dtype=npst.scalar_dtypes() | npst.byte_string_dtypes() | npst.unicode_string_dtypes(), ) _attr_values = st.none() | st.booleans() | _readable_strings | _small_arrays simple_attrs = st.dictionaries(_attr_keys, _attr_values) def attrs() -> st.SearchStrategy[Mapping[Hashable, Any]]: """ Generates arbitrary valid attributes dictionaries for xarray objects. The generated dictionaries can potentially be recursive. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ return st.recursive( st.dictionaries(_attr_keys, _attr_values), lambda children: st.dictionaries(_attr_keys, children), max_leaves=3, ) ATTRS = attrs() @st.composite def variables( draw: st.DrawFn, *, array_strategy_fn: ArrayStrategyFn | None = None, dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None, dtype: st.SearchStrategy[np.dtype] | None = None, attrs: st.SearchStrategy[Mapping] = ATTRS, ) -> xr.Variable: """ Generates arbitrary xarray.Variable objects. Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array up front. Passing nothing will generate a completely arbitrary Variable (containing a numpy array). Requires the hypothesis package to be installed. Parameters ---------- array_strategy_fn: Callable which returns a strategy generating array-likes, optional Callable must only accept shape and dtype kwargs, and must generate results consistent with its input. If not passed the default is to generate a small numpy array with one of the supported_dtypes. dims: Strategy for generating the dimensions, optional Can either be a strategy for generating a sequence of string dimension names, or a strategy for generating a mapping of string dimension names to integer lengths along each dimension. If provided as a mapping the array shape will be passed to array_strategy_fn. Default is to generate arbitrary dimension names for each axis in data. dtype: Strategy which generates np.dtype objects, optional Will be passed in to array_strategy_fn. Default is to generate any scalar dtype using supported_dtypes. Be aware that this default set of dtypes includes some not strictly allowed by the array API standard. attrs: Strategy which generates dicts, optional Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones, and numpy arrays. Returns ------- variable_strategy Strategy for generating xarray.Variable objects. Raises ------ ValueError If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape & dtype input passed to it. 
Examples -------- Generate completely arbitrary Variable objects backed by a numpy array: >>> variables().example() # doctest: +SKIP array([43506, -16, -151], dtype=int32) >>> variables().example() # doctest: +SKIP array([[[-10000000., -10000000.], [-10000000., -10000000.]], [[-10000000., -10000000.], [ 0., -10000000.]], [[ 0., -10000000.], [-10000000., inf]], [[ -0., -10000000.], [-10000000., -0.]]], dtype=float32) Attributes: ล›ล™ฤด: {'ฤ‰': {'iฤฅf': array([-30117, -1740], dtype=int16)}} Generate only Variable objects with certain dimension names: >>> variables(dims=st.just(["a", "b"])).example() # doctest: +SKIP array([[ 248, 4294967295, 4294967295], [2412855555, 3514117556, 4294967295], [ 111, 4294967295, 4294967295], [4294967295, 1084434988, 51688], [ 47714, 252, 11207]], dtype=uint32) Generate only Variable objects with certain dimension names and lengths: >>> variables(dims=st.just({"a": 2, "b": 1})).example() # doctest: +SKIP array([[-1.00000000e+007+3.40282347e+038j], [-2.75034266e-225+2.22507386e-311j]]) See Also -------- :ref:`testing.hypothesis`_ """ if dtype is None: dtype = supported_dtypes() if not isinstance(dims, st.SearchStrategy) and dims is not None: raise InvalidArgument( f"dims must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dims)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) if not isinstance(dtype, st.SearchStrategy) and dtype is not None: raise InvalidArgument( f"dtype must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dtype)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) if not isinstance(attrs, st.SearchStrategy) and attrs is not None: raise InvalidArgument( f"attrs must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(attrs)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) _array_strategy_fn: ArrayStrategyFn if array_strategy_fn is None: # For some reason if I move the default value to the function signature definition mypy incorrectly says the ignore is no longer necessary, making it impossible to satisfy mypy _array_strategy_fn = npst.arrays # type: ignore[assignment] # npst.arrays has extra kwargs that we aren't using later elif not callable(array_strategy_fn): raise InvalidArgument( "array_strategy_fn must be a Callable that accepts the kwargs dtype and shape and returns a hypothesis " "strategy which generates corresponding array-like objects." 
) else: _array_strategy_fn = ( array_strategy_fn # satisfy mypy that this new variable cannot be None ) _dtype = draw(dtype) if dims is not None: # generate dims first then draw data to match _dims = draw(dims) if isinstance(_dims, Sequence): dim_names = list(_dims) valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims)) _shape = draw(valid_shapes) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) elif isinstance(_dims, Mapping | dict): # should be a mapping of form {dim_names: lengths} dim_names, _shape = list(_dims.keys()), tuple(_dims.values()) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) else: raise InvalidArgument( f"Invalid type returned by dims strategy - drew an object of type {type(dims)}" ) else: # nothing provided, so generate everything consistently # We still generate the shape first here just so that we always pass shape to array_strategy_fn _shape = draw(npst.array_shapes()) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) dim_names = draw(dimension_names(min_dims=len(_shape), max_dims=len(_shape))) _data = draw(array_strategy) if _data.shape != _shape: raise ValueError( "array_strategy_fn returned an array object with a different shape than it was passed." f"Passed {_shape}, but returned {_data.shape}." "Please either specify a consistent shape via the dims kwarg or ensure the array_strategy_fn callable " "obeys the shape argument passed to it." ) if _data.dtype != _dtype: raise ValueError( "array_strategy_fn returned an array object with a different dtype than it was passed." f"Passed {_dtype}, but returned {_data.dtype}" "Please either specify a consistent dtype via the dtype kwarg or ensure the array_strategy_fn callable " "obeys the dtype argument passed to it." ) return xr.Variable(dims=dim_names, data=_data, attrs=draw(attrs)) @overload def unique_subset_of( objs: Sequence[Hashable], *, min_size: int = 0, max_size: int | None = None, ) -> st.SearchStrategy[Sequence[Hashable]]: ... @overload def unique_subset_of( objs: Mapping[Hashable, Any], *, min_size: int = 0, max_size: int | None = None, ) -> st.SearchStrategy[Mapping[Hashable, Any]]: ... @st.composite def unique_subset_of( draw: st.DrawFn, objs: Sequence[Hashable] | Mapping[Hashable, Any], *, min_size: int = 0, max_size: int | None = None, ) -> Sequence[Hashable] | Mapping[Hashable, Any]: """ Return a strategy which generates a unique subset of the given objects. Each entry in the output subset will be unique (if input was a sequence) or have a unique key (if it was a mapping). Requires the hypothesis package to be installed. Parameters ---------- objs: Union[Sequence[Hashable], Mapping[Hashable, Any]] Objects from which to sample to produce the subset. min_size: int, optional Minimum size of the returned subset. Default is 0. max_size: int, optional Maximum size of the returned subset. Default is the full length of the input. If set to 0 the result will be an empty mapping. Returns ------- unique_subset_strategy Strategy generating subset of the input. 
Examples -------- >>> unique_subset_of({"x": 2, "y": 3}).example() # doctest: +SKIP {'y': 3} >>> unique_subset_of(["x", "y"]).example() # doctest: +SKIP ['x'] See Also -------- :ref:`testing.hypothesis`_ """ if not isinstance(objs, Iterable): raise TypeError( f"Object to sample from must be an Iterable or a Mapping, but received type {type(objs)}" ) if len(objs) == 0: raise ValueError("Can't sample from a length-zero object.") keys = list(objs.keys()) if isinstance(objs, Mapping) else objs subset_keys = draw( st.lists( st.sampled_from(keys), unique=True, min_size=min_size, max_size=max_size, ) ) return ( {k: objs[k] for k in subset_keys} if isinstance(objs, Mapping) else subset_keys ) @st.composite def cftime_datetimes(draw: st.DrawFn): """ Generates cftime datetime objects across various calendars. This strategy generates cftime datetime objects from all available cftime calendars with dates ranging from year -99999 to 99999. Requires both the hypothesis and cftime packages to be installed. Returns ------- cftime_datetime_strategy Strategy for generating cftime datetime objects. See Also -------- :ref:`testing.hypothesis`_ """ from xarray.tests import _all_cftime_date_types date_types = _all_cftime_date_types() calendars = list(date_types) calendar = draw(st.sampled_from(calendars)) date_type = date_types[calendar] with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*") daysinmonth = date_type(99999, 12, 1).daysinmonth min_value = date_type(-99999, 1, 1) max_value = date_type(99999, 12, daysinmonth, 23, 59, 59, 999999) unit_microsecond = datetime.timedelta(microseconds=1) timespan_microseconds = (max_value - min_value) // unit_microsecond microseconds_offset = draw(st.integers(0, timespan_microseconds)) return min_value + datetime.timedelta(microseconds=microseconds_offset) xarray-2025.12.0/xarray/tests/000077500000000000000000000000001511464676000160135ustar00rootroot00000000000000xarray-2025.12.0/xarray/tests/CLAUDE.md000066400000000000000000000060251511464676000172750ustar00rootroot00000000000000# Testing Guidelines for xarray ## Handling Optional Dependencies xarray has many optional dependencies that may not be available in all testing environments. Always use the standard decorators and patterns when writing tests that require specific dependencies. ### Standard Decorators **ALWAYS use decorators** like `@requires_dask`, `@requires_cftime`, etc. instead of conditional `if` statements. All available decorators are defined in `xarray/tests/__init__.py` (look for `requires_*` decorators). ### DO NOT use conditional imports or skipif โŒ **WRONG - Do not do this:** ```python def test_mean_with_cftime(): if has_dask: # WRONG! ds = ds.chunk({}) result = ds.mean() ``` โŒ **ALSO WRONG - Avoid pytest.mark.skipif in parametrize:** ```python @pytest.mark.parametrize( "chunk", [ pytest.param( True, marks=pytest.mark.skipif(not has_dask, reason="requires dask") ), False, ], ) def test_something(chunk): ... ``` โœ… **CORRECT - Do this instead:** ```python def test_mean_with_cftime(): # Test without dask result = ds.mean() @requires_dask def test_mean_with_cftime_dask(): # Separate test for dask functionality ds = ds.chunk({}) result = ds.mean() ``` โœ… **OR for parametrized tests, split them:** ```python def test_something_without_dask(): # Test the False case ... @requires_dask def test_something_with_dask(): # Test the True case with dask ... 
``` ### Multiple dependencies When a test requires multiple optional dependencies: ```python @requires_dask @requires_scipy def test_interpolation_with_dask(): ... ``` ### Importing optional dependencies in tests For imports within test functions, use `pytest.importorskip`: ```python def test_cftime_functionality(): cftime = pytest.importorskip("cftime") # Now use cftime ``` ### Common patterns 1. **Split tests by dependency** - Don't mix optional dependency code with base functionality: ```python def test_base_functionality(): # Core test without optional deps result = ds.mean() assert result is not None @requires_dask def test_dask_functionality(): # Dask-specific test ds_chunked = ds.chunk({}) result = ds_chunked.mean() assert result is not None ``` 2. **Use fixtures for dependency-specific setup**: ```python @pytest.fixture def dask_array(): pytest.importorskip("dask.array") import dask.array as da return da.from_array([1, 2, 3], chunks=2) ``` 3. **Check available implementations**: ```python from xarray.core.duck_array_ops import available_implementations @pytest.mark.parametrize("implementation", available_implementations()) def test_with_available_backends(implementation): ... ``` ### Key Points - CI environments intentionally exclude certain dependencies (e.g., `all-but-dask`, `bare-minimum`) - A test failing in "all-but-dask" because it uses dask is a test bug, not a CI issue - Look at similar existing tests for patterns to follow xarray-2025.12.0/xarray/tests/__init__.py000066400000000000000000000345701511464676000201350ustar00rootroot00000000000000from __future__ import annotations import importlib import platform import string import warnings from contextlib import contextmanager, nullcontext from unittest import mock # noqa: F401 import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal # noqa: F401 from packaging.version import Version from pandas.testing import assert_frame_equal # noqa: F401 import xarray.testing from xarray import Dataset from xarray.coding.times import _STANDARD_CALENDARS as _STANDARD_CALENDARS_UNSORTED from xarray.core.duck_array_ops import allclose_or_equiv # noqa: F401 from xarray.core.extension_array import PandasExtensionArray from xarray.core.options import set_options from xarray.core.variable import IndexVariable from xarray.testing import ( # noqa: F401 assert_chunks_equal, assert_duckarray_allclose, assert_duckarray_equal, ) from xarray.tests.arrays import ( # noqa: F401 ConcatenatableArray, DuckArrayWrapper, FirstElementAccessibleArray, InaccessibleArray, IndexableArray, UnexpectedDataAccess, ) # import mpl and change the backend before other mpl imports try: import matplotlib as mpl # Order of imports is important here. 
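    # ("Agg" is matplotlib's non-interactive raster backend, so plotting tests can
    # run headless without a display server.)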
# Using a different backend makes Travis CI work mpl.use("Agg") except ImportError: pass # https://github.com/pydata/xarray/issues/7322 warnings.filterwarnings("ignore", "'urllib3.contrib.pyopenssl' module is deprecated") warnings.filterwarnings("ignore", "Deprecated call to `pkg_resources.declare_namespace") warnings.filterwarnings("ignore", "pkg_resources is deprecated as an API") warnings.filterwarnings("ignore", message="numpy.ndarray size changed") arm_xfail = pytest.mark.xfail( platform.machine() == "aarch64" or "arm" in platform.machine(), reason="expected failure on ARM", ) def assert_writeable(ds): readonly = [ name for name, var in ds.variables.items() if not isinstance(var, IndexVariable) and not isinstance( var.data, PandasExtensionArray | pd.api.extensions.ExtensionArray ) and not var.data.flags.writeable ] assert not readonly, readonly def _importorskip( modname: str, minversion: str | None = None ) -> tuple[bool, pytest.MarkDecorator]: try: mod = importlib.import_module(modname) has = True if minversion is not None: v = getattr(mod, "__version__", "999") if Version(v) < Version(minversion): raise ImportError("Minimum version not satisfied") except ImportError: has = False reason = f"requires {modname}" if minversion is not None: reason += f">={minversion}" func = pytest.mark.skipif(not has, reason=reason) return has, func has_matplotlib, requires_matplotlib = _importorskip("matplotlib") has_scipy, requires_scipy = _importorskip("scipy") has_scipy_ge_1_13, requires_scipy_ge_1_13 = _importorskip("scipy", "1.13") with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="'cgi' is deprecated and slated for removal in Python 3.13", category=DeprecationWarning, ) has_pydap, requires_pydap = _importorskip("pydap.client") has_netCDF4, requires_netCDF4 = _importorskip("netCDF4") with warnings.catch_warnings(): # see https://github.com/pydata/xarray/issues/8537 warnings.filterwarnings( "ignore", message="h5py is running against HDF5 1.14.3", category=UserWarning, ) has_h5netcdf, requires_h5netcdf = _importorskip("h5netcdf") has_cftime, requires_cftime = _importorskip("cftime") has_dask, requires_dask = _importorskip("dask") has_dask_ge_2024_08_1, requires_dask_ge_2024_08_1 = _importorskip( "dask", minversion="2024.08.1" ) has_dask_ge_2024_11_0, requires_dask_ge_2024_11_0 = _importorskip("dask", "2024.11.0") has_dask_ge_2025_1_0, requires_dask_ge_2025_1_0 = _importorskip("dask", "2025.1.0") if has_dask_ge_2025_1_0: has_dask_expr = True requires_dask_expr = pytest.mark.skipif(not has_dask_expr, reason="should not skip") else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="The current Dask DataFrame implementation is deprecated.", category=DeprecationWarning, ) has_dask_expr, requires_dask_expr = _importorskip("dask_expr") has_bottleneck, requires_bottleneck = _importorskip("bottleneck") has_rasterio, requires_rasterio = _importorskip("rasterio") has_zarr, requires_zarr = _importorskip("zarr") has_zarr_v3, requires_zarr_v3 = _importorskip("zarr", "3.0.0") has_zarr_v3_dtypes, requires_zarr_v3_dtypes = _importorskip("zarr", "3.1.0") has_zarr_v3_async_oindex, requires_zarr_v3_async_oindex = _importorskip("zarr", "3.1.2") if has_zarr_v3: import zarr # manual update by checking attrs for now # TODO: use version specifier # installing from git main is giving me a lower version than the # most recently released zarr has_zarr_v3_dtypes = hasattr(zarr.core, "dtype") has_zarr_v3_async_oindex = hasattr(zarr.AsyncArray, "oindex") requires_zarr_v3_dtypes 
= pytest.mark.skipif( not has_zarr_v3_dtypes, reason="requires zarr>3.1.0" ) requires_zarr_v3_async_oindex = pytest.mark.skipif( not has_zarr_v3_async_oindex, reason="requires zarr>3.1.1" ) has_fsspec, requires_fsspec = _importorskip("fsspec") has_iris, requires_iris = _importorskip("iris") has_numbagg, requires_numbagg = _importorskip("numbagg") has_pyarrow, requires_pyarrow = _importorskip("pyarrow") with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="is_categorical_dtype is deprecated and will be removed in a future version.", category=DeprecationWarning, ) # seaborn uses the deprecated `pandas.is_categorical_dtype` has_seaborn, requires_seaborn = _importorskip("seaborn") has_sparse, requires_sparse = _importorskip("sparse") has_cupy, requires_cupy = _importorskip("cupy") has_cartopy, requires_cartopy = _importorskip("cartopy") has_pint, requires_pint = _importorskip("pint") has_numexpr, requires_numexpr = _importorskip("numexpr") has_flox, requires_flox = _importorskip("flox") has_netcdf, requires_netcdf = _importorskip("netcdf") has_pandas_ge_2_2, requires_pandas_ge_2_2 = _importorskip("pandas", "2.2") has_pandas_3, requires_pandas_3 = _importorskip("pandas", "3.0.0.dev0") # some special cases has_scipy_or_netCDF4 = has_scipy or has_netCDF4 requires_scipy_or_netCDF4 = pytest.mark.skipif( not has_scipy_or_netCDF4, reason="requires scipy or netCDF4" ) has_h5netcdf_or_netCDF4 = has_h5netcdf or has_netCDF4 requires_h5netcdf_or_netCDF4 = pytest.mark.skipif( not has_h5netcdf_or_netCDF4, reason="requires h5netcdf or netCDF4" ) has_numbagg_or_bottleneck = has_numbagg or has_bottleneck requires_numbagg_or_bottleneck = pytest.mark.skipif( not has_numbagg_or_bottleneck, reason="requires numbagg or bottleneck" ) has_numpy_2, requires_numpy_2 = _importorskip("numpy", "2.0.0") has_flox_0_9_12, requires_flox_0_9_12 = _importorskip("flox", "0.9.12") has_array_api_strict, requires_array_api_strict = _importorskip("array_api_strict") parametrize_zarr_format = pytest.mark.parametrize( "zarr_format", [ pytest.param(2, id="zarr_format=2"), pytest.param( 3, marks=pytest.mark.skipif( not has_zarr_v3, reason="zarr-python v2 cannot understand the zarr v3 format", ), id="zarr_format=3", ), ], ) def _importorskip_h5netcdf_ros3(has_h5netcdf: bool): if not has_h5netcdf: return has_h5netcdf, pytest.mark.skipif( not has_h5netcdf, reason="requires h5netcdf" ) import h5py h5py_with_ros3 = h5py.get_config().ros3 return h5py_with_ros3, pytest.mark.skipif( not h5py_with_ros3, reason="requires h5netcdf>=1.3.0 and h5py with ros3 support", ) has_h5netcdf_ros3, requires_h5netcdf_ros3 = _importorskip_h5netcdf_ros3(has_h5netcdf) has_netCDF4_1_6_2_or_above, requires_netCDF4_1_6_2_or_above = _importorskip( "netCDF4", "1.6.2" ) has_h5netcdf_1_4_0_or_above, requires_h5netcdf_1_4_0_or_above = _importorskip( "h5netcdf", "1.4.0.dev" ) has_h5netcdf_1_7_0_or_above, requires_h5netcdf_1_7_0_or_above = _importorskip( "h5netcdf", "1.7.0.dev" ) has_netCDF4_1_7_0_or_above, requires_netCDF4_1_7_0_or_above = _importorskip( "netCDF4", "1.7.0" ) # change some global options for tests set_options(warn_for_unclosed_files=True) if has_dask: import dask class CountingScheduler: """Simple dask scheduler counting the number of computes. 
Reference: https://stackoverflow.com/questions/53289286/""" def __init__(self, max_computes=0): self.total_computes = 0 self.max_computes = max_computes def __call__(self, dsk, keys, **kwargs): self.total_computes += 1 if self.total_computes > self.max_computes: raise RuntimeError( f"Too many computes. Total: {self.total_computes} > max: {self.max_computes}." ) return dask.get(dsk, keys, **kwargs) def raise_if_dask_computes(max_computes=0): # return a dummy context manager so that this can be used for non-dask objects if not has_dask: return nullcontext() scheduler = CountingScheduler(max_computes) return dask.config.set(scheduler=scheduler) flaky = pytest.mark.flaky network = pytest.mark.network class ReturnItem: def __getitem__(self, key): return key class IndexerMaker: def __init__(self, indexer_cls): self._indexer_cls = indexer_cls def __getitem__(self, key): if not isinstance(key, tuple): key = (key,) return self._indexer_cls(key) def source_ndarray(array): """Given an ndarray, return the base object which holds its memory, or the object itself. """ with warnings.catch_warnings(): warnings.filterwarnings("ignore", "DatetimeIndex.base") warnings.filterwarnings("ignore", "TimedeltaIndex.base") base = getattr(array, "base", np.asarray(array).base) if base is None: base = array return base def format_record(record) -> str: """Format warning record like `FutureWarning('Function will be deprecated...')`""" return f"{str(record.category)[8:-2]}('{record.message}'))" @contextmanager def assert_no_warnings(): with warnings.catch_warnings(record=True) as record: yield record assert len(record) == 0, ( f"Got {len(record)} unexpected warning(s): {[format_record(r) for r in record]}" ) # Internal versions of xarray's test functions that validate additional # invariants def assert_equal(a, b, check_default_indexes=True): __tracebackhide__ = True xarray.testing.assert_equal(a, b) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) def assert_identical(a, b, check_default_indexes=True): __tracebackhide__ = True xarray.testing.assert_identical(a, b) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) def assert_allclose(a, b, check_default_indexes=True, **kwargs): __tracebackhide__ = True xarray.testing.assert_allclose(a, b, **kwargs) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) _DEFAULT_TEST_DIM_SIZES = (8, 9, 10) def create_test_data( seed: int = 12345, add_attrs: bool = True, dim_sizes: tuple[int, int, int] = _DEFAULT_TEST_DIM_SIZES, use_extension_array: bool = False, ) -> Dataset: rs = np.random.default_rng(seed) _vars = { "var1": ["dim1", "dim2"], "var2": ["dim1", "dim2"], "var3": ["dim3", "dim1"], } _dims = {"dim1": dim_sizes[0], "dim2": dim_sizes[1], "dim3": dim_sizes[2]} obj = Dataset() obj["dim2"] = ("dim2", 0.5 * np.arange(_dims["dim2"])) if _dims["dim3"] > 26: raise RuntimeError( f"Not enough letters for filling this dimension size ({_dims['dim3']})" ) obj["dim3"] = ("dim3", list(string.ascii_lowercase[0 : _dims["dim3"]])) obj["time"] = ( "time", pd.date_range( "2000-01-01", periods=20, unit="ns", ), ) for v, dims in sorted(_vars.items()): data = rs.normal(size=tuple(_dims[d] for d in dims)) obj[v] = (dims, data) if add_attrs: obj[v].attrs = {"foo": "variable"} if use_extension_array: obj["var4"] = ( "dim1", pd.Categorical( 
rs.choice( list(string.ascii_lowercase[: rs.integers(1, 5)]), size=dim_sizes[0], ) ), ) if has_pyarrow: obj["var5"] = ( "dim1", pd.array( rs.integers(1, 10, size=dim_sizes[0]).tolist(), dtype="int64[pyarrow]", ), ) if dim_sizes == _DEFAULT_TEST_DIM_SIZES: numbers_values = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype="int64") else: numbers_values = rs.integers(0, 3, _dims["dim3"], dtype="int64") obj.coords["numbers"] = ("dim3", numbers_values) obj.encoding = {"foo": "bar"} assert_writeable(obj) return obj _STANDARD_CALENDAR_NAMES = sorted(_STANDARD_CALENDARS_UNSORTED) _NON_STANDARD_CALENDAR_NAMES = { "noleap", "365_day", "360_day", "julian", "all_leap", "366_day", } _NON_STANDARD_CALENDARS = [ pytest.param(cal, marks=requires_cftime) for cal in sorted(_NON_STANDARD_CALENDAR_NAMES) ] _STANDARD_CALENDARS = [ pytest.param(cal, marks=requires_cftime if cal != "standard" else ()) for cal in _STANDARD_CALENDAR_NAMES ] _ALL_CALENDARS = sorted(_STANDARD_CALENDARS + _NON_STANDARD_CALENDARS) _CFTIME_CALENDARS = [ pytest.param(*p.values, marks=requires_cftime) for p in _ALL_CALENDARS ] def _all_cftime_date_types(): import cftime return { "noleap": cftime.DatetimeNoLeap, "365_day": cftime.DatetimeNoLeap, "360_day": cftime.Datetime360Day, "julian": cftime.DatetimeJulian, "all_leap": cftime.DatetimeAllLeap, "366_day": cftime.DatetimeAllLeap, "gregorian": cftime.DatetimeGregorian, "proleptic_gregorian": cftime.DatetimeProlepticGregorian, } xarray-2025.12.0/xarray/tests/arrays.py000066400000000000000000000164111511464676000176710ustar00rootroot00000000000000""" This module contains various lazy array classes which can be wrapped and manipulated by xarray objects but will raise on data access. """ from collections.abc import Callable, Iterable from typing import Any, Self import numpy as np from xarray.core import utils from xarray.core.indexing import ExplicitlyIndexed class UnexpectedDataAccess(Exception): pass class InaccessibleArray(utils.NDArrayMixin, ExplicitlyIndexed): """Disallows any loading.""" def __init__(self, array): self.array = array def get_duck_array(self): raise UnexpectedDataAccess("Tried accessing data") def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __getitem__(self, key): raise UnexpectedDataAccess("Tried accessing data.") class FirstElementAccessibleArray(InaccessibleArray): def __getitem__(self, key): tuple_idxr = key.tuple if len(tuple_idxr) > 1: raise UnexpectedDataAccess("Tried accessing more than one element.") return self.array[tuple_idxr] class IndexableArray(InaccessibleArray): """An InaccessibleArray subclass that supports indexing.""" def __getitem__(self, key): return type(self)(self.array[key]) def transpose(self, axes): return type(self)(self.array.transpose(axes)) class DuckArrayWrapper(utils.NDArrayMixin): """Array-like that prevents casting to array. 
Modeled after cupy.""" def __init__(self, array: np.ndarray): self.array = array def __getitem__(self, key): return type(self)(self.array[key]) def to_numpy(self) -> np.ndarray: """Allow explicit conversions to numpy in `to_numpy`, but disallow np.asarray etc.""" return self.array def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __array_namespace__(self): """Present to satisfy is_duck_array test.""" from xarray.tests import namespace return namespace CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: dict[str, Callable] = {} def implements(numpy_function): """Register an __array_function__ implementation for ConcatenatableArray objects.""" def decorator(func): CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[numpy_function] = func return func return decorator @implements(np.concatenate) def concatenate( arrays: Iterable["ConcatenatableArray"], /, *, axis=0 ) -> "ConcatenatableArray": if any(not isinstance(arr, ConcatenatableArray) for arr in arrays): raise TypeError result = np.concatenate([arr._array for arr in arrays], axis=axis) return ConcatenatableArray(result) @implements(np.stack) def stack( arrays: Iterable["ConcatenatableArray"], /, *, axis=0 ) -> "ConcatenatableArray": if any(not isinstance(arr, ConcatenatableArray) for arr in arrays): raise TypeError result = np.stack([arr._array for arr in arrays], axis=axis) return ConcatenatableArray(result) @implements(np.result_type) def result_type(*arrays_and_dtypes) -> np.dtype: """Called by xarray to ensure all arguments to concat have the same dtype.""" first_dtype, *other_dtypes = (np.dtype(obj) for obj in arrays_and_dtypes) for other_dtype in other_dtypes: if other_dtype != first_dtype: raise ValueError("dtypes not all consistent") return first_dtype @implements(np.broadcast_to) def broadcast_to( x: "ConcatenatableArray", /, shape: tuple[int, ...] ) -> "ConcatenatableArray": """ Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries. """ if not isinstance(x, ConcatenatableArray): raise TypeError result = np.broadcast_to(x._array, shape=shape) return ConcatenatableArray(result) @implements(np.full_like) def full_like( x: "ConcatenatableArray", /, fill_value, **kwargs ) -> "ConcatenatableArray": """ Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries. """ if not isinstance(x, ConcatenatableArray): raise TypeError return ConcatenatableArray(np.full(x.shape, fill_value=fill_value, **kwargs)) @implements(np.all) def numpy_all(x: "ConcatenatableArray", **kwargs) -> "ConcatenatableArray": return type(x)(np.all(x._array, **kwargs)) class ConcatenatableArray: """Disallows loading or coercing to an index but does support concatenation / stacking.""" def __init__(self, array): # use ._array instead of .array because we don't want this to be accessible even to xarray's internals (e.g. 
create_default_index_implicit) self._array = array @property def dtype(self: Any) -> np.dtype: return self._array.dtype @property def shape(self: Any) -> tuple[int, ...]: return self._array.shape @property def ndim(self: Any) -> int: return self._array.ndim def __repr__(self: Any) -> str: return f"{type(self).__name__}(array={self._array!r})" def get_duck_array(self): raise UnexpectedDataAccess("Tried accessing data") def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __getitem__(self, key) -> Self: """Some cases of concat require supporting expanding dims by dimensions of size 1""" # see https://data-apis.org/array-api/2022.12/API_specification/indexing.html#multi-axis-indexing arr = self._array for axis, indexer_1d in enumerate(key): if indexer_1d is None: arr = np.expand_dims(arr, axis) elif indexer_1d is Ellipsis: pass else: raise UnexpectedDataAccess("Tried accessing data.") return type(self)(arr) def __eq__(self, other: Self) -> Self: # type: ignore[override] return type(self)(self._array == other._array) def __array_function__(self, func, types, args, kwargs) -> Any: if func not in CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: return NotImplemented # Note: this allows subclasses that don't override # __array_function__ to handle ManifestArray objects if not all(issubclass(t, ConcatenatableArray) for t in types): return NotImplemented return CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[func](*args, **kwargs) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs) -> Any: """We have to define this in order to convince xarray that this class is a duckarray, even though we will never support ufuncs.""" return NotImplemented def astype(self, dtype: np.dtype, /, *, copy: bool = True) -> Self: """Needed because xarray will call this even when it's a no-op""" if dtype != self.dtype: raise NotImplementedError() else: return self def __and__(self, other: Self) -> Self: return type(self)(self._array & other._array) def __or__(self, other: Self) -> Self: return type(self)(self._array | other._array) xarray-2025.12.0/xarray/tests/conftest.py000066400000000000000000000163531511464676000202220ustar00rootroot00000000000000from __future__ import annotations import warnings import numpy as np import pandas as pd import pytest import xarray as xr from xarray import DataArray, Dataset, DataTree from xarray.tests import create_test_data, has_cftime, requires_dask @pytest.fixture(autouse=True) def handle_numpy_1_warnings(): """Handle NumPy 1.x DeprecationWarnings for out-of-bound integer conversions. NumPy 1.x raises DeprecationWarning when converting out-of-bounds values (e.g., 255 to int8), while NumPy 2.x raises OverflowError. This fixture suppresses the warning in NumPy 1.x environments to allow tests to pass. 
""" # Only apply for NumPy < 2.0 if np.__version__.startswith("1."): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "NumPy will stop allowing conversion of out-of-bound Python integers", DeprecationWarning, ) yield else: yield @pytest.fixture(params=["numpy", pytest.param("dask", marks=requires_dask)]) def backend(request): return request.param @pytest.fixture(params=["numbagg", "bottleneck", None]) def compute_backend(request): if request.param is None: options = dict(use_bottleneck=False, use_numbagg=False) elif request.param == "bottleneck": options = dict(use_bottleneck=True, use_numbagg=False) elif request.param == "numbagg": options = dict(use_bottleneck=False, use_numbagg=True) else: raise ValueError with xr.set_options(**options): yield request.param @pytest.fixture(params=[1]) def ds(request, backend): if request.param == 1: ds = Dataset( dict( z1=(["y", "x"], np.random.randn(2, 8)), z2=(["time", "y"], np.random.randn(10, 2)), ), dict( x=("x", np.linspace(0, 1.0, 8)), time=("time", np.linspace(0, 1.0, 10)), c=("y", ["a", "b"]), y=range(2), ), ) elif request.param == 2: ds = Dataset( dict( z1=(["time", "y"], np.random.randn(10, 2)), z2=(["time"], np.random.randn(10)), z3=(["x", "time"], np.random.randn(8, 10)), ), dict( x=("x", np.linspace(0, 1.0, 8)), time=("time", np.linspace(0, 1.0, 10)), c=("y", ["a", "b"]), y=range(2), ), ) elif request.param == 3: ds = create_test_data() else: raise ValueError if backend == "dask": return ds.chunk() return ds @pytest.fixture(params=[1]) def da(request, backend): if request.param == 1: times = pd.date_range("2000-01-01", freq="1D", periods=21) da = DataArray( np.random.random((3, 21, 4)), dims=("a", "time", "x"), coords=dict(time=times), ) if request.param == 2: da = DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time") if request.param == "repeating_ints": da = DataArray( np.tile(np.arange(12), 5).reshape(5, 4, 3), coords={"x": list("abc"), "y": list("defg")}, dims=list("zyx"), ) if backend == "dask": return da.chunk() elif backend == "numpy": return da else: raise ValueError @pytest.fixture( params=[ False, pytest.param( True, marks=pytest.mark.skipif(not has_cftime, reason="no cftime") ), ] ) def use_cftime(request): return request.param @pytest.fixture(params=[Dataset, DataArray]) def type(request): return request.param @pytest.fixture(params=[1]) def d(request, backend, type) -> DataArray | Dataset: """ For tests which can test either a DataArray or a Dataset. 
""" result: DataArray | Dataset if request.param == 1: ds = Dataset( dict( a=(["x", "z"], np.arange(24).reshape(2, 12)), b=(["y", "z"], np.arange(100, 136).reshape(3, 12).astype(np.float64)), ), dict( x=("x", np.linspace(0, 1.0, 2)), y=range(3), z=("z", pd.date_range("2000-01-01", periods=12)), w=("x", ["a", "b"]), ), ) if type == DataArray: result = ds["a"].assign_coords(w=ds.coords["w"]) elif type == Dataset: result = ds else: raise ValueError else: raise ValueError if backend == "dask": return result.chunk() elif backend == "numpy": return result else: raise ValueError @pytest.fixture def byte_attrs_dataset(): """For testing issue #9407""" null_byte = b"\x00" other_bytes = bytes(range(1, 256)) ds = Dataset({"x": 1}, coords={"x_coord": [1]}) ds["x"].attrs["null_byte"] = null_byte ds["x"].attrs["other_bytes"] = other_bytes expected = ds.copy() expected["x"].attrs["null_byte"] = "" expected["x"].attrs["other_bytes"] = other_bytes.decode(errors="replace") return { "input": ds, "expected": expected, "h5netcdf_error": r"Invalid value provided for attribute .*: .*\. Null characters .*", } @pytest.fixture(scope="module") def create_test_datatree(): """ Create a test datatree with this structure: Group: / โ”‚ Dimensions: (y: 3, x: 2) โ”‚ Dimensions without coordinates: y, x โ”‚ Data variables: โ”‚ a (y) int64 24B 6 7 8 โ”‚ set0 (x) int64 16B 9 10 โ”œโ”€โ”€ Group: /set1 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ a int64 8B 0 โ”‚ โ”‚ b int64 8B 1 โ”‚ โ”œโ”€โ”€ Group: /set1/set1 โ”‚ โ””โ”€โ”€ Group: /set1/set2 โ”œโ”€โ”€ Group: /set2 โ”‚ โ”‚ Dimensions: (x: 2) โ”‚ โ”‚ Dimensions without coordinates: x โ”‚ โ”‚ Data variables: โ”‚ โ”‚ a (x) int64 16B 2 3 โ”‚ โ”‚ b (x) float64 16B 0.1 0.2 โ”‚ โ””โ”€โ”€ Group: /set2/set1 โ””โ”€โ”€ Group: /set3 The structure has deliberately repeated names of tags, variables, and dimensions in order to better check for bugs caused by name conflicts. """ def _create_test_datatree(modify=lambda ds: ds): set1_data = modify(xr.Dataset({"a": 0, "b": 1})) set2_data = modify(xr.Dataset({"a": ("x", [2, 3]), "b": ("x", [0.1, 0.2])})) root_data = modify(xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])})) root = DataTree.from_dict( { "/": root_data, "/set1": set1_data, "/set1/set1": None, "/set1/set2": None, "/set2": set2_data, "/set2/set1": None, "/set3": None, } ) return root return _create_test_datatree @pytest.fixture(scope="module") def simple_datatree(create_test_datatree): """ Invoke create_test_datatree fixture (callback). Returns a DataTree. """ return create_test_datatree() @pytest.fixture(params=["s", "ms", "us", "ns"]) def time_unit(request): return request.param xarray-2025.12.0/xarray/tests/data/000077500000000000000000000000001511464676000167245ustar00rootroot00000000000000xarray-2025.12.0/xarray/tests/data/bears.nc000066400000000000000000000022401511464676000203400ustar00rootroot00000000000000CDF ij bears_lenl history๎This is an example of a multi-line global\012attribute. It could be used for representing the\012processing history of the data, for example. 
2017-12-12 15:55:12 GMT Hyrax-1.14.0 http://test.opendap.org/opendap/hyrax/data/nc/bears.nc.nc?DODS_EXTRA.Unlimited_Dimensionk i attr11attr21 2 3 4 i_1.attr3_117 i_1.attr3_2@3@7@;j bears acttext string\012\011123acsุaclBhacfภ?€acdฟ๐?่ string_lengthorder ,shot8aloanPcross0hl˜ @@€@ภindistinguishable@@@€@ @ภ@เShิฅ@@?0@@ B _ €xarray-2025.12.0/xarray/tests/data/example.grib000066400000000000000000000121601511464676000212240ustar00rootroot00000000000000GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  FEะ€ )`P`P`P`PŠy€j€{๐7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dณ  FG๔€ )˜@˜@˜@˜@ ึเิ`๏€7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  FJŒ€ )] ] ] ] ่ภำ€ถ ำ@7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ GFฒ€ ) x x x xา(หเอห87777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dLK@ GG"€ )XXXXฬXลจส ศฐ7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ GEๆ€ )ีˆาxาะิ8    7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  FFD€ )____‰Pwi๐y@7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dณ  FH$€ )– – – – ั@ิ@๑ 7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  FJ|€ )\เ\เ\เ\เ์`ำ`น`า@7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ GFย€ ) ัฐหpอ€ส๘7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dLK@ GG'€ )8888ฬ8ลˆสXศจ7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ GEโ€ )ึHัฐา๘ิเ7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  C|€)œ˜จ7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dณ  C{€) œ˜˜7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  C{€)œ ค˜7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ Ci€)ˆŒ˜7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dLK@ Ci€)ˆ””Œ7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ Cg€)” ˜$$$$7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  C|€)œ˜ค7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dณ  C{€) œ ”7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dณ  Cz€)  คœ 7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ Ci€)ˆŒ˜7777GRIBฺแ H ]J€0…]J€฿€]J€]J€%€dLK@ Ci€)ˆ˜Œ7777GRIBฺแH ]J€0…]J€฿€]J€]J€%€dLK@ Cg€)” ”$$$$7777xarray-2025.12.0/xarray/tests/data/example.ict000066400000000000000000000014171511464676000210630ustar00rootroot0000000000000029, 1001 Henderson, Barron U.S. EPA Example file with artificial data JUST_A_TEST 1, 1 2018, 04, 27 2018, 04, 27 0 Start_UTC 5 1, 1, 1, 1, 1 -9999, -9999, -9999, -9999, -9999 lat, degrees_north lon, degrees_east elev, meters TEST_ppbv, ppbv TESTM_ppbv, ppbv 0 9 INDEPENDENT_VARIABLE_DEFINITION: Start_UTC INDEPENDENT_VARIABLE_UNITS: Start_UTC ULOD_FLAG: -7777 ULOD_VALUE: N/A LLOD_FLAG: -8888 LLOD_VALUE: N/A, N/A, N/A, N/A, 0.025 OTHER_COMMENTS: www-air.larc.nasa.gov/missions/etc/IcarttDataFormat.htm REVISION: R0 R0: No comments for this revision. Start_UTC, lat, lon, elev, TEST_ppbv, TESTM_ppbv 43200, 41.00000, -71.00000, 5, 1.2345, 2.220 46800, 42.00000, -72.00000, 15, 2.3456, -9999 50400, 42.00000, -73.00000, 20, 3.4567, -7777 50400, 42.00000, -74.00000, 25, 4.5678, -8888 xarray-2025.12.0/xarray/tests/data/example.uamiv000066400000000000000000000011401511464676000214160ustar00rootroot000000000000000A V E R A G E C A M x 5 . 4 0 T e s t P r o b l e m - - M e c h 6 C F C B 0 5 v 5 . 4 0 . m i d w e s t . 3 6 . 1 2 . 
jj?€0<ษA\ษส&G  G  <(O 3 (jj?€|O 3 ?€@@@@€@ @ภ@เAAA A0A@APA`ApA€AˆAA˜|xarray-2025.12.0/xarray/tests/data/example_1.nc000066400000000000000000000033101511464676000211160ustar00rootroot00000000000000CDF latlon leveltime sourceFictional Model Output temp  long_name temperatureunitscelsius rh  long_namerelative humidity valid_range?๐ศlat units degrees_northlon units degrees_east(คlevel units millibarsฬtime unitshours since 1996-1-1ฤ(2<`tŠ ฌฬำ็๑่Rผ๔|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐|๐?>Lฬอ>ฬฬอ>Lฬอ>™™š>Lฬอ>ฬฬอ??™š?333=ฬฬอ>™™š=ฬฬอ=ฬฬอ=ฬฬอ=ฬฬอ??333?Lฬอ?Lฬอ=ฬฬอ>Lฬอ>Lฬอ>Lฬอ>Lฬอ??333?Lฬอ?fff?fff=ฬฬอ>Lฬอ>™™š>™™š>™™š>™™š?333?Lฬอ?fff?fff=ฬฬอ>Lฬอ>ฬฬอ>ฬฬอ>ฬฬอ>ฬฬอ?333?fff?fff €xarray-2025.12.0/xarray/tests/data/example_1.nc.gz000066400000000000000000000007261511464676000215450ustar00rootroot00000000000000‹ฑ!Texample_2.ncํ”ฟKร@ว/๖‡ิVP(‚‹dt้P Bม6ƒาIjlฏํม5)w—‚เเ์ไฺNŽ ŽqrีYGGกพป™ดX๐hเรๅ๎}๏๛^%ู?hX!ษ Rิ0 Œบ๗=คcЇ˜"uI]Z>Žฆจ =ฒX+Z]6ค%ˆ๏นิ>๒˜ฺวสๅฅ^๙เภไึ๛๖+่y๊่6=ื$”kyน3WLญfว—[˜rp,)–_dŒ๕”&ฅ๙Oฎu†ก+dˆํ^ะ'm"ฮฃX~่Rาn2ื๋*u6Q?r>ฬ๊ๅ# ฿‰Z3ฝ›ญตป c๔|&zq฿‹พNœ9ฟy>ใƒ].ดว6„ngฮิ2๚9>น>ก”œนŒวตฌA(D๑ป`้žฮ๓(๖เเ6'^ ๅjuทT.•๕๐ห>Dฯ‡ถขัฐ7™LN\7ภฯภ+๐|‚อ;pฉ๎!ื๔ม‚ใภวU? Ÿ๊! วัhlๆ2ๆlŽฦNฅRฉ…QLŽI”โ์‘ิŒO‚)Mงำ‘ิ’๙ฬ๊ไOใWNฃดFWธด~;kว…ศxarray-2025.12.0/xarray/tests/indexes.py000066400000000000000000000045511511464676000200310ustar00rootroot00000000000000from collections.abc import Hashable, Iterable, Mapping, Sequence from typing import Any import numpy as np from xarray import Variable from xarray.core.indexes import Index, PandasIndex from xarray.core.types import Self class ScalarIndex(Index): def __init__(self, value: int): self.value = value @classmethod def from_variables(cls, variables, *, options) -> Self: var = next(iter(variables.values())) return cls(int(var.values)) def equals(self, other, *, exclude=None) -> bool: return isinstance(other, ScalarIndex) and other.value == self.value class XYIndex(Index): def __init__(self, x: PandasIndex, y: PandasIndex): self.x: PandasIndex = x self.y: PandasIndex = y @classmethod def from_variables(cls, variables, *, options): return cls( x=PandasIndex.from_variables({"x": variables["x"]}, options=options), y=PandasIndex.from_variables({"y": variables["y"]}, options=options), ) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> dict[Any, Variable]: return self.x.create_variables() | self.y.create_variables() def equals(self, other, exclude=None): if exclude is None: exclude = frozenset() x_eq = True if self.x.dim in exclude else self.x.equals(other.x) y_eq = True if self.y.dim in exclude else self.y.equals(other.y) return x_eq and y_eq @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: first = next(iter(indexes)) if dim == "x": newx = PandasIndex.concat( tuple(i.x for i in indexes), dim=dim, positions=positions ) newy = first.y elif dim == "y": newx = first.x newy = PandasIndex.concat( tuple(i.y for i in indexes), dim=dim, positions=positions ) return cls(x=newx, y=newy) def isel(self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]) -> Self: newx = self.x.isel({"x": indexers.get("x", slice(None))}) newy = self.y.isel({"y": 
indexers.get("y", slice(None))}) assert newx is not None assert newy is not None return type(self)(newx, newy) xarray-2025.12.0/xarray/tests/namespace.py000066400000000000000000000002411511464676000203160ustar00rootroot00000000000000from xarray.core import duck_array_ops def reshape(array, shape, **kwargs): return type(array)(duck_array_ops.reshape(array.array, shape=shape, **kwargs)) xarray-2025.12.0/xarray/tests/test_accessor_dt.py000066400000000000000000000546561511464676000217350ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr from xarray.tests import ( _CFTIME_CALENDARS, _all_cftime_date_types, assert_allclose, assert_array_equal, assert_chunks_equal, assert_equal, assert_identical, raise_if_dask_computes, requires_cftime, requires_dask, ) class TestDatetimeAccessor: @pytest.fixture(autouse=True) def setup(self): nt = 100 data = np.random.rand(10, 10, nt) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) self.times = pd.date_range(start="2000/01/01", freq="h", periods=nt) self.data = xr.DataArray( data, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) self.times_arr = np.random.choice(self.times, size=(10, 10, nt)) self.times_data = xr.DataArray( self.times_arr, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) @pytest.mark.parametrize( "field", [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "week", "weekofyear", "dayofweek", "weekday", "dayofyear", "quarter", "date", "time", "daysinmonth", "days_in_month", "is_month_start", "is_month_end", "is_quarter_start", "is_quarter_end", "is_year_start", "is_year_end", "is_leap_year", ], ) def test_field_access(self, field) -> None: if field in ["week", "weekofyear"]: data = self.times.isocalendar()["week"] else: data = getattr(self.times, field) if data.dtype.kind != "b" and field not in ("date", "time"): # pandas 2.0 returns int32 for integer fields now data = data.astype("int64") translations = { "weekday": "dayofweek", "daysinmonth": "days_in_month", "weekofyear": "week", } name = translations.get(field, field) expected = xr.DataArray(data, name=name, coords=[self.times], dims=["time"]) if field in ["week", "weekofyear"]: with pytest.warns( FutureWarning, match="dt.weekofyear and dt.week have been deprecated" ): actual = getattr(self.data.time.dt, field) else: actual = getattr(self.data.time.dt, field) assert not isinstance(actual.variable, xr.IndexVariable) assert expected.dtype == actual.dtype assert_identical(expected, actual) def test_total_seconds(self) -> None: # Subtract a value in the middle of the range to ensure that some values # are negative delta = self.data.time - np.datetime64("2000-01-03") actual = delta.dt.total_seconds() expected = xr.DataArray( np.arange(-48, 52, dtype=np.float64) * 3600, name="total_seconds", coords=[self.data.time], ) # This works with assert_identical when pandas is >=1.5.0. 
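# (assert_allclose here is the wrapper defined in xarray.tests, which also
# validates xarray's internal invariants on both arguments.)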
assert_allclose(expected, actual) @pytest.mark.parametrize( "field, pandas_field", [ ("year", "year"), ("week", "week"), ("weekday", "day"), ], ) def test_isocalendar(self, field, pandas_field) -> None: # pandas isocalendar has dtypy UInt32Dtype, convert to Int64 expected = pd.Index(getattr(self.times.isocalendar(), pandas_field).astype(int)) expected = xr.DataArray( expected, name=field, coords=[self.times], dims=["time"] ) actual = self.data.time.dt.isocalendar()[field] assert_equal(expected, actual) def test_calendar(self) -> None: cal = self.data.time.dt.calendar assert cal == "proleptic_gregorian" def test_strftime(self) -> None: assert ( "2000-01-01 01:00:00" == self.data.time.dt.strftime("%Y-%m-%d %H:%M:%S")[1] ) @requires_cftime @pytest.mark.parametrize( "calendar,expected", [("standard", 366), ("noleap", 365), ("360_day", 360), ("all_leap", 366)], ) def test_days_in_year(self, calendar, expected) -> None: assert ( self.data.convert_calendar(calendar, align_on="year").time.dt.days_in_year == expected ).all() def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) with pytest.raises(AttributeError, match=r"dt"): _ = nontime_data.time.dt @pytest.mark.filterwarnings("ignore:dt.weekofyear and dt.week have been deprecated") @requires_dask @pytest.mark.parametrize( "field", [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "week", "weekofyear", "dayofweek", "weekday", "dayofyear", "quarter", "date", "time", "is_month_start", "is_month_end", "is_quarter_start", "is_quarter_end", "is_year_start", "is_year_end", "is_leap_year", "days_in_year", ], ) def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, field) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) @requires_dask @pytest.mark.parametrize( "field", [ "year", "week", "weekday", ], ) def test_isocalendar_dask(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt.isocalendar(), field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = dask_times_2d.dt.isocalendar()[field] assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) @requires_dask @pytest.mark.parametrize( "method, parameters", [ ("floor", "D"), ("ceil", "D"), ("round", "D"), ("strftime", "%Y-%m-%d %H:%M:%S"), ], ) def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, method)(parameters) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) def test_seasons(self) 
-> None: dates = xr.date_range( start="2000/01/01", freq="ME", periods=12, use_cftime=False ) dates = dates.append(pd.Index([np.datetime64("NaT")])) dates = xr.DataArray(dates) seasons = xr.DataArray( [ "DJF", "DJF", "MAM", "MAM", "MAM", "JJA", "JJA", "JJA", "SON", "SON", "SON", "DJF", "nan", ] ) assert_array_equal(seasons.values, dates.dt.season.values) @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_accessor_method(self, method, parameters) -> None: dates = pd.date_range("2014-01-01", "2014-05-01", freq="h") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) actual = getattr(xdates.dt, method)(parameters) assert_array_equal(expected, actual) class TestTimedeltaAccessor: @pytest.fixture(autouse=True) def setup(self): nt = 100 data = np.random.rand(10, 10, nt) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) self.times = pd.timedelta_range(start="1 day", freq="6h", periods=nt) self.data = xr.DataArray( data, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) self.times_arr = np.random.choice(self.times, size=(10, 10, nt)) self.times_data = xr.DataArray( self.times_arr, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) with pytest.raises(AttributeError, match=r"dt"): _ = nontime_data.time.dt @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) def test_field_access(self, field) -> None: expected = xr.DataArray( getattr(self.times, field), name=field, coords=[self.times], dims=["time"] ) actual = getattr(self.data.time.dt, field) assert_equal(expected, actual) @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_accessor_methods(self, method, parameters) -> None: dates = pd.timedelta_range(start="1 day", end="30 days", freq="6h") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) actual = getattr(xdates.dt, method)(parameters) assert_array_equal(expected, actual) @requires_dask @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, field) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual, expected) @requires_dask @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, method)(parameters) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) _NT = 100 
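# Illustrative usage sketch (an addition for readers, not part of the original
# test module and never invoked by the test suite): a minimal example of the
# ``.dt`` accessor behaviour that the fixtures below and the
# TestDatetimeAccessor tests above exercise. It relies only on the pandas and
# xarray imports already present in this file; the helper name
# ``_dt_accessor_usage_sketch`` is made up for illustration.
def _dt_accessor_usage_sketch() -> None:
    # Hourly datetime64 values, analogous to ``self.times`` in the tests above.
    times = pd.date_range("2000-01-01", periods=4, freq="h")
    da = xr.DataArray(times, dims="time")
    # Field access mirrors pandas' ``.dt`` namespace but returns DataArrays.
    assert list(da.dt.hour.values) == [0, 1, 2, 3]
    # datetime64 data always reports the proleptic Gregorian calendar.
    assert da.dt.calendar == "proleptic_gregorian"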
@pytest.fixture(params=_CFTIME_CALENDARS) def calendar(request): return request.param @pytest.fixture def cftime_date_type(calendar): if calendar == "standard": calendar = "proleptic_gregorian" return _all_cftime_date_types()[calendar] @pytest.fixture def times(calendar): import cftime return cftime.num2date( np.arange(_NT), units="hours since 2000-01-01", calendar=calendar, only_use_cftime_datetimes=True, ) @pytest.fixture def data(times): data = np.random.rand(10, 10, _NT) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) return xr.DataArray( data, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @pytest.fixture def times_3d(times): lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) times_arr = np.random.choice(times, size=(10, 10, _NT)) return xr.DataArray( times_arr, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @requires_cftime @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_field_access(data, field) -> None: result = getattr(data.time.dt, field) expected = xr.DataArray( getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field), name=field, coords=data.time.coords, dims=data.time.dims, ) assert_equal(result, expected) @requires_cftime def test_calendar_cftime(data) -> None: expected = data.time.values[0].calendar assert data.time.dt.calendar == expected def test_calendar_datetime64_2d() -> None: data = xr.DataArray(np.zeros((4, 5), dtype="datetime64[ns]"), dims=("x", "y")) assert data.dt.calendar == "proleptic_gregorian" @requires_dask def test_calendar_datetime64_3d_dask() -> None: import dask.array as da data = xr.DataArray( da.zeros((4, 5, 6), dtype="datetime64[ns]"), dims=("x", "y", "z") ) with raise_if_dask_computes(): assert data.dt.calendar == "proleptic_gregorian" @requires_dask @requires_cftime def test_calendar_dask_cftime() -> None: from cftime import num2date # 3D lazy dask data = xr.DataArray( num2date( np.random.randint(1, 1000000, size=(4, 5, 6)), "hours since 1970-01-01T00:00", calendar="noleap", ), dims=("x", "y", "z"), ).chunk() with raise_if_dask_computes(max_computes=2): assert data.dt.calendar == "noleap" @requires_cftime def test_isocalendar_cftime(data) -> None: with pytest.raises( AttributeError, match=r"'CFTimeIndex' object has no attribute 'isocalendar'" ): data.time.dt.isocalendar() @requires_cftime def test_date_cftime(data) -> None: with pytest.raises( AttributeError, match=r"'CFTimeIndex' object has no attribute `date`. 
Consider using the floor method instead, for instance: `.time.dt.floor\('D'\)`.", ): data.time.dt.date() @requires_cftime @pytest.mark.filterwarnings("ignore::RuntimeWarning") def test_cftime_strftime_access(data) -> None: """compare cftime formatting against datetime formatting""" date_format = "%Y%m%d%H" result = data.time.dt.strftime(date_format) datetime_array = xr.DataArray( xr.coding.cftimeindex.CFTimeIndex(data.time.values).to_datetimeindex( time_unit="ns" ), name="stftime", coords=data.time.coords, dims=data.time.dims, ) expected = datetime_array.dt.strftime(date_format) assert_equal(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_dask_field_access_1d(data, field) -> None: import dask.array as da expected = xr.DataArray( getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field), name=field, dims=["time"], ) times = xr.DataArray(data.time.values, dims=["time"]).chunk({"time": 50}) result = getattr(times.dt, field) assert isinstance(result.data, da.Array) assert result.chunks == times.chunks assert_equal(result.compute(), expected) @requires_cftime @requires_dask @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_dask_field_access(times_3d, data, field) -> None: import dask.array as da expected = xr.DataArray( getattr( xr.coding.cftimeindex.CFTimeIndex(times_3d.values.ravel()), field ).reshape(times_3d.shape), name=field, coords=times_3d.coords, dims=times_3d.dims, ) times_3d = times_3d.chunk({"lon": 5, "lat": 5, "time": 50}) result = getattr(times_3d.dt, field) assert isinstance(result.data, da.Array) assert result.chunks == times_3d.chunks assert_equal(result.compute(), expected) @requires_cftime def test_seasons(cftime_date_type) -> None: dates = xr.DataArray( np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)]) ) seasons = xr.DataArray( [ "DJF", "DJF", "MAM", "MAM", "MAM", "JJA", "JJA", "JJA", "SON", "SON", "SON", "DJF", ] ) assert_array_equal(seasons.values, dates.dt.season.values) @pytest.fixture def cftime_rounding_dataarray(cftime_date_type): return xr.DataArray( [ [cftime_date_type(1, 1, 1, 1), cftime_date_type(1, 1, 1, 15)], [cftime_date_type(1, 1, 1, 23), cftime_date_type(1, 1, 2, 1)], ] ) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_floor_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 1, 0)], [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)], ], name="floor", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). 
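# (raise_if_dask_computes, defined in xarray.tests, installs a counting dask
# scheduler that raises RuntimeError once more than ``max_computes`` graph
# computations have been triggered.)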
with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.floor(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.floor(freq) assert_identical(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_ceil_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)], [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 3, 0)], ], name="ceil", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.ceil(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.ceil(freq) assert_identical(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_round_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)], [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)], ], name="round", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). 
with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.round(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.round(freq) assert_identical(result, expected) @pytest.mark.parametrize( "use_cftime", [False, pytest.param(True, marks=requires_cftime)], ids=lambda x: f"use_cftime={x}", ) @pytest.mark.parametrize( "use_dask", [False, pytest.param(True, marks=requires_dask)], ids=lambda x: f"use_dask={x}", ) def test_decimal_year(use_cftime, use_dask) -> None: year = 2000 periods = 10 freq = "h" shape = (2, 5) dims = ["x", "y"] hours_in_year = 24 * 366 times = xr.date_range(f"{year}", periods=periods, freq=freq, use_cftime=use_cftime) da = xr.DataArray(times.values.reshape(shape), dims=dims) if use_dask: da = da.chunk({"y": 2}) # Computing the decimal year for a cftime datetime array requires a # number of small computes (6): # - 4x one compute per .dt accessor call (requires inspecting one # object-dtype array element to see if it is time-like) # - 2x one compute per calendar inference (requires inspecting one # array element to read off the calendar) max_computes = 6 * use_cftime with raise_if_dask_computes(max_computes=max_computes): result = da.dt.decimal_year else: result = da.dt.decimal_year expected = xr.DataArray( year + np.arange(periods).reshape(shape) / hours_in_year, dims=dims ) xr.testing.assert_equal(result, expected) xarray-2025.12.0/xarray/tests/test_accessor_str.py000066400000000000000000003560001511464676000221220ustar00rootroot00000000000000# Tests for the `str` accessor are derived from the original # pandas string accessor tests. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import annotations import re from collections.abc import Callable import numpy as np import pytest import xarray as xr from xarray.tests import assert_equal, assert_identical, requires_dask @pytest.fixture( params=[pytest.param(np.str_, id="str"), pytest.param(np.bytes_, id="bytes")] ) def dtype(request): return request.param @requires_dask def test_dask() -> None: import dask.array as da arr = da.from_array(["a", "b", "c"], chunks=-1) xarr = xr.DataArray(arr) result = xarr.str.len().compute() expected = xr.DataArray([1, 1, 1]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_count(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = dtype(r"f[o]+") pat_re = re.compile(pat_str) result_str = values.str.count(pat_str) result_re = values.str.count(pat_re) expected = xr.DataArray([1, 2, 4]) assert result_str.dtype == expected.dtype assert result_re.dtype == expected.dtype assert_equal(result_str, expected) assert_equal(result_re, expected) def test_count_broadcast(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = np.array([r"f[o]+", r"o", r"m"]).astype(dtype) pat_re = np.array([re.compile(x) for x in pat_str]) result_str = values.str.count(pat_str) result_re = values.str.count(pat_re) expected = xr.DataArray([1, 4, 3]) assert result_str.dtype == expected.dtype assert result_re.dtype == expected.dtype assert_equal(result_str, expected) assert_equal(result_re, expected) def test_contains(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype) # case insensitive using regex pat = values.dtype.type("FOO|mmm") result = values.str.contains(pat, case=False) expected = xr.DataArray([True, False, True, True]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(re.compile(pat, flags=re.IGNORECASE)) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive using regex pat = values.dtype.type("Foo|mMm") result = values.str.contains(pat) expected = xr.DataArray([True, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive without regex result = values.str.contains("foo", regex=False, case=False) expected = xr.DataArray([True, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive without regex result = values.str.contains("fO", regex=False, case=True) expected = xr.DataArray([False, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) # regex regex=False pat_re = re.compile("(/w+)") with pytest.raises( ValueError, match=r"Must use regular expression matching for regular expression object.", ): values.str.contains(pat_re, regex=False) def test_contains_broadcast(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dims="X").astype( dtype ) pat_str = xr.DataArray(["FOO|mmm", "Foo", "MMM"], dims="Y").astype(dtype) pat_re = xr.DataArray([re.compile(x) for x in pat_str.data], dims="Y") # case insensitive using regex result = values.str.contains(pat_str, case=False) expected = xr.DataArray( [ [True, True, False], [False, False, False], [True, True, True], [True, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive 
using regex result = values.str.contains(pat_str) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, False, False], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(pat_re) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive without regex result = values.str.contains(pat_str, regex=False, case=False) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, True, True], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive with regex result = values.str.contains(pat_str, regex=False, case=True) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, False, False], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) def test_starts_ends_with(dtype) -> None: values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype) result = values.str.startswith("foo") expected = xr.DataArray([False, True, False, False, True]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.endswith("foo") expected = xr.DataArray([False, False, False, True, True]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_starts_ends_with_broadcast(dtype) -> None: values = xr.DataArray( ["om", "foo_nom", "nom", "bar_foo", "foo_bar"], dims="X" ).astype(dtype) pat = xr.DataArray(["foo", "bar"], dims="Y").astype(dtype) result = values.str.startswith(pat) expected = xr.DataArray( [[False, False], [True, False], [False, False], [False, True], [True, False]], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.endswith(pat) expected = xr.DataArray( [[False, False], [False, False], [False, False], [True, False], [False, True]], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) def test_case_bytes() -> None: value = xr.DataArray(["SOme wOrd"]).astype(np.bytes_) exp_capitalized = xr.DataArray(["Some word"]).astype(np.bytes_) exp_lowered = xr.DataArray(["some word"]).astype(np.bytes_) exp_swapped = xr.DataArray(["soME WoRD"]).astype(np.bytes_) exp_titled = xr.DataArray(["Some Word"]).astype(np.bytes_) exp_uppered = xr.DataArray(["SOME WORD"]).astype(np.bytes_) res_capitalized = value.str.capitalize() res_lowered = value.str.lower() res_swapped = value.str.swapcase() res_titled = value.str.title() res_uppered = value.str.upper() assert res_capitalized.dtype == exp_capitalized.dtype assert res_lowered.dtype == exp_lowered.dtype assert res_swapped.dtype == exp_swapped.dtype assert res_titled.dtype == exp_titled.dtype assert res_uppered.dtype == exp_uppered.dtype assert_equal(res_capitalized, exp_capitalized) assert_equal(res_lowered, exp_lowered) assert_equal(res_swapped, exp_swapped) assert_equal(res_titled, exp_titled) assert_equal(res_uppered, exp_uppered) def test_case_str() -> None: # This string includes some unicode characters # that are common case management corner cases value = xr.DataArray(["SOme wOrd ว„ รŸ แพ› ฮฃฮฃ ๏ฌƒโตร… ร‡ โ… "]).astype(np.str_) exp_capitalized = xr.DataArray(["Some word ว† รŸ แพ“ ฯƒฯ‚ ๏ฌƒโตรฅ รง โ…ฐ"]).astype(np.str_) exp_lowered = xr.DataArray(["some word ว† รŸ แพ“ ฯƒฯ‚ ๏ฌƒโตรฅ รง โ…ฐ"]).astype(np.str_) exp_swapped = xr.DataArray(["soME WoRD ว† SS แพ› ฯƒฯ‚ FFIโตรฅ รง โ…ฐ"]).astype(np.str_) 
exp_titled = xr.DataArray(["Some Word ว… Ss แพ› ฮฃฯ‚ Ffiโตร… ร‡ โ… "]).astype(np.str_) exp_uppered = xr.DataArray(["SOME WORD ว„ SS แผซฮ™ ฮฃฮฃ FFIโตร… ร‡ โ… "]).astype(np.str_) exp_casefolded = xr.DataArray(["some word ว† ss แผฃฮน ฯƒฯƒ ffiโตรฅ รง โ…ฐ"]).astype(np.str_) exp_norm_nfc = xr.DataArray(["SOme wOrd ว„ รŸ แพ› ฮฃฮฃ ๏ฌƒโตร… ร‡ โ… "]).astype(np.str_) exp_norm_nfkc = xr.DataArray(["SOme wOrd Dลฝ รŸ แพ› ฮฃฮฃ ffi5ร… ร‡ I"]).astype(np.str_) exp_norm_nfd = xr.DataArray(["SOme wOrd ว„ รŸ ฮ—ฬ”ฬ€อ… ฮฃฮฃ ๏ฌƒโตAฬŠ Cฬง โ… "]).astype(np.str_) exp_norm_nfkd = xr.DataArray(["SOme wOrd DZฬŒ รŸ ฮ—ฬ”ฬ€อ… ฮฃฮฃ ffi5AฬŠ Cฬง I"]).astype(np.str_) res_capitalized = value.str.capitalize() res_casefolded = value.str.casefold() res_lowered = value.str.lower() res_swapped = value.str.swapcase() res_titled = value.str.title() res_uppered = value.str.upper() res_norm_nfc = value.str.normalize("NFC") res_norm_nfd = value.str.normalize("NFD") res_norm_nfkc = value.str.normalize("NFKC") res_norm_nfkd = value.str.normalize("NFKD") assert res_capitalized.dtype == exp_capitalized.dtype assert res_casefolded.dtype == exp_casefolded.dtype assert res_lowered.dtype == exp_lowered.dtype assert res_swapped.dtype == exp_swapped.dtype assert res_titled.dtype == exp_titled.dtype assert res_uppered.dtype == exp_uppered.dtype assert res_norm_nfc.dtype == exp_norm_nfc.dtype assert res_norm_nfd.dtype == exp_norm_nfd.dtype assert res_norm_nfkc.dtype == exp_norm_nfkc.dtype assert res_norm_nfkd.dtype == exp_norm_nfkd.dtype assert_equal(res_capitalized, exp_capitalized) assert_equal(res_casefolded, exp_casefolded) assert_equal(res_lowered, exp_lowered) assert_equal(res_swapped, exp_swapped) assert_equal(res_titled, exp_titled) assert_equal(res_uppered, exp_uppered) assert_equal(res_norm_nfc, exp_norm_nfc) assert_equal(res_norm_nfd, exp_norm_nfd) assert_equal(res_norm_nfkc, exp_norm_nfkc) assert_equal(res_norm_nfkd, exp_norm_nfkd) def test_replace(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype) result = values.str.replace("BAD[_]*", "") expected = xr.DataArray(["foobar"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("BAD[_]*", "", n=1) expected = xr.DataArray(["foobarBAD"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) pat = xr.DataArray(["BAD[_]*", "AD[_]*"], dims=["y"]).astype(dtype) result = values.str.replace(pat, "") expected = xr.DataArray([["foobar", "fooBbarB"]], dims=["x", "y"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) repl = xr.DataArray(["", "spam"], dims=["y"]).astype(dtype) result = values.str.replace(pat, repl, n=1) expected = xr.DataArray([["foobarBAD", "fooBspambarBAD"]], dims=["x", "y"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected) values = xr.DataArray( ["A", "B", "C", "Aaba", "Baca", "", "CABA", "dog", "cat"] ).astype(dtype) expected = xr.DataArray( ["YYY", "B", "C", "YYYaba", "Baca", "", "CYYYBYYY", "dog", "cat"] ).astype(dtype) result = values.str.replace("A", "YYY") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("A", "YYY", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("A", "YYY", case=False) expected = xr.DataArray( ["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", "", "CYYYBYYY", "dog", "cYYYt"] ).astype(dtype) assert result.dtype == expected.dtype 
    assert_equal(result, expected)

    result = values.str.replace("^.a|dog", "XX-XX ", case=False)
    expected = xr.DataArray(
        ["A", "B", "C", "XX-XX ba", "XX-XX ca", "", "XX-XX BA", "XX-XX ", "XX-XX t"]
    ).astype(dtype)
    assert result.dtype == expected.dtype
    assert_equal(result, expected)


def test_replace_callable() -> None:
    values = xr.DataArray(["fooBAD__barBAD"])

    # test with callable
    repl = lambda m: m.group(0).swapcase()
    result = values.str.replace("[a-z][A-Z]{2}", repl, n=2)
    exp = xr.DataArray(["foObaD__baRbaD"])
    assert result.dtype == exp.dtype
    assert_equal(result, exp)

    # test regex named groups
    values = xr.DataArray(["Foo Bar Baz"])
    pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
    repl = lambda m: m.group("middle").swapcase()
    result = values.str.replace(pat, repl)
    exp = xr.DataArray(["bAR"])
    assert result.dtype == exp.dtype
    assert_equal(result, exp)

    # test broadcast
    values = xr.DataArray(["Foo Bar Baz"], dims=["x"])
    pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
    repl2 = xr.DataArray(
        [
            lambda m: m.group("first").swapcase(),
            lambda m: m.group("middle").swapcase(),
            lambda m: m.group("last").swapcase(),
        ],
        dims=["Y"],
    )
    result = values.str.replace(pat, repl2)
    exp = xr.DataArray([["fOO", "bAR", "bAZ"]], dims=["x", "Y"])
    assert result.dtype == exp.dtype
    assert_equal(result, exp)


def test_replace_unicode() -> None:
    # flags + unicode
    values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")])
    expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")])
    pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
    result = values.str.replace(pat, ", ")
    assert result.dtype == expected.dtype
    assert_equal(result, expected)

    # broadcast version
    values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")], dims=["X"])
    expected = xr.DataArray(
        [[b"abcd, \xc3\xa0".decode("utf-8"), b"BAcd,\xc3\xa0".decode("utf-8")]],
        dims=["X", "Y"],
    )
    pat2 = xr.DataArray(
        [re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE), r"ab"], dims=["Y"]
    )
    repl = xr.DataArray([", ", "BA"], dims=["Y"])
    result = values.str.replace(pat2, repl)
    assert result.dtype == expected.dtype
    assert_equal(result, expected)


def test_replace_compiled_regex(dtype) -> None:
    values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype)

    # test with compiled regex
    pat = re.compile(dtype("BAD[_]*"))
    result = values.str.replace(pat, "")
    expected = xr.DataArray(["foobar"], dims=["x"]).astype(dtype)
    assert result.dtype == expected.dtype
    assert_equal(result, expected)

    result = values.str.replace(pat, "", n=1)
    expected = xr.DataArray(["foobarBAD"], dims=["x"]).astype(dtype)
    assert result.dtype == expected.dtype
    assert_equal(result, expected)

    # broadcast
    pat2 = xr.DataArray(
        [re.compile(dtype("BAD[_]*")), re.compile(dtype("AD[_]*"))], dims=["y"]
    )
    result = values.str.replace(pat2, "")
    expected = xr.DataArray([["foobar", "fooBbarB"]], dims=["x", "y"]).astype(dtype)
    assert result.dtype == expected.dtype
    assert_equal(result, expected)

    repl = xr.DataArray(["", "spam"], dims=["y"]).astype(dtype)
    result = values.str.replace(pat2, repl, n=1)
    expected = xr.DataArray([["foobarBAD", "fooBspambarBAD"]], dims=["x", "y"]).astype(
        dtype
    )
    assert result.dtype == expected.dtype
    assert_equal(result, expected)

    # case and flags provided to str.replace will have no effect
    # and will produce warnings
    values = xr.DataArray(["fooBAD__barBAD__bad"]).astype(dtype)
    pat3 = re.compile(dtype("BAD[_]*"))

    with pytest.raises(
        ValueError, match=r"Flags cannot be set when pat is a compiled regex."
): result = values.str.replace(pat3, "", flags=re.IGNORECASE) with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): result = values.str.replace(pat3, "", case=False) with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): result = values.str.replace(pat3, "", case=True) # test with callable values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype) repl2 = lambda m: m.group(0).swapcase() pat4 = re.compile(dtype("[a-z][A-Z]{2}")) result = values.str.replace(pat4, repl2, n=2) expected = xr.DataArray(["foObaD__baRbaD"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_replace_literal(dtype) -> None: # GH16808 literal replace (regex=False vs regex=True) values = xr.DataArray(["f.o", "foo"], dims=["X"]).astype(dtype) expected = xr.DataArray(["bao", "bao"], dims=["X"]).astype(dtype) result = values.str.replace("f.", "ba") assert result.dtype == expected.dtype assert_equal(result, expected) expected = xr.DataArray(["bao", "foo"], dims=["X"]).astype(dtype) result = values.str.replace("f.", "ba", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) # Broadcast pat = xr.DataArray(["f.", ".o"], dims=["yy"]).astype(dtype) expected = xr.DataArray([["bao", "fba"], ["bao", "bao"]], dims=["X", "yy"]).astype( dtype ) result = values.str.replace(pat, "ba") assert result.dtype == expected.dtype assert_equal(result, expected) expected = xr.DataArray([["bao", "fba"], ["foo", "foo"]], dims=["X", "yy"]).astype( dtype ) result = values.str.replace(pat, "ba", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) # Cannot do a literal replace if given a callable repl or compiled # pattern callable_repl = lambda m: m.group(0).swapcase() compiled_pat = re.compile("[a-z][A-Z]{2}") msg = "Cannot use a callable replacement when regex=False" with pytest.raises(ValueError, match=msg): values.str.replace("abc", callable_repl, regex=False) msg = "Cannot use a compiled regex as replacement pattern with regex=False" with pytest.raises(ValueError, match=msg): values.str.replace(compiled_pat, "", regex=False) def test_extract_extractall_findall_empty_raises(dtype) -> None: pat_str = dtype(r".*") pat_re = re.compile(pat_str) value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.extract(pat=pat_str, dim="ZZ") with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.extract(pat=pat_re, dim="ZZ") with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.extractall(pat=pat_re, group_dim="XX", match_dim="YY") with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.findall(pat=pat_str) with pytest.raises(ValueError, match=r"No capture groups found in pattern."): value.str.findall(pat=pat_re) def test_extract_multi_None_raises(dtype) -> None: pat_str = r"(\w+)_(\d+)" pat_re = re.compile(pat_str) value = xr.DataArray([["a_b"]], dims=["X", "Y"]).astype(dtype) with pytest.raises( ValueError, match=r"Dimension must be specified if more than one capture group is given.", ): value.str.extract(pat=pat_str, dim=None) with pytest.raises( ValueError, match=r"Dimension must be specified if more than one capture group is given.", ): 
value.str.extract(pat=pat_re, dim=None) def test_extract_extractall_findall_case_re_raises(dtype) -> None: pat_str = r".*" pat_re = re.compile(pat_str) value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.extract(pat=pat_re, case=True, dim="ZZ") with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.extract(pat=pat_re, case=False, dim="ZZ") with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.extractall(pat=pat_re, case=True, group_dim="XX", match_dim="YY") with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.extractall(pat=pat_re, case=False, group_dim="XX", match_dim="YY") with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.findall(pat=pat_re, case=True) with pytest.raises( ValueError, match=r"Case cannot be set when pat is a compiled regex." ): value.str.findall(pat=pat_re, case=False) def test_extract_extractall_name_collision_raises(dtype) -> None: pat_str = r"(\w+)" pat_re = re.compile(pat_str) value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises(KeyError, match=r"Dimension 'X' already present in DataArray."): value.str.extract(pat=pat_str, dim="X") with pytest.raises(KeyError, match=r"Dimension 'X' already present in DataArray."): value.str.extract(pat=pat_re, dim="X") with pytest.raises( KeyError, match=r"Group dimension 'X' already present in DataArray." ): value.str.extractall(pat=pat_str, group_dim="X", match_dim="ZZ") with pytest.raises( KeyError, match=r"Group dimension 'X' already present in DataArray." ): value.str.extractall(pat=pat_re, group_dim="X", match_dim="YY") with pytest.raises( KeyError, match=r"Match dimension 'Y' already present in DataArray." ): value.str.extractall(pat=pat_str, group_dim="XX", match_dim="Y") with pytest.raises( KeyError, match=r"Match dimension 'Y' already present in DataArray." ): value.str.extractall(pat=pat_re, group_dim="XX", match_dim="Y") with pytest.raises( KeyError, match=r"Group dimension 'ZZ' is the same as match dimension 'ZZ'." ): value.str.extractall(pat=pat_str, group_dim="ZZ", match_dim="ZZ") with pytest.raises( KeyError, match=r"Group dimension 'ZZ' is the same as match dimension 'ZZ'." 
): value.str.extractall(pat=pat_re, group_dim="ZZ", match_dim="ZZ") def test_extract_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) targ_none = xr.DataArray( [["a", "bab", "abc"], ["abcd", "", "abcdef"]], dims=["X", "Y"] ).astype(dtype) targ_dim = xr.DataArray( [[["a"], ["bab"], ["abc"]], [["abcd"], [""], ["abcdef"]]], dims=["X", "Y", "XX"] ).astype(dtype) res_str_none = value.str.extract(pat=pat_str, dim=None) res_str_dim = value.str.extract(pat=pat_str, dim="XX") res_str_none_case = value.str.extract(pat=pat_str, dim=None, case=True) res_str_dim_case = value.str.extract(pat=pat_str, dim="XX", case=True) res_re_none = value.str.extract(pat=pat_compiled, dim=None) res_re_dim = value.str.extract(pat=pat_compiled, dim="XX") assert res_str_none.dtype == targ_none.dtype assert res_str_dim.dtype == targ_dim.dtype assert res_str_none_case.dtype == targ_none.dtype assert res_str_dim_case.dtype == targ_dim.dtype assert res_re_none.dtype == targ_none.dtype assert res_re_dim.dtype == targ_dim.dtype assert_equal(res_str_none, targ_none) assert_equal(res_str_dim, targ_dim) assert_equal(res_str_none_case, targ_none) assert_equal(res_str_dim_case, targ_dim) assert_equal(res_re_none, targ_none) assert_equal(res_re_dim, targ_dim) def test_extract_single_nocase(dtype) -> None: pat_str = r"(\w+)?_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "_Xy_1", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) targ_none = xr.DataArray( [["a", "ab", "abc"], ["abcd", "", "abcdef"]], dims=["X", "Y"] ).astype(dtype) targ_dim = xr.DataArray( [[["a"], ["ab"], ["abc"]], [["abcd"], [""], ["abcdef"]]], dims=["X", "Y", "XX"] ).astype(dtype) res_str_none = value.str.extract(pat=pat_str, dim=None, case=False) res_str_dim = value.str.extract(pat=pat_str, dim="XX", case=False) res_re_none = value.str.extract(pat=pat_compiled, dim=None) res_re_dim = value.str.extract(pat=pat_compiled, dim="XX") assert res_re_dim.dtype == targ_none.dtype assert res_str_dim.dtype == targ_dim.dtype assert res_re_none.dtype == targ_none.dtype assert res_re_dim.dtype == targ_dim.dtype assert_equal(res_str_none, targ_none) assert_equal(res_str_dim, targ_dim) assert_equal(res_re_none, targ_none) assert_equal(res_re_dim, targ_dim) def test_extract_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [["a", "0"], ["bab", "110"], ["abc", "01"]], [["abcd", ""], ["", ""], ["abcdef", "101"]], ], dims=["X", "Y", "XX"], ).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="XX") res_re = value.str.extract(pat=pat_compiled, dim="XX") res_str_case = value.str.extract(pat=pat_str, dim="XX", 
case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extract_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [["a", "0"], ["ab", "10"], ["abc", "01"]], [["abcd", ""], ["", ""], ["abcdef", "101"]], ], dims=["X", "Y", "XX"], ).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="XX", case=False) res_re = value.str.extract(pat=pat_compiled, dim="XX") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extract_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_(\d*)", r"(\w+)_xY_(\d*)"], dims=["Y"], ).astype(dtype) pat_compiled = value.str._re_compile(pat=pat_str) expected_list = [ [["a", "0"], ["", ""]], [["", ""], ["ab", "10"]], [["abc", "01"], ["", ""]], ] expected = xr.DataArray(expected_list, dims=["X", "Y", "Zz"]).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="Zz") res_re = value.str.extract(pat=pat_compiled, dim="Zz") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_single_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [[[["a"]], [[""]], [["abc"]]], [[["abcd"]], [[""]], [["abcdef"]]]], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_single_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [[[["a"]], [["ab"]], [["abc"]]], [[["abcd"]], [[""]], [["abcdef"]]]], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def 
test_extractall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a"], [""], [""]], [["bab"], ["baab"], [""]], [["abc"], ["cbc"], [""]]], [ [["abcd"], ["dcd"], ["dccd"]], [[""], [""], [""]], [["abcdef"], ["fef"], [""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a"], [""], [""]], [["ab"], ["bab"], ["baab"]], [["abc"], ["cbc"], [""]], ], [ [["abcd"], ["dcd"], ["dccd"]], [[""], [""], [""]], [["abcdef"], ["fef"], [""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_multi_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a", "0"]], [["", ""]], [["abc", "01"]]], [[["abcd", ""]], [["", ""]], [["abcdef", "101"]]], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a", "0"]], [["ab", "10"]], [["abc", "01"]]], [[["abcd", ""]], [["", 
""]], [["abcdef", "101"]]], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a", "0"], ["", ""], ["", ""]], [["bab", "110"], ["baab", "1100"], ["", ""]], [["abc", "01"], ["cbc", "2210"], ["", ""]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [["", ""], ["", ""], ["", ""]], [["abcdef", "101"], ["fef", "5543210"], ["", ""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a", "0"], ["", ""], ["", ""]], [["ab", "10"], ["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"], ["", ""]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [["", ""], ["", ""], ["", ""]], [["abcdef", "101"], ["fef", "5543210"], ["", ""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_(\d*)", r"(\w+)_xY_(\d*)"], dims=["Y"], ).astype(dtype) pat_re = value.str._re_compile(pat=pat_str) expected_list = [ [[["a", "0"]], [["", ""]]], [[["", ""]], [["ab", "10"]]], [[["abc", "01"]], [["", ""]]], ] expected = xr.DataArray(expected_list, dims=["X", "Y", "ZX", "ZY"]).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="ZX", match_dim="ZY") res_re = value.str.extractall(pat=pat_re, group_dim="ZX", match_dim="ZY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_single_single_case(dtype) -> None: pat_str = 
r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [[["a"], [], ["abc"]], [["abcd"], [], ["abcdef"]]] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_single_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [["a"], ["ab"], ["abc"]], [["abcd"], [], ["abcdef"]], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [["a"], ["bab", "baab"], ["abc", "cbc"]], [ ["abcd", "dcd", "dccd"], [], ["abcdef", "fef"], ], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [ ["a"], ["ab", "bab", "baab"], ["abc", "cbc"], ], [ ["abcd", "dcd", "dccd"], [], ["abcdef", "fef"], ], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_multi_single_case(dtype) -> 
None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [[["a", "0"]], [], [["abc", "01"]]], [[["abcd", ""]], [], [["abcdef", "101"]]], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [[["a", "0"]], [["ab", "10"]], [["abc", "01"]]], [[["abcd", ""]], [], [["abcdef", "101"]]], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [ [["a", "0"]], [["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [], [["abcdef", "101"], ["fef", "5543210"]], ], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [ [["a", "0"]], [["ab", "10"], ["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [], [["abcdef", "101"], ["fef", "5543210"]], ], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in 
expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_\d*", r"\w+_Xy_(\d*)"], dims=["Y"], ).astype(dtype) pat_re = value.str._re_compile(pat=pat_str) expected_list: list[list[list]] = [[["a"], ["0"]], [[], []], [["abc"], ["01"]]] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_repeat(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype) result = values.str.repeat(3) result_mul = values.str * 3 expected = xr.DataArray(["aaa", "bbb", "ccc", "ddd"]).astype(dtype) assert result.dtype == expected.dtype assert result_mul.dtype == expected.dtype assert_equal(result_mul, expected) assert_equal(result, expected) def test_repeat_broadcast(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"], dims=["X"]).astype(dtype) reps = xr.DataArray([3, 4], dims=["Y"]) result = values.str.repeat(reps) result_mul = values.str * reps expected = xr.DataArray( [["aaa", "aaaa"], ["bbb", "bbbb"], ["ccc", "cccc"], ["ddd", "dddd"]], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert result_mul.dtype == expected.dtype assert_equal(result_mul, expected) assert_equal(result, expected) def test_match(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype) # New match behavior introduced in 0.13 pat = values.dtype.type(".*(BAD[_]+).*(BAD)") result = values.str.match(pat) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # Case-sensitive pat = values.dtype.type(".*BAD[_]+.*BAD") result = values.str.match(pat) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # Case-insensitive pat = values.dtype.type(".*bAd[_]+.*bad") result = values.str.match(pat, case=False) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat, flags=re.IGNORECASE)) assert result.dtype == expected.dtype assert_equal(result, expected) def test_empty_str_methods() -> None: empty = xr.DataArray(np.empty(shape=(0,), dtype="U")) empty_str = empty empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int)) empty_bool = xr.DataArray(np.empty(shape=(0,), dtype=bool)) empty_bytes = xr.DataArray(np.empty(shape=(0,), dtype="S")) # TODO: Determine why U and S dtype sizes don't match and figure # out a reliable way to predict what they should be assert empty_bool.dtype == empty.str.contains("a").dtype assert empty_bool.dtype == 
empty.str.endswith("a").dtype assert empty_bool.dtype == empty.str.match("^a").dtype assert empty_bool.dtype == empty.str.startswith("a").dtype assert empty_bool.dtype == empty.str.isalnum().dtype assert empty_bool.dtype == empty.str.isalpha().dtype assert empty_bool.dtype == empty.str.isdecimal().dtype assert empty_bool.dtype == empty.str.isdigit().dtype assert empty_bool.dtype == empty.str.islower().dtype assert empty_bool.dtype == empty.str.isnumeric().dtype assert empty_bool.dtype == empty.str.isspace().dtype assert empty_bool.dtype == empty.str.istitle().dtype assert empty_bool.dtype == empty.str.isupper().dtype assert empty_bytes.dtype.kind == empty.str.encode("ascii").dtype.kind assert empty_int.dtype.kind == empty.str.count("a").dtype.kind assert empty_int.dtype.kind == empty.str.find("a").dtype.kind assert empty_int.dtype.kind == empty.str.len().dtype.kind assert empty_int.dtype.kind == empty.str.rfind("a").dtype.kind assert empty_str.dtype.kind == empty.str.capitalize().dtype.kind assert empty_str.dtype.kind == empty.str.center(42).dtype.kind assert empty_str.dtype.kind == empty.str.get(0).dtype.kind assert empty_str.dtype.kind == empty.str.lower().dtype.kind assert empty_str.dtype.kind == empty.str.lstrip().dtype.kind assert empty_str.dtype.kind == empty.str.pad(42).dtype.kind assert empty_str.dtype.kind == empty.str.repeat(3).dtype.kind assert empty_str.dtype.kind == empty.str.rstrip().dtype.kind assert empty_str.dtype.kind == empty.str.slice(step=1).dtype.kind assert empty_str.dtype.kind == empty.str.slice(stop=1).dtype.kind assert empty_str.dtype.kind == empty.str.strip().dtype.kind assert empty_str.dtype.kind == empty.str.swapcase().dtype.kind assert empty_str.dtype.kind == empty.str.title().dtype.kind assert empty_str.dtype.kind == empty.str.upper().dtype.kind assert empty_str.dtype.kind == empty.str.wrap(42).dtype.kind assert empty_str.dtype.kind == empty_bytes.str.decode("ascii").dtype.kind assert_equal(empty_bool, empty.str.contains("a")) assert_equal(empty_bool, empty.str.endswith("a")) assert_equal(empty_bool, empty.str.match("^a")) assert_equal(empty_bool, empty.str.startswith("a")) assert_equal(empty_bool, empty.str.isalnum()) assert_equal(empty_bool, empty.str.isalpha()) assert_equal(empty_bool, empty.str.isdecimal()) assert_equal(empty_bool, empty.str.isdigit()) assert_equal(empty_bool, empty.str.islower()) assert_equal(empty_bool, empty.str.isnumeric()) assert_equal(empty_bool, empty.str.isspace()) assert_equal(empty_bool, empty.str.istitle()) assert_equal(empty_bool, empty.str.isupper()) assert_equal(empty_bytes, empty.str.encode("ascii")) assert_equal(empty_int, empty.str.count("a")) assert_equal(empty_int, empty.str.find("a")) assert_equal(empty_int, empty.str.len()) assert_equal(empty_int, empty.str.rfind("a")) assert_equal(empty_str, empty.str.capitalize()) assert_equal(empty_str, empty.str.center(42)) assert_equal(empty_str, empty.str.get(0)) assert_equal(empty_str, empty.str.lower()) assert_equal(empty_str, empty.str.lstrip()) assert_equal(empty_str, empty.str.pad(42)) assert_equal(empty_str, empty.str.repeat(3)) assert_equal(empty_str, empty.str.replace("a", "b")) assert_equal(empty_str, empty.str.rstrip()) assert_equal(empty_str, empty.str.slice(step=1)) assert_equal(empty_str, empty.str.slice(stop=1)) assert_equal(empty_str, empty.str.strip()) assert_equal(empty_str, empty.str.swapcase()) assert_equal(empty_str, empty.str.title()) assert_equal(empty_str, empty.str.upper()) assert_equal(empty_str, empty.str.wrap(42)) assert_equal(empty_str, 
empty_bytes.str.decode("ascii")) table = str.maketrans("a", "b") assert empty_str.dtype.kind == empty.str.translate(table).dtype.kind assert_equal(empty_str, empty.str.translate(table)) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.isalnum(), [True, True, True, True, True, False, True, True, False, False], id="isalnum", ), pytest.param( lambda x: x.str.isalpha(), [True, True, True, False, False, False, True, False, False, False], id="isalpha", ), pytest.param( lambda x: x.str.isdigit(), [False, False, False, True, False, False, False, True, False, False], id="isdigit", ), pytest.param( lambda x: x.str.islower(), [False, True, False, False, False, False, False, False, False, False], id="islower", ), pytest.param( lambda x: x.str.isspace(), [False, False, False, False, False, False, False, False, False, True], id="isspace", ), pytest.param( lambda x: x.str.istitle(), [True, False, True, False, True, False, False, False, False, False], id="istitle", ), pytest.param( lambda x: x.str.isupper(), [True, False, False, False, True, False, True, False, False, False], id="isupper", ), ], ) def test_ismethods( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: list[bool] ) -> None: values = xr.DataArray( ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "] ).astype(dtype) expected_da = xr.DataArray(expected) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) def test_isnumeric() -> None: # 0x00bc: ยผ VULGAR FRACTION ONE QUARTER # 0x2605: โ˜… not number # 0x1378: แธ ETHIOPIC NUMBER SEVENTY # 0xFF13: ๏ผ“ Em 3 values = xr.DataArray(["A", "3", "ยผ", "โ˜…", "แธ", "๏ผ“", "four"]) exp_numeric = xr.DataArray([False, True, True, False, True, True, False]) exp_decimal = xr.DataArray([False, True, False, False, False, True, False]) res_numeric = values.str.isnumeric() res_decimal = values.str.isdecimal() assert res_numeric.dtype == exp_numeric.dtype assert res_decimal.dtype == exp_decimal.dtype assert_equal(res_numeric, exp_numeric) assert_equal(res_decimal, exp_decimal) def test_len(dtype) -> None: values = ["foo", "fooo", "fooooo", "fooooooo"] result = xr.DataArray(values).astype(dtype).str.len() expected = xr.DataArray([len(x) for x in values]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_find(dtype) -> None: values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"]) values = values.astype(dtype) result_0 = values.str.find("EF") result_1 = values.str.find("EF", side="left") expected_0 = xr.DataArray([4, 3, 1, 0, -1]) expected_1 = xr.DataArray([v.find(dtype("EF")) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF") result_1 = values.str.find("EF", side="right") expected_0 = xr.DataArray([4, 5, 7, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF")) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.find("EF", 3) result_1 = values.str.find("EF", 3, 
side="left") expected_0 = xr.DataArray([4, 3, 7, 4, -1]) expected_1 = xr.DataArray([v.find(dtype("EF"), 3) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF", 3) result_1 = values.str.find("EF", 3, side="right") expected_0 = xr.DataArray([4, 5, 7, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF"), 3) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.find("EF", 3, 6) result_1 = values.str.find("EF", 3, 6, side="left") expected_0 = xr.DataArray([4, 3, -1, 4, -1]) expected_1 = xr.DataArray([v.find(dtype("EF"), 3, 6) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF", 3, 6) result_1 = values.str.find("EF", 3, 6, side="right") expected_0 = xr.DataArray([4, 3, -1, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF"), 3, 6) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) def test_find_broadcast(dtype) -> None: values = xr.DataArray( ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"], dims=["X"] ) values = values.astype(dtype) sub = xr.DataArray(["EF", "BC", "XX"], dims=["Y"]).astype(dtype) start = xr.DataArray([0, 7], dims=["Z"]) end = xr.DataArray([6, 9], dims=["Z"]) result_0 = values.str.find(sub, start, end) result_1 = values.str.find(sub, start, end, side="left") expected = xr.DataArray( [ [[4, -1], [1, -1], [-1, -1]], [[3, -1], [0, -1], [-1, -1]], [[1, 7], [-1, -1], [-1, -1]], [[0, -1], [-1, -1], [-1, -1]], [[-1, -1], [-1, -1], [0, -1]], ], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = values.str.rfind(sub, start, end) result_1 = values.str.find(sub, start, end, side="right") expected = xr.DataArray( [ [[4, -1], [1, -1], [-1, -1]], [[3, -1], [0, -1], [-1, -1]], [[1, 7], [-1, -1], [-1, -1]], [[4, -1], [-1, -1], [-1, -1]], [[-1, -1], [-1, -1], [1, -1]], ], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) def test_index(dtype) -> None: s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype) result_0 = s.str.index("EF") result_1 = s.str.index("EF", side="left") expected = xr.DataArray([4, 3, 1, 0]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) 
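    # Unlike .str.find (tested above), .str.index follows Python's str.index
    # and raises an error when the substring is not found, which is checked at
    # the end of this test.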
assert_equal(result_1, expected) result_0 = s.str.rindex("EF") result_1 = s.str.index("EF", side="right") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.index("EF", 3) result_1 = s.str.index("EF", 3, side="left") expected = xr.DataArray([4, 3, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.rindex("EF", 3) result_1 = s.str.index("EF", 3, side="right") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.index("E", 4, 8) result_1 = s.str.index("E", 4, 8, side="left") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.rindex("E", 0, 5) result_1 = s.str.index("E", 0, 5, side="right") expected = xr.DataArray([4, 3, 1, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) matchtype = "subsection" if dtype == np.bytes_ else "substring" with pytest.raises(ValueError, match=f"{matchtype} not found"): s.str.index("DE") def test_index_broadcast(dtype) -> None: values = xr.DataArray( ["ABCDEFGEFDBCA", "BCDEFEFEFDBC", "DEFBCGHIEFBC", "EFGHBCEFBCBCBCEF"], dims=["X"], ) values = values.astype(dtype) sub = xr.DataArray(["EF", "BC"], dims=["Y"]).astype(dtype) start = xr.DataArray([0, 6], dims=["Z"]) end = xr.DataArray([6, 12], dims=["Z"]) result_0 = values.str.index(sub, start, end) result_1 = values.str.index(sub, start, end, side="left") expected = xr.DataArray( [[[4, 7], [1, 10]], [[3, 7], [0, 10]], [[1, 8], [3, 10]], [[0, 6], [4, 8]]], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = values.str.rindex(sub, start, end) result_1 = values.str.index(sub, start, end, side="right") expected = xr.DataArray( [[[4, 7], [1, 10]], [[3, 7], [0, 10]], [[1, 8], [3, 10]], [[0, 6], [4, 10]]], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) def test_translate() -> None: values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"]) table = str.maketrans("abc", "cde") result = values.str.translate(table) expected = xr.DataArray(["cdedefg", "cdee", "edddfg", "edefggg"]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_pad_center_ljust_rjust(dtype) -> None: values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype) result = values.str.center(5) expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="both") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(5) expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="right") assert result.dtype == expected.dtype assert_equal(result, expected) result = 
values.str.rjust(5) expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="left") assert result.dtype == expected.dtype assert_equal(result, expected) def test_pad_center_ljust_rjust_fillchar(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype) result = values.str.center(5, fillchar="X") expected = xr.DataArray(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="both", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(5, fillchar="X") expected = xr.DataArray(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(5, side="right", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rjust(5, fillchar="X") expected = xr.DataArray(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(5, side="left", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) # If fillchar is not a charatter, normal str raises TypeError # 'aaa'.ljust(5, 'XY') # TypeError: must be char, not str template = "fillchar must be a character, not {dtype}" with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.center(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.ljust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.rjust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.pad(5, fillchar="XY") def test_pad_center_ljust_rjust_broadcast(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"], dims="X").astype( dtype ) width = xr.DataArray([5, 4], dims="Y") fillchar = xr.DataArray(["X", "#"], dims="Y").astype(dtype) result = values.str.center(width, fillchar=fillchar) expected = xr.DataArray( [ ["XXaXX", "#a##"], ["XXbbX", "#bb#"], ["Xcccc", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(width, side="both", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(width, fillchar=fillchar) expected = xr.DataArray( [ ["aXXXX", "a###"], ["bbXXX", "bb##"], ["ccccX", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(width, side="right", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rjust(width, fillchar=fillchar) expected = xr.DataArray( [ ["XXXXa", "###a"], ["XXXbb", "##bb"], ["Xcccc", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(width, side="left", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) def test_zfill(dtype) -> None: values = 
xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) result = values.str.zfill(5) expected = xr.DataArray(["00001", "00022", "00aaa", "00333", "45678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.zfill(3) expected = xr.DataArray(["001", "022", "aaa", "333", "45678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_zfill_broadcast(dtype) -> None: values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) width = np.array([4, 5, 0, 3, 8]) result = values.str.zfill(width) expected = xr.DataArray(["0001", "00022", "aaa", "333", "00045678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_slice(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) result = arr.str.slice(2, 5) exp = xr.DataArray(["foo", "bar", "baz"]).astype(dtype) assert result.dtype == exp.dtype assert_equal(result, exp) for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]: try: result = arr.str[start:stop:step] expected = xr.DataArray([s[start:stop:step] for s in arr.values]) assert_equal(result, expected.astype(dtype)) except IndexError: print(f"failed on {start}:{stop}:{step}") raise def test_slice_broadcast(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) start = xr.DataArray([1, 2, 3]) stop = 5 result = arr.str.slice(start=start, stop=stop) exp = xr.DataArray(["afoo", "bar", "az"]).astype(dtype) assert result.dtype == exp.dtype assert_equal(result, exp) def test_slice_replace(dtype) -> None: da = lambda x: xr.DataArray(x).astype(dtype) values = da(["short", "a bit longer", "evenlongerthanthat", ""]) expected = da(["shrt", "a it longer", "evnlongerthanthat", ""]) result = values.str.slice_replace(2, 3) assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzrt", "a zit longer", "evznlongerthanthat", "z"]) result = values.str.slice_replace(2, 3, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"]) result = values.str.slice_replace(2, 2, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"]) result = values.str.slice_replace(2, 1, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shorz", "a bit longez", "evenlongerthanthaz", "z"]) result = values.str.slice_replace(-1, None, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["zrt", "zer", "zat", "z"]) result = values.str.slice_replace(None, -2, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shortz", "a bit znger", "evenlozerthanthat", "z"]) result = values.str.slice_replace(6, 8, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["zrt", "a zit longer", "evenlongzerthanthat", "z"]) result = values.str.slice_replace(-10, 3, "z") assert result.dtype == expected.dtype assert_equal(result, expected) def test_slice_replace_broadcast(dtype) -> None: values = xr.DataArray(["short", "a bit longer", "evenlongerthanthat", ""]).astype( dtype ) start = 2 stop = np.array([4, 5, None, 7]) repl = "test" expected = xr.DataArray(["shtestt", "a test longer", "evtest", "test"]).astype( dtype ) result = values.str.slice_replace(start, stop, repl) assert result.dtype 
== expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip(dtype) -> None: values = xr.DataArray([" aa ", " bb \n", "cc "]).astype(dtype) result = values.str.strip() expected = xr.DataArray(["aa", "bb", "cc"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip() expected = xr.DataArray(["aa ", "bb \n", "cc "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip() expected = xr.DataArray([" aa", " bb", "cc"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip_args(dtype) -> None: values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype) result = values.str.strip("x") expected = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip("x") expected = xr.DataArray(["ABCxx", " BNSD", "LDFJH xx"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip("x") expected = xr.DataArray(["xxABC", "xx BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip_broadcast(dtype) -> None: values = xr.DataArray(["xxABCxx", "yy BNSD", "LDFJH zz"]).astype(dtype) to_strip = xr.DataArray(["x", "y", "z"]).astype(dtype) result = values.str.strip(to_strip) expected = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip(to_strip) expected = xr.DataArray(["ABCxx", " BNSD", "LDFJH zz"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip(to_strip) expected = xr.DataArray(["xxABC", "yy BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_wrap() -> None: # test values are: two words less than width, two words equal to width, # two words greater than width, one word less than width, one word # equal to width, one word greater than width, multiple tokens with # trailing whitespace equal to width values = xr.DataArray( [ "hello world", "hello world!", "hello world!!", "abcdefabcde", "abcdefabcdef", "abcdefabcdefa", "ab ab ab ab ", "ab ab ab ab a", "\t", ] ) # expected values expected = xr.DataArray( [ "hello world", "hello world!", "hello\nworld!!", "abcdefabcde", "abcdefabcdef", "abcdefabcdef\na", "ab ab ab ab", "ab ab ab ab\na", "", ] ) result = values.str.wrap(12, break_long_words=True) assert result.dtype == expected.dtype assert_equal(result, expected) # test with pre and post whitespace (non-unicode), NaN, and non-ascii # Unicode values = xr.DataArray([" pre ", "\xac\u20ac\U00008000 abadcafe"]) expected = xr.DataArray([" pre", "\xac\u20ac\U00008000 ab\nadcafe"]) result = values.str.wrap(6) assert result.dtype == expected.dtype assert_equal(result, expected) def test_wrap_kwargs_passed() -> None: # GH4334 values = xr.DataArray(" hello world ") result = values.str.wrap(7) expected = xr.DataArray(" hello\nworld") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.wrap(7, drop_whitespace=False) expected = xr.DataArray(" hello\n world\n ") assert result.dtype == expected.dtype assert_equal(result, expected) def test_get(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype) result = values.str[2] expected = xr.DataArray(["b", 
"d", "g"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) # bounds testing values = xr.DataArray(["1_2_3_4_5", "6_7_8_9_10", "11_12"]).astype(dtype) # positive index result = values.str[5] expected = xr.DataArray(["_", "_", ""]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) # negative index result = values.str[-6] expected = xr.DataArray(["_", "8", ""]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_get_default(dtype) -> None: # GH4334 values = xr.DataArray(["a_b", "c", ""]).astype(dtype) result = values.str.get(2, "default") expected = xr.DataArray(["b", "default", "default"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_get_broadcast(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"], dims=["X"]).astype(dtype) inds = xr.DataArray([0, 2], dims=["Y"]) result = values.str.get(inds) expected = xr.DataArray( [["a", "b"], ["c", "d"], ["f", "g"]], dims=["X", "Y"] ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_encode_decode() -> None: data = xr.DataArray(["a", "b", "a\xe4"]) encoded = data.str.encode("utf-8") decoded = encoded.str.decode("utf-8") assert data.dtype == decoded.dtype assert_equal(data, decoded) def test_encode_decode_errors() -> None: encodeBase = xr.DataArray(["a", "b", "a\x9d"]) msg = ( r"'charmap' codec can't encode character '\\x9d' in position 1:" " character maps to " ) with pytest.raises(UnicodeEncodeError, match=msg): encodeBase.str.encode("cp1252") f = lambda x: x.encode("cp1252", "ignore") result = encodeBase.str.encode("cp1252", "ignore") expected = xr.DataArray([f(x) for x in encodeBase.values.tolist()]) assert result.dtype == expected.dtype assert_equal(result, expected) decodeBase = xr.DataArray([b"a", b"b", b"a\x9d"]) msg = ( "'charmap' codec can't decode byte 0x9d in position 1:" " character maps to " ) with pytest.raises(UnicodeDecodeError, match=msg): decodeBase.str.decode("cp1252") f = lambda x: x.decode("cp1252", "ignore") result = decodeBase.str.decode("cp1252", "ignore") expected = xr.DataArray([f(x) for x in decodeBase.values.tolist()]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_partition_whitespace(dtype) -> None: values = xr.DataArray( [ ["abc def", "spam eggs swallow", "red_blue"], ["test0 test1 test2 test3", "", "abra ka da bra"], ], dims=["X", "Y"], ).astype(dtype) exp_part_dim_list = [ [ ["abc", " ", "def"], ["spam", " ", "eggs swallow"], ["red_blue", "", ""], ], [ ["test0", " ", "test1 test2 test3"], ["", "", ""], ["abra", " ", "ka da bra"], ], ] exp_rpart_dim_list = [ [ ["abc", " ", "def"], ["spam eggs", " ", "swallow"], ["", "", "red_blue"], ], [ ["test0 test1 test2", " ", "test3"], ["", "", ""], ["abra ka da", " ", "bra"], ], ] exp_part_dim = xr.DataArray(exp_part_dim_list, dims=["X", "Y", "ZZ"]).astype(dtype) exp_rpart_dim = xr.DataArray(exp_rpart_dim_list, dims=["X", "Y", "ZZ"]).astype( dtype ) res_part_dim = values.str.partition(dim="ZZ") res_rpart_dim = values.str.rpartition(dim="ZZ") assert res_part_dim.dtype == exp_part_dim.dtype assert res_rpart_dim.dtype == exp_rpart_dim.dtype assert_equal(res_part_dim, exp_part_dim) assert_equal(res_rpart_dim, exp_rpart_dim) def test_partition_comma(dtype) -> None: values = xr.DataArray( [ ["abc, def", "spam, eggs, swallow", "red_blue"], ["test0, test1, test2, test3", "", "abra, ka, da, bra"], ], dims=["X", "Y"], ).astype(dtype) exp_part_dim_list = [ 
[ ["abc", ", ", "def"], ["spam", ", ", "eggs, swallow"], ["red_blue", "", ""], ], [ ["test0", ", ", "test1, test2, test3"], ["", "", ""], ["abra", ", ", "ka, da, bra"], ], ] exp_rpart_dim_list = [ [ ["abc", ", ", "def"], ["spam, eggs", ", ", "swallow"], ["", "", "red_blue"], ], [ ["test0, test1, test2", ", ", "test3"], ["", "", ""], ["abra, ka, da", ", ", "bra"], ], ] exp_part_dim = xr.DataArray(exp_part_dim_list, dims=["X", "Y", "ZZ"]).astype(dtype) exp_rpart_dim = xr.DataArray(exp_rpart_dim_list, dims=["X", "Y", "ZZ"]).astype( dtype ) res_part_dim = values.str.partition(sep=", ", dim="ZZ") res_rpart_dim = values.str.rpartition(sep=", ", dim="ZZ") assert res_part_dim.dtype == exp_part_dim.dtype assert res_rpart_dim.dtype == exp_rpart_dim.dtype assert_equal(res_part_dim, exp_part_dim) assert_equal(res_rpart_dim, exp_rpart_dim) def test_partition_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.partition(sep=", ", dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(dim=None), [ [["abc", "def"], ["spam", "eggs", "swallow"], ["red_blue"]], [["test0", "test1", "test2", "test3"], [], ["abra", "ka", "da", "bra"]], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(dim=None), [ [["abc", "def"], ["spam", "eggs", "swallow"], ["red_blue"]], [["test0", "test1", "test2", "test3"], [], ["abra", "ka", "da", "bra"]], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(dim=None, maxsplit=1), [ [["abc", "def"], ["spam", "eggs\tswallow"], ["red_blue"]], [["test0", "test1\ntest2\n\ntest3"], [], ["abra", "ka\nda\tbra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(dim=None, maxsplit=1), [ [["abc", "def"], ["spam\t\teggs", "swallow"], ["red_blue"]], [["test0\ntest1\ntest2", "test3"], [], ["abra ka\nda", "bra"]], ], id="rsplit_1", ), ], ) def test_split_whitespace_nodim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc def", "spam\t\teggs\tswallow", "red_blue"], ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y"]) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(dim="ZZ"), [ [ ["abc", "def", "", ""], ["spam", "eggs", "swallow", ""], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(dim="ZZ"), [ [ ["", "", "abc", "def"], ["", "spam", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam", "eggs\tswallow"], ["red_blue", ""]], [["test0", "test1\ntest2\n\ntest3"], ["", ""], ["abra", "ka\nda\tbra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam\t\teggs", "swallow"], ["", "red_blue"]], [["test0\ntest1\ntest2", "test3"], ["", ""], ["abra ka\nda", "bra"]], ], 
id="rsplit_1", ), ], ) def test_split_whitespace_dim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc def", "spam\t\teggs\tswallow", "red_blue"], ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]).astype(dtype) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(sep=",", dim=None), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(sep=",", dim=None, maxsplit=1), [ [["abc", "def"], ["spam", ",eggs,swallow"], ["red_blue"]], [["test0", "test1,test2,test3"], [""], ["abra", "ka,da,bra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None, maxsplit=1), [ [["abc", "def"], ["spam,,eggs", "swallow"], ["red_blue"]], [["test0,test1,test2", "test3"], [""], ["abra,ka,da", "bra"]], ], id="rsplit_1", ), pytest.param( lambda x: x.str.split(sep=",", dim=None, maxsplit=10), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="split_10", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None, maxsplit=10), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_10", ), ], ) def test_split_comma_nodim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc,def", "spam,,eggs,swallow", "red_blue"], ["test0,test1,test2,test3", "", "abra,ka,da,bra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y"]) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(sep=",", dim="ZZ"), [ [ ["abc", "def", "", ""], ["spam", "", "eggs", "swallow"], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ"), [ [ ["", "", "abc", "def"], ["spam", "", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(sep=",", dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam", ",eggs,swallow"], ["red_blue", ""]], [["test0", "test1,test2,test3"], ["", ""], ["abra", "ka,da,bra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam,,eggs", "swallow"], ["", "red_blue"]], [["test0,test1,test2", "test3"], ["", ""], ["abra,ka,da", 
"bra"]], ], id="rsplit_1", ), pytest.param( lambda x: x.str.split(sep=",", dim="ZZ", maxsplit=10), [ [ ["abc", "def", "", ""], ["spam", "", "eggs", "swallow"], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_10", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ", maxsplit=10), [ [ ["", "", "abc", "def"], ["spam", "", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_10", ), ], ) def test_split_comma_dim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc,def", "spam,,eggs,swallow", "red_blue"], ["test0,test1,test2,test3", "", "abra,ka,da,bra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]).astype(dtype) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) def test_splitters_broadcast(dtype) -> None: values = xr.DataArray( ["ab cd,de fg", "spam, ,eggs swallow", "red_blue"], dims=["X"], ).astype(dtype) sep = xr.DataArray( [" ", ","], dims=["Y"], ).astype(dtype) expected_left = xr.DataArray( [ [["ab", "cd,de fg"], ["ab cd", "de fg"]], [["spam,", ",eggs swallow"], ["spam", " ,eggs swallow"]], [["red_blue", ""], ["red_blue", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) expected_right = xr.DataArray( [ [["ab cd,de", "fg"], ["ab cd", "de fg"]], [["spam, ,eggs", "swallow"], ["spam, ", "eggs swallow"]], [["", "red_blue"], ["", "red_blue"]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) res_left = values.str.split(dim="ZZ", sep=sep, maxsplit=1) res_right = values.str.rsplit(dim="ZZ", sep=sep, maxsplit=1) # assert res_left.dtype == expected_left.dtype # assert res_right.dtype == expected_right.dtype assert_equal(res_left, expected_left) assert_equal(res_right, expected_right) expected_left = xr.DataArray( [ [["ab", " ", "cd,de fg"], ["ab cd", ",", "de fg"]], [["spam,", " ", ",eggs swallow"], ["spam", ",", " ,eggs swallow"]], [["red_blue", "", ""], ["red_blue", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) expected_right = xr.DataArray( [ [["ab", " ", "cd,de fg"], ["ab cd", ",", "de fg"]], [["spam,", " ", ",eggs swallow"], ["spam", ",", " ,eggs swallow"]], [["red_blue", "", ""], ["red_blue", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) res_left = values.str.partition(dim="ZZ", sep=sep) res_right = values.str.partition(dim="ZZ", sep=sep) # assert res_left.dtype == expected_left.dtype # assert res_right.dtype == expected_right.dtype assert_equal(res_left, expected_left) assert_equal(res_right, expected_right) def test_split_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.split(sep=", ", dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) def test_get_dummies(dtype) -> None: values_line = xr.DataArray( [["a|ab~abc|abc", "ab", "a||abc|abcd"], ["abcd|ab|a", "abc|ab~abc", "|a"]], dims=["X", "Y"], ).astype(dtype) values_comma = xr.DataArray( [["a~ab|abc~~abc", "ab", "a~abc~abcd"], ["abcd~ab~a", "abc~ab|abc", "~a"]], dims=["X", "Y"], ).astype(dtype) vals_line = np.array(["a", "ab", "abc", "abcd", "ab~abc"]).astype(dtype) vals_comma = np.array(["a", "ab", "abc", "abcd", 
"ab|abc"]).astype(dtype) expected_list = [ [ [True, False, True, False, True], [False, True, False, False, False], [True, False, True, True, False], ], [ [True, True, False, True, False], [False, False, True, False, True], [True, False, False, False, False], ], ] expected_np = np.array(expected_list) expected = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]) targ_line = expected.copy() targ_comma = expected.copy() targ_line.coords["ZZ"] = vals_line targ_comma.coords["ZZ"] = vals_comma res_default = values_line.str.get_dummies(dim="ZZ") res_line = values_line.str.get_dummies(dim="ZZ", sep="|") res_comma = values_comma.str.get_dummies(dim="ZZ", sep="~") assert res_default.dtype == targ_line.dtype assert res_line.dtype == targ_line.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_default, targ_line) assert_equal(res_line, targ_line) assert_equal(res_comma, targ_comma) def test_get_dummies_broadcast(dtype) -> None: values = xr.DataArray( ["x~x|x~x", "x", "x|x~x", "x~x"], dims=["X"], ).astype(dtype) sep = xr.DataArray( ["|", "~"], dims=["Y"], ).astype(dtype) expected_list = [ [[False, False, True], [True, True, False]], [[True, False, False], [True, False, False]], [[True, False, True], [True, True, False]], [[False, False, True], [True, False, False]], ] expected_np = np.array(expected_list) expected = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]) expected.coords["ZZ"] = np.array(["x", "x|x", "x~x"]).astype(dtype) res = values.str.get_dummies(dim="ZZ", sep=sep) assert res.dtype == expected.dtype assert_equal(res, expected) def test_get_dummies_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.get_dummies(dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) def test_splitters_empty_str(dtype) -> None: values = xr.DataArray( [["", "", ""], ["", "", ""]], dims=["X", "Y"], ).astype(dtype) targ_partition_dim = xr.DataArray( [ [["", "", ""], ["", "", ""], ["", "", ""]], [["", "", ""], ["", "", ""], ["", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) targ_partition_none_list = [ [["", "", ""], ["", "", ""], ["", "", ""]], [["", "", ""], ["", "", ""], ["", "", "", ""]], ] targ_partition_none_list = [ [[dtype(x) for x in y] for y in z] for z in targ_partition_none_list ] targ_partition_none_np = np.array(targ_partition_none_list, dtype=np.object_) del targ_partition_none_np[-1, -1][-1] targ_partition_none = xr.DataArray( targ_partition_none_np, dims=["X", "Y"], ) targ_split_dim = xr.DataArray( [[[""], [""], [""]], [[""], [""], [""]]], dims=["X", "Y", "ZZ"], ).astype(dtype) targ_split_none = xr.DataArray( np.array([[[], [], []], [[], [], [""]]], dtype=np.object_), dims=["X", "Y"], ) del targ_split_none.data[-1, -1][-1] res_partition_dim = values.str.partition(dim="ZZ") res_rpartition_dim = values.str.rpartition(dim="ZZ") res_partition_none = values.str.partition(dim=None) res_rpartition_none = values.str.rpartition(dim=None) res_split_dim = values.str.split(dim="ZZ") res_rsplit_dim = values.str.rsplit(dim="ZZ") res_split_none = values.str.split(dim=None) res_rsplit_none = values.str.rsplit(dim=None) res_dummies = values.str.rsplit(dim="ZZ") assert res_partition_dim.dtype == targ_partition_dim.dtype assert res_rpartition_dim.dtype == targ_partition_dim.dtype assert res_partition_none.dtype == targ_partition_none.dtype assert res_rpartition_none.dtype == targ_partition_none.dtype assert res_split_dim.dtype == targ_split_dim.dtype assert 
res_rsplit_dim.dtype == targ_split_dim.dtype assert res_split_none.dtype == targ_split_none.dtype assert res_rsplit_none.dtype == targ_split_none.dtype assert res_dummies.dtype == targ_split_dim.dtype assert_equal(res_partition_dim, targ_partition_dim) assert_equal(res_rpartition_dim, targ_partition_dim) assert_equal(res_partition_none, targ_partition_none) assert_equal(res_rpartition_none, targ_partition_none) assert_equal(res_split_dim, targ_split_dim) assert_equal(res_rsplit_dim, targ_split_dim) assert_equal(res_split_none, targ_split_none) assert_equal(res_rsplit_none, targ_split_none) assert_equal(res_dummies, targ_split_dim) def test_cat_str(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = "111" targ_blank = xr.DataArray( [["a111", "bb111", "cccc111"], ["ddddd111", "eeee111", "fff111"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 111", "bb 111", "cccc 111"], ["ddddd 111", "eeee 111", "fff 111"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||111", "bb||111", "cccc||111"], ["ddddd||111", "eeee||111", "fff||111"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 111", "bb, 111", "cccc, 111"], ["ddddd, 111", "eeee, 111", "fff, 111"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_uniform(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = xr.DataArray( [["11111", "222", "33"], ["4", "5555", "66"]], dims=["X", "Y"], ) targ_blank = xr.DataArray( [["a11111", "bb222", "cccc33"], ["ddddd4", "eeee5555", "fff66"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["ddddd 4", "eeee 5555", "fff 66"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["ddddd||4", "eeee||5555", "fff||66"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["ddddd, 4", "eeee, 5555", "fff, 66"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_right(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = xr.DataArray( ["11111", "222", "33"], dims=["Y"], ) targ_blank = xr.DataArray( 
[["a11111", "bb222", "cccc33"], ["ddddd11111", "eeee222", "fff33"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["ddddd 11111", "eeee 222", "fff 33"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["ddddd||11111", "eeee||222", "fff||33"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["ddddd, 11111", "eeee, 222", "fff, 33"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_left(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) values_2 = xr.DataArray( [["11111", "222", "33"], ["4", "5555", "66"]], dims=["X", "Y"], ) targ_blank = ( xr.DataArray( [["a11111", "bb222", "cccc33"], ["a4", "bb5555", "cccc66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_space = ( xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["a 4", "bb 5555", "cccc 66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_bars = ( xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["a||4", "bb||5555", "cccc||66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_comma = ( xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["a, 4", "bb, 5555", "cccc, 66"]], dims=["X", "Y"], ) .astype(dtype) .T ) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_both(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) values_2 = xr.DataArray( ["11111", "4"], dims=["X"], ) targ_blank = ( xr.DataArray( [["a11111", "bb11111", "cccc11111"], ["a4", "bb4", "cccc4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_space = ( xr.DataArray( [["a 11111", "bb 11111", "cccc 11111"], ["a 4", "bb 4", "cccc 4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_bars = ( xr.DataArray( [["a||11111", "bb||11111", "cccc||11111"], ["a||4", "bb||4", "cccc||4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_comma = ( xr.DataArray( [["a, 11111", "bb, 11111", "cccc, 11111"], ["a, 4", "bb, 4", "cccc, 4"]], dims=["X", "Y"], ) .astype(dtype) .T ) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype 
assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_multi() -> None: values_1 = xr.DataArray( ["11111", "4"], dims=["X"], ) values_2 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(np.bytes_) values_3 = np.array(3.4) values_4 = "" values_5 = np.array("", dtype=np.str_) sep = xr.DataArray( [" ", ", "], dims=["ZZ"], ).astype(np.str_) expected = xr.DataArray( [ [ ["11111 a 3.4 ", "11111, a, 3.4, , "], ["11111 bb 3.4 ", "11111, bb, 3.4, , "], ["11111 cccc 3.4 ", "11111, cccc, 3.4, , "], ], [ ["4 a 3.4 ", "4, a, 3.4, , "], ["4 bb 3.4 ", "4, bb, 3.4, , "], ["4 cccc 3.4 ", "4, cccc, 3.4, , "], ], ], dims=["X", "Y", "ZZ"], ).astype(np.str_) res = values_1.str.cat(values_2, values_3, values_4, values_5, sep=sep) assert res.dtype == expected.dtype assert_equal(res, expected) def test_join_scalar(dtype) -> None: values = xr.DataArray("aaa").astype(dtype) targ = xr.DataArray("aaa").astype(dtype) res_blank = values.str.join() res_space = values.str.join(sep=" ") assert res_blank.dtype == targ.dtype assert res_space.dtype == targ.dtype assert_identical(res_blank, targ) assert_identical(res_space, targ) def test_join_vector(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) targ_blank = xr.DataArray("abbcccc").astype(dtype) targ_space = xr.DataArray("a bb cccc").astype(dtype) res_blank_none = values.str.join() res_blank_y = values.str.join(dim="Y") res_space_none = values.str.join(sep=" ") res_space_y = values.str.join(dim="Y", sep=" ") assert res_blank_none.dtype == targ_blank.dtype assert res_blank_y.dtype == targ_blank.dtype assert res_space_none.dtype == targ_space.dtype assert res_space_y.dtype == targ_space.dtype assert_identical(res_blank_none, targ_blank) assert_identical(res_blank_y, targ_blank) assert_identical(res_space_none, targ_space) assert_identical(res_space_y, targ_space) def test_join_2d(dtype) -> None: values = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) targ_blank_x = xr.DataArray( ["addddd", "bbeeee", "ccccfff"], dims=["Y"], ).astype(dtype) targ_space_x = xr.DataArray( ["a ddddd", "bb eeee", "cccc fff"], dims=["Y"], ).astype(dtype) targ_blank_y = xr.DataArray( ["abbcccc", "dddddeeeefff"], dims=["X"], ).astype(dtype) targ_space_y = xr.DataArray( ["a bb cccc", "ddddd eeee fff"], dims=["X"], ).astype(dtype) res_blank_x = values.str.join(dim="X") res_blank_y = values.str.join(dim="Y") res_space_x = values.str.join(dim="X", sep=" ") res_space_y = values.str.join(dim="Y", sep=" ") assert res_blank_x.dtype == targ_blank_x.dtype assert res_blank_y.dtype == targ_blank_y.dtype assert res_space_x.dtype == targ_space_x.dtype assert res_space_y.dtype == targ_space_y.dtype assert_identical(res_blank_x, targ_blank_x) assert_identical(res_blank_y, targ_blank_y) assert_identical(res_space_x, targ_space_x) assert_identical(res_space_y, targ_space_y) with pytest.raises( ValueError, match=r"Dimension must be specified for multidimensional arrays." 
): values.str.join() def test_join_broadcast(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["X"], ).astype(dtype) sep = xr.DataArray( [" ", ", "], dims=["ZZ"], ).astype(dtype) expected = xr.DataArray( ["a bb cccc", "a, bb, cccc"], dims=["ZZ"], ).astype(dtype) res = values.str.join(sep=sep) assert res.dtype == expected.dtype assert_identical(res, expected) def test_format_scalar() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = "2.3" X = "'test'" Y = "X" ZZ = None W = "NO!" expected = xr.DataArray( ["1.X.None", "1,1.2,'test','test'", "'test'-X-None"], dims=["X"], ).astype(np.str_) res = values.str.format(pos0, pos1, pos2, X=X, Y=Y, ZZ=ZZ, W=W) assert res.dtype == expected.dtype assert_equal(res, expected) def test_format_broadcast() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) X = "'test'" Y = "X" ZZ = None W = "NO!" expected = xr.DataArray( [ ["1.X.None", "1.X.None"], ["1,1.2,'test','test'", "1,1.2,'test','test'"], ["'test'-X-None", "'test'-X-None"], ], dims=["X", "YY"], ).astype(np.str_) res = values.str.format(pos0, pos1, pos2, X=X, Y=Y, ZZ=ZZ, W=W) assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_scalar() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = "2.3" expected = xr.DataArray( ["1.1.2.2.3", "1,1.2,2.3", "1-1.2-2.3"], dims=["X"], ).astype(np.str_) res = values.str % (pos0, pos1, pos2) assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_dict() -> None: values = xr.DataArray( ["%(a)s.%(a)s.%(b)s", "%(b)s,%(c)s,%(b)s", "%(c)s-%(b)s-%(a)s"], dims=["X"], ).astype(np.str_) a = 1 b = 1.2 c = "2.3" expected = xr.DataArray( ["1.1.1.2", "1.2,2.3,1.2", "2.3-1.2-1"], dims=["X"], ).astype(np.str_) res = values.str % {"a": a, "b": b, "c": c} assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_broadcast_single() -> None: values = xr.DataArray( ["%s_1", "%s_2", "%s_3"], dims=["X"], ).astype(np.str_) pos = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) expected = xr.DataArray( [["2.3_1", "3.44444_1"], ["2.3_2", "3.44444_2"], ["2.3_3", "3.44444_3"]], dims=["X", "YY"], ).astype(np.str_) res = values.str % pos assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_broadcast_multi() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) expected = xr.DataArray( [ ["1.1.2.2.3", "1.1.2.3.44444"], ["1,1.2,2.3", "1,1.2,3.44444"], ["1-1.2-2.3", "1-1.2-3.44444"], ], dims=["X", "YY"], ).astype(np.str_) res = values.str % (pos0, pos1, pos2) assert res.dtype == expected.dtype assert_equal(res, expected) xarray-2025.12.0/xarray/tests/test_array_api.py000066400000000000000000000107151511464676000213770ustar00rootroot00000000000000from __future__ import annotations import pytest import xarray as xr from xarray.testing import assert_equal np = pytest.importorskip("numpy", minversion="1.22") xp = pytest.importorskip("array_api_strict") from array_api_strict._array_object import Array # isort:skip # type: ignore[no-redef] @pytest.fixture def arrays() -> tuple[xr.DataArray, xr.DataArray]: np_arr = xr.DataArray( np.array([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]), dims=("x", "y"), 
coords={"x": [10, 20]}, ) xp_arr = xr.DataArray( xp.asarray([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]), dims=("x", "y"), coords={"x": [10, 20]}, ) assert isinstance(xp_arr.data, Array) return np_arr, xp_arr def test_arithmetic(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr + 7 actual = xp_arr + 7 assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_aggregation(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.sum() actual = xp_arr.sum() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_aggregation_skipna(arrays) -> None: np_arr, xp_arr = arrays expected = np_arr.sum(skipna=False) actual = xp_arr.sum(skipna=False) assert isinstance(actual.data, Array) assert_equal(actual, expected) # casting nan warns @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") def test_astype(arrays) -> None: np_arr, xp_arr = arrays expected = np_arr.astype(np.int64) actual = xp_arr.astype(xp.int64) assert actual.dtype == xp.int64 assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_broadcast(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays np_arr2 = xr.DataArray(np.array([1.0, 2.0]), dims="x") xp_arr2 = xr.DataArray(xp.asarray([1.0, 2.0]), dims="x") expected = xr.broadcast(np_arr, np_arr2) actual = xr.broadcast(xp_arr, xp_arr2) assert len(actual) == len(expected) for a, e in zip(actual, expected, strict=True): assert isinstance(a.data, Array) assert_equal(a, e) def test_broadcast_during_arithmetic(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays np_arr2 = xr.DataArray(np.array([1.0, 2.0]), dims="x") xp_arr2 = xr.DataArray(xp.asarray([1.0, 2.0]), dims="x") expected = np_arr * np_arr2 actual = xp_arr * xp_arr2 assert isinstance(actual.data, Array) assert_equal(actual, expected) expected = np_arr2 * np_arr actual = xp_arr2 * xp_arr assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_concat(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = xr.concat((np_arr, np_arr), dim="x") actual = xr.concat((xp_arr, xp_arr), dim="x") assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_indexing(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr[:, 0] actual = xp_arr[:, 0] assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_properties(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.data.nbytes assert np_arr.nbytes == expected assert xp_arr.nbytes == expected def test_reorganizing_operation(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.transpose() actual = xp_arr.transpose() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_stack(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.stack(z=("x", "y")) actual = xp_arr.stack(z=("x", "y")) assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_unstack(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.stack(z=("x", "y")).unstack() actual = xp_arr.stack(z=("x", "y")).unstack() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_where() -> None: np_arr = xr.DataArray(np.array([1, 0]), dims="x") xp_arr = xr.DataArray(xp.asarray([1, 0]), dims="x") 
expected = xr.where(np_arr, 1, 0) actual = xr.where(xp_arr, 1, 0) assert isinstance(actual.data, Array) assert_equal(actual, expected) xarray-2025.12.0/xarray/tests/test_assertions.py000066400000000000000000000156041511464676000216240ustar00rootroot00000000000000from __future__ import annotations import warnings import numpy as np import pytest import xarray as xr from xarray.tests import has_dask try: from dask.array import from_array as dask_from_array except ImportError: dask_from_array = lambda x: x # type: ignore[assignment, misc] try: import pint unit_registry: pint.UnitRegistry = pint.UnitRegistry(force_ndarray_like=True) def quantity(x): return unit_registry.Quantity(x, "m") has_pint = True except ImportError: def quantity(x): return x has_pint = False def test_allclose_regression() -> None: x = xr.DataArray(1.01) y = xr.DataArray(1.02) xr.testing.assert_allclose(x, y, atol=0.01) @pytest.mark.parametrize( "obj1,obj2", ( pytest.param( xr.Variable("x", [1e-17, 2]), xr.Variable("x", [0, 3]), id="Variable" ), pytest.param( xr.DataArray([1e-17, 2], dims="x"), xr.DataArray([0, 3], dims="x"), id="DataArray", ), pytest.param( xr.Dataset({"a": ("x", [1e-17, 2]), "b": ("y", [-2e-18, 2])}), xr.Dataset({"a": ("x", [0, 2]), "b": ("y", [0, 1])}), id="Dataset", ), pytest.param( xr.DataArray(np.array("a", dtype="|S1")), xr.DataArray(np.array("b", dtype="|S1")), id="DataArray_with_character_dtype", ), pytest.param( xr.Coordinates({"x": [1e-17, 2]}), xr.Coordinates({"x": [0, 3]}), id="Coordinates", ), pytest.param( xr.DataTree.from_dict( { "/b": xr.Dataset({"a": ("x", [1e-17, 2]), "b": ("y", [-2e-18, 2])}), } ), xr.DataTree.from_dict( { "/b": xr.Dataset({"a": ("x", [0, 2]), "b": ("y", [0, 1])}), } ), id="DataTree", ), ), ) def test_assert_allclose(obj1, obj2) -> None: with pytest.raises(AssertionError): xr.testing.assert_allclose(obj1, obj2) with pytest.raises(AssertionError): xr.testing.assert_allclose(obj1, obj2, check_dim_order=False) @pytest.mark.parametrize("func", ["assert_equal", "assert_allclose"]) def test_assert_allclose_equal_transpose(func) -> None: """Transposed DataArray raises assertion unless check_dim_order=False.""" obj1 = xr.DataArray([[0, 1, 2], [2, 3, 4]], dims=["a", "b"]) obj2 = xr.DataArray([[0, 2], [1, 3], [2, 4]], dims=["b", "a"]) with pytest.raises(AssertionError): getattr(xr.testing, func)(obj1, obj2) getattr(xr.testing, func)(obj1, obj2, check_dim_order=False) ds1 = obj1.to_dataset(name="varname") ds1["var2"] = obj1 ds2 = obj1.to_dataset(name="varname") ds2["var2"] = obj1.transpose() with pytest.raises(AssertionError): getattr(xr.testing, func)(ds1, ds2) getattr(xr.testing, func)(ds1, ds2, check_dim_order=False) def test_assert_equal_transpose_datatree() -> None: """Ensure `check_dim_order=False` works for transposed DataTree""" ds = xr.Dataset(data_vars={"data": (("x", "y"), [[1, 2]])}) a = xr.DataTree.from_dict({"node": ds}) b = xr.DataTree.from_dict({"node": ds.transpose("y", "x")}) with pytest.raises(AssertionError): xr.testing.assert_equal(a, b) xr.testing.assert_equal(a, b, check_dim_order=False) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize( "duckarray", ( pytest.param(np.array, id="numpy"), pytest.param( dask_from_array, id="dask", marks=pytest.mark.skipif(not has_dask, reason="requires dask"), ), pytest.param( quantity, id="pint", marks=pytest.mark.skipif(not has_pint, reason="requires pint"), ), ), ) @pytest.mark.parametrize( ["obj1", "obj2"], ( pytest.param([1e-10, 2], [0.0, 2.0], id="both arrays"), pytest.param([1e-17, 2], 0.0, id="second 
scalar"), pytest.param(0.0, [1e-17, 2], id="first scalar"), ), ) def test_assert_duckarray_equal_failing(duckarray, obj1, obj2) -> None: # TODO: actually check the repr a = duckarray(obj1) b = duckarray(obj2) with pytest.raises(AssertionError): xr.testing.assert_duckarray_equal(a, b) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize( "duckarray", ( pytest.param( np.array, id="numpy", ), pytest.param( dask_from_array, id="dask", marks=pytest.mark.skipif(not has_dask, reason="requires dask"), ), pytest.param( quantity, id="pint", marks=pytest.mark.skipif(not has_pint, reason="requires pint"), ), ), ) @pytest.mark.parametrize( ["obj1", "obj2"], ( pytest.param([0, 2], [0.0, 2.0], id="both arrays"), pytest.param([0, 0], 0.0, id="second scalar"), pytest.param(0.0, [0, 0], id="first scalar"), ), ) def test_assert_duckarray_equal(duckarray, obj1, obj2) -> None: a = duckarray(obj1) b = duckarray(obj2) xr.testing.assert_duckarray_equal(a, b) @pytest.mark.parametrize( "func", [ "assert_equal", "assert_identical", "assert_allclose", "assert_duckarray_equal", "assert_duckarray_allclose", ], ) def test_ensure_warnings_not_elevated(func) -> None: # make sure warnings are not elevated to errors in the assertion functions # e.g. by @pytest.mark.filterwarnings("error") # see https://github.com/pydata/xarray/pull/4760#issuecomment-774101639 # define a custom Variable class that raises a warning in assert_* class WarningVariable(xr.Variable): @property # type: ignore[misc] def dims(self): warnings.warn("warning in test", stacklevel=2) return super().dims def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: warnings.warn("warning in test", stacklevel=2) return super().__array__(dtype, copy=copy) a = WarningVariable("x", [1]) b = WarningVariable("x", [2]) with warnings.catch_warnings(record=True) as w: # elevate warnings to errors warnings.filterwarnings("error") with pytest.raises(AssertionError): getattr(xr.testing, func)(a, b) assert len(w) > 0 # ensure warnings still raise outside of assert_* with pytest.raises(UserWarning): warnings.warn("test", stacklevel=2) # ensure warnings stay ignored in assert_* with warnings.catch_warnings(record=True) as w: # ignore warnings warnings.filterwarnings("ignore") with pytest.raises(AssertionError): getattr(xr.testing, func)(a, b) assert len(w) == 0 xarray-2025.12.0/xarray/tests/test_backends.py000066400000000000000000011606421511464676000212100ustar00rootroot00000000000000from __future__ import annotations import asyncio import contextlib import gzip import itertools import math import os.path import pickle import platform import re import shutil import sys import tempfile import uuid import warnings from collections import ChainMap from collections.abc import Generator, Iterator, Mapping from contextlib import ExitStack from importlib import import_module from io import BytesIO from pathlib import Path from typing import TYPE_CHECKING, Any, Final, Literal, cast from unittest.mock import patch import numpy as np import pandas as pd import pytest from packaging.version import Version from pandas.errors import OutOfBoundsDatetime import xarray as xr import xarray.testing as xrt from xarray import ( DataArray, Dataset, DataTree, backends, load_dataarray, load_dataset, load_datatree, open_dataarray, open_dataset, open_mfdataset, save_mfdataset, ) from xarray.backends.common import robust_getitem from xarray.backends.h5netcdf_ import H5netcdfBackendEntrypoint from xarray.backends.netcdf3 import 
_nc3_dtype_coercions from xarray.backends.netCDF4_ import ( NetCDF4BackendEntrypoint, _extract_nc4_variable_encoding, ) from xarray.backends.pydap_ import PydapDataStore from xarray.backends.scipy_ import ScipyBackendEntrypoint from xarray.backends.zarr import ZarrStore from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding.cftime_offsets import date_range from xarray.coding.strings import check_vlen_dtype, create_vlen_dtype from xarray.coding.variables import SerializationWarning from xarray.conventions import encode_dataset_coordinates from xarray.core import indexing from xarray.core.common import _contains_cftime_datetimes from xarray.core.indexes import PandasIndex from xarray.core.options import set_options from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import module_available from xarray.namedarray.pycompat import array_type from xarray.structure.alignment import AlignmentError from xarray.tests import ( assert_allclose, assert_array_equal, assert_equal, assert_identical, assert_no_warnings, has_dask, has_h5netcdf_1_4_0_or_above, has_netCDF4, has_numpy_2, has_scipy, has_zarr, has_zarr_v3, has_zarr_v3_async_oindex, has_zarr_v3_dtypes, mock, network, parametrize_zarr_format, requires_cftime, requires_dask, requires_fsspec, requires_h5netcdf, requires_h5netcdf_1_4_0_or_above, requires_h5netcdf_1_7_0_or_above, requires_h5netcdf_or_netCDF4, requires_h5netcdf_ros3, requires_iris, requires_netcdf, requires_netCDF4, requires_netCDF4_1_6_2_or_above, requires_netCDF4_1_7_0_or_above, requires_pydap, requires_scipy, requires_scipy_or_netCDF4, requires_zarr, requires_zarr_v3, ) from xarray.tests.test_coding_times import ( _ALL_CALENDARS, _NON_STANDARD_CALENDARS, _STANDARD_CALENDARS, ) from xarray.tests.test_dataset import ( create_append_string_length_mismatch_test_data, create_append_test_data, create_test_data, ) with contextlib.suppress(ImportError): import netCDF4 as nc4 with contextlib.suppress(ImportError): import dask import dask.array as da with contextlib.suppress(ImportError): import fsspec if has_zarr: import zarr import zarr.codecs if has_zarr_v3: from zarr.storage import MemoryStore as KVStore from zarr.storage import WrapperStore ZARR_FORMATS = [2, 3] else: ZARR_FORMATS = [2] try: from zarr import ( # type: ignore[attr-defined,no-redef,unused-ignore] KVStoreV3 as KVStore, ) except ImportError: KVStore = None # type: ignore[assignment,misc,unused-ignore] WrapperStore = object # type: ignore[assignment,misc,unused-ignore] else: KVStore = None # type: ignore[assignment,misc,unused-ignore] WrapperStore = object # type: ignore[assignment,misc,unused-ignore] ZARR_FORMATS = [] @pytest.fixture(scope="module", params=ZARR_FORMATS) def default_zarr_format(request) -> Generator[None, None]: if has_zarr_v3: with zarr.config.set(default_zarr_format=request.param): yield else: yield def skip_if_zarr_format_3(reason: str): if has_zarr_v3 and zarr.config["default_zarr_format"] == 3: pytest.skip(reason=f"Unsupported with zarr_format=3: {reason}") def skip_if_zarr_format_2(reason: str): if not has_zarr_v3 or (zarr.config["default_zarr_format"] == 2): pytest.skip(reason=f"Unsupported with zarr_format=2: {reason}") ON_WINDOWS = sys.platform == "win32" default_value = object() def _check_compression_codec_available(codec: str | None) -> bool: """Check if a compression codec is available in the netCDF4 library. Parameters ---------- codec : str or None The compression codec name (e.g., 'zstd', 'blosc_lz', etc.) 
Returns ------- bool True if the codec is available, False otherwise. """ if codec is None or codec in ("zlib", "szip"): # These are standard and should be available return True if not has_netCDF4: return False try: import os import netCDF4 # Try to create a file with the compression to test availability with tempfile.NamedTemporaryFile(suffix=".nc", delete=False) as tmp: tmp_path = tmp.name try: nc = netCDF4.Dataset(tmp_path, "w", format="NETCDF4") nc.createDimension("x", 10) # Attempt to create a variable with the compression if codec and codec.startswith("blosc"): nc.createVariable( # type: ignore[call-overload, unused-ignore] varname="test", datatype="f4", dimensions=("x",), compression=codec, blosc_shuffle=1, ) else: nc.createVariable( # type: ignore[call-overload, unused-ignore] varname="test", datatype="f4", dimensions=("x",), compression=codec ) nc.close() os.unlink(tmp_path) return True except (RuntimeError, netCDF4.NetCDF4MissingFeatureException): # Codec not available if os.path.exists(tmp_path): with contextlib.suppress(OSError): os.unlink(tmp_path) return False except Exception: # Any other error, assume codec is not available return False dask_array_type = array_type("dask") if TYPE_CHECKING: from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes def open_example_dataset(name, *args, **kwargs) -> Dataset: return open_dataset( os.path.join(os.path.dirname(__file__), "data", name), *args, **kwargs ) def open_example_mfdataset(names, *args, **kwargs) -> Dataset: return open_mfdataset( [os.path.join(os.path.dirname(__file__), "data", name) for name in names], *args, **kwargs, ) def create_masked_and_scaled_data(dtype: np.dtype) -> Dataset: x = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=dtype) encoding = { "_FillValue": -1, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), "dtype": "i2", } return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_masked_and_scaled_data(dtype: np.dtype) -> Dataset: attributes = { "_FillValue": -1, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } return Dataset( {"x": ("t", np.array([-1, -1, 0, 1, 2], dtype=np.int16), attributes)} ) def create_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": -1, "_Unsigned": "true", "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. attributes = { "_FillValue": -1, "_Unsigned": "true", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create unsigned data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -1], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_bad_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": 255, "_Unsigned": True, "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_bad_encoded_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. 
attributes = { "_FillValue": -1, "_Unsigned": True, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create signed data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -1], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_signed_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": -127, "_Unsigned": "false", "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([-1.0, 10.1, 22.7, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_signed_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. attributes = { "_FillValue": -127, "_Unsigned": "false", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create signed data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([-110, 1, 127, -127], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_unsigned_false_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": 255, "_Unsigned": "false", "dtype": "u1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([-1.0, 10.1, 22.7, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_unsigned_false_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the unsigned form. attributes = { "_FillValue": 255, "_Unsigned": "false", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create unsigned data corresponding to [-110, 1, 127, 255] signed sb = np.asarray([146, 1, 127, 255], dtype="u1") return Dataset({"x": ("t", sb, attributes)}) def create_boolean_data() -> Dataset: attributes = {"units": "-"} return Dataset( { "x": ( ("t", "x"), [[False, True, False, True], [True, False, False, True]], attributes, ) } ) class TestCommon: def test_robust_getitem(self) -> None: class UnreliableArrayFailure(Exception): pass class UnreliableArray: def __init__(self, array, failures=1): self.array = array self.failures = failures def __getitem__(self, key): if self.failures > 0: self.failures -= 1 raise UnreliableArrayFailure return self.array[key] array = UnreliableArray([0]) with pytest.raises(UnreliableArrayFailure): array[0] assert array[0] == 0 actual = robust_getitem(array, 0, catch=UnreliableArrayFailure, initial_delay=0) assert actual == 0 class NetCDF3Only: netcdf3_formats: tuple[T_NetcdfTypes, ...] 
= ("NETCDF3_CLASSIC", "NETCDF3_64BIT") @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: pass @requires_scipy def test_dtype_coercion_error(self) -> None: """Failing dtype coercion should lead to an error""" for dtype, format in itertools.product( _nc3_dtype_coercions, self.netcdf3_formats ): if dtype == "bool": # coerced upcast (bool to int8) ==> can never fail continue # Using the largest representable value, create some data that will # no longer compare equal after the coerced downcast maxval = np.iinfo(dtype).max x = np.array([0, 1, 2, maxval], dtype=dtype) ds = Dataset({"x": ("t", x, {})}) with create_tmp_file(allow_cleanup_failure=False) as path: with pytest.raises(ValueError, match="could not safely cast"): ds.to_netcdf(path, format=format) class DatasetIOBase: engine: T_NetcdfEngine | None = None file_format: T_NetcdfTypes | None = None def create_store(self): raise NotImplementedError() @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path: self.save(data, path, **save_kwargs) with self.open(path, **open_kwargs) as ds: yield ds @contextlib.contextmanager def roundtrip_append( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path: for i, key in enumerate(data.variables): mode = "a" if i > 0 else "w" self.save(data[[key]], path, mode=mode, **save_kwargs) with self.open(path, **open_kwargs) as ds: yield ds # The save/open methods may be overwritten below def save(self, dataset, path, **kwargs): return dataset.to_netcdf( path, engine=self.engine, format=self.file_format, **kwargs ) @contextlib.contextmanager def open(self, path, **kwargs): with open_dataset(path, engine=self.engine, **kwargs) as ds: yield ds def test_zero_dimensional_variable(self) -> None: expected = create_test_data() expected["float_var"] = ([], 1.0e9, {"units": "units of awesome"}) expected["bytes_var"] = ([], b"foobar") expected["string_var"] = ([], "foobar") with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_write_store(self) -> None: expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) # we need to cf decode the store because it has time and # non-dimension coordinates with xr.decode_cf(store) as actual: assert_allclose(expected, actual) def check_dtypes_roundtripped(self, expected, actual): for k in expected.variables: expected_dtype = expected.variables[k].dtype # For NetCDF3, the backend should perform dtype coercion if ( isinstance(self, NetCDF3Only) and str(expected_dtype) in _nc3_dtype_coercions ): expected_dtype = np.dtype(_nc3_dtype_coercions[str(expected_dtype)]) actual_dtype = actual.variables[k].dtype # TODO: check expected behavior for string dtypes more carefully string_kinds = {"O", "S", "U"} assert expected_dtype == actual_dtype or ( expected_dtype.kind in string_kinds and actual_dtype.kind in string_kinds ) def test_roundtrip_test_data(self) -> None: expected = create_test_data() with self.roundtrip(expected) as actual: self.check_dtypes_roundtripped(expected, actual) assert_identical(expected, actual) def test_load(self) -> None: # Note: 
please keep this in sync with test_load_async below as much as possible! expected = create_test_data() @contextlib.contextmanager def assert_loads(vars=None): if vars is None: vars = expected with self.roundtrip(expected) as actual: for k, v in actual.variables.items(): # IndexVariables are eagerly loaded into memory assert v._in_memory == (k in actual.dims) yield actual for k, v in actual.variables.items(): if k in vars: assert v._in_memory assert_identical(expected, actual) with pytest.raises(AssertionError): # make sure the contextmanager works! with assert_loads() as ds: pass with assert_loads() as ds: ds.load() with assert_loads(["var1", "dim1", "dim2"]) as ds: ds["var1"].load() # verify we can read data even after closing the file with self.roundtrip(expected) as ds: actual = ds.load() assert_identical(expected, actual) @pytest.mark.asyncio async def test_load_async(self) -> None: # Note: please keep this in sync with test_load above as much as possible! # Copied from `test_load` on the base test class, but won't work for netcdf expected = create_test_data() @contextlib.contextmanager def assert_loads(vars=None): if vars is None: vars = expected with self.roundtrip(expected) as actual: for k, v in actual.variables.items(): # IndexVariables are eagerly loaded into memory assert v._in_memory == (k in actual.dims) yield actual for k, v in actual.variables.items(): if k in vars: assert v._in_memory assert_identical(expected, actual) with pytest.raises(AssertionError): # make sure the contextmanager works! with assert_loads() as ds: pass with assert_loads() as ds: await ds.load_async() with assert_loads(["var1", "dim1", "dim2"]) as ds: await ds["var1"].load_async() # verify we can read data even after closing the file with self.roundtrip(expected) as ds: actual = await ds.load_async() assert_identical(expected, actual) def test_dataset_compute(self) -> None: expected = create_test_data() with self.roundtrip(expected) as actual: # Test Dataset.compute() for k, v in actual.variables.items(): # IndexVariables are eagerly cached assert v._in_memory == (k in actual.dims) computed = actual.compute() for k, v in actual.variables.items(): assert v._in_memory == (k in actual.dims) for v in computed.variables.values(): assert v._in_memory assert_identical(expected, actual) assert_identical(expected, computed) def test_pickle(self) -> None: expected = Dataset({"foo": ("x", [42])}) with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped: # Windows doesn't like reopening an already open file raw_pickle = pickle.dumps(roundtripped) with pickle.loads(raw_pickle) as unpickled_ds: assert_identical(expected, unpickled_ds) def test_pickle_dataarray(self) -> None: expected = Dataset({"foo": ("x", [42])}) with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped: raw_pickle = pickle.dumps(roundtripped["foo"]) with pickle.loads(raw_pickle) as unpickled: assert_identical(expected["foo"], unpickled) def test_dataset_caching(self) -> None: expected = Dataset({"foo": ("x", [5, 6, 7])}) with self.roundtrip(expected) as actual: assert isinstance(actual.foo.variable._data, indexing.MemoryCachedArray) assert not actual.foo.variable._in_memory _ = actual.foo.values # cache assert actual.foo.variable._in_memory with self.roundtrip(expected, open_kwargs={"cache": False}) as actual: assert isinstance(actual.foo.variable._data, indexing.CopyOnWriteArray) assert not actual.foo.variable._in_memory _ = actual.foo.values # no caching assert not actual.foo.variable._in_memory def 
test_roundtrip_None_variable(self) -> None: expected = Dataset({None: (("x", "y"), [[0, 1], [2, 3]])}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_object_dtype(self) -> None: floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object) floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object) bytes_ = np.array([b"ab", b"cdef", b"g"], dtype=object) bytes_nans = np.array([b"ab", b"cdef", np.nan], dtype=object) strings = np.array(["ab", "cdef", "g"], dtype=object) strings_nans = np.array(["ab", "cdef", np.nan], dtype=object) all_nans = np.array([np.nan, np.nan], dtype=object) original = Dataset( { "floats": ("a", floats), "floats_nans": ("a", floats_nans), "bytes": ("b", bytes_), "bytes_nans": ("b", bytes_nans), "strings": ("b", strings), "strings_nans": ("b", strings_nans), "all_nans": ("c", all_nans), "nan": ([], np.nan), } ) expected = original.copy(deep=True) with self.roundtrip(original) as actual: try: assert_identical(expected, actual) except AssertionError: # Most stores use '' for nans in strings, but some don't. # First try the ideal case (where the store returns exactly) # the original Dataset), then try a more realistic case. # This currently includes all netCDF files when encoding is not # explicitly set. # https://github.com/pydata/xarray/issues/1647 # Also Zarr expected["bytes_nans"][-1] = b"" expected["strings_nans"][-1] = "" assert_identical(expected, actual) def test_roundtrip_string_data(self) -> None: expected = Dataset({"x": ("t", ["ab", "cdef"])}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_string_encoded_characters(self) -> None: expected = Dataset({"x": ("t", ["ab", "cdef"])}) expected["x"].encoding["dtype"] = "S1" with self.roundtrip(expected) as actual: assert_identical(expected, actual) assert actual["x"].encoding["_Encoding"] == "utf-8" expected["x"].encoding["_Encoding"] = "ascii" with self.roundtrip(expected) as actual: assert_identical(expected, actual) assert actual["x"].encoding["_Encoding"] == "ascii" def test_roundtrip_numpy_datetime_data(self) -> None: times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"], unit="ns") expected = Dataset({"t": ("t", times), "t0": times[0]}) kwargs = {"encoding": {"t0": {"units": "days since 1950-01-01"}}} with self.roundtrip(expected, save_kwargs=kwargs) as actual: assert_identical(expected, actual) assert actual.t0.encoding["units"] == "days since 1950-01-01" @requires_cftime def test_roundtrip_cftime_datetime_data(self) -> None: from xarray.tests.test_coding_times import _all_cftime_date_types date_types = _all_cftime_date_types() for date_type in date_types.values(): times = [date_type(1, 1, 1), date_type(1, 1, 2)] expected = Dataset({"t": ("t", times), "t0": times[0]}) kwargs = {"encoding": {"t0": {"units": "days since 0001-01-01"}}} expected_decoded_t = np.array(times) expected_decoded_t0 = np.array([date_type(1, 1, 1)]) expected_calendar = times[0].calendar with warnings.catch_warnings(): if expected_calendar in {"proleptic_gregorian", "standard"}: warnings.filterwarnings("ignore", "Unable to decode time axis") with self.roundtrip(expected, save_kwargs=kwargs) as actual: # proleptic gregorian will be decoded into numpy datetime64 # fixing to expectations if actual.t.dtype.kind == "M": dtype = actual.t.dtype expected_decoded_t = expected_decoded_t.astype(dtype) expected_decoded_t0 = expected_decoded_t0.astype(dtype) assert_array_equal(actual.t.values, expected_decoded_t) assert ( actual.t.encoding["units"] 
== "days since 0001-01-01 00:00:00.000000" ) assert actual.t.encoding["calendar"] == expected_calendar assert_array_equal(actual.t0.values, expected_decoded_t0) assert actual.t0.encoding["units"] == "days since 0001-01-01" assert actual.t.encoding["calendar"] == expected_calendar def test_roundtrip_timedelta_data(self) -> None: # todo: suggestion from review: # roundtrip large microsecond or coarser resolution timedeltas, # though we cannot test that until we fix the timedelta decoding # to support large ranges time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit("s") # type: ignore[arg-type, unused-ignore] encoding = {"units": "seconds"} expected = Dataset({"td": ("td", time_deltas), "td0": time_deltas[0]}) expected["td"].encoding = encoding expected["td0"].encoding = encoding with self.roundtrip( expected, open_kwargs={"decode_timedelta": CFTimedeltaCoder(time_unit="ns")} ) as actual: assert_identical(expected, actual) def test_roundtrip_timedelta_data_via_dtype( self, time_unit: PDDatetimeUnitOptions ) -> None: time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit(time_unit) # type: ignore[arg-type, unused-ignore] expected = Dataset( {"td": ("td", time_deltas), "td0": time_deltas[0].to_numpy()} ) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_float64_data(self) -> None: expected = Dataset({"x": ("y", np.array([1.0, 2.0, np.pi], dtype="float64"))}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) @requires_netcdf def test_roundtrip_example_1_netcdf(self) -> None: with open_example_dataset("example_1.nc") as expected: with self.roundtrip(expected) as actual: # we allow the attributes to differ since that # will depend on the encoding used. For example, # without CF encoding 'actual' will end up with # a dtype attribute. 
assert_equal(expected, actual) def test_roundtrip_coordinates(self) -> None: original = Dataset( {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])} ) with self.roundtrip(original) as actual: assert_identical(original, actual) original["foo"].encoding["coordinates"] = "y" with self.roundtrip(original, open_kwargs={"decode_coords": False}) as expected: # check roundtripping when decode_coords=False with self.roundtrip( expected, open_kwargs={"decode_coords": False} ) as actual: assert_identical(expected, actual) def test_roundtrip_global_coordinates(self) -> None: original = Dataset( {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])} ) with self.roundtrip(original) as actual: assert_identical(original, actual) # test that global "coordinates" is as expected _, attrs = encode_dataset_coordinates(original) assert attrs["coordinates"] == "y" # test warning when global "coordinates" is already set original.attrs["coordinates"] = "foo" with pytest.warns(SerializationWarning): _, attrs = encode_dataset_coordinates(original) assert attrs["coordinates"] == "foo" def test_roundtrip_coordinates_with_space(self) -> None: original = Dataset(coords={"x": 0, "y z": 1}) expected = Dataset({"y z": 1}, {"x": 0}) with pytest.warns(SerializationWarning): with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_boolean_dtype(self) -> None: original = create_boolean_data() assert original["x"].dtype == "bool" with self.roundtrip(original) as actual: assert_identical(original, actual) assert actual["x"].dtype == "bool" # this checks for preserving dtype during second roundtrip # see https://github.com/pydata/xarray/issues/7652#issuecomment-1476956975 with self.roundtrip(actual) as actual2: assert_identical(original, actual2) assert actual2["x"].dtype == "bool" with self.roundtrip(actual) as actual3: # GH10536 assert_identical(original.transpose(), actual3.transpose()) def test_orthogonal_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)} expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) # make sure the array is not yet loaded into memory assert not actual["var1"].variable._in_memory assert_identical(expected, actual) # do it twice, to make sure we're switched from orthogonal -> numpy # when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) def test_vectorized_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: indexers = { "dim1": DataArray([0, 2, 0], dims="a"), "dim2": DataArray([0, 2, 3], dims="a"), } expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) # make sure the array is not yet loaded into memory assert not actual["var1"].variable._in_memory assert_identical(expected, actual.load()) # do it twice, to make sure we're switched from # vectorized -> numpy when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) def multiple_indexing(indexers): # make sure a sequence of lazy indexings certainly works. 
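            # "Lazy" here means the backend array wrapper only records each
            # indexing operation; nothing is read from disk until .load()
            # (or .values) is called. The assertions below use the private
            # Variable._in_memory flag to verify that no premature load
            # happens while composing indexers.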
with self.roundtrip(in_memory) as on_disk: actual = on_disk["var3"] expected = in_memory["var3"] for ind in indexers: actual = actual.isel(ind) expected = expected.isel(ind) # make sure the array is not yet loaded into memory assert not actual.variable._in_memory assert_identical(expected, actual.load()) # two-staged vectorized-indexing indexers2 = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": DataArray([[0, 4], [1, 3], [2, 2]], dims=["a", "b"]), }, {"a": DataArray([0, 1], dims=["c"]), "b": DataArray([0, 1], dims=["c"])}, ] multiple_indexing(indexers2) # vectorized-slice mixed indexers3 = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(None, 10), } ] multiple_indexing(indexers3) # vectorized-integer mixed indexers4 = [ {"dim3": 0}, {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])}, {"a": slice(None, None, 2)}, ] multiple_indexing(indexers4) # vectorized-integer mixed indexers5 = [ {"dim3": 0}, {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])}, {"a": 1, "b": 0}, ] multiple_indexing(indexers5) def test_vectorized_indexing_negative_step(self) -> None: # use dask explicitly when present open_kwargs: dict[str, Any] | None if has_dask: open_kwargs = {"chunks": {}} else: open_kwargs = None in_memory = create_test_data() def multiple_indexing(indexers): # make sure a sequence of lazy indexings certainly works. with self.roundtrip(in_memory, open_kwargs=open_kwargs) as on_disk: actual = on_disk["var3"] expected = in_memory["var3"] for ind in indexers: actual = actual.isel(ind) expected = expected.isel(ind) # make sure the array is not yet loaded into memory assert not actual.variable._in_memory assert_identical(expected, actual.load()) # with negative step slice. indexers = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(-1, 1, -1), } ] multiple_indexing(indexers) # with negative step slice. indexers = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(-1, 1, -2), } ] multiple_indexing(indexers) def test_outer_indexing_reversed(self) -> None: # regression test for GH6560 ds = xr.Dataset( {"z": (("t", "p", "y", "x"), np.ones((1, 1, 31, 40)))}, ) with self.roundtrip(ds) as on_disk: subset = on_disk.isel(t=[0], p=0).z[:, ::10, ::10][:, ::-1, :] assert subset.sizes == subset.load().sizes def test_isel_dataarray(self) -> None: # Make sure isel works lazily. GH:issue:1688 in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: expected = in_memory.isel(dim2=in_memory["dim2"] < 3) actual = on_disk.isel(dim2=on_disk["dim2"] < 3) assert_identical(expected, actual) def test_empty_isel(self) -> None: # Make sure isel works lazily with empty indexer. # GH:issue:10867 in_memory = xr.Dataset({"a": ("x", np.arange(4))}, coords={"x": np.arange(4)}) with self.roundtrip(in_memory) as on_disk: expected = in_memory.isel(x=[]) actual = on_disk.isel(x=[]) assert_identical(expected, actual) def validate_array_type(self, ds): # Make sure that only NumpyIndexingAdapter stores a bare np.ndarray. def find_and_validate_array(obj): # recursively called function. obj: array or array wrapper. 
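            # The data of each variable is typically wrapped in a chain of
            # adapters (e.g. MemoryCachedArray -> CopyOnWriteArray -> a lazy
            # indexing class); walk the ``.array`` attribute chain down to the
            # innermost wrapper and check that it matches the raw array type:
            # NumpyIndexingAdapter for np.ndarray, DaskIndexingAdapter for
            # dask arrays and PandasIndexingAdapter for pd.Index objects.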
if hasattr(obj, "array"): if isinstance(obj.array, indexing.ExplicitlyIndexed): find_and_validate_array(obj.array) elif isinstance(obj.array, np.ndarray): assert isinstance(obj, indexing.NumpyIndexingAdapter) elif isinstance(obj.array, dask_array_type): assert isinstance(obj, indexing.DaskIndexingAdapter) elif isinstance(obj.array, pd.Index): assert isinstance(obj, indexing.PandasIndexingAdapter) else: raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}") for v in ds.variables.values(): find_and_validate_array(v._data) def test_array_type_after_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: self.validate_array_type(on_disk) indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)} expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) assert_identical(expected, actual) self.validate_array_type(actual) # do it twice, to make sure we're switched from orthogonal -> numpy # when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) self.validate_array_type(actual) def test_dropna(self) -> None: # regression test for GH:issue:1694 a = np.random.randn(4, 3) a[1, 1] = np.nan in_memory = xr.Dataset( {"a": (("y", "x"), a)}, coords={"y": np.arange(4), "x": np.arange(3)} ) assert_identical( in_memory.dropna(dim="x"), in_memory.isel(x=slice(None, None, 2)) ) with self.roundtrip(in_memory) as on_disk: self.validate_array_type(on_disk) expected = in_memory.dropna(dim="x") actual = on_disk.dropna(dim="x") assert_identical(expected, actual) def test_ondisk_after_print(self) -> None: """Make sure print does not load file into memory""" in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: repr(on_disk) assert not on_disk["var1"]._in_memory class CFEncodedBase(DatasetIOBase): def test_roundtrip_bytes_with_fill_value(self) -> None: values = np.array([b"ab", b"cdef", np.nan], dtype=object) encoding = {"_FillValue": b"X", "dtype": "S1"} original = Dataset({"x": ("t", values, {}, encoding)}) expected = original.copy(deep=True) with self.roundtrip(original) as actual: assert_identical(expected, actual) original = Dataset({"x": ("t", values, {}, {"_FillValue": b""})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_string_with_fill_value_nchar(self) -> None: values = np.array(["ab", "cdef", np.nan], dtype=object) expected = Dataset({"x": ("t", values)}) encoding = {"dtype": "S1", "_FillValue": b"X"} original = Dataset({"x": ("t", values, {}, encoding)}) # Not supported yet. with pytest.raises(NotImplementedError): with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_empty_vlen_string_array(self) -> None: # checks preserving vlen dtype for empty arrays GH7862 dtype = create_vlen_dtype(str) original = Dataset({"a": np.array([], dtype=dtype)}) assert check_vlen_dtype(original["a"].dtype) is str with self.roundtrip(original) as actual: assert_identical(original, actual) if np.issubdtype(actual["a"].dtype, object): # only check metadata for capable backends # eg. 
NETCDF3 based backends do not roundtrip metadata if actual["a"].dtype.metadata is not None: assert check_vlen_dtype(actual["a"].dtype) is str else: # zarr v3 sends back " None: if hasattr(self, "zarr_version") and dtype == np.float32: pytest.skip("float32 will be treated as float64 in zarr") decoded = decoded_fn(dtype) encoded = encoded_fn(dtype) if decoded["x"].encoding["dtype"] == "u1" and not ( (self.engine == "netcdf4" and self.file_format is None) or self.file_format == "NETCDF4" ): pytest.skip("uint8 data can't be written to non-NetCDF4 data") with self.roundtrip(decoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( decoded.variables[k].encoding["_FillValue"] == actual.variables[k].encoding["_FillValue"] ) assert_allclose(decoded, actual, decode_bytes=False) with self.roundtrip(decoded, open_kwargs=dict(decode_cf=False)) as actual: # TODO: this assumes that all roundtrips will first # encode. Is that something we want to test for? for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( decoded.variables[k].encoding["_FillValue"] == actual.variables[k].attrs["_FillValue"] ) assert_allclose(encoded, actual, decode_bytes=False) with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual: for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( encoded.variables[k].attrs["_FillValue"] == actual.variables[k].attrs["_FillValue"] ) assert_allclose(encoded, actual, decode_bytes=False) # make sure roundtrip encoding didn't change the # original dataset. assert_allclose(encoded, encoded_fn(dtype), decode_bytes=False) with self.roundtrip(encoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype assert_allclose(decoded, actual, decode_bytes=False) @pytest.mark.parametrize( ("fill_value", "exp_fill_warning"), [ (np.int8(-1), False), (np.uint8(255), True), (-1, False), (255, True), ], ) def test_roundtrip_unsigned(self, fill_value, exp_fill_warning): @contextlib.contextmanager def _roundtrip_with_warnings(*args, **kwargs): is_np2 = module_available("numpy", minversion="2.0.0.dev0") if exp_fill_warning and is_np2: warn_checker: contextlib.AbstractContextManager = pytest.warns( SerializationWarning, match="_FillValue attribute can't be represented", ) else: warn_checker = contextlib.nullcontext() with warn_checker: with self.roundtrip(*args, **kwargs) as actual: yield actual # regression/numpy2 test for encoding = { "_FillValue": fill_value, "_Unsigned": "true", "dtype": "i1", } x = np.array([0, 1, 127, 128, 254, np.nan], dtype=np.float32) decoded = Dataset({"x": ("t", x, {}, encoding)}) attributes = { "_FillValue": fill_value, "_Unsigned": "true", } # Create unsigned data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -2, -1], dtype="i1") encoded = Dataset({"x": ("t", sb, attributes)}) unsigned_dtype = np.dtype(f"u{sb.dtype.itemsize}") with _roundtrip_with_warnings(decoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype exp_fv = decoded.variables[k].encoding["_FillValue"] if exp_fill_warning: exp_fv = np.array(exp_fv, dtype=unsigned_dtype).view(sb.dtype) assert exp_fv == actual.variables[k].encoding["_FillValue"] assert_allclose(decoded, actual, decode_bytes=False) with 
_roundtrip_with_warnings( decoded, open_kwargs=dict(decode_cf=False) ) as actual: for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype exp_fv = encoded.variables[k].attrs["_FillValue"] if exp_fill_warning: exp_fv = np.array(exp_fv, dtype=unsigned_dtype).view(sb.dtype) assert exp_fv == actual.variables[k].attrs["_FillValue"] assert_allclose(encoded, actual, decode_bytes=False) @staticmethod def _create_cf_dataset(): original = Dataset( dict( variable=( ("ln_p", "latitude", "longitude"), np.arange(8, dtype="f4").reshape(2, 2, 2), {"ancillary_variables": "std_devs det_lim"}, ), std_devs=( ("ln_p", "latitude", "longitude"), np.arange(0.1, 0.9, 0.1).reshape(2, 2, 2), {"standard_name": "standard_error"}, ), det_lim=( (), 0.1, {"standard_name": "detection_minimum"}, ), ), dict( latitude=("latitude", [0, 1], {"units": "degrees_north"}), longitude=("longitude", [0, 1], {"units": "degrees_east"}), latlon=((), -1, {"grid_mapping_name": "latitude_longitude"}), latitude_bnds=(("latitude", "bnds2"), [[0, 1], [1, 2]]), longitude_bnds=(("longitude", "bnds2"), [[0, 1], [1, 2]]), areas=( ("latitude", "longitude"), [[1, 1], [1, 1]], {"units": "degree^2"}, ), ln_p=( "ln_p", [1.0, 0.5], { "standard_name": "atmosphere_ln_pressure_coordinate", "computed_standard_name": "air_pressure", }, ), P0=((), 1013.25, {"units": "hPa"}), ), ) original["variable"].encoding.update( {"cell_measures": "area: areas", "grid_mapping": "latlon"}, ) original.coords["latitude"].encoding.update( dict(grid_mapping="latlon", bounds="latitude_bnds") ) original.coords["longitude"].encoding.update( dict(grid_mapping="latlon", bounds="longitude_bnds") ) original.coords["ln_p"].encoding.update({"formula_terms": "p0: P0 lev : ln_p"}) return original def test_grid_mapping_and_bounds_are_not_coordinates_in_file(self) -> None: original = self._create_cf_dataset() with self.roundtrip(original, open_kwargs={"decode_coords": False}) as ds: assert ds.coords["latitude"].attrs["bounds"] == "latitude_bnds" assert ds.coords["longitude"].attrs["bounds"] == "longitude_bnds" assert "coordinates" not in ds["variable"].attrs assert "coordinates" not in ds.attrs def test_coordinate_variables_after_dataset_roundtrip(self) -> None: original = self._create_cf_dataset() with self.roundtrip(original, open_kwargs={"decode_coords": "all"}) as actual: assert_identical(actual, original) with self.roundtrip(original) as actual: expected = original.reset_coords( ["latitude_bnds", "longitude_bnds", "areas", "P0", "latlon"] ) # equal checks that coords and data_vars are equal which # should be enough # identical would require resetting a number of attributes # skip that. assert_equal(actual, expected) def test_grid_mapping_and_bounds_are_coordinates_after_dataarray_roundtrip( self, ) -> None: original = self._create_cf_dataset() # The DataArray roundtrip should have the same warnings as the # Dataset, but we already tested for those, so just go for the # new warnings. It would appear that there is no way to tell # pytest "This warning and also this warning should both be # present". # xarray/tests/test_conventions.py::TestCFEncodedDataStore # needs the to_dataset. The other backends should be fine # without it. 
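        # Only the single DataArray is written here, so the latitude/longitude
        # bounds variables referenced by its coordinates are not in the file;
        # decoding with decode_coords="all" then warns about the dangling
        # "bounds" references, which is what the match pattern below checks.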
with pytest.warns( UserWarning, match=( r"Variable\(s\) referenced in bounds not in variables: " r"\['l(at|ong)itude_bnds'\]" ), ): with self.roundtrip( original["variable"].to_dataset(), open_kwargs={"decode_coords": "all"} ) as actual: assert_identical(actual, original["variable"].to_dataset()) @requires_iris @requires_netcdf def test_coordinate_variables_after_iris_roundtrip(self) -> None: original = self._create_cf_dataset() iris_cube = original["variable"].to_iris() actual = DataArray.from_iris(iris_cube) # Bounds will be missing (xfail) del original.coords["latitude_bnds"], original.coords["longitude_bnds"] # Ancillary vars will be missing # Those are data_vars, and will be dropped when grabbing the variable assert_identical(actual, original["variable"]) def test_coordinates_encoding(self) -> None: def equals_latlon(obj): return obj in {"lat lon", "lon lat"} original = Dataset( {"temp": ("x", [0, 1]), "precip": ("x", [0, -1])}, {"lat": ("x", [2, 3]), "lon": ("x", [4, 5])}, ) with self.roundtrip(original) as actual: assert_identical(actual, original) with self.roundtrip(original, open_kwargs=dict(decode_coords=False)) as ds: assert equals_latlon(ds["temp"].attrs["coordinates"]) assert equals_latlon(ds["precip"].attrs["coordinates"]) assert "coordinates" not in ds.attrs assert "coordinates" not in ds["lat"].attrs assert "coordinates" not in ds["lon"].attrs modified = original.drop_vars(["temp", "precip"]) with self.roundtrip(modified) as actual: assert_identical(actual, modified) with self.roundtrip(modified, open_kwargs=dict(decode_coords=False)) as ds: assert equals_latlon(ds.attrs["coordinates"]) assert "coordinates" not in ds["lat"].attrs assert "coordinates" not in ds["lon"].attrs original["temp"].encoding["coordinates"] = "lat" with self.roundtrip(original) as actual: assert_identical(actual, original) original["precip"].encoding["coordinates"] = "lat" with self.roundtrip(original, open_kwargs=dict(decode_coords=True)) as ds: assert "lon" not in ds["temp"].encoding["coordinates"] assert "lon" not in ds["precip"].encoding["coordinates"] assert "coordinates" not in ds["lat"].encoding assert "coordinates" not in ds["lon"].encoding def test_roundtrip_endian(self) -> None: skip_if_zarr_format_3("zarr v3 has not implemented endian support yet") ds = Dataset( { "x": np.arange(3, 10, dtype=">i2"), "y": np.arange(3, 20, dtype=" None: te = (TypeError, "string or None") ve = (ValueError, "string must be length 1 or") data = np.random.random((2, 2)) da = xr.DataArray(data) for name, (error, msg) in zip( [0, (4, 5), True, ""], [te, te, te, ve], strict=True ): ds = Dataset({name: da}) with pytest.raises(error) as excinfo: with self.roundtrip(ds): pass excinfo.match(msg) excinfo.match(repr(name)) def test_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs: dict[str, Any] = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: encoded_dtype = actual.x.encoding["dtype"] # On OS X, dtype sometimes switches endianness for unclear reasons assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4 assert ds.x.encoding == {} kwargs = dict(encoding={"x": {"foo": "bar"}}) with pytest.raises(ValueError, match=r"unexpected encoding"): with self.roundtrip(ds, save_kwargs=kwargs) as actual: pass kwargs = dict(encoding={"x": "foo"}) with pytest.raises(ValueError, match=r"must be castable"): with self.roundtrip(ds, save_kwargs=kwargs) as actual: pass kwargs = dict(encoding={"invalid": {}}) with pytest.raises(KeyError): with 
self.roundtrip(ds, save_kwargs=kwargs) as actual: pass def test_encoding_unlimited_dims(self) -> None: if isinstance(self, ZarrBase): pytest.skip("No unlimited_dims handled in zarr.") ds = Dataset({"x": ("y", np.arange(10.0))}) with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) ds.encoding = {"unlimited_dims": ["y"]} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 ds.encoding = {"unlimited_dims": "y"} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # test unlimited_dims validation # https://github.com/pydata/xarray/issues/10549 ds.encoding = {"unlimited_dims": "z"} with pytest.warns( UserWarning, match=r"Unlimited dimension\(s\) .* declared in 'dataset.encoding'", ): with self.roundtrip(ds) as _: pass ds.encoding = {} with pytest.raises( ValueError, match=r"Unlimited dimension\(s\) .* declared in 'unlimited_dims-kwarg'", ): with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["z"])) as _: pass def test_encoding_kwarg_dates(self) -> None: ds = Dataset({"t": pd.date_range("2000-01-01", periods=3)}) units = "days since 1900-01-01" kwargs = dict(encoding={"t": {"units": units}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.t.encoding["units"] == units assert_identical(actual, ds) def test_encoding_kwarg_fixed_width_string(self) -> None: # regression test for GH2149 for strings in [[b"foo", b"bar", b"baz"], ["foo", "bar", "baz"]]: ds = Dataset({"x": strings}) kwargs = dict(encoding={"x": {"dtype": "S1"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual["x"].encoding["dtype"] == "S1" assert_identical(actual, ds) def test_default_fill_value(self) -> None: # Test default encoding for float: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert math.isnan(actual.x.encoding["_FillValue"]) assert ds.x.encoding == {} # Test default encoding for int: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"dtype": "int16"}}) with warnings.catch_warnings(): warnings.filterwarnings("ignore", ".*floating point data as an integer") with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.x.encoding assert ds.x.encoding == {} # Test default encoding for implicit int: ds = Dataset({"x": ("y", np.arange(10, dtype="int16"))}) with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.x.encoding assert ds.x.encoding == {} def test_explicitly_omit_fill_value(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}) ds.x.encoding["_FillValue"] = None with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.x.encoding def test_explicitly_omit_fill_value_via_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}) kwargs = dict(encoding={"x": {"_FillValue": None}}) # _FillValue is not a valid encoding for Zarr with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.x.encoding assert ds.y.encoding == {} def 
test_explicitly_omit_fill_value_in_coord(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}) ds.y.encoding["_FillValue"] = None with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.y.encoding def test_explicitly_omit_fill_value_in_coord_via_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}) kwargs = dict(encoding={"y": {"_FillValue": None}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.y.encoding assert ds.y.encoding == {} def test_encoding_same_dtype(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0, dtype="f4"))}) kwargs = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: encoded_dtype = actual.x.encoding["dtype"] # On OS X, dtype sometimes switches endianness for unclear reasons assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4 assert ds.x.encoding == {} def test_append_write(self) -> None: # regression for GH1215 data = create_test_data() with self.roundtrip_append(data) as actual: assert_identical(data, actual) def test_append_overwrite_values(self) -> None: # regression for GH1215 data = create_test_data() with create_tmp_file(allow_cleanup_failure=False) as tmp_file: self.save(data, tmp_file, mode="w") data["var2"][:] = -999 data["var9"] = data["var2"] * 3 self.save(data[["var2", "var9"]], tmp_file, mode="a") with self.open(tmp_file) as actual: assert_identical(data, actual) def test_append_with_invalid_dim_raises(self) -> None: data = create_test_data() with create_tmp_file(allow_cleanup_failure=False) as tmp_file: self.save(data, tmp_file, mode="w") data["var9"] = data["var2"] * 3 data = data.isel(dim1=slice(2, 6)) # modify one dimension with pytest.raises( ValueError, match=r"Unable to update size for existing dimension" ): self.save(data, tmp_file, mode="a") def test_multiindex_not_implemented(self) -> None: ds = Dataset(coords={"y": ("x", [1, 2]), "z": ("x", ["a", "b"])}).set_index( x=["y", "z"] ) with pytest.raises(NotImplementedError, match=r"MultiIndex"): with self.roundtrip(ds): pass # regression GH8628 (can serialize reset multi-index level coordinates) ds_reset = ds.reset_index("x") with self.roundtrip(ds_reset) as actual: assert_identical(actual, ds_reset) @requires_dask def test_string_object_warning(self) -> None: original = Dataset( { "x": ( [ "y", ], np.array(["foo", "bar"], dtype=object), ) } ).chunk() with pytest.warns(SerializationWarning, match="dask array with dtype=object"): with self.roundtrip(original) as actual: assert_identical(original, actual) @pytest.mark.parametrize( "indexer", ( {"y": [1]}, {"y": slice(2)}, {"y": 1}, {"x": [1], "y": [1]}, {"x": ("x0", [0, 1]), "y": ("x0", [0, 1])}, ), ) def test_indexing_roundtrip(self, indexer) -> None: # regression test for GH8909 ds = xr.Dataset() ds["A"] = xr.DataArray([[1, "a"], [2, "b"]], dims=["x", "y"]) with self.roundtrip(ds) as ds2: expected = ds2.sel(indexer) with self.roundtrip(expected) as actual: assert_identical(actual, expected) class NetCDFBase(CFEncodedBase): """Tests for all netCDF3 and netCDF4 backends.""" @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() @pytest.mark.skipif( ON_WINDOWS, reason="Windows does not allow modifying open files" ) def test_refresh_from_disk(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4862 with create_tmp_file() as 
example_1_path: with create_tmp_file() as example_1_modified_path: with open_example_dataset("example_1.nc") as example_1: self.save(example_1, example_1_path) example_1.rh.values += 100 self.save(example_1, example_1_modified_path) a = open_dataset(example_1_path, engine=self.engine).load() # Simulate external process modifying example_1.nc while this script is running shutil.copy(example_1_modified_path, example_1_path) # Reopen example_1.nc (modified) as `b`; note that `a` has NOT been closed b = open_dataset(example_1_path, engine=self.engine).load() try: assert not np.array_equal(a.rh.values, b.rh.values) finally: a.close() b.close() def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None: # test for issue #9407 input = byte_attrs_dataset["input"] expected = byte_attrs_dataset["expected"] with self.roundtrip(input) as actual: assert_identical(actual, expected) _counter = itertools.count() @contextlib.contextmanager def create_tmp_file( suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[str]: temp_dir = tempfile.mkdtemp() path = os.path.join(temp_dir, f"temp-{next(_counter)}{suffix}") try: yield path finally: try: shutil.rmtree(temp_dir) except OSError: if not allow_cleanup_failure: raise @contextlib.contextmanager def create_tmp_files( nfiles: int, suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[list[str]]: with ExitStack() as stack: files = [ stack.enter_context(create_tmp_file(suffix, allow_cleanup_failure)) for _ in range(nfiles) ] yield files class NetCDF4Base(NetCDFBase): """Tests for both netCDF4-python and h5netcdf.""" engine: T_NetcdfEngine = "netcdf4" def test_open_group(self) -> None: # Create a netCDF file with a dataset stored within a group with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as rootgrp: foogrp = rootgrp.createGroup("foo") ds = foogrp ds.createDimension("time", size=10) x = np.arange(10) ds.createVariable("x", np.int32, dimensions=("time",)) ds.variables["x"][:] = x expected = Dataset() expected["x"] = ("time", x) # check equivalent ways to specify group for group in "foo", "/foo", "foo/", "/foo/": with self.open(tmp_file, group=group) as actual: assert_equal(actual["x"], expected["x"]) # check that missing group raises appropriate exception with pytest.raises(OSError): open_dataset(tmp_file, group="bar") with pytest.raises(ValueError, match=r"must be a string"): open_dataset(tmp_file, group=(1, 2, 3)) def test_open_subgroup(self) -> None: # Create a netCDF file with a dataset stored within a group within a # group with create_tmp_file() as tmp_file: rootgrp = nc4.Dataset(tmp_file, "w") foogrp = rootgrp.createGroup("foo") bargrp = foogrp.createGroup("bar") ds = bargrp ds.createDimension("time", size=10) x = np.arange(10) ds.createVariable("x", np.int32, dimensions=("time",)) ds.variables["x"][:] = x rootgrp.close() expected = Dataset() expected["x"] = ("time", x) # check equivalent ways to specify group for group in "foo/bar", "/foo/bar", "foo/bar/", "/foo/bar/": with self.open(tmp_file, group=group) as actual: assert_equal(actual["x"], expected["x"]) def test_write_groups(self) -> None: data1 = create_test_data() data2 = data1 * 2 with create_tmp_file() as tmp_file: self.save(data1, tmp_file, group="data/1") self.save(data2, tmp_file, group="data/2", mode="a") with self.open(tmp_file, group="data/1") as actual1: assert_identical(data1, actual1) with self.open(tmp_file, group="data/2") as actual2: assert_identical(data2, actual2) def test_child_group_with_inconsistent_dimensions(self) 
-> None: base = Dataset(coords={"x": [1, 2]}) child = Dataset(coords={"x": [1, 2, 3]}) with create_tmp_file() as tmp_file: self.save(base, tmp_file) self.save(child, tmp_file, group="child", mode="a") with self.open(tmp_file) as actual_base: assert_identical(base, actual_base) with self.open(tmp_file, group="child") as actual_child: assert_identical(child, actual_child) @pytest.mark.parametrize( "input_strings, is_bytes", [ ([b"foo", b"bar", b"baz"], True), (["foo", "bar", "baz"], False), (["foรณ", "bรกr", "baลบ"], False), ], ) def test_encoding_kwarg_vlen_string( self, input_strings: list[str], is_bytes: bool ) -> None: original = Dataset({"x": input_strings}) expected_string = ["foo", "bar", "baz"] if is_bytes else input_strings expected = Dataset({"x": expected_string}) kwargs = dict(encoding={"x": {"dtype": str}}) with self.roundtrip(original, save_kwargs=kwargs) as actual: assert actual["x"].encoding["dtype"] == "=U3" assert actual["x"].dtype == "=U3" assert_identical(actual, expected) @pytest.mark.parametrize("fill_value", ["XXX", "", "bรกr"]) def test_roundtrip_string_with_fill_value_vlen(self, fill_value: str) -> None: values = np.array(["ab", "cdef", np.nan], dtype=object) expected = Dataset({"x": ("t", values)}) original = Dataset({"x": ("t", values, {}, {"_FillValue": fill_value})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) original = Dataset({"x": ("t", values, {}, {"_FillValue": ""})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_character_array(self) -> None: with create_tmp_file() as tmp_file: values = np.array([["a", "b", "c"], ["d", "e", "f"]], dtype="S") with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 2) nc.createDimension("string3", 3) v = nc.createVariable("x", np.dtype("S1"), ("x", "string3")) v[:] = values values = np.array(["abc", "def"], dtype="S") expected = Dataset({"x": ("x", values)}) with open_dataset(tmp_file) as actual: assert_identical(expected, actual) # regression test for #157 with self.roundtrip(actual) as roundtripped: assert_identical(expected, roundtripped) def test_default_to_char_arrays(self) -> None: data = Dataset({"x": np.array(["foo", "zzzz"], dtype="S")}) with self.roundtrip(data) as actual: assert_identical(data, actual) assert actual["x"].dtype == np.dtype("S4") def test_open_encodings(self) -> None: # Create a netCDF file with explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as ds: ds.createDimension("time", size=10) ds.createVariable("time", np.int32, dimensions=("time",)) units = "days since 1999-01-01" ds.variables["time"].setncattr("units", units) ds.variables["time"][:] = np.arange(10) + 4 expected = Dataset() time = pd.date_range("1999-01-05", periods=10, unit="ns") encoding = {"units": units, "dtype": np.dtype("int32")} expected["time"] = ("time", time, {}, encoding) with open_dataset(tmp_file) as actual: assert_equal(actual["time"], expected["time"]) actual_encoding = { k: v for k, v in actual["time"].encoding.items() if k in expected["time"].encoding } assert actual_encoding == expected["time"].encoding def test_dump_encodings(self) -> None: # regression test for #709 ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"zlib": True}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["zlib"] def test_dump_and_open_encodings(self) -> None: # Create a netCDF file with 
explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as ds: ds.createDimension("time", size=10) ds.createVariable("time", np.int32, dimensions=("time",)) units = "days since 1999-01-01" ds.variables["time"].setncattr("units", units) ds.variables["time"][:] = np.arange(10) + 4 with open_dataset(tmp_file) as xarray_dataset: with create_tmp_file() as tmp_file2: xarray_dataset.to_netcdf(tmp_file2) with nc4.Dataset(tmp_file2, "r") as ds: assert ds.variables["time"].getncattr("units") == units assert_array_equal(ds.variables["time"], np.arange(10) + 4) def test_compression_encoding_legacy(self) -> None: data = create_test_data() data["var2"].encoding.update( { "zlib": True, "chunksizes": (5, 5), "fletcher32": True, "shuffle": True, "original_shape": data.var2.shape, } ) with self.roundtrip(data) as actual: for k, v in data["var2"].encoding.items(): assert v == actual["var2"].encoding[k] # regression test for #156 expected = data.isel(dim1=0) with self.roundtrip(expected) as actual: assert_equal(expected, actual) def test_encoding_kwarg_compression(self) -> None: ds = Dataset({"x": np.arange(10.0)}) encoding = dict( dtype="f4", zlib=True, complevel=9, fletcher32=True, chunksizes=(5,), shuffle=True, ) kwargs = dict(encoding=dict(x=encoding)) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert_equal(actual, ds) assert actual.x.encoding["dtype"] == "f4" assert actual.x.encoding["zlib"] assert actual.x.encoding["complevel"] == 9 assert actual.x.encoding["fletcher32"] assert actual.x.encoding["chunksizes"] == (5,) assert actual.x.encoding["shuffle"] assert ds.x.encoding == {} def test_keep_chunksizes_if_no_original_shape(self) -> None: ds = Dataset({"x": [1, 2, 3]}) chunksizes = (2,) ds.variables["x"].encoding = {"chunksizes": chunksizes} with self.roundtrip(ds) as actual: assert_identical(ds, actual) assert_array_equal( ds["x"].encoding["chunksizes"], actual["x"].encoding["chunksizes"] ) def test_preferred_chunks_is_present(self) -> None: ds = Dataset({"x": [1, 2, 3]}) chunksizes = (2,) ds.variables["x"].encoding = {"chunksizes": chunksizes} with self.roundtrip(ds) as actual: assert actual["x"].encoding["preferred_chunks"] == {"x": 2} @requires_dask def test_auto_chunking_is_based_on_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with dask.config.set({"array.chunk-size": "100KiB"}): with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize), open_kwargs={"chunks": "auto"}, ) as ds: _t_chunks, y_chunks, x_chunks = ds["image"].data.chunks assert all(np.asanyarray(y_chunks) == y_chunksize) # Check that the chunk size is a multiple of the file chunk size assert all(np.asanyarray(x_chunks) % x_chunksize == 0) @requires_dask def test_base_chunking_uses_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize), open_kwargs={"chunks": {}}, ) as ds: for chunksizes, expected in zip( ds["image"].data.chunks, (1, y_chunksize, x_chunksize), strict=True ): assert all(np.asanyarray(chunksizes) == expected) @contextlib.contextmanager def chunked_roundtrip( self, array_shape: tuple[int, int, int], chunk_sizes: tuple[int, int, int], open_kwargs: dict[str, Any] | None = None, ) -> Generator[Dataset, None, None]: t_size, y_size, x_size = array_shape t_chunksize, y_chunksize, x_chunksize = chunk_sizes image = xr.DataArray( 
np.arange(t_size * x_size * y_size, dtype=np.int16).reshape( (t_size, y_size, x_size) ), dims=["t", "y", "x"], ) image.encoding = {"chunksizes": (t_chunksize, y_chunksize, x_chunksize)} dataset = xr.Dataset(dict(image=image)) with self.roundtrip(dataset, open_kwargs=open_kwargs) as ds: yield ds def test_preferred_chunks_are_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize) ) as ds: assert ds["image"].encoding["preferred_chunks"] == { "t": 1, "y": y_chunksize, "x": x_chunksize, } def test_encoding_chunksizes_unlimited(self) -> None: # regression test for GH1225 ds = Dataset({"x": [1, 2, 3], "y": ("x", [2, 3, 4])}) ds.variables["x"].encoding = { "zlib": False, "shuffle": False, "complevel": 0, "fletcher32": False, "contiguous": False, "chunksizes": (2**20,), "original_shape": (3,), } with self.roundtrip(ds) as actual: assert_equal(ds, actual) def test_mask_and_scale(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("t", 5) nc.createVariable("x", "int16", ("t",), fill_value=-1) v = nc.variables["x"] v.set_auto_maskandscale(False) v.add_offset = 10 v.scale_factor = 0.1 v[:] = np.array([-1, -1, 0, 1, 2]) dtype = type(v.scale_factor) # first make sure netCDF4 reads the masked and scaled data # correctly with nc4.Dataset(tmp_file, mode="r") as nc: expected = np.ma.array( [-1, -1, 10, 10.1, 10.2], mask=[True, True, False, False, False] ) actual = nc.variables["x"][:] assert_array_equal(expected, actual) # now check xarray with open_dataset(tmp_file) as ds: expected = create_masked_and_scaled_data(np.dtype(dtype)) assert_identical(expected, ds) def test_0dimensional_variable(self) -> None: # This fix verifies our work-around to this netCDF4-python bug: # https://github.com/Unidata/netcdf4-python/pull/220 with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: v = nc.createVariable("x", "int16") v[...] = 123 with open_dataset(tmp_file) as ds: expected = Dataset({"x": ((), 123)}) assert_identical(expected, ds) def test_read_variable_len_strings(self) -> None: with create_tmp_file() as tmp_file: values = np.array(["foo", "bar", "baz"], dtype=object) with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 3) v = nc.createVariable("x", str, ("x",)) v[:] = values expected = Dataset({"x": ("x", values)}) for kwargs in [{}, {"decode_cf": True}]: with open_dataset(tmp_file, **cast(dict, kwargs)) as actual: assert_identical(expected, actual) def test_raise_on_forward_slashes_in_names(self) -> None: # test for forward slash in variable names and dimensions # see GH 7943 data_vars: list[dict[str, Any]] = [ {"PASS/FAIL": (["PASSFAIL"], np.array([0]))}, {"PASS/FAIL": np.array([0])}, {"PASSFAIL": (["PASS/FAIL"], np.array([0]))}, ] for dv in data_vars: ds = Dataset(data_vars=dv) with pytest.raises(ValueError, match="Forward slashes '/' are not allowed"): with self.roundtrip(ds): pass @requires_netCDF4 def test_encoding_enum__no_fill_value(self, recwarn): with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) v = nc.createVariable( "clouds", cloud_type, "time", fill_value=None, ) v[:] = 1 with open_dataset(tmp_file, engine="netcdf4") as original: save_kwargs = {} # We don't expect any errors. 
# This is effectively a void context manager expected_warnings = 0 if self.engine == "h5netcdf": if not has_h5netcdf_1_4_0_or_above: save_kwargs["invalid_netcdf"] = True expected_warnings = 1 expected_msg = "You are writing invalid netcdf features to file" else: expected_warnings = 1 expected_msg = "Creating variable with default fill_value 0 which IS defined in enum type" with self.roundtrip(original, save_kwargs=save_kwargs) as actual: assert len(recwarn) == expected_warnings if expected_warnings: assert issubclass(recwarn[0].category, UserWarning) assert str(recwarn[0].message).startswith(expected_msg) assert_equal(original, actual) assert ( actual.clouds.encoding["dtype"].metadata["enum"] == cloud_type_dict ) if not ( self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above ): # not implemented in h5netcdf yet assert ( actual.clouds.encoding["dtype"].metadata["enum_name"] == "cloud_type" ) @requires_netCDF4 def test_encoding_enum__multiple_variable_with_enum(self): with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1, "missing": 255} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) nc.createVariable( "clouds", cloud_type, "time", fill_value=255, ) nc.createVariable( "tifa", cloud_type, "time", fill_value=255, ) with open_dataset(tmp_file, engine="netcdf4") as original: save_kwargs = {} if self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above: save_kwargs["invalid_netcdf"] = True with self.roundtrip(original, save_kwargs=save_kwargs) as actual: assert_equal(original, actual) assert ( actual.clouds.encoding["dtype"] == actual.tifa.encoding["dtype"] ) assert ( actual.clouds.encoding["dtype"].metadata == actual.tifa.encoding["dtype"].metadata ) assert ( actual.clouds.encoding["dtype"].metadata["enum"] == cloud_type_dict ) if not ( self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above ): # not implemented in h5netcdf yet assert ( actual.clouds.encoding["dtype"].metadata["enum_name"] == "cloud_type" ) @requires_netCDF4 def test_encoding_enum__error_multiple_variable_with_changing_enum(self): """ Given 2 variables, if they share the same enum type, the 2 enum definition should be identical. 
""" with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1, "missing": 255} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) nc.createVariable( "clouds", cloud_type, "time", fill_value=255, ) nc.createVariable( "tifa", cloud_type, "time", fill_value=255, ) with open_dataset(tmp_file, engine="netcdf4") as original: assert ( original.clouds.encoding["dtype"].metadata == original.tifa.encoding["dtype"].metadata ) modified_enum = original.clouds.encoding["dtype"].metadata["enum"] modified_enum.update({"neblig": 2}) original.clouds.encoding["dtype"] = np.dtype( "u1", metadata={"enum": modified_enum, "enum_name": "cloud_type"}, ) if not (self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above): # not implemented yet in h5netcdf with pytest.raises( ValueError, match=( r"Cannot save variable .*" r" because an enum `cloud_type` already exists in the Dataset .*" ), ): with self.roundtrip(original): pass @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_create_default_indexes(self, tmp_path, create_default_indexes) -> None: store_path = tmp_path / "tmp.nc" original_ds = xr.Dataset( {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]} ) original_ds.to_netcdf(store_path, engine=self.engine, mode="w") with open_dataset( store_path, engine=self.engine, create_default_indexes=create_default_indexes, ) as loaded_ds: if create_default_indexes: assert list(loaded_ds.xindexes) == ["x"] and isinstance( loaded_ds.xindexes["x"], PandasIndex ) else: assert len(loaded_ds.xindexes) == 0 @requires_netCDF4 class TestNetCDF4Data(NetCDF4Base): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open(tmp_file, mode="w") as store: yield store def test_variable_order(self) -> None: # doesn't work with scipy or h5py :( ds = Dataset() ds["a"] = 1 ds["z"] = 2 ds["b"] = 3 ds.coords["c"] = 4 with self.roundtrip(ds) as actual: assert list(ds.variables) == list(actual.variables) def test_unsorted_index_raises(self) -> None: # should be fixed in netcdf4 v1.2.1 random_data = np.random.random(size=(4, 6)) dim0 = [0, 1, 2, 3] dim1 = [0, 2, 1, 3, 5, 4] # We will sort this in a later step da = xr.DataArray( data=random_data, dims=("dim0", "dim1"), coords={"dim0": dim0, "dim1": dim1}, name="randovar", ) ds = da.to_dataset() with self.roundtrip(ds) as ondisk: inds = np.argsort(dim1) ds2 = ondisk.isel(dim1=inds) # Older versions of NetCDF4 raise an exception here, and if so we # want to ensure we improve (that is, replace) the error message try: _ = ds2.randovar.values except IndexError as err: assert "first by calling .load" in str(err) def test_setncattr_string(self) -> None: list_of_strings = ["list", "of", "strings"] one_element_list_of_strings = ["one element"] one_string = "one string" attrs = { "foo": list_of_strings, "bar": one_element_list_of_strings, "baz": one_string, } ds = Dataset({"x": ("y", [1, 2, 3], attrs)}, attrs=attrs) with self.roundtrip(ds) as actual: for totest in [actual, actual["x"]]: assert_array_equal(list_of_strings, totest.attrs["foo"]) assert_array_equal(one_element_list_of_strings, totest.attrs["bar"]) assert one_string == totest.attrs["baz"] @pytest.mark.parametrize( "compression", [ None, "zlib", "szip", pytest.param( "zstd", marks=pytest.mark.xfail( not _check_compression_codec_available("zstd"), reason="zstd codec not available in netCDF4 installation", ), ), pytest.param( 
"blosc_lz", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz"), reason="blosc_lz codec not available in netCDF4 installation", ), ), pytest.param( "blosc_lz4", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz4"), reason="blosc_lz4 codec not available in netCDF4 installation", ), ), pytest.param( "blosc_lz4hc", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz4hc"), reason="blosc_lz4hc codec not available in netCDF4 installation", ), ), pytest.param( "blosc_zlib", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_zlib"), reason="blosc_zlib codec not available in netCDF4 installation", ), ), pytest.param( "blosc_zstd", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_zstd"), reason="blosc_zstd codec not available in netCDF4 installation", ), ), ], ) @requires_netCDF4_1_6_2_or_above @pytest.mark.xfail(ON_WINDOWS, reason="new compression not yet implemented") def test_compression_encoding(self, compression: str | None) -> None: data = create_test_data(dim_sizes=(20, 80, 10)) encoding_params: dict[str, Any] = dict(compression=compression, blosc_shuffle=1) data["var2"].encoding.update(encoding_params) data["var2"].encoding.update( { "chunksizes": (20, 40), "original_shape": data.var2.shape, "blosc_shuffle": 1, "fletcher32": False, } ) with self.roundtrip(data) as actual: expected_encoding = data["var2"].encoding.copy() # compression does not appear in the retrieved encoding, that differs # from the input encoding. shuffle also chantges. Here we modify the # expected encoding to account for this compression = expected_encoding.pop("compression") blosc_shuffle = expected_encoding.pop("blosc_shuffle") if compression is not None: if "blosc" in compression and blosc_shuffle: expected_encoding["blosc"] = { "compressor": compression, "shuffle": blosc_shuffle, } expected_encoding["shuffle"] = False elif compression == "szip": expected_encoding["szip"] = { "coding": "nn", "pixels_per_block": 8, } expected_encoding["shuffle"] = False else: # This will set a key like zlib=true which is what appears in # the encoding when we read it. expected_encoding[compression] = True if compression == "zstd": expected_encoding["shuffle"] = False else: expected_encoding["shuffle"] = False actual_encoding = actual["var2"].encoding assert expected_encoding.items() <= actual_encoding.items() if ( encoding_params["compression"] is not None and "blosc" not in encoding_params["compression"] ): # regression test for #156 expected = data.isel(dim1=0) with self.roundtrip(expected) as actual: assert_equal(expected, actual) @pytest.mark.skip(reason="https://github.com/Unidata/netcdf4-python/issues/1195") def test_refresh_from_disk(self) -> None: super().test_refresh_from_disk() @requires_netCDF4_1_7_0_or_above def test_roundtrip_complex(self): expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) skwargs = dict(auto_complex=True) okwargs = dict(auto_complex=True) with self.roundtrip( expected, save_kwargs=skwargs, open_kwargs=okwargs ) as actual: assert_equal(expected, actual) @requires_netCDF4 class TestNetCDF4AlreadyOpen: def test_base_case(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: v = nc.createVariable("x", "int") v[...] 
= 42 nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) def test_group(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: group = nc.createGroup("g") v = group.createVariable("x", "int") v[...] = 42 nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc.groups["g"]) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc, group="g") with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) with nc4.Dataset(tmp_file, mode="r") as nc: with pytest.raises(ValueError, match="must supply a root"): backends.NetCDF4DataStore(nc.groups["g"], group="g") def test_deepcopy(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4425 with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 10) v = nc.createVariable("y", np.int32, ("x",)) v[:] = np.arange(10) h5 = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(h5) with open_dataset(store) as ds: copied = ds.copy(deep=True) expected = Dataset({"y": ("x", np.arange(10))}) assert_identical(expected, copied) class InMemoryNetCDF: engine: T_NetcdfEngine | None def test_roundtrip_via_memoryview(self) -> None: original = create_test_data() result = original.to_netcdf(engine=self.engine) roundtrip = load_dataset(result, engine=self.engine) assert_identical(roundtrip, original) def test_roundtrip_via_bytes(self) -> None: original = create_test_data() result = bytes(original.to_netcdf(engine=self.engine)) roundtrip = load_dataset(result, engine=self.engine) assert_identical(roundtrip, original) def test_pickle_open_dataset_from_bytes(self) -> None: original = Dataset({"foo": ("x", [1, 2, 3])}) netcdf_bytes = bytes(original.to_netcdf(engine=self.engine)) with open_dataset(netcdf_bytes, engine=self.engine) as roundtrip: with pickle.loads(pickle.dumps(roundtrip)) as unpickled: assert_identical(unpickled, original) def test_compute_false(self) -> None: original = create_test_data() with pytest.raises( NotImplementedError, match=re.escape("to_netcdf() with compute=False is not yet implemented"), ): original.to_netcdf(engine=self.engine, compute=False) class InMemoryNetCDFWithGroups(InMemoryNetCDF): def test_roundtrip_group_via_memoryview(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf(group="sub", engine=self.engine) roundtrip = load_dataset(netcdf_bytes, group="sub", engine=self.engine) assert_identical(roundtrip, original) class FileObjectNetCDF: engine: T_NetcdfEngine def test_file_remains_open(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}) f = BytesIO() data.to_netcdf(f, engine=self.engine) assert not f.closed restored = open_dataset(f, engine=self.engine) assert not f.closed assert_identical(restored, data) restored.close() assert not f.closed @requires_h5netcdf_or_netCDF4 class TestGenericNetCDF4InMemory(InMemoryNetCDFWithGroups): engine = None @requires_netCDF4 class TestNetCDF4InMemory(InMemoryNetCDFWithGroups): engine: T_NetcdfEngine = "netcdf4" @requires_netCDF4 @requires_dask @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") class TestNetCDF4ViaDaskData(TestNetCDF4Data): @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, 
allow_cleanup_failure=False ): if open_kwargs is None: open_kwargs = {} if save_kwargs is None: save_kwargs = {} open_kwargs.setdefault("chunks", -1) with TestNetCDF4Data.roundtrip( self, data, save_kwargs, open_kwargs, allow_cleanup_failure ) as ds: yield ds def test_unsorted_index_raises(self) -> None: # Skip when using dask because dask rewrites indexers to getitem, # dask first pulls items by block. pass @pytest.mark.skip(reason="caching behavior differs for dask") def test_dataset_caching(self) -> None: pass def test_write_inconsistent_chunks(self) -> None: # Construct two variables with the same dimensions, but different # chunk sizes. x = da.zeros((100, 100), dtype="f4", chunks=(50, 100)) x = DataArray(data=x, dims=("lat", "lon"), name="x") x.encoding["chunksizes"] = (50, 100) x.encoding["original_shape"] = (100, 100) y = da.ones((100, 100), dtype="f4", chunks=(100, 50)) y = DataArray(data=y, dims=("lat", "lon"), name="y") y.encoding["chunksizes"] = (100, 50) y.encoding["original_shape"] = (100, 100) # Put them both into the same dataset ds = Dataset({"x": x, "y": y}) with self.roundtrip(ds) as actual: assert actual["x"].encoding["chunksizes"] == (50, 100) assert actual["y"].encoding["chunksizes"] == (100, 50) # Flaky test. Very open to contributions on fixing this @pytest.mark.flaky def test_roundtrip_coordinates(self) -> None: super().test_roundtrip_coordinates() @requires_cftime def test_roundtrip_cftime_bnds(self): # Regression test for issue #7794 import cftime original = xr.Dataset( { "foo": ("time", [0.0]), "time_bnds": ( ("time", "bnds"), [ [ cftime.Datetime360Day(2005, 12, 1, 0, 0, 0, 0), cftime.Datetime360Day(2005, 12, 2, 0, 0, 0, 0), ] ], ), }, {"time": [cftime.Datetime360Day(2005, 12, 1, 12, 0, 0, 0)]}, ) with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with open_dataset(tmp_file) as actual: # Operation to load actual time_bnds into memory assert_array_equal(actual.time_bnds.values, original.time_bnds.values) chunked = actual.chunk(time=1) with create_tmp_file() as tmp_file_chunked: chunked.to_netcdf(tmp_file_chunked) @requires_zarr @pytest.mark.usefixtures("default_zarr_format") class ZarrBase(CFEncodedBase): DIMENSION_KEY = "_ARRAY_DIMENSIONS" zarr_version = 2 version_kwargs: dict[str, Any] = {} def create_zarr_target(self): raise NotImplementedError @contextlib.contextmanager def create_store(self, cache_members: bool = False): with self.create_zarr_target() as store_target: yield backends.ZarrStore.open_group( store_target, mode="w", cache_members=cache_members, **self.version_kwargs, ) def save(self, dataset, store_target, **kwargs): # type: ignore[override] return dataset.to_zarr(store=store_target, **kwargs, **self.version_kwargs) @contextlib.contextmanager def open(self, path, **kwargs): with xr.open_dataset( path, engine="zarr", mode="r", **kwargs, **self.version_kwargs ) as ds: yield ds @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with self.create_zarr_target() as store_target: self.save(data, store_target, **save_kwargs) with self.open(store_target, **open_kwargs) as ds: yield ds @pytest.mark.asyncio @pytest.mark.skipif( not has_zarr_v3, reason="zarr-python <3 did not support async loading", ) async def test_load_async(self) -> None: await super().test_load_async() def test_roundtrip_bytes_with_fill_value(self): pytest.xfail("Broken by Zarr 3.0.7") @pytest.mark.parametrize("consolidated", 
[False, True, None]) def test_roundtrip_consolidated(self, consolidated) -> None: expected = create_test_data() with self.roundtrip( expected, save_kwargs={"consolidated": consolidated}, open_kwargs={"backend_kwargs": {"consolidated": consolidated}}, ) as actual: self.check_dtypes_roundtripped(expected, actual) assert_identical(expected, actual) def test_read_non_consolidated_warning(self) -> None: expected = create_test_data() with self.create_zarr_target() as store: self.save( expected, store_target=store, consolidated=False, **self.version_kwargs ) if getattr(store, "supports_consolidated_metadata", True): with pytest.warns( RuntimeWarning, match="Failed to open Zarr store with consolidated", ): with xr.open_zarr(store, **self.version_kwargs) as ds: assert_identical(ds, expected) def test_non_existent_store(self) -> None: patterns = [ "No such file or directory", "Unable to find group", "No group found in store", "does not exist", ] with pytest.raises(FileNotFoundError, match=f"({'|'.join(patterns)})"): xr.open_zarr(f"{uuid.uuid4()}") @pytest.mark.skipif(has_zarr_v3, reason="chunk_store not implemented in zarr v3") def test_with_chunkstore(self) -> None: expected = create_test_data() with ( self.create_zarr_target() as store_target, self.create_zarr_target() as chunk_store, ): save_kwargs = {"chunk_store": chunk_store} self.save(expected, store_target, **save_kwargs) # the chunk store must have been populated with some entries assert len(chunk_store) > 0 open_kwargs = {"backend_kwargs": {"chunk_store": chunk_store}} with self.open(store_target, **open_kwargs) as ds: assert_equal(ds, expected) @requires_dask def test_auto_chunk(self) -> None: original = create_test_data().chunk() with self.roundtrip(original, open_kwargs={"chunks": None}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as original assert v.chunks == original[k].chunks @requires_dask @pytest.mark.filterwarnings("ignore:The specified chunks separate:UserWarning") def test_manual_chunk(self) -> None: original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3}) # Using chunks = None should return non-chunked arrays open_kwargs: dict[str, Any] = {"chunks": None} with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None # uniform arrays for i in range(2, 6): rechunked = original.chunk(chunks=i) open_kwargs = {"chunks": i} with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as rechunked assert v.chunks == rechunked[k].chunks chunks = {"dim1": 2, "dim2": 3, "dim3": 5} rechunked = original.chunk(chunks=chunks) open_kwargs = { "chunks": chunks, "backend_kwargs": {"overwrite_encoded_chunks": True}, } with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): assert v.chunks == rechunked[k].chunks with self.roundtrip(actual) as auto: # encoding should have changed for k, v in 
actual.variables.items(): assert v.chunks == rechunked[k].chunks assert_identical(actual, auto) assert_identical(actual.load(), auto.load()) def test_unlimited_dims_encoding_is_ignored(self) -> None: ds = Dataset({"x": np.arange(10)}) ds.encoding = {"unlimited_dims": ["x"]} with self.roundtrip(ds) as actual: assert_identical(ds, actual) @requires_dask @pytest.mark.filterwarnings("ignore:.*does not have a Zarr V3 specification.*") def test_warning_on_bad_chunks(self) -> None: original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 3}) bad_chunks = (2, {"dim2": (3, 3, 2, 1)}) for chunks in bad_chunks: kwargs = {"chunks": chunks} with pytest.warns(UserWarning): with self.roundtrip(original, open_kwargs=kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) good_chunks: tuple[dict[str, Any], ...] = ({"dim2": 3}, {"dim3": (6, 4)}, {}) for chunks in good_chunks: kwargs = {"chunks": chunks} with assert_no_warnings(): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".*Zarr format 3 specification.*", category=UserWarning, ) with self.roundtrip(original, open_kwargs=kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) @requires_dask def test_deprecate_auto_chunk(self) -> None: original = create_test_data().chunk() with pytest.raises(TypeError): with self.roundtrip(original, open_kwargs={"auto_chunk": True}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as original assert v.chunks == original[k].chunks with pytest.raises(TypeError): with self.roundtrip(original, open_kwargs={"auto_chunk": False}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None @requires_dask def test_write_uneven_dask_chunks(self) -> None: # regression for GH#2225 original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3}) with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for k, v in actual.data_vars.items(): assert v.chunks == actual[k].chunks def test_chunk_encoding(self) -> None: # These datasets have no dask chunks. All chunking specified in # encoding data = create_test_data() chunks = (5, 5) data["var2"].encoding.update({"chunks": chunks}) with self.roundtrip(data) as actual: assert chunks == actual["var2"].encoding["chunks"] # expect an error with non-integer chunks data["var2"].encoding.update({"chunks": (5, 4.5)}) with pytest.raises(TypeError): with self.roundtrip(data) as actual: pass def test_shard_encoding(self) -> None: # These datasets have no dask chunks. All chunking/sharding specified in # encoding if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: data = create_test_data() chunks = (1, 1) shards = (5, 5) data["var2"].encoding.update({"chunks": chunks}) data["var2"].encoding.update({"shards": shards}) with self.roundtrip(data) as actual: assert shards == actual["var2"].encoding["shards"] # expect an error with shards not divisible by chunks data["var2"].encoding.update({"chunks": (2, 2)}) with pytest.raises(ValueError): with self.roundtrip(data) as actual: pass @requires_dask @pytest.mark.skipif( ON_WINDOWS, reason="Very flaky on Windows CI. 
Can re-enable assuming it starts consistently passing.", ) def test_chunk_encoding_with_dask(self) -> None: # These datasets DO have dask chunks. Need to check for various # interactions between dask and zarr chunks ds = xr.DataArray((np.arange(12)), dims="x", name="var1").to_dataset() # - no encoding specified - # zarr automatically gets chunk information from dask chunks ds_chunk4 = ds.chunk({"x": 4}) with self.roundtrip(ds_chunk4) as actual: assert (4,) == actual["var1"].encoding["chunks"] # should fail if dask_chunks are irregular... ds_chunk_irreg = ds.chunk({"x": (5, 4, 3)}) with pytest.raises(ValueError, match=r"uniform chunk sizes."): with self.roundtrip(ds_chunk_irreg) as actual: pass # should fail if encoding["chunks"] clashes with dask_chunks badenc = ds.chunk({"x": 4}) badenc.var1.encoding["chunks"] = (6,) with pytest.raises(ValueError, match=r"named 'var1' would overlap"): with self.roundtrip(badenc) as actual: pass # unless... with self.roundtrip(badenc, save_kwargs={"safe_chunks": False}) as actual: # don't actually check equality because the data could be corrupted pass # if dask chunks (4) are an integer multiple of zarr chunks (2) it should not fail... goodenc = ds.chunk({"x": 4}) goodenc.var1.encoding["chunks"] = (2,) with self.roundtrip(goodenc) as actual: pass # if initial dask chunks are aligned, size of last dask chunk doesn't matter goodenc = ds.chunk({"x": (3, 3, 6)}) goodenc.var1.encoding["chunks"] = (3,) with self.roundtrip(goodenc) as actual: pass goodenc = ds.chunk({"x": (3, 6, 3)}) goodenc.var1.encoding["chunks"] = (3,) with self.roundtrip(goodenc) as actual: pass # ... also if the last chunk is irregular ds_chunk_irreg = ds.chunk({"x": (5, 5, 2)}) with self.roundtrip(ds_chunk_irreg) as actual: assert (5,) == actual["var1"].encoding["chunks"] # re-save Zarr arrays with self.roundtrip(ds_chunk_irreg) as original: with self.roundtrip(original) as actual: assert_identical(original, actual) # but intermediate unaligned chunks are bad badenc = ds.chunk({"x": (3, 5, 3, 1)}) badenc.var1.encoding["chunks"] = (3,) with pytest.raises(ValueError, match=r"would overlap multiple Dask chunks"): with self.roundtrip(badenc) as actual: pass # - encoding specified - # specify compatible encodings for chunk_enc in 4, (4,): ds_chunk4["var1"].encoding.update({"chunks": chunk_enc}) with self.roundtrip(ds_chunk4) as actual: assert (4,) == actual["var1"].encoding["chunks"] # TODO: remove this failure once synchronized overlapping writes are # supported by xarray ds_chunk4["var1"].encoding.update({"chunks": 5}) with pytest.raises(ValueError, match=r"named 'var1' would overlap"): with self.roundtrip(ds_chunk4) as actual: pass # override option with self.roundtrip(ds_chunk4, save_kwargs={"safe_chunks": False}) as actual: # don't actually check equality because the data could be corrupted pass @requires_netcdf def test_drop_encoding(self): with open_example_dataset("example_1.nc") as ds: encodings = {v: {**ds[v].encoding} for v in ds.data_vars} with self.create_zarr_target() as store: ds.to_zarr(store, encoding=encodings) def test_hidden_zarr_keys(self) -> None: skip_if_zarr_format_3("This test is unnecessary; no hidden Zarr keys") expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) zarr_group = store.ds # check that a variable hidden attribute is present and correct # JSON only has a single array type, which maps to list in Python. # In contrast, dims in xarray is always a tuple. 
for var in expected.variables.keys(): dims = zarr_group[var].attrs[self.DIMENSION_KEY] assert dims == list(expected[var].dims) with xr.decode_cf(store): # make sure it is hidden for var in expected.variables.keys(): assert self.DIMENSION_KEY not in expected[var].attrs # put it back and try removing from a variable attrs = dict(zarr_group["var2"].attrs) del attrs[self.DIMENSION_KEY] zarr_group["var2"].attrs.put(attrs) with pytest.raises(KeyError): with xr.decode_cf(store): pass def test_dimension_names(self) -> None: skip_if_zarr_format_2("No dimension names in V2") expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) zarr_group = store.ds for var in zarr_group: assert expected[var].dims == zarr_group[var].metadata.dimension_names @pytest.mark.parametrize("group", [None, "group1"]) def test_write_persistence_modes(self, group) -> None: original = create_test_data() # overwrite mode with self.roundtrip( original, save_kwargs={"mode": "w", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # don't overwrite mode with self.roundtrip( original, save_kwargs={"mode": "w-", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # make sure overwriting works as expected with self.create_zarr_target() as store: self.save(original, store) # should overwrite with no error self.save(original, store, mode="w", group=group) with self.open(store, group=group) as actual: assert_identical(original, actual) with pytest.raises((ValueError, FileExistsError)): self.save(original, store, mode="w-") # check append mode for normal write with self.roundtrip( original, save_kwargs={"mode": "a", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # check append mode for append write ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", group=group, **self.version_kwargs) ds_to_append.to_zarr( store_target, append_dim="time", group=group, **self.version_kwargs ) original = xr.concat([ds, ds_to_append], dim="time") actual = xr.open_dataset( store_target, group=group, engine="zarr", **self.version_kwargs ) assert_identical(original, actual) def test_compressor_encoding(self) -> None: # specify a custom compressor original = create_test_data() if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: encoding_key = "compressors" # all parameters need to be explicitly specified in order for the comparison to pass below encoding = { "serializer": zarr.codecs.BytesCodec(endian="little"), encoding_key: ( zarr.codecs.BloscCodec( cname="zstd", clevel=3, shuffle="shuffle", typesize=8, blocksize=0, ), ), } else: from numcodecs.blosc import Blosc encoding_key = "compressors" if has_zarr_v3 else "compressor" comp = Blosc(cname="zstd", clevel=3, shuffle=2) encoding = {encoding_key: (comp,) if has_zarr_v3 else comp} save_kwargs = dict(encoding={"var1": encoding}) with self.roundtrip(original, save_kwargs=save_kwargs) as ds: enc = ds["var1"].encoding[encoding_key] assert enc == encoding[encoding_key] def test_group(self) -> None: original = create_test_data() group = "some/random/path" with self.roundtrip( original, save_kwargs={"group": group}, open_kwargs={"group": group} ) as actual: assert_identical(original, actual) def test_zarr_mode_w_overwrites_encoding(self) -> None: data = Dataset({"foo": ("x", [1.0, 1.0, 1.0])}) with self.create_zarr_target() as store: data.to_zarr( store, 
**self.version_kwargs, encoding={"foo": {"add_offset": 1}} ) np.testing.assert_equal( zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data - 1 ) data.to_zarr( store, **self.version_kwargs, encoding={"foo": {"add_offset": 0}}, mode="w", ) np.testing.assert_equal( zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data ) def test_encoding_kwarg_fixed_width_string(self) -> None: # not relevant for zarr, since we don't use EncodedStringCoder pass def test_dataset_caching(self) -> None: super().test_dataset_caching() def test_append_write(self) -> None: super().test_append_write() def test_append_with_mode_rplus_success(self) -> None: original = Dataset({"foo": ("x", [1])}) modified = Dataset({"foo": ("x", [2])}) with self.create_zarr_target() as store: original.to_zarr(store, **self.version_kwargs) modified.to_zarr(store, mode="r+", **self.version_kwargs) with self.open(store) as actual: assert_identical(actual, modified) def test_append_with_mode_rplus_fails(self) -> None: original = Dataset({"foo": ("x", [1])}) modified = Dataset({"bar": ("x", [2])}) with self.create_zarr_target() as store: original.to_zarr(store, **self.version_kwargs) with pytest.raises( ValueError, match="dataset contains non-pre-existing variables" ): modified.to_zarr(store, mode="r+", **self.version_kwargs) def test_append_with_invalid_dim_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises( ValueError, match="does not match any existing dataset dimensions" ): ds_to_append.to_zarr( store_target, append_dim="notvalid", **self.version_kwargs ) def test_append_with_no_dims_raises(self) -> None: with self.create_zarr_target() as store_target: Dataset({"foo": ("x", [1])}).to_zarr( store_target, mode="w", **self.version_kwargs ) with pytest.raises(ValueError, match="different dimension names"): Dataset({"foo": ("y", [2])}).to_zarr( store_target, mode="a", **self.version_kwargs ) def test_append_with_append_dim_not_set_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="different dimension sizes"): ds_to_append.to_zarr(store_target, mode="a", **self.version_kwargs) def test_append_with_mode_not_a_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="cannot set append_dim unless"): ds_to_append.to_zarr( store_target, mode="w", append_dim="time", **self.version_kwargs ) def test_append_with_existing_encoding_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="but encoding was provided"): ds_to_append.to_zarr( store_target, append_dim="time", encoding={"da": {"compressor": None}}, **self.version_kwargs, ) @pytest.mark.parametrize("dtype", ["U", "S"]) def test_append_string_length_mismatch_raises(self, dtype) -> None: if has_zarr_v3 and not has_zarr_v3_dtypes: skip_if_zarr_format_3("This actually works fine with Zarr format 3") ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype) with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", 
**self.version_kwargs) with pytest.raises(ValueError, match="Mismatched dtypes for variable"): ds_to_append.to_zarr( store_target, append_dim="time", **self.version_kwargs ) @pytest.mark.parametrize("dtype", ["U", "S"]) def test_append_string_length_mismatch_works(self, dtype) -> None: skip_if_zarr_format_2("This doesn't work with Zarr format 2") # ...but it probably would if we used object dtype if has_zarr_v3_dtypes: pytest.skip("This works on pre ZDtype Zarr-Python, but fails after.") ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype) expected = xr.concat([ds, ds_to_append], dim="time") with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual = xr.open_dataset(store_target, engine="zarr") xr.testing.assert_identical(expected, actual) def test_check_encoding_is_consistent_after_append(self) -> None: ds, ds_to_append, _ = create_append_test_data() # check encoding consistency with self.create_zarr_target() as store_target: import numcodecs encoding_value: Any if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: compressor = zarr.codecs.BloscCodec() else: compressor = numcodecs.Blosc() encoding_key = "compressors" if has_zarr_v3 else "compressor" encoding_value = (compressor,) if has_zarr_v3 else compressor encoding = {"da": {encoding_key: encoding_value}} ds.to_zarr(store_target, mode="w", encoding=encoding, **self.version_kwargs) original_ds = xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ) original_encoding = original_ds["da"].encoding[encoding_key] ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual_ds = xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ) actual_encoding = actual_ds["da"].encoding[encoding_key] assert original_encoding == actual_encoding assert_identical( xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ).compute(), xr.concat([ds, ds_to_append], dim="time"), ) def test_append_with_new_variable(self) -> None: ds, ds_to_append, ds_with_new_var = create_append_test_data() # check append mode for new variable with self.create_zarr_target() as store_target: combined = xr.concat([ds, ds_to_append], dim="time") combined.to_zarr(store_target, mode="w", **self.version_kwargs) assert_identical( combined, xr.open_dataset(store_target, engine="zarr", **self.version_kwargs), ) ds_with_new_var.to_zarr(store_target, mode="a", **self.version_kwargs) combined = xr.concat([ds, ds_to_append], dim="time") combined["new_var"] = ds_with_new_var["new_var"] assert_identical( combined, xr.open_dataset(store_target, engine="zarr", **self.version_kwargs), ) def test_append_with_append_dim_no_overwrite(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) original = xr.concat([ds, ds_to_append], dim="time") original2 = xr.concat([original, ds_to_append], dim="time") # overwrite a coordinate; # for mode='a-', this will not get written to the store # because it does not have the append_dim as a dim lon = ds_to_append.lon.to_numpy().copy() lon[:] = -999 ds_to_append["lon"] = lon ds_to_append.to_zarr( store_target, mode="a-", append_dim="time", **self.version_kwargs ) actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs) assert_identical(original, actual) # by default, mode="a" will overwrite all coordinates. 
ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs) lon = original2.lon.to_numpy().copy() lon[:] = -999 original2["lon"] = lon assert_identical(original2, actual) @requires_dask def test_to_zarr_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed original = create_test_data().chunk() with self.create_zarr_target() as store: delayed_obj = self.save(original, store, compute=False) assert isinstance(delayed_obj, Delayed) # make sure target store has not been written to yet with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical(original, actual) delayed_obj.compute() with self.open(store) as actual: assert_identical(original, actual) @requires_dask def test_to_zarr_append_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed ds, ds_to_append, _ = create_append_test_data() ds, ds_to_append = ds.chunk(), ds_to_append.chunk() with pytest.warns(SerializationWarning): with self.create_zarr_target() as store: delayed_obj = self.save(ds, store, compute=False, mode="w") assert isinstance(delayed_obj, Delayed) with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical(ds, actual) delayed_obj.compute() with self.open(store) as actual: assert_identical(ds, actual) delayed_obj = self.save( ds_to_append, store, compute=False, append_dim="time" ) assert isinstance(delayed_obj, Delayed) with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical( xr.concat([ds, ds_to_append], dim="time"), actual ) delayed_obj.compute() with self.open(store) as actual: assert_identical(xr.concat([ds, ds_to_append], dim="time"), actual) @pytest.mark.parametrize("chunk", [False, True]) def test_save_emptydim(self, chunk) -> None: if chunk and not has_dask: pytest.skip("requires dask") ds = Dataset({"x": (("a", "b"), np.empty((5, 0))), "y": ("a", [1, 2, 5, 8, 9])}) if chunk: ds = ds.chunk({}) # chunk dataset to save dask array with self.roundtrip(ds) as ds_reload: assert_identical(ds, ds_reload) @requires_dask def test_no_warning_from_open_emptydim_with_chunks(self) -> None: ds = Dataset({"x": (("a", "b"), np.empty((5, 0)))}).chunk({"a": 1}) with assert_no_warnings(): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".*Zarr format 3 specification.*", category=UserWarning, ) with self.roundtrip(ds, open_kwargs=dict(chunks={"a": 1})) as ds_reload: assert_identical(ds, ds_reload) @pytest.mark.parametrize("consolidated", [False, True, None]) @pytest.mark.parametrize("compute", [False, True]) @pytest.mark.parametrize("use_dask", [False, True]) @pytest.mark.parametrize("write_empty", [False, True, None]) def test_write_region(self, consolidated, compute, use_dask, write_empty) -> None: if (use_dask or not compute) and not has_dask: pytest.skip("requires dask") zeros = Dataset({"u": (("x",), np.zeros(10))}) nonzeros = Dataset({"u": (("x",), np.arange(1, 11))}) if use_dask: zeros = zeros.chunk(2) nonzeros = nonzeros.chunk(2) with self.create_zarr_target() as store: zeros.to_zarr( store, consolidated=consolidated, compute=compute, encoding={"u": dict(chunks=2)}, **self.version_kwargs, ) if compute: with xr.open_zarr( store, consolidated=consolidated, **self.version_kwargs ) as actual: assert_identical(actual, zeros) for i in range(0, 10, 2): region = {"x": slice(i, i + 2)} nonzeros.isel(region).to_zarr( store, region=region, consolidated=consolidated, write_empty_chunks=write_empty, 
**self.version_kwargs, ) with xr.open_zarr( store, consolidated=consolidated, **self.version_kwargs ) as actual: assert_identical(actual, nonzeros) def test_region_scalar(self) -> None: ds = Dataset({"x": 0}) with self.create_zarr_target() as store: ds.to_zarr(store) ds.to_zarr(store, region={}, mode="r+") with xr.open_zarr(store) as actual: assert_identical(actual, ds) @pytest.mark.parametrize("mode", [None, "r+", "a"]) def test_write_region_mode(self, mode) -> None: zeros = Dataset({"u": (("x",), np.zeros(10))}) nonzeros = Dataset({"u": (("x",), np.arange(1, 11))}) with self.create_zarr_target() as store: zeros.to_zarr(store, **self.version_kwargs) for region in [{"x": slice(5)}, {"x": slice(5, 10)}]: nonzeros.isel(region).to_zarr( store, region=region, mode=mode, **self.version_kwargs ) with xr.open_zarr(store, **self.version_kwargs) as actual: assert_identical(actual, nonzeros) @requires_dask def test_write_preexisting_override_metadata(self) -> None: """Metadata should be overridden if mode="a" but not in mode="r+".""" original = Dataset( {"u": (("x",), np.zeros(10), {"variable": "original"})}, attrs={"global": "original"}, ) both_modified = Dataset( {"u": (("x",), np.ones(10), {"variable": "modified"})}, attrs={"global": "modified"}, ) global_modified = Dataset( {"u": (("x",), np.ones(10), {"variable": "original"})}, attrs={"global": "modified"}, ) only_new_data = Dataset( {"u": (("x",), np.ones(10), {"variable": "original"})}, attrs={"global": "original"}, ) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) both_modified.to_zarr(store, mode="a", **self.version_kwargs) with self.open(store) as actual: # NOTE: this is arguably incorrect -- we should probably be # overriding the variable metadata, too. See the TODO note in # ZarrStore.set_variables.
assert_identical(actual, global_modified) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) both_modified.to_zarr(store, mode="r+", **self.version_kwargs) with self.open(store) as actual: assert_identical(actual, only_new_data) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) # with region, the default mode becomes r+ both_modified.to_zarr( store, region={"x": slice(None)}, **self.version_kwargs ) with self.open(store) as actual: assert_identical(actual, only_new_data) def test_write_region_errors(self) -> None: data = Dataset({"u": (("x",), np.arange(5))}) data2 = Dataset({"u": (("x",), np.array([10, 11]))}) @contextlib.contextmanager def setup_and_verify_store(expected=data): with self.create_zarr_target() as store: data.to_zarr(store, **self.version_kwargs) yield store with self.open(store) as actual: assert_identical(actual, expected) # verify the base case works expected = Dataset({"u": (("x",), np.array([10, 11, 2, 3, 4]))}) with setup_and_verify_store(expected) as store: data2.to_zarr(store, region={"x": slice(2)}, **self.version_kwargs) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=re.escape( "cannot set region unless mode='a', mode='a-', mode='r+' or mode=None" ), ): data.to_zarr( store, region={"x": slice(None)}, mode="w", **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises(TypeError, match=r"must be a dict"): data.to_zarr(store, region=slice(None), **self.version_kwargs) # type: ignore[call-overload] with setup_and_verify_store() as store: with pytest.raises(TypeError, match=r"must be slice objects"): data2.to_zarr(store, region={"x": [0, 1]}, **self.version_kwargs) # type: ignore[dict-item] with setup_and_verify_store() as store: with pytest.raises(ValueError, match=r"step on all slices"): data2.to_zarr( store, region={"x": slice(None, None, 2)}, **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"all keys in ``region`` are not in Dataset dimensions", ): data.to_zarr(store, region={"y": slice(None)}, **self.version_kwargs) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"all variables in the dataset to write must have at least one dimension in common", ): data2.assign(v=2).to_zarr( store, region={"x": slice(2)}, **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"cannot list the same dimension in both" ): data.to_zarr( store, region={"x": slice(None)}, append_dim="x", **self.version_kwargs, ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"variable 'u' already exists with different dimension sizes", ): data2.to_zarr(store, region={"x": slice(3)}, **self.version_kwargs) @requires_dask def test_encoding_chunksizes(self) -> None: # regression test for GH2278 # see also test_encoding_chunksizes_unlimited nx, ny, nt = 4, 4, 5 original = xr.Dataset( {}, coords={ "x": np.arange(nx), "y": np.arange(ny), "t": np.arange(nt), }, ) original["v"] = xr.Variable(("x", "y", "t"), np.zeros((nx, ny, nt))) original = original.chunk({"t": 1, "x": 2, "y": 2}) with self.roundtrip(original) as ds1: assert_equal(ds1, original) with self.roundtrip(ds1.isel(t=0)) as ds2: assert_equal(ds2, original.isel(t=0)) @requires_dask def test_chunk_encoding_with_partial_dask_chunks(self) -> None: original = xr.Dataset( {"x": xr.DataArray(np.random.random(size=(6, 8)), dims=("a", 
"b"))} ).chunk({"a": 3}) with self.roundtrip( original, save_kwargs={"encoding": {"x": {"chunks": [3, 2]}}} ) as ds1: assert_equal(ds1, original) @requires_dask def test_chunk_encoding_with_larger_dask_chunks(self) -> None: original = xr.Dataset({"a": ("x", [1, 2, 3, 4])}).chunk({"x": 2}) with self.roundtrip( original, save_kwargs={"encoding": {"a": {"chunks": [1]}}} ) as ds1: assert_equal(ds1, original) @requires_cftime def test_open_zarr_use_cftime(self) -> None: ds = create_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, **self.version_kwargs) ds_a = xr.open_zarr(store_target, **self.version_kwargs) assert_identical(ds, ds_a) decoder = CFDatetimeCoder(use_cftime=True) ds_b = xr.open_zarr( store_target, decode_times=decoder, **self.version_kwargs ) assert xr.coding.times.contains_cftime_datetimes(ds_b.time.variable) def test_write_read_select_write(self) -> None: # Test for https://github.com/pydata/xarray/issues/4084 ds = create_test_data() # NOTE: using self.roundtrip, which uses open_dataset, will not trigger the bug. with self.create_zarr_target() as initial_store: ds.to_zarr(initial_store, mode="w", **self.version_kwargs) ds1 = xr.open_zarr(initial_store, **self.version_kwargs) # Combination of where+squeeze triggers error on write. ds_sel = ds1.where(ds1.coords["dim3"] == "a", drop=True).squeeze("dim3") with self.create_zarr_target() as final_store: ds_sel.to_zarr(final_store, mode="w", **self.version_kwargs) @pytest.mark.parametrize("obj", [Dataset(), DataArray(name="foo")]) def test_attributes(self, obj) -> None: obj = obj.copy() obj.attrs["good"] = {"key": "value"} ds = obj if isinstance(obj, Dataset) else obj.to_dataset() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, **self.version_kwargs) assert_identical(ds, xr.open_zarr(store_target, **self.version_kwargs)) obj.attrs["bad"] = DataArray() ds = obj if isinstance(obj, Dataset) else obj.to_dataset() with self.create_zarr_target() as store_target: with pytest.raises(TypeError, match=r"Invalid attribute in Dataset.attrs."): ds.to_zarr(store_target, **self.version_kwargs) @requires_dask @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) def test_chunked_datetime64_or_timedelta64(self, dtype) -> None: # Generalized from @malmans2's test in PR #8253 original = create_test_data().astype(dtype).chunk(1) with self.roundtrip( original, open_kwargs={ "chunks": {}, "decode_timedelta": CFTimedeltaCoder(time_unit="ns"), }, ) as actual: for name, actual_var in actual.variables.items(): assert original[name].chunks == actual_var.chunks assert original.chunks == actual.chunks @requires_cftime @requires_dask def test_chunked_cftime_datetime(self) -> None: # Based on @malmans2's test in PR #8253 times = date_range("2000", freq="D", periods=3, use_cftime=True) original = xr.Dataset(data_vars={"chunked_times": (["time"], times)}) original = original.chunk({"time": 1}) with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for name, actual_var in actual.variables.items(): assert original[name].chunks == actual_var.chunks assert original.chunks == actual.chunks def test_cache_members(self) -> None: """ Ensure that if `ZarrStore` is created with `cache_members` set to `True`, a `ZarrStore` only inspects the underlying zarr group once, and that the results of that inspection are cached. 
Otherwise, `ZarrStore.members` should inspect the underlying zarr group each time it is invoked """ with self.create_zarr_target() as store_target: zstore_mut = backends.ZarrStore.open_group( store_target, mode="w", cache_members=False ) # ensure that the keys are sorted array_keys = sorted(("foo", "bar")) # create some arrays for ak in array_keys: zstore_mut.zarr_group.create(name=ak, shape=(1,), dtype="uint8") zstore_stat = backends.ZarrStore.open_group( store_target, mode="r", cache_members=True ) observed_keys_0 = sorted(zstore_stat.array_keys()) assert observed_keys_0 == array_keys # create a new array new_key = "baz" zstore_mut.zarr_group.create(name=new_key, shape=(1,), dtype="uint8") observed_keys_1 = sorted(zstore_stat.array_keys()) assert observed_keys_1 == array_keys observed_keys_2 = sorted(zstore_mut.array_keys()) assert observed_keys_2 == sorted(array_keys + [new_key]) @requires_dask @pytest.mark.parametrize("dtype", [int, float]) def test_zarr_fill_value_setting(self, dtype): # When zarr_format=2, _FillValue sets fill_value # When zarr_format=3, fill_value is set independently # We test this by writing a dask array with compute=False, # on read we should receive chunks filled with `fill_value` fv = -1 ds = xr.Dataset( {"foo": ("x", dask.array.from_array(np.array([0, 0, 0], dtype=dtype)))} ) expected = xr.Dataset({"foo": ("x", [fv] * 3)}) zarr_format_2 = ( has_zarr_v3 and zarr.config.get("default_zarr_format") == 2 ) or not has_zarr_v3 if zarr_format_2: attr = "_FillValue" expected.foo.attrs[attr] = fv else: attr = "fill_value" if dtype is float: # for floats, Xarray inserts a default `np.nan` expected.foo.attrs["_FillValue"] = np.nan # turn off all decoding so we see what Zarr returns to us. # Since chunks are not written, we should receive only `fill_value` open_kwargs = { "mask_and_scale": False, "consolidated": False, "use_zarr_fill_value_as_mask": False, } save_kwargs = dict(compute=False, consolidated=False) with self.roundtrip( ds, save_kwargs=ChainMap(save_kwargs, dict(encoding={"foo": {attr: fv}})), open_kwargs=open_kwargs, ) as actual: assert_identical(actual, expected) ds.foo.encoding[attr] = fv with self.roundtrip( ds, save_kwargs=save_kwargs, open_kwargs=open_kwargs ) as actual: assert_identical(actual, expected) if zarr_format_2: ds = ds.drop_encoding() with pytest.raises(ValueError, match="_FillValue"): with self.roundtrip( ds, save_kwargs=ChainMap( save_kwargs, dict(encoding={"foo": {"fill_value": fv}}) ), open_kwargs=open_kwargs, ): pass # TODO: this doesn't fail because of the # ``raise_on_invalid=vn in check_encoding_set`` line in zarr.py # ds.foo.encoding["fill_value"] = fv @requires_zarr @pytest.mark.skipif( KVStore is None, reason="zarr-python 2.x or ZARR_V3_EXPERIMENTAL_API is unset."
) class TestInstrumentedZarrStore: if has_zarr_v3: methods = [ "get", "set", "list_dir", "list_prefix", ] else: methods = [ "__iter__", "__contains__", "__setitem__", "__getitem__", "listdir", "list_prefix", ] @contextlib.contextmanager def create_zarr_target(self): if Version(zarr.__version__) < Version("2.18.0"): pytest.skip("Instrumented tests only work on latest Zarr.") if has_zarr_v3: kwargs = {"read_only": False} else: kwargs = {} # type: ignore[arg-type,unused-ignore] store = KVStore({}, **kwargs) # type: ignore[arg-type,unused-ignore] yield store def make_patches(self, store): from unittest.mock import MagicMock return { method: MagicMock( f"KVStore.{method}", side_effect=getattr(store, method), autospec=True, ) for method in self.methods } def summarize(self, patches): summary = {} for name, patch_ in patches.items(): count = 0 for call in patch_.mock_calls: if "zarr.json" not in call.args: count += 1 summary[name.strip("_")] = count return summary def check_requests(self, expected, patches): summary = self.summarize(patches) for k in summary: assert summary[k] <= expected[k], (k, summary) def test_append(self) -> None: original = Dataset({"foo": ("x", [1])}, coords={"x": [0]}) modified = Dataset({"foo": ("x", [2])}, coords={"x": [1]}) with self.create_zarr_target() as store: if has_zarr_v3: # TODO: verify these expected = { "set": 5, "get": 4, "list_dir": 2, "list_prefix": 1, } else: expected = { "iter": 1, "contains": 18, "setitem": 10, "getitem": 13, "listdir": 0, "list_prefix": 3, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): original.to_zarr(store) self.check_requests(expected, patches) patches = self.make_patches(store) # v2024.03.0: {'iter': 6, 'contains': 2, 'setitem': 5, 'getitem': 10, 'listdir': 6, 'list_prefix': 0} # 6057128b: {'iter': 5, 'contains': 2, 'setitem': 5, 'getitem': 10, "listdir": 5, "list_prefix": 0} if has_zarr_v3: expected = { "set": 4, "get": 9, # TODO: fixme upstream (should be 8) "list_dir": 2, # TODO: fixme upstream (should be 2) "list_prefix": 0, } else: expected = { "iter": 1, "contains": 11, "setitem": 6, "getitem": 15, "listdir": 0, "list_prefix": 1, } with patch.multiple(KVStore, **patches): modified.to_zarr(store, mode="a", append_dim="x") self.check_requests(expected, patches) patches = self.make_patches(store) if has_zarr_v3: expected = { "set": 4, "get": 9, # TODO: fixme upstream (should be 8) "list_dir": 2, # TODO: fixme upstream (should be 2) "list_prefix": 0, } else: expected = { "iter": 1, "contains": 11, "setitem": 6, "getitem": 15, "listdir": 0, "list_prefix": 1, } with patch.multiple(KVStore, **patches): modified.to_zarr(store, mode="a-", append_dim="x") self.check_requests(expected, patches) with open_dataset(store, engine="zarr") as actual: assert_identical( actual, xr.concat([original, modified, modified], dim="x") ) @requires_dask def test_region_write(self) -> None: ds = Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}).chunk() with self.create_zarr_target() as store: if has_zarr_v3: expected = { "set": 5, "get": 2, "list_dir": 2, "list_prefix": 4, } else: expected = { "iter": 1, "contains": 16, "setitem": 9, "getitem": 13, "listdir": 0, "list_prefix": 5, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, mode="w", compute=False) self.check_requests(expected, patches) # v2024.03.0: {'iter': 5, 'contains': 2, 'setitem': 1, 'getitem': 6, 'listdir': 5, 'list_prefix': 0} # 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 5, 'listdir': 
4, 'list_prefix': 0} if has_zarr_v3: expected = { "set": 1, "get": 3, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 1, "getitem": 7, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, region={"x": slice(None)}) self.check_requests(expected, patches) # v2024.03.0: {'iter': 6, 'contains': 4, 'setitem': 1, 'getitem': 11, 'listdir': 6, 'list_prefix': 0} # 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 7, 'listdir': 4, 'list_prefix': 0} if has_zarr_v3: expected = { "set": 1, "get": 4, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 1, "getitem": 8, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, region="auto") self.check_requests(expected, patches) if has_zarr_v3: expected = { "set": 0, "get": 5, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 0, "getitem": 8, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): with open_dataset(store, engine="zarr") as actual: assert_identical(actual, ds) self.check_requests(expected, patches) @requires_zarr class TestZarrDictStore(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): if has_zarr_v3: yield zarr.storage.MemoryStore({}, read_only=False) else: yield {} def test_chunk_key_encoding_v2(self) -> None: encoding = {"name": "v2", "configuration": {"separator": "/"}} # Create a dataset with a variable name containing a period data = np.ones((4, 4)) original = Dataset({"var1": (("x", "y"), data)}) # Set up chunk key encoding with slash separator encoding = { "var1": { "chunk_key_encoding": encoding, "chunks": (2, 2), } } # Write to store with custom encoding with self.create_zarr_target() as store: original.to_zarr(store, encoding=encoding) # Verify the chunk keys in store use the slash separator if not has_zarr_v3: chunk_keys = [k for k in store.keys() if k.startswith("var1/")] assert len(chunk_keys) > 0 for key in chunk_keys: assert "/" in key assert "." not in key.split("/")[1:] # No dots in chunk coordinates # Read back and verify data with xr.open_zarr(store) as actual: assert_identical(original, actual) # Verify chunks are preserved assert actual["var1"].encoding["chunks"] == (2, 2) @pytest.mark.asyncio @requires_zarr_v3 async def test_async_load_multiple_variables(self) -> None: target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) # the indexed coordinate variables is not lazy, so the create_test_dataset has 4 lazy variables in total N_LAZY_VARS = 4 original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, zarr_format=3, consolidated=False) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: # blocks upon loading the coordinate variables here ds = xr.open_zarr(store, consolidated=False, chunks=None) # TODO we're not actually testing that these indexing methods are not blocking... 
result_ds = await ds.load_async() mocked_meth.assert_called() assert mocked_meth.call_count == N_LAZY_VARS mocked_meth.assert_awaited() xrt.assert_identical(result_ds, ds.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) async def test_concurrent_load_multiple_objects( self, cls_name, ) -> None: N_OBJECTS = 5 N_LAZY_VARS = { "Variable": 1, "DataArray": 1, "Dataset": 4, } # specific to the create_test_data() used target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... coros = [xr_obj.load_async() for _ in range(N_OBJECTS)] results = await asyncio.gather(*coros) mocked_meth.assert_called() assert mocked_meth.call_count == N_OBJECTS * N_LAZY_VARS[cls_name] mocked_meth.assert_awaited() for result in results: xrt.assert_identical(result, xr_obj.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) @pytest.mark.parametrize( "indexer, method, target_zarr_class", [ pytest.param({}, "sel", "zarr.AsyncArray", id="no-indexing-sel"), pytest.param({}, "isel", "zarr.AsyncArray", id="no-indexing-isel"), pytest.param({"dim2": 1.0}, "sel", "zarr.AsyncArray", id="basic-int-sel"), pytest.param({"dim2": 2}, "isel", "zarr.AsyncArray", id="basic-int-isel"), pytest.param( {"dim2": slice(1.0, 3.0)}, "sel", "zarr.AsyncArray", id="basic-slice-sel", ), pytest.param( {"dim2": slice(1, 3)}, "isel", "zarr.AsyncArray", id="basic-slice-isel" ), pytest.param( {"dim2": [1.0, 3.0]}, "sel", "zarr.core.indexing.AsyncOIndex", id="outer-sel", ), pytest.param( {"dim2": [1, 3]}, "isel", "zarr.core.indexing.AsyncOIndex", id="outer-isel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1.0, 2.0], dims="points"), }, "sel", "zarr.core.indexing.AsyncVIndex", id="vectorized-sel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "isel", "zarr.core.indexing.AsyncVIndex", id="vectorized-isel", ), ], ) async def test_indexing( self, cls_name, method, indexer, target_zarr_class, ) -> None: if not has_zarr_v3_async_oindex and target_zarr_class in ( "zarr.core.indexing.AsyncOIndex", "zarr.core.indexing.AsyncVIndex", ): pytest.skip( "current version of zarr does not support orthogonal or vectorized async indexing" ) if cls_name == "Variable" and method == "sel": pytest.skip("Variable doesn't have a .sel method") # Each type of indexing ends up calling a different zarr indexing method # They all use a method named .getitem, but on a different internal zarr class def _resolve_class_from_string(class_path: str) -> type[Any]: """Resolve a string class path like 'zarr.AsyncArray' to the actual class.""" module_path, class_name = class_path.rsplit(".", 1) module = import_module(module_path) return getattr(module, class_name) target_class = _resolve_class_from_string(target_zarr_class) method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with 
patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... result = await getattr(xr_obj, method)(**indexer).load_async() mocked_meth.assert_called() mocked_meth.assert_awaited() assert mocked_meth.call_count > 0 expected = getattr(xr_obj, method)(**indexer).load() xrt.assert_identical(result, expected) @pytest.mark.asyncio @pytest.mark.parametrize( ("indexer", "expected_err_msg"), [ pytest.param( {"dim2": 2}, "basic async indexing", marks=pytest.mark.skipif( has_zarr_v3, reason="current version of zarr has basic async indexing", ), ), # tests basic indexing pytest.param( {"dim2": [1, 3]}, "orthogonal async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async orthogonal indexing", ), ), # tests oindexing pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "vectorized async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async vectorized indexing", ), ), # tests vindexing ], ) @parametrize_zarr_format async def test_raise_on_older_zarr_version( self, indexer, expected_err_msg, zarr_format, ): """Test that trying to use async load with insufficiently new version of zarr raises a clear error""" original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=zarr_format) ds = xr.open_zarr(store, consolidated=False, chunks=None) var = ds["var1"].variable with pytest.raises(NotImplementedError, match=expected_err_msg): await var.isel(**indexer).load_async() def get_xr_obj( store: zarr.abc.store.Store, cls_name: Literal["Variable", "DataArray", "Dataset"] ): ds = xr.open_zarr(store, consolidated=False, chunks=None) match cls_name: case "Variable": return ds["var1"].variable case "DataArray": return ds["var1"] case "Dataset": return ds class NoConsolidatedMetadataSupportStore(WrapperStore): """ Store that explicitly does not support consolidated metadata. Useful as a proxy for stores like Icechunk, see https://github.com/zarr-developers/zarr-python/pull/3119. """ supports_consolidated_metadata = False def __init__( self, store, *, read_only: bool = False, ) -> None: self._store = store.with_read_only(read_only=read_only) def with_read_only( self, read_only: bool = False ) -> NoConsolidatedMetadataSupportStore: return type(self)( store=self._store, read_only=read_only, ) @requires_zarr_v3 class TestZarrNoConsolidatedMetadataSupport(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): # TODO the zarr version would need to be >3.08 for the supports_consolidated_metadata property to have any effect yield NoConsolidatedMetadataSupportStore( zarr.storage.MemoryStore({}, read_only=False) ) @requires_zarr @pytest.mark.skipif( ON_WINDOWS, reason="Very flaky on Windows CI. 
Can re-enable assuming it starts consistently passing.", ) class TestZarrDirectoryStore(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): with create_tmp_file(suffix=".zarr") as tmp: yield tmp @requires_zarr class TestZarrWriteEmpty(TestZarrDirectoryStore): @contextlib.contextmanager def temp_dir(self) -> Iterator[tuple[str, str]]: with tempfile.TemporaryDirectory() as d: store = os.path.join(d, "test.zarr") yield d, store @contextlib.contextmanager def roundtrip_dir( self, data, store, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False, ) -> Iterator[Dataset]: if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} data.to_zarr(store, **save_kwargs, **self.version_kwargs) with xr.open_dataset( store, engine="zarr", **open_kwargs, **self.version_kwargs ) as ds: yield ds @requires_dask def test_default_zarr_fill_value(self): inputs = xr.Dataset({"floats": ("x", [1.0]), "ints": ("x", [1])}).chunk() expected = xr.Dataset({"floats": ("x", [np.nan]), "ints": ("x", [0])}) with self.temp_dir() as (_d, store): inputs.to_zarr(store, compute=False) with open_dataset(store) as on_disk: assert np.isnan(on_disk.variables["floats"].encoding["_FillValue"]) assert ( "_FillValue" not in on_disk.variables["ints"].encoding ) # use default if not has_zarr_v3: # zarr-python v2 interprets fill_value=None inconsistently del on_disk["ints"] del expected["ints"] assert_identical(expected, on_disk) @pytest.mark.parametrize("consolidated", [True, False, None]) @pytest.mark.parametrize("write_empty", [True, False, None]) def test_write_empty( self, consolidated: bool | None, write_empty: bool | None, ) -> None: def assert_expected_files(expected: list[str], store: str) -> None: """Convenience for comparing with actual files written""" ls = [] test_root = os.path.join(store, "test") for root, _, files in os.walk(test_root): ls.extend( [ os.path.join(root, f).removeprefix(test_root).lstrip("/") for f in files ] ) assert set(expected) == { file.lstrip("c/") for file in ls if (file not in (".zattrs", ".zarray", "zarr.json")) } # The zarr format is set by the `default_zarr_format` # pytest fixture that acts on a superclass zarr_format_3 = has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3 if (write_empty is False) or (write_empty is None and has_zarr_v3): expected = ["0.1.0"] else: expected = [ "0.0.0", "0.0.1", "0.1.0", "0.1.1", ] # use nan for default fill_value behaviour data = np.array([np.nan, np.nan, 1.0, np.nan]).reshape((1, 2, 2)) if zarr_format_3: # transform to the path style of zarr 3 # e.g. 
0/0/1 expected = [e.replace(".", "/") for e in expected] ds = xr.Dataset(data_vars={"test": (("Z", "Y", "X"), data)}) if has_dask: ds["test"] = ds["test"].chunk(1) encoding = None else: encoding = {"test": {"chunks": (1, 1, 1)}} with self.temp_dir() as (_d, store): ds.to_zarr( store, mode="w", encoding=encoding, write_empty_chunks=write_empty, ) # check expected files after a write assert_expected_files(expected, store) with self.roundtrip_dir( ds, store, save_kwargs={ "mode": "a", "append_dim": "Z", "write_empty_chunks": write_empty, }, ) as a_ds: expected_ds = xr.concat([ds, ds], dim="Z") assert_identical(a_ds, expected_ds.compute()) # add the new files we expect to be created by the append # that was performed by the roundtrip_dir if (write_empty is False) or (write_empty is None and has_zarr_v3): expected.append("1.1.0") elif not has_zarr_v3 or has_zarr_v3_async_oindex: # this was broken from zarr 3.0.0 until 3.1.2 # async oindex released in 3.1.2 along with a fix # for write_empty_chunks in append expected.extend( [ "1.1.0", "1.0.0", "1.0.1", "1.1.1", ] ) else: expected.append("1.1.0") if zarr_format_3: expected = [e.replace(".", "/") for e in expected] assert_expected_files(expected, store) def test_avoid_excess_metadata_calls(self) -> None: """Test that chunk requests do not trigger redundant metadata requests. This test targets logic in backends.zarr.ZarrArrayWrapper, asserting that calls to retrieve chunk data after initialization do not trigger additional metadata requests. https://github.com/pydata/xarray/issues/8290 """ ds = xr.Dataset(data_vars={"test": (("Z",), np.array([123]).reshape(1))}) # The call to retrieve metadata performs a group lookup. We patch Group.__getitem__ # so that we can inspect calls to this method - specifically count of calls. # Use of side_effect means that calls are passed through to the original method # rather than a mocked method. 
Group: Any if has_zarr_v3: Group = zarr.AsyncGroup patched = patch.object( Group, "getitem", side_effect=Group.getitem, autospec=True ) else: Group = zarr.Group patched = patch.object( Group, "__getitem__", side_effect=Group.__getitem__, autospec=True ) with self.create_zarr_target() as store, patched as mock: ds.to_zarr(store, mode="w") # We expect this to request array metadata information, so call_count should be == 1. xrds = xr.open_zarr(store) call_count = mock.call_count assert call_count == 1 # compute() requests array data, which should not trigger additional metadata requests # we assert that the number of calls has not increased after fetching the array xrds.test.compute(scheduler="sync") assert mock.call_count == call_count @requires_zarr @requires_fsspec @pytest.mark.skipif(has_zarr_v3, reason="Difficult to test.") def test_zarr_storage_options() -> None: pytest.importorskip("aiobotocore") ds = create_test_data() store_target = "memory://test.zarr" ds.to_zarr(store_target, storage_options={"test": "zarr_write"}) ds_a = xr.open_zarr(store_target, storage_options={"test": "zarr_read"}) assert_identical(ds, ds_a) @requires_zarr def test_zarr_version_deprecated() -> None: ds = create_test_data() store: Any if has_zarr_v3: store = KVStore() else: store = {} with pytest.warns(FutureWarning, match="zarr_version"): ds.to_zarr(store=store, zarr_version=2) with pytest.warns(FutureWarning, match="zarr_version"): xr.open_zarr(store=store, zarr_version=2) with pytest.raises(ValueError, match="zarr_format"): xr.open_zarr(store=store, zarr_version=2, zarr_format=3) @requires_scipy class TestScipyInMemoryData(CFEncodedBase, NetCDF3Only, InMemoryNetCDF): engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): fobj = BytesIO() yield backends.ScipyDataStore(fobj, "w") @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} saved = self.save(data, path=None, **save_kwargs) with self.open(saved, **open_kwargs) as ds: yield ds @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() @requires_scipy class TestScipyFileObject(CFEncodedBase, NetCDF3Only, FileObjectNetCDF): # TODO: Consider consolidating some of these cases (e.g., # test_file_remains_open) with TestH5NetCDFFileObject engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): fobj = BytesIO() yield backends.ScipyDataStore(fobj, "w") @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file() as tmp_file: with open(tmp_file, "wb") as f: self.save(data, f, **save_kwargs) with open(tmp_file, "rb") as f: with self.open(f, **open_kwargs) as ds: yield ds @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() @pytest.mark.skip(reason="cannot pickle file objects") def test_pickle(self) -> None: super().test_pickle() @pytest.mark.skip(reason="cannot pickle file objects") def test_pickle_dataarray(self) -> None: super().test_pickle_dataarray() @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_create_default_indexes(self, tmp_path,
create_default_indexes) -> None: store_path = tmp_path / "tmp.nc" original_ds = xr.Dataset( {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]} ) original_ds.to_netcdf(store_path, engine=self.engine, mode="w") with open_dataset( store_path, engine=self.engine, create_default_indexes=create_default_indexes, ) as loaded_ds: if create_default_indexes: assert list(loaded_ds.xindexes) == ["x"] and isinstance( loaded_ds.xindexes["x"], PandasIndex ) else: assert len(loaded_ds.xindexes) == 0 @requires_scipy class TestScipyFilePath(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.ScipyDataStore(tmp_file, mode="w") as store: yield store def test_array_attrs(self) -> None: ds = Dataset(attrs={"foo": [[1, 2], [3, 4]]}) with pytest.raises(ValueError, match=r"must be 1-dimensional"): with self.roundtrip(ds): pass def test_roundtrip_example_1_netcdf_gz(self) -> None: with open_example_dataset("example_1.nc.gz") as expected: with open_example_dataset("example_1.nc") as actual: assert_identical(expected, actual) def test_netcdf3_endianness(self) -> None: # regression test for GH416 with open_example_dataset("bears.nc", engine="scipy") as expected: for var in expected.variables.values(): assert var.dtype.isnative @requires_netCDF4 def test_nc4_scipy(self) -> None: with create_tmp_file(allow_cleanup_failure=True) as tmp_file: with nc4.Dataset(tmp_file, "w", format="NETCDF4") as rootgrp: rootgrp.createGroup("foo") with pytest.raises(TypeError, match=r"pip install netcdf4"): open_dataset(tmp_file, engine="scipy") @requires_netCDF4 class TestNetCDF3ViaNetCDF4Data(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "netcdf4" file_format: T_NetcdfTypes = "NETCDF3_CLASSIC" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open( tmp_file, mode="w", format="NETCDF3_CLASSIC" ) as store: yield store def test_encoding_kwarg_vlen_string(self) -> None: original = Dataset({"x": ["foo", "bar", "baz"]}) kwargs = dict(encoding={"x": {"dtype": str}}) with pytest.raises(ValueError, match=r"encoding dtype=str for vlen"): with self.roundtrip(original, save_kwargs=kwargs): pass @requires_netCDF4 class TestNetCDF4ClassicViaNetCDF4Data(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "netcdf4" file_format: T_NetcdfTypes = "NETCDF4_CLASSIC" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open( tmp_file, mode="w", format="NETCDF4_CLASSIC" ) as store: yield store @requires_h5netcdf def test_string_attributes_stored_as_char(self, tmp_path): import h5netcdf original = Dataset(attrs={"foo": "bar"}) store_path = tmp_path / "tmp.nc" original.to_netcdf(store_path, engine=self.engine, format=self.file_format) with h5netcdf.File(store_path, "r") as ds: # Check that the attribute is stored as a char array assert ds._h5file.attrs["foo"].dtype == np.dtype("S3") @requires_h5netcdf_1_7_0_or_above class TestNetCDF4ClassicViaH5NetCDFData(TestNetCDF4ClassicViaNetCDF4Data): engine: T_NetcdfEngine = "h5netcdf" file_format: T_NetcdfTypes = "NETCDF4_CLASSIC" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.H5NetCDFStore.open( tmp_file, mode="w", format="NETCDF4_CLASSIC" ) as store: yield store @requires_netCDF4 def test_cross_engine_read_write_netcdf4(self) -> None: # Drop dim3, because its labels include strings. 
These appear to be # not properly read with python-netCDF4, which converts them into # unicode instead of leaving them as bytes. data = create_test_data().drop_vars("dim3") data.attrs["foo"] = "bar" valid_engines: list[T_NetcdfEngine] = ["netcdf4", "h5netcdf"] for write_engine in valid_engines: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine=write_engine, format=self.file_format) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: assert_identical(data, actual) def test_group_fails(self): # Check writing group data fails with CLASSIC format original = create_test_data() with pytest.raises( ValueError, match=r"Cannot create sub-groups in `NETCDF4_CLASSIC` format." ): original.to_netcdf(group="sub", format=self.file_format, engine=self.engine) @requires_scipy_or_netCDF4 class TestGenericNetCDFData(NetCDF3Only, CFEncodedBase): # verify that we can read and write netCDF3 files as long as we have scipy # or netCDF4-python installed file_format: T_NetcdfTypes = "NETCDF3_64BIT" def test_write_store(self) -> None: # there's no specific store to test here pass @requires_scipy @requires_netCDF4 def test_engine(self) -> None: data = create_test_data() with pytest.raises(ValueError, match=r"unrecognized engine"): data.to_netcdf("foo.nc", engine="foobar") # type: ignore[call-overload] with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file) with pytest.raises(ValueError, match=r"unrecognized engine"): open_dataset(tmp_file, engine="foobar") with pytest.raises( TypeError, match=re.escape("file objects are not supported by the netCDF4 backend"), ): data.to_netcdf(BytesIO(), engine="netcdf4") with pytest.raises( TypeError, match=re.escape("file objects are not supported by the netCDF4 backend"), ): open_dataset(BytesIO(), engine="netcdf4") bytes_io = BytesIO() data.to_netcdf(bytes_io, engine="scipy") with pytest.raises(ValueError, match=r"unrecognized engine"): open_dataset(bytes_io, engine="foobar") def test_cross_engine_read_write_netcdf3(self) -> None: data = create_test_data() valid_engines: set[T_NetcdfEngine] = set() if has_netCDF4: valid_engines.add("netcdf4") if has_scipy: valid_engines.add("scipy") for write_engine in valid_engines: for format in self.netcdf3_formats: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format=format, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: # hack to allow test to work: # coord comes back as DataArray rather than coord, # and so need to loop through here rather than in # the test function (or we get recursion) [ assert_allclose(data[k].variable, actual[k].variable) for k in data.variables ] def test_encoding_unlimited_dims(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0))}) with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) ds.encoding = {"unlimited_dims": ["y"]} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 ds.encoding = {"unlimited_dims": "y"} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) 
@requires_scipy def test_roundtrip_via_bytes(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf() roundtrip = load_dataset(netcdf_bytes) assert_identical(roundtrip, original) @pytest.mark.xfail( reason="scipy.io.netcdf_file closes files upon garbage collection" ) @requires_scipy def test_roundtrip_via_file_object(self) -> None: original = create_test_data() f = BytesIO() original.to_netcdf(f) assert not f.closed restored = open_dataset(f) assert not f.closed assert_identical(restored, original) restored.close() assert not f.closed @requires_h5netcdf @requires_netCDF4 @pytest.mark.filterwarnings("ignore:use make_scale(name) instead") class TestH5NetCDFData(NetCDF4Base): engine: T_NetcdfEngine = "h5netcdf" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: yield backends.H5NetCDFStore.open(tmp_file, "w") @pytest.mark.skipif( has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0" ) def test_complex(self) -> None: expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) save_kwargs = {"invalid_netcdf": True} with pytest.warns(UserWarning, match="You are writing invalid netcdf features"): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_equal(expected, actual) @pytest.mark.skipif( has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0" ) @pytest.mark.parametrize("invalid_netcdf", [None, False]) def test_complex_error(self, invalid_netcdf) -> None: import h5netcdf expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) save_kwargs = {"invalid_netcdf": invalid_netcdf} with pytest.raises( h5netcdf.CompatibilityError, match="are not a supported NetCDF feature" ): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_equal(expected, actual) def test_numpy_bool_(self) -> None: # h5netcdf loads booleans as numpy.bool_, this type needs to be supported # when writing invalid_netcdf datasets in order to support a roundtrip expected = Dataset({"x": ("y", np.ones(5), {"numpy_bool": np.bool_(True)})}) save_kwargs = {"invalid_netcdf": True} with pytest.warns(UserWarning, match="You are writing invalid netcdf features"): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_identical(expected, actual) def test_cross_engine_read_write_netcdf4(self) -> None: # Drop dim3, because its labels include strings. These appear to be # not properly read with python-netCDF4, which converts them into # unicode instead of leaving them as bytes. data = create_test_data().drop_vars("dim3") data.attrs["foo"] = "bar" valid_engines: list[T_NetcdfEngine] = ["netcdf4", "h5netcdf"] for write_engine in valid_engines: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: assert_identical(data, actual) def test_read_byte_attrs_as_unicode(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as nc: nc.foo = b"bar" with open_dataset(tmp_file) as actual: expected = Dataset(attrs={"foo": "bar"}) assert_identical(expected, actual) def test_compression_encoding_h5py(self) -> None: ENCODINGS: tuple[tuple[dict[str, Any], dict[str, Any]], ...] 
= ( # h5py style compression with gzip codec will be converted to # NetCDF4-Python style on round-trip ( {"compression": "gzip", "compression_opts": 9}, {"zlib": True, "complevel": 9}, ), # What can't be expressed in NetCDF4-Python style is # round-tripped unaltered ( {"compression": "lzf", "compression_opts": None}, {"compression": "lzf", "compression_opts": None}, ), # If both styles are used together, h5py format takes precedence ( { "compression": "lzf", "compression_opts": None, "zlib": True, "complevel": 9, }, {"compression": "lzf", "compression_opts": None}, ), ) for compr_in, compr_out in ENCODINGS: data = create_test_data() compr_common = { "chunksizes": (5, 5), "fletcher32": True, "shuffle": True, "original_shape": data.var2.shape, } data["var2"].encoding.update(compr_in) data["var2"].encoding.update(compr_common) compr_out.update(compr_common) data["scalar"] = ("scalar_dim", np.array([2.0])) data["scalar"] = data["scalar"][0] with self.roundtrip(data) as actual: for k, v in compr_out.items(): assert v == actual["var2"].encoding[k] def test_compression_check_encoding_h5py(self) -> None: """When mismatched h5py and NetCDF4-Python encodings are expressed in to_netcdf(encoding=...), must raise ValueError """ data = Dataset({"x": ("y", np.arange(10.0))}) # Compatible encodings are graciously supported with create_tmp_file() as tmp_file: data.to_netcdf( tmp_file, engine="h5netcdf", encoding={ "x": { "compression": "gzip", "zlib": True, "compression_opts": 6, "complevel": 6, } }, ) with open_dataset(tmp_file, engine="h5netcdf") as actual: assert actual.x.encoding["zlib"] is True assert actual.x.encoding["complevel"] == 6 # Incompatible encodings cause a crash with create_tmp_file() as tmp_file: with pytest.raises( ValueError, match=r"'zlib' and 'compression' encodings mismatch" ): data.to_netcdf( tmp_file, engine="h5netcdf", encoding={"x": {"compression": "lzf", "zlib": True}}, ) with create_tmp_file() as tmp_file: with pytest.raises( ValueError, match=r"'complevel' and 'compression_opts' encodings mismatch", ): data.to_netcdf( tmp_file, engine="h5netcdf", encoding={ "x": { "compression": "gzip", "compression_opts": 5, "complevel": 6, } }, ) def test_dump_encodings_h5py(self) -> None: # regression test for #709 ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = {"encoding": {"x": {"compression": "gzip", "compression_opts": 9}}} with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["zlib"] assert actual.x.encoding["complevel"] == 9 kwargs = {"encoding": {"x": {"compression": "lzf", "compression_opts": None}}} with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["compression"] == "lzf" assert actual.x.encoding["compression_opts"] is None def test_decode_utf8_warning(self) -> None: title = b"\xc3" with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as f: f.title = title with pytest.warns(UnicodeWarning, match="returning bytes undecoded") as w: ds = xr.load_dataset(tmp_file, engine="h5netcdf") assert ds.title == title assert "attribute 'title' of h5netcdf object '/'" in str(w[0].message) def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None: with pytest.raises(ValueError, match=byte_attrs_dataset["h5netcdf_error"]): super().test_byte_attrs(byte_attrs_dataset) @requires_h5netcdf_1_4_0_or_above def test_roundtrip_complex(self): expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) with self.roundtrip(expected) as actual: assert_equal(expected, actual) def test_phony_dims_warning(self) -> None: 
import h5py foo_data = np.arange(125).reshape(5, 5, 5) bar_data = np.arange(625).reshape(25, 5, 5) var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data} with create_tmp_file() as tmp_file: with h5py.File(tmp_file, "w") as f: grps = ["bar", "baz"] for grp in grps: fx = f.create_group(grp) for k, v in var.items(): fx.create_dataset(k, data=v) with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"): with xr.open_dataset(tmp_file, engine="h5netcdf", group="bar") as ds: assert ds.sizes == { "phony_dim_0": 5, "phony_dim_1": 5, "phony_dim_2": 5, "phony_dim_3": 25, } @requires_h5netcdf @requires_netCDF4 class TestH5NetCDFAlreadyOpen: def test_open_dataset_group(self) -> None: import h5netcdf with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: group = nc.createGroup("g") v = group.createVariable("x", "int") v[...] = 42 kwargs = {"decode_vlen_strings": True} h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5["g"]) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5, group="g") with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) def test_deepcopy(self) -> None: import h5netcdf with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 10) v = nc.createVariable("y", np.int32, ("x",)) v[:] = np.arange(10) kwargs = {"decode_vlen_strings": True} h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5) with open_dataset(store) as ds: copied = ds.copy(deep=True) expected = Dataset({"y": ("x", np.arange(10))}) assert_identical(expected, copied) @requires_h5netcdf class TestH5NetCDFFileObject(TestH5NetCDFData, FileObjectNetCDF): engine: T_NetcdfEngine = "h5netcdf" def test_open_badbytes(self) -> None: with pytest.raises( ValueError, match=r"match in any of xarray's currently installed IO" ): with open_dataset(b"garbage"): pass with pytest.raises( ValueError, match=r"not the signature of a valid netCDF4 file" ): with open_dataset(b"garbage", engine="h5netcdf"): pass with pytest.raises( ValueError, match=r"not the signature of a valid netCDF4 file" ): with open_dataset(BytesIO(b"garbage"), engine="h5netcdf"): pass def test_open_twice(self) -> None: expected = create_test_data() with create_tmp_file() as tmp_file: expected.to_netcdf(tmp_file, engine=self.engine) with open(tmp_file, "rb") as f: with open_dataset(f, engine=self.engine): with open_dataset(f, engine=self.engine): pass # should not crash @requires_scipy def test_open_fileobj(self) -> None: # open in-memory datasets instead of local file paths expected = create_test_data().drop_vars("dim3") expected.attrs["foo"] = "bar" with create_tmp_file() as tmp_file: expected.to_netcdf(tmp_file, engine="h5netcdf") with open(tmp_file, "rb") as f: with open_dataset(f, engine="h5netcdf") as actual: assert_identical(expected, actual) f.seek(0) with open_dataset(f) as actual: assert_identical(expected, actual) f.seek(0) with BytesIO(f.read()) as bio: with open_dataset(bio, engine="h5netcdf") as actual: assert_identical(expected, actual) f.seek(0) with pytest.raises(TypeError, match="not a valid NetCDF 3"): open_dataset(f, engine="scipy") # TODO: this additional open is required since scipy seems to close the file # when it fails on the TypeError (though didn't when we used # `raises_regex`?). 
Ref https://github.com/pydata/xarray/pull/5191 with open(tmp_file, "rb") as f: f.seek(8) with open_dataset(f): # ensure file gets closed pass @requires_fsspec def test_fsspec(self) -> None: expected = create_test_data() with create_tmp_file() as tmp_file: expected.to_netcdf(tmp_file, engine="h5netcdf") with fsspec.open(tmp_file, "rb") as f: with open_dataset(f, engine="h5netcdf") as actual: assert_identical(actual, expected) # fsspec.open() creates a pickleable file, unlike open() with pickle.loads(pickle.dumps(actual)) as unpickled: assert_identical(unpickled, expected) @requires_h5netcdf class TestH5NetCDFInMemoryData(InMemoryNetCDFWithGroups): engine: T_NetcdfEngine = "h5netcdf" @requires_h5netcdf @requires_dask @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") class TestH5NetCDFViaDaskData(TestH5NetCDFData): @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} open_kwargs.setdefault("chunks", -1) with TestH5NetCDFData.roundtrip( self, data, save_kwargs, open_kwargs, allow_cleanup_failure ) as ds: yield ds @pytest.mark.skip(reason="caching behavior differs for dask") def test_dataset_caching(self) -> None: pass def test_write_inconsistent_chunks(self) -> None: # Construct two variables with the same dimensions, but different # chunk sizes. x = da.zeros((100, 100), dtype="f4", chunks=(50, 100)) x = DataArray(data=x, dims=("lat", "lon"), name="x") x.encoding["chunksizes"] = (50, 100) x.encoding["original_shape"] = (100, 100) y = da.ones((100, 100), dtype="f4", chunks=(100, 50)) y = DataArray(data=y, dims=("lat", "lon"), name="y") y.encoding["chunksizes"] = (100, 50) y.encoding["original_shape"] = (100, 100) # Put them both into the same dataset ds = Dataset({"x": x, "y": y}) with self.roundtrip(ds) as actual: assert actual["x"].encoding["chunksizes"] == (50, 100) assert actual["y"].encoding["chunksizes"] == (100, 50) @requires_netCDF4 @requires_h5netcdf def test_memoryview_write_h5netcdf_read_netcdf4() -> None: original = create_test_data() result = original.to_netcdf(engine="h5netcdf") roundtrip = load_dataset(result, engine="netcdf4") assert_identical(roundtrip, original) @requires_netCDF4 @requires_h5netcdf def test_memoryview_write_netcdf4_read_h5netcdf() -> None: original = create_test_data() result = original.to_netcdf(engine="netcdf4") roundtrip = load_dataset(result, engine="h5netcdf") assert_identical(roundtrip, original) @network @requires_h5netcdf_ros3 class TestH5NetCDFDataRos3Driver(TestCommon): engine: T_NetcdfEngine = "h5netcdf" test_remote_dataset: str = "https://archive.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc" @pytest.mark.filterwarnings("ignore:Duplicate dimension names") def test_get_variable_list(self) -> None: with open_dataset( self.test_remote_dataset, engine="h5netcdf", backend_kwargs={"driver": "ros3"}, ) as actual: assert "Temperature" in list(actual) @pytest.mark.filterwarnings("ignore:Duplicate dimension names") def test_get_variable_list_empty_driver_kwds(self) -> None: driver_kwds = { "secret_id": b"", "secret_key": b"", } backend_kwargs = {"driver": "ros3", "driver_kwds": driver_kwds} with open_dataset( self.test_remote_dataset, engine="h5netcdf", backend_kwargs=backend_kwargs ) as actual: assert "Temperature" in list(actual) @pytest.fixture(params=["scipy", "netcdf4", "h5netcdf", "zarr"]) def readengine(request): return request.param @pytest.fixture(params=[1, 20]) 
def nfiles(request): return request.param @pytest.fixture(params=[5, None]) def file_cache_maxsize(request): maxsize = request.param if maxsize is not None: with set_options(file_cache_maxsize=maxsize): yield maxsize else: yield maxsize @pytest.fixture(params=[True, False]) def parallel(request): return request.param @pytest.fixture(params=[None, 5]) def chunks(request): return request.param @pytest.fixture(params=["tmp_path", "ZipStore", "Dict"]) def tmp_store(request, tmp_path): if request.param == "tmp_path": return tmp_path elif request.param == "ZipStore": from zarr.storage import ZipStore path = tmp_path / "store.zip" return ZipStore(path) elif request.param == "Dict": return dict() else: raise ValueError("not supported") # using pytest.mark.skipif does not work so this is a workaround def skip_if_not_engine(engine): if engine == "netcdf4": pytest.importorskip("netCDF4") else: pytest.importorskip(engine) @requires_dask @pytest.mark.filterwarnings("ignore:use make_scale(name) instead") @pytest.mark.skip( reason="Flaky test which can cause the worker to crash (so don't xfail). Very open to contributions fixing this" ) def test_open_mfdataset_manyfiles( readengine, nfiles, parallel, chunks, file_cache_maxsize ): # skip certain combinations skip_if_not_engine(readengine) randdata = np.random.randn(nfiles) original = Dataset({"foo": ("x", randdata)}) # test standard open_mfdataset approach with too many files with create_tmp_files(nfiles) as tmpfiles: # split into multiple sets of temp files for ii in original.x.values: subds = original.isel(x=slice(ii, ii + 1)) if readengine != "zarr": subds.to_netcdf(tmpfiles[ii], engine=readengine) else: # if writeengine == "zarr": subds.to_zarr(store=tmpfiles[ii]) # check that calculation on opened datasets works properly with open_mfdataset( tmpfiles, combine="nested", concat_dim="x", engine=readengine, parallel=parallel, chunks=chunks if (not chunks and readengine != "zarr") else "auto", ) as actual: # check that using open_mfdataset returns dask arrays for variables assert isinstance(actual["foo"].data, dask_array_type) assert_identical(original, actual) @requires_netCDF4 @requires_dask def test_open_mfdataset_can_open_path_objects() -> None: dataset = os.path.join(os.path.dirname(__file__), "data", "example_1.nc") with open_mfdataset(Path(dataset)) as actual: assert isinstance(actual, Dataset) @requires_netCDF4 @requires_dask def test_open_mfdataset_list_attr() -> None: """ Case when an attribute of type list differs across the multiple files """ with create_tmp_files(2) as nfiles: for i in range(2): with nc4.Dataset(nfiles[i], "w") as f: f.createDimension("x", 3) vlvar = f.createVariable("test_var", np.int32, ("x")) # here create an attribute as a list vlvar.test_attr = [f"string a {i}", f"string b {i}"] vlvar[:] = np.arange(3) with open_dataset(nfiles[0]) as ds1: with open_dataset(nfiles[1]) as ds2: original = xr.concat([ds1, ds2], dim="x") with xr.open_mfdataset( [nfiles[0], nfiles[1]], combine="nested", concat_dim="x" ) as actual: assert_identical(actual, original) @requires_scipy_or_netCDF4 @requires_dask class TestOpenMFDatasetWithDataVarsAndCoordsKw: coord_name = "lon" var_name = "v1" @contextlib.contextmanager def setup_files_and_datasets(self, *, fuzz=0, new_combine_kwargs: bool = False): ds1, ds2 = self.gen_datasets_with_common_coord_and_time() # to test join='exact' ds1["x"] = ds1.x + fuzz with create_tmp_file() as tmpfile1: with create_tmp_file() as tmpfile2: # save data to the temporary files ds1.to_netcdf(tmpfile1)
ds2.to_netcdf(tmpfile2) with set_options(use_new_combine_kwarg_defaults=new_combine_kwargs): yield [tmpfile1, tmpfile2], [ds1, ds2] def gen_datasets_with_common_coord_and_time(self): # create coordinate data nx = 10 nt = 10 x = np.arange(nx) t1 = np.arange(nt) t2 = np.arange(nt, 2 * nt, 1) v1 = np.random.randn(nt, nx) v2 = np.random.randn(nt, nx) ds1 = Dataset( data_vars={self.var_name: (["t", "x"], v1), self.coord_name: ("x", 2 * x)}, coords={"t": (["t"], t1), "x": (["x"], x)}, ) ds2 = Dataset( data_vars={self.var_name: (["t", "x"], v2), self.coord_name: ("x", 2 * x)}, coords={"t": (["t"], t2), "x": (["x"], x)}, ) return ds1, ds2 @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize("opt", ["all", "minimal", "different"]) @pytest.mark.parametrize("join", ["outer", "inner", "left", "right"]) def test_open_mfdataset_does_same_as_concat( self, combine, concat_dim, opt, join ) -> None: with self.setup_files_and_datasets() as (files, [ds1, ds2]): if combine == "by_coords": files.reverse() with open_mfdataset( files, data_vars=opt, combine=combine, concat_dim=concat_dim, join=join, compat="equals", ) as ds: ds_expect = xr.concat( [ds1, ds2], data_vars=opt, dim="t", join=join, compat="equals" ) assert_identical(ds, ds_expect) @pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False]) @pytest.mark.parametrize( ["combine_attrs", "attrs", "expected", "expect_error"], ( pytest.param("drop", [{"a": 1}, {"a": 2}], {}, False, id="drop"), pytest.param( "override", [{"a": 1}, {"a": 2}], {"a": 1}, False, id="override" ), pytest.param( "no_conflicts", [{"a": 1}, {"a": 2}], None, True, id="no_conflicts" ), pytest.param( "identical", [{"a": 1, "b": 2}, {"a": 1, "c": 3}], None, True, id="identical", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": -1, "c": 3}], {"a": 1, "c": 3}, False, id="drop_conflicts", ), ), ) def test_open_mfdataset_dataset_combine_attrs( self, use_new_combine_kwarg_defaults, combine_attrs, attrs, expected, expect_error, ): with self.setup_files_and_datasets() as (files, [_ds1, _ds2]): # Give the files an inconsistent attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds.attrs = attrs[i] ds.close() ds.to_netcdf(f) with set_options( use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults ): warning: contextlib.AbstractContextManager = ( pytest.warns(FutureWarning) if not use_new_combine_kwarg_defaults else contextlib.nullcontext() ) error: contextlib.AbstractContextManager = ( pytest.raises(xr.MergeError) if expect_error else contextlib.nullcontext() ) with warning: with error: with xr.open_mfdataset( files, combine="nested", concat_dim="t", combine_attrs=combine_attrs, ) as ds: assert ds.attrs == expected def test_open_mfdataset_dataset_attr_by_coords(self) -> None: """ Case when an attribute differs across the multiple files """ with self.setup_files_and_datasets() as (files, [_ds1, _ds2]): # Give the files an inconsistent attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds.attrs["test_dataset_attr"] = 10 + i ds.close() ds.to_netcdf(f) with set_options(use_new_combine_kwarg_defaults=True): with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds: assert ds.test_dataset_attr == 10 def test_open_mfdataset_dataarray_attr_by_coords(self) -> None: """ Case when an attribute of a member DataArray differs across the multiple files """ with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [_ds1, _ds2], ): # Give the files an inconsistent 
attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds["v1"].attrs["test_dataarray_attr"] = i ds.close() ds.to_netcdf(f) with xr.open_mfdataset( files, data_vars=None, combine="nested", concat_dim="t" ) as ds: assert ds["v1"].test_dataarray_attr == 0 @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize( "kwargs", [ {"data_vars": "all"}, {"data_vars": "minimal"}, { "data_vars": "all", "coords": "different", "compat": "no_conflicts", }, # old defaults { "data_vars": None, "coords": "minimal", "compat": "override", }, # new defaults {"data_vars": "different", "compat": "no_conflicts"}, {}, ], ) def test_open_mfdataset_exact_join_raises_error( self, combine, concat_dim, kwargs ) -> None: with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as ( files, _, ): if combine == "by_coords": files.reverse() with pytest.raises( ValueError, match="cannot align objects with join='exact'" ): open_mfdataset( files, **kwargs, combine=combine, concat_dim=concat_dim, join="exact", ) def test_open_mfdataset_defaults_with_exact_join_warns_as_well_as_raising( self, ) -> None: with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as ( files, _, ): files.reverse() with pytest.raises( ValueError, match="cannot align objects with join='exact'" ): open_mfdataset(files, combine="by_coords") def test_common_coord_when_datavars_all(self) -> None: opt: Final = "all" with self.setup_files_and_datasets() as (files, [ds1, ds2]): # open the files with the data_var option with open_mfdataset( files, data_vars=opt, combine="nested", concat_dim="t" ) as ds: coord_shape = ds[self.coord_name].shape coord_shape1 = ds1[self.coord_name].shape coord_shape2 = ds2[self.coord_name].shape var_shape = ds[self.var_name].shape assert var_shape == coord_shape assert coord_shape1 != coord_shape assert coord_shape2 != coord_shape def test_common_coord_when_datavars_minimal(self) -> None: opt: Final = "minimal" with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [ds1, ds2], ): # open the files using data_vars option with open_mfdataset( files, data_vars=opt, combine="nested", concat_dim="t" ) as ds: coord_shape = ds[self.coord_name].shape coord_shape1 = ds1[self.coord_name].shape coord_shape2 = ds2[self.coord_name].shape var_shape = ds[self.var_name].shape assert var_shape != coord_shape assert coord_shape1 == coord_shape assert coord_shape2 == coord_shape def test_invalid_data_vars_value_should_fail(self) -> None: with self.setup_files_and_datasets() as (files, _): with pytest.raises(ValueError): with open_mfdataset(files, data_vars="minimum", combine="by_coords"): # type: ignore[arg-type] pass # test invalid coord parameter with pytest.raises(ValueError): with open_mfdataset(files, coords="minimum", combine="by_coords"): pass @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize( "kwargs", [{"data_vars": "different"}, {"coords": "different"}] ) def test_open_mfdataset_warns_when_kwargs_set_to_different( self, combine, concat_dim, kwargs ) -> None: with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [ds1, ds2], ): if combine == "by_coords": files.reverse() with pytest.raises( ValueError, match="Previously the default was `compat='no_conflicts'`" ): open_mfdataset(files, combine=combine, concat_dim=concat_dim, **kwargs) with pytest.raises( ValueError, match="Previously the default was `compat='equals'`" ): xr.concat([ds1, ds2], dim="t", 
**kwargs) with set_options(use_new_combine_kwarg_defaults=False): expectation: contextlib.AbstractContextManager = ( pytest.warns( FutureWarning, match="will change from data_vars='all'", ) if "data_vars" not in kwargs else contextlib.nullcontext() ) with pytest.warns( FutureWarning, match="will change from compat='equals'", ): with expectation: ds_expect = xr.concat([ds1, ds2], dim="t", **kwargs) with pytest.warns( FutureWarning, match="will change from compat='no_conflicts'" ): with expectation: with open_mfdataset( files, combine=combine, concat_dim=concat_dim, **kwargs ) as ds: assert_identical(ds, ds_expect) @requires_dask @requires_scipy @requires_netCDF4 class TestDask(DatasetIOBase): @contextlib.contextmanager def create_store(self): yield Dataset() @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): yield data.chunk() # Override methods in DatasetIOBase - not applicable to dask def test_roundtrip_string_encoded_characters(self) -> None: pass def test_roundtrip_coordinates_with_space(self) -> None: pass def test_roundtrip_numpy_datetime_data(self) -> None: # Override method in DatasetIOBase - remove not applicable # save_kwargs times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"], unit="ns") expected = Dataset({"t": ("t", times), "t0": times[0]}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_cftime_datetime_data(self) -> None: # Override method in DatasetIOBase - remove not applicable # save_kwargs from xarray.tests.test_coding_times import _all_cftime_date_types date_types = _all_cftime_date_types() for date_type in date_types.values(): times = [date_type(1, 1, 1), date_type(1, 1, 2)] expected = Dataset({"t": ("t", times), "t0": times[0]}) expected_decoded_t = np.array(times) expected_decoded_t0 = np.array([date_type(1, 1, 1)]) with self.roundtrip(expected) as actual: assert_array_equal(actual.t.values, expected_decoded_t) assert_array_equal(actual.t0.values, expected_decoded_t0) def test_write_store(self) -> None: # Override method in DatasetIOBase - not applicable to dask pass def test_dataset_caching(self) -> None: expected = Dataset({"foo": ("x", [5, 6, 7])}) with self.roundtrip(expected) as actual: assert not actual.foo.variable._in_memory _ = actual.foo.values # no caching assert not actual.foo.variable._in_memory def test_open_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5),) assert_identical(original, actual) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", chunks={"x": 3} ) as actual: assert actual.foo.variable.data.chunks == ((3, 2, 3, 2),) with pytest.raises(OSError, match=r"no files to open"): open_mfdataset("foo-bar-baz-*.nc") with pytest.raises(ValueError, match=r"wild-card"): open_mfdataset("http://some/remote/uri") @requires_fsspec def test_open_mfdataset_no_files(self) -> None: pytest.importorskip("aiobotocore") # glob is attempted as of #4823, but finds no files with pytest.raises(OSError, match=r"no files"): open_mfdataset("http://some/remote/uri", engine="zarr") def test_open_mfdataset_2d(self) -> None: original = Dataset({"foo": (["x", "y"], 
np.random.randn(10, 8))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: with create_tmp_file() as tmp3: with create_tmp_file() as tmp4: original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], ) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5), (4, 4)) assert_identical(original, actual) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], chunks={"x": 3, "y": 2}, ) as actual: assert actual.foo.variable.data.chunks == ( (3, 2, 3, 2), (2, 2, 2, 2), ) def test_open_mfdataset_pathlib(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: tmp1 = Path(tmps1) tmp2 = Path(tmps2) original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(original, actual) def test_open_mfdataset_2d_pathlib(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: with create_tmp_file() as tmps3: with create_tmp_file() as tmps4: tmp1 = Path(tmps1) tmp2 = Path(tmps2) tmp3 = Path(tmps3) tmp4 = Path(tmps4) original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], ) as actual: assert_identical(original, actual) def test_open_mfdataset_2(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(original, actual) def test_open_mfdataset_with_ignore(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, "non-existent-file.nc", tmp2], concat_dim="x", combine="nested", errors="ignore", ) as actual: assert_identical(original, actual) def test_open_mfdataset_with_warn(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with pytest.warns(UserWarning, match=r"Ignoring."): with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, "non-existent-file.nc", tmp2], concat_dim="x", combine="nested", errors="warn", ) as actual: assert_identical(original, actual) def test_open_mfdataset_2d_with_ignore(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4): original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) 
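# The write below completes the 2x2 grid of tiles; a non-existent path is then mixed into the nested list and, with errors="ignore", open_mfdataset should skip it silently and still reconstruct `original`.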
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], ["non-existent-file.nc", tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], errors="ignore", ) as actual: assert_identical(original, actual) def test_open_mfdataset_2d_with_warn(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with pytest.warns(UserWarning, match=r"Ignoring."): with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4): original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2, "non-existent-file.nc"], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], errors="warn", ) as actual: assert_identical(original, actual) def test_attrs_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: # presumes that attributes inherited from # first dataset loaded assert actual.test1 == ds1.test1 # attributes from ds2 are not retained, e.g., with pytest.raises(AttributeError, match=r"no attribute"): _ = actual.test2 def test_open_mfdataset_attrs_file(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2 ) as actual: # attributes are inherited from the master file assert actual.attrs["test2"] == ds2.attrs["test2"] # attributes from ds1 are not retained, e.g., assert "test1" not in actual.attrs def test_open_mfdataset_attrs_file_path(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmps1, tmps2): tmp1 = Path(tmps1) tmp2 = Path(tmps2) ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2 ) as actual: # attributes are inherited from the master file assert actual.attrs["test2"] == ds2.attrs["test2"] # attributes from ds1 are not retained, e.g., assert "test1" not in actual.attrs def test_open_mfdataset_auto_combine(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset([tmp2, tmp1], combine="by_coords") as actual: assert_identical(original, actual) def test_open_mfdataset_raise_on_bad_combine_args(self) -> None: # Regression test for unhelpful error shown in #5230 original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with pytest.raises(ValueError, match="`concat_dim` has no effect"): open_mfdataset([tmp1, tmp2], 
concat_dim="x") def test_encoding_mfdataset(self) -> None: original = Dataset( { "foo": ("t", np.random.randn(10)), "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")), } ) original.t.encoding["units"] = "days since 2010-01-01" with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(t=slice(5)) ds2 = original.isel(t=slice(5, 10)) ds1.t.encoding["units"] = "days since 2010-01-01" ds2.t.encoding["units"] = "days since 2000-01-01" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="t" ) as actual: assert actual.t.encoding["units"] == original.t.encoding["units"] assert actual.t.encoding["units"] == ds1.t.encoding["units"] assert actual.t.encoding["units"] != ds2.t.encoding["units"] def test_encoding_mfdataset_new_defaults(self) -> None: original = Dataset( { "foo": ("t", np.random.randn(10)), "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")), } ) original.t.encoding["units"] = "days since 2010-01-01" with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(t=slice(5)) ds2 = original.isel(t=slice(5, 10)) ds1.t.encoding["units"] = "days since 2010-01-01" ds2.t.encoding["units"] = "days since 2000-01-01" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) for setting in [True, False]: with set_options(use_new_combine_kwarg_defaults=setting): with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="t" ) as old: assert ( old.t.encoding["units"] == original.t.encoding["units"] ) assert old.t.encoding["units"] == ds1.t.encoding["units"] assert old.t.encoding["units"] != ds2.t.encoding["units"] with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises( AlignmentError, match="If you are intending to concatenate" ): open_mfdataset([tmp1, tmp2], combine="nested") def test_preprocess_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) def preprocess(ds): return ds.assign_coords(z=0) expected = preprocess(original) with open_mfdataset( tmp, preprocess=preprocess, combine="by_coords" ) as actual: assert_identical(expected, actual) def test_save_mfdataset_roundtrip(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: save_mfdataset(datasets, [tmp1, tmp2]) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(actual, original) def test_save_mfdataset_invalid(self) -> None: ds = Dataset() with pytest.raises(ValueError, match=r"cannot use mode"): save_mfdataset([ds, ds], ["same", "same"]) with pytest.raises(ValueError, match=r"same length"): save_mfdataset([ds, ds], ["only one path"]) def test_save_mfdataset_invalid_dataarray(self) -> None: # regression test for GH1555 da = DataArray([1, 2]) with pytest.raises(TypeError, match=r"supports writing Dataset"): save_mfdataset([da], ["dataarray"]) def test_save_mfdataset_pathlib_roundtrip(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: tmp1 = Path(tmps1) tmp2 = Path(tmps2) save_mfdataset(datasets, [tmp1, tmp2]) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(actual, original) def test_save_mfdataset_pass_kwargs(self) -> None: # 
create a timeseries to store in a netCDF file times = [0, 1] time = xr.DataArray(times, dims=("time",)) # create a simple dataset to write using save_mfdataset test_ds = xr.Dataset() test_ds["time"] = time # make sure the times are written as double and # turn off fill values encoding = dict(time=dict(dtype="double")) unlimited_dims = ["time"] # set the output file name output_path = "test.nc" # attempt to write the dataset with the encoding and unlimited args # passed through xr.save_mfdataset( [test_ds], [output_path], encoding=encoding, unlimited_dims=unlimited_dims ) def test_open_and_do_math(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_mfdataset(tmp, combine="by_coords") as ds: actual = 1.0 * ds assert_allclose(original, actual, decode_bytes=False) @pytest.mark.parametrize( "kwargs", [pytest.param({"concat_dim": None}, id="none"), pytest.param({}, id="default")], ) def test_open_mfdataset_concat_dim(self, kwargs) -> None: with set_options(use_new_combine_kwarg_defaults=True): with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: data = Dataset({"x": 0}) data.to_netcdf(tmp1) Dataset({"x": np.nan}).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], **kwargs, combine="nested" ) as actual: assert_identical(data, actual) def test_open_dataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(tmp, chunks={"x": 5}) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5),) assert_identical(original, actual) with open_dataset(tmp, chunks=5) as actual: assert_identical(original, actual) with open_dataset(tmp) as actual: assert isinstance(actual.foo.variable.data, np.ndarray) assert_identical(original, actual) def test_open_single_dataset(self) -> None: # Test for issue GH #1988. This makes sure that the # concat_dim is utilized when specified in open_mfdataset(). rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) dim = DataArray([100], name="baz", dims="baz") expected = Dataset( {"foo": (("baz", "x"), rnddata[np.newaxis, :])}, {"baz": [100]} ) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_mfdataset( [tmp], concat_dim=dim, data_vars="all", combine="nested" ) as actual: assert_identical(expected, actual) def test_open_multi_dataset(self) -> None: # Test for issue GH #1988 and #2647. This makes sure that the # concat_dim is utilized when specified in open_mfdataset(). # The additional wrinkle is to ensure that a length greater # than one is tested as well due to numpy's implicit casting # of 1-length arrays to booleans in tests, which allowed # #2647 to still pass the test_open_single_dataset(), # which is itself still needed as-is because the original # bug caused one-length arrays to not be used correctly # in concatenation. 
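# Concretely: the same 1-D dataset is written to two files and concatenated along the length-2 'baz' coordinate passed via concat_dim, so the expected result tiles the data to shape (2, 10).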
rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) dim = DataArray([100, 150], name="baz", dims="baz") expected = Dataset( {"foo": (("baz", "x"), np.tile(rnddata[np.newaxis, :], (2, 1)))}, {"baz": [100, 150]}, ) with create_tmp_file() as tmp1, create_tmp_file() as tmp2: original.to_netcdf(tmp1) original.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim=dim, data_vars="all", combine="nested" ) as actual: assert_identical(expected, actual) @requires_cftime def test_open_dataset_cftime_autochunk(self) -> None: """Create a dataset with cftime datetime objects and ensure that auto-chunking works correctly.""" import cftime original = xr.Dataset( { "foo": ("time", [0.0]), "time_bnds": ( ("time", "bnds"), [ [ cftime.Datetime360Day(2005, 12, 1, 0, 0, 0, 0), cftime.Datetime360Day(2005, 12, 2, 0, 0, 0, 0), ] ], ), }, {"time": [cftime.Datetime360Day(2005, 12, 1, 12, 0, 0, 0)]}, ) with self.roundtrip(original, open_kwargs={"chunks": "auto"}) as actual: assert isinstance(actual.time_bnds.variable.data, da.Array) assert _contains_cftime_datetimes(actual.time) assert_identical(original, actual) # Flaky test. Very open to contributions on fixing this @pytest.mark.flaky def test_dask_roundtrip(self) -> None: with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) chunks = {"dim1": 4, "dim2": 4, "dim3": 4, "time": 10} with open_dataset(tmp, chunks=chunks) as dask_ds: assert_identical(data, dask_ds) with create_tmp_file() as tmp2: dask_ds.to_netcdf(tmp2) with open_dataset(tmp2) as on_disk: assert_identical(data, on_disk) def test_deterministic_names(self) -> None: with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) with open_mfdataset(tmp, combine="by_coords") as ds: original_names = {k: v.data.name for k, v in ds.data_vars.items()} with open_mfdataset(tmp, combine="by_coords") as ds: repeat_names = {k: v.data.name for k, v in ds.data_vars.items()} for var_name, dask_name in original_names.items(): assert var_name in dask_name assert dask_name[:13] == "open_dataset-" assert original_names == repeat_names def test_dataarray_compute(self) -> None: # Test DataArray.compute() on dask backend. 
# The test for Dataset.compute() is already in DatasetIOBase; # however dask is the only tested backend which supports DataArrays actual = DataArray([1, 2]).chunk() computed = actual.compute() assert not actual._in_memory assert computed._in_memory assert_allclose(actual, computed, decode_bytes=False) def test_save_mfdataset_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed original = Dataset({"foo": ("x", np.random.randn(10))}).chunk() datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp1: with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp2: delayed_obj = save_mfdataset( datasets, [tmp1, tmp2], engine=self.engine, compute=False ) assert isinstance(delayed_obj, Delayed) delayed_obj.compute() with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="x" ) as actual: assert_identical(actual, original) def test_load_dataset(self) -> None: with create_tmp_file() as tmp: original = Dataset({"foo": ("x", np.random.randn(10))}) original.to_netcdf(tmp) ds = load_dataset(tmp) assert_identical(original, ds) # this would fail if we used open_dataset instead of load_dataset ds.to_netcdf(tmp) def test_load_dataarray(self) -> None: with create_tmp_file() as tmp: original = DataArray(np.random.randn(10), dims=["x"]) original.to_netcdf(tmp) da = load_dataarray(tmp) assert_identical(original, da) # this would fail if we used open_dataarray instead of # load_dataarray da.to_netcdf(tmp) def test_load_datatree(self) -> None: with create_tmp_file() as tmp: original = DataTree(Dataset({"foo": ("x", np.random.randn(10))})) original.to_netcdf(tmp) dt = load_datatree(tmp) xr.testing.assert_identical(original, dt) # this would fail if we used open_datatree instead of # load_datatree dt.to_netcdf(tmp) @pytest.mark.skipif( ON_WINDOWS, reason="counting number of tasks in graph fails on windows for some reason", ) def test_inline_array(self) -> None: with create_tmp_file() as tmp: original = Dataset({"foo": ("x", np.random.randn(10))}) original.to_netcdf(tmp) chunks = {"time": 10} def num_graph_nodes(obj): return len(obj.__dask_graph__()) with ( open_dataset(tmp, inline_array=False, chunks=chunks) as not_inlined_ds, open_dataset(tmp, inline_array=True, chunks=chunks) as inlined_ds, ): assert num_graph_nodes(inlined_ds) < num_graph_nodes(not_inlined_ds) with ( open_dataarray( tmp, inline_array=False, chunks=chunks ) as not_inlined_da, open_dataarray(tmp, inline_array=True, chunks=chunks) as inlined_da, ): assert num_graph_nodes(inlined_da) < num_graph_nodes(not_inlined_da) @requires_scipy_or_netCDF4 @requires_pydap @pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated") class TestPydap: def convert_to_pydap_dataset(self, original): from pydap.model import BaseType, DatasetType ds = DatasetType("bears", **original.attrs) for key, var in original.data_vars.items(): ds[key] = BaseType( key, var.values, dtype=var.values.dtype.kind, dims=var.dims, **var.attrs ) # check all dims are stored in ds for d in original.coords: ds[d] = BaseType(d, original[d].values, dims=(d,), **original[d].attrs) return ds @contextlib.contextmanager def create_datasets(self, **kwargs): with open_example_dataset("bears.nc") as expected: # print("QQ0:", expected["bears"].load()) pydap_ds = self.convert_to_pydap_dataset(expected) actual = open_dataset(PydapDataStore(pydap_ds)) # netcdf converts string to byte not unicode # fixed in pydap 3.5.6. 
https://github.com/pydap/pydap/issues/510 actual["bears"].values = actual["bears"].values.astype("S") yield actual, expected def test_cmp_local_file(self) -> None: with self.create_datasets() as (actual, expected): assert_equal(actual, expected) # global attributes should be global attributes on the dataset assert "NC_GLOBAL" not in actual.attrs assert "history" in actual.attrs # we don't check attributes exactly with assertDatasetIdentical() # because the test DAP server seems to insert some extra # attributes not found in the netCDF file. assert actual.attrs.keys() == expected.attrs.keys() with self.create_datasets() as (actual, expected): assert_equal(actual[{"l": 2}], expected[{"l": 2}]) with self.create_datasets() as (actual, expected): # always return arrays and not scalars # scalars will be promoted to unicode for numpy >= 2.3.0 assert_equal(actual.isel(i=[0], j=[-1]), expected.isel(i=[0], j=[-1])) with self.create_datasets() as (actual, expected): assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2))) with self.create_datasets() as (actual, expected): indexers = {"i": [1, 0, 0], "j": [1, 2, 0, 1]} assert_equal(actual.isel(**indexers), expected.isel(**indexers)) with self.create_datasets() as (actual, expected): indexers2 = { "i": DataArray([0, 1, 0], dims="a"), "j": DataArray([0, 2, 1], dims="a"), } assert_equal(actual.isel(**indexers2), expected.isel(**indexers2)) def test_compatible_to_netcdf(self) -> None: # make sure it can be saved as a netcdf with self.create_datasets() as (actual, expected): with create_tmp_file() as tmp_file: actual.to_netcdf(tmp_file) with open_dataset(tmp_file) as actual2: assert_equal(actual2, expected) @requires_dask def test_dask(self) -> None: with self.create_datasets(chunks={"j": 2}) as (actual, expected): assert_equal(actual, expected) @network @requires_scipy_or_netCDF4 @requires_pydap class TestPydapOnline(TestPydap): @contextlib.contextmanager def create_dap2_datasets(self, **kwargs): # in pydap 3.5.0, urls defaults to dap2. url = "http://test.opendap.org/opendap/data/nc/bears.nc" actual = open_dataset(url, engine="pydap", **kwargs) # pydap <3.5.6 converts to unicode dtype=|U. Not what # xarray expects. Thus force to bytes dtype. pydap >=3.5.6 # does not convert to unicode. 
https://github.com/pydap/pydap/issues/510 actual["bears"].values = actual["bears"].values.astype("S") with open_example_dataset("bears.nc") as expected: yield actual, expected def output_grid_deprecation_warning_dap2dataset(self): with pytest.warns(DeprecationWarning, match="`output_grid` is deprecated"): with self.create_dap2_datasets(output_grid=True) as (actual, expected): assert_equal(actual, expected) def create_dap4_dataset(self, **kwargs): url = "dap4://test.opendap.org/opendap/data/nc/bears.nc" actual = open_dataset(url, engine="pydap", **kwargs) with open_example_dataset("bears.nc") as expected: # workaround to restore string which is converted to byte # only needed for pydap <3.5.6 https://github.com/pydap/pydap/issues/510 expected["bears"].values = expected["bears"].values.astype("S") yield actual, expected def test_session(self) -> None: from requests import Session session = Session() # blank requests.Session object with mock.patch("pydap.client.open_url") as mock_func: xr.backends.PydapDataStore.open("http://test.url", session=session) mock_func.assert_called_with( url="http://test.url", application=None, session=session, output_grid=False, timeout=120, verify=True, user_charset=None, ) @requires_pydap @network @pytest.mark.parametrize("protocol", ["dap2", "dap4"]) def test_batchdap4_downloads(tmpdir, protocol) -> None: """Test that in dap4, all dimensions are downloaded at once""" import pydap from pydap.net import create_session _version_ = Version(pydap.__version__) # Create a session with pre-set params in pydap backend, to cache urls cache_name = tmpdir / "debug" session = create_session(use_cache=True, cache_kwargs={"cache_name": cache_name}) session.cache.clear() url = "https://test.opendap.org/opendap/hyrax/data/nc/coads_climatology.nc" ds = open_dataset( url.replace("https", protocol), session=session, engine="pydap", decode_times=False, ) if protocol == "dap4": if _version_ > Version("3.5.5"): # total downloads are: # 1 dmr + 1 dap (all dimensions at once) assert len(session.cache.urls()) == 2 # now load the rest of the variables ds.load() # each non-dimension array is downloaded with an individual https requests assert len(session.cache.urls()) == 2 + 4 else: assert len(session.cache.urls()) == 4 ds.load() assert len(session.cache.urls()) == 4 + 4 elif protocol == "dap2": # das + dds + 3 dods urls for dimensions alone assert len(session.cache.urls()) == 5 class TestEncodingInvalid: def test_extract_nc4_variable_encoding(self) -> None: var = xr.Variable(("x",), [1, 2, 3], {}, {"foo": "bar"}) with pytest.raises(ValueError, match=r"unexpected encoding"): _extract_nc4_variable_encoding(var, raise_on_invalid=True) var = xr.Variable(("x",), [1, 2, 3], {}, {"chunking": (2, 1)}) encoding = _extract_nc4_variable_encoding(var) assert {} == encoding # regression test var = xr.Variable(("x",), [1, 2, 3], {}, {"shuffle": True}) encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True) assert {"shuffle": True} == encoding # Variables with unlim dims must be chunked on output. var = xr.Variable(("x",), [1, 2, 3], {}, {"contiguous": True}) encoding = _extract_nc4_variable_encoding(var, unlimited_dims=("x",)) assert {} == encoding @requires_netCDF4 def test_extract_nc4_variable_encoding_netcdf4(self): # New netCDF4 1.6.0 compression argument. 
var = xr.Variable(("x",), [1, 2, 3], {}, {"compression": "szlib"}) _extract_nc4_variable_encoding(var, backend="netCDF4", raise_on_invalid=True) @pytest.mark.xfail def test_extract_h5nc_encoding(self) -> None: # not supported with h5netcdf (yet) var = xr.Variable(("x",), [1, 2, 3], {}, {"least_significant_digit": 2}) with pytest.raises(ValueError, match=r"unexpected encoding"): _extract_nc4_variable_encoding(var, raise_on_invalid=True) class MiscObject: pass @requires_netCDF4 class TestValidateAttrs: def test_validating_attrs(self) -> None: def new_dataset(): return Dataset({"data": ("y", np.arange(10.0))}, {"y": np.arange(10)}) def new_dataset_and_dataset_attrs(): ds = new_dataset() return ds, ds.attrs def new_dataset_and_data_attrs(): ds = new_dataset() return ds, ds.data.attrs def new_dataset_and_coord_attrs(): ds = new_dataset() return ds, ds.coords["y"].attrs for new_dataset_and_attrs in [ new_dataset_and_dataset_attrs, new_dataset_and_data_attrs, new_dataset_and_coord_attrs, ]: ds, attrs = new_dataset_and_attrs() attrs[123] = "test" with pytest.raises(TypeError, match=r"Invalid name for attr: 123"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs[MiscObject()] = "test" with pytest.raises(TypeError, match=r"Invalid name for attr: "): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs[""] = "test" with pytest.raises(ValueError, match=r"Invalid name for attr '':"): ds.to_netcdf("test.nc") # This one should work ds, attrs = new_dataset_and_attrs() attrs["test"] = "test" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = {"a": 5} with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs["test"] = MiscObject() with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs["test"] = 5 with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = 3.14 with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = [1, 2, 3, 4] with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = (1.9, 2.5) with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = np.arange(5) with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = "This is a string" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = "" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) @requires_scipy_or_netCDF4 class TestDataArrayToNetCDF: def test_dataarray_to_netcdf_no_name(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4))) with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_netcdf_with_name(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4)), name="test") with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_netcdf_coord_name_clash(self) -> None: original_da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x" ) with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: 
assert_identical(original_da, loaded_da) def test_open_dataarray_options(self) -> None: data = DataArray(np.arange(5), coords={"y": ("x", range(5))}, dims=["x"]) with create_tmp_file() as tmp: data.to_netcdf(tmp) expected = data.drop_vars("y") with open_dataarray(tmp, drop_variables=["y"]) as loaded: assert_identical(expected, loaded) @requires_scipy def test_dataarray_to_netcdf_return_bytes(self) -> None: # regression test for GH1410 data = xr.DataArray([1, 2, 3]) output = data.to_netcdf(engine="scipy") assert isinstance(output, memoryview) def test_dataarray_to_netcdf_no_name_pathlib(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4))) with create_tmp_file() as tmps: tmp = Path(tmps) original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) @requires_zarr class TestDataArrayToZarr: def skip_if_zarr_python_3_and_zip_store(self, store) -> None: if has_zarr_v3 and isinstance(store, zarr.storage.ZipStore): pytest.skip( reason="zarr-python 3.x doesn't support reopening ZipStore with a new mode." ) def test_dataarray_to_zarr_no_name(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4))) original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_zarr_with_name(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4)), name="test") original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_zarr_coord_name_clash(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x" ) original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_open_dataarray_options(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) data = DataArray(np.arange(5), coords={"y": ("x", range(1, 6))}, dims=["x"]) data.to_zarr(tmp_store) expected = data.drop_vars("y") with open_dataarray(tmp_store, engine="zarr", drop_variables=["y"]) as loaded: assert_identical(expected, loaded) @requires_dask def test_dataarray_to_zarr_compute_false(self, tmp_store) -> None: from dask.delayed import Delayed skip_if_zarr_format_3(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4))) output = original_da.to_zarr(tmp_store, compute=False) assert isinstance(output, Delayed) output.compute() with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) @requires_dask def test_dataarray_to_zarr_align_chunks_true(self, tmp_store) -> None: # TODO: Improve data integrity checks when using Dask. # Detecting automatic alignment issues in Dask can be tricky, # as unintended misalignment might lead to subtle data corruption. # For now, ensure that the parameter is present, but explore # more robust verification methods to confirm data consistency. 
skip_if_zarr_format_3(tmp_store) arr = DataArray( np.arange(4), dims=["a"], coords={"a": np.arange(4)}, name="foo" ).chunk(a=(2, 1, 1)) arr.to_zarr( tmp_store, align_chunks=True, encoding={"foo": {"chunks": (3,)}}, ) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(arr, loaded_da) @requires_scipy_or_netCDF4 def test_no_warning_from_dask_effective_get() -> None: with create_tmp_file() as tmpfile: with assert_no_warnings(): ds = Dataset() ds.to_netcdf(tmpfile) @requires_scipy_or_netCDF4 def test_source_encoding_always_present() -> None: # Test for GH issue #2550. rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(tmp) as ds: assert ds.encoding["source"] == tmp @requires_scipy_or_netCDF4 def test_source_encoding_always_present_with_pathlib() -> None: # Test for GH issue #5888. rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(Path(tmp)) as ds: assert ds.encoding["source"] == tmp @requires_h5netcdf @requires_fsspec def test_source_encoding_always_present_with_fsspec() -> None: import fsspec rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) fs = fsspec.filesystem("file") with fs.open(tmp) as f, open_dataset(f) as ds: assert ds.encoding["source"] == tmp with fs.open(tmp) as f, open_mfdataset([f]) as ds: assert "foo" in ds def _assert_no_dates_out_of_range_warning(record): undesired_message = "dates out of range" for warning in record: assert undesired_message not in str(warning.message) @requires_scipy_or_netCDF4 @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_standard_calendar_default_in_range(calendar) -> None: x = [0, 1] time = [0, 720] units_date = "2000-01-01" units = "days since 2000-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar x_timedeltas = np.array(x).astype("timedelta64[D]") time_timedeltas = np.array(time).astype("timedelta64[D]") decoded_x = np.datetime64(units_date, "ns") + x_timedeltas decoded_time = np.datetime64(units_date, "ns") + time_timedeltas expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: with open_dataset(tmp_file) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_cftime @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_use_cftime_standard_calendar_default_out_of_range(calendar) -> None: # todo: check, if we still need to test for two dates import cftime x = [0, 1] time = [0, 720] units = "days since 1582-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True) decoded_time = cftime.num2date( time, units, calendar, only_use_cftime_datetimes=True ) expected_x = DataArray(decoded_x, [("time", decoded_time)], 
name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with pytest.warns(SerializationWarning): with open_dataset(tmp_file) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) @requires_cftime @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_true(calendar, units_year) -> None: import cftime x = [0, 1] time = [0, 720] units = f"days since {units_year}-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True) decoded_time = cftime.num2date( time, units, calendar, only_use_cftime_datetimes=True ) expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: decoder = CFDatetimeCoder(use_cftime=True) with open_dataset(tmp_file, decode_times=decoder) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.xfail( has_numpy_2, reason="https://github.com/pandas-dev/pandas/issues/56996" ) def test_use_cftime_false_standard_calendar_in_range(calendar) -> None: x = [0, 1] time = [0, 720] units_date = "2000-01-01" units = "days since 2000-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar x_timedeltas = np.array(x).astype("timedelta64[D]") time_timedeltas = np.array(time).astype("timedelta64[D]") decoded_x = np.datetime64(units_date, "ns") + x_timedeltas decoded_time = np.datetime64(units_date, "ns") + time_timedeltas expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: coder = xr.coders.CFDatetimeCoder(use_cftime=False) with open_dataset(tmp_file, decode_times=coder) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_scipy_or_netCDF4 @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_use_cftime_false_standard_calendar_out_of_range(calendar) -> None: x = [0, 1] time = [0, 720] units = "days since 1582-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) decoder = CFDatetimeCoder(use_cftime=False) with pytest.raises((OutOfBoundsDatetime, ValueError)): open_dataset(tmp_file, decode_times=decoder) @requires_scipy_or_netCDF4 @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def 
test_use_cftime_false_nonstandard_calendar(calendar, units_year) -> None: x = [0, 1] time = [0, 720] units = f"days since {units_year}" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) decoder = CFDatetimeCoder(use_cftime=False) with pytest.raises((OutOfBoundsDatetime, ValueError)): open_dataset(tmp_file, decode_times=decoder) @pytest.mark.parametrize("engine", ["netcdf4", "scipy"]) def test_invalid_netcdf_raises(engine) -> None: data = create_test_data() with pytest.raises(ValueError, match=r"unrecognized option 'invalid_netcdf'"): data.to_netcdf("foo.nc", engine=engine, invalid_netcdf=True) @requires_zarr def test_encode_zarr_attr_value() -> None: # array -> list arr = np.array([1, 2, 3]) expected1 = [1, 2, 3] actual1 = backends.zarr.encode_zarr_attr_value(arr) assert isinstance(actual1, list) assert actual1 == expected1 # scalar array -> scalar sarr = np.array(1)[()] expected2 = 1 actual2 = backends.zarr.encode_zarr_attr_value(sarr) assert isinstance(actual2, int) assert actual2 == expected2 # string -> string (no change) expected3 = "foo" actual3 = backends.zarr.encode_zarr_attr_value(expected3) assert isinstance(actual3, str) assert actual3 == expected3 @requires_zarr def test_extract_zarr_variable_encoding() -> None: var = xr.Variable("x", [1, 2]) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) assert "chunks" in actual assert actual["chunks"] == ("auto" if has_zarr_v3 else None) var = xr.Variable("x", [1, 2], encoding={"chunks": (1,)}) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) assert actual["chunks"] == (1,) # does not raise on invalid var = xr.Variable("x", [1, 2], encoding={"foo": (1,)}) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) # raises on invalid var = xr.Variable("x", [1, 2], encoding={"foo": (1,)}) with pytest.raises(ValueError, match=r"unexpected encoding parameters"): actual = backends.zarr.extract_zarr_variable_encoding( var, raise_on_invalid=True, zarr_format=3 ) @requires_zarr @requires_fsspec @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") def test_open_fsspec() -> None: import fsspec if not ( ( hasattr(zarr.storage, "FSStore") and hasattr(zarr.storage.FSStore, "getitems") ) # zarr v2 or hasattr(zarr.storage, "FsspecStore") # zarr v3 ): pytest.skip("zarr too old") ds = open_dataset(os.path.join(os.path.dirname(__file__), "data", "example_1.nc")) m = fsspec.filesystem("memory") mm = m.get_mapper("out1.zarr") ds.to_zarr(mm) # old interface ds0 = ds.copy() # pd.to_timedelta returns ns-precision, but the example data is in second precision # so we need to fix this ds0["time"] = ds.time + np.timedelta64(1, "D") mm = m.get_mapper("out2.zarr") ds0.to_zarr(mm) # old interface # single dataset url = "memory://out2.zarr" ds2 = open_dataset(url, engine="zarr") xr.testing.assert_equal(ds0, ds2) # single dataset with caching url = "simplecache::memory://out2.zarr" ds2 = open_dataset(url, engine="zarr") xr.testing.assert_equal(ds0, ds2) # open_mfdataset requires dask if has_dask: # multi dataset url = "memory://out*.zarr" ds2 = open_mfdataset(url, engine="zarr") xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2) # multi dataset with caching url = "simplecache::memory://out*.zarr" ds2 = open_mfdataset(url, engine="zarr") xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2) 
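# --- Editorial note: illustrative sketch, not part of the upstream test suite. ---
# The fsspec/zarr round-trip exercised by ``test_open_fsspec`` above can be
# reproduced in a few lines. This is a minimal, hedged example that assumes
# xarray, zarr and fsspec are installed; the helper name below is hypothetical
# and included for illustration only.
def _example_fsspec_zarr_roundtrip():
    import fsspec
    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"foo": ("x", np.arange(4))}, coords={"x": [10, 20, 30, 40]})

    # Write to an in-memory fsspec filesystem through a mapper.
    mapper = fsspec.filesystem("memory").get_mapper("example.zarr")
    ds.to_zarr(mapper, mode="w")

    # Read back either directly or through fsspec's "simplecache::" chaining;
    # both URLs resolve to the same in-memory store.
    opened = xr.open_dataset("memory://example.zarr", engine="zarr")
    cached = xr.open_dataset("simplecache::memory://example.zarr", engine="zarr")
    xr.testing.assert_equal(ds, opened)
    xr.testing.assert_equal(ds, cached)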
@requires_h5netcdf @requires_netCDF4 def test_load_single_value_h5netcdf(tmp_path: Path) -> None: """Test that numeric single-element vector attributes are handled fine. At present (h5netcdf v0.8.1), the h5netcdf exposes single-valued numeric variable attributes as arrays of length 1, as opposed to scalars for the NetCDF4 backend. This was leading to a ValueError upon loading a single value from a file, see #4471. Test that loading causes no failure. """ ds = xr.Dataset( { "test": xr.DataArray( np.array([0]), dims=("x",), attrs={"scale_factor": 1, "add_offset": 0} ) } ) ds.to_netcdf(tmp_path / "test.nc") with xr.open_dataset(tmp_path / "test.nc", engine="h5netcdf") as ds2: ds2["test"][0].load() @requires_zarr @requires_dask @pytest.mark.parametrize( "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}] ) def test_open_dataset_chunking_zarr(chunks, tmp_path: Path) -> None: encoded_chunks = 100 dask_arr = da.from_array( np.ones((500, 500), dtype="float64"), chunks=encoded_chunks ) ds = xr.Dataset( { "test": xr.DataArray( dask_arr, dims=("x", "y"), ) } ) ds["test"].encoding["chunks"] = encoded_chunks ds.to_zarr(tmp_path / "test.zarr") with dask.config.set({"array.chunk-size": "1MiB"}): expected = ds.chunk(chunks) with open_dataset( tmp_path / "test.zarr", engine="zarr", chunks=chunks ) as actual: xr.testing.assert_chunks_equal(actual, expected) @requires_zarr @requires_dask @pytest.mark.parametrize( "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}] ) @pytest.mark.filterwarnings("ignore:The specified chunks separate") def test_chunking_consintency(chunks, tmp_path: Path) -> None: encoded_chunks: dict[str, Any] = {} dask_arr = da.from_array( np.ones((500, 500), dtype="float64"), chunks=encoded_chunks ) ds = xr.Dataset( { "test": xr.DataArray( dask_arr, dims=("x", "y"), ) } ) ds["test"].encoding["chunks"] = encoded_chunks ds.to_zarr(tmp_path / "test.zarr") ds.to_netcdf(tmp_path / "test.nc") with dask.config.set({"array.chunk-size": "1MiB"}): expected = ds.chunk(chunks) with xr.open_dataset( tmp_path / "test.zarr", engine="zarr", chunks=chunks ) as actual: xr.testing.assert_chunks_equal(actual, expected) with xr.open_dataset(tmp_path / "test.nc", chunks=chunks) as actual: xr.testing.assert_chunks_equal(actual, expected) def _check_guess_can_open_and_open(entrypoint, obj, engine, expected): assert entrypoint.guess_can_open(obj) with open_dataset(obj, engine=engine) as actual: assert_identical(expected, actual) @requires_netCDF4 def test_netcdf4_entrypoint(tmp_path: Path) -> None: entrypoint = NetCDF4BackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, format="NETCDF3_CLASSIC") _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds) path = tmp_path / "bar" ds.to_netcdf(path, format="NETCDF4_CLASSIC") _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds) # Remote URLs without extensions return True (backward compatibility) assert entrypoint.guess_can_open("http://something/remote") # Remote URLs with netCDF extensions are also claimed assert entrypoint.guess_can_open("http://something/remote.nc") assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc4") assert entrypoint.guess_can_open("something-local.cdf") assert not 
entrypoint.guess_can_open("not-found-and-no-extension") contents = ds.to_netcdf(engine="netcdf4") _check_guess_can_open_and_open(entrypoint, contents, engine="netcdf4", expected=ds) path = tmp_path / "baz" with open(path, "wb") as f: f.write(b"not-a-netcdf-file") assert not entrypoint.guess_can_open(path) @requires_scipy def test_scipy_entrypoint(tmp_path: Path) -> None: entrypoint = ScipyBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, engine="scipy") _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds) with open(path, "rb") as f: _check_guess_can_open_and_open(entrypoint, f, engine="scipy", expected=ds) contents = ds.to_netcdf(engine="scipy") _check_guess_can_open_and_open(entrypoint, contents, engine="scipy", expected=ds) _check_guess_can_open_and_open( entrypoint, BytesIO(contents), engine="scipy", expected=ds ) path = tmp_path / "foo.nc.gz" with gzip.open(path, mode="wb") as f: f.write(contents) _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds) assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc.gz") assert not entrypoint.guess_can_open("not-found-and-no-extension") assert not entrypoint.guess_can_open(b"not-a-netcdf-file") # Should not claim .gz files that aren't netCDF assert not entrypoint.guess_can_open("something.zarr.gz") assert not entrypoint.guess_can_open("something.tar.gz") assert not entrypoint.guess_can_open("something.txt.gz") @requires_h5netcdf def test_h5netcdf_entrypoint(tmp_path: Path) -> None: entrypoint = H5netcdfBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, engine="h5netcdf") _check_guess_can_open_and_open(entrypoint, path, engine="h5netcdf", expected=ds) _check_guess_can_open_and_open( entrypoint, str(path), engine="h5netcdf", expected=ds ) with open(path, "rb") as f: _check_guess_can_open_and_open(entrypoint, f, engine="h5netcdf", expected=ds) contents = ds.to_netcdf(engine="h5netcdf") _check_guess_can_open_and_open(entrypoint, contents, engine="h5netcdf", expected=ds) assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc4") assert entrypoint.guess_can_open("something-local.cdf") assert not entrypoint.guess_can_open("not-found-and-no-extension") @requires_zarr def test_zarr_entrypoint(tmp_path: Path) -> None: from xarray.backends.zarr import ZarrBackendEntrypoint entrypoint = ZarrBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo.zarr" ds.to_zarr(path) _check_guess_can_open_and_open(entrypoint, path, engine="zarr", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="zarr", expected=ds) # add a trailing slash to the path and check again _check_guess_can_open_and_open( entrypoint, str(path) + "/", engine="zarr", expected=ds ) # Test the new functionality: .zarr with trailing slash assert entrypoint.guess_can_open("something-local.zarr") assert entrypoint.guess_can_open("something-local.zarr/") # With trailing slash assert not entrypoint.guess_can_open("something-local.nc") assert not entrypoint.guess_can_open("not-found-and-no-extension") assert not entrypoint.guess_can_open("something.zarr.txt") @requires_h5netcdf @requires_netCDF4 @requires_zarr def test_remote_url_backend_auto_detection() -> None: """ Test that remote URLs 
are correctly selected by the backend resolution system. This tests the fix for issue where netCDF4, h5netcdf, and pydap backends were claiming ALL remote URLs, preventing remote Zarr stores from being auto-detected. See: https://github.com/pydata/xarray/issues/10801 """ from xarray.backends.plugins import guess_engine # Test cases: (url, expected_backend) test_cases = [ # Remote Zarr URLs ("https://example.com/store.zarr", "zarr"), ("http://example.com/data.zarr/", "zarr"), ("s3://bucket/path/to/data.zarr", "zarr"), # Remote netCDF URLs (non-DAP) - netcdf4 wins (first in order, no query params) ("https://example.com/file.nc", "netcdf4"), ("http://example.com/data.nc4", "netcdf4"), ("https://example.com/test.cdf", "netcdf4"), ("s3://bucket/path/to/data.nc", "netcdf4"), # Remote netCDF URLs with query params - netcdf4 wins # Note: Query params are typically indicative of DAP URLs (e.g., OPeNDAP constraint expressions), # so we prefer netcdf4 (which has DAP support) over h5netcdf (which doesn't) ("https://example.com/data.nc?var=temperature&time=0", "netcdf4"), ( "http://test.opendap.org/opendap/dap4/StaggeredGrid.nc4?dap4.ce=/time[0:1:0]", "netcdf4", ), # DAP URLs with .nc extensions (no query params) - netcdf4 wins (first in order) ("http://test.opendap.org/opendap/dap4/StaggeredGrid.nc4", "netcdf4"), ("https://example.com/DAP4/data.nc", "netcdf4"), ("http://example.com/data/Dap4/file.nc", "netcdf4"), ] for url, expected_backend in test_cases: engine = guess_engine(url) assert engine == expected_backend, ( f"URL {url!r} should select {expected_backend!r} but got {engine!r}" ) # DAP URLs - netcdf4 should handle these (it comes first in backend order) # Both netcdf4 and pydap can open DAP URLs, but netcdf4 has priority expected_dap_backend = "netcdf4" dap_urls = [ # Explicit DAP protocol schemes "dap2://opendap.earthdata.nasa.gov/collections/dataset", "dap4://opendap.earthdata.nasa.gov/collections/dataset", "dap://example.com/dataset", "DAP2://example.com/dataset", # uppercase scheme "DAP4://example.com/dataset", # uppercase scheme # DAP path indicators "https://example.com/services/DAP2/dataset", # uppercase in path "http://test.opendap.org/opendap/data/nc/file.nc", # /opendap/ path "https://coastwatch.pfeg.noaa.gov/erddap/griddap/erdMH1chla8day", # ERDDAP "http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/", # THREDDS dodsC "https://disc2.gesdisc.eosdis.nasa.gov/dods/TRMM_3B42", # GrADS /dods/ ] for url in dap_urls: engine = guess_engine(url) assert engine == expected_dap_backend, ( f"URL {url!r} should select {expected_dap_backend!r} but got {engine!r}" ) # URLs with .dap suffix are claimed by netcdf4 (backward compatibility fallback) # Note: .dap suffix is intentionally NOT recognized as a DAP dataset URL fallback_urls = [ ("http://test.opendap.org/opendap/data/nc/coads_climatology.nc.dap", "netcdf4"), ("https://example.com/data.dap", "netcdf4"), ] for url, expected_backend in fallback_urls: engine = guess_engine(url) assert engine == expected_backend @requires_netCDF4 @pytest.mark.parametrize("str_type", (str, np.str_)) def test_write_file_from_np_str(str_type: type[str | np.str_], tmpdir: str) -> None: # https://github.com/pydata/xarray/pull/5264 scenarios = [str_type(v) for v in ["scenario_a", "scenario_b", "scenario_c"]] years = range(2015, 2100 + 1) tdf = pd.DataFrame( data=np.random.random((len(scenarios), len(years))), columns=years, index=scenarios, ) tdf.index.name = "scenario" tdf.columns.name = "year" tdf = cast(pd.DataFrame, tdf.stack()) tdf.name = "tas" txr = 
    tdf.to_xarray()
    txr.to_netcdf(tmpdir.join("test.nc"))


@requires_zarr
@requires_netCDF4
class TestNCZarr:
    @property
    def netcdfc_version(self):
        return Version(nc4.getlibversion().split()[0].split("-development")[0])

    def _create_nczarr(self, filename):
        if self.netcdfc_version < Version("4.8.1"):
            pytest.skip("requires netcdf-c>=4.8.1")
        if platform.system() == "Windows" and self.netcdfc_version == Version("4.8.1"):
            # Bug in netcdf-c==4.8.1 (typo: Nan instead of NaN)
            # https://github.com/Unidata/netcdf-c/issues/2265
            pytest.skip("netcdf-c==4.8.1 has issues on Windows")

        ds = create_test_data()
        # Drop dim3: netcdf-c does not support dtype='<U1'
        ds = ds.drop_vars("dim3")
        ds.to_netcdf(f"file://{filename}#mode=nczarr")
        return ds

    def test_open_nczarr(self) -> None:
        with create_tmp_file(suffix=".zarr") as tmp:
            expected = self._create_nczarr(tmp)
            actual = xr.open_zarr(tmp, consolidated=False)
            assert_identical(expected, actual)

    def test_overwriting_nczarr(self) -> None:
        with create_tmp_file(suffix=".zarr") as tmp:
            ds = self._create_nczarr(tmp)
            expected = ds[["var1"]]
            expected.to_zarr(tmp, mode="w")
            actual = xr.open_zarr(tmp, consolidated=False)
            assert_identical(expected, actual)

    @pytest.mark.parametrize("mode", ["a", "r+"])
    @pytest.mark.filterwarnings("ignore:.*non-consolidated metadata.*")
    def test_raise_writing_to_nczarr(self, mode) -> None:
        if self.netcdfc_version > Version("4.8.1"):
            pytest.skip("netcdf-c>4.8.1 adds the _ARRAY_DIMENSIONS attribute")
        with create_tmp_file(suffix=".zarr") as tmp:
            ds = self._create_nczarr(tmp)
            with pytest.raises(
                KeyError, match="missing the attribute `_ARRAY_DIMENSIONS`,"
            ):
                ds.to_zarr(tmp, mode=mode)


@requires_netCDF4
@requires_dask
@pytest.mark.usefixtures("default_zarr_format")
def test_pickle_open_mfdataset_dataset():
    with open_example_mfdataset(["bears.nc"]) as ds:
        assert_identical(ds, pickle.loads(pickle.dumps(ds)))


@requires_zarr
@pytest.mark.usefixtures("default_zarr_format")
def test_zarr_closing_internal_zip_store():
    store_name = "tmp.zarr.zip"
    original_da = DataArray(np.arange(12).reshape((3, 4)))
    original_da.to_zarr(store_name, mode="w")
    with open_dataarray(store_name, engine="zarr") as loaded_da:
        assert_identical(original_da, loaded_da)


@requires_zarr
@pytest.mark.parametrize("create_default_indexes", [True, False])
def test_zarr_create_default_indexes(tmp_path, create_default_indexes) -> None:
    store_path = tmp_path / "tmp.zarr"
    original_ds = xr.Dataset({"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]})
    original_ds.to_zarr(store_path, mode="w")

    with open_dataset(
        store_path, engine="zarr", create_default_indexes=create_default_indexes
    ) as loaded_ds:
        if create_default_indexes:
            assert list(loaded_ds.xindexes) == ["x"] and isinstance(
                loaded_ds.xindexes["x"], PandasIndex
            )
        else:
            assert len(loaded_ds.xindexes) == 0


@requires_zarr
@pytest.mark.usefixtures("default_zarr_format")
def test_raises_key_error_on_invalid_zarr_store(tmp_path):
    root = zarr.open_group(tmp_path / "tmp.zarr")
    if Version(zarr.__version__) < Version("3.0.0"):
        root.create_dataset("bar", shape=(3, 5), dtype=np.float32)
    else:
        root.create_array("bar", shape=(3, 5), dtype=np.float32)
    with pytest.raises(KeyError, match=r"xarray to determine variable dimensions"):
        xr.open_zarr(tmp_path / "tmp.zarr", consolidated=False)


@requires_zarr
@pytest.mark.usefixtures("default_zarr_format")
class TestZarrRegionAuto:
    """These are separated out since we should not need to test this logic with every store."""

    @contextlib.contextmanager
    def create_zarr_target(self):
        with create_tmp_file(suffix=".zarr") as tmp:
            yield tmp

    @contextlib.contextmanager
    def create(self):
        x = np.arange(0, 50, 10)
        y = np.arange(0, 20, 2)
        data =
np.ones((5, 10)) ds = xr.Dataset( {"test": xr.DataArray(data, dims=("x", "y"), coords={"x": x, "y": y})} ) with self.create_zarr_target() as target: self.save(target, ds) yield target, ds def save(self, target, ds, **kwargs): ds.to_zarr(target, **kwargs) @pytest.mark.parametrize( "region", [ pytest.param("auto", id="full-auto"), pytest.param({"x": "auto", "y": slice(6, 8)}, id="mixed-auto"), ], ) def test_zarr_region_auto(self, region): with self.create() as (target, ds): ds_region = 1 + ds.isel(x=slice(2, 4), y=slice(6, 8)) self.save(target, ds_region, region=region) ds_updated = xr.open_zarr(target) expected = ds.copy() expected["test"][2:4, 6:8] += 1 assert_identical(ds_updated, expected) def test_zarr_region_auto_noncontiguous(self): with self.create() as (target, ds): with pytest.raises(ValueError): self.save(target, ds.isel(x=[0, 2, 3], y=[5, 6]), region="auto") dsnew = ds.copy() dsnew["x"] = dsnew.x + 5 with pytest.raises(KeyError): self.save(target, dsnew, region="auto") def test_zarr_region_index_write(self, tmp_path): region: Mapping[str, slice] | Literal["auto"] region_slice = dict(x=slice(2, 4), y=slice(6, 8)) with self.create() as (target, ds): ds_region = 1 + ds.isel(region_slice) for region in [region_slice, "auto"]: # type: ignore[assignment] with patch.object( ZarrStore, "set_variables", side_effect=ZarrStore.set_variables, autospec=True, ) as mock: self.save(target, ds_region, region=region, mode="r+") # should write the data vars but never the index vars with auto mode for call in mock.call_args_list: written_variables = call.args[1].keys() assert "test" in written_variables assert "x" not in written_variables assert "y" not in written_variables def test_zarr_region_append(self): with self.create() as (target, ds): x_new = np.arange(40, 70, 10) data_new = np.ones((3, 10)) ds_new = xr.Dataset( { "test": xr.DataArray( data_new, dims=("x", "y"), coords={"x": x_new, "y": ds.y}, ) } ) # Now it is valid to use auto region detection with the append mode, # but it is still unsafe to modify dimensions or metadata using the region # parameter. with pytest.raises(KeyError): self.save(target, ds_new, mode="a", append_dim="x", region="auto") def test_zarr_region(self): with self.create() as (target, ds): ds_transposed = ds.transpose("y", "x") ds_region = 1 + ds_transposed.isel(x=[0], y=[0]) self.save(target, ds_region, region={"x": slice(0, 1), "y": slice(0, 1)}) # Write without region self.save(target, ds_transposed, mode="r+") @requires_dask def test_zarr_region_chunk_partial(self): """ Check that writing to partial chunks with `region` fails, assuming `safe_chunks=False`. 
""" ds = ( xr.DataArray(np.arange(120).reshape(4, 3, -1), dims=list("abc")) .rename("var1") .to_dataset() ) with self.create_zarr_target() as target: self.save(target, ds.chunk(5), compute=False, mode="w") with pytest.raises(ValueError): for r in range(ds.sizes["a"]): self.save( target, ds.chunk(3).isel(a=[r]), region=dict(a=slice(r, r + 1)) ) @requires_dask def test_zarr_append_chunk_partial(self): t_coords = np.array([np.datetime64("2020-01-01").astype("datetime64[ns]")]) data = np.ones((10, 10)) da = xr.DataArray( data.reshape((-1, 10, 10)), dims=["time", "x", "y"], coords={"time": t_coords}, name="foo", ) new_time = np.array([np.datetime64("2021-01-01").astype("datetime64[ns]")]) da2 = xr.DataArray( data.reshape((-1, 10, 10)), dims=["time", "x", "y"], coords={"time": new_time}, name="foo", ) with self.create_zarr_target() as target: self.save(target, da, mode="w", encoding={"foo": {"chunks": (5, 5, 1)}}) with pytest.raises(ValueError, match="encoding was provided"): self.save( target, da2, append_dim="time", mode="a", encoding={"foo": {"chunks": (1, 1, 1)}}, ) # chunking with dask sidesteps the encoding check, so we need a different check with pytest.raises(ValueError, match="Specified Zarr chunks"): self.save( target, da2.chunk({"x": 1, "y": 1, "time": 1}), append_dim="time", mode="a", ) @pytest.mark.xfail( ON_WINDOWS, reason="Permission errors from Zarr: https://github.com/pydata/xarray/pull/10793", ) @requires_dask def test_zarr_region_chunk_partial_offset(self): # https://github.com/pydata/xarray/pull/8459#issuecomment-1819417545 with self.create_zarr_target() as store: data = np.ones((30,)) da = xr.DataArray( data, dims=["x"], coords={"x": range(30)}, name="foo" ).chunk(x=10) self.save(store, da, compute=False) self.save(store, da.isel(x=slice(10)).chunk(x=(10,)), region="auto") self.save( store, da.isel(x=slice(5, 25)).chunk(x=(10, 10)), safe_chunks=False, region="auto", ) with pytest.raises(ValueError): self.save( store, da.isel(x=slice(5, 25)).chunk(x=(10, 10)), region="auto" ) @requires_dask def test_zarr_safe_chunk_append_dim(self): with self.create_zarr_target() as store: data = np.ones((20,)) da = xr.DataArray( data, dims=["x"], coords={"x": range(20)}, name="foo" ).chunk(x=5) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk is smaller than the border size then raise an error self.save( store, da.isel(x=slice(7, 11)).chunk(x=(2, 2)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # If the first chunk is of the size of the border size then it is valid self.save( store, da.isel(x=slice(7, 11)).chunk(x=(3, 1)), safe_chunks=True, append_dim="x", ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 11))) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # If the first chunk is of the size of the border size + N * zchunk then it is valid self.save( store, da.isel(x=slice(7, 17)).chunk(x=(8, 2)), safe_chunks=True, append_dim="x", ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 17))) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk is valid but the other are not then raise an error self.save( store, da.isel(x=slice(7, 14)).chunk(x=(3, 3, 1)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk have a size bigger than the border size but 
not enough # to complete the size of the next chunk then an error must be raised self.save( store, da.isel(x=slice(7, 14)).chunk(x=(4, 3)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # Append with a single chunk it's totally valid, # and it does not matter the size of the chunk self.save( store, da.isel(x=slice(7, 19)).chunk(x=-1), append_dim="x", safe_chunks=True, ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 19))) @requires_dask @pytest.mark.parametrize("mode", ["r+", "a"]) def test_zarr_safe_chunk_region(self, mode: Literal["r+", "a"]): with self.create_zarr_target() as store: arr = xr.DataArray( list(range(11)), dims=["a"], coords={"a": list(range(11))}, name="foo" ).chunk(a=3) self.save(store, arr, mode="w") with pytest.raises(ValueError): # There are two Dask chunks on the same Zarr chunk, # which means that it is unsafe in any mode self.save( store, arr.isel(a=slice(0, 3)).chunk(a=(2, 1)), region="auto", mode=mode, ) with pytest.raises(ValueError): # the first chunk is covering the border size, but it is not # completely covering the second chunk, which means that it is # unsafe in any mode self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(3, 1)), region="auto", mode=mode, ) with pytest.raises(ValueError): # The first chunk is safe but the other two chunks are overlapping with # the same Zarr chunk self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 1, 1)), region="auto", mode=mode, ) # Fully update two contiguous chunks is safe in any mode self.save(store, arr.isel(a=slice(3, 9)), region="auto", mode=mode) # The last chunk is considered full based on their current size (2) self.save(store, arr.isel(a=slice(9, 11)), region="auto", mode=mode) self.save( store, arr.isel(a=slice(6, None)).chunk(a=-1), region="auto", mode=mode ) # Write the last chunk of a region partially is safe in "a" mode self.save(store, arr.isel(a=slice(3, 8)), region="auto", mode="a") with pytest.raises(ValueError): # with "r+" mode it is invalid to write partial chunk self.save(store, arr.isel(a=slice(3, 8)), region="auto", mode="r+") # This is safe with mode "a", the border size is covered by the first chunk of Dask self.save( store, arr.isel(a=slice(1, 4)).chunk(a=(2, 1)), region="auto", mode="a" ) with pytest.raises(ValueError): # This is considered unsafe in mode "r+" because it is writing in a partial chunk self.save( store, arr.isel(a=slice(1, 4)).chunk(a=(2, 1)), region="auto", mode="r+", ) # This is safe on mode "a" because there is a single dask chunk self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(4,)), region="auto", mode="a" ) with pytest.raises(ValueError): # This is unsafe on mode "r+", because the Dask chunk is partially writing # in the first chunk of Zarr self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(4,)), region="auto", mode="r+", ) # The first chunk is completely covering the first Zarr chunk # and the last chunk is a partial one self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 2)), region="auto", mode="a" ) with pytest.raises(ValueError): # The last chunk is partial, so it is considered unsafe on mode "r+" self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 2)), region="auto", mode="r+", ) # The first chunk is covering the border size (2 elements) # and also the second chunk (3 elements), so it is valid self.save( store, arr.isel(a=slice(1, 8)).chunk(a=(5, 2)), region="auto", mode="a" ) with pytest.raises(ValueError): # The first chunk is not fully covering the first zarr chunk self.save( store, 
arr.isel(a=slice(1, 8)).chunk(a=(5, 2)), region="auto", mode="r+", ) with pytest.raises(ValueError): # Validate that the border condition is not affecting the "r+" mode self.save(store, arr.isel(a=slice(1, 9)), region="auto", mode="r+") self.save(store, arr.isel(a=slice(10, 11)), region="auto", mode="a") with pytest.raises(ValueError): # Validate that even if we write with a single Dask chunk on the last Zarr # chunk it is still unsafe if it is not fully covering it # (the last Zarr chunk has size 2) self.save(store, arr.isel(a=slice(10, 11)), region="auto", mode="r+") # Validate the same as the above test but in the beginning of the last chunk self.save(store, arr.isel(a=slice(9, 10)), region="auto", mode="a") with pytest.raises(ValueError): self.save(store, arr.isel(a=slice(9, 10)), region="auto", mode="r+") self.save( store, arr.isel(a=slice(7, None)).chunk(a=-1), region="auto", mode="a" ) with pytest.raises(ValueError): # Test that even a Dask chunk that covers the last Zarr chunk can be unsafe # if it is partial covering other Zarr chunks self.save( store, arr.isel(a=slice(7, None)).chunk(a=-1), region="auto", mode="r+", ) with pytest.raises(ValueError): # If the chunk is of size equal to the one in the Zarr encoding, but # it is partially writing in the first chunk then raise an error self.save( store, arr.isel(a=slice(8, None)).chunk(a=3), region="auto", mode="r+", ) with pytest.raises(ValueError): self.save( store, arr.isel(a=slice(5, -1)).chunk(a=5), region="auto", mode="r+" ) # Test if the code is detecting the last chunk correctly data = np.random.default_rng(0).random((2920, 25, 53)) ds = xr.Dataset({"temperature": (("time", "lat", "lon"), data)}) chunks = {"time": 1000, "lat": 25, "lon": 53} self.save(store, ds.chunk(chunks), compute=False, mode="w") region = {"time": slice(1000, 2000, 1)} chunk = ds.isel(region) chunk = chunk.chunk() self.save(store, chunk.chunk(), region=region) @requires_dask def test_dataset_to_zarr_align_chunks_true(self, tmp_store) -> None: # This test is a replica of the one in `test_dataarray_to_zarr_align_chunks_true` # but for datasets with self.create_zarr_target() as store: ds = ( DataArray( np.arange(4).reshape((2, 2)), dims=["a", "b"], coords={ "a": np.arange(2), "b": np.arange(2), }, ) .chunk(a=(1, 1), b=(1, 1)) .to_dataset(name="foo") ) self.save( store, ds, align_chunks=True, encoding={"foo": {"chunks": (3, 3)}}, mode="w", ) assert_identical(ds, xr.open_zarr(store)) ds = ( DataArray( np.arange(4, 8).reshape((2, 2)), dims=["a", "b"], coords={ "a": np.arange(2), "b": np.arange(2), }, ) .chunk(a=(1, 1), b=(1, 1)) .to_dataset(name="foo") ) self.save( store, ds, align_chunks=True, region="auto", ) assert_identical(ds, xr.open_zarr(store)) @requires_h5netcdf @requires_fsspec def test_h5netcdf_storage_options() -> None: with create_tmp_files(2, allow_cleanup_failure=ON_WINDOWS) as (f1, f2): ds1 = create_test_data() ds1.to_netcdf(f1, engine="h5netcdf") ds2 = create_test_data() ds2.to_netcdf(f2, engine="h5netcdf") files = [f"file://{f}" for f in [f1, f2]] with xr.open_mfdataset( files, engine="h5netcdf", concat_dim="time", data_vars="all", combine="nested", storage_options={"skip_instance_cache": False}, ) as ds: assert_identical(xr.concat([ds1, ds2], dim="time", data_vars="all"), ds) xarray-2025.12.0/xarray/tests/test_backends_api.py000066400000000000000000000272071511464676000220370ustar00rootroot00000000000000from __future__ import annotations import io import re import sys from numbers import Number import numpy as np import pytest import xarray as 
xr from xarray.backends.writers import get_default_netcdf_write_engine from xarray.tests import ( assert_identical, assert_no_warnings, requires_dask, requires_h5netcdf, requires_netCDF4, requires_scipy, ) @requires_netCDF4 @requires_scipy @requires_h5netcdf def test_get_default_netcdf_write_engine() -> None: assert xr.get_options()["netcdf_engine_order"] == ("netcdf4", "h5netcdf", "scipy") engine = get_default_netcdf_write_engine("", format=None) assert engine == "netcdf4" engine = get_default_netcdf_write_engine("", format="NETCDF4") assert engine == "netcdf4" engine = get_default_netcdf_write_engine("", format="NETCDF4_CLASSIC") assert engine == "netcdf4" engine = get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC") assert engine == "netcdf4" engine = get_default_netcdf_write_engine(io.BytesIO(), format=None) assert engine == "h5netcdf" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4") assert engine == "h5netcdf" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC") assert engine == "scipy" engine = get_default_netcdf_write_engine("path.zarr#mode=nczarr", format=None) assert engine == "netcdf4" with xr.set_options(netcdf_engine_order=["netcdf4", "scipy", "h5netcdf"]): engine = get_default_netcdf_write_engine(io.BytesIO(), format=None) assert engine == "scipy" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4") assert engine == "h5netcdf" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC") assert engine == "scipy" with xr.set_options(netcdf_engine_order=["h5netcdf", "scipy", "netcdf4"]): engine = get_default_netcdf_write_engine("", format=None) assert engine == "h5netcdf" engine = get_default_netcdf_write_engine("", format="NETCDF4") assert engine == "h5netcdf" engine = get_default_netcdf_write_engine("", format="NETCDF4_CLASSIC") assert engine == "netcdf4" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4") assert engine == "h5netcdf" engine = get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC") assert engine == "scipy" engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC") assert engine == "scipy" @requires_h5netcdf def test_default_engine_h5netcdf(monkeypatch): """Test the default netcdf engine when h5netcdf is the only importable module.""" monkeypatch.delitem(sys.modules, "netCDF4", raising=False) monkeypatch.delitem(sys.modules, "scipy", raising=False) monkeypatch.setattr(sys, "meta_path", []) engine = get_default_netcdf_write_engine("", format=None) assert engine == "h5netcdf" with pytest.raises( ValueError, match=re.escape( "cannot write NetCDF files with format='NETCDF3_CLASSIC' because " "none of the suitable backend libraries (SUITABLE_BACKENDS) are installed" ).replace("SUITABLE_BACKENDS", r"(scipy, netCDF4)|(netCDF4, scipy)"), ): get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC") def test_default_engine_nczarr_no_netcdf4_python(monkeypatch): monkeypatch.delitem(sys.modules, "netCDF4", raising=False) monkeypatch.setattr(sys, "meta_path", []) with pytest.raises( ValueError, match=re.escape( "cannot write NetCDF files in NCZarr format because " "none of the suitable backend libraries (netCDF4) are installed" ), ): get_default_netcdf_write_engine("#mode=nczarr", format=None) def test_custom_engine() -> None: expected = xr.Dataset( dict(a=2 * np.arange(5)), coords=dict(x=("x", np.arange(5), dict(units="s"))) ) class CustomBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj, 
drop_variables=None, **kwargs, ) -> xr.Dataset: return expected.copy(deep=True) actual = xr.open_dataset("fake_filename", engine=CustomBackend) assert_identical(expected, actual) def test_multiindex() -> None: # GH7139 # Check that we properly handle backends that change index variables dataset = xr.Dataset(coords={"coord1": ["A", "B"], "coord2": [1, 2]}) dataset = dataset.stack(z=["coord1", "coord2"]) class MultiindexBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj, drop_variables=None, **kwargs, ) -> xr.Dataset: return dataset.copy(deep=True) loaded = xr.open_dataset("fake_filename", engine=MultiindexBackend) assert_identical(dataset, loaded) class PassThroughBackendEntrypoint(xr.backends.BackendEntrypoint): """Access an object passed to the `open_dataset` method.""" def open_dataset(self, dataset, *, drop_variables=None): """Return the first argument.""" return dataset def explicit_chunks(chunks, shape): """Return explicit chunks, expanding any integer member to a tuple of integers.""" # Emulate `dask.array.core.normalize_chunks` but for simpler inputs. return tuple( ( ( (size // chunk) * (chunk,) + ((size % chunk,) if size % chunk or size == 0 else ()) ) if isinstance(chunk, Number) else chunk ) for chunk, size in zip(chunks, shape, strict=True) ) @requires_dask class TestPreferredChunks: """Test behaviors related to the backend's preferred chunks.""" var_name = "data" def create_dataset(self, shape, pref_chunks): """Return a dataset with a variable with the given shape and preferred chunks.""" dims = tuple(f"dim_{idx}" for idx in range(len(shape))) return xr.Dataset( { self.var_name: xr.Variable( dims, np.empty(shape, dtype=np.dtype("V1")), encoding={ "preferred_chunks": dict(zip(dims, pref_chunks, strict=True)) }, ) } ) def check_dataset(self, initial, final, expected_chunks): assert_identical(initial, final) assert final[self.var_name].chunks == expected_chunks @pytest.mark.parametrize( "shape,pref_chunks", [ # Represent preferred chunking with int. ((5,), (2,)), # Represent preferred chunking with tuple. ((5,), ((2, 2, 1),)), # Represent preferred chunking with int in two dims. ((5, 6), (4, 2)), # Represent preferred chunking with tuple in second dim. ((5, 6), (4, (2, 2, 2))), ], ) @pytest.mark.parametrize("request_with_empty_map", [False, True]) def test_honor_chunks(self, shape, pref_chunks, request_with_empty_map): """Honor the backend's preferred chunks when opening a dataset.""" initial = self.create_dataset(shape, pref_chunks) # To keep the backend's preferred chunks, the `chunks` argument must be an # empty mapping or map dimensions to `None`. chunks = ( {} if request_with_empty_map else dict.fromkeys(initial[self.var_name].dims, None) ) final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=chunks ) self.check_dataset(initial, final, explicit_chunks(pref_chunks, shape)) @pytest.mark.parametrize( "shape,pref_chunks,req_chunks", [ # Preferred chunking is int; requested chunking is int. ((5,), (2,), (3,)), # Preferred chunking is int; requested chunking is tuple. ((5,), (2,), ((2, 1, 1, 1),)), # Preferred chunking is tuple; requested chunking is int. ((5,), ((2, 2, 1),), (3,)), # Preferred chunking is tuple; requested chunking is tuple. ((5,), ((2, 2, 1),), ((2, 1, 1, 1),)), # Split chunks along a dimension other than the first. 
((1, 5), (1, 2), (1, 3)), ], ) def test_split_chunks(self, shape, pref_chunks, req_chunks): """Warn when the requested chunks separate the backend's preferred chunks.""" initial = self.create_dataset(shape, pref_chunks) with pytest.warns(UserWarning): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, shape)) @pytest.mark.parametrize( "shape,pref_chunks,req_chunks", [ # Keep preferred chunks using int representation. ((5,), (2,), (2,)), # Keep preferred chunks using tuple representation. ((5,), (2,), ((2, 2, 1),)), # Join chunks, leaving a final short chunk. ((5,), (2,), (4,)), # Join all chunks with an int larger than the dimension size. ((5,), (2,), (6,)), # Join one chunk using tuple representation. ((5,), (1,), ((1, 1, 2, 1),)), # Join one chunk using int representation. ((5,), ((1, 1, 2, 1),), (2,)), # Join multiple chunks using tuple representation. ((5,), ((1, 1, 2, 1),), ((2, 3),)), # Join chunks in multiple dimensions. ((5, 5), (2, (1, 1, 2, 1)), (4, (2, 3))), ], ) def test_join_chunks(self, shape, pref_chunks, req_chunks): """Don't warn when the requested chunks join or keep the preferred chunks.""" initial = self.create_dataset(shape, pref_chunks) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, shape)) @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_default_indexes(self, create_default_indexes): """Create default indexes if the backend does not create them.""" coords = xr.Coordinates({"x": ("x", [0, 1]), "y": list("abc")}, indexes={}) initial = xr.Dataset({"a": ("x", [1, 2])}, coords=coords) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, create_default_indexes=create_default_indexes, ) if create_default_indexes: assert all(name in final.xindexes for name in ["x", "y"]) else: assert len(final.xindexes) == 0 @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_default_indexes_passthrough(self, create_default_indexes): """Allow creating indexes in the backend.""" initial = xr.Dataset( {"a": (["x", "y"], [[1, 2, 3], [4, 5, 6]])}, coords={"x": ("x", [0, 1]), "y": ("y", list("abc"))}, ).stack(z=["x", "y"]) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, create_default_indexes=create_default_indexes, ) assert initial.coords.equals(final.coords) xarray-2025.12.0/xarray/tests/test_backends_chunks.py000066400000000000000000000061611511464676000225550ustar00rootroot00000000000000import numpy as np import pytest import xarray as xr from xarray.backends.chunks import align_nd_chunks, build_grid_chunks, grid_rechunk from xarray.tests import requires_dask @pytest.mark.parametrize( "size, chunk_size, region, expected_chunks", [ (10, 3, slice(1, 11), (2, 3, 3, 2)), (10, 3, slice(None, None), (3, 3, 3, 1)), (10, 3, None, (3, 3, 3, 1)), (10, 3, slice(None, 10), (3, 3, 3, 1)), (10, 3, slice(0, None), (3, 3, 3, 1)), (2, 10, slice(0, 3), (2,)), (4, 10, slice(7, 10), (3, 1)), ], ) def test_build_grid_chunks(size, chunk_size, region, expected_chunks): grid_chunks = build_grid_chunks( size, chunk_size=chunk_size, region=region, ) assert grid_chunks == expected_chunks @pytest.mark.parametrize( "nd_v_chunks, 
nd_backend_chunks, expected_chunks", [ (((2, 2, 2, 2),), ((3, 3, 2),), ((3, 3, 2),)), # ND cases (((2, 4), (2, 3)), ((2, 2, 2), (3, 2)), ((2, 4), (3, 2))), ], ) def test_align_nd_chunks(nd_v_chunks, nd_backend_chunks, expected_chunks): aligned_nd_chunks = align_nd_chunks( nd_v_chunks=nd_v_chunks, nd_backend_chunks=nd_backend_chunks, ) assert aligned_nd_chunks == expected_chunks @requires_dask @pytest.mark.parametrize( "enc_chunks, region, nd_v_chunks, expected_chunks", [ ( (3,), (slice(2, 14),), ((6, 6),), ( ( 4, 6, 2, ), ), ), ( (6,), (slice(0, 13),), ((6, 7),), ( ( 6, 7, ), ), ), ((6,), (slice(0, 13),), ((6, 6, 1),), ((6, 6, 1),)), ((3,), (slice(2, 14),), ((1, 3, 2, 6),), ((1, 3, 6, 2),)), ((3,), (slice(2, 14),), ((2, 2, 2, 6),), ((4, 6, 2),)), ((3,), (slice(2, 14),), ((3, 1, 3, 5),), ((4, 3, 5),)), ((4,), (slice(1, 13),), ((1, 1, 1, 4, 3, 2),), ((3, 4, 4, 1),)), ((5,), (slice(4, 16),), ((5, 7),), ((6, 6),)), # ND cases ( (3, 6), (slice(2, 14), slice(0, 13)), ((6, 6), (6, 7)), ( ( 4, 6, 2, ), ( 6, 7, ), ), ), ], ) def test_grid_rechunk(enc_chunks, region, nd_v_chunks, expected_chunks): dims = [f"dim_{i}" for i in range(len(region))] coords = { dim: list(range(r.start, r.stop)) for dim, r in zip(dims, region, strict=False) } shape = tuple(r.stop - r.start for r in region) arr = xr.DataArray( np.arange(np.prod(shape)).reshape(shape), dims=dims, coords=coords, ) arr = arr.chunk(dict(zip(dims, nd_v_chunks, strict=False))) result = grid_rechunk( arr.variable, enc_chunks=enc_chunks, region=region, ) assert result.chunks == expected_chunks xarray-2025.12.0/xarray/tests/test_backends_common.py000066400000000000000000000031641511464676000225520ustar00rootroot00000000000000from __future__ import annotations import io import re import numpy as np import pytest import xarray as xr from xarray.backends.common import _infer_dtype, robust_getitem from xarray.tests import requires_scipy class DummyFailure(Exception): pass class DummyArray: def __init__(self, failures): self.failures = failures def __getitem__(self, key): if self.failures: self.failures -= 1 raise DummyFailure return "success" def test_robust_getitem() -> None: array = DummyArray(failures=2) with pytest.raises(DummyFailure): array[...] 
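# Note: robust_getitem retries the indexing operation whenever the exception
# class passed via `catch` is raised, sleeping between attempts with a roughly
# exponential backoff that starts at `initial_delay` milliseconds. With
# failures=2 the third attempt returns "success" below, while failures=3
# combined with max_retries=2 exhausts all attempts and re-raises.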
result = robust_getitem(array, ..., catch=DummyFailure, initial_delay=1) assert result == "success" array = DummyArray(failures=3) with pytest.raises(DummyFailure): robust_getitem(array, ..., catch=DummyFailure, initial_delay=1, max_retries=2) @pytest.mark.parametrize( "data", [ np.array([["ab", "cdef", b"X"], [1, 2, "c"]], dtype=object), np.array([["x", 1], ["y", 2]], dtype="object"), ], ) def test_infer_dtype_error_on_mixed_types(data): with pytest.raises(ValueError, match="unable to infer dtype on variable"): _infer_dtype(data, "test") @requires_scipy def test_encoding_failure_note(): # Create an arbitrary value that cannot be encoded in netCDF3 ds = xr.Dataset({"invalid": np.array([2**63 - 1], dtype=np.int64)}) f = io.BytesIO() with pytest.raises( ValueError, match=re.escape( "Raised while encoding variable 'invalid' with value None: with pytest.raises( ValueError, match=r"group '/child' is not aligned with its parents" ): super().test_child_group_with_inconsistent_dimensions() def diff_chunks( comparison: dict[tuple[str, Hashable], bool], tree1: DataTree, tree2: DataTree ) -> str: mismatching_variables = [loc for loc, equals in comparison.items() if not equals] variable_messages = [ "\n".join( [ f"L {path}:{name}: {tree1[path].variables[name].chunksizes}", f"R {path}:{name}: {tree2[path].variables[name].chunksizes}", ] ) for path, name in mismatching_variables ] return "\n".join(["Differing chunk sizes:"] + variable_messages) def assert_chunks_equal( actual: DataTree, expected: DataTree, enforce_dask: bool = False ) -> None: __tracebackhide__ = True from xarray.namedarray.pycompat import array_type dask_array_type = array_type("dask") comparison = { (path, name): ( ( not enforce_dask or isinstance(node1.variables[name].data, dask_array_type) ) and node1.variables[name].chunksizes == node2.variables[name].chunksizes ) for path, (node1, node2) in xr.group_subtrees(actual, expected) for name in node1.variables.keys() } assert all(comparison.values()), diff_chunks(comparison, actual, expected) @pytest.fixture(scope="module") def unaligned_datatree_nc(tmp_path_factory): """Creates a test netCDF4 file with the following unaligned structure, writes it to a /tmp directory and returns the file path of the netCDF4 file. Group: / โ”‚ Dimensions: (lat: 1, lon: 2) โ”‚ Dimensions without coordinates: lat, lon โ”‚ Data variables: โ”‚ root_variable (lat, lon) float64 16B ... โ””โ”€โ”€ Group: /Group1 โ”‚ Dimensions: (lat: 1, lon: 2) โ”‚ Dimensions without coordinates: lat, lon โ”‚ Data variables: โ”‚ group_1_var (lat, lon) float64 16B ... โ””โ”€โ”€ Group: /Group1/subgroup1 Dimensions: (lat: 2, lon: 2) Dimensions without coordinates: lat, lon Data variables: subgroup1_var (lat, lon) float64 32B ... 
""" filepath = tmp_path_factory.mktemp("data") / "unaligned_subgroups.nc" with nc4.Dataset(filepath, "w", format="NETCDF4") as root_group: group_1 = root_group.createGroup("/Group1") subgroup_1 = group_1.createGroup("/subgroup1") root_group.createDimension("lat", 1) root_group.createDimension("lon", 2) root_group.createVariable("root_variable", np.float64, ("lat", "lon")) group_1_var = group_1.createVariable("group_1_var", np.float64, ("lat", "lon")) group_1_var[:] = np.array([[0.1, 0.2]]) group_1_var.units = "K" group_1_var.long_name = "air_temperature" subgroup_1.createDimension("lat", 2) subgroup1_var = subgroup_1.createVariable( "subgroup1_var", np.float64, ("lat", "lon") ) subgroup1_var[:] = np.array([[0.1, 0.2]]) yield filepath @pytest.fixture(scope="module") def unaligned_datatree_zarr_factory( tmp_path_factory, ) -> Generator[ Callable[[Literal[2, 3]], Path], None, None, ]: """Creates a zarr store with the following unaligned group hierarchy: Group: / โ”‚ Dimensions: (y: 3, x: 2) โ”‚ Dimensions without coordinates: y, x โ”‚ Data variables: โ”‚ a (y) int64 24B ... โ”‚ set0 (x) int64 16B ... โ””โ”€โ”€ Group: /Group1 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ a int64 8B ... โ”‚ โ”‚ b int64 8B ... โ”‚ โ””โ”€โ”€ /Group1/subgroup1 โ”‚ Dimensions: () โ”‚ Data variables: โ”‚ a int64 8B ... โ”‚ b int64 8B ... โ””โ”€โ”€ Group: /Group2 Dimensions: (y: 2, x: 2) Dimensions without coordinates: y, x Data variables: a (y) int64 16B ... b (x) float64 16B ... """ def _unaligned_datatree_zarr(zarr_format: Literal[2, 3]) -> Path: filepath = tmp_path_factory.mktemp("data") / "unaligned_simple_datatree.zarr" root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": 0, "b": 1}) set2_data = xr.Dataset({"a": ("y", [2, 3]), "b": ("x", [0.1, 0.2])}) root_data.to_zarr( filepath, mode="w", zarr_format=zarr_format, ) set1_data.to_zarr( filepath, group="/Group1", mode="a", zarr_format=zarr_format, ) set2_data.to_zarr( filepath, group="/Group2", mode="a", zarr_format=zarr_format, ) set1_data.to_zarr( filepath, group="/Group1/subgroup1", mode="a", zarr_format=zarr_format, ) return filepath yield _unaligned_datatree_zarr class NetCDFIOBase: engine: T_DataTreeNetcdfEngine | None def test_to_netcdf(self, tmpdir, simple_datatree): filepath = tmpdir / "test.nc" original_dt = simple_datatree original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert roundtrip_dt._close is not None assert_equal(original_dt, roundtrip_dt) def test_decode_cf(self, tmpdir): filepath = tmpdir / "test-cf-convention.nc" original_dt = xr.DataTree( xr.Dataset( { "test": xr.DataArray( data=np.array([0, 1, 2], dtype=np.uint16), attrs={"_FillValue": 99}, ), } ) ) original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree( filepath, engine=self.engine, decode_cf=False ) as roundtrip_dt: assert original_dt["test"].dtype == roundtrip_dt["test"].dtype def test_to_netcdf_inherited_coords(self, tmpdir) -> None: filepath = tmpdir / "test.nc" original_dt = DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1, 2])}, coords={"x": [3, 4]}), "/sub": xr.Dataset({"b": (("x",), [5, 6])}), } ) original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) subtree = cast(DataTree, roundtrip_dt["/sub"]) assert "x" not in subtree.to_dataset(inherit=False).coords def test_netcdf_encoding(self, tmpdir, simple_datatree) -> None: filepath = 
tmpdir / "test.nc" original_dt = simple_datatree # add compression comp = dict(zlib=True, complevel=9) enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)} original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert roundtrip_dt["/set2/a"].encoding["zlib"] == comp["zlib"] assert roundtrip_dt["/set2/a"].encoding["complevel"] == comp["complevel"] enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match=r"unexpected encoding group.*"): original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine) def test_write_subgroup(self, tmpdir) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ).children["child"] expected_dt = original_dt.copy() expected_dt.name = None filepath = tmpdir / "test.zarr" original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) assert_identical(expected_dt, roundtrip_dt) @requires_netCDF4 def test_no_redundant_dimensions(self, tmpdir) -> None: # regression test for https://github.com/pydata/xarray/issues/10241 original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = tmpdir / "test.zarr" original_dt.to_netcdf(filepath, engine=self.engine) root = nc4.Dataset(str(filepath)) child = root.groups["child"] assert list(root.dimensions) == ["x"] assert list(child.dimensions) == [] @requires_dask def test_compute_false(self, tmpdir, simple_datatree): filepath = tmpdir / "test.nc" original_dt = simple_datatree.chunk() result = original_dt.to_netcdf(filepath, engine=self.engine, compute=False) if not ON_WINDOWS: # File at filepath is not closed until .compute() is called. On # Windows, this means we can't open it yet. 
with open_datatree(filepath, engine=self.engine) as in_progress_dt: assert in_progress_dt.isomorphic(original_dt) assert not in_progress_dt.equals(original_dt) result.compute() with open_datatree(filepath, engine=self.engine) as written_dt: assert_identical(written_dt, original_dt) def test_default_write_engine(self, tmpdir, simple_datatree, monkeypatch): # Ensure the other netCDF library are not installed exclude = "netCDF4" if self.engine == "h5netcdf" else "h5netcdf" monkeypatch.delitem(sys.modules, exclude, raising=False) monkeypatch.setattr(sys, "meta_path", []) filepath = tmpdir + "/phony_dims.nc" original_dt = simple_datatree original_dt.to_netcdf(filepath) # should not raise @requires_dask def test_open_datatree_chunks(self, tmpdir) -> None: filepath = tmpdir / "test.nc" chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine, chunks=chunks) as tree: xr.testing.assert_identical(tree, original_tree) assert_chunks_equal(tree, original_tree, enforce_dask=True) def test_roundtrip_via_memoryview(self, simple_datatree) -> None: original_dt = simple_datatree memview = original_dt.to_netcdf(engine=self.engine) roundtrip_dt = load_datatree(memview, engine=self.engine) assert_equal(original_dt, roundtrip_dt) def test_to_memoryview_compute_false(self, simple_datatree) -> None: original_dt = simple_datatree with pytest.raises( NotImplementedError, match=re.escape("to_netcdf() with compute=False is not yet implemented"), ): original_dt.to_netcdf(engine=self.engine, compute=False) def test_open_datatree_specific_group(self, tmpdir, simple_datatree) -> None: """Test opening a specific group within a NetCDF file using `open_datatree`.""" filepath = tmpdir / "test.nc" group = "/set1" original_dt = simple_datatree original_dt.to_netcdf(filepath, engine=self.engine) expected_subtree = original_dt[group].copy() expected_subtree.orphan() with open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree: assert subgroup_tree.root.parent is None assert_equal(subgroup_tree, expected_subtree) @requires_h5netcdf_or_netCDF4 class TestGenericNetCDFIO(NetCDFIOBase): engine: T_DataTreeNetcdfEngine | None = None @requires_netCDF4 def test_open_netcdf3(self, tmpdir) -> None: filepath = tmpdir / "test.nc" ds = xr.Dataset({"foo": 1}) ds.to_netcdf(filepath, format="NETCDF3_CLASSIC") expected_dt = DataTree(ds) roundtrip_dt = load_datatree(filepath) # must use netCDF4 engine assert_equal(expected_dt, roundtrip_dt) @requires_h5netcdf @requires_netCDF4 def test_memoryview_write_h5netcdf_read_netcdf4(self, simple_datatree) -> None: original_dt = simple_datatree memview = original_dt.to_netcdf(engine="h5netcdf") roundtrip_dt = load_datatree(memview, engine="netcdf4") assert_equal(original_dt, roundtrip_dt) @requires_h5netcdf @requires_netCDF4 def test_memoryview_write_netcdf4_read_h5netcdf(self, simple_datatree) -> None: original_dt = simple_datatree memview = original_dt.to_netcdf(engine="netcdf4") roundtrip_dt = load_datatree(memview, engine="h5netcdf") assert_equal(original_dt, roundtrip_dt) def test_open_datatree_unaligned_hierarchy(self, unaligned_datatree_nc) -> None: with 
pytest.raises( ValueError, match=( re.escape( "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n" ) + ".*" ), ): open_datatree(unaligned_datatree_nc) def test_open_groups(self, unaligned_datatree_nc) -> None: """Test `open_groups` with a netCDF4 file with an unaligned group hierarchy.""" unaligned_dict_of_datasets = open_groups(unaligned_datatree_nc) # Check that group names are keys in the dictionary of `xr.Datasets` assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(unaligned_datatree_nc, group="/") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(unaligned_datatree_nc, group="Group1") as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( unaligned_datatree_nc, group="/Group1/subgroup1" ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) for ds in unaligned_dict_of_datasets.values(): ds.close() @requires_dask def test_open_groups_chunks(self, tmpdir) -> None: """Test `open_groups` with chunks on a netcdf4 file.""" chunks = {"x": 2, "y": 1} filepath = tmpdir / "test.nc" chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_netcdf(filepath, mode="w") dict_of_datasets = open_groups(filepath, chunks=chunks) for path, ds in dict_of_datasets.items(): assert {k: max(vs) for k, vs in ds.chunksizes.items()} == chunks, ( f"unexpected chunking for {path}" ) for ds in dict_of_datasets.values(): ds.close() @requires_netCDF4 class TestNetCDF4DatatreeIO(NetCDFIOBase): engine: T_DataTreeNetcdfEngine | None = "netcdf4" def test_open_groups_to_dict(self, tmpdir) -> None: """Create an aligned netCDF4 with the following structure to test `open_groups` and `DataTree.from_dict`. Group: / โ”‚ Dimensions: (lat: 1, lon: 2) โ”‚ Dimensions without coordinates: lat, lon โ”‚ Data variables: โ”‚ root_variable (lat, lon) float64 16B ... โ””โ”€โ”€ Group: /Group1 โ”‚ Dimensions: (lat: 1, lon: 2) โ”‚ Dimensions without coordinates: lat, lon โ”‚ Data variables: โ”‚ group_1_var (lat, lon) float64 16B ... โ””โ”€โ”€ Group: /Group1/subgroup1 Dimensions: (lat: 1, lon: 2) Dimensions without coordinates: lat, lon Data variables: subgroup1_var (lat, lon) float64 16B ... 
""" filepath = tmpdir + "/all_aligned_child_nodes.nc" with nc4.Dataset(filepath, "w", format="NETCDF4") as root_group: group_1 = root_group.createGroup("/Group1") subgroup_1 = group_1.createGroup("/subgroup1") root_group.createDimension("lat", 1) root_group.createDimension("lon", 2) root_group.createVariable("root_variable", np.float64, ("lat", "lon")) group_1_var = group_1.createVariable( "group_1_var", np.float64, ("lat", "lon") ) group_1_var[:] = np.array([[0.1, 0.2]]) group_1_var.units = "K" group_1_var.long_name = "air_temperature" subgroup1_var = subgroup_1.createVariable( "subgroup1_var", np.float64, ("lat", "lon") ) subgroup1_var[:] = np.array([[0.1, 0.2]]) aligned_dict_of_datasets = open_groups(filepath) aligned_dt = DataTree.from_dict(aligned_dict_of_datasets) with open_datatree(filepath) as opened_tree: assert opened_tree.identical(aligned_dt) for ds in aligned_dict_of_datasets.values(): ds.close() @requires_h5netcdf class TestH5NetCDFDatatreeIO(NetCDFIOBase): engine: T_DataTreeNetcdfEngine | None = "h5netcdf" def test_phony_dims_warning(self, tmpdir) -> None: filepath = tmpdir + "/phony_dims.nc" import h5py foo_data = np.arange(125).reshape(5, 5, 5) bar_data = np.arange(625).reshape(25, 5, 5) var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data} with h5py.File(filepath, "w") as f: grps = ["bar", "baz"] for grp in grps: fx = f.create_group(grp) for k, v in var.items(): fx.create_dataset(k, data=v) with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"): with open_datatree(filepath, engine=self.engine) as tree: assert tree.bar.dims == { "phony_dim_0": 5, "phony_dim_1": 5, "phony_dim_2": 5, "phony_dim_3": 25, } def test_roundtrip_using_filelike_object(self, tmpdir, simple_datatree) -> None: original_dt = simple_datatree filepath = tmpdir + "/test.nc" # h5py requires both read and write access when writing, it will # work with file-like objects provided they support both, and are # seekable. 
with open(filepath, "wb+") as file: original_dt.to_netcdf(file, engine=self.engine) with open(filepath, "rb") as file: with open_datatree(file, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) @network @requires_pydap class TestPyDAPDatatreeIO: """Test PyDAP backend for DataTree.""" engine: T_DataTreeNetcdfEngine | None = "pydap" # you can check these by adding a .dmr to urls, and replacing dap4 with http unaligned_datatree_url = ( "dap4://test.opendap.org/opendap/dap4/unaligned_simple_datatree.nc.h5" ) all_aligned_child_nodes_url = ( "dap4://test.opendap.org/opendap/dap4/all_aligned_child_nodes.nc.h5" ) simplegroup_datatree_url = "dap4://test.opendap.org/opendap/dap4/SimpleGroup.nc4.h5" def test_open_datatree_unaligned_hierarchy( self, url=unaligned_datatree_url, ) -> None: with pytest.raises( ValueError, match=( re.escape( "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n" ) + ".*" ), ): open_datatree(url, engine=self.engine) def test_open_groups(self, url=unaligned_datatree_url) -> None: """Test `open_groups` with a netCDF4/HDF5 file with an unaligned group hierarchy.""" unaligned_dict_of_datasets = open_groups(url, engine=self.engine) # Check that group names are keys in the dictionary of `xr.Datasets` assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(url, engine=self.engine, group="/") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(url, group="Group1", engine=self.engine) as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( url, group="/Group1/subgroup1", engine=self.engine, ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) def test_inherited_coords(self, tmpdir, url=simplegroup_datatree_url) -> None: """Test that `open_datatree` inherits coordinates from root tree. This particular h5 file is a test file that inherits the time coordinate from the root dataset to the child dataset. Group: / โ”‚ Dimensions: (time: 1, Z: 1000, nv: 2) โ”‚ Coordinates: | time: (time) float32 0.5 | Z: (Z) float32 -0.0 -1.0 -2.0 ... โ”‚ Data variables: โ”‚ Pressure (Z) float32 ... | time_bnds (time, nv) float32 ... โ””โ”€โ”€ Group: /SimpleGroup โ”‚ Dimensions: (time: 1, Z: 1000, nv: 2, Y: 40, X: 40) โ”‚ Coordinates: | Y: (Y) int16 1 2 3 4 ... | X: (X) int16 1 2 3 4 ... | Inherited coordinates: | time: (time) float32 0.5 | Z: (Z) float32 -0.0 -1.0 -2.0 ... โ”‚ Data variables: โ”‚ Temperature (time, Z, Y, X) float32 ... | Salinity (time, Z, Y, X) float32 ... 
""" import pydap from pydap.net import create_session # Create a session with pre-set retry params in pydap backend, to cache urls cache_name = tmpdir / "debug" session = create_session( use_cache=True, cache_kwargs={"cache_name": cache_name} ) session.cache.clear() _version_ = Version(pydap.__version__) tree = open_datatree(url, engine=self.engine, session=session) assert set(tree.dims) == {"time", "Z", "nv"} assert tree["/SimpleGroup"].coords["time"].dims == ("time",) assert tree["/SimpleGroup"].coords["Z"].dims == ("Z",) assert tree["/SimpleGroup"].coords["Y"].dims == ("Y",) assert tree["/SimpleGroup"].coords["X"].dims == ("X",) with xr.open_dataset(url, engine=self.engine, group="/SimpleGroup") as expected: assert set(tree["/SimpleGroup"].dims) == set( list(expected.dims) + ["Z", "nv"] ) if _version_ > Version("3.5.5"): # Total downloads are: 1 dmr, + 1 dap url for all dimensions for each group assert len(session.cache.urls()) == 3 else: # 1 dmr + 1 dap url per dimension (total there are 4 dimension arrays) assert len(session.cache.urls()) == 5 def test_open_groups_to_dict(self, url=all_aligned_child_nodes_url) -> None: aligned_dict_of_datasets = open_groups(url, engine=self.engine) aligned_dt = DataTree.from_dict(aligned_dict_of_datasets) with open_datatree(url, engine=self.engine) as opened_tree: assert opened_tree.identical(aligned_dt) @requires_zarr @parametrize_zarr_format class TestZarrDatatreeIO: engine = "zarr" def test_to_zarr(self, tmpdir, simple_datatree, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) @pytest.mark.filterwarnings( "ignore:Numcodecs codecs are not in the Zarr version 3 specification" ) def test_zarr_encoding(self, tmpdir, simple_datatree, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree if zarr_format == 2: from numcodecs.blosc import Blosc codec = Blosc(cname="zstd", clevel=3, shuffle=2) comp = {"compressors": (codec,)} if has_zarr_v3 else {"compressor": codec} elif zarr_format == 3: # specifying codecs in zarr_format=3 requires importing from zarr 3 namespace from zarr.registry import get_codec_class Blosc = get_codec_class("numcodecs.blosc") comp = {"compressors": (Blosc(cname="zstd", clevel=3),)} # type: ignore[call-arg] enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)} original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: compressor_key = "compressors" if has_zarr_v3 else "compressor" assert ( roundtrip_dt["/set2/a"].encoding[compressor_key] == comp[compressor_key] ) enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match=r"unexpected encoding group.*"): original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format) @pytest.mark.xfail(reason="upstream zarr read-only changes have broken this test") @pytest.mark.filterwarnings("ignore:Duplicate name") def test_to_zarr_zip_store(self, tmpdir, simple_datatree, zarr_format) -> None: from zarr.storage import ZipStore filepath = str(tmpdir / "test.zarr.zip") original_dt = simple_datatree store = ZipStore(filepath, mode="w") original_dt.to_zarr(store, zarr_format=zarr_format) with open_datatree(store, engine="zarr") as roundtrip_dt: # type: ignore[arg-type, unused-ignore] assert_equal(original_dt, roundtrip_dt) def 
test_to_zarr_not_consolidated( self, tmpdir, simple_datatree, zarr_format ) -> None: filepath = tmpdir / "test.zarr" zmetadata = filepath / ".zmetadata" s1zmetadata = filepath / "set1" / ".zmetadata" filepath = str(filepath) # casting to str avoids a pathlib bug in xarray original_dt = simple_datatree original_dt.to_zarr(filepath, consolidated=False, zarr_format=zarr_format) assert not zmetadata.exists() assert not s1zmetadata.exists() with pytest.warns(RuntimeWarning, match="consolidated"): with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) def test_to_zarr_default_write_mode( self, tmpdir, simple_datatree, zarr_format ) -> None: simple_datatree.to_zarr(str(tmpdir), zarr_format=zarr_format) import zarr # expected exception type changed in zarr-python v2->v3, see https://github.com/zarr-developers/zarr-python/issues/2821 expected_exception_type = ( FileExistsError if has_zarr_v3 else zarr.errors.ContainsGroupError ) # with default settings, to_zarr should not overwrite an existing dir with pytest.raises(expected_exception_type): simple_datatree.to_zarr(str(tmpdir)) @requires_dask def test_to_zarr_compute_false( self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3] ) -> None: import dask.array as da storepath = tmp_path / "test.zarr" original_dt = simple_datatree.chunk() result = original_dt.to_zarr( str(storepath), compute=False, zarr_format=zarr_format ) def assert_expected_zarr_files_exist( arr_dir: Path, chunks_expected: bool, is_scalar: bool, zarr_format: Literal[2, 3], ) -> None: """For one zarr array, check that all expected metadata and chunk data files exist.""" # TODO: This function is now so complicated that it's practically checking compliance with the whole zarr spec... # TODO: Perhaps it would be better to instead trust that zarr-python is spec-compliant and check `DataTree` against zarr-python? # TODO: The way to do that would ideally be to use zarr-pythons ability to determine how many chunks have been initialized. if zarr_format == 2: zarray_file, zattrs_file = (arr_dir / ".zarray"), (arr_dir / ".zattrs") assert zarray_file.exists() and zarray_file.is_file() assert zattrs_file.exists() and zattrs_file.is_file() chunk_file = arr_dir / "0" if chunks_expected: # assumes empty chunks were written # (i.e. they did not contain only fill_value and write_empty_chunks was False) assert chunk_file.exists() and chunk_file.is_file() else: # either dask array or array of all fill_values assert not chunk_file.exists() elif zarr_format == 3: metadata_file = arr_dir / "zarr.json" assert metadata_file.exists() and metadata_file.is_file() chunks_dir = arr_dir / "c" chunk_file = chunks_dir / "0" if chunks_expected: # assumes empty chunks were written # (i.e. 
they did not contain only fill_value and write_empty_chunks was False) if is_scalar: # this is the expected behaviour for storing scalars in zarr 3, see https://github.com/pydata/xarray/issues/10147 assert chunks_dir.exists() and chunks_dir.is_file() else: assert chunks_dir.exists() and chunks_dir.is_dir() assert chunk_file.exists() and chunk_file.is_file() else: assert not chunks_dir.exists() assert not chunk_file.exists() DEFAULT_ZARR_FILL_VALUE = 0 # The default value of write_empty_chunks changed from True->False in zarr-python v2->v3 WRITE_EMPTY_CHUNKS_DEFAULT = not has_zarr_v3 for node in original_dt.subtree: # inherited variables aren't meant to be written to zarr local_node_variables = node.to_dataset(inherit=False).variables for name, var in local_node_variables.items(): var_dir = storepath / node.path.removeprefix("/") / name # type: ignore[operator] assert_expected_zarr_files_exist( arr_dir=var_dir, # don't expect dask.Arrays to be written to disk, as compute=False # also don't expect numpy arrays containing only zarr's fill_value to be written to disk chunks_expected=( not isinstance(var.data, da.Array) and ( var.data != DEFAULT_ZARR_FILL_VALUE or WRITE_EMPTY_CHUNKS_DEFAULT ) ), is_scalar=not bool(var.dims), zarr_format=zarr_format, ) in_progress_dt = load_datatree(str(storepath), engine="zarr") assert not in_progress_dt.equals(original_dt) result.compute() written_dt = load_datatree(str(storepath), engine="zarr") assert_identical(written_dt, original_dt) @requires_dask def test_rplus_mode( self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3] ) -> None: storepath = tmp_path / "test.zarr" original_dt = simple_datatree.chunk() original_dt.to_zarr(storepath, compute=False, zarr_format=zarr_format) original_dt.to_zarr(storepath, mode="r+") with open_datatree(str(storepath), engine="zarr") as written_dt: assert_identical(written_dt, original_dt) @requires_dask def test_to_zarr_no_redundant_computation(self, tmpdir, zarr_format) -> None: import dask.array as da eval_count = 0 def expensive_func(x): nonlocal eval_count eval_count += 1 return x + 1 base = da.random.random((), chunks=()) derived1 = da.map_blocks(expensive_func, base, meta=np.array((), np.float64)) derived2 = derived1 + 1 # depends on derived1 tree = DataTree.from_dict( { "group1": xr.Dataset({"derived": derived1}), "group2": xr.Dataset({"derived": derived2}), } ) filepath = str(tmpdir / "test.zarr") tree.to_zarr(filepath, zarr_format=zarr_format) assert eval_count == 1 # not 2 def test_to_zarr_inherited_coords(self, tmpdir, zarr_format): original_dt = DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1, 2])}, coords={"x": [3, 4]}), "/sub": xr.Dataset({"b": (("x",), [5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) subtree = cast(DataTree, roundtrip_dt["/sub"]) assert "x" not in subtree.to_dataset(inherit=False).coords def test_open_groups_round_trip(self, tmpdir, simple_datatree, zarr_format) -> None: """Test `open_groups` opens a zarr store with the `simple_datatree` structure.""" filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree original_dt.to_zarr(filepath, zarr_format=zarr_format) roundtrip_dict = open_groups(filepath, engine="zarr") roundtrip_dt = DataTree.from_dict(roundtrip_dict) with open_datatree(filepath, engine="zarr") as opened_tree: assert opened_tree.identical(roundtrip_dt) for ds in roundtrip_dict.values(): 
ds.close() @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_open_datatree_unaligned_hierarchy( self, unaligned_datatree_zarr_factory, zarr_format ) -> None: storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format) with pytest.raises( ValueError, match=( re.escape("group '/Group2' is not aligned with its parents:") + ".*" ), ): open_datatree(storepath, engine="zarr") @requires_dask def test_open_datatree_chunks(self, tmpdir, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr", chunks=chunks) as tree: xr.testing.assert_identical(tree, original_tree) assert_chunks_equal(tree, original_tree, enforce_dask=True) # https://github.com/pydata/xarray/issues/10098 # If the open tasks are not give unique tokens per node, and the # dask graph is computed in one go, data won't be uniquely loaded # from each node. xr.testing.assert_identical(tree.compute(), original_tree) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_open_groups(self, unaligned_datatree_zarr_factory, zarr_format) -> None: """Test `open_groups` with a zarr store of an unaligned group hierarchy.""" storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format) unaligned_dict_of_datasets = open_groups(storepath, engine="zarr") assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() assert "/Group2" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(storepath, group="/", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(storepath, group="Group1", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( storepath, group="/Group1/subgroup1", engine="zarr" ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) with xr.open_dataset(storepath, group="/Group2", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/Group2"], expected) for ds in unaligned_dict_of_datasets.values(): ds.close() @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) @pytest.mark.parametrize("write_consolidated_metadata", [True, False, None]) def test_open_datatree_specific_group( self, tmpdir, simple_datatree, write_consolidated_metadata, zarr_format, ) -> None: """Test opening a specific group within a Zarr store using `open_datatree`.""" filepath = str(tmpdir / "test.zarr") group = "/set2" original_dt = simple_datatree original_dt.to_zarr( filepath, consolidated=write_consolidated_metadata, zarr_format=zarr_format ) expected_subtree = original_dt[group].copy() expected_subtree.orphan() with open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree: assert subgroup_tree.root.parent is None assert_equal(subgroup_tree, 
expected_subtree) @requires_dask def test_open_groups_chunks(self, tmpdir, zarr_format) -> None: """Test `open_groups` with chunks on a zarr store.""" chunks = {"x": 2, "y": 1} filepath = str(tmpdir / "test.zarr") root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_zarr(filepath, mode="w", zarr_format=zarr_format) dict_of_datasets = open_groups(filepath, engine="zarr", chunks=chunks) for path, ds in dict_of_datasets.items(): assert {k: max(vs) for k, vs in ds.chunksizes.items()} == chunks, ( f"unexpected chunking for {path}" ) for ds in dict_of_datasets.values(): ds.close() def test_write_subgroup(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ).children["child"] expected_dt = original_dt.copy() expected_dt.name = None filepath = str(tmpdir / "test.zarr") original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) assert_identical(expected_dt, roundtrip_dt) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_write_inherited_coords_false(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr( filepath, write_inherited_coords=False, zarr_format=zarr_format ) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_identical(original_dt, roundtrip_dt) expected_child = original_dt.children["child"].copy(inherit=False) expected_child.name = None with open_datatree(filepath, group="child", engine="zarr") as roundtrip_child: assert_identical(expected_child, roundtrip_child) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_write_inherited_coords_true(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr( filepath, write_inherited_coords=True, zarr_format=zarr_format ) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_identical(original_dt, roundtrip_dt) expected_child = original_dt.children["child"].copy(inherit=True) expected_child.name = None with open_datatree(filepath, group="child", engine="zarr") as roundtrip_child: assert_identical(expected_child, roundtrip_child) @pytest.mark.xfail( ON_WINDOWS, reason="Permission errors from Zarr: https://github.com/pydata/xarray/pull/10793", ) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_zarr_engine_recognised(self, tmpdir, zarr_format) -> None: """Test that xarray can guess the zarr backend when the engine is not specified""" original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr( filepath, write_inherited_coords=True, 
zarr_format=zarr_format ) with open_datatree(filepath) as roundtrip_dt: assert_identical(original_dt, roundtrip_dt) xarray-2025.12.0/xarray/tests/test_backends_file_manager.py000066400000000000000000000175171511464676000237020ustar00rootroot00000000000000from __future__ import annotations import gc import pickle import threading from unittest import mock import pytest from xarray.backends.file_manager import CachingFileManager, PickleableFileManager from xarray.backends.lru_cache import LRUCache from xarray.core.options import set_options from xarray.tests import assert_no_warnings @pytest.fixture(params=[1, 2, 3, None]) def file_cache(request): maxsize = request.param if maxsize is None: yield {} else: yield LRUCache(maxsize) def test_file_manager_mock_write(file_cache) -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) lock = mock.MagicMock(spec=threading.Lock()) manager = CachingFileManager(opener, "filename", lock=lock, cache=file_cache) f = manager.acquire() f.write("contents") manager.close() assert not file_cache opener.assert_called_once_with("filename") mock_file.write.assert_called_once_with("contents") mock_file.close.assert_called_once_with() lock.__enter__.assert_has_calls([mock.call(), mock.call()]) @pytest.mark.parametrize("warn_for_unclosed_files", [True, False]) def test_file_manager_autoclose(warn_for_unclosed_files) -> None: mock_file = mock.Mock() opener = mock.Mock(return_value=mock_file) cache: dict = {} manager = CachingFileManager(opener, "filename", cache=cache) manager.acquire() assert cache # can no longer use pytest.warns(None) if warn_for_unclosed_files: ctx = pytest.warns(RuntimeWarning) else: ctx = assert_no_warnings() # type: ignore[assignment] with set_options(warn_for_unclosed_files=warn_for_unclosed_files): with ctx: del manager gc.collect() assert not cache mock_file.close.assert_called_once_with() def test_file_manager_autoclose_while_locked() -> None: opener = mock.Mock() lock = threading.Lock() cache: dict = {} manager = CachingFileManager(opener, "filename", lock=lock, cache=cache) manager.acquire() assert cache lock.acquire() with set_options(warn_for_unclosed_files=False): del manager gc.collect() # can't clear the cache while locked, but also don't block in __del__ assert cache def test_file_manager_repr() -> None: opener = mock.Mock() manager = CachingFileManager(opener, "my-file") assert "my-file" in repr(manager) def test_file_manager_cache_and_refcounts() -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) cache: dict = {} ref_counts: dict = {} manager = CachingFileManager(opener, "filename", cache=cache, ref_counts=ref_counts) assert ref_counts[manager._key] == 1 assert not cache manager.acquire() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager gc.collect() assert not ref_counts assert not cache def test_file_manager_cache_repeated_open() -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) cache: dict = {} manager = CachingFileManager(opener, "filename", cache=cache) manager.acquire() assert len(cache) == 1 manager2 = CachingFileManager(opener, "filename", cache=cache) manager2.acquire() assert len(cache) == 2 with set_options(warn_for_unclosed_files=False): del manager gc.collect() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager2 gc.collect() assert not cache def test_file_manager_cache_with_pickle(tmpdir) -> None: path = str(tmpdir.join("testing.txt")) with 
open(path, "w") as f: f.write("data") cache: dict = {} with mock.patch("xarray.backends.file_manager.FILE_CACHE", cache): assert not cache manager = CachingFileManager(open, path, mode="r") manager.acquire() assert len(cache) == 1 manager2 = pickle.loads(pickle.dumps(manager)) manager2.acquire() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager gc.collect() # assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager2 gc.collect() assert not cache def test_file_manager_write_consecutive(tmpdir, file_cache) -> None: path1 = str(tmpdir.join("testing1.txt")) path2 = str(tmpdir.join("testing2.txt")) manager1 = CachingFileManager(open, path1, mode="w", cache=file_cache) manager2 = CachingFileManager(open, path2, mode="w", cache=file_cache) f1a = manager1.acquire() f1a.write("foo") f1a.flush() f2 = manager2.acquire() f2.write("bar") f2.flush() f1b = manager1.acquire() f1b.write("baz") assert (getattr(file_cache, "maxsize", float("inf")) > 1) == (f1a is f1b) manager1.close() manager2.close() with open(path1) as f: assert f.read() == "foobaz" with open(path2) as f: assert f.read() == "bar" def test_file_manager_write_concurrent(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, path, mode="w", cache=file_cache) f1 = manager.acquire() f2 = manager.acquire() f3 = manager.acquire() assert f1 is f2 assert f2 is f3 f1.write("foo") f1.flush() f2.write("bar") f2.flush() f3.write("baz") f3.flush() manager.close() with open(path) as f: assert f.read() == "foobarbaz" def test_file_manager_write_pickle(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, path, mode="w", cache=file_cache) f = manager.acquire() f.write("foo") f.flush() manager2 = pickle.loads(pickle.dumps(manager)) f2 = manager2.acquire() f2.write("bar") manager2.close() manager.close() with open(path) as f: assert f.read() == "foobar" def test_file_manager_read(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: f.write("foobar") manager = CachingFileManager(open, path, cache=file_cache) f = manager.acquire() assert f.read() == "foobar" manager.close() def test_file_manager_acquire_context(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: f.write("foobar") class AcquisitionError(Exception): pass manager = CachingFileManager(open, path, cache=file_cache) with pytest.raises(AcquisitionError): with manager.acquire_context() as f: assert f.read() == "foobar" raise AcquisitionError assert not file_cache # file was *not* already open with manager.acquire_context() as f: assert f.read() == "foobar" with pytest.raises(AcquisitionError): with manager.acquire_context() as f: f.seek(0) assert f.read() == "foobar" raise AcquisitionError assert file_cache # file *was* already open manager.close() def test_pickleable_file_manager_write_pickle(tmpdir) -> None: path = str(tmpdir.join("testing.txt")) manager = PickleableFileManager(open, path, mode="w") f = manager.acquire() f.write("foo") f.flush() manager2 = pickle.loads(pickle.dumps(manager)) f2 = manager2.acquire() f2.write("bar") manager2.close() manager.close() with open(path) as f: assert f.read() == "foobar" def test_pickleable_file_manager_preserves_closed(tmpdir) -> None: path = str(tmpdir.join("testing.txt")) manager = PickleableFileManager(open, path, mode="w") f = manager.acquire() f.write("foo") manager.close() manager2 = 
pickle.loads(pickle.dumps(manager)) assert manager2._closed assert repr(manager2) == "" xarray-2025.12.0/xarray/tests/test_backends_locks.py000066400000000000000000000005561511464676000223770ustar00rootroot00000000000000from __future__ import annotations import threading from xarray.backends import locks def test_threaded_lock() -> None: lock1 = locks._get_threaded_lock("foo") assert isinstance(lock1, type(threading.Lock())) lock2 = locks._get_threaded_lock("foo") assert lock1 is lock2 lock3 = locks._get_threaded_lock("bar") assert lock1 is not lock3 xarray-2025.12.0/xarray/tests/test_backends_lru_cache.py000066400000000000000000000044061511464676000232070ustar00rootroot00000000000000from __future__ import annotations from typing import Any from unittest import mock import pytest from xarray.backends.lru_cache import LRUCache def test_simple() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 assert cache["x"] == 1 assert cache["y"] == 2 assert len(cache) == 2 assert dict(cache) == {"x": 1, "y": 2} assert list(cache.keys()) == ["x", "y"] assert list(cache.items()) == [("x", 1), ("y", 2)] cache["z"] = 3 assert len(cache) == 2 assert list(cache.items()) == [("y", 2), ("z", 3)] def test_trivial() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=0) cache["x"] = 1 assert len(cache) == 0 def test_invalid() -> None: with pytest.raises(TypeError): LRUCache(maxsize=None) # type: ignore[arg-type] with pytest.raises(ValueError): LRUCache(maxsize=-1) def test_update_priority() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 assert list(cache) == ["x", "y"] assert "x" in cache # contains assert list(cache) == ["y", "x"] assert cache["y"] == 2 # getitem assert list(cache) == ["x", "y"] cache["x"] = 3 # setitem assert list(cache.items()) == [("y", 2), ("x", 3)] def test_del() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 del cache["x"] assert dict(cache) == {"y": 2} def test_on_evict() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=1, on_evict=on_evict) cache["x"] = 1 cache["y"] = 2 on_evict.assert_called_once_with("x", 1) def test_on_evict_trivial() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=0, on_evict=on_evict) cache["x"] = 1 on_evict.assert_called_once_with("x", 1) def test_resize() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) assert cache.maxsize == 2 cache["w"] = 0 cache["x"] = 1 cache["y"] = 2 assert list(cache.items()) == [("x", 1), ("y", 2)] cache.maxsize = 10 cache["z"] = 3 assert list(cache.items()) == [("x", 1), ("y", 2), ("z", 3)] cache.maxsize = 1 assert list(cache.items()) == [("z", 3)] with pytest.raises(ValueError): cache.maxsize = -1 xarray-2025.12.0/xarray/tests/test_calendar_ops.py000066400000000000000000000246341511464676000220670ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest from xarray import CFTimeIndex, DataArray, Dataset, infer_freq from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftime_offsets import date_range from xarray.testing import assert_identical from xarray.tests import requires_cftime cftime = pytest.importorskip("cftime") @pytest.mark.parametrize( "source, target, use_cftime, freq", [ ("standard", "noleap", None, "D"), ("noleap", "proleptic_gregorian", True, "D"), ("noleap", "all_leap", None, "D"), ("all_leap", "proleptic_gregorian", False, "4h"), ], ) def test_convert_calendar(source, target, 
use_cftime, freq): src = DataArray( date_range("2004-01-01", "2004-12-31", freq=freq, calendar=source), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) conv = convert_calendar(da_src, target, use_cftime=use_cftime) assert conv.time.dt.calendar == target if source != "noleap": expected_times = date_range( "2004-01-01", "2004-12-31", freq=freq, use_cftime=use_cftime, calendar=target, ) else: expected_times_pre_leap = date_range( "2004-01-01", "2004-02-28", freq=freq, use_cftime=use_cftime, calendar=target, ) expected_times_post_leap = date_range( "2004-03-01", "2004-12-31", freq=freq, use_cftime=use_cftime, calendar=target, ) expected_times = expected_times_pre_leap.append(expected_times_post_leap) np.testing.assert_array_equal(conv.time, expected_times) def test_convert_calendar_dataset(): # Check that variables without a time dimension are not modified src = DataArray( date_range("2004-01-01", "2004-12-31", freq="D", calendar="standard"), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ).expand_dims(lat=[0, 1]) ds_src = Dataset({"hastime": da_src, "notime": (("lat",), [0, 1])}) conv = convert_calendar(ds_src, "360_day", align_on="date") assert conv.time.dt.calendar == "360_day" assert_identical(ds_src.notime, conv.notime) @pytest.mark.parametrize( "source,target,freq", [ ("standard", "360_day", "D"), ("360_day", "proleptic_gregorian", "D"), ("proleptic_gregorian", "360_day", "4h"), ], ) @pytest.mark.parametrize("align_on", ["date", "year"]) def test_convert_calendar_360_days(source, target, freq, align_on): src = DataArray( date_range("2004-01-01", "2004-12-30", freq=freq, calendar=source), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) conv = convert_calendar(da_src, target, align_on=align_on) assert conv.time.dt.calendar == target if align_on == "date": np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30], ) elif target == "360_day": np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 29], ) else: np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 31, 30, 30, 31, 30, 31, 29, 31], ) if source == "360_day" and align_on == "year": assert conv.size == 360 if freq == "D" else 360 * 4 else: assert conv.size == 359 if freq == "D" else 359 * 4 def test_convert_calendar_360_days_random(): da_std = DataArray( np.linspace(0, 1, 366), dims=("time",), coords={ "time": date_range( "2004-01-01", "2004-12-31", freq="D", calendar="standard", use_cftime=False, ) }, ) da_360 = DataArray( np.linspace(0, 1, 360), dims=("time",), coords={ "time": date_range("2004-01-01", "2004-12-30", freq="D", calendar="360_day") }, ) conv = convert_calendar(da_std, "360_day", align_on="random") conv2 = convert_calendar(da_std, "360_day", align_on="random") assert (conv != conv2).any() conv = convert_calendar(da_360, "standard", use_cftime=False, align_on="random") assert np.datetime64("2004-02-29") not in conv.time conv2 = convert_calendar(da_360, "standard", use_cftime=False, align_on="random") assert (conv2 != conv).any() # Ensure that added days are evenly distributed in the 5 fifths of each year conv = convert_calendar(da_360, "noleap", align_on="random", missing=np.nan) conv = conv.where(conv.isnull(), drop=True) nandoys = 
conv.time.dt.dayofyear[:366]
    assert all(nandoys < np.array([74, 147, 220, 293, 366]))
    assert all(nandoys > np.array([0, 73, 146, 219, 292]))


@requires_cftime
@pytest.mark.parametrize(
    "source,target,freq",
    [
        ("standard", "noleap", "D"),
        ("noleap", "proleptic_gregorian", "4h"),
        ("noleap", "all_leap", "ME"),
        ("360_day", "noleap", "D"),
        ("noleap", "360_day", "D"),
    ],
)
def test_convert_calendar_missing(source, target, freq):
    src = DataArray(
        date_range(
            "2004-01-01",
            "2004-12-31" if source != "360_day" else "2004-12-30",
            freq=freq,
            calendar=source,
        ),
        dims=("time",),
        name="time",
    )
    da_src = DataArray(
        np.linspace(0, 1, src.size), dims=("time",), coords={"time": src}
    )
    out = convert_calendar(da_src, target, missing=np.nan, align_on="date")

    expected_freq = freq
    assert infer_freq(out.time) == expected_freq

    expected = date_range(
        "2004-01-01",
        "2004-12-31" if target != "360_day" else "2004-12-30",
        freq=freq,
        calendar=target,
    )
    np.testing.assert_array_equal(out.time, expected)

    if freq != "ME":
        out_without_missing = convert_calendar(da_src, target, align_on="date")
        expected_nan = out.isel(time=~out.time.isin(out_without_missing.time))
        assert expected_nan.isnull().all()
        expected_not_nan = out.sel(time=out_without_missing.time)
        assert_identical(expected_not_nan, out_without_missing)


@requires_cftime
def test_convert_calendar_errors():
    src_nl = DataArray(
        date_range("0000-01-01", "0000-12-31", freq="D", calendar="noleap"),
        dims=("time",),
        name="time",
    )
    # no align_on for conversion to 360_day
    with pytest.raises(ValueError, match="Argument `align_on` must be specified"):
        convert_calendar(src_nl, "360_day")

    # Standard doesn't support year 0
    with pytest.raises(
        ValueError, match="Source time coordinate contains dates with year 0"
    ):
        convert_calendar(src_nl, "standard")

    # no align_on for conversion from 360 day
    src_360 = convert_calendar(src_nl, "360_day", align_on="year")
    with pytest.raises(ValueError, match="Argument `align_on` must be specified"):
        convert_calendar(src_360, "noleap")

    # Datetime objects
    da = DataArray([0, 1, 2], dims=("x",), name="x")
    with pytest.raises(
        ValueError, match=r"Coordinate x must contain datetime objects."
    ):
        convert_calendar(da, "standard", dim="x")


def test_convert_calendar_dimension_name():
    src = DataArray(
        date_range("2004-01-01", "2004-01-31", freq="D", calendar="noleap"),
        dims=("date",),
        name="date",
    )

    out = convert_calendar(src, "proleptic_gregorian", dim="date")

    np.testing.assert_array_equal(src, out)


def test_convert_calendar_same_calendar():
    src = DataArray(
        date_range("2000-01-01", periods=12, freq="6h", use_cftime=False),
        dims=("time",),
        name="time",
    )
    out = convert_calendar(src, "proleptic_gregorian")
    assert src is out


@pytest.mark.parametrize(
    "source,target",
    [
        ("standard", "noleap"),
        ("noleap", "proleptic_gregorian"),
        ("standard", "360_day"),
        ("360_day", "proleptic_gregorian"),
        ("noleap", "all_leap"),
        ("360_day", "noleap"),
    ],
)
def test_interp_calendar(source, target):
    src = DataArray(
        date_range("2004-01-01", "2004-07-30", freq="D", calendar=source),
        dims=("time",),
        name="time",
    )
    tgt = DataArray(
        date_range("2004-01-01", "2004-07-30", freq="D", calendar=target),
        dims=("time",),
        name="time",
    )
    da_src = DataArray(
        np.linspace(0, 1, src.size), dims=("time",), coords={"time": src}
    )
    conv = interp_calendar(da_src, tgt)

    assert_identical(tgt.time, conv.time)

    np.testing.assert_almost_equal(conv.max(), 1, 2)
    assert conv.min() == 0


@requires_cftime
def test_interp_calendar_errors():
    src_nl = DataArray(
        [1] * 100,
        dims=("time",),
        coords={
            "time": date_range("0000-01-01", periods=100, freq="MS", calendar="noleap")
        },
    )
    tgt_360 = date_range("0001-01-01", "0001-12-30", freq="MS", calendar="standard")

    with pytest.raises(
        ValueError, match="Source time coordinate contains dates with year 0"
    ):
        interp_calendar(src_nl, tgt_360)

    da1 = DataArray([0, 1, 2], dims=("x",), name="x")
    da2 = da1 + 1
    with pytest.raises(
        ValueError, match=r"Both 'source.x' and 'target' must contain datetime objects."
): interp_calendar(da1, da2, dim="x") @requires_cftime @pytest.mark.parametrize( ("source_calendar", "target_calendar", "expected_index"), [("standard", "noleap", CFTimeIndex), ("all_leap", "standard", pd.DatetimeIndex)], ) def test_convert_calendar_produces_time_index( source_calendar, target_calendar, expected_index ): # https://github.com/pydata/xarray/issues/9138 time = date_range("2000-01-01", "2002-01-01", freq="D", calendar=source_calendar) temperature = np.ones(len(time)) da = DataArray( data=temperature, dims=["time"], coords=dict( time=time, ), ) converted = da.convert_calendar(target_calendar) assert isinstance(converted.indexes["time"], expected_index) xarray-2025.12.0/xarray/tests/test_cftime_offsets.py000066400000000000000000001540231511464676000224310ustar00rootroot00000000000000from __future__ import annotations import warnings from itertools import product, starmap from typing import TYPE_CHECKING, Literal import numpy as np import pandas as pd import pytest from xarray import CFTimeIndex from xarray.coding.cftime_offsets import ( _MONTH_ABBREVIATIONS, BaseCFTimeOffset, Day, Hour, Microsecond, Millisecond, Minute, MonthBegin, MonthEnd, QuarterBegin, QuarterEnd, Second, Tick, YearBegin, YearEnd, _legacy_to_new_freq, _new_to_legacy_freq, cftime_range, date_range, date_range_like, get_date_type, to_cftime_datetime, to_offset, ) from xarray.coding.frequencies import infer_freq from xarray.core.dataarray import DataArray from xarray.tests import ( _CFTIME_CALENDARS, assert_no_warnings, has_cftime, has_pandas_ge_2_2, requires_cftime, requires_pandas_3, ) cftime = pytest.importorskip("cftime") def _id_func(param): """Called on each parameter passed to pytest.mark.parametrize""" return str(param) @pytest.fixture(params=_CFTIME_CALENDARS) def calendar(request): return request.param @pytest.mark.parametrize( ("offset", "expected_n"), [ (BaseCFTimeOffset(), 1), (YearBegin(), 1), (YearEnd(), 1), (QuarterBegin(), 1), (QuarterEnd(), 1), (Tick(), 1), (Day(), 1), (Hour(), 1), (Minute(), 1), (Second(), 1), (Millisecond(), 1), (Microsecond(), 1), (BaseCFTimeOffset(n=2), 2), (YearBegin(n=2), 2), (YearEnd(n=2), 2), (QuarterBegin(n=2), 2), (QuarterEnd(n=2), 2), (Tick(n=2), 2), (Day(n=2), 2), (Hour(n=2), 2), (Minute(n=2), 2), (Second(n=2), 2), (Millisecond(n=2), 2), (Microsecond(n=2), 2), ], ids=_id_func, ) def test_cftime_offset_constructor_valid_n(offset, expected_n): assert offset.n == expected_n @pytest.mark.parametrize( ("offset", "invalid_n"), [ (BaseCFTimeOffset, 1.5), (YearBegin, 1.5), (YearEnd, 1.5), (QuarterBegin, 1.5), (QuarterEnd, 1.5), (MonthBegin, 1.5), (MonthEnd, 1.5), (Tick, 1.5), (Day, 1.5), (Hour, 1.5), (Minute, 1.5), (Second, 1.5), (Millisecond, 1.5), (Microsecond, 1.5), ], ids=_id_func, ) def test_cftime_offset_constructor_invalid_n(offset, invalid_n): with pytest.raises(TypeError): offset(n=invalid_n) @pytest.mark.parametrize( ("offset", "expected_month"), [ (YearBegin(), 1), (YearEnd(), 12), (YearBegin(month=5), 5), (YearEnd(month=5), 5), (QuarterBegin(), 3), (QuarterEnd(), 3), (QuarterBegin(month=5), 5), (QuarterEnd(month=5), 5), ], ids=_id_func, ) def test_year_offset_constructor_valid_month(offset, expected_month): assert offset.month == expected_month @pytest.mark.parametrize( ("offset", "invalid_month", "exception"), [ (YearBegin, 0, ValueError), (YearEnd, 0, ValueError), (YearBegin, 13, ValueError), (YearEnd, 13, ValueError), (YearBegin, 1.5, TypeError), (YearEnd, 1.5, TypeError), (QuarterBegin, 0, ValueError), (QuarterEnd, 0, ValueError), (QuarterBegin, 1.5, 
TypeError), (QuarterEnd, 1.5, TypeError), (QuarterBegin, 13, ValueError), (QuarterEnd, 13, ValueError), ], ids=_id_func, ) def test_year_offset_constructor_invalid_month(offset, invalid_month, exception): with pytest.raises(exception): offset(month=invalid_month) @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), None), (MonthBegin(), "MS"), (MonthEnd(), "ME"), (YearBegin(), "YS-JAN"), (YearEnd(), "YE-DEC"), (QuarterBegin(), "QS-MAR"), (QuarterEnd(), "QE-MAR"), (Day(), "D"), (Hour(), "h"), (Minute(), "min"), (Second(), "s"), (Millisecond(), "ms"), (Microsecond(), "us"), ], ids=_id_func, ) def test_rule_code(offset, expected): assert offset.rule_code() == expected @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), ""), (YearBegin(), ""), (QuarterBegin(), ""), ], ids=_id_func, ) def test_str_and_repr(offset, expected): assert str(offset) == expected assert repr(offset) == expected @pytest.mark.parametrize( "offset", [BaseCFTimeOffset(), MonthBegin(), QuarterBegin(), YearBegin()], ids=_id_func, ) def test_to_offset_offset_input(offset): assert to_offset(offset) == offset @pytest.mark.parametrize( ("freq", "expected"), [ ("M", MonthEnd()), ("2M", MonthEnd(n=2)), ("ME", MonthEnd()), ("2ME", MonthEnd(n=2)), ("MS", MonthBegin()), ("2MS", MonthBegin(n=2)), ("D", Day()), ("2D", Day(n=2)), ("H", Hour()), ("2H", Hour(n=2)), ("h", Hour()), ("2h", Hour(n=2)), ("T", Minute()), ("2T", Minute(n=2)), ("min", Minute()), ("2min", Minute(n=2)), ("S", Second()), ("2S", Second(n=2)), ("L", Millisecond(n=1)), ("2L", Millisecond(n=2)), ("ms", Millisecond(n=1)), ("2ms", Millisecond(n=2)), ("U", Microsecond(n=1)), ("2U", Microsecond(n=2)), ("us", Microsecond(n=1)), ("2us", Microsecond(n=2)), # negative ("-2M", MonthEnd(n=-2)), ("-2ME", MonthEnd(n=-2)), ("-2MS", MonthBegin(n=-2)), ("-2D", Day(n=-2)), ("-2H", Hour(n=-2)), ("-2h", Hour(n=-2)), ("-2T", Minute(n=-2)), ("-2min", Minute(n=-2)), ("-2S", Second(n=-2)), ("-2L", Millisecond(n=-2)), ("-2ms", Millisecond(n=-2)), ("-2U", Microsecond(n=-2)), ("-2us", Microsecond(n=-2)), ], ids=_id_func, ) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "M" etc. def test_to_offset_sub_annual(freq, expected): assert to_offset(freq) == expected _ANNUAL_OFFSET_TYPES = { "A": YearEnd, "AS": YearBegin, "Y": YearEnd, "YS": YearBegin, "YE": YearEnd, } @pytest.mark.parametrize( ("month_int", "month_label"), list(_MONTH_ABBREVIATIONS.items()) + [(0, "")] ) @pytest.mark.parametrize("multiple", [None, 2, -1]) @pytest.mark.parametrize("offset_str", ["AS", "A", "YS", "Y"]) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "A" etc. def test_to_offset_annual(month_label, month_int, multiple, offset_str): freq = offset_str offset_type = _ANNUAL_OFFSET_TYPES[offset_str] if month_label: freq = f"{freq}-{month_label}" if multiple: freq = f"{multiple}{freq}" result = to_offset(freq) if multiple and month_int: expected = offset_type(n=multiple, month=month_int) elif multiple: expected = offset_type(n=multiple) elif month_int: expected = offset_type(month=month_int) else: expected = offset_type() assert result == expected _QUARTER_OFFSET_TYPES = {"Q": QuarterEnd, "QS": QuarterBegin, "QE": QuarterEnd} @pytest.mark.parametrize( ("month_int", "month_label"), list(_MONTH_ABBREVIATIONS.items()) + [(0, "")] ) @pytest.mark.parametrize("multiple", [None, 2, -1]) @pytest.mark.parametrize("offset_str", ["QS", "Q", "QE"]) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "Q" etc. 
def test_to_offset_quarter(month_label, month_int, multiple, offset_str): freq = offset_str offset_type = _QUARTER_OFFSET_TYPES[offset_str] if month_label: freq = f"{freq}-{month_label}" if multiple: freq = f"{multiple}{freq}" result = to_offset(freq) if multiple and month_int: expected = offset_type(n=multiple, month=month_int) elif multiple: if month_int: expected = offset_type(n=multiple) elif offset_type == QuarterBegin: expected = offset_type(n=multiple, month=1) elif offset_type == QuarterEnd: expected = offset_type(n=multiple, month=12) elif month_int: expected = offset_type(month=month_int) elif offset_type == QuarterBegin: expected = offset_type(month=1) elif offset_type == QuarterEnd: expected = offset_type(month=12) assert result == expected @pytest.mark.parametrize("freq", ["Z", "7min2", "AM", "M-", "AS-", "QS-", "1H1min"]) def test_invalid_to_offset_str(freq): with pytest.raises(ValueError): to_offset(freq) @pytest.mark.parametrize( ("argument", "expected_date_args"), [("2000-01-01", (2000, 1, 1)), ((2000, 1, 1), (2000, 1, 1))], ids=_id_func, ) def test_to_cftime_datetime(calendar, argument, expected_date_args): date_type = get_date_type(calendar) expected = date_type(*expected_date_args) if isinstance(argument, tuple): argument = date_type(*argument) result = to_cftime_datetime(argument, calendar=calendar) assert result == expected def test_to_cftime_datetime_error_no_calendar(): with pytest.raises(ValueError): to_cftime_datetime("2000") def test_to_cftime_datetime_error_type_error(): with pytest.raises(TypeError): to_cftime_datetime(1) _EQ_TESTS_A = [ BaseCFTimeOffset(), YearBegin(), YearEnd(), YearBegin(month=2), YearEnd(month=2), QuarterBegin(), QuarterEnd(), QuarterBegin(month=2), QuarterEnd(month=2), MonthBegin(), MonthEnd(), Day(), Hour(), Minute(), Second(), Millisecond(), Microsecond(), ] _EQ_TESTS_B = [ BaseCFTimeOffset(n=2), YearBegin(n=2), YearEnd(n=2), YearBegin(n=2, month=2), YearEnd(n=2, month=2), QuarterBegin(n=2), QuarterEnd(n=2), QuarterBegin(n=2, month=2), QuarterEnd(n=2, month=2), MonthBegin(n=2), MonthEnd(n=2), Day(n=2), Hour(n=2), Minute(n=2), Second(n=2), Millisecond(n=2), Microsecond(n=2), ] @pytest.mark.parametrize(("a", "b"), product(_EQ_TESTS_A, _EQ_TESTS_B), ids=_id_func) def test_neq(a, b): assert a != b _EQ_TESTS_B_COPY = [ BaseCFTimeOffset(n=2), YearBegin(n=2), YearEnd(n=2), YearBegin(n=2, month=2), YearEnd(n=2, month=2), QuarterBegin(n=2), QuarterEnd(n=2), QuarterBegin(n=2, month=2), QuarterEnd(n=2, month=2), MonthBegin(n=2), MonthEnd(n=2), Day(n=2), Hour(n=2), Minute(n=2), Second(n=2), Millisecond(n=2), Microsecond(n=2), ] @pytest.mark.parametrize( ("a", "b"), zip(_EQ_TESTS_B, _EQ_TESTS_B_COPY, strict=True), ids=_id_func ) def test_eq(a, b): assert a == b _MUL_TESTS = [ (BaseCFTimeOffset(), 3, BaseCFTimeOffset(n=3)), (BaseCFTimeOffset(), -3, BaseCFTimeOffset(n=-3)), (YearEnd(), 3, YearEnd(n=3)), (YearBegin(), 3, YearBegin(n=3)), (QuarterEnd(), 3, QuarterEnd(n=3)), (QuarterBegin(), 3, QuarterBegin(n=3)), (MonthEnd(), 3, MonthEnd(n=3)), (MonthBegin(), 3, MonthBegin(n=3)), (Tick(), 3, Tick(n=3)), (Day(), 3, Day(n=3)), (Hour(), 3, Hour(n=3)), (Minute(), 3, Minute(n=3)), (Second(), 3, Second(n=3)), (Millisecond(), 3, Millisecond(n=3)), (Microsecond(), 3, Microsecond(n=3)), (Hour(), 0.5, Minute(n=30)), (Hour(), -0.5, Minute(n=-30)), (Minute(), 0.5, Second(n=30)), (Second(), 0.5, Millisecond(n=500)), (Millisecond(), 0.5, Microsecond(n=500)), ] @pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func) def test_mul(offset, 
multiple, expected): assert offset * multiple == expected @pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func) def test_rmul(offset, multiple, expected): assert multiple * offset == expected def test_mul_float_multiple_next_higher_resolution(): """Test more than one iteration through _next_higher_resolution is required.""" assert 1e-6 * Second() == Microsecond() assert 1e-6 / 60 * Minute() == Microsecond() @pytest.mark.parametrize( "offset", [ YearBegin(), YearEnd(), QuarterBegin(), QuarterEnd(), MonthBegin(), MonthEnd(), Day(), ], ids=_id_func, ) def test_nonTick_offset_multiplied_float_error(offset): """Test that the appropriate error is raised if a non-Tick offset is multiplied by a float.""" with pytest.raises(TypeError, match="unsupported operand type"): offset * 0.5 def test_Microsecond_multiplied_float_error(): """Test that the appropriate error is raised if a Tick offset is multiplied by a float which causes it not to be representable by a microsecond-precision timedelta.""" with pytest.raises( ValueError, match="Could not convert to integer offset at any resolution" ): Microsecond() * 0.5 @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), BaseCFTimeOffset(n=-1)), (YearEnd(), YearEnd(n=-1)), (YearBegin(), YearBegin(n=-1)), (QuarterEnd(), QuarterEnd(n=-1)), (QuarterBegin(), QuarterBegin(n=-1)), (MonthEnd(), MonthEnd(n=-1)), (MonthBegin(), MonthBegin(n=-1)), (Day(), Day(n=-1)), (Hour(), Hour(n=-1)), (Minute(), Minute(n=-1)), (Second(), Second(n=-1)), (Millisecond(), Millisecond(n=-1)), (Microsecond(), Microsecond(n=-1)), ], ids=_id_func, ) def test_neg(offset: BaseCFTimeOffset, expected: BaseCFTimeOffset) -> None: assert -offset == expected _ADD_TESTS = [ (Day(n=2), (1, 1, 3)), (Hour(n=2), (1, 1, 1, 2)), (Minute(n=2), (1, 1, 1, 0, 2)), (Second(n=2), (1, 1, 1, 0, 0, 2)), (Millisecond(n=2), (1, 1, 1, 0, 0, 0, 2000)), (Microsecond(n=2), (1, 1, 1, 0, 0, 0, 2)), ] @pytest.mark.parametrize(("offset", "expected_date_args"), _ADD_TESTS, ids=_id_func) def test_add_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) expected = date_type(*expected_date_args) result = offset + initial assert result == expected def test_add_daily_offsets() -> None: offset = Day(n=2) expected = Day(n=4) result = offset + offset assert result == expected def test_subtract_daily_offsets() -> None: offset = Day(n=2) expected = Day(n=0) result = offset - offset assert result == expected @pytest.mark.parametrize(("offset", "expected_date_args"), _ADD_TESTS, ids=_id_func) def test_radd_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) expected = date_type(*expected_date_args) result = initial + offset assert result == expected @pytest.mark.parametrize( ("offset", "expected_date_args"), [ (Day(n=2), (1, 1, 1)), (Hour(n=2), (1, 1, 2, 22)), (Minute(n=2), (1, 1, 2, 23, 58)), (Second(n=2), (1, 1, 2, 23, 59, 58)), (Millisecond(n=2), (1, 1, 2, 23, 59, 59, 998000)), (Microsecond(n=2), (1, 1, 2, 23, 59, 59, 999998)), ], ids=_id_func, ) def test_rsub_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 3) expected = date_type(*expected_date_args) result = initial - offset assert result == expected @pytest.mark.parametrize("offset", _EQ_TESTS_A, ids=_id_func) def test_sub_error(offset, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) with 
pytest.raises(TypeError): offset - initial @pytest.mark.parametrize( ("a", "b"), zip(_EQ_TESTS_A, _EQ_TESTS_B, strict=True), ids=_id_func ) def test_minus_offset(a, b): result = b - a expected = a assert result == expected @pytest.mark.parametrize( ("a", "b"), list(zip(np.roll(_EQ_TESTS_A, 1), _EQ_TESTS_B, strict=True)) # type: ignore[arg-type] + [(YearEnd(month=1), YearEnd(month=2))], ids=_id_func, ) def test_minus_offset_error(a, b): with pytest.raises(TypeError): b - a @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), MonthBegin(), (1, 2, 1)), ((1, 1, 1), MonthBegin(n=2), (1, 3, 1)), ((1, 1, 7), MonthBegin(), (1, 2, 1)), ((1, 1, 7), MonthBegin(n=2), (1, 3, 1)), ((1, 3, 1), MonthBegin(n=-1), (1, 2, 1)), ((1, 3, 1), MonthBegin(n=-2), (1, 1, 1)), ((1, 3, 3), MonthBegin(n=-1), (1, 3, 1)), ((1, 3, 3), MonthBegin(n=-2), (1, 2, 1)), ((1, 2, 1), MonthBegin(n=14), (2, 4, 1)), ((2, 4, 1), MonthBegin(n=-14), (1, 2, 1)), ((1, 1, 1, 5, 5, 5, 5), MonthBegin(), (1, 2, 1, 5, 5, 5, 5)), ((1, 1, 3, 5, 5, 5, 5), MonthBegin(), (1, 2, 1, 5, 5, 5, 5)), ((1, 1, 3, 5, 5, 5, 5), MonthBegin(n=-1), (1, 1, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_begin(calendar, initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), MonthEnd(), (1, 1), ()), ((1, 1, 1), MonthEnd(n=2), (1, 2), ()), ((1, 3, 1), MonthEnd(n=-1), (1, 2), ()), ((1, 3, 1), MonthEnd(n=-2), (1, 1), ()), ((1, 2, 1), MonthEnd(n=14), (2, 3), ()), ((2, 4, 1), MonthEnd(n=-14), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), MonthEnd(), (1, 1), (5, 5, 5, 5)), ((1, 2, 1, 5, 5, 5, 5), MonthEnd(n=-1), (1, 1), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 1), (), MonthEnd(), (1, 2), ()), ((1, 1), (), MonthEnd(n=2), (1, 3), ()), ((1, 3), (), MonthEnd(n=-1), (1, 2), ()), ((1, 3), (), MonthEnd(n=-2), (1, 1), ()), ((1, 2), (), MonthEnd(n=14), (2, 4), ()), ((2, 4), (), MonthEnd(n=-14), (1, 2), ()), ((1, 1), (5, 5, 5, 5), MonthEnd(), (1, 2), (5, 5, 5, 5)), ((1, 2), (5, 5, 5, 5), MonthEnd(n=-1), (1, 1), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month 
+ (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), YearBegin(), (2, 1, 1)), ((1, 1, 1), YearBegin(n=2), (3, 1, 1)), ((1, 1, 1), YearBegin(month=2), (1, 2, 1)), ((1, 1, 7), YearBegin(n=2), (3, 1, 1)), ((2, 2, 1), YearBegin(n=-1), (2, 1, 1)), ((1, 1, 2), YearBegin(n=-1), (1, 1, 1)), ((1, 1, 1, 5, 5, 5, 5), YearBegin(), (2, 1, 1, 5, 5, 5, 5)), ((2, 1, 1, 5, 5, 5, 5), YearBegin(n=-1), (1, 1, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_begin(calendar, initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), YearEnd(), (1, 12), ()), ((1, 1, 1), YearEnd(n=2), (2, 12), ()), ((1, 1, 1), YearEnd(month=1), (1, 1), ()), ((2, 3, 1), YearEnd(n=-1), (1, 12), ()), ((1, 3, 1), YearEnd(n=-1, month=2), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), YearEnd(), (1, 12), (5, 5, 5, 5)), ((1, 1, 1, 5, 5, 5, 5), YearEnd(n=2), (2, 12), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 12), (), YearEnd(), (2, 12), ()), ((1, 12), (), YearEnd(n=2), (3, 12), ()), ((2, 12), (), YearEnd(n=-1), (1, 12), ()), ((3, 12), (), YearEnd(n=-2), (1, 12), ()), ((1, 1), (), YearEnd(month=2), (1, 2), ()), ((1, 12), (5, 5, 5, 5), YearEnd(), (2, 12), (5, 5, 5, 5)), ((2, 12), (5, 5, 5, 5), YearEnd(n=-1), (1, 12), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), QuarterBegin(), (1, 3, 1)), ((1, 1, 1), QuarterBegin(n=2), (1, 6, 1)), ((1, 1, 1), QuarterBegin(month=2), (1, 2, 1)), ((1, 1, 7), QuarterBegin(n=2), (1, 6, 1)), ((2, 2, 1), QuarterBegin(n=-1), (1, 12, 1)), ((1, 3, 2), QuarterBegin(n=-1), (1, 3, 1)), ((1, 1, 1, 5, 5, 5, 5), QuarterBegin(), (1, 3, 1, 5, 5, 5, 5)), ((2, 1, 1, 5, 5, 5, 5), QuarterBegin(n=-1), (1, 12, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_begin(calendar, 
initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), QuarterEnd(), (1, 3), ()), ((1, 1, 1), QuarterEnd(n=2), (1, 6), ()), ((1, 1, 1), QuarterEnd(month=1), (1, 1), ()), ((2, 3, 1), QuarterEnd(n=-1), (1, 12), ()), ((1, 3, 1), QuarterEnd(n=-1, month=2), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), QuarterEnd(), (1, 3), (5, 5, 5, 5)), ((1, 1, 1, 5, 5, 5, 5), QuarterEnd(n=2), (1, 6), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 12), (), QuarterEnd(), (2, 3), ()), ((1, 12), (), QuarterEnd(n=2), (2, 6), ()), ((1, 12), (), QuarterEnd(n=-1), (1, 9), ()), ((1, 12), (), QuarterEnd(n=-2), (1, 6), ()), ((1, 1), (), QuarterEnd(month=2), (1, 2), ()), ((1, 12), (5, 5, 5, 5), QuarterEnd(), (2, 3), (5, 5, 5, 5)), ((1, 12), (5, 5, 5, 5), QuarterEnd(n=-1), (1, 9), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected # Note for all sub-monthly offsets, pandas always returns True for onOffset @pytest.mark.parametrize( ("date_args", "offset", "expected"), [ ((1, 1, 1), MonthBegin(), True), ((1, 1, 1, 1), MonthBegin(), True), ((1, 1, 5), MonthBegin(), False), ((1, 1, 5), MonthEnd(), False), ((1, 3, 1), QuarterBegin(), True), ((1, 3, 1, 1), QuarterBegin(), True), ((1, 3, 5), QuarterBegin(), False), ((1, 12, 1), QuarterEnd(), False), ((1, 1, 1), YearBegin(), True), ((1, 1, 1, 1), YearBegin(), True), ((1, 1, 5), YearBegin(), False), ((1, 12, 1), YearEnd(), False), ((1, 1, 1), Day(), True), ((1, 1, 1, 1), Day(), True), ((1, 1, 1), Hour(), True), ((1, 1, 1), Minute(), True), ((1, 1, 1), Second(), True), ((1, 1, 1), Millisecond(), True), ((1, 1, 1), Microsecond(), True), ], ids=_id_func, ) def test_onOffset(calendar, date_args, offset, expected): date_type = get_date_type(calendar) date = date_type(*date_args) result = offset.onOffset(date) assert result == expected @pytest.mark.parametrize( ("year_month_args", "sub_day_args", "offset"), [ ((1, 1), (), MonthEnd()), ((1, 1), (1,), MonthEnd()), ((1, 12), (), QuarterEnd()), ((1, 1), (), 
QuarterEnd(month=1)), ((1, 12), (), YearEnd()), ((1, 1), (), YearEnd(month=1)), ], ids=_id_func, ) def test_onOffset_month_or_quarter_or_year_end( calendar, year_month_args, sub_day_args, offset ): date_type = get_date_type(calendar) reference_args = year_month_args + (1,) reference = date_type(*reference_args) date_args = year_month_args + (reference.daysinmonth,) + sub_day_args date = date_type(*date_args) result = offset.onOffset(date) assert result @pytest.mark.parametrize( ("offset", "initial_date_args", "partial_expected_date_args"), [ (YearBegin(), (1, 3, 1), (2, 1)), (YearBegin(), (1, 1, 1), (1, 1)), (YearBegin(n=2), (1, 3, 1), (2, 1)), (YearBegin(n=2, month=2), (1, 3, 1), (2, 2)), (YearEnd(), (1, 3, 1), (1, 12)), (YearEnd(n=2), (1, 3, 1), (1, 12)), (YearEnd(n=2, month=2), (1, 3, 1), (2, 2)), (YearEnd(n=2, month=4), (1, 4, 30), (1, 4)), (QuarterBegin(), (1, 3, 2), (1, 6)), (QuarterBegin(), (1, 4, 1), (1, 6)), (QuarterBegin(n=2), (1, 4, 1), (1, 6)), (QuarterBegin(n=2, month=2), (1, 4, 1), (1, 5)), (QuarterEnd(), (1, 3, 1), (1, 3)), (QuarterEnd(n=2), (1, 3, 1), (1, 3)), (QuarterEnd(n=2, month=2), (1, 3, 1), (1, 5)), (QuarterEnd(n=2, month=4), (1, 4, 30), (1, 4)), (MonthBegin(), (1, 3, 2), (1, 4)), (MonthBegin(), (1, 3, 1), (1, 3)), (MonthBegin(n=2), (1, 3, 2), (1, 4)), (MonthEnd(), (1, 3, 2), (1, 3)), (MonthEnd(), (1, 4, 30), (1, 4)), (MonthEnd(n=2), (1, 3, 2), (1, 3)), (Day(), (1, 3, 2, 1), (1, 3, 2, 1)), (Hour(), (1, 3, 2, 1, 1), (1, 3, 2, 1, 1)), (Minute(), (1, 3, 2, 1, 1, 1), (1, 3, 2, 1, 1, 1)), (Second(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), (Millisecond(), (1, 3, 2, 1, 1, 1, 1000), (1, 3, 2, 1, 1, 1, 1000)), (Microsecond(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), ], ids=_id_func, ) def test_rollforward(calendar, offset, initial_date_args, partial_expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) if isinstance(offset, MonthBegin | QuarterBegin | YearBegin): expected_date_args = partial_expected_date_args + (1,) elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd): reference_args = partial_expected_date_args + (1,) reference = date_type(*reference_args) expected_date_args = partial_expected_date_args + (reference.daysinmonth,) else: expected_date_args = partial_expected_date_args expected = date_type(*expected_date_args) result = offset.rollforward(initial) assert result == expected @pytest.mark.parametrize( ("offset", "initial_date_args", "partial_expected_date_args"), [ (YearBegin(), (1, 3, 1), (1, 1)), (YearBegin(n=2), (1, 3, 1), (1, 1)), (YearBegin(n=2, month=2), (1, 3, 1), (1, 2)), (YearBegin(), (1, 1, 1), (1, 1)), (YearBegin(n=2, month=2), (1, 2, 1), (1, 2)), (YearEnd(), (2, 3, 1), (1, 12)), (YearEnd(n=2), (2, 3, 1), (1, 12)), (YearEnd(n=2, month=2), (2, 3, 1), (2, 2)), (YearEnd(month=4), (1, 4, 30), (1, 4)), (QuarterBegin(), (1, 3, 2), (1, 3)), (QuarterBegin(), (1, 4, 1), (1, 3)), (QuarterBegin(n=2), (1, 4, 1), (1, 3)), (QuarterBegin(n=2, month=2), (1, 4, 1), (1, 2)), (QuarterEnd(), (2, 3, 1), (1, 12)), (QuarterEnd(n=2), (2, 3, 1), (1, 12)), (QuarterEnd(n=2, month=2), (2, 3, 1), (2, 2)), (QuarterEnd(n=2, month=4), (1, 4, 30), (1, 4)), (MonthBegin(), (1, 3, 2), (1, 3)), (MonthBegin(n=2), (1, 3, 2), (1, 3)), (MonthBegin(), (1, 3, 1), (1, 3)), (MonthEnd(), (1, 3, 2), (1, 2)), (MonthEnd(n=2), (1, 3, 2), (1, 2)), (MonthEnd(), (1, 4, 30), (1, 4)), (Day(), (1, 3, 2, 1), (1, 3, 2, 1)), (Hour(), (1, 3, 2, 1, 1), (1, 3, 2, 1, 1)), (Minute(), (1, 3, 2, 1, 1, 1), (1, 3, 2, 1, 1, 1)), (Second(), (1, 3, 2, 1, 1, 1, 1), 
(1, 3, 2, 1, 1, 1, 1)), (Millisecond(), (1, 3, 2, 1, 1, 1, 1000), (1, 3, 2, 1, 1, 1, 1000)), (Microsecond(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), ], ids=_id_func, ) def test_rollback(calendar, offset, initial_date_args, partial_expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) if isinstance(offset, MonthBegin | QuarterBegin | YearBegin): expected_date_args = partial_expected_date_args + (1,) elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd): reference_args = partial_expected_date_args + (1,) reference = date_type(*reference_args) expected_date_args = partial_expected_date_args + (reference.daysinmonth,) else: expected_date_args = partial_expected_date_args expected = date_type(*expected_date_args) result = offset.rollback(initial) assert result == expected _CFTIME_RANGE_TESTS = [ ( "0001-01-01", "0001-01-04", None, "D", "neither", False, [(1, 1, 2), (1, 1, 3)], ), ( "0001-01-01", "0001-01-04", None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01", "0001-01-04", None, "D", "left", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3)], ), ( "0001-01-01", "0001-01-04", None, "D", "right", False, [(1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01T01:00:00", "0001-01-04", None, "D", "both", False, [(1, 1, 1, 1), (1, 1, 2, 1), (1, 1, 3, 1)], ), ( "0001-01-01 01:00:00", "0001-01-04", None, "D", "both", False, [(1, 1, 1, 1), (1, 1, 2, 1), (1, 1, 3, 1)], ), ( "0001-01-01T01:00:00", "0001-01-04", None, "D", "both", True, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01", None, 4, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( None, "0001-01-04", 4, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( (1, 1, 1), "0001-01-04", None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( (1, 1, 1), (1, 1, 4), None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-30", "0011-02-01", None, "3YS-JUN", "both", False, [(1, 6, 1), (4, 6, 1), (7, 6, 1), (10, 6, 1)], ), ("0001-01-04", "0001-01-01", None, "D", "both", False, []), ( "0010", None, 4, YearBegin(n=-2), "both", False, [(10, 1, 1), (8, 1, 1), (6, 1, 1), (4, 1, 1)], ), ( "0010", None, 4, "-2YS", "both", False, [(10, 1, 1), (8, 1, 1), (6, 1, 1), (4, 1, 1)], ), ( "0001-01-01", "0001-01-04", 4, None, "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-06-01", None, 4, "3QS-JUN", "both", False, [(1, 6, 1), (2, 3, 1), (2, 12, 1), (3, 9, 1)], ), ( "0001-06-01", None, 4, "-1MS", "both", False, [(1, 6, 1), (1, 5, 1), (1, 4, 1), (1, 3, 1)], ), ( "0001-01-30", None, 4, "-1D", "both", False, [(1, 1, 30), (1, 1, 29), (1, 1, 28), (1, 1, 27)], ), ] @pytest.mark.parametrize( ("start", "end", "periods", "freq", "inclusive", "normalize", "expected_date_args"), _CFTIME_RANGE_TESTS, ids=_id_func, ) def test_cftime_range( start, end, periods, freq, inclusive, normalize, calendar, expected_date_args ): date_type = get_date_type(calendar) expected_dates = list(starmap(date_type, expected_date_args)) if isinstance(start, tuple): start = date_type(*start) if isinstance(end, tuple): end = date_type(*end) with pytest.warns(DeprecationWarning): result = cftime_range( start=start, end=end, periods=periods, freq=freq, inclusive=inclusive, normalize=normalize, calendar=calendar, ) resulting_dates = result.values assert isinstance(result, CFTimeIndex) if freq is not None: np.testing.assert_equal(resulting_dates, expected_dates) else: # If we create a linear range of dates 
using cftime.num2date # we will not get exact round number dates. This is because # datetime arithmetic in cftime is accurate approximately to # 1 millisecond (see https://unidata.github.io/cftime/api.html). deltas = resulting_dates - expected_dates deltas = np.array([delta.total_seconds() for delta in deltas]) assert np.max(np.abs(deltas)) < 0.001 def test_date_range_name(): result = date_range(start="2000", periods=4, name="foo") assert result.name == "foo" result = date_range(start="2000", periods=4) assert result.name is None @pytest.mark.parametrize( ("start", "end", "periods", "freq", "inclusive"), [ (None, None, 5, "YE", None), ("2000", None, None, "YE", None), (None, "2000", None, "YE", None), (None, None, None, None, None), ("2000", "2001", None, "YE", "up"), ("2000", "2001", 5, "YE", None), ], ) def test_invalid_date_range_cftime_inputs( start: str | None, end: str | None, periods: int | None, freq: str | None, inclusive: Literal["up"] | None, ) -> None: with pytest.raises(ValueError): date_range(start, end, periods, freq, inclusive=inclusive, use_cftime=True) # type: ignore[arg-type] _CALENDAR_SPECIFIC_MONTH_END_TESTS = [ ("noleap", [(2, 28), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("all_leap", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("360_day", [(2, 30), (4, 30), (6, 30), (8, 30), (10, 30), (12, 30)]), ("standard", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("gregorian", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("julian", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ] @pytest.mark.parametrize( ("calendar", "expected_month_day"), _CALENDAR_SPECIFIC_MONTH_END_TESTS, ids=_id_func, ) def test_calendar_specific_month_end( calendar: str, expected_month_day: list[tuple[int, int]] ) -> None: year = 2000 # Use a leap-year to highlight calendar differences date_type = get_date_type(calendar) expected = [date_type(year, *args) for args in expected_month_day] result = date_range( start="2000-02", end="2001", freq="2ME", calendar=calendar, use_cftime=True, ).values np.testing.assert_equal(result, expected) @pytest.mark.parametrize( ("calendar", "expected_month_day"), _CALENDAR_SPECIFIC_MONTH_END_TESTS, ids=_id_func, ) def test_calendar_specific_month_end_negative_freq( calendar: str, expected_month_day: list[tuple[int, int]] ) -> None: year = 2000 # Use a leap-year to highlight calendar differences date_type = get_date_type(calendar) expected = [date_type(year, *args) for args in expected_month_day[::-1]] result = date_range( start="2001", end="2000", freq="-2ME", calendar=calendar, use_cftime=True ).values np.testing.assert_equal(result, expected) @pytest.mark.parametrize( ("calendar", "start", "end", "expected_number_of_days"), [ ("noleap", "2000", "2001", 365), ("all_leap", "2000", "2001", 366), ("360_day", "2000", "2001", 360), ("standard", "2000", "2001", 366), ("gregorian", "2000", "2001", 366), ("julian", "2000", "2001", 366), ("noleap", "2001", "2002", 365), ("all_leap", "2001", "2002", 366), ("360_day", "2001", "2002", 360), ("standard", "2001", "2002", 365), ("gregorian", "2001", "2002", 365), ("julian", "2001", "2002", 365), ], ) def test_calendar_year_length( calendar: str, start: str, end: str, expected_number_of_days: int ) -> None: result = date_range( start, end, freq="D", inclusive="left", calendar=calendar, use_cftime=True ) assert len(result) == expected_number_of_days @pytest.mark.parametrize("freq", ["YE", "ME", "D"]) def test_dayofweek_after_cftime(freq: str) -> None: result = 
date_range("2000-02-01", periods=3, freq=freq, use_cftime=True).dayofweek # TODO: remove once requiring pandas 2.2+ freq = _new_to_legacy_freq(freq) expected = pd.date_range("2000-02-01", periods=3, freq=freq).dayofweek np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("freq", ["YE", "ME", "D"]) def test_dayofyear_after_cftime(freq: str) -> None: result = date_range("2000-02-01", periods=3, freq=freq, use_cftime=True).dayofyear # TODO: remove once requiring pandas 2.2+ freq = _new_to_legacy_freq(freq) expected = pd.date_range("2000-02-01", periods=3, freq=freq).dayofyear np.testing.assert_array_equal(result, expected) def test_cftime_range_standard_calendar_refers_to_gregorian() -> None: from cftime import DatetimeGregorian (result,) = date_range("2000", periods=1, use_cftime=True) assert isinstance(result, DatetimeGregorian) @pytest.mark.parametrize( "start,calendar,use_cftime,expected_type", [ ("1990-01-01", "standard", None, pd.DatetimeIndex), ("1990-01-01", "proleptic_gregorian", True, CFTimeIndex), ("1990-01-01", "noleap", None, CFTimeIndex), ("1990-01-01", "gregorian", False, pd.DatetimeIndex), ("1400-01-01", "standard", None, CFTimeIndex), ("3400-01-01", "standard", None, CFTimeIndex), ], ) def test_date_range( start: str, calendar: str, use_cftime: bool | None, expected_type ) -> None: dr = date_range( start, periods=14, freq="D", calendar=calendar, use_cftime=use_cftime ) assert isinstance(dr, expected_type) def test_date_range_errors() -> None: with pytest.raises(ValueError, match="Date range is invalid"): date_range( "1400-01-01", periods=1, freq="D", calendar="standard", use_cftime=False ) with pytest.raises(ValueError, match="Date range is invalid"): date_range( "2480-01-01", periods=1, freq="D", calendar="proleptic_gregorian", use_cftime=False, ) with pytest.raises(ValueError, match="Invalid calendar "): date_range( "1900-01-01", periods=1, freq="D", calendar="noleap", use_cftime=False ) @requires_cftime @pytest.mark.parametrize( "start,freq,cal_src,cal_tgt,use_cftime,exp0,exp_pd", [ ("2020-02-01", "4ME", "standard", "noleap", None, "2020-02-28", False), ("2020-02-01", "ME", "noleap", "gregorian", True, "2020-02-29", True), ("2020-02-01", "QE-DEC", "noleap", "gregorian", True, "2020-03-31", True), ("2020-02-01", "YS-FEB", "noleap", "gregorian", True, "2020-02-01", True), ("2020-02-01", "YE-FEB", "noleap", "gregorian", True, "2020-02-29", True), ("2020-02-01", "-1YE-FEB", "noleap", "gregorian", True, "2019-02-28", True), ("2020-02-28", "3h", "all_leap", "gregorian", False, "2020-02-28", True), ("2020-03-30", "ME", "360_day", "gregorian", False, "2020-03-31", True), ("2020-03-31", "ME", "gregorian", "360_day", None, "2020-03-30", False), ("2020-03-31", "-1ME", "gregorian", "360_day", None, "2020-03-30", False), ], ) def test_date_range_like(start, freq, cal_src, cal_tgt, use_cftime, exp0, exp_pd): expected_freq = freq source = date_range(start, periods=12, freq=freq, calendar=cal_src) out = date_range_like(source, cal_tgt, use_cftime=use_cftime) assert len(out) == 12 assert infer_freq(out) == expected_freq assert out[0].isoformat().startswith(exp0) if exp_pd: assert isinstance(out, pd.DatetimeIndex) else: assert isinstance(out, CFTimeIndex) assert out.calendar == cal_tgt @requires_cftime @pytest.mark.parametrize( "freq", ("YE", "YS", "YE-MAY", "MS", "ME", "QS", "h", "min", "s") ) @pytest.mark.parametrize("use_cftime", (True, False)) def test_date_range_like_no_deprecation(freq, use_cftime): # ensure no internal warnings # TODO: remove once freq string 
deprecation is finished source = date_range("2000", periods=3, freq=freq, use_cftime=False) with assert_no_warnings(): date_range_like(source, "standard", use_cftime=use_cftime) def test_date_range_like_same_calendar(): src = date_range("2000-01-01", periods=12, freq="6h", use_cftime=False) out = date_range_like(src, "standard", use_cftime=False) assert src is out @pytest.mark.filterwarnings("ignore:Converting non-default") def test_date_range_like_errors(): src = date_range("1899-02-03", periods=20, freq="D", use_cftime=False) src = src[np.arange(20) != 10] # Remove 1 day so the frequency is not inferable. with pytest.raises( ValueError, match=r"`date_range_like` was unable to generate a range as the source frequency was not inferable.", ): date_range_like(src, "gregorian") src = DataArray( np.array( [["1999-01-01", "1999-01-02"], ["1999-01-03", "1999-01-04"]], dtype=np.datetime64, ), dims=("x", "y"), ) with pytest.raises( ValueError, match=r"'source' must be a 1D array of datetime objects for inferring its range.", ): date_range_like(src, "noleap") da = DataArray([1, 2, 3, 4], dims=("time",)) with pytest.raises( ValueError, match=r"'source' must be a 1D array of datetime objects for inferring its range.", ): date_range_like(da, "noleap") def as_timedelta_not_implemented_error(): tick = Tick() with pytest.raises(NotImplementedError): tick.as_timedelta() @pytest.mark.parametrize("use_cftime", [True, False]) def test_cftime_or_date_range_invalid_inclusive_value(use_cftime: bool) -> None: if use_cftime and not has_cftime: pytest.skip("requires cftime") if TYPE_CHECKING: pytest.skip("inclusive type checked internally") with pytest.raises(ValueError, match="nclusive"): date_range("2000", periods=3, inclusive="foo", use_cftime=use_cftime) @pytest.mark.parametrize("use_cftime", [True, False]) def test_cftime_or_date_range_inclusive_None(use_cftime: bool) -> None: if use_cftime and not has_cftime: pytest.skip("requires cftime") result_None = date_range("2000-01-01", "2000-01-04", use_cftime=use_cftime) result_both = date_range( "2000-01-01", "2000-01-04", inclusive="both", use_cftime=use_cftime ) np.testing.assert_equal(result_None.values, result_both.values) @pytest.mark.parametrize( "freq", ["A", "AS", "Q", "M", "H", "T", "S", "L", "U", "Y", "A-MAY"] ) def test_to_offset_deprecation_warning(freq): # Test for deprecations outlined in GitHub issue #8394 with pytest.warns(FutureWarning, match="is deprecated"): to_offset(freq) @pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2") @pytest.mark.parametrize( "freq, expected", ( ["Y", "YE"], ["A", "YE"], ["Q", "QE"], ["M", "ME"], ["AS", "YS"], ["YE", "YE"], ["QE", "QE"], ["ME", "ME"], ["YS", "YS"], ), ) @pytest.mark.parametrize("n", ("", "2")) def test_legacy_to_new_freq(freq, expected, n): freq = f"{n}{freq}" result = _legacy_to_new_freq(freq) expected = f"{n}{expected}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2") @pytest.mark.parametrize("year_alias", ("YE", "Y", "A")) @pytest.mark.parametrize("n", ("", "2")) def test_legacy_to_new_freq_anchored(year_alias, n): for month in _MONTH_ABBREVIATIONS.values(): freq = f"{n}{year_alias}-{month}" result = _legacy_to_new_freq(freq) expected = f"{n}YE-{month}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2") @pytest.mark.filterwarnings("ignore:'[AY]' is deprecated") @pytest.mark.parametrize( "freq, expected", (["A", "A"], ["YE", "A"], ["Y", "A"], ["QE", "Q"], ["ME", 
"M"], ["YS", "AS"]), ) @pytest.mark.parametrize("n", ("", "2")) def test_new_to_legacy_freq(freq, expected, n): freq = f"{n}{freq}" result = _new_to_legacy_freq(freq) expected = f"{n}{expected}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2") @pytest.mark.filterwarnings("ignore:'[AY]-.{3}' is deprecated") @pytest.mark.parametrize("year_alias", ("A", "Y", "YE")) @pytest.mark.parametrize("n", ("", "2")) def test_new_to_legacy_freq_anchored(year_alias, n): for month in _MONTH_ABBREVIATIONS.values(): freq = f"{n}{year_alias}-{month}" result = _new_to_legacy_freq(freq) expected = f"{n}A-{month}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only for pandas lt 2.2") @pytest.mark.parametrize( "freq, expected", ( # pandas-only freq strings are passed through ("BH", "BH"), ("CBH", "CBH"), ("N", "N"), ), ) def test_legacy_to_new_freq_pd_freq_passthrough(freq, expected): result = _legacy_to_new_freq(freq) assert result == expected @pytest.mark.filterwarnings("ignore:'.' is deprecated ") @pytest.mark.skipif(has_pandas_ge_2_2, reason="only for pandas lt 2.2") @pytest.mark.parametrize( "freq, expected", ( # these are each valid in pandas lt 2.2 ("T", "T"), ("min", "min"), ("S", "S"), ("s", "s"), ("L", "L"), ("ms", "ms"), ("U", "U"), ("us", "us"), # pandas-only freq strings are passed through ("bh", "bh"), ("cbh", "cbh"), ("ns", "ns"), ), ) def test_new_to_legacy_freq_pd_freq_passthrough(freq, expected): result = _new_to_legacy_freq(freq) assert result == expected @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex with:") @pytest.mark.parametrize("start", ("2000", "2001")) @pytest.mark.parametrize("end", ("2000", "2001")) @pytest.mark.parametrize( "freq", ( "MS", pytest.param("-1MS", marks=requires_pandas_3), "YS", pytest.param("-1YS", marks=requires_pandas_3), "ME", pytest.param("-1ME", marks=requires_pandas_3), "YE", pytest.param("-1YE", marks=requires_pandas_3), ), ) def test_cftime_range_same_as_pandas(start, end, freq) -> None: result = date_range(start, end, freq=freq, calendar="standard", use_cftime=True) result = result.to_datetimeindex(time_unit="ns") expected = date_range(start, end, freq=freq, use_cftime=False) np.testing.assert_array_equal(result, expected) @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex with:") @pytest.mark.parametrize( "start, end, periods", [ ("2022-01-01", "2022-01-10", 2), ("2022-03-01", "2022-03-31", 2), ("2022-01-01", "2022-01-10", None), ("2022-03-01", "2022-03-31", None), ], ) def test_cftime_range_no_freq(start, end, periods): """ Test whether date_range produces the same result as Pandas when freq is not provided, but start, end and periods are. """ # Generate date ranges using cftime_range cftimeindex = date_range(start=start, end=end, periods=periods, use_cftime=True) result = cftimeindex.to_datetimeindex(time_unit="ns") expected = pd.date_range(start=start, end=end, periods=periods) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize( "start, end, periods", [ ("2022-01-01", "2022-01-10", 2), ("2022-03-01", "2022-03-31", 2), ("2022-01-01", "2022-01-10", None), ("2022-03-01", "2022-03-31", None), ], ) def test_date_range_no_freq(start, end, periods): """ Test whether date_range produces the same result as Pandas when freq is not provided, but start, end and periods are. 
""" # Generate date ranges using date_range result = date_range(start=start, end=end, periods=periods) expected = pd.date_range(start=start, end=end, periods=periods) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize( "offset", [ MonthBegin(n=1), MonthEnd(n=1), QuarterBegin(n=1), QuarterEnd(n=1), YearBegin(n=1), YearEnd(n=1), ], ids=lambda x: f"{x}", ) @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_addition_preserves_has_year_zero(offset, has_year_zero): with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") datetime = cftime.DatetimeGregorian(-1, 12, 31, has_year_zero=has_year_zero) result = datetime + offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.year == 0 else: assert result.year == 1 @pytest.mark.parametrize( "offset", [ MonthBegin(n=1), MonthEnd(n=1), QuarterBegin(n=1), QuarterEnd(n=1), YearBegin(n=1), YearEnd(n=1), ], ids=lambda x: f"{x}", ) @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_subtraction_preserves_has_year_zero(offset, has_year_zero): datetime = cftime.DatetimeGregorian(1, 1, 1, has_year_zero=has_year_zero) result = datetime - offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.year == 0 else: assert result.year == -1 @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_day_option_end_accounts_for_has_year_zero(has_year_zero): offset = MonthEnd(n=1) with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") datetime = cftime.DatetimeGregorian(-1, 1, 31, has_year_zero=has_year_zero) result = datetime + offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.day == 28 else: assert result.day == 29 xarray-2025.12.0/xarray/tests/test_cftimeindex.py000066400000000000000000001306171511464676000217330ustar00rootroot00000000000000from __future__ import annotations import pickle from datetime import timedelta from textwrap import dedent import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftimeindex import ( CFTimeIndex, _parse_array_of_cftime_strings, _parsed_string_to_bounds, assert_all_valid_date_type, ) from xarray.coding.times import ( _parse_iso8601, parse_iso8601_like, ) from xarray.core.types import PDDatetimeUnitOptions from xarray.tests import ( _ALL_CALENDARS, _NON_STANDARD_CALENDAR_NAMES, _all_cftime_date_types, assert_array_equal, assert_identical, has_cftime, requires_cftime, ) # cftime 1.5.2 renames "gregorian" to "standard" standard_or_gregorian = "" if has_cftime: standard_or_gregorian = "standard" def date_dict( year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, ): return dict( year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond, ) ISO8601_LIKE_STRING_TESTS = { "year": ("1999", date_dict(year="1999")), "month": ("199901", date_dict(year="1999", month="01")), "month-dash": ("1999-01", date_dict(year="1999", month="01")), "day": ("19990101", date_dict(year="1999", month="01", day="01")), "day-dash": ("1999-01-01", date_dict(year="1999", month="01", day="01")), "hour": ("19990101T12", date_dict(year="1999", month="01", day="01", hour="12")), "hour-dash": ( "1999-01-01T12", date_dict(year="1999", month="01", day="01", hour="12"), ), "hour-space-separator": ( "1999-01-01 12", date_dict(year="1999", month="01", day="01", hour="12"), ), 
"minute": ( "19990101T1234", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "minute-dash": ( "1999-01-01T12:34", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "minute-space-separator": ( "1999-01-01 12:34", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "second": ( "19990101T123456", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "second-dash": ( "1999-01-01T12:34:56", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "second-space-separator": ( "1999-01-01 12:34:56", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "microsecond-1": ( "19990101T123456.123456", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56", microsecond="123456", ), ), "microsecond-2": ( "19990101T123456.1", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56", microsecond="1", ), ), } @pytest.mark.parametrize( ("string", "expected"), list(ISO8601_LIKE_STRING_TESTS.values()), ids=list(ISO8601_LIKE_STRING_TESTS.keys()), ) @pytest.mark.parametrize( "five_digit_year", [False, True], ids=["four-digit-year", "five-digit-year"] ) @pytest.mark.parametrize("sign", ["", "+", "-"], ids=["None", "plus", "minus"]) def test_parse_iso8601_like( five_digit_year: bool, sign: str, string: str, expected: dict ) -> None: pre = "1" if five_digit_year else "" datestring = sign + pre + string result = parse_iso8601_like(datestring) expected = expected.copy() expected.update(year=sign + pre + expected["year"]) assert result == expected # check malformed single digit addendum # this check is only performed when we have at least "hour" given # like "1999010101", where a single added digit should raise # for "1999" (year), "199901" (month) and "19990101" (day) # and a single added digit the string would just be interpreted # as having a 5-digit year. 
if result["microsecond"] is None and result["hour"] is not None: with pytest.raises(ValueError): parse_iso8601_like(datestring + "3") # check malformed floating point addendum if result["second"] is None or result["microsecond"] is not None: with pytest.raises(ValueError): parse_iso8601_like(datestring + ".3") _CFTIME_CALENDARS = [ "365_day", "360_day", "julian", "all_leap", "366_day", "gregorian", "proleptic_gregorian", ] @pytest.fixture(params=_CFTIME_CALENDARS) def date_type(request): return _all_cftime_date_types()[request.param] @pytest.fixture def index(date_type): dates = [ date_type(1, 1, 1), date_type(1, 2, 1), date_type(2, 1, 1), date_type(2, 2, 1), ] return CFTimeIndex(dates) @pytest.fixture def monotonic_decreasing_index(date_type): dates = [ date_type(2, 2, 1), date_type(2, 1, 1), date_type(1, 2, 1), date_type(1, 1, 1), ] return CFTimeIndex(dates) @pytest.fixture def length_one_index(date_type): dates = [date_type(1, 1, 1)] return CFTimeIndex(dates) @pytest.fixture def da(index): return xr.DataArray([1, 2, 3, 4], coords=[index], dims=["time"]) @pytest.fixture def series(index): return pd.Series([1, 2, 3, 4], index=index) @pytest.fixture def df(index): return pd.DataFrame([1, 2, 3, 4], index=index) @pytest.fixture def feb_days(date_type): import cftime if date_type is cftime.DatetimeAllLeap: return 29 elif date_type is cftime.Datetime360Day: return 30 else: return 28 @pytest.fixture def dec_days(date_type): import cftime if date_type is cftime.Datetime360Day: return 30 else: return 31 @pytest.fixture def index_with_name(date_type): dates = [ date_type(1, 1, 1), date_type(1, 2, 1), date_type(2, 1, 1), date_type(2, 2, 1), ] return CFTimeIndex(dates, name="foo") @requires_cftime @pytest.mark.parametrize(("name", "expected_name"), [("bar", "bar"), (None, "foo")]) def test_constructor_with_name(index_with_name, name, expected_name): result = CFTimeIndex(index_with_name, name=name).name assert result == expected_name @requires_cftime def test_assert_all_valid_date_type(date_type, index): import cftime if date_type is cftime.DatetimeNoLeap: mixed_date_types = np.array( [date_type(1, 1, 1), cftime.DatetimeAllLeap(1, 2, 1)] ) else: mixed_date_types = np.array( [date_type(1, 1, 1), cftime.DatetimeNoLeap(1, 2, 1)] ) with pytest.raises(TypeError): assert_all_valid_date_type(mixed_date_types) with pytest.raises(TypeError): assert_all_valid_date_type(np.array([1, date_type(1, 1, 1)])) assert_all_valid_date_type(np.array([date_type(1, 1, 1), date_type(1, 2, 1)])) @requires_cftime @pytest.mark.parametrize( ("field", "expected"), [ ("year", [1, 1, 2, 2]), ("month", [1, 2, 1, 2]), ("day", [1, 1, 1, 1]), ("hour", [0, 0, 0, 0]), ("minute", [0, 0, 0, 0]), ("second", [0, 0, 0, 0]), ("microsecond", [0, 0, 0, 0]), ], ) def test_cftimeindex_field_accessors(index, field, expected): result = getattr(index, field) expected = np.array(expected, dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize( ("field"), [ "year", "month", "day", "hour", "minute", "second", "microsecond", "dayofyear", "dayofweek", "days_in_month", ], ) def test_empty_cftimeindex_field_accessors(field): index = CFTimeIndex([]) result = getattr(index, field) expected = np.array([], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_dayofyear_accessor(index): result = index.dayofyear expected = np.array([date.dayofyr for date in index], dtype=np.int64) assert_array_equal(result, 
expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_dayofweek_accessor(index): result = index.dayofweek expected = np.array([date.dayofwk for date in index], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_days_in_month_accessor(index): result = index.days_in_month expected = np.array([date.daysinmonth for date in index], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize( ("string", "date_args", "reso"), [ ("1999", (1999, 1, 1), "year"), ("199902", (1999, 2, 1), "month"), ("19990202", (1999, 2, 2), "day"), ("19990202T01", (1999, 2, 2, 1), "hour"), ("19990202T0101", (1999, 2, 2, 1, 1), "minute"), ("19990202T010156", (1999, 2, 2, 1, 1, 56), "second"), ("19990202T010156.123456", (1999, 2, 2, 1, 1, 56, 123456), "microsecond"), ], ) def test_parse_iso8601_with_reso(date_type, string, date_args, reso): expected_date = date_type(*date_args) expected_reso = reso result_date, result_reso = _parse_iso8601(date_type, string) assert result_date == expected_date assert result_reso == expected_reso @requires_cftime def test_parse_string_to_bounds_year(date_type, dec_days): parsed = date_type(2, 2, 10, 6, 2, 8, 1) expected_start = date_type(2, 1, 1) expected_end = date_type(2, 12, dec_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "year", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parse_string_to_bounds_month_feb(date_type, feb_days): parsed = date_type(2, 2, 10, 6, 2, 8, 1) expected_start = date_type(2, 2, 1) expected_end = date_type(2, 2, feb_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "month", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parse_string_to_bounds_month_dec(date_type, dec_days): parsed = date_type(2, 12, 1) expected_start = date_type(2, 12, 1) expected_end = date_type(2, 12, dec_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "month", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime @pytest.mark.parametrize( ("reso", "ex_start_args", "ex_end_args"), [ ("day", (2, 2, 10), (2, 2, 10, 23, 59, 59, 999999)), ("hour", (2, 2, 10, 6), (2, 2, 10, 6, 59, 59, 999999)), ("minute", (2, 2, 10, 6, 2), (2, 2, 10, 6, 2, 59, 999999)), ("second", (2, 2, 10, 6, 2, 8), (2, 2, 10, 6, 2, 8, 999999)), ], ) def test_parsed_string_to_bounds_sub_monthly( date_type, reso, ex_start_args, ex_end_args ): parsed = date_type(2, 2, 10, 6, 2, 8, 123456) expected_start = date_type(*ex_start_args) expected_end = date_type(*ex_end_args) result_start, result_end = _parsed_string_to_bounds(date_type, reso, parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parsed_string_to_bounds_raises(date_type): with pytest.raises(KeyError): _parsed_string_to_bounds(date_type, "a", date_type(1, 1, 1)) @requires_cftime def test_get_loc(date_type, index): result = index.get_loc("0001") assert result == slice(0, 2) result = index.get_loc(date_type(1, 2, 1)) assert result == 1 result = index.get_loc("0001-02-01") assert result == slice(1, 2) with pytest.raises(KeyError, match=r"1234"): index.get_loc("1234") @requires_cftime def test_get_slice_bound(date_type, index): result = 
index.get_slice_bound("0001", "left") expected = 0 assert result == expected result = index.get_slice_bound("0001", "right") expected = 2 assert result == expected result = index.get_slice_bound(date_type(1, 3, 1), "left") expected = 2 assert result == expected result = index.get_slice_bound(date_type(1, 3, 1), "right") expected = 2 assert result == expected @requires_cftime def test_get_slice_bound_decreasing_index(date_type, monotonic_decreasing_index): result = monotonic_decreasing_index.get_slice_bound("0001", "left") expected = 2 assert result == expected result = monotonic_decreasing_index.get_slice_bound("0001", "right") expected = 4 assert result == expected result = monotonic_decreasing_index.get_slice_bound(date_type(1, 3, 1), "left") expected = 2 assert result == expected result = monotonic_decreasing_index.get_slice_bound(date_type(1, 3, 1), "right") expected = 2 assert result == expected @requires_cftime def test_get_slice_bound_length_one_index(date_type, length_one_index): result = length_one_index.get_slice_bound("0001", "left") expected = 0 assert result == expected result = length_one_index.get_slice_bound("0001", "right") expected = 1 assert result == expected result = length_one_index.get_slice_bound(date_type(1, 3, 1), "left") expected = 1 assert result == expected result = length_one_index.get_slice_bound(date_type(1, 3, 1), "right") expected = 1 assert result == expected @requires_cftime def test_string_slice_length_one_index(length_one_index): da = xr.DataArray([1], coords=[length_one_index], dims=["time"]) result = da.sel(time=slice("0001", "0001")) assert_identical(result, da) @requires_cftime def test_date_type_property(date_type, index): assert index.date_type is date_type @requires_cftime def test_contains(date_type, index): assert "0001-01-01" in index assert "0001" in index assert "0003" not in index assert date_type(1, 1, 1) in index assert date_type(3, 1, 1) not in index @requires_cftime def test_groupby(da): result = da.groupby("time.month").sum("time") expected = xr.DataArray([4, 6], coords=[[1, 2]], dims=["month"]) assert_identical(result, expected) SEL_STRING_OR_LIST_TESTS = { "string": "0001", "string-slice": slice("0001-01-01", "0001-12-30"), "bool-list": [True, True, False, False], } @requires_cftime @pytest.mark.parametrize( "sel_arg", list(SEL_STRING_OR_LIST_TESTS.values()), ids=list(SEL_STRING_OR_LIST_TESTS.keys()), ) def test_sel_string_or_list(da, index, sel_arg): expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.sel(time=sel_arg) assert_identical(result, expected) @requires_cftime def test_sel_date_slice_or_list(da, index, date_type): expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.sel(time=slice(date_type(1, 1, 1), date_type(1, 12, 30))) assert_identical(result, expected) result = da.sel(time=[date_type(1, 1, 1), date_type(1, 2, 1)]) assert_identical(result, expected) @requires_cftime def test_sel_date_scalar(da, date_type, index): expected = xr.DataArray(1).assign_coords(time=index[0]) result = da.sel(time=date_type(1, 1, 1)) assert_identical(result, expected) @requires_cftime def test_sel_date_distant_date(da, date_type, index): expected = xr.DataArray(4).assign_coords(time=index[3]) result = da.sel(time=date_type(2000, 1, 1), method="nearest") assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "nearest"}, {"method": "nearest", "tolerance": timedelta(days=70)}, {"method": "nearest", "tolerance": timedelta(days=1800000)}, ], ) def 
test_sel_date_scalar_nearest(da, date_type, index, sel_kwargs): expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "pad"}, {"method": "pad", "tolerance": timedelta(days=365)}], ) def test_sel_date_scalar_pad(da, date_type, index, sel_kwargs): expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "backfill"}, {"method": "backfill", "tolerance": timedelta(days=365)}], ) def test_sel_date_scalar_backfill(da, date_type, index, sel_kwargs): expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "pad", "tolerance": timedelta(days=20)}, {"method": "backfill", "tolerance": timedelta(days=20)}, {"method": "nearest", "tolerance": timedelta(days=20)}, ], ) def test_sel_date_scalar_tolerance_raises(da, date_type, sel_kwargs): with pytest.raises(KeyError): da.sel(time=date_type(1, 5, 1), **sel_kwargs) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "nearest"}, {"method": "nearest", "tolerance": timedelta(days=70)}], ) def test_sel_date_list_nearest(da, date_type, index, sel_kwargs): expected = xr.DataArray([2, 2], coords=[[index[1], index[1]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray([2, 3], coords=[[index[1], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 12, 1)], **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray([3, 3], coords=[[index[2], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 11, 1), date_type(1, 12, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "pad"}, {"method": "pad", "tolerance": timedelta(days=365)}], ) def test_sel_date_list_pad(da, date_type, index, sel_kwargs): expected = xr.DataArray([2, 2], coords=[[index[1], index[1]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "backfill"}, {"method": "backfill", "tolerance": timedelta(days=365)}], ) def test_sel_date_list_backfill(da, date_type, index, sel_kwargs): expected = xr.DataArray([3, 3], coords=[[index[2], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "pad", "tolerance": timedelta(days=20)}, {"method": "backfill", "tolerance": timedelta(days=20)}, {"method": "nearest", "tolerance": timedelta(days=20)}, ], ) def 
test_sel_date_list_tolerance_raises(da, date_type, sel_kwargs): with pytest.raises(KeyError): da.sel(time=[date_type(1, 2, 1), date_type(1, 5, 1)], **sel_kwargs) @requires_cftime def test_isel(da, index): expected = xr.DataArray(1).assign_coords(time=index[0]) result = da.isel(time=0) assert_identical(result, expected) expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.isel(time=[0, 1]) assert_identical(result, expected) @pytest.fixture def scalar_args(date_type): return [date_type(1, 1, 1)] @pytest.fixture def range_args(date_type): return [ "0001", slice("0001-01-01", "0001-12-30"), slice(None, "0001-12-30"), slice(date_type(1, 1, 1), date_type(1, 12, 30)), slice(None, date_type(1, 12, 30)), ] @requires_cftime def test_indexing_in_series_getitem(series, index, scalar_args, range_args): for arg in scalar_args: assert series[arg] == 1 expected = pd.Series([1, 2], index=index[:2]) for arg in range_args: assert series[arg].equals(expected) @requires_cftime def test_indexing_in_series_loc(series, index, scalar_args, range_args): for arg in scalar_args: assert series.loc[arg] == 1 expected = pd.Series([1, 2], index=index[:2]) for arg in range_args: assert series.loc[arg].equals(expected) @requires_cftime def test_indexing_in_series_iloc(series, index): expected1 = 1 assert series.iloc[0] == expected1 expected2 = pd.Series([1, 2], index=index[:2]) assert series.iloc[:2].equals(expected2) @requires_cftime def test_series_dropna(index): series = pd.Series([0.0, 1.0, np.nan, np.nan], index=index) expected = series.iloc[:2] result = series.dropna() assert result.equals(expected) @requires_cftime def test_indexing_in_dataframe_loc(df, index, scalar_args, range_args): expected_s = pd.Series([1], name=index[0]) for arg in scalar_args: result_s = df.loc[arg] assert result_s.equals(expected_s) expected_df = pd.DataFrame([1, 2], index=index[:2]) for arg in range_args: result_df = df.loc[arg] assert result_df.equals(expected_df) @requires_cftime def test_indexing_in_dataframe_iloc(df, index): expected_s = pd.Series([1], name=index[0]) result_s = df.iloc[0] assert result_s.equals(expected_s) assert result_s.equals(expected_s) expected_df = pd.DataFrame([1, 2], index=index[:2]) result_df = df.iloc[:2] assert result_df.equals(expected_df) @requires_cftime def test_concat_cftimeindex(date_type): da1 = xr.DataArray( [1.0, 2.0], coords=[[date_type(1, 1, 1), date_type(1, 2, 1)]], dims=["time"] ) da2 = xr.DataArray( [3.0, 4.0], coords=[[date_type(1, 3, 1), date_type(1, 4, 1)]], dims=["time"] ) da = xr.concat([da1, da2], dim="time") assert isinstance(da.xindexes["time"].to_pandas_index(), CFTimeIndex) @requires_cftime def test_empty_cftimeindex(): index = CFTimeIndex([]) assert index.date_type is None @requires_cftime def test_cftimeindex_add(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=1) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_add_timedeltaindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = a + deltas expected = a.shift(2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("n", [2.0, 1.5]) @pytest.mark.parametrize( 
"freq,units", [ ("h", "h"), ("min", "min"), ("s", "s"), ("ms", "ms"), ], ) @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_shift_float(n, freq, units, calendar) -> None: a = xr.date_range("2000", periods=3, calendar=calendar, freq="D", use_cftime=True) result = a + pd.Timedelta(n, units) expected = a.shift(n, freq) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_shift_float_us() -> None: a = xr.date_range("2000", periods=3, freq="D", use_cftime=True) with pytest.raises( ValueError, match="Could not convert to integer offset at any resolution" ): a.shift(2.5, "us") @requires_cftime @pytest.mark.parametrize("freq", ["YS", "YE", "QS", "QE", "MS", "ME", "D"]) def test_cftimeindex_shift_float_fails_for_non_tick_freqs(freq) -> None: a = xr.date_range("2000", periods=3, freq="D", use_cftime=True) with pytest.raises(TypeError, match="unsupported operand type"): a.shift(2.5, freq) @requires_cftime def test_cftimeindex_radd(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = timedelta(days=1) + index assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_timedeltaindex_add_cftimeindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = deltas + a expected = a.shift(2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_sub_timedelta(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=2) result = result - timedelta(days=1) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize( "other", [np.array(4 * [timedelta(days=1)]), np.array(timedelta(days=1))], ids=["1d-array", "scalar-array"], ) def test_cftimeindex_sub_timedelta_array(index, other): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=2) result = result - other assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_cftimeindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) b = a.shift(2, "D") result = b - a expected = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_cftime_datetime(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) result = a - a[0] expected = pd.TimedeltaIndex([timedelta(days=i) for i in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftime_datetime_sub_cftimeindex(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) result = a[0] - a expected = 
pd.TimedeltaIndex([timedelta(days=-i) for i in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_distant_cftime_datetime_sub_cftimeindex(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="difference exceeds"): a.date_type(1, 1, 1) - a @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_timedeltaindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = a - deltas expected = a.shift(-2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_index_of_cftime_datetimes(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) b = pd.Index(a.values) expected = a - a result = a - b assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_not_implemented(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(TypeError, match="unsupported operand"): a - 1 @requires_cftime def test_cftimeindex_rsub(index): with pytest.raises(TypeError): timedelta(days=1) - index @requires_cftime @pytest.mark.parametrize("freq", ["D", timedelta(days=1)]) def test_cftimeindex_shift(index, freq) -> None: date_type = index.date_type expected_dates = [ date_type(1, 1, 3), date_type(1, 2, 3), date_type(2, 1, 3), date_type(2, 2, 3), ] expected = CFTimeIndex(expected_dates) result = index.shift(2, freq) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_shift_invalid_periods() -> None: index = xr.date_range("2000", periods=3, use_cftime=True) with pytest.raises(TypeError): index.shift("a", "D") @requires_cftime def test_cftimeindex_shift_invalid_freq() -> None: index = xr.date_range("2000", periods=3, use_cftime=True) with pytest.raises(TypeError): index.shift(1, 1) @requires_cftime @pytest.mark.parametrize( ("calendar", "expected"), [ ("noleap", "noleap"), ("365_day", "noleap"), ("360_day", "360_day"), ("julian", "julian"), ("gregorian", standard_or_gregorian), ("standard", standard_or_gregorian), ("proleptic_gregorian", "proleptic_gregorian"), ], ) def test_cftimeindex_calendar_property(calendar, expected): index = xr.date_range(start="2000", periods=3, calendar=calendar, use_cftime=True) assert index.calendar == expected @requires_cftime def test_empty_cftimeindex_calendar_property(): index = CFTimeIndex([]) assert index.calendar is None @requires_cftime @pytest.mark.parametrize( "calendar", [ "noleap", "365_day", "360_day", "julian", "gregorian", "standard", "proleptic_gregorian", ], ) def test_cftimeindex_freq_property_none_size_lt_3(calendar): for periods in range(3): index = xr.date_range( start="2000", periods=periods, calendar=calendar, use_cftime=True ) assert index.freq is None @requires_cftime @pytest.mark.parametrize( ("calendar", "expected"), [ ("noleap", "noleap"), ("365_day", "noleap"), ("360_day", "360_day"), ("julian", "julian"), ("gregorian", standard_or_gregorian), ("standard", standard_or_gregorian), ("proleptic_gregorian", "proleptic_gregorian"), ], ) def test_cftimeindex_calendar_repr(calendar, 
expected): """Test that cftimeindex has calendar property in repr.""" index = xr.date_range(start="2000", periods=3, calendar=calendar, use_cftime=True) repr_str = index.__repr__() assert f" calendar='{expected}'" in repr_str assert "2000-01-01 00:00:00, 2000-01-02 00:00:00" in repr_str @requires_cftime @pytest.mark.parametrize("periods", [2, 40]) def test_cftimeindex_periods_repr(periods): """Test that cftimeindex has periods property in repr.""" index = xr.date_range(start="2000", periods=periods, use_cftime=True) repr_str = index.__repr__() assert f" length={periods}" in repr_str @requires_cftime @pytest.mark.parametrize("calendar", ["noleap", "360_day", "standard"]) @pytest.mark.parametrize("freq", ["D", "h"]) def test_cftimeindex_freq_in_repr(freq, calendar): """Test that cftimeindex has frequency property in repr.""" index = xr.date_range( start="2000", periods=3, freq=freq, calendar=calendar, use_cftime=True ) repr_str = index.__repr__() assert f", freq='{freq}'" in repr_str @requires_cftime @pytest.mark.parametrize( "periods,expected", [ ( 2, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object', length=2, calendar='{standard_or_gregorian}', freq=None)""", ), ( 4, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00, 2000-01-03 00:00:00, 2000-01-04 00:00:00], dtype='object', length=4, calendar='{standard_or_gregorian}', freq='D')""", ), ( 101, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00, 2000-01-03 00:00:00, 2000-01-04 00:00:00, 2000-01-05 00:00:00, 2000-01-06 00:00:00, 2000-01-07 00:00:00, 2000-01-08 00:00:00, 2000-01-09 00:00:00, 2000-01-10 00:00:00, ... 2000-04-01 00:00:00, 2000-04-02 00:00:00, 2000-04-03 00:00:00, 2000-04-04 00:00:00, 2000-04-05 00:00:00, 2000-04-06 00:00:00, 2000-04-07 00:00:00, 2000-04-08 00:00:00, 2000-04-09 00:00:00, 2000-04-10 00:00:00], dtype='object', length=101, calendar='{standard_or_gregorian}', freq='D')""", ), ], ) def test_cftimeindex_repr_formatting(periods, expected): """Test that cftimeindex.__repr__ is formatted similar to pd.Index.__repr__.""" index = xr.date_range(start="2000", periods=periods, freq="D", use_cftime=True) expected = dedent(expected) assert expected == repr(index) @requires_cftime @pytest.mark.parametrize("display_width", [40, 80, 100]) @pytest.mark.parametrize("periods", [2, 3, 4, 100, 101]) def test_cftimeindex_repr_formatting_width(periods, display_width): """Test that cftimeindex is sensitive to OPTIONS['display_width'].""" index = xr.date_range(start="2000", periods=periods, use_cftime=True) len_intro_str = len("CFTimeIndex(") with xr.set_options(display_width=display_width): repr_str = index.__repr__() splitted = repr_str.split("\n") for i, s in enumerate(splitted): # check that lines not longer than OPTIONS['display_width'] assert len(s) <= display_width, f"{len(s)} {s} {display_width}" if i > 0: # check for initial spaces assert s[:len_intro_str] == " " * len_intro_str @requires_cftime @pytest.mark.parametrize("periods", [22, 50, 100]) def test_cftimeindex_repr_101_shorter(periods): index_101 = xr.date_range(start="2000", periods=101, use_cftime=True) index_periods = xr.date_range(start="2000", periods=periods, use_cftime=True) index_101_repr_str = index_101.__repr__() index_periods_repr_str = index_periods.__repr__() assert len(index_101_repr_str) < len(index_periods_repr_str) @requires_cftime def test_parse_array_of_cftime_strings(): from cftime import DatetimeNoLeap strings = np.array([["2000-01-01", "2000-01-02"], ["2000-01-03", "2000-01-04"]]) expected = np.array( [ 
[DatetimeNoLeap(2000, 1, 1), DatetimeNoLeap(2000, 1, 2)], [DatetimeNoLeap(2000, 1, 3), DatetimeNoLeap(2000, 1, 4)], ] ) result = _parse_array_of_cftime_strings(strings, DatetimeNoLeap) np.testing.assert_array_equal(result, expected) # Test scalar array case strings = np.array("2000-01-01") expected = np.array(DatetimeNoLeap(2000, 1, 1)) result = _parse_array_of_cftime_strings(strings, DatetimeNoLeap) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_strftime_of_cftime_array(calendar): date_format = "%Y%m%d%H%M" cf_values = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) dt_values = pd.date_range("2000", periods=5) expected = pd.Index(dt_values.strftime(date_format)) result = cf_values.strftime(date_format) assert result.equals(expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("unsafe", [False, True]) def test_to_datetimeindex(calendar, unsafe) -> None: index = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) expected = pd.date_range("2000", periods=5, unit="ns") if calendar in _NON_STANDARD_CALENDAR_NAMES and not unsafe: with pytest.warns(RuntimeWarning, match="non-standard"): result = index.to_datetimeindex(time_unit="ns") else: result = index.to_datetimeindex(unsafe=unsafe, time_unit="ns") assert result.equals(expected) np.testing.assert_array_equal(result, expected) assert isinstance(result, pd.DatetimeIndex) @requires_cftime def test_to_datetimeindex_future_warning() -> None: index = xr.date_range("2000", periods=5, use_cftime=True) expected = pd.date_range("2000", periods=5, unit="ns") with pytest.warns(FutureWarning, match="In a future version"): result = index.to_datetimeindex() assert result.equals(expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_to_datetimeindex_out_of_range(calendar) -> None: index = xr.date_range("0001", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="0001"): index.to_datetimeindex(time_unit="ns") @requires_cftime @pytest.mark.parametrize("unsafe", [False, True]) def test_to_datetimeindex_gregorian_pre_reform(unsafe) -> None: index = xr.date_range("1582", periods=5, calendar="gregorian", use_cftime=True) if unsafe: result = index.to_datetimeindex(time_unit="us", unsafe=unsafe) else: with pytest.warns(RuntimeWarning, match="reform"): result = index.to_datetimeindex(time_unit="us", unsafe=unsafe) expected = pd.date_range("1582", periods=5, unit="us") assert result.equals(expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize("calendar", ["all_leap", "360_day"]) def test_to_datetimeindex_feb_29(calendar) -> None: index = xr.date_range("2001-02-28", periods=2, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="29"): index.to_datetimeindex(time_unit="ns") @pytest.mark.xfail(reason="fails on pandas main branch") @requires_cftime def test_multiindex(): index = xr.date_range( "2001-01-01", periods=100, calendar="360_day", use_cftime=True ) mindex = pd.MultiIndex.from_arrays([index]) assert mindex.get_loc("2001-01") == slice(0, 30) @requires_cftime @pytest.mark.parametrize("freq", ["3663s", "33min", "2h"]) @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_against_datetimeindex(freq, method) -> None: # for now unit="us" seems good enough expected = pd.date_range("2000-01-02T01:03:51", 
periods=10, freq="1777s", unit="ns") expected = getattr(expected, method)(freq) result = xr.date_range( "2000-01-02T01:03:51", periods=10, freq="1777s", use_cftime=True ) result = getattr(result, method)(freq).to_datetimeindex(time_unit="ns") assert result.equals(expected) @requires_cftime @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_empty_cftimindex(method): index = CFTimeIndex([]) result = getattr(index, method)("2s") expected = CFTimeIndex([]) assert result.equals(expected) assert result is not index @requires_cftime @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_invalid_freq(method): index = xr.date_range( "2000-01-02T01:03:51", periods=10, freq="1777s", use_cftime=True ) with pytest.raises(ValueError, match="fixed"): getattr(index, method)("MS") @pytest.fixture def rounding_index(date_type): return xr.CFTimeIndex( [ date_type(1, 1, 1, 1, 59, 59, 999512), date_type(1, 1, 1, 3, 0, 1, 500001), date_type(1, 1, 1, 7, 0, 6, 499999), ] ) @requires_cftime def test_ceil(rounding_index, date_type): result = rounding_index.ceil("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 2, 0, 0, 0), date_type(1, 1, 1, 3, 0, 2, 0), date_type(1, 1, 1, 7, 0, 7, 0), ] ) assert result.equals(expected) @requires_cftime def test_floor(rounding_index, date_type): result = rounding_index.floor("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 1, 59, 59, 0), date_type(1, 1, 1, 3, 0, 1, 0), date_type(1, 1, 1, 7, 0, 6, 0), ] ) assert result.equals(expected) @requires_cftime def test_round(rounding_index, date_type): result = rounding_index.round("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 2, 0, 0, 0), date_type(1, 1, 1, 3, 0, 2, 0), date_type(1, 1, 1, 7, 0, 6, 0), ] ) assert result.equals(expected) @requires_cftime def test_asi8(date_type): index = xr.CFTimeIndex([date_type(1970, 1, 1), date_type(1970, 1, 2)]) result = index.asi8 expected = 1000000 * 86400 * np.array([0, 1]) np.testing.assert_array_equal(result, expected) @requires_cftime def test_asi8_distant_date(): """Test that asi8 conversion is truly exact.""" import cftime date_type = cftime.DatetimeProlepticGregorian index = xr.CFTimeIndex([date_type(10731, 4, 22, 3, 25, 45, 123456)]) result = index.asi8 expected = np.array([1000000 * 86400 * 400 * 8000 + 12345 * 1000000 + 123456]) np.testing.assert_array_equal(result, expected) @requires_cftime def test_asi8_empty_cftimeindex(): index = xr.CFTimeIndex([]) result = index.asi8 expected = np.array([], dtype=np.int64) np.testing.assert_array_equal(result, expected) @requires_cftime def test_infer_freq_valid_types(time_unit: PDDatetimeUnitOptions) -> None: cf_index = xr.date_range("2000-01-01", periods=3, freq="D", use_cftime=True) assert xr.infer_freq(cf_index) == "D" assert xr.infer_freq(xr.DataArray(cf_index)) == "D" pd_index = pd.date_range("2000-01-01", periods=3, freq="D").as_unit(time_unit) assert xr.infer_freq(pd_index) == "D" assert xr.infer_freq(xr.DataArray(pd_index)) == "D" pd_td_index = pd.timedelta_range(start="1D", periods=3, freq="D").as_unit(time_unit) assert xr.infer_freq(pd_td_index) == "D" assert xr.infer_freq(xr.DataArray(pd_td_index)) == "D" @requires_cftime def test_infer_freq_invalid_inputs(): # Non-datetime DataArray with pytest.raises(ValueError, match="must contain datetime-like objects"): xr.infer_freq(xr.DataArray([0, 1, 2])) index = xr.date_range("1990-02-03", periods=4, freq="MS", use_cftime=True) # 2D DataArray with pytest.raises(ValueError, match="must be 1D"): 
xr.infer_freq(xr.DataArray([index, index])) # CFTimeIndex too short with pytest.raises(ValueError, match="Need at least 3 dates to infer frequency"): xr.infer_freq(index[:2]) # Non-monotonic input assert xr.infer_freq(index[np.array([0, 2, 1, 3])]) is None # Non-unique input assert xr.infer_freq(index[np.array([0, 1, 1, 2])]) is None # No unique frequency (here 1st step is MS, second is 2MS) assert xr.infer_freq(index[np.array([0, 1, 3])]) is None # Same, but for QS index = xr.date_range("1990-02-03", periods=4, freq="QS", use_cftime=True) assert xr.infer_freq(index[np.array([0, 1, 3])]) is None @requires_cftime @pytest.mark.parametrize( "freq", [ "300YS-JAN", "YE-DEC", "YS-JUL", "2YS-FEB", "QE-NOV", "3QS-DEC", "MS", "4ME", "7D", "D", "30h", "5min", "40s", ], ) @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_infer_freq(freq, calendar): index = xr.date_range( "2000-01-01", periods=3, freq=freq, calendar=calendar, use_cftime=True ) out = xr.infer_freq(index) assert out == freq @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_pickle_cftimeindex(calendar): idx = xr.date_range( "2000-01-01", periods=3, freq="D", calendar=calendar, use_cftime=True ) idx_pkl = pickle.loads(pickle.dumps(idx)) assert (idx == idx_pkl).all() xarray-2025.12.0/xarray/tests/test_cftimeindex_resample.py000066400000000000000000000220021511464676000236070ustar00rootroot00000000000000from __future__ import annotations import datetime from typing import TypedDict import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftime_offsets import ( CFTIME_TICKS, Day, _new_to_legacy_freq, to_offset, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.resample_cftime import CFTimeGrouper from xarray.tests import has_pandas_3 cftime = pytest.importorskip("cftime") # Create a list of pairs of similar-length initial and resample frequencies # that cover: # - Resampling from shorter to longer frequencies # - Resampling from longer to shorter frequencies # - Resampling from one initial frequency to another. # These are used to test the cftime version of resample against pandas # with a standard calendar. 
FREQS = [ ("8003D", "4001D"), ("8003D", "16006D"), ("8003D", "21YS"), ("6h", "3h"), ("6h", "12h"), ("6h", "400min"), ("3D", "D"), ("3D", "6D"), ("11D", "MS"), ("3MS", "MS"), ("3MS", "6MS"), ("3MS", "85D"), ("7ME", "3ME"), ("7ME", "14ME"), ("7ME", "2QS-APR"), ("43QS-AUG", "21QS-AUG"), ("43QS-AUG", "86QS-AUG"), ("43QS-AUG", "11YE-JUN"), ("11QE-JUN", "5QE-JUN"), ("11QE-JUN", "22QE-JUN"), ("11QE-JUN", "51MS"), ("3YS-MAR", "YS-MAR"), ("3YS-MAR", "6YS-MAR"), ("3YS-MAR", "14QE-FEB"), ("7YE-MAY", "3YE-MAY"), ("7YE-MAY", "14YE-MAY"), ("7YE-MAY", "85ME"), ] def has_tick_resample_freq(freqs): resample_freq, _ = freqs resample_freq_as_offset = to_offset(resample_freq) return isinstance(resample_freq_as_offset, CFTIME_TICKS) def has_non_tick_resample_freq(freqs): return not has_tick_resample_freq(freqs) FREQS_WITH_TICK_RESAMPLE_FREQ = list(filter(has_tick_resample_freq, FREQS)) FREQS_WITH_NON_TICK_RESAMPLE_FREQ = list(filter(has_non_tick_resample_freq, FREQS)) def compare_against_pandas( da_datetimeindex, da_cftimeindex, freq, closed=None, label=None, offset=None, origin=None, ) -> None: if isinstance(origin, tuple): origin_pandas = pd.Timestamp(datetime.datetime(*origin)) origin_cftime = cftime.DatetimeGregorian(*origin) else: origin_pandas = origin origin_cftime = origin try: result_datetimeindex = da_datetimeindex.resample( time=freq, closed=closed, label=label, offset=offset, origin=origin_pandas, ).mean() except ValueError: with pytest.raises(ValueError): da_cftimeindex.resample( time=freq, closed=closed, label=label, origin=origin_cftime, offset=offset, ).mean() else: result_cftimeindex = da_cftimeindex.resample( time=freq, closed=closed, label=label, origin=origin_cftime, offset=offset, ).mean() # TODO (benbovy - flexible indexes): update when CFTimeIndex is a xarray Index subclass result_cftimeindex["time"] = ( result_cftimeindex.xindexes["time"] .to_pandas_index() .to_datetimeindex(time_unit="ns") ) xr.testing.assert_identical(result_cftimeindex, result_datetimeindex) def da(index) -> xr.DataArray: return xr.DataArray( np.arange(100.0, 100.0 + index.size), coords=[index], dims=["time"] ) @pytest.mark.parametrize( "freqs", FREQS_WITH_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x) ) @pytest.mark.parametrize("closed", [None, "left", "right"]) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("offset", [None, "5s"], ids=lambda x: f"{x}") def test_resample_with_tick_resample_freq(freqs, closed, label, offset) -> None: initial_freq, resample_freq = freqs start = "2000-01-01T12:07:01" origin = "start" datetime_index = pd.date_range( start=start, periods=5, freq=_new_to_legacy_freq(initial_freq) ) cftime_index = xr.date_range( start=start, periods=5, freq=initial_freq, use_cftime=True ) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, label=label, offset=offset, origin=origin, ) @pytest.mark.parametrize( "freqs", FREQS_WITH_NON_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x) ) @pytest.mark.parametrize("closed", [None, "left", "right"]) @pytest.mark.parametrize("label", [None, "left", "right"]) def test_resample_with_non_tick_resample_freq(freqs, closed, label) -> None: initial_freq, resample_freq = freqs resample_freq_as_offset = to_offset(resample_freq) if isinstance(resample_freq_as_offset, Day) and not has_pandas_3: pytest.skip("Only valid for pandas >= 3.0") start = "2000-01-01T12:07:01" # Set offset and origin to their default values since 
they have no effect # on resampling data with a non-tick resample frequency. offset = None origin = "start_day" datetime_index = pd.date_range( start=start, periods=5, freq=_new_to_legacy_freq(initial_freq) ) cftime_index = xr.date_range( start=start, periods=5, freq=initial_freq, use_cftime=True ) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, label=label, offset=offset, origin=origin, ) @pytest.mark.parametrize( ("freq", "expected"), [ ("s", "left"), ("min", "left"), ("h", "left"), ("D", "left"), ("ME", "right"), ("MS", "left"), ("QE", "right"), ("QS", "left"), ("YE", "right"), ("YS", "left"), ], ) def test_closed_label_defaults(freq, expected) -> None: assert CFTimeGrouper(freq=freq).closed == expected assert CFTimeGrouper(freq=freq).label == expected @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex") @pytest.mark.parametrize( "calendar", ["gregorian", "noleap", "all_leap", "360_day", "julian"] ) def test_calendars(calendar: str) -> None: # Limited testing for non-standard calendars freq, closed, label = "8001min", None, None xr_index = xr.date_range( start="2004-01-01T12:07:01", periods=7, freq="3D", calendar=calendar, use_cftime=True, ) pd_index = pd.date_range(start="2004-01-01T12:07:01", periods=7, freq="3D") da_cftime = da(xr_index).resample(time=freq, closed=closed, label=label).mean() da_datetime = da(pd_index).resample(time=freq, closed=closed, label=label).mean() # TODO (benbovy - flexible indexes): update when CFTimeIndex is a xarray Index subclass new_pd_index = da_cftime.xindexes["time"].to_pandas_index() assert isinstance(new_pd_index, CFTimeIndex) # shouldn't that be a pd.Index? da_cftime["time"] = new_pd_index.to_datetimeindex(time_unit="ns") xr.testing.assert_identical(da_cftime, da_datetime) class DateRangeKwargs(TypedDict): start: str periods: int freq: str @pytest.mark.parametrize("closed", ["left", "right"]) @pytest.mark.parametrize( "origin", ["start_day", "start", "end", "end_day", "epoch", (1970, 1, 1, 3, 2)], ids=lambda x: f"{x}", ) def test_origin(closed, origin) -> None: initial_freq, resample_freq = ("3h", "9h") start = "1969-12-31T12:07:01" index_kwargs: DateRangeKwargs = dict(start=start, periods=12, freq=initial_freq) datetime_index = pd.date_range(**index_kwargs) cftime_index = xr.date_range(**index_kwargs, use_cftime=True) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, origin=origin, ) @pytest.mark.parametrize("offset", ["foo", "5MS", 10]) def test_invalid_offset_error(offset: str | int) -> None: cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = da(cftime_index) with pytest.raises(ValueError, match="offset must be"): da_cftime.resample(time="2h", offset=offset) # type: ignore[arg-type] def test_timedelta_offset() -> None: timedelta = datetime.timedelta(seconds=5) string = "5s" cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = da(cftime_index) timedelta_result = da_cftime.resample(time="2h", offset=timedelta).mean() string_result = da_cftime.resample(time="2h", offset=string).mean() xr.testing.assert_identical(timedelta_result, string_result) @pytest.mark.parametrize(("option", "value"), [("offset", "5s"), ("origin", "start")]) def test_non_tick_option_warning(option, value) -> None: cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = 
da(cftime_index) kwargs = {option: value} with pytest.warns(RuntimeWarning, match=option): da_cftime.resample(time="ME", **kwargs) xarray-2025.12.0/xarray/tests/test_coarsen.py000066400000000000000000000271451511464676000210670ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr from xarray import DataArray, Dataset, set_options from xarray.core import duck_array_ops from xarray.tests import ( assert_allclose, assert_equal, assert_identical, has_dask, raise_if_dask_computes, requires_cftime, ) def test_coarsen_absent_dims_error(ds: Dataset) -> None: with pytest.raises( ValueError, match=r"Window dimensions \('foo',\) not found in Dataset dimensions", ): ds.coarsen(foo=2) @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize(("boundary", "side"), [("trim", "left"), ("pad", "right")]) def test_coarsen_dataset(ds, dask, boundary, side): if dask and has_dask: ds = ds.chunk({"x": 4}) actual = ds.coarsen(time=2, x=3, boundary=boundary, side=side).max() assert_equal( actual["z1"], ds["z1"].coarsen(x=3, boundary=boundary, side=side).max() ) # coordinate should be mean by default assert_equal( actual["time"], ds["time"].coarsen(time=2, boundary=boundary, side=side).mean() ) @pytest.mark.parametrize("dask", [True, False]) def test_coarsen_coords(ds, dask): if dask and has_dask: ds = ds.chunk({"x": 4}) # check if coord_func works actual = ds.coarsen(time=2, x=3, boundary="trim", coord_func={"time": "max"}).max() assert_equal(actual["z1"], ds["z1"].coarsen(x=3, boundary="trim").max()) assert_equal(actual["time"], ds["time"].coarsen(time=2, boundary="trim").max()) # raise if exact with pytest.raises(ValueError): ds.coarsen(x=3).mean() # should be no error ds.isel(x=slice(0, 3 * (len(ds["x"]) // 3))).coarsen(x=3).mean() # working test with pd.time da = xr.DataArray( np.linspace(0, 365, num=364), dims="time", coords={"time": pd.date_range("1999-12-15", periods=364)}, ) actual = da.coarsen(time=2).mean() # type: ignore[attr-defined] @requires_cftime def test_coarsen_coords_cftime(): times = xr.date_range("2000", periods=6, use_cftime=True) da = xr.DataArray(range(6), [("time", times)]) actual = da.coarsen(time=3).mean() # type: ignore[attr-defined] expected_times = xr.date_range("2000-01-02", freq="3D", periods=2, use_cftime=True) np.testing.assert_array_equal(actual.time, expected_times) @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ], ) def test_coarsen_keep_attrs(funcname, argument) -> None: global_attrs = {"units": "test", "long_name": "testing"} da_attrs = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} da_not_coarsend_attrs = {"da_not_coarsend_attr": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) ds = Dataset( data_vars={ "da": ("coord", data, da_attrs), "da_not_coarsend": ("no_coord", data, da_not_coarsend_attrs), }, coords={"coord": ("coord", coords, attrs_coords)}, attrs=global_attrs, ) # attrs are kept by default func = getattr(ds.coarsen(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_coarsend.attrs == da_not_coarsend_attrs assert result.coord.attrs == attrs_coords assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # discard attrs func = getattr(ds.coarsen(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} 
assert result.da_not_coarsend.attrs == {} assert result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # test discard attrs using global option func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_coarsend.attrs == {} assert result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # keyword takes precedence over global option func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_coarsend.attrs == da_not_coarsend_attrs assert result.coord.attrs == attrs_coords assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_coarsend.attrs == {} assert result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" @pytest.mark.slow @pytest.mark.parametrize("ds", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median")) def test_coarsen_reduce(ds: Dataset, window, name) -> None: # Use boundary="trim" to accommodate all window sizes used in tests coarsen_obj = ds.coarsen(time=window, boundary="trim") # add nan prefix to numpy methods to get similar behavior as bottleneck actual = coarsen_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(coarsen_obj, name)() assert_allclose(actual, expected) # make sure the order of data_var are not changed. assert list(ds.data_vars.keys()) == list(actual.data_vars.keys()) # Make sure the dimension order is restored for key, src_var in ds.data_vars.items(): assert src_var.dims == actual[key].dims @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ], ) def test_coarsen_da_keep_attrs(funcname, argument) -> None: attrs_da = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) da = DataArray( data, dims=("coord"), coords={"coord": ("coord", coords, attrs_coords)}, attrs=attrs_da, name="name", ) # attrs are kept by default func = getattr(da.coarsen(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == attrs_da assert da.coord.attrs == attrs_coords assert result.name == "name" # discard attrs func = getattr(da.coarsen(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" # test discard attrs using global option func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" # keyword takes precedence over global option func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == attrs_da # XXX: no assert? 
_ = da.coord.attrs == {} assert result.name == "name" func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" @pytest.mark.parametrize("da", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "max")) def test_coarsen_da_reduce(da, window, name) -> None: if da.isnull().sum() > 1 and window == 1: pytest.skip("These parameters lead to all-NaN slices") # Use boundary="trim" to accommodate all window sizes used in tests coarsen_obj = da.coarsen(time=window, boundary="trim") # add nan prefix to numpy methods to get similar # behavior as bottleneck actual = coarsen_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(coarsen_obj, name)() assert_allclose(actual, expected) class TestCoarsenConstruct: @pytest.mark.parametrize("dask", [True, False]) def test_coarsen_construct(self, dask: bool) -> None: ds = Dataset( { "vart": ("time", np.arange(48), {"a": "b"}), "varx": ("x", np.arange(10), {"a": "b"}), "vartx": (("x", "time"), np.arange(480).reshape(10, 48), {"a": "b"}), "vary": ("y", np.arange(12)), }, coords={"time": np.arange(48), "y": np.arange(12)}, attrs={"foo": "bar"}, ) if dask and has_dask: ds = ds.chunk({"x": 4, "time": 10}) expected = xr.Dataset(attrs={"foo": "bar"}) expected["vart"] = ( ("year", "month"), duck_array_ops.reshape(ds.vart.data, (-1, 12)), {"a": "b"}, ) expected["varx"] = ( ("x", "x_reshaped"), duck_array_ops.reshape(ds.varx.data, (-1, 5)), {"a": "b"}, ) expected["vartx"] = ( ("x", "x_reshaped", "year", "month"), duck_array_ops.reshape(ds.vartx.data, (2, 5, 4, 12)), {"a": "b"}, ) expected["vary"] = ds.vary expected.coords["time"] = ( ("year", "month"), duck_array_ops.reshape(ds.time.data, (-1, 12)), ) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")} ) assert_identical(actual, expected) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( time=("year", "month"), x=("x", "x_reshaped") ) assert_identical(actual, expected) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")}, keep_attrs=False ) for var in actual: assert actual[var].attrs == {} assert actual.attrs == {} with raise_if_dask_computes(): actual = ds.vartx.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")} ) assert_identical(actual, expected["vartx"]) with pytest.raises(ValueError): ds.coarsen(time=12).construct(foo="bar") with pytest.raises(ValueError): ds.coarsen(time=12, x=2).construct(time=("year", "month")) with pytest.raises(ValueError): ds.coarsen(time=12).construct() with pytest.raises(ValueError): ds.coarsen(time=12).construct(time="bar") with pytest.raises(ValueError): ds.coarsen(time=12).construct(time=("bar",)) def test_coarsen_construct_keeps_all_coords(self): da = xr.DataArray(np.arange(24), dims=["time"]) da = da.assign_coords(day=365 * da) result = da.coarsen(time=12).construct(time=("year", "month")) assert list(da.coords) == list(result.coords) ds = da.to_dataset(name="T") ds_result = ds.coarsen(time=12).construct(time=("year", "month")) assert list(da.coords) == list(ds_result.coords) xarray-2025.12.0/xarray/tests/test_coding.py000066400000000000000000000120661511464676000206740ustar00rootroot00000000000000from __future__ import 
annotations from contextlib import suppress import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding import variables from xarray.conventions import decode_cf_variable, encode_cf_variable from xarray.tests import assert_allclose, assert_equal, assert_identical, requires_dask with suppress(ImportError): import dask.array as da def test_CFMaskCoder_decode() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}) expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() encoded = coder.decode(original) assert_identical(expected, encoded) encoding_with_dtype = { "dtype": np.dtype("float64"), "_FillValue": np.float32(1e20), "missing_value": np.float64(1e20), } encoding_without_dtype = { "_FillValue": np.float32(1e20), "missing_value": np.float64(1e20), } CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS = { "numeric-with-dtype": ([0.0, -1.0, 1.0], encoding_with_dtype), "numeric-without-dtype": ([0.0, -1.0, 1.0], encoding_without_dtype), "times-with-dtype": (pd.date_range("2000", periods=3), encoding_with_dtype), } @pytest.mark.parametrize( ("data", "encoding"), CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.values(), ids=list(CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.keys()), ) def test_CFMaskCoder_encode_missing_fill_values_conflict(data, encoding) -> None: original = xr.Variable(("x",), data, encoding=encoding) encoded = encode_cf_variable(original) assert encoded.dtype == encoded.attrs["missing_value"].dtype assert encoded.dtype == encoded.attrs["_FillValue"].dtype roundtripped = decode_cf_variable("foo", encoded) assert_identical(roundtripped, original) def test_CFMaskCoder_missing_value() -> None: expected = xr.DataArray( np.array([[26915, 27755, -9999, 27705], [25595, -9999, 28315, -9999]]), dims=["npts", "ntimes"], name="tmpk", ) expected.attrs["missing_value"] = -9999 decoded = xr.decode_cf(expected.to_dataset()) encoded, _ = xr.conventions.cf_encoder(decoded.variables, decoded.attrs) assert_equal(encoded["tmpk"], expected.variable) decoded.tmpk.encoding["_FillValue"] = -9940 with pytest.raises(ValueError): encoded, _ = xr.conventions.cf_encoder(decoded.variables, decoded.attrs) @requires_dask def test_CFMaskCoder_decode_dask() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}).chunk() expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() encoded = coder.decode(original) assert isinstance(encoded.data, da.Array) assert_identical(expected, encoded) # TODO(shoyer): port other fill-value tests # TODO(shoyer): parameterize when we have more coders def test_coder_roundtrip() -> None: original = xr.Variable(("x",), [0.0, np.nan, 1.0]) coder = variables.CFMaskCoder() roundtripped = coder.decode(coder.encode(original)) assert_identical(original, roundtripped) @pytest.mark.parametrize("dtype", ["u1", "u2", "i1", "i2", "f2", "f4"]) @pytest.mark.parametrize("dtype2", ["f4", "f8"]) def test_scaling_converts_to_float(dtype: str, dtype2: str) -> None: dt = np.dtype(dtype2) original = xr.Variable( ("x",), np.arange(10, dtype=dtype), encoding=dict(scale_factor=dt.type(10)) ) coder = variables.CFScaleOffsetCoder() encoded = coder.encode(original) assert encoded.dtype == dt roundtripped = coder.decode(encoded) assert_identical(original, roundtripped) assert roundtripped.dtype == dt @pytest.mark.parametrize("scale_factor", (10, [10])) @pytest.mark.parametrize("add_offset", (0.1, [0.1])) def test_scaling_offset_as_list(scale_factor, add_offset) -> None: # test for #4631 encoding = 
dict(scale_factor=scale_factor, add_offset=add_offset) original = xr.Variable(("x",), np.arange(10.0), encoding=encoding) coder = variables.CFScaleOffsetCoder() encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert_allclose(original, roundtripped) @pytest.mark.parametrize("bits", [1, 2, 4, 8]) def test_decode_unsigned_from_signed(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([np.iinfo(unsigned_dtype).max], dtype=unsigned_dtype) encoded = xr.Variable( ("x",), original_values.astype(signed_dtype), attrs={"_Unsigned": "true"} ) coder = variables.CFMaskCoder() decoded = coder.decode(encoded) assert decoded.dtype == unsigned_dtype assert decoded.values == original_values @pytest.mark.parametrize("bits", [1, 2, 4, 8]) def test_decode_signed_from_unsigned(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([-1], dtype=signed_dtype) encoded = xr.Variable( ("x",), original_values.astype(unsigned_dtype), attrs={"_Unsigned": "false"} ) coder = variables.CFMaskCoder() decoded = coder.decode(encoded) assert decoded.dtype == signed_dtype assert decoded.values == original_values xarray-2025.12.0/xarray/tests/test_coding_strings.py000066400000000000000000000224101511464676000224370ustar00rootroot00000000000000from __future__ import annotations from contextlib import suppress import numpy as np import pytest from xarray import Variable from xarray.coding import strings from xarray.core import indexing from xarray.tests import ( IndexerMaker, assert_array_equal, assert_identical, requires_dask, ) with suppress(ImportError): import dask.array as da def test_vlen_dtype() -> None: dtype = strings.create_vlen_dtype(str) assert dtype.metadata["element_type"] is str assert strings.is_unicode_dtype(dtype) assert not strings.is_bytes_dtype(dtype) assert strings.check_vlen_dtype(dtype) is str dtype = strings.create_vlen_dtype(bytes) assert dtype.metadata["element_type"] is bytes assert not strings.is_unicode_dtype(dtype) assert strings.is_bytes_dtype(dtype) assert strings.check_vlen_dtype(dtype) is bytes # check h5py variant ("vlen") dtype = np.dtype("O", metadata={"vlen": str}) # type: ignore[call-overload,unused-ignore] assert strings.check_vlen_dtype(dtype) is str assert strings.check_vlen_dtype(np.dtype(object)) is None @pytest.mark.parametrize("numpy_str_type", (np.str_, np.bytes_)) def test_numpy_subclass_handling(numpy_str_type) -> None: with pytest.raises(TypeError, match="unsupported type for vlen_dtype"): strings.create_vlen_dtype(numpy_str_type) def test_EncodedStringCoder_decode() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "รŸโˆ‚ยตโˆ†".encode()]) raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"}) actual = coder.decode(raw) expected = Variable(("x",), np.array(["abc", "รŸโˆ‚ยตโˆ†"], dtype=object)) assert_identical(actual, expected) assert_identical(coder.decode(actual[0]), expected[0]) @requires_dask def test_EncodedStringCoder_decode_dask() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "รŸโˆ‚ยตโˆ†".encode()]) raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"}).chunk() actual = coder.decode(raw) assert isinstance(actual.data, da.Array) expected = Variable(("x",), np.array(["abc", "รŸโˆ‚ยตโˆ†"], dtype=object)) assert_identical(actual, expected) actual_indexed = coder.decode(actual[0]) assert isinstance(actual_indexed.data, da.Array) assert_identical(actual_indexed, expected[0]) def 
test_EncodedStringCoder_encode() -> None: dtype = strings.create_vlen_dtype(str) raw_data = np.array(["abc", "รŸโˆ‚ยตโˆ†"], dtype=dtype) expected_data = np.array([r.encode("utf-8") for r in raw_data], dtype=object) coder = strings.EncodedStringCoder(allows_unicode=True) raw = Variable(("x",), raw_data, encoding={"dtype": "S1"}) actual = coder.encode(raw) expected = Variable(("x",), expected_data, attrs={"_Encoding": "utf-8"}) assert_identical(actual, expected) raw = Variable(("x",), raw_data) assert_identical(coder.encode(raw), raw) coder = strings.EncodedStringCoder(allows_unicode=False) assert_identical(coder.encode(raw), expected) @pytest.mark.parametrize( "original", [ Variable(("x",), [b"ab", b"cdef"]), Variable((), b"ab"), Variable(("x",), [b"a", b"b"]), Variable((), b"a"), ], ) def test_CharacterArrayCoder_roundtrip(original) -> None: coder = strings.CharacterArrayCoder() roundtripped = coder.decode(coder.encode(original)) assert_identical(original, roundtripped) @pytest.mark.parametrize( "data", [ np.array([b"a", b"bc"]), np.array([b"a", b"bc"], dtype=strings.create_vlen_dtype(bytes)), ], ) def test_CharacterArrayCoder_encode(data) -> None: coder = strings.CharacterArrayCoder() raw = Variable(("x",), data) actual = coder.encode(raw) expected = Variable(("x", "string2"), np.array([[b"a", b""], [b"b", b"c"]])) assert_identical(actual, expected) @pytest.mark.parametrize( ["original", "expected_char_dim_name"], [ (Variable(("x",), [b"ab", b"cdef"]), "string4"), (Variable(("x",), [b"ab", b"cdef"], encoding={"char_dim_name": "foo"}), "foo"), ], ) def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name) -> None: coder = strings.CharacterArrayCoder() encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert encoded.dims[-1] == expected_char_dim_name assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name assert roundtripped.dims[-1] == original.dims[-1] @pytest.mark.parametrize( [ "original", "expected_char_dim_name", "expected_char_dim_length", "warning_message", ], [ ( Variable(("x",), [b"ab", b"cde"], encoding={"char_dim_name": "foo4"}), "foo3", 3, "String dimension naming mismatch", ), ( Variable( ("x",), [b"ab", b"cde"], encoding={"original_shape": (2, 4), "char_dim_name": "foo"}, ), "foo3", 3, "String dimension length mismatch", ), ], ) def test_CharacterArrayCoder_dim_mismatch_warnings( original, expected_char_dim_name, expected_char_dim_length, warning_message ) -> None: coder = strings.CharacterArrayCoder() with pytest.warns(UserWarning, match=warning_message): encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert encoded.dims[-1] == expected_char_dim_name assert encoded.sizes[expected_char_dim_name] == expected_char_dim_length assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name assert roundtripped.dims[-1] == original.dims[-1] def test_StackedBytesArray() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") actual = strings.StackedBytesArray(array) expected = np.array([b"abc", b"def"], dtype="S") assert actual.dtype == expected.dtype assert actual.shape == expected.shape assert actual.size == expected.size assert actual.ndim == expected.ndim assert len(actual) == len(expected) assert_array_equal(expected, actual) B = IndexerMaker(indexing.BasicIndexer) assert_array_equal(expected[:1], actual[B[:1]]) with pytest.raises(IndexError): actual[B[:, :2]] def test_StackedBytesArray_scalar() -> None: array = np.array([b"a", b"b", b"c"], dtype="S") actual = 
strings.StackedBytesArray(array) expected = np.array(b"abc") assert actual.dtype == expected.dtype assert actual.shape == expected.shape assert actual.size == expected.size assert actual.ndim == expected.ndim with pytest.raises(TypeError): len(actual) np.testing.assert_array_equal(expected, actual) B = IndexerMaker(indexing.BasicIndexer) with pytest.raises(IndexError): actual[B[:2]] def test_StackedBytesArray_vectorized_indexing() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") stacked = strings.StackedBytesArray(array) expected = np.array([[b"abc", b"def"], [b"def", b"abc"]]) V = IndexerMaker(indexing.VectorizedIndexer) indexer = V[np.array([[0, 1], [1, 0]])] actual = stacked.vindex[indexer] assert_array_equal(actual, expected) def test_char_to_bytes() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) expected = np.array([b"abc", b"def"]) actual = strings.char_to_bytes(array) assert_array_equal(actual, expected) expected = np.array([b"ad", b"be", b"cf"]) actual = strings.char_to_bytes(array.T) # non-contiguous assert_array_equal(actual, expected) def test_char_to_bytes_ndim_zero() -> None: expected = np.array(b"a") actual = strings.char_to_bytes(expected) assert_array_equal(actual, expected) def test_char_to_bytes_size_zero() -> None: array = np.zeros((3, 0), dtype="S1") expected = np.array([b"", b"", b""]) actual = strings.char_to_bytes(array) assert_array_equal(actual, expected) @requires_dask def test_char_to_bytes_dask() -> None: numpy_array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) array = da.from_array(numpy_array, ((2,), (3,))) expected = np.array([b"abc", b"def"]) actual = strings.char_to_bytes(array) assert isinstance(actual, da.Array) assert actual.chunks == ((2,),) assert actual.dtype == "S3" assert_array_equal(np.array(actual), expected) with pytest.raises(ValueError, match=r"stacked dask character array"): strings.char_to_bytes(array.rechunk(1)) def test_bytes_to_char() -> None: array = np.array([[b"ab", b"cd"], [b"ef", b"gh"]]) expected = np.array([[[b"a", b"b"], [b"c", b"d"]], [[b"e", b"f"], [b"g", b"h"]]]) actual = strings.bytes_to_char(array) assert_array_equal(actual, expected) expected = np.array([[[b"a", b"b"], [b"e", b"f"]], [[b"c", b"d"], [b"g", b"h"]]]) actual = strings.bytes_to_char(array.T) # non-contiguous assert_array_equal(actual, expected) @requires_dask def test_bytes_to_char_dask() -> None: numpy_array = np.array([b"ab", b"cd"]) array = da.from_array(numpy_array, ((1, 1),)) expected = np.array([[b"a", b"b"], [b"c", b"d"]]) actual = strings.bytes_to_char(array) assert isinstance(actual, da.Array) assert actual.chunks == ((1, 1), ((2,))) assert actual.dtype == "S1" assert_array_equal(np.array(actual), expected) xarray-2025.12.0/xarray/tests/test_coding_times.py000066400000000000000000002374761511464676000221130ustar00rootroot00000000000000from __future__ import annotations import warnings from datetime import datetime, timedelta from itertools import product, starmap from typing import Literal import numpy as np import pandas as pd import pytest from pandas.errors import OutOfBoundsDatetime, OutOfBoundsTimedelta from xarray import ( DataArray, Dataset, Variable, conventions, date_range, decode_cf, ) from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding.times import ( _encode_datetime_with_cftime, _netcdf_to_numpy_timeunit, _numpy_to_netcdf_timeunit, _should_cftime_be_used, cftime_to_nptime, decode_cf_datetime, decode_cf_timedelta, encode_cf_datetime, encode_cf_timedelta, 
format_cftime_datetime, infer_datetime_units, infer_timedelta_units, ) from xarray.coding.variables import SerializationWarning from xarray.conventions import _update_bounds_attributes, cf_encoder from xarray.core.common import contains_cftime_datetimes from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import is_duck_dask_array from xarray.testing import assert_equal, assert_identical from xarray.tests import ( _ALL_CALENDARS, _NON_STANDARD_CALENDARS, _STANDARD_CALENDAR_NAMES, _STANDARD_CALENDARS, DuckArrayWrapper, FirstElementAccessibleArray, _all_cftime_date_types, arm_xfail, assert_array_equal, assert_duckarray_allclose, assert_duckarray_equal, assert_no_warnings, has_cftime, requires_cftime, requires_dask, ) _CF_DATETIME_NUM_DATES_UNITS = [ (np.arange(10), "days since 2000-01-01", "s"), (np.arange(10).astype("float64"), "days since 2000-01-01", "s"), (np.arange(10).astype("float32"), "days since 2000-01-01", "s"), (np.arange(10).reshape(2, 5), "days since 2000-01-01", "s"), (12300 + np.arange(5), "hours since 1680-01-01 00:00:00", "s"), # here we add a couple minor formatting errors to test # the robustness of the parsing algorithm. (12300 + np.arange(5), "hour since 1680-01-01 00:00:00", "s"), (12300 + np.arange(5), "Hour since 1680-01-01 00:00:00", "s"), (12300 + np.arange(5), " Hour since 1680-01-01 00:00:00 ", "s"), (10, "days since 2000-01-01", "s"), ([10], "daYs since 2000-01-01", "s"), ([[10]], "days since 2000-01-01", "s"), ([10, 10], "days since 2000-01-01", "s"), (np.array(10), "days since 2000-01-01", "s"), (0, "days since 1000-01-01", "s"), ([0], "days since 1000-01-01", "s"), ([[0]], "days since 1000-01-01", "s"), (np.arange(2), "days since 1000-01-01", "s"), (np.arange(0, 100000, 20000), "days since 1900-01-01", "s"), (np.arange(0, 100000, 20000), "days since 1-01-01", "s"), (17093352.0, "hours since 1-1-1 00:00:0.0", "s"), ([0.5, 1.5], "hours since 1900-01-01T00:00:00", "s"), (0, "milliseconds since 2000-01-01T00:00:00", "s"), (0, "microseconds since 2000-01-01T00:00:00", "s"), (np.int32(788961600), "seconds since 1981-01-01", "s"), # GH2002 (12300 + np.arange(5), "hour since 1680-01-01 00:00:00.500000", "us"), (164375, "days since 1850-01-01 00:00:00", "s"), (164374.5, "days since 1850-01-01 00:00:00", "s"), ([164374.5, 168360.5], "days since 1850-01-01 00:00:00", "s"), ] _CF_DATETIME_TESTS = [ num_dates_units + (calendar,) for num_dates_units, calendar in product( _CF_DATETIME_NUM_DATES_UNITS, _STANDARD_CALENDAR_NAMES ) ] @requires_cftime @pytest.mark.filterwarnings("ignore:Ambiguous reference date string") @pytest.mark.filterwarnings("ignore:Times can't be serialized faithfully") @pytest.mark.parametrize( ["num_dates", "units", "minimum_resolution", "calendar"], _CF_DATETIME_TESTS ) def test_cf_datetime( num_dates, units: str, minimum_resolution: PDDatetimeUnitOptions, calendar: str, time_unit: PDDatetimeUnitOptions, ) -> None: import cftime expected = cftime.num2date( num_dates, units, calendar, only_use_cftime_datetimes=True ) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_dates, units, calendar, time_unit=time_unit) if actual.dtype.kind != "O": if np.timedelta64(1, time_unit) > np.timedelta64(1, minimum_resolution): expected_unit = minimum_resolution else: expected_unit = time_unit expected = cftime_to_nptime(expected, time_unit=expected_unit) assert_array_equal(actual, expected) encoded1, _, _ = encode_cf_datetime(actual, units, calendar) 
assert_array_equal(num_dates, encoded1) if hasattr(num_dates, "ndim") and num_dates.ndim == 1 and "1000" not in units: # verify that wrapping with a pandas.Index works # note that it *does not* currently work to put # non-datetime64 compatible dates into a pandas.Index encoded2, _, _ = encode_cf_datetime(pd.Index(actual), units, calendar) assert_array_equal(num_dates, encoded2) @requires_cftime def test_decode_cf_datetime_overflow(time_unit: PDDatetimeUnitOptions) -> None: # checks for # https://github.com/pydata/pandas/issues/14068 # https://github.com/pydata/xarray/issues/975 from cftime import DatetimeGregorian datetime = DatetimeGregorian units = "days since 2000-01-01 00:00:00" # date after 2262 and before 1678 days = (-117710, 95795) expected = (datetime(1677, 9, 20), datetime(2262, 4, 12)) for i, day in enumerate(days): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") result = decode_cf_datetime( day, units, calendar="standard", time_unit=time_unit ) assert result == expected[i] # additional check to see if type/dtypes are correct if time_unit == "ns": assert isinstance(result.item(), datetime) else: assert result.dtype == np.dtype(f"=M8[{time_unit}]") def test_decode_cf_datetime_non_standard_units() -> None: expected = pd.date_range(periods=100, start="1970-01-01", freq="h") # netCDFs from madis.noaa.gov use this format for their time units # they cannot be parsed by cftime, but pd.Timestamp works units = "hours since 1-1-1970" actual = decode_cf_datetime(np.arange(100), units) assert_array_equal(actual, expected) @requires_cftime def test_decode_cf_datetime_non_iso_strings() -> None: # datetime strings that are _almost_ ISO compliant but not quite, # but which cftime.num2date can still parse correctly expected = pd.date_range(periods=100, start="2000-01-01", freq="h") cases = [ (np.arange(100), "hours since 2000-01-01 0"), (np.arange(100), "hours since 2000-1-1 0"), (np.arange(100), "hours since 2000-01-01 0:00"), ] for num_dates, units in cases: actual = decode_cf_datetime(num_dates, units) assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_decode_standard_calendar_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "hours since 0001-01-01" times = pd.date_range( "2001-04-01-00", end="2001-04-30-23", unit=time_unit, freq="h" ) # to_pydatetime() will return microsecond time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar) expected = times.values # for cftime we get "us" resolution # ns resolution is handled by cftime due to the reference date # being out of bounds, but the times themselves are # representable with nanosecond resolution. 
actual = decode_cf_datetime(time, units, calendar=calendar, time_unit=time_unit) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_non_standard_calendar_inside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" times = pd.date_range("2001-04-01-00", end="2001-04-30-23", freq="h") non_standard_time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar) expected = cftime.num2date( non_standard_time, units, calendar=calendar, only_use_cftime_datetimes=True ) expected_dtype = np.dtype("O") actual = decode_cf_datetime(non_standard_time, units, calendar=calendar) assert actual.dtype == expected_dtype assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_dates_outside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "days since 0001-01-01" times = [datetime(1, 4, 1, h) for h in range(1, 5)] time = cftime.date2num(times, units, calendar=calendar) expected = cftime.num2date( time, units, calendar=calendar, only_use_cftime_datetimes=True ) if calendar == "proleptic_gregorian" and time_unit != "ns": expected = cftime_to_nptime(expected, time_unit=time_unit) expected_date_type = type(expected[0]) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(time, units, calendar=calendar, time_unit=time_unit) assert all(isinstance(value, expected_date_type) for value in actual) assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.parametrize("num_time", [735368, [735368], [[735368]]]) def test_decode_standard_calendar_single_element_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions, num_time, ) -> None: units = "days since 0001-01-01" with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime( num_time, units, calendar=calendar, time_unit=time_unit ) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_non_standard_calendar_single_element_inside_timestamp_range( calendar, ) -> None: units = "days since 0001-01-01" for num_time in [735368, [735368], [[735368]]]: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_time, units, calendar=calendar) assert actual.dtype == np.dtype("O") @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_single_element_outside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" for days in [1, 1470376]: for num_time in [days, [days], [[days]]]: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_time, units, calendar=calendar) expected = cftime.num2date( days, units, calendar, only_use_cftime_datetimes=True ) assert isinstance(actual.item(), type(expected)) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_decode_standard_calendar_multidim_time_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions, ) -> None: import cftime units = "days since 0001-01-01" times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D") 
times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D") time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar) time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 expected1 = times1.values expected2 = times2.values actual = decode_cf_datetime( mdim_time, units, calendar=calendar, time_unit=time_unit ) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range( calendar, ) -> None: import cftime units = "days since 0001-01-01" times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D") times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D") time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar) time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 if cftime.__name__ == "cftime": expected1 = cftime.num2date( time1, units, calendar, only_use_cftime_datetimes=True ) expected2 = cftime.num2date( time2, units, calendar, only_use_cftime_datetimes=True ) else: expected1 = cftime.num2date(time1, units, calendar) expected2 = cftime.num2date(time2, units, calendar) expected_dtype = np.dtype("O") actual = decode_cf_datetime(mdim_time, units, calendar=calendar) assert actual.dtype == expected_dtype assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_multidim_time_outside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "days since 0001-01-01" times1 = [datetime(1, 4, day) for day in range(1, 6)] times2 = [datetime(1, 5, day) for day in range(1, 6)] time1 = cftime.date2num(times1, units, calendar=calendar) time2 = cftime.date2num(times2, units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 expected1 = cftime.num2date(time1, units, calendar, only_use_cftime_datetimes=True) expected2 = cftime.num2date(time2, units, calendar, only_use_cftime_datetimes=True) if calendar == "proleptic_gregorian" and time_unit != "ns": expected1 = cftime_to_nptime(expected1, time_unit=time_unit) expected2 = cftime_to_nptime(expected2, time_unit=time_unit) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime( mdim_time, units, calendar=calendar, time_unit=time_unit ) dtype: np.dtype dtype = np.dtype("O") if calendar == "proleptic_gregorian" and time_unit != "ns": dtype = np.dtype(f"=M8[{time_unit}]") assert actual.dtype == dtype assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize( ("calendar", "num_time"), [("360_day", 720058.0), ("all_leap", 732059.0), ("366_day", 732059.0)], ) def test_decode_non_standard_calendar_single_element(calendar, num_time) -> None: import cftime units = "days since 0001-01-01" actual = decode_cf_datetime(num_time, units, calendar=calendar) expected = np.asarray( cftime.num2date(num_time, units, calendar, only_use_cftime_datetimes=True) ) assert actual.dtype == np.dtype("O") assert expected == actual @requires_cftime def 
test_decode_360_day_calendar() -> None: import cftime calendar = "360_day" # ensure leap year doesn't matter for year in [2010, 2011, 2012, 2013, 2014]: units = f"days since {year}-01-01" num_times = np.arange(100) expected = cftime.num2date( num_times, units, calendar, only_use_cftime_datetimes=True ) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") actual = decode_cf_datetime(num_times, units, calendar=calendar) assert len(w) == 0 assert actual.dtype == np.dtype("O") assert_array_equal(actual, expected) @requires_cftime def test_decode_abbreviation() -> None: """Test making sure we properly fall back to cftime on abbreviated units.""" import cftime val = np.array([1586628000000.0]) units = "msecs since 1970-01-01T00:00:00Z" actual = decode_cf_datetime(val, units) expected = cftime_to_nptime(cftime.num2date(val, units)) assert_array_equal(actual, expected) @arm_xfail @requires_cftime @pytest.mark.parametrize( ["num_dates", "units", "expected_list"], [ ([np.nan], "days since 2000-01-01", ["NaT"]), ([np.nan, 0], "days since 2000-01-01", ["NaT", "2000-01-01T00:00:00Z"]), ( [np.nan, 0, 1], "days since 2000-01-01", ["NaT", "2000-01-01T00:00:00Z", "2000-01-02T00:00:00Z"], ), ], ) def test_cf_datetime_nan(num_dates, units, expected_list) -> None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN") actual = decode_cf_datetime(num_dates, units) # use pandas because numpy will deprecate timezone-aware conversions expected = pd.to_datetime(expected_list).to_numpy(dtype="datetime64[ns]") assert_array_equal(expected, actual) @requires_cftime def test_decoded_cf_datetime_array_2d(time_unit: PDDatetimeUnitOptions) -> None: # regression test for GH1229 variable = Variable( ("x", "y"), np.array([[0, 1], [2, 3]]), {"units": "days since 2000-01-01"} ) result = CFDatetimeCoder(time_unit=time_unit).decode(variable) assert result.dtype == f"datetime64[{time_unit}]" expected = pd.date_range("2000-01-01", periods=4).values.reshape(2, 2) assert_array_equal(np.asarray(result), expected) @pytest.mark.parametrize("decode_times", [True, False]) @pytest.mark.parametrize("mask_and_scale", [True, False]) def test_decode_datetime_mask_and_scale( decode_times: bool, mask_and_scale: bool ) -> None: attrs = { "units": "nanoseconds since 1970-01-01", "calendar": "proleptic_gregorian", "_FillValue": np.int16(-1), "add_offset": 100000.0, } encoded = Variable(["time"], np.array([0, -1, 1], "int16"), attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, mask_and_scale=mask_and_scale, decode_times=decode_times ) result = conventions.encode_cf_variable(decoded, name="foo") assert_identical(encoded, result) assert encoded.dtype == result.dtype FREQUENCIES_TO_ENCODING_UNITS = { "ns": "nanoseconds", "us": "microseconds", "ms": "milliseconds", "s": "seconds", "min": "minutes", "h": "hours", "D": "days", } @pytest.mark.parametrize(("freq", "units"), FREQUENCIES_TO_ENCODING_UNITS.items()) def test_infer_datetime_units(freq, units) -> None: dates = pd.date_range("2000", periods=2, freq=freq) expected = f"{units} since 2000-01-01 00:00:00" assert expected == infer_datetime_units(dates) @pytest.mark.parametrize( ["dates", "expected"], [ ( pd.to_datetime(["1900-01-01", "1900-01-02", "NaT"], unit="ns"), "days since 1900-01-01 00:00:00", ), ( pd.to_datetime(["NaT", "1900-01-01"], unit="ns"), "days since 1900-01-01 00:00:00", ), (pd.to_datetime(["NaT"], unit="ns"), "days since 1970-01-01 00:00:00"), ], ) def test_infer_datetime_units_with_NaT(dates, expected) -> None: 
assert expected == infer_datetime_units(dates) _CFTIME_DATETIME_UNITS_TESTS = [ ([(1900, 1, 1), (1900, 1, 1)], "days since 1900-01-01 00:00:00.000000"), ( [(1900, 1, 1), (1900, 1, 2), (1900, 1, 2, 0, 0, 1)], "seconds since 1900-01-01 00:00:00.000000", ), ( [(1900, 1, 1), (1900, 1, 8), (1900, 1, 16)], "days since 1900-01-01 00:00:00.000000", ), ] @requires_cftime @pytest.mark.parametrize( "calendar", _NON_STANDARD_CALENDARS + ["gregorian", "proleptic_gregorian"] ) @pytest.mark.parametrize(("date_args", "expected"), _CFTIME_DATETIME_UNITS_TESTS) def test_infer_cftime_datetime_units(calendar, date_args, expected) -> None: date_type = _all_cftime_date_types()[calendar] dates = list(starmap(date_type, date_args)) assert expected == infer_datetime_units(dates) @pytest.mark.filterwarnings("ignore:Timedeltas can't be serialized faithfully") @pytest.mark.parametrize( ["timedeltas", "units", "numbers"], [ ("1D", "days", np.int64(1)), (["1D", "2D", "3D"], "days", np.array([1, 2, 3], "int64")), ("1h", "hours", np.int64(1)), ("1ms", "milliseconds", np.int64(1)), ("1us", "microseconds", np.int64(1)), ("1ns", "nanoseconds", np.int64(1)), (["NaT", "0s", "1s"], None, [np.iinfo(np.int64).min, 0, 1]), (["30m", "60m"], "hours", [0.5, 1.0]), ("NaT", "days", np.iinfo(np.int64).min), (["NaT", "NaT"], "days", [np.iinfo(np.int64).min, np.iinfo(np.int64).min]), ], ) def test_cf_timedelta(timedeltas, units, numbers) -> None: if timedeltas == "NaT": timedeltas = np.timedelta64("NaT", "ns") else: timedeltas = pd.to_timedelta(timedeltas).to_numpy() numbers = np.array(numbers) expected = numbers actual, _ = encode_cf_timedelta(timedeltas, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype if units is not None: expected = timedeltas actual = decode_cf_timedelta(numbers, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype expected = np.timedelta64("NaT", "ns") actual = decode_cf_timedelta(np.array(np.nan), "days") assert_array_equal(expected, actual) assert expected.dtype == actual.dtype def test_cf_timedelta_2d() -> None: units = "days" numbers = np.atleast_2d([1, 2, 3]) timedeltas = np.atleast_2d(pd.to_timedelta(["1D", "2D", "3D"]).to_numpy()) expected = timedeltas actual = decode_cf_timedelta(numbers, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype @pytest.mark.parametrize("encoding_unit", FREQUENCIES_TO_ENCODING_UNITS.values()) def test_decode_cf_timedelta_time_unit( time_unit: PDDatetimeUnitOptions, encoding_unit ) -> None: encoded = 1 encoding_unit_as_numpy = _netcdf_to_numpy_timeunit(encoding_unit) if np.timedelta64(1, time_unit) > np.timedelta64(1, encoding_unit_as_numpy): expected = np.timedelta64(encoded, encoding_unit_as_numpy) else: expected = np.timedelta64(encoded, encoding_unit_as_numpy).astype( f"timedelta64[{time_unit}]" ) result = decode_cf_timedelta(encoded, encoding_unit, time_unit) assert result == expected assert result.dtype == expected.dtype def test_decode_cf_timedelta_time_unit_out_of_bounds( time_unit: PDDatetimeUnitOptions, ) -> None: # Define a scale factor that will guarantee overflow with the given # time_unit. 
    scale_factor = np.timedelta64(1, time_unit) // np.timedelta64(1, "ns")
    encoded = scale_factor * 300 * 365
    with pytest.raises(OutOfBoundsTimedelta):
        decode_cf_timedelta(encoded, "days", time_unit)


def test_cf_timedelta_roundtrip_large_value(time_unit: PDDatetimeUnitOptions) -> None:
    value = np.timedelta64(np.iinfo(np.int64).max, time_unit)
    encoded, units = encode_cf_timedelta(value)
    decoded = decode_cf_timedelta(encoded, units, time_unit=time_unit)
    assert value == decoded
    assert value.dtype == decoded.dtype


@pytest.mark.parametrize(
    ["deltas", "expected"],
    [
        (pd.to_timedelta(["1 day", "2 days"]), "days"),
        (pd.to_timedelta(["1h", "1 day 1 hour"]), "hours"),
        (pd.to_timedelta(["1m", "2m", np.nan]), "minutes"),
        (pd.to_timedelta(["1m3s", "1m4s"]), "seconds"),
    ],
)
def test_infer_timedelta_units(deltas, expected) -> None:
    assert expected == infer_timedelta_units(deltas)


@requires_cftime
@pytest.mark.parametrize(
    ["date_args", "expected"],
    [
        ((1, 2, 3, 4, 5, 6), "0001-02-03 04:05:06.000000"),
        ((10, 2, 3, 4, 5, 6), "0010-02-03 04:05:06.000000"),
        ((100, 2, 3, 4, 5, 6), "0100-02-03 04:05:06.000000"),
        ((1000, 2, 3, 4, 5, 6), "1000-02-03 04:05:06.000000"),
    ],
)
def test_format_cftime_datetime(date_args, expected) -> None:
    date_types = _all_cftime_date_types()
    for date_type in date_types.values():
        result = format_cftime_datetime(date_type(*date_args))
        assert result == expected


@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_decode_cf(calendar, time_unit: PDDatetimeUnitOptions) -> None:
    days = [1.0, 2.0, 3.0]
    # TODO: GH5690 - do we want to allow this type for `coords`?
    da = DataArray(days, coords=[days], dims=["time"], name="test")
    ds = da.to_dataset()

    for v in ["test", "time"]:
        ds[v].attrs["units"] = "days since 2001-01-01"
        ds[v].attrs["calendar"] = calendar

    if not has_cftime and calendar not in _STANDARD_CALENDAR_NAMES:
        with pytest.raises(ValueError):
            ds = decode_cf(ds)
    else:
        ds = decode_cf(ds, decode_times=CFDatetimeCoder(time_unit=time_unit))

        if calendar not in _STANDARD_CALENDAR_NAMES:
            assert ds.test.dtype == np.dtype("O")
        else:
            assert ds.test.dtype == np.dtype(f"=M8[{time_unit}]")


def test_decode_cf_time_bounds(time_unit: PDDatetimeUnitOptions) -> None:
    da = DataArray(
        np.arange(6, dtype="int64").reshape((3, 2)),
        coords={"time": [1, 2, 3]},
        dims=("time", "nbnd"),
        name="time_bnds",
    )
    attrs = {
        "units": "days since 2001-01",
        "calendar": "standard",
        "bounds": "time_bnds",
    }

    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    _update_bounds_attributes(ds.variables)
    assert ds.variables["time_bnds"].attrs == {
        "units": "days since 2001-01",
        "calendar": "standard",
    }
    dsc = decode_cf(ds, decode_times=CFDatetimeCoder(time_unit=time_unit))
    assert dsc.time_bnds.dtype == np.dtype(f"=M8[{time_unit}]")
    dsc = decode_cf(ds, decode_times=False)
    assert dsc.time_bnds.dtype == np.dtype("int64")

    # Do not overwrite existing attrs
    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    bnd_attr = {"units": "hours since 2001-01", "calendar": "noleap"}
    ds["time_bnds"].attrs.update(bnd_attr)
    _update_bounds_attributes(ds.variables)
    assert ds.variables["time_bnds"].attrs == bnd_attr

    # If bounds variable not available do not complain
    ds = da.to_dataset()
    ds["time"].attrs.update(attrs)
    ds["time"].attrs["bounds"] = "fake_var"
    _update_bounds_attributes(ds.variables)


@requires_cftime
def test_encode_time_bounds() -> None:
    time = pd.date_range("2000-01-16", periods=1)
    time_bounds = pd.date_range("2000-01-01", periods=2, freq="MS")
    ds = Dataset(dict(time=time, time_bounds=time_bounds))
    ds.time.attrs = {"bounds": 
"time_bounds"} ds.time.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"} expected = {} # expected['time'] = Variable(data=np.array([15]), dims=['time']) expected["time_bounds"] = Variable(data=np.array([0, 31]), dims=["time_bounds"]) encoded, _ = cf_encoder(ds.variables, ds.attrs) assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert "units" not in encoded["time_bounds"].attrs # if time_bounds attrs are same as time attrs, it doesn't matter ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"} encoded, _ = cf_encoder(dict(ds.variables.items()), ds.attrs) assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert "units" not in encoded["time_bounds"].attrs # for CF-noncompliant case of time_bounds attrs being different from # time attrs; preserve them for faithful roundtrip ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 1849-01-01"} encoded, _ = cf_encoder(dict(ds.variables.items()), ds.attrs) with pytest.raises(AssertionError): assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert encoded["time_bounds"].attrs["units"] == ds.time_bounds.encoding["units"] ds.time.encoding = {} with pytest.warns(UserWarning): cf_encoder(ds.variables, ds.attrs) @pytest.fixture(params=_ALL_CALENDARS) def calendar(request): return request.param @pytest.fixture def times(calendar): import cftime return cftime.num2date( np.arange(4), units="hours since 2000-01-01", calendar=calendar, only_use_cftime_datetimes=True, ) @pytest.fixture def data(times): data = np.random.rand(2, 2, 4) lons = np.linspace(0, 11, 2) lats = np.linspace(0, 20, 2) return DataArray( data, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @pytest.fixture def times_3d(times): lons = np.linspace(0, 11, 2) lats = np.linspace(0, 20, 2) times_arr = np.random.choice(times, size=(2, 2, 4)) return DataArray( times_arr, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @requires_cftime def test_contains_cftime_datetimes_1d(data) -> None: assert contains_cftime_datetimes(data.time.variable) @requires_cftime @requires_dask def test_contains_cftime_datetimes_dask_1d(data) -> None: assert contains_cftime_datetimes(data.time.variable.chunk()) @requires_cftime def test_contains_cftime_datetimes_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d.variable) @requires_cftime @requires_dask def test_contains_cftime_datetimes_dask_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d.variable.chunk()) @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) def test_contains_cftime_datetimes_non_cftimes(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data.variable) @requires_dask @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data.variable.chunk()) @requires_cftime @pytest.mark.parametrize("shape", [(24,), (8, 3), (2, 4, 3)]) def test_encode_cf_datetime_overflow(shape) -> None: # Test for fix to GH 2272 dates = pd.date_range("2100", periods=24).values.reshape(shape) units = "days since 1800-01-01" calendar = "standard" num, _, _ = encode_cf_datetime(dates, units, calendar) roundtrip = decode_cf_datetime(num, units, calendar) 
np.testing.assert_array_equal(dates, roundtrip) def test_encode_expected_failures() -> None: dates = pd.date_range("2000", periods=3) with pytest.raises(ValueError, match="invalid time units"): encode_cf_datetime(dates, units="days after 2000-01-01") with pytest.raises(ValueError, match="invalid reference date"): encode_cf_datetime(dates, units="days since NO_YEAR") def test_encode_cf_datetime_pandas_min() -> None: # GH 2623 dates = pd.date_range("2000", periods=3) num, units, calendar = encode_cf_datetime(dates) expected_num = np.array([0.0, 1.0, 2.0]) expected_units = "days since 2000-01-01 00:00:00" expected_calendar = "proleptic_gregorian" np.testing.assert_array_equal(num, expected_num) assert units == expected_units assert calendar == expected_calendar @requires_cftime def test_encode_cf_datetime_invalid_pandas_valid_cftime() -> None: num, units, calendar = encode_cf_datetime( pd.date_range("2000", periods=3), # Pandas fails to parse this unit, but cftime is quite happy with it "days since 1970-01-01 00:00:00 00", "standard", ) expected_num = [10957, 10958, 10959] expected_units = "days since 1970-01-01 00:00:00 00" expected_calendar = "standard" assert_array_equal(num, expected_num) assert units == expected_units assert calendar == expected_calendar @requires_cftime def test_time_units_with_timezone_roundtrip(calendar) -> None: # Regression test for GH 2649 expected_units = "days since 2000-01-01T00:00:00-05:00" expected_num_dates = np.array([1, 2, 3]) dates = decode_cf_datetime(expected_num_dates, expected_units, calendar) # Check that dates were decoded to UTC; here the hours should all # equal 5. result_hours = DataArray(dates).dt.hour expected_hours = DataArray([5, 5, 5]) assert_equal(result_hours, expected_hours) # Check that the encoded values are accurately roundtripped. result_num_dates, result_units, result_calendar = encode_cf_datetime( dates, expected_units, calendar ) if calendar in _STANDARD_CALENDARS: assert_duckarray_equal(result_num_dates, expected_num_dates) else: # cftime datetime arithmetic is not quite exact. 
assert_duckarray_allclose(result_num_dates, expected_num_dates) assert result_units == expected_units assert result_calendar == calendar @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_default_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) @pytest.mark.parametrize("units_year", [1500, 1580]) def test_use_cftime_default_standard_calendar_out_of_range( calendar, units_year ) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) with pytest.warns(SerializationWarning): result = decode_cf_datetime(numerical_dates, units, calendar) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_default_non_standard_calendar( calendar, units_year, time_unit: PDDatetimeUnitOptions ) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) if time_unit == "ns" and units_year == 2500: with pytest.warns(SerializationWarning, match="Unable to decode time axis"): result = decode_cf_datetime( numerical_dates, units, calendar, time_unit=time_unit ) else: with assert_no_warnings(): result = decode_cf_datetime( numerical_dates, units, calendar, time_unit=time_unit ) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_true(calendar, units_year) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar, use_cftime=True) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_false_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) @pytest.mark.parametrize("units_year", [1500, 1582]) def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_false_non_standard_calendar(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) 
@requires_cftime
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
def test_decode_ambiguous_time_warns(calendar) -> None:
    # GH 4422, 4506
    from cftime import num2date

    # we don't decode non-standard calendars with
    # pandas so expect no warning will be emitted
    is_standard_calendar = calendar in _STANDARD_CALENDAR_NAMES

    dates = [1, 2, 3]
    units = "days since 1-1-1"
    expected = num2date(
        dates, units, calendar=calendar, only_use_cftime_datetimes=True
    )

    if is_standard_calendar:
        with pytest.warns(SerializationWarning) as record:
            result = decode_cf_datetime(dates, units, calendar=calendar)
        relevant_warnings = [
            r
            for r in record.list
            if str(r.message).startswith("Ambiguous reference date string: 1-1-1")
        ]
        assert len(relevant_warnings) == 1
    else:
        with assert_no_warnings():
            result = decode_cf_datetime(dates, units, calendar=calendar)

    np.testing.assert_array_equal(result, expected)


@pytest.mark.filterwarnings("ignore:Times can't be serialized faithfully")
@pytest.mark.parametrize("encoding_units", FREQUENCIES_TO_ENCODING_UNITS.values())
@pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys())
@pytest.mark.parametrize("use_cftime", [True, False])
def test_encode_cf_datetime_defaults_to_correct_dtype(
    encoding_units, freq, use_cftime
) -> None:
    if not has_cftime and use_cftime:
        pytest.skip("Test requires cftime")
    if (freq == "ns" or encoding_units == "nanoseconds") and use_cftime:
        pytest.skip("Nanosecond frequency is not valid for cftime dates.")
    times = date_range("2000", periods=3, freq=freq, use_cftime=use_cftime)
    units = f"{encoding_units} since 2000-01-01"
    encoded, _units, _ = encode_cf_datetime(times, units)

    numpy_timeunit = _netcdf_to_numpy_timeunit(encoding_units)
    encoding_units_as_timedelta = np.timedelta64(1, numpy_timeunit)
    if pd.to_timedelta(1, freq) >= encoding_units_as_timedelta:
        assert encoded.dtype == np.int64
    else:
        assert encoded.dtype == np.float64


@pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys())
def test_encode_decode_roundtrip_datetime64(
    freq, time_unit: PDDatetimeUnitOptions
) -> None:
    # See GH 4045. Prior to GH 4684 this test would fail for frequencies of
    # "s", "ms", "us", and "ns".
    initial_time = pd.date_range("1678-01-01", periods=1)
    times = initial_time.append(pd.date_range("1968", periods=2, freq=freq))
    variable = Variable(["time"], times)
    encoded = conventions.encode_cf_variable(variable)
    decoded = conventions.decode_cf_variable(
        "time", encoded, decode_times=CFDatetimeCoder(time_unit=time_unit)
    )
    assert_equal(variable, decoded)


@requires_cftime
@pytest.mark.parametrize("freq", ["us", "ms", "s", "min", "h", "D"])
def test_encode_decode_roundtrip_cftime(freq) -> None:
    initial_time = date_range("0001", periods=1, use_cftime=True)
    times = initial_time.append(
        date_range("0001", periods=2, freq=freq, use_cftime=True)
        + timedelta(days=291000 * 365)
    )
    variable = Variable(["time"], times)
    encoded = conventions.encode_cf_variable(variable)
    decoder = CFDatetimeCoder(use_cftime=True)
    decoded = conventions.decode_cf_variable("time", encoded, decode_times=decoder)
    assert_equal(variable, decoded)


@requires_cftime
def test__encode_datetime_with_cftime() -> None:
    # See GH 4870. cftime versions > 1.4.0 required us to adapt the
    # way _encode_datetime_with_cftime was written.
import cftime calendar = "gregorian" times = cftime.num2date([0, 1], "hours since 2000-01-01", calendar) encoding_units = "days since 2000-01-01" # Since netCDF files do not support storing float128 values, we ensure that # float64 values are used by setting longdouble=False in num2date. This try # except logic can be removed when xarray's minimum version of cftime is at # least 1.6.2. try: expected = cftime.date2num(times, encoding_units, calendar, longdouble=False) except TypeError: expected = cftime.date2num(times, encoding_units, calendar) result = _encode_datetime_with_cftime(times, encoding_units, calendar) np.testing.assert_equal(result, expected) @requires_cftime def test_round_trip_standard_calendar_cftime_datetimes_pre_reform() -> None: from cftime import DatetimeGregorian dates = np.array([DatetimeGregorian(1, 1, 1), DatetimeGregorian(2000, 1, 1)]) encoded = encode_cf_datetime(dates, "seconds since 2000-01-01", "standard") with pytest.warns(SerializationWarning, match="Unable to decode time axis"): decoded = decode_cf_datetime(*encoded) np.testing.assert_equal(decoded, dates) @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_encode_cf_datetime_gregorian_proleptic_gregorian_mismatch_error( calendar: str, time_unit: PDDatetimeUnitOptions, ) -> None: if time_unit == "ns": pytest.skip("datetime64[ns] values can only be defined post reform") dates = np.array(["0001-01-01", "2001-01-01"], dtype=f"datetime64[{time_unit}]") with pytest.raises(ValueError, match="proleptic_gregorian"): encode_cf_datetime(dates, "seconds since 2000-01-01", calendar) @pytest.mark.parametrize("calendar", ["gregorian", "Gregorian", "GREGORIAN"]) def test_decode_encode_roundtrip_with_non_lowercase_letters( calendar, time_unit: PDDatetimeUnitOptions ) -> None: # See GH 5093. times = [0, 1] units = "days since 2000-01-01" attrs = {"calendar": calendar, "units": units} variable = Variable(["time"], times, attrs) decoded = conventions.decode_cf_variable( "time", variable, decode_times=CFDatetimeCoder(time_unit=time_unit) ) encoded = conventions.encode_cf_variable(decoded) # Previously this would erroneously be an array of cftime.datetime # objects. We check here that it is decoded properly to np.datetime64. assert np.issubdtype(decoded.dtype, np.datetime64) # Use assert_identical to ensure that the calendar attribute maintained its # original form throughout the roundtripping process, uppercase letters and # all. assert_identical(variable, encoded) @requires_cftime def test_should_cftime_be_used_source_outside_range(): src = date_range( "1000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True ) with pytest.raises( ValueError, match=r"Source time range is not valid for numpy datetimes." ): _should_cftime_be_used(src, "standard", False) @requires_cftime def test_should_cftime_be_used_target_not_npable(): src = date_range( "2000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True ) with pytest.raises( ValueError, match=r"Calendar 'noleap' is only valid with cftime." ): _should_cftime_be_used(src, "noleap", False) @pytest.mark.parametrize( "dtype", [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64], ) def test_decode_cf_datetime_varied_integer_dtypes(dtype): units = "seconds since 2018-08-22T03:23:03Z" num_dates = dtype(50) # Set use_cftime=False to ensure we cannot mask a failure by falling back # to cftime. 
result = decode_cf_datetime(num_dates, units, use_cftime=False) expected = np.asarray(np.datetime64("2018-08-22T03:23:53", "ns")) np.testing.assert_equal(result, expected) @requires_cftime def test_decode_cf_datetime_uint64_with_cftime(): units = "days since 1700-01-01" num_dates = np.uint64(182621) result = decode_cf_datetime(num_dates, units) expected = np.asarray(np.datetime64("2200-01-01", "ns")) np.testing.assert_equal(result, expected) def test_decode_cf_datetime_uint64_with_pandas_overflow_error(): units = "nanoseconds since 1970-01-01" calendar = "standard" num_dates = np.uint64(1_000_000 * 86_400 * 360 * 500_000) with pytest.raises(OutOfBoundsTimedelta): decode_cf_datetime(num_dates, units, calendar, use_cftime=False) @requires_cftime def test_decode_cf_datetime_uint64_with_cftime_overflow_error(): units = "microseconds since 1700-01-01" calendar = "360_day" num_dates = np.uint64(1_000_000 * 86_400 * 360 * 500_000) with pytest.raises(OverflowError): decode_cf_datetime(num_dates, units, calendar) @pytest.mark.parametrize("use_cftime", [True, False]) def test_decode_0size_datetime(use_cftime): # GH1329 if use_cftime and not has_cftime: pytest.skip() dtype = object if use_cftime else "=M8[ns]" expected = np.array([], dtype=dtype) actual = decode_cf_datetime( np.zeros(shape=0, dtype=np.int64), units="days since 1970-01-01 00:00:00", calendar="proleptic_gregorian", use_cftime=use_cftime, ) np.testing.assert_equal(expected, actual) def test_decode_float_datetime(): num_dates = np.array([1867128, 1867134, 1867140], dtype="float32") units = "hours since 1800-01-01" calendar = "standard" expected = np.array( ["2013-01-01T00:00:00", "2013-01-01T06:00:00", "2013-01-01T12:00:00"], dtype="datetime64[ns]", ) actual = decode_cf_datetime( num_dates, units=units, calendar=calendar, use_cftime=False ) np.testing.assert_equal(actual, expected) @pytest.mark.parametrize("time_unit", ["ms", "us", "ns"]) def test_decode_float_datetime_with_decimals( time_unit: PDDatetimeUnitOptions, ) -> None: # test resolution enhancement for floats values = np.array([0, 0.125, 0.25, 0.375, 0.75, 1.0], dtype="float32") expected = np.array( [ "2000-01-01T00:00:00.000", "2000-01-01T00:00:00.125", "2000-01-01T00:00:00.250", "2000-01-01T00:00:00.375", "2000-01-01T00:00:00.750", "2000-01-01T00:00:01.000", ], dtype=f"=M8[{time_unit}]", ) units = "seconds since 2000-01-01" calendar = "standard" actual = decode_cf_datetime(values, units, calendar, time_unit=time_unit) assert actual.dtype == expected.dtype np.testing.assert_equal(actual, expected) @pytest.mark.parametrize( "time_unit, num", [("s", 0.123), ("ms", 0.1234), ("us", 0.1234567)] ) def test_coding_float_datetime_warning( time_unit: PDDatetimeUnitOptions, num: float ) -> None: units = "seconds since 2000-01-01" calendar = "standard" values = np.array([num], dtype="float32") with pytest.warns( SerializationWarning, match=f"Can't decode floating point datetimes to {time_unit!r}", ): decode_cf_datetime(values, units, calendar, time_unit=time_unit) @requires_cftime def test_scalar_unit() -> None: # test that a scalar units (often NaN when using to_netcdf) does not raise an error variable = Variable(("x", "y"), np.array([[0, 1], [2, 3]]), {"units": np.nan}) result = CFDatetimeCoder().decode(variable) assert np.isnan(result.attrs["units"]) @requires_cftime def test_contains_cftime_lazy() -> None: import cftime from xarray.core.common import _contains_cftime_datetimes times = np.array( [cftime.DatetimeGregorian(1, 1, 2, 0), cftime.DatetimeGregorian(1, 1, 2, 0)], dtype=object, 
) array = FirstElementAccessibleArray(times) assert _contains_cftime_datetimes(array) @pytest.mark.parametrize( "timestr, format, dtype, fill_value, use_encoding", [ ("1677-09-21T00:12:43.145224193", "ns", np.int64, 20, True), ("1970-09-21T00:12:44.145224808", "ns", np.float64, 1e30, True), ( "1677-09-21T00:12:43.145225216", "ns", np.float64, -9.223372036854776e18, True, ), ("1677-09-21T00:12:43.145224193", "ns", np.int64, None, False), ("1677-09-21T00:12:43.145225", "us", np.int64, None, False), ("1970-01-01T00:00:01.000001", "us", np.int64, None, False), ("1677-09-21T00:21:52.901038080", "ns", np.float32, 20.0, True), ], ) def test_roundtrip_datetime64_nanosecond_precision( timestr: str, format: Literal["ns", "us"], dtype: np.typing.DTypeLike | None, fill_value: int | float | None, use_encoding: bool, time_unit: PDDatetimeUnitOptions, ) -> None: # test for GH7817 time = np.datetime64(timestr, format) times = [np.datetime64("1970-01-01T00:00:00", format), np.datetime64("NaT"), time] if use_encoding: encoding = dict(dtype=dtype, _FillValue=fill_value) else: encoding = {} var = Variable(["time"], times, encoding=encoding) assert var.dtype == np.dtype(f"=M8[{format}]") encoded_var = conventions.encode_cf_variable(var) assert ( encoded_var.attrs["units"] == f"{_numpy_to_netcdf_timeunit(format)} since 1970-01-01 00:00:00" ) assert encoded_var.attrs["calendar"] == "proleptic_gregorian" assert encoded_var.data.dtype == dtype decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) result_unit = ( format if np.timedelta64(1, format) <= np.timedelta64(1, time_unit) else time_unit ) assert decoded_var.dtype == np.dtype(f"=M8[{result_unit}]") assert ( decoded_var.encoding["units"] == f"{_numpy_to_netcdf_timeunit(format)} since 1970-01-01 00:00:00" ) assert decoded_var.encoding["dtype"] == dtype assert decoded_var.encoding["calendar"] == "proleptic_gregorian" assert_identical(var, decoded_var) def test_roundtrip_datetime64_nanosecond_precision_warning( time_unit: PDDatetimeUnitOptions, ) -> None: # test warning if times can't be serialized faithfully times = [ np.datetime64("1970-01-01T00:01:00", time_unit), np.datetime64("NaT", time_unit), np.datetime64("1970-01-02T00:01:00", time_unit), ] units = "days since 1970-01-10T01:01:00" needed_units = "hours" new_units = f"{needed_units} since 1970-01-10T01:01:00" encoding = dict(dtype=None, _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with pytest.warns(UserWarning, match=f"Resolution of {needed_units!r} needed."): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.float64 assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == 20.0 decoded_var = conventions.decode_cf_variable("foo", encoded_var) assert_identical(var, decoded_var) encoding = dict(dtype="int64", _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with pytest.warns( UserWarning, match=f"Serializing with units {new_units!r} instead." 
): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == new_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) encoding = dict(dtype="float64", _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with warnings.catch_warnings(): warnings.simplefilter("error") encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.float64 assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == 20.0 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) encoding = dict(dtype="int64", _FillValue=20, units=new_units) var = Variable(["time"], times, encoding=encoding) with warnings.catch_warnings(): warnings.simplefilter("error") encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == new_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) @pytest.mark.parametrize( "dtype, fill_value", [(np.int64, 20), (np.int64, np.iinfo(np.int64).min), (np.float64, 1e30)], ) def test_roundtrip_timedelta64_nanosecond_precision( dtype: np.typing.DTypeLike | None, fill_value: int | float, time_unit: PDDatetimeUnitOptions, ) -> None: # test for GH7942 one_day = np.timedelta64(1, "ns") nat = np.timedelta64("nat", "ns") timedelta_values = (np.arange(5) * one_day).astype("timedelta64[ns]") timedelta_values[2] = nat timedelta_values[4] = nat encoding = dict(dtype=dtype, _FillValue=fill_value, units="nanoseconds") var = Variable(["time"], timedelta_values, encoding=encoding) encoded_var = conventions.encode_cf_variable(var) decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=CFTimedeltaCoder(time_unit=time_unit), ) assert_identical(var, decoded_var) def test_roundtrip_timedelta64_nanosecond_precision_warning() -> None: # test warning if timedeltas can't be serialized faithfully one_day = np.timedelta64(1, "D") nat = np.timedelta64("nat", "ns") timedelta_values = (np.arange(5) * one_day).astype("timedelta64[ns]") timedelta_values[2] = nat timedelta_values[4] = np.timedelta64(12, "h").astype("timedelta64[ns]") units = "days" needed_units = "hours" wmsg = ( f"Timedeltas can't be serialized faithfully with requested units {units!r}. " f"Serializing with units {needed_units!r} instead." 
) encoding = dict(dtype=np.int64, _FillValue=20, units=units) var = Variable(["time"], timedelta_values, encoding=encoding) with pytest.warns(UserWarning, match=wmsg): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == needed_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_identical(var, decoded_var) assert decoded_var.encoding["dtype"] == np.int64 _TEST_ROUNDTRIP_FLOAT_TIMES_TESTS = { "GH-8271": ( 20.0, np.array( ["1970-01-01 00:00:00", "1970-01-01 06:00:00", "NaT"], dtype="datetime64[ns]", ), "days since 1960-01-01", np.array([3653, 3653.25, 20.0]), ), "GH-9488-datetime64[ns]": ( 1.0e20, np.array(["2010-01-01 12:00:00", "NaT"], dtype="datetime64[ns]"), "seconds since 2010-01-01", np.array([43200, 1.0e20]), ), "GH-9488-timedelta64[ns]": ( 1.0e20, np.array([1_000_000_000, "NaT"], dtype="timedelta64[ns]"), "seconds", np.array([1.0, 1.0e20]), ), } @pytest.mark.parametrize( ("fill_value", "times", "units", "encoded_values"), _TEST_ROUNDTRIP_FLOAT_TIMES_TESTS.values(), ids=_TEST_ROUNDTRIP_FLOAT_TIMES_TESTS.keys(), ) def test_roundtrip_float_times(fill_value, times, units, encoded_values) -> None: # Regression test for GitHub issues #8271 and #9488 var = Variable( ["time"], times, encoding=dict(dtype=np.float64, _FillValue=fill_value, units=units), ) encoded_var = conventions.encode_cf_variable(var) np.testing.assert_array_equal(encoded_var, encoded_values) assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == fill_value decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_identical(var, decoded_var) assert decoded_var.encoding["units"] == units assert decoded_var.encoding["_FillValue"] == fill_value _ENCODE_DATETIME64_VIA_DASK_TESTS = { "pandas-encoding-with-prescribed-units-and-dtype": ( "D", "days since 1700-01-01", np.dtype("int32"), ), "mixed-cftime-pandas-encoding-with-prescribed-units-and-dtype": pytest.param( "250YS", "days since 1700-01-01", np.dtype("int32"), marks=requires_cftime ), "pandas-encoding-with-default-units-and-dtype": ("250YS", None, None), } @requires_dask @pytest.mark.parametrize( ("freq", "units", "dtype"), _ENCODE_DATETIME64_VIA_DASK_TESTS.values(), ids=_ENCODE_DATETIME64_VIA_DASK_TESTS.keys(), ) def test_encode_cf_datetime_datetime64_via_dask( freq, units, dtype, time_unit: PDDatetimeUnitOptions ) -> None: import dask.array times_pd = pd.date_range(start="1700", freq=freq, periods=3, unit=time_unit) times = dask.array.from_array(times_pd, chunks=1) encoded_times, encoding_units, encoding_calendar = encode_cf_datetime( times, units, None, dtype ) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: expected_netcdf_time_unit = _numpy_to_netcdf_timeunit(time_unit) assert encoding_units == f"{expected_netcdf_time_unit} since 1970-01-01" assert encoded_times.dtype == np.dtype("int64") assert encoding_calendar == "proleptic_gregorian" decoded_times = decode_cf_datetime( encoded_times, encoding_units, encoding_calendar, time_unit=time_unit ) np.testing.assert_equal(decoded_times, times) assert decoded_times.dtype == times.dtype @requires_dask @pytest.mark.parametrize( ("range_function", "start", "units", "dtype"), [ 
(pd.date_range, "2000", None, np.dtype("int32")), (pd.date_range, "2000", "days since 2000-01-01", None), (pd.timedelta_range, "0D", None, np.dtype("int32")), (pd.timedelta_range, "0D", "days", None), ], ) def test_encode_via_dask_cannot_infer_error( range_function, start, units, dtype ) -> None: values = range_function(start=start, freq="D", periods=3) encoding = dict(units=units, dtype=dtype) variable = Variable(["time"], values, encoding=encoding).chunk({"time": 1}) with pytest.raises(ValueError, match="When encoding chunked arrays"): conventions.encode_cf_variable(variable) @requires_cftime @requires_dask @pytest.mark.parametrize( ("units", "dtype"), [("days since 1700-01-01", np.dtype("int32")), (None, None)] ) def test_encode_cf_datetime_cftime_datetime_via_dask(units, dtype) -> None: import dask.array calendar = "standard" times_idx = date_range( start="1700", freq="D", periods=3, calendar=calendar, use_cftime=True ) times = dask.array.from_array(times_idx, chunks=1) encoded_times, encoding_units, encoding_calendar = encode_cf_datetime( times, units, None, dtype ) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: assert encoding_units == "microseconds since 1970-01-01" assert encoded_times.dtype == np.int64 assert encoding_calendar == calendar decoded_times = decode_cf_datetime( encoded_times, encoding_units, encoding_calendar, use_cftime=True ) np.testing.assert_equal(decoded_times, times) @pytest.mark.parametrize( "use_cftime", [False, pytest.param(True, marks=requires_cftime)] ) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_datetime_units_change(use_cftime, use_dask) -> None: times = date_range(start="2000", freq="12h", periods=3, use_cftime=use_cftime) encoding = dict(units="days since 2000-01-01", dtype=np.dtype("int64")) variable = Variable(["time"], times, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Times can't be serialized"): conventions.encode_cf_variable(variable).compute() else: with pytest.warns(UserWarning, match="Times can't be serialized"): encoded = conventions.encode_cf_variable(variable) if use_cftime: expected_units = "hours since 2000-01-01 00:00:00.000000" else: expected_units = "hours since 2000-01-01" assert encoded.attrs["units"] == expected_units decoded = conventions.decode_cf_variable( "name", encoded, decode_times=CFDatetimeCoder(use_cftime=use_cftime) ) assert_equal(variable, decoded) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_datetime_precision_loss_regression_test(use_dask) -> None: # Regression test for # https://github.com/pydata/xarray/issues/9134#issuecomment-2191446463 times = date_range("2000", periods=5, freq="ns") encoding = dict(units="seconds since 1970-01-01", dtype=np.dtype("int64")) variable = Variable(["time"], times, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Times can't be serialized"): conventions.encode_cf_variable(variable).compute() else: with pytest.warns(UserWarning, match="Times can't be serialized"): encoded = conventions.encode_cf_variable(variable) decoded = conventions.decode_cf_variable("name", encoded) assert_equal(variable, decoded) @requires_dask @pytest.mark.parametrize( ("units", "dtype"), [("days", np.dtype("int32")), 
(None, None)] ) def test_encode_cf_timedelta_via_dask( units: str | None, dtype: np.dtype | None, time_unit: PDDatetimeUnitOptions ) -> None: import dask.array times_pd = pd.timedelta_range(start="0D", freq="D", periods=3, unit=time_unit) # type: ignore[call-arg,unused-ignore] times = dask.array.from_array(times_pd, chunks=1) encoded_times, encoding_units = encode_cf_timedelta(times, units, dtype) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: assert encoding_units == _numpy_to_netcdf_timeunit(time_unit) assert encoded_times.dtype == np.dtype("int64") decoded_times = decode_cf_timedelta( encoded_times, encoding_units, time_unit=time_unit ) np.testing.assert_equal(decoded_times, times) assert decoded_times.dtype == times.dtype @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_timedelta_units_change(use_dask) -> None: timedeltas = pd.timedelta_range(start="0h", freq="12h", periods=3) encoding = dict(units="days", dtype=np.dtype("int64")) variable = Variable(["time"], timedeltas, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Timedeltas can't be serialized"): conventions.encode_cf_variable(variable).compute() else: # In this case we automatically modify the encoding units to continue # encoding with integer values. with pytest.warns(UserWarning, match="Timedeltas can't be serialized"): encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["units"] == "hours" decoded = conventions.decode_cf_variable( "name", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_equal(variable, decoded) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_timedelta_small_dtype_missing_value(use_dask) -> None: # Regression test for GitHub issue #9134 timedeltas = np.array([1, 2, "NaT", 4], dtype="timedelta64[D]").astype( "timedelta64[ns]" ) encoding = dict(units="days", dtype=np.dtype("int16"), _FillValue=np.int16(-1)) variable = Variable(["time"], timedeltas, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) encoded = conventions.encode_cf_variable(variable) decoded = conventions.decode_cf_variable("name", encoded, decode_timedelta=True) assert_equal(variable, decoded) _DECODE_TIMEDELTA_VIA_UNITS_TESTS = { "default": (True, None, np.dtype("timedelta64[ns]"), True), "decode_timedelta=True": (True, True, np.dtype("timedelta64[ns]"), False), "decode_timedelta=False": (True, False, np.dtype("int64"), False), "inherit-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="s"), None, np.dtype("timedelta64[s]"), True, ), "set-time_unit-via-CFTimedeltaCoder-decode_times=True": ( True, CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), "set-time_unit-via-CFTimedeltaCoder-decode_times=False": ( False, CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), "override-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="ns"), CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), } @pytest.mark.parametrize( ("decode_times", "decode_timedelta", "expected_dtype", "warns"), list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.values()), ids=list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.keys()), ) def test_decode_timedelta_via_units( decode_times, decode_timedelta, expected_dtype, warns ) -> None: timedeltas = 
pd.timedelta_range(0, freq="D", periods=3) attrs = {"units": "days"} var = Variable(["time"], timedeltas, encoding=attrs) encoded = Variable(["time"], np.array([0, 1, 2]), attrs=attrs) if warns: with pytest.warns( FutureWarning, match="xarray will not decode the variable 'foo' into a timedelta64 dtype", ): decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta, ) else: decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta ) if decode_timedelta is False: assert_equal(encoded, decoded) else: assert_equal(var, decoded) assert decoded.dtype == expected_dtype _DECODE_TIMEDELTA_VIA_DTYPE_TESTS = { "default": (True, None, "ns", np.dtype("timedelta64[ns]")), "decode_timedelta=False": (True, False, "ns", np.dtype("int64")), "decode_timedelta=True": (True, True, "ns", np.dtype("timedelta64[ns]")), "use-original-units": (True, True, "s", np.dtype("timedelta64[s]")), "inherit-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="s"), None, "ns", np.dtype("timedelta64[s]"), ), "set-time_unit-via-CFTimedeltaCoder-decode_times=True": ( True, CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "set-time_unit-via-CFTimedeltaCoder-decode_times=False": ( False, CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "override-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="ns"), CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "decode-different-units": ( True, CFTimedeltaCoder(time_unit="us"), "s", np.dtype("timedelta64[us]"), ), } @pytest.mark.parametrize( ("decode_times", "decode_timedelta", "original_unit", "expected_dtype"), list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.values()), ids=list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.keys()), ) def test_decode_timedelta_via_dtype( decode_times, decode_timedelta, original_unit, expected_dtype ) -> None: timedeltas = pd.timedelta_range(0, freq="D", periods=3, unit=original_unit) # type: ignore[call-arg,unused-ignore] encoding = {"units": "days"} var = Variable(["time"], timedeltas, encoding=encoding) encoded = conventions.encode_cf_variable(var) assert encoded.attrs["dtype"] == f"timedelta64[{original_unit}]" assert encoded.attrs["units"] == encoding["units"] decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta ) if decode_timedelta is False: assert_equal(encoded, decoded) else: assert_equal(var, decoded) assert decoded.dtype == expected_dtype @pytest.mark.parametrize("dtype", [np.uint64, np.int64, np.float64]) def test_decode_timedelta_dtypes(dtype) -> None: encoded = Variable(["time"], np.arange(10), {"units": "seconds"}) coder = CFTimedeltaCoder(time_unit="s") decoded = coder.decode(encoded) assert decoded.dtype.kind == "m" assert_equal(coder.encode(decoded), encoded) def test_lazy_decode_timedelta_unexpected_dtype() -> None: attrs = {"units": "seconds"} encoded = Variable(["time"], [0, 0.5, 1], attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="s") ) expected_dtype_upon_lazy_decoding = np.dtype("timedelta64[s]") assert decoded.dtype == expected_dtype_upon_lazy_decoding expected_dtype_upon_loading = np.dtype("timedelta64[ms]") with pytest.warns(SerializationWarning, match="Can't decode floating"): assert decoded.load().dtype == expected_dtype_upon_loading def test_lazy_decode_timedelta_error() -> None: attrs = {"units": "seconds"} encoded = 
Variable(["time"], [0, np.iinfo(np.int64).max, 1], attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="ms") ) with pytest.raises(OutOfBoundsTimedelta, match="overflow"): decoded.load() @pytest.mark.parametrize( "calendar", [ "standard", pytest.param( "360_day", marks=pytest.mark.skipif(not has_cftime, reason="no cftime") ), ], ) def test_duck_array_decode_times(calendar) -> None: from xarray.core.indexing import LazilyIndexedArray days = LazilyIndexedArray(DuckArrayWrapper(np.array([1.0, 2.0, 3.0]))) var = Variable( ["time"], days, {"units": "days since 2001-01-01", "calendar": calendar} ) decoded = conventions.decode_cf_variable( "foo", var, decode_times=CFDatetimeCoder(use_cftime=None) ) if calendar not in _STANDARD_CALENDAR_NAMES: assert decoded.dtype == np.dtype("O") else: assert decoded.dtype == np.dtype("=M8[ns]") @pytest.mark.parametrize("decode_timedelta", [True, False]) @pytest.mark.parametrize("mask_and_scale", [True, False]) def test_decode_timedelta_mask_and_scale( decode_timedelta: bool, mask_and_scale: bool ) -> None: attrs = { "dtype": "timedelta64[ns]", "units": "nanoseconds", "_FillValue": np.int16(-1), "add_offset": 100000.0, } encoded = Variable(["time"], np.array([0, -1, 1], "int16"), attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, mask_and_scale=mask_and_scale, decode_timedelta=decode_timedelta ) result = conventions.encode_cf_variable(decoded, name="foo") assert_identical(encoded, result) assert encoded.dtype == result.dtype def test_decode_floating_point_timedelta_no_serialization_warning() -> None: attrs = {"units": "seconds"} encoded = Variable(["time"], [0, 0.1, 0.2], attrs=attrs) decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True) with assert_no_warnings(): decoded.load() def test_timedelta64_coding_via_dtype(time_unit: PDDatetimeUnitOptions) -> None: timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]") variable = Variable(["time"], timedeltas) expected_units = _numpy_to_netcdf_timeunit(time_unit) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["dtype"] == f"timedelta64[{time_unit}]" assert encoded.attrs["units"] == expected_units decoded = conventions.decode_cf_variable("timedeltas", encoded) assert decoded.encoding["dtype"] == np.dtype("int64") assert decoded.encoding["units"] == expected_units assert_identical(decoded, variable) assert decoded.dtype == variable.dtype reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert reencoded.dtype == encoded.dtype def test_timedelta_coding_via_dtype_non_pandas_coarse_resolution_warning() -> None: attrs = {"dtype": "timedelta64[D]", "units": "days"} encoded = Variable(["time"], [0, 1, 2], attrs=attrs) with pytest.warns(UserWarning, match="xarray only supports"): decoded = conventions.decode_cf_variable("timedeltas", encoded) expected_array = np.array([0, 1, 2], dtype="timedelta64[D]") expected_array = expected_array.astype("timedelta64[s]") expected = Variable(["time"], expected_array) assert_identical(decoded, expected) assert decoded.dtype == np.dtype("timedelta64[s]") @pytest.mark.xfail(reason="xarray does not recognize picoseconds as time-like") def test_timedelta_coding_via_dtype_non_pandas_fine_resolution_warning() -> None: attrs = {"dtype": "timedelta64[ps]", "units": "picoseconds"} encoded = Variable(["time"], [0, 1000, 2000], attrs=attrs) with pytest.warns(UserWarning, match="xarray only supports"): decoded = 
conventions.decode_cf_variable("timedeltas", encoded) expected_array = np.array([0, 1000, 2000], dtype="timedelta64[ps]") expected_array = expected_array.astype("timedelta64[ns]") expected = Variable(["time"], expected_array) assert_identical(decoded, expected) assert decoded.dtype == np.dtype("timedelta64[ns]") def test_timedelta_decode_via_dtype_invalid_encoding() -> None: attrs = {"dtype": "timedelta64[s]", "units": "seconds"} encoding = {"units": "foo"} encoded = Variable(["time"], [0, 1, 2], attrs=attrs, encoding=encoding) with pytest.raises(ValueError, match="failed to prevent"): conventions.decode_cf_variable("timedeltas", encoded) @pytest.mark.parametrize("attribute", ["dtype", "units"]) def test_timedelta_encode_via_dtype_invalid_attribute(attribute) -> None: timedeltas = pd.timedelta_range(0, freq="D", periods=3) attrs = {attribute: "foo"} variable = Variable(["time"], timedeltas, attrs=attrs) with pytest.raises(ValueError, match="failed to prevent"): conventions.encode_cf_variable(variable) @pytest.mark.parametrize( ("decode_via_units", "decode_via_dtype", "attrs", "expect_timedelta64"), [ (True, True, {"units": "seconds"}, True), (True, False, {"units": "seconds"}, True), (False, True, {"units": "seconds"}, False), (False, False, {"units": "seconds"}, False), (True, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (True, False, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (False, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (False, False, {"dtype": "timedelta64[s]", "units": "seconds"}, False), ], ids=lambda x: f"{x!r}", ) def test_timedelta_decoding_options( decode_via_units, decode_via_dtype, attrs, expect_timedelta64 ) -> None: array = np.array([0, 1, 2], dtype=np.dtype("int64")) encoded = Variable(["time"], array, attrs=attrs) # Confirm we decode to the expected dtype. decode_timedelta = CFTimedeltaCoder( time_unit="s", decode_via_units=decode_via_units, decode_via_dtype=decode_via_dtype, ) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=decode_timedelta ) if expect_timedelta64: assert decoded.dtype == np.dtype("timedelta64[s]") else: assert decoded.dtype == np.dtype("int64") # Confirm we exactly roundtrip. 
reencoded = conventions.encode_cf_variable(decoded) expected = encoded.copy() if "dtype" not in attrs and decode_via_units: expected.attrs["dtype"] = "timedelta64[s]" assert_identical(reencoded, expected) def test_timedelta_encoding_explicit_non_timedelta64_dtype() -> None: encoding = {"dtype": np.dtype("int32")} timedeltas = pd.timedelta_range(0, freq="D", periods=3) variable = Variable(["time"], timedeltas, encoding=encoding) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["units"] == "days" assert encoded.attrs["dtype"] == "timedelta64[ns]" assert encoded.dtype == np.dtype("int32") decoded = conventions.decode_cf_variable("foo", encoded) assert_identical(decoded, variable) reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert encoded.attrs["units"] == "days" assert encoded.attrs["dtype"] == "timedelta64[ns]" assert encoded.dtype == np.dtype("int32") @pytest.mark.parametrize("mask_attribute", ["_FillValue", "missing_value"]) def test_timedelta64_coding_via_dtype_with_mask( time_unit: PDDatetimeUnitOptions, mask_attribute: str ) -> None: timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]") mask = 10 variable = Variable(["time"], timedeltas, encoding={mask_attribute: mask}) expected_dtype = f"timedelta64[{time_unit}]" expected_units = _numpy_to_netcdf_timeunit(time_unit) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["dtype"] == expected_dtype assert encoded.attrs["units"] == expected_units assert encoded.attrs[mask_attribute] == mask assert encoded[-1] == mask decoded = conventions.decode_cf_variable("timedeltas", encoded) assert decoded.encoding["dtype"] == np.dtype("int64") assert decoded.encoding["units"] == expected_units assert decoded.encoding[mask_attribute] == mask assert np.isnat(decoded[-1]) assert_identical(decoded, variable) assert decoded.dtype == variable.dtype reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert reencoded.dtype == encoded.dtype def test_roundtrip_0size_timedelta(time_unit: PDDatetimeUnitOptions) -> None: # regression test for GitHub issue #10310 encoding = {"units": "days", "dtype": np.dtype("int64")} data = np.array([], dtype=f"=m8[{time_unit}]") decoded = Variable(["time"], data, encoding=encoding) encoded = conventions.encode_cf_variable(decoded, name="foo") assert encoded.dtype == encoding["dtype"] assert encoded.attrs["units"] == encoding["units"] decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True) assert decoded.dtype == np.dtype(f"=m8[{time_unit}]") with assert_no_warnings(): decoded.load() assert decoded.dtype == np.dtype("=m8[s]") assert decoded.encoding == encoding def test_roundtrip_empty_datetime64_array(time_unit: PDDatetimeUnitOptions) -> None: # Regression test for GitHub issue #10722. 
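    # Note (descriptive comment): encoding an empty (zero-length) datetime64
    # array and decoding it again is expected to reproduce the original
    # variable exactly, including its dtype.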
encoding = { "units": "days since 1990-1-1", "dtype": np.dtype("float64"), "calendar": "standard", } times = date_range("2000", periods=0, unit=time_unit) variable = Variable(["time"], times, encoding=encoding) encoded = conventions.encode_cf_variable(variable, name="foo") assert encoded.dtype == np.dtype("float64") decode_times = CFDatetimeCoder(time_unit=time_unit) roundtripped = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times ) assert_identical(variable, roundtripped) assert roundtripped.dtype == variable.dtype xarray-2025.12.0/xarray/tests/test_combine.py000066400000000000000000001522701511464676000210470ustar00rootroot00000000000000from __future__ import annotations import re from itertools import product import numpy as np import pytest from xarray import ( DataArray, Dataset, DataTree, MergeError, combine_by_coords, combine_nested, concat, merge, set_options, ) from xarray.core import dtypes from xarray.structure.combine import ( _check_shape_tile_ids, _combine_all_along_first_dim, _combine_nd, _infer_concat_order_from_coords, _infer_concat_order_from_positions, _new_tile_id, ) from xarray.tests import assert_equal, assert_identical, requires_cftime from xarray.tests.test_dataset import create_test_data def assert_combined_tile_ids_equal(dict1: dict, dict2: dict) -> None: assert len(dict1) == len(dict2) for k in dict1.keys(): assert k in dict2.keys() assert_equal(dict1[k], dict2[k]) class TestTileIDsFromNestedList: def test_1d(self): ds = create_test_data input = [ds(0), ds(1)] expected = {(0,): ds(0), (1,): ds(1)} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_2d(self): ds = create_test_data input = [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]] expected = { (0, 0): ds(0), (0, 1): ds(1), (1, 0): ds(2), (1, 1): ds(3), (2, 0): ds(4), (2, 1): ds(5), } actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_3d(self): ds = create_test_data input = [ [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]], [[ds(6), ds(7)], [ds(8), ds(9)], [ds(10), ds(11)]], ] expected = { (0, 0, 0): ds(0), (0, 0, 1): ds(1), (0, 1, 0): ds(2), (0, 1, 1): ds(3), (0, 2, 0): ds(4), (0, 2, 1): ds(5), (1, 0, 0): ds(6), (1, 0, 1): ds(7), (1, 1, 0): ds(8), (1, 1, 1): ds(9), (1, 2, 0): ds(10), (1, 2, 1): ds(11), } actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_single_dataset(self): ds = create_test_data(0) input = [ds] expected = {(0,): ds} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_redundant_nesting(self): ds = create_test_data input = [[ds(0)], [ds(1)]] expected = {(0, 0): ds(0), (1, 0): ds(1)} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_ignore_empty_list(self): ds = create_test_data(0) input: list = [ds, []] expected = {(0,): ds} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_uneven_depth_input(self): # Auto_combine won't work on ragged input # but this is just to increase test coverage ds = create_test_data input: list = [ds(0), [ds(1), ds(2)]] expected = {(0,): ds(0), (1, 0): ds(1), (1, 1): ds(2)} actual: dict[tuple[int, 
...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_uneven_length_input(self): # Auto_combine won't work on ragged input # but this is just to increase test coverage ds = create_test_data input = [[ds(0)], [ds(1), ds(2)]] expected = {(0, 0): ds(0), (1, 0): ds(1), (1, 1): ds(2)} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) def test_infer_from_datasets(self): ds = create_test_data input = [ds(0), ds(1)] expected = {(0,): ds(0), (1,): ds(1)} actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions( input ) assert_combined_tile_ids_equal(expected, actual) class TestTileIDsFromCoords: def test_1d(self): ds0 = Dataset({"x": [0, 1]}) ds1 = Dataset({"x": [2, 3]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x"] def test_2d(self): ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30]}) ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30]}) ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60]}) ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60]}) ds4 = Dataset({"x": [0, 1], "y": [70, 80, 90]}) ds5 = Dataset({"x": [2, 3], "y": [70, 80, 90]}) expected = { (0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3, (0, 2): ds4, (1, 2): ds5, } actual, concat_dims = _infer_concat_order_from_coords( [ds1, ds0, ds3, ds5, ds2, ds4] ) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x", "y"] def test_no_dimension_coords(self): ds0 = Dataset({"foo": ("x", [0, 1])}) ds1 = Dataset({"foo": ("x", [2, 3])}) with pytest.raises(ValueError, match=r"Could not find any dimension"): _infer_concat_order_from_coords([ds1, ds0]) def test_coord_not_monotonic(self): ds0 = Dataset({"x": [0, 1]}) ds1 = Dataset({"x": [3, 2]}) with pytest.raises( ValueError, match=r"Coordinate variable x is neither monotonically increasing nor", ): _infer_concat_order_from_coords([ds1, ds0]) def test_coord_monotonically_decreasing(self): ds0 = Dataset({"x": [3, 2]}) ds1 = Dataset({"x": [1, 0]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x"] def test_no_concatenation_needed(self): ds = Dataset({"foo": ("x", [0, 1])}) expected = {(): ds} actual, concat_dims = _infer_concat_order_from_coords([ds]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == [] def test_2d_plus_bystander_dim(self): ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30], "t": [0.1, 0.2]}) ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30], "t": [0.1, 0.2]}) ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60], "t": [0.1, 0.2]}) ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60], "t": [0.1, 0.2]}) expected = {(0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0, ds3, ds2]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x", "y"] def test_string_coords(self): ds0 = Dataset({"person": ["Alice", "Bob"]}) ds1 = Dataset({"person": ["Caroline", "Daniel"]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["person"] # Decided against natural sorting of string coords GH #2616 def test_lexicographic_sort_string_coords(self): ds0 = Dataset({"simulation": ["run8", "run9"]}) 
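        # Plain lexicographic ordering puts "run10" before "run8"
        # (sorted(["run8", "run10"]) == ["run10", "run8"]), so the second
        # dataset defined below is expected to come first in the inferred order.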
ds1 = Dataset({"simulation": ["run10", "run11"]}) expected = {(0,): ds1, (1,): ds0} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["simulation"] def test_datetime_coords(self): ds0 = Dataset( {"time": np.array(["2000-03-06", "2000-03-07"], dtype="datetime64[ns]")} ) ds1 = Dataset( {"time": np.array(["1999-01-01", "1999-02-04"], dtype="datetime64[ns]")} ) expected = {(0,): ds1, (1,): ds0} actual, concat_dims = _infer_concat_order_from_coords([ds0, ds1]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["time"] @pytest.fixture(scope="module") def create_combined_ids(): return _create_combined_ids def _create_combined_ids(shape): tile_ids = _create_tile_ids(shape) nums = range(len(tile_ids)) return { tile_id: create_test_data(num) for tile_id, num in zip(tile_ids, nums, strict=True) } def _create_tile_ids(shape): tile_ids = product(*(range(i) for i in shape)) return list(tile_ids) class TestNewTileIDs: @pytest.mark.parametrize( "old_id, new_id", [((3, 0, 1), (0, 1)), ((0, 0), (0,)), ((1,), ()), ((0,), ()), ((1, 0), (0,))], ) def test_new_tile_id(self, old_id, new_id): ds = create_test_data assert _new_tile_id((old_id, ds)) == new_id def test_get_new_tile_ids(self, create_combined_ids): shape = (1, 2, 3) combined_ids = create_combined_ids(shape) expected_tile_ids = sorted(combined_ids.keys()) actual_tile_ids = _create_tile_ids(shape) assert expected_tile_ids == actual_tile_ids class TestCombineND: @pytest.mark.parametrize( "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})] ) def test_concat_once(self, create_combined_ids, concat_dim, kwargs): shape = (2,) combined_ids = create_combined_ids(shape) ds = create_test_data result = _combine_all_along_first_dim( combined_ids, dim=concat_dim, data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) expected_ds = concat([ds(0), ds(1)], dim=concat_dim, **kwargs) assert_combined_tile_ids_equal(result, {(): expected_ds}) def test_concat_only_first_dim(self, create_combined_ids): shape = (2, 3) combined_ids = create_combined_ids(shape) result = _combine_all_along_first_dim( combined_ids, dim="dim1", data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected_datasets = [partway1, partway2, partway3] expected = {(i,): ds for i, ds in enumerate(expected_datasets)} assert_combined_tile_ids_equal(result, expected) @pytest.mark.parametrize( "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})] ) def test_concat_twice(self, create_combined_ids, concat_dim, kwargs): shape = (2, 3) combined_ids = create_combined_ids(shape) result = _combine_nd( combined_ids, concat_dims=["dim1", concat_dim], data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], **kwargs, dim=concat_dim) assert_equal(result, expected) class TestCheckShapeTileIDs: def test_check_depths(self): ds = create_test_data(0) combined_tile_ids = {(0,): ds, (0, 1): ds} with pytest.raises( ValueError, 
match=r"sub-lists do not have consistent depths" ): _check_shape_tile_ids(combined_tile_ids) def test_check_lengths(self): ds = create_test_data(0) combined_tile_ids = {(0, 0): ds, (0, 1): ds, (0, 2): ds, (1, 0): ds, (1, 1): ds} with pytest.raises( ValueError, match=r"sub-lists do not have consistent lengths" ): _check_shape_tile_ids(combined_tile_ids) class TestNestedCombine: def test_nested_concat(self): objs = [Dataset({"x": [0]}), Dataset({"x": [1]})] expected = Dataset({"x": [0, 1]}) actual = combine_nested(objs, concat_dim="x") assert_identical(expected, actual) actual = combine_nested(objs, concat_dim=["x"]) assert_identical(expected, actual) actual = combine_nested([actual], concat_dim=None) assert_identical(expected, actual) actual = combine_nested([actual], concat_dim="x") assert_identical(expected, actual) objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})] actual = combine_nested(objs, concat_dim="x") expected = Dataset({"x": [0, 1, 2]}) assert_identical(expected, actual) # ensure combine_nested handles non-sorted variables objs = [ Dataset({"x": ("a", [0]), "y": ("a", [0])}), Dataset({"y": ("a", [1]), "x": ("a", [1])}), ] actual = combine_nested(objs, concat_dim="a") expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1])}) assert_identical(expected, actual) objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1]})] actual = combine_nested(objs, concat_dim="x") expected = Dataset({"x": [0, 1], "y": [0]}) assert_identical(expected, actual) @pytest.mark.parametrize( "join, expected", [ ("outer", Dataset({"x": [0, 1], "y": [0, 1]})), ("inner", Dataset({"x": [0, 1], "y": []})), ("left", Dataset({"x": [0, 1], "y": [0]})), ("right", Dataset({"x": [0, 1], "y": [1]})), ], ) def test_combine_nested_join(self, join, expected): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] actual = combine_nested(objs, concat_dim="x", join=join) assert_identical(expected, actual) def test_combine_nested_join_exact(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] with pytest.raises(ValueError, match=r"cannot align.*join.*exact"): combine_nested(objs, concat_dim="x", join="exact") def test_empty_input(self): assert_identical(Dataset(), combine_nested([], concat_dim="x")) # Fails because of concat's weird treatment of dimension coords, see #2975 @pytest.mark.xfail def test_nested_concat_too_many_dims_at_once(self): objs = [Dataset({"x": [0], "y": [1]}), Dataset({"y": [0], "x": [1]})] with pytest.raises(ValueError, match="not equal across datasets"): combine_nested(objs, concat_dim="x", coords="minimal") def test_nested_concat_along_new_dim(self): objs = [ Dataset({"a": ("x", [10]), "x": [0]}), Dataset({"a": ("x", [20]), "x": [0]}), ] expected = Dataset({"a": (("t", "x"), [[10], [20]]), "x": [0]}) actual = combine_nested(objs, data_vars="all", concat_dim="t") assert_identical(expected, actual) # Same but with a DataArray as new dim, see GH #1988 and #2647 dim = DataArray([100, 150], name="baz", dims="baz") expected = Dataset( {"a": (("baz", "x"), [[10], [20]]), "x": [0], "baz": [100, 150]} ) actual = combine_nested(objs, data_vars="all", concat_dim=dim) assert_identical(expected, actual) def test_nested_merge_with_self(self): data = Dataset({"x": 0}) actual = combine_nested([data, data, data], concat_dim=None) assert_identical(data, actual) def test_nested_merge_with_overlapping_values(self): ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) with 
pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): actual = combine_nested([ds1, ds2], join="outer", concat_dim=None) assert_identical(expected, actual) actual = combine_nested( [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=None ) assert_identical(expected, actual) actual = combine_nested( [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=[None] ) assert_identical(expected, actual) def test_nested_merge_with_nan_no_conflicts(self): tmp1 = Dataset({"x": 0}) tmp2 = Dataset({"x": np.nan}) actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=None) assert_identical(tmp1, actual) with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): combine_nested([tmp1, tmp2], concat_dim=None) actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=[None]) assert_identical(tmp1, actual) def test_nested_merge_with_concat_dim_explicitly_provided(self): # Test the issue reported in GH #1988 objs = [Dataset({"x": 0, "y": 1})] dim = DataArray([100], name="baz", dims="baz") actual = combine_nested(objs, concat_dim=[dim], data_vars="all") expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]}) assert_identical(expected, actual) def test_nested_merge_with_non_scalars(self): # Just making sure that auto_combine is doing what is # expected for non-scalar values, too. objs = [Dataset({"x": ("z", [0, 1]), "y": ("z", [1, 2])})] dim = DataArray([100], name="baz", dims="baz") actual = combine_nested(objs, concat_dim=[dim], data_vars="all") expected = Dataset( {"x": (("baz", "z"), [[0, 1]]), "y": (("baz", "z"), [[1, 2]])}, {"baz": [100]}, ) assert_identical(expected, actual) def test_concat_multiple_dims(self): objs = [ [Dataset({"a": (("x", "y"), [[0]])}), Dataset({"a": (("x", "y"), [[1]])})], [Dataset({"a": (("x", "y"), [[2]])}), Dataset({"a": (("x", "y"), [[3]])})], ] actual = combine_nested(objs, concat_dim=["x", "y"]) expected = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) assert_identical(expected, actual) def test_concat_name_symmetry(self): """Inspired by the discussion on GH issue #2777""" da1 = DataArray(name="a", data=[[0]], dims=["x", "y"]) da2 = DataArray(name="b", data=[[1]], dims=["x", "y"]) da3 = DataArray(name="a", data=[[2]], dims=["x", "y"]) da4 = DataArray(name="b", data=[[3]], dims=["x", "y"]) x_first = combine_nested([[da1, da2], [da3, da4]], concat_dim=["x", "y"]) y_first = combine_nested([[da1, da3], [da2, da4]], concat_dim=["y", "x"]) assert_identical(x_first, y_first) def test_concat_one_dim_merge_another(self): data = create_test_data(add_attrs=False) data1 = data.copy(deep=True) data2 = data.copy(deep=True) objs = [ [data1.var1.isel(dim2=slice(4)), data2.var1.isel(dim2=slice(4, 9))], [data1.var2.isel(dim2=slice(4)), data2.var2.isel(dim2=slice(4, 9))], ] expected = data[["var1", "var2"]] actual = combine_nested(objs, concat_dim=[None, "dim2"]) assert_identical(expected, actual) def test_auto_combine_2d(self): ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2") datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]] result = combine_nested( datasets, data_vars="all", concat_dim=["dim1", "dim2"], ) assert_equal(result, expected) def test_auto_combine_2d_combine_attrs_kwarg(self): ds = lambda x: create_test_data(x, add_attrs=False) 
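        # Build the expected result (and its attrs) for each combine_attrs
        # option exercised below ("drop", "no_conflicts", "override", and a
        # callable), then compare against combine_nested run with that option.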
partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2") expected_dict = {} expected_dict["drop"] = expected.copy(deep=True) expected_dict["drop"].attrs = {} expected_dict["no_conflicts"] = expected.copy(deep=True) expected_dict["no_conflicts"].attrs = { "a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, } expected_dict["override"] = expected.copy(deep=True) expected_dict["override"].attrs = {"a": 1} f = lambda attrs, context: attrs[0] expected_dict[f] = expected.copy(deep=True) # type: ignore[index] expected_dict[f].attrs = f([{"a": 1}], None) # type: ignore[index] datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]] datasets[0][0].attrs = {"a": 1} datasets[0][1].attrs = {"a": 1, "b": 2} datasets[0][2].attrs = {"a": 1, "c": 3} datasets[1][0].attrs = {"a": 1, "d": 4} datasets[1][1].attrs = {"a": 1, "e": 5} datasets[1][2].attrs = {"a": 1, "f": 6} with pytest.raises(ValueError, match=r"combine_attrs='identical'"): result = combine_nested( datasets, concat_dim=["dim1", "dim2"], data_vars="all", combine_attrs="identical", ) for combine_attrs, expected in expected_dict.items(): result = combine_nested( datasets, concat_dim=["dim1", "dim2"], data_vars="all", combine_attrs=combine_attrs, ) # type: ignore[call-overload] assert_identical(result, expected) def test_combine_nested_missing_data_new_dim(self): # Your data includes "time" and "station" dimensions, and each year's # data has a different set of stations. datasets = [ Dataset({"a": ("x", [2, 3]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "x": [0, 1]}), ] expected = Dataset( {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]} ) actual = combine_nested(datasets, data_vars="all", join="outer", concat_dim="t") assert_identical(expected, actual) def test_invalid_hypercube_input(self): ds = create_test_data datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4)]] with pytest.raises( ValueError, match=r"sub-lists do not have consistent lengths" ): combine_nested(datasets, concat_dim=["dim1", "dim2"]) datasets2: list = [[ds(0), ds(1)], [[ds(3), ds(4)]]] with pytest.raises( ValueError, match=r"sub-lists do not have consistent depths" ): combine_nested(datasets2, concat_dim=["dim1", "dim2"]) datasets = [[ds(0), ds(1)], [ds(3), ds(4)]] with pytest.raises(ValueError, match=r"concat_dims has length"): combine_nested(datasets, concat_dim=["dim1"]) def test_merge_one_dim_concat_another(self): objs = [ [Dataset({"foo": ("x", [0, 1])}), Dataset({"bar": ("x", [10, 20])})], [Dataset({"foo": ("x", [2, 3])}), Dataset({"bar": ("x", [30, 40])})], ] expected = Dataset({"foo": ("x", [0, 1, 2, 3]), "bar": ("x", [10, 20, 30, 40])}) actual = combine_nested(objs, concat_dim=["x", None], compat="equals") assert_identical(expected, actual) # Proving it works symmetrically objs = [ [Dataset({"foo": ("x", [0, 1])}), Dataset({"foo": ("x", [2, 3])})], [Dataset({"bar": ("x", [10, 20])}), Dataset({"bar": ("x", [30, 40])})], ] actual = combine_nested(objs, concat_dim=[None, "x"], compat="equals") assert_identical(expected, actual) def test_combine_concat_over_redundant_nesting(self): objs = [[Dataset({"x": [0]}), Dataset({"x": [1]})]] actual = combine_nested(objs, concat_dim=[None, "x"]) expected = Dataset({"x": [0, 1]}) assert_identical(expected, actual) objs = [[Dataset({"x": [0]})], [Dataset({"x": [1]})]] actual = combine_nested(objs, concat_dim=["x", None]) expected = Dataset({"x": [0, 1]}) 
assert_identical(expected, actual) objs = [[Dataset({"x": [0]})]] actual = combine_nested(objs, concat_dim=[None, None]) expected = Dataset({"x": [0]}) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}]) def test_combine_nested_fill_value(self, fill_value): datasets = [ Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}), ] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = Dataset( { "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]), }, {"x": [0, 1, 2]}, ) actual = combine_nested( datasets, concat_dim="t", data_vars="all", join="outer", fill_value=fill_value, ) assert_identical(expected, actual) def test_combine_nested_unnamed_data_arrays(self): unnamed_array = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_nested([unnamed_array], concat_dim="x") expected = unnamed_array assert_identical(expected, actual) unnamed_array1 = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_array2 = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_nested([unnamed_array1, unnamed_array2], concat_dim="x") expected = DataArray( data=[1.0, 2.0, 3.0, 4.0], coords={"x": [0, 1, 2, 3]}, dims="x" ) assert_identical(expected, actual) da1 = DataArray(data=[[0.0]], coords={"x": [0], "y": [0]}, dims=["x", "y"]) da2 = DataArray(data=[[1.0]], coords={"x": [0], "y": [1]}, dims=["x", "y"]) da3 = DataArray(data=[[2.0]], coords={"x": [1], "y": [0]}, dims=["x", "y"]) da4 = DataArray(data=[[3.0]], coords={"x": [1], "y": [1]}, dims=["x", "y"]) objs = [[da1, da2], [da3, da4]] expected = DataArray( data=[[0.0, 1.0], [2.0, 3.0]], coords={"x": [0, 1], "y": [0, 1]}, dims=["x", "y"], ) actual = combine_nested(objs, concat_dim=["x", "y"]) assert_identical(expected, actual) # TODO aijams - Determine if this test is appropriate. def test_nested_combine_mixed_datasets_arrays(self): objs = [ DataArray([0, 1], dims=("x"), coords=({"x": [0, 1]})), Dataset({"x": [2, 3]}), ] with pytest.raises( ValueError, match=r"Can't combine datasets with unnamed arrays." 
): combine_nested(objs, "x") # type: ignore[arg-type] def test_nested_combine_mixed_datatrees_and_datasets(self): objs = [DataTree.from_dict({"foo": 0}), Dataset({"foo": 1})] with pytest.raises( ValueError, match=r"Can't combine a mix of DataTree and non-DataTree objects.", ): combine_nested(objs, concat_dim="x") # type: ignore[arg-type] def test_datatree(self): objs = [DataTree.from_dict({"foo": 0}), DataTree.from_dict({"foo": 1})] expected = DataTree.from_dict({"foo": ("x", [0, 1])}) actual = combine_nested(objs, concat_dim="x") assert expected.identical(actual) class TestCombineDatasetsbyCoords: def test_combine_by_coords(self): objs = [Dataset({"x": [0]}), Dataset({"x": [1]})] actual = combine_by_coords(objs) expected = Dataset({"x": [0, 1]}) assert_identical(expected, actual) actual = combine_by_coords([actual]) assert_identical(expected, actual) objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})] actual = combine_by_coords(objs) expected = Dataset({"x": [0, 1, 2]}) assert_identical(expected, actual) def test_combine_by_coords_handles_non_sorted_variables(self): # ensure auto_combine handles non-sorted variables objs = [ Dataset({"x": ("a", [0]), "y": ("a", [0]), "a": [0]}), Dataset({"x": ("a", [1]), "y": ("a", [1]), "a": [1]}), ] actual = combine_by_coords(objs, join="outer") expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1]), "a": [0, 1]}) assert_identical(expected, actual) def test_combine_by_coords_multiple_variables(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})] actual = combine_by_coords(objs, join="outer") expected = Dataset({"x": [0, 1], "y": [0, 1]}) assert_equal(actual, expected) def test_combine_by_coords_for_scalar_variables(self): objs = [Dataset({"x": 0}), Dataset({"x": 1})] with pytest.raises( ValueError, match=r"Could not find any dimension coordinates" ): combine_by_coords(objs) def test_combine_by_coords_requires_coord_or_index(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})] with pytest.raises( ValueError, match=r"Every dimension requires a corresponding 1D coordinate and index", ): combine_by_coords(objs) def test_empty_input(self): assert_identical(Dataset(), combine_by_coords([])) @pytest.mark.parametrize( "join, expected", [ ("outer", Dataset({"x": [0, 1], "y": [0, 1]})), ("inner", Dataset({"x": [0, 1], "y": []})), ("left", Dataset({"x": [0, 1], "y": [0]})), ("right", Dataset({"x": [0, 1], "y": [1]})), ], ) def test_combine_coords_join(self, join, expected): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] actual = combine_nested(objs, concat_dim="x", join=join) assert_identical(expected, actual) def test_combine_coords_join_exact(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"): combine_nested(objs, concat_dim="x", join="exact") @pytest.mark.parametrize( "combine_attrs, expected", [ ("drop", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={})), ( "no_conflicts", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}), ), ("override", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})), ( lambda attrs, context: attrs[1], Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}), ), ], ) def test_combine_coords_combine_attrs(self, combine_attrs, expected): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 2}), ] actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs=combine_attrs ) assert_identical(expected, actual) 
if combine_attrs == "no_conflicts": objs[1].attrs["a"] = 2 with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"): actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs=combine_attrs ) def test_combine_coords_combine_attrs_identical(self): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1}), ] expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1}) actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="identical" ) assert_identical(expected, actual) objs[1].attrs["b"] = 2 with pytest.raises(ValueError, match=r"combine_attrs='identical'"): actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="identical" ) def test_combine_nested_combine_attrs_drop_conflicts(self): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1, "b": 2, "c": 3}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 0, "d": 3}), ] expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "c": 3, "d": 3}) actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="drop_conflicts" ) assert_identical(expected, actual) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 1, "c": 3, "d": 4}, False, ), ], ) def test_combine_nested_combine_attrs_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" data1 = Dataset( { "a": ("x", [1, 2], attrs1), "b": ("x", [3, -1], attrs1), "x": ("x", [0, 1], attrs1), } ) data2 = Dataset( { "a": ("x", [2, 3], attrs2), "b": ("x", [-2, 1], attrs2), "x": ("x", [2, 3], attrs2), } ) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): combine_by_coords([data1, data2], combine_attrs=combine_attrs) else: actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs) expected = Dataset( { "a": ("x", [1, 2, 2, 3], expected_attrs), "b": ("x", [3, -1, -2, 1], expected_attrs), }, {"x": ("x", [0, 1, 2, 3], expected_attrs)}, ) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 
1, "c": 3, "d": 4}, False, ), ], ) def test_combine_by_coords_combine_attrs_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" data1 = Dataset( {"x": ("a", [0], attrs1), "y": ("a", [0], attrs1), "a": ("a", [0], attrs1)} ) data2 = Dataset( {"x": ("a", [1], attrs2), "y": ("a", [1], attrs2), "a": ("a", [1], attrs2)} ) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): combine_by_coords([data1, data2], combine_attrs=combine_attrs) else: actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs) expected = Dataset( { "x": ("a", [0, 1], expected_attrs), "y": ("a", [0, 1], expected_attrs), "a": ("a", [0, 1], expected_attrs), } ) assert_identical(actual, expected) def test_infer_order_from_coords(self): data = create_test_data() objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))] actual = combine_by_coords(objs, data_vars="all") expected = data assert expected.broadcast_equals(actual) # type: ignore[arg-type] with set_options(use_new_combine_kwarg_defaults=True): actual = combine_by_coords(objs) assert_identical(actual, expected) def test_combine_leaving_bystander_dimensions(self): # Check non-monotonic bystander dimension coord doesn't raise # ValueError on combine (https://github.com/pydata/xarray/issues/3150) ycoord = ["a", "c", "b"] data = np.random.rand(7, 3) ds1 = Dataset( data_vars=dict(data=(["x", "y"], data[:3, :])), coords=dict(x=[1, 2, 3], y=ycoord), ) ds2 = Dataset( data_vars=dict(data=(["x", "y"], data[3:, :])), coords=dict(x=[4, 5, 6, 7], y=ycoord), ) expected = Dataset( data_vars=dict(data=(["x", "y"], data)), coords=dict(x=[1, 2, 3, 4, 5, 6, 7], y=ycoord), ) actual = combine_by_coords((ds1, ds2)) assert_identical(expected, actual) def test_combine_by_coords_previously_failed(self): # In the above scenario, one file is missing, containing the data for # one year's data for one variable. 
datasets = [ Dataset({"a": ("x", [0]), "x": [0]}), Dataset({"b": ("x", [0]), "x": [0]}), Dataset({"a": ("x", [1]), "x": [1]}), ] expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]}) actual = combine_by_coords(datasets, join="outer") assert_identical(expected, actual) def test_combine_by_coords_still_fails(self): # concat can't handle new variables (yet): # https://github.com/pydata/xarray/issues/508 datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})] with pytest.raises(ValueError): combine_by_coords(datasets, "y") # type: ignore[arg-type] def test_combine_by_coords_no_concat(self): objs = [Dataset({"x": 0}), Dataset({"y": 1})] actual = combine_by_coords(objs) expected = Dataset({"x": 0, "y": 1}) assert_identical(expected, actual) objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})] actual = combine_by_coords(objs, compat="no_conflicts") expected = Dataset({"x": 0, "y": 1, "z": 2}) assert_identical(expected, actual) def test_check_for_impossible_ordering(self): ds0 = Dataset({"x": [0, 1, 5]}) ds1 = Dataset({"x": [2, 3]}) with pytest.raises( ValueError, match=r"does not have monotonic global indexes along dimension x", ): combine_by_coords([ds1, ds0]) def test_combine_by_coords_incomplete_hypercube(self): # test that this succeeds with default fill_value x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}) x2 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [1], "x": [0]}) x3 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [1]}) actual = combine_by_coords([x1, x2, x3], join="outer") expected = Dataset( {"a": (("y", "x"), [[1, 1], [1, np.nan]])}, coords={"y": [0, 1], "x": [0, 1]}, ) assert_identical(expected, actual) # test that this fails if fill_value is None with pytest.raises( ValueError, match="supplied objects do not form a hypercube" ): combine_by_coords([x1, x2, x3], join="outer", fill_value=None) def test_combine_by_coords_override_order(self) -> None: # regression test for https://github.com/pydata/xarray/issues/8828 x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}) x2 = Dataset( {"a": (("y", "x"), [[2]]), "b": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}, ) actual = combine_by_coords([x1, x2], compat="override") assert_equal(actual["a"], actual["b"]) assert_equal(actual["a"], x1["a"]) actual = combine_by_coords([x2, x1], compat="override") assert_equal(actual["a"], x2["a"]) class TestCombineMixedObjectsbyCoords: def test_combine_by_coords_mixed_unnamed_dataarrays(self): named_da = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_da = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") with pytest.raises( ValueError, match="Can't automatically combine unnamed DataArrays with" ): combine_by_coords([named_da, unnamed_da]) da = DataArray([0, 1], dims="x", coords=({"x": [0, 1]})) ds = Dataset({"x": [2, 3]}) with pytest.raises( ValueError, match="Can't automatically combine unnamed DataArrays with", ): combine_by_coords([da, ds]) def test_combine_coords_mixed_datasets_named_dataarrays(self): da = DataArray(name="a", data=[4, 5], dims="x", coords=({"x": [0, 1]})) ds = Dataset({"b": ("x", [2, 3])}) actual = combine_by_coords([da, ds]) expected = Dataset( {"a": ("x", [4, 5]), "b": ("x", [2, 3])}, coords={"x": ("x", [0, 1])} ) assert_identical(expected, actual) def test_combine_by_coords_all_unnamed_dataarrays(self): unnamed_array = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_by_coords([unnamed_array]) 
expected = unnamed_array assert_identical(expected, actual) unnamed_array1 = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_array2 = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([unnamed_array1, unnamed_array2]) expected = DataArray( data=[1.0, 2.0, 3.0, 4.0], coords={"x": [0, 1, 2, 3]}, dims="x" ) assert_identical(expected, actual) def test_combine_by_coords_all_named_dataarrays(self): named_da = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_by_coords([named_da]) expected = named_da.to_dataset() assert_identical(expected, actual) named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") named_da2 = DataArray(name="b", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([named_da1, named_da2], join="outer") expected = Dataset( { "a": DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"), "b": DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"), } ) assert_identical(expected, actual) def test_combine_by_coords_all_dataarrays_with_the_same_name(self): named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") named_da2 = DataArray(name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([named_da1, named_da2], join="outer") expected = merge([named_da1, named_da2], compat="no_conflicts", join="outer") assert_identical(expected, actual) def test_combine_by_coords_datatree(self): tree = DataTree.from_dict({"/nested/foo": ("x", [10])}, coords={"x": [1]}) with pytest.raises( NotImplementedError, match=re.escape( "combine_by_coords() does not yet support DataTree objects." ), ): combine_by_coords([tree]) # type: ignore[list-item] class TestNewDefaults: def test_concat_along_existing_dim(self): concat_dim = "dim1" ds = create_test_data with set_options(use_new_combine_kwarg_defaults=False): old = concat([ds(0), ds(1)], dim=concat_dim) with set_options(use_new_combine_kwarg_defaults=True): new = concat([ds(0), ds(1)], dim=concat_dim) assert_identical(old, new) def test_concat_along_new_dim(self): concat_dim = "new_dim" ds = create_test_data with set_options(use_new_combine_kwarg_defaults=False): old = concat([ds(0), ds(1)], dim=concat_dim) with set_options(use_new_combine_kwarg_defaults=True): new = concat([ds(0), ds(1)], dim=concat_dim) assert concat_dim in old.dims assert concat_dim in new.dims def test_nested_merge_with_overlapping_values(self): ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds1, ds2], concat_dim=None) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_nested([ds1, ds2], concat_dim=None) assert_identical(old, expected) def test_nested_merge_with_nan_order_matters(self): ds1 = Dataset({"x": 0}) ds2 = Dataset({"x": np.nan}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds1, ds2], concat_dim=None) with set_options(use_new_combine_kwarg_defaults=True): new = combine_nested([ds1, 
ds2], concat_dim=None) assert_identical(ds1, old) assert_identical(old, new) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds2, ds1], concat_dim=None) with set_options(use_new_combine_kwarg_defaults=True): new = combine_nested([ds2, ds1], concat_dim=None) assert_identical(ds1, old) with pytest.raises(AssertionError): assert_identical(old, new) def test_nested_merge_with_concat_dim_explicitly_provided(self): # Test the issue reported in GH #1988 objs = [Dataset({"x": 0, "y": 1})] dim = DataArray([100], name="baz", dims="baz") expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]}) with set_options(use_new_combine_kwarg_defaults=False): old = combine_nested(objs, concat_dim=dim) with set_options(use_new_combine_kwarg_defaults=True): new = combine_nested(objs, concat_dim=dim) assert_identical(expected, old) assert_identical(old, new) def test_combine_nested_missing_data_new_dim(self): # Your data includes "time" and "station" dimensions, and each year's # data has a different set of stations. datasets = [ Dataset({"a": ("x", [2, 3]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "x": [0, 1]}), ] expected = Dataset( {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]} ) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): old = combine_nested(datasets, concat_dim="t") with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_nested(datasets, concat_dim="t") new = combine_nested(datasets, concat_dim="t", join="outer") assert_identical(expected, old) assert_identical(expected, new) def test_combine_by_coords_multiple_variables(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})] expected = Dataset({"x": [0, 1], "y": [0, 1]}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): old = combine_by_coords(objs) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_by_coords(objs) assert_identical(old, expected) @requires_cftime def test_combine_by_coords_distant_cftime_dates(): # Regression test for https://github.com/pydata/xarray/issues/3535 import cftime time_1 = [cftime.DatetimeGregorian(4500, 12, 31)] time_2 = [cftime.DatetimeGregorian(4600, 12, 31)] time_3 = [cftime.DatetimeGregorian(5100, 12, 31)] da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset() da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset() da_3 = DataArray([2], dims=["time"], coords=[time_3], name="a").to_dataset() result = combine_by_coords([da_1, da_2, da_3]) expected_time = np.concatenate([time_1, time_2, time_3]) expected = DataArray( [0, 1, 2], dims=["time"], coords=[expected_time], name="a" ).to_dataset() assert_identical(result, expected) @requires_cftime def test_combine_by_coords_raises_for_differing_calendars(): # previously failed with uninformative StopIteration instead of TypeError # https://github.com/pydata/xarray/issues/4495 import cftime time_1 = [cftime.DatetimeGregorian(2000, 1, 1)] time_2 = [cftime.DatetimeProlepticGregorian(2001, 1, 1)] da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset() da_2 = 
    DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset()
    error_msg = (
        "Cannot combine along dimension 'time' with mixed types."
        " Found:.*"
        " If importing data directly from a file then setting"
        " `use_cftime=True` may fix this issue."
    )
    with pytest.raises(TypeError, match=error_msg):
        combine_by_coords([da_1, da_2])


def test_combine_by_coords_raises_for_differing_types():
    # str and byte cannot be compared
    da_1 = DataArray([0], dims=["time"], coords=[["a"]], name="a").to_dataset()
    da_2 = DataArray([1], dims=["time"], coords=[[b"b"]], name="a").to_dataset()
    with pytest.raises(
        TypeError, match=r"Cannot combine along dimension 'time' with mixed types."
    ):
        combine_by_coords([da_1, da_2])

xarray-2025.12.0/xarray/tests/test_computation.py000066400000000000000000002534341511464676000220010ustar00rootroot00000000000000
from __future__ import annotations

import functools
import operator
import pickle

import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose, assert_array_equal

import xarray as xr
from xarray.computation.apply_ufunc import (
    _UFuncSignature,
    apply_ufunc,
    broadcast_compat_data,
    collect_dict_values,
    join_dict_keys,
    ordered_set_intersection,
    ordered_set_union,
    unified_dim_sizes,
)
from xarray.core.utils import result_name
from xarray.structure.alignment import broadcast
from xarray.tests import (
    has_dask,
    raise_if_dask_computes,
    requires_cftime,
    requires_dask,
)


def assert_identical(a, b):
    """A version of this function which accepts numpy arrays"""
    __tracebackhide__ = True
    from xarray.testing import assert_identical as assert_identical_

    if hasattr(a, "identical"):
        assert_identical_(a, b)
    else:
        assert_array_equal(a, b)


def test_signature_properties() -> None:
    sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]])
    assert sig.input_core_dims == (("x",), ("x", "y"))
    assert sig.output_core_dims == (("z",),)
    assert sig.all_input_core_dims == frozenset(["x", "y"])
    assert sig.all_output_core_dims == frozenset(["z"])
    assert sig.num_inputs == 2
    assert sig.num_outputs == 1
    assert str(sig) == "(x),(x,y)->(z)"
    assert sig.to_gufunc_string() == "(dim0),(dim0,dim1)->(dim2)"
    assert (
        sig.to_gufunc_string(exclude_dims=set("x")) == "(dim0_0),(dim0_1,dim1)->(dim2)"
    )
    # dimension names matter
    assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]])


def test_result_name() -> None:
    class Named:
        def __init__(self, name=None):
            self.name = name

    assert result_name([1, 2]) is None
    assert result_name([Named()]) is None
    assert result_name([Named("foo"), 2]) == "foo"
    assert result_name([Named("foo"), Named("bar")]) is None
    assert result_name([Named("foo"), Named()]) is None


def test_ordered_set_union() -> None:
    assert list(ordered_set_union([[1, 2]])) == [1, 2]
    assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2]
    assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3]


def test_ordered_set_intersection() -> None:
    assert list(ordered_set_intersection([[1, 2]])) == [1, 2]
    assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2]
    assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1]
    assert list(ordered_set_intersection([[1, 2], [2]])) == [2]


def test_join_dict_keys() -> None:
    dicts = [dict.fromkeys(keys) for keys in [["x", "y"], ["y", "z"]]]
    assert list(join_dict_keys(dicts, "left")) == ["x", "y"]
    assert list(join_dict_keys(dicts, "right")) == ["y", "z"]
    assert list(join_dict_keys(dicts, "inner")) == ["y"]
    assert list(join_dict_keys(dicts, "outer")) == ["x", "y", "z"]
    with pytest.raises(ValueError):
        join_dict_keys(dicts, "exact")
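# --- Editor's note (illustrative sketch, not part of the upstream test suite) ---
# The tests in this file exercise apply_ufunc's signature machinery
# (_UFuncSignature, join_dict_keys, ...). For orientation only, the snippet
# below sketches how input_core_dims maps a reduction onto a "(time)->()"
# style signature. All variable names here are hypothetical.
import numpy as np
import xarray as xr

_da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("space", "time"))

# Core dimensions are moved to the end of the array before the function is
# applied, so reducing over axis=-1 collapses exactly the declared core dim.
_mean = xr.apply_ufunc(
    np.mean,
    _da,
    input_core_dims=[["time"]],
    kwargs={"axis": -1},
)
assert _mean.dims == ("space",)
# --- end editor's note ---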
with pytest.raises(KeyError): join_dict_keys(dicts, "foobar") def test_collect_dict_values() -> None: dicts = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5] expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]] collected = collect_dict_values(dicts, ["x", "y", "z"], fill_value=0) assert collected == expected def identity(x): return x def test_apply_identity() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) apply_identity = functools.partial(apply_ufunc, identity) assert_identical(array, apply_identity(array)) assert_identical(variable, apply_identity(variable)) assert_identical(data_array, apply_identity(data_array)) assert_identical(data_array, apply_identity(data_array.groupby("x"))) assert_identical(dataset, apply_identity(dataset)) assert_identical(dataset, apply_identity(dataset.groupby("x"))) def add(a, b): return apply_ufunc(operator.add, a, b) def test_apply_two_inputs() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) zero_array = np.zeros_like(array) zero_variable = xr.Variable("x", zero_array) zero_data_array = xr.DataArray(zero_variable, [("x", -array)]) zero_dataset = xr.Dataset({"y": zero_variable}, {"x": -array}) assert_identical(array, add(array, zero_array)) assert_identical(array, add(zero_array, array)) assert_identical(variable, add(variable, zero_array)) assert_identical(variable, add(variable, zero_variable)) assert_identical(variable, add(zero_array, variable)) assert_identical(variable, add(zero_variable, variable)) assert_identical(data_array, add(data_array, zero_array)) assert_identical(data_array, add(data_array, zero_variable)) assert_identical(data_array, add(data_array, zero_data_array)) assert_identical(data_array, add(zero_array, data_array)) assert_identical(data_array, add(zero_variable, data_array)) assert_identical(data_array, add(zero_data_array, data_array)) assert_identical(dataset, add(dataset, zero_array)) assert_identical(dataset, add(dataset, zero_variable)) assert_identical(dataset, add(dataset, zero_data_array)) assert_identical(dataset, add(dataset, zero_dataset)) assert_identical(dataset, add(zero_array, dataset)) assert_identical(dataset, add(zero_variable, dataset)) assert_identical(dataset, add(zero_data_array, dataset)) assert_identical(dataset, add(zero_dataset, dataset)) assert_identical(data_array, add(data_array.groupby("x"), zero_data_array)) assert_identical(data_array, add(zero_data_array, data_array.groupby("x"))) assert_identical(dataset, add(data_array.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_dataset, data_array.groupby("x"))) assert_identical(dataset, add(dataset.groupby("x"), zero_data_array)) assert_identical(dataset, add(dataset.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_data_array, dataset.groupby("x"))) assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) def test_apply_1d_and_0d() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) zero_array = 0 zero_variable = xr.Variable((), zero_array) zero_data_array = xr.DataArray(zero_variable) zero_dataset = xr.Dataset({"y": zero_variable}) assert_identical(array, add(array, zero_array)) assert_identical(array, add(zero_array, array)) assert_identical(variable, 
add(variable, zero_array)) assert_identical(variable, add(variable, zero_variable)) assert_identical(variable, add(zero_array, variable)) assert_identical(variable, add(zero_variable, variable)) assert_identical(data_array, add(data_array, zero_array)) assert_identical(data_array, add(data_array, zero_variable)) assert_identical(data_array, add(data_array, zero_data_array)) assert_identical(data_array, add(zero_array, data_array)) assert_identical(data_array, add(zero_variable, data_array)) assert_identical(data_array, add(zero_data_array, data_array)) assert_identical(dataset, add(dataset, zero_array)) assert_identical(dataset, add(dataset, zero_variable)) assert_identical(dataset, add(dataset, zero_data_array)) assert_identical(dataset, add(dataset, zero_dataset)) assert_identical(dataset, add(zero_array, dataset)) assert_identical(dataset, add(zero_variable, dataset)) assert_identical(dataset, add(zero_data_array, dataset)) assert_identical(dataset, add(zero_dataset, dataset)) assert_identical(data_array, add(data_array.groupby("x"), zero_data_array)) assert_identical(data_array, add(zero_data_array, data_array.groupby("x"))) assert_identical(dataset, add(data_array.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_dataset, data_array.groupby("x"))) assert_identical(dataset, add(dataset.groupby("x"), zero_data_array)) assert_identical(dataset, add(dataset.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_data_array, dataset.groupby("x"))) assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) def test_apply_two_outputs() -> None: array = np.arange(5) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) def twice(obj): def func(x): return (x, x) return apply_ufunc(func, obj, output_core_dims=[[], []]) out0, out1 = twice(array) assert_identical(out0, array) assert_identical(out1, array) out0, out1 = twice(variable) assert_identical(out0, variable) assert_identical(out1, variable) out0, out1 = twice(data_array) assert_identical(out0, data_array) assert_identical(out1, data_array) out0, out1 = twice(dataset) assert_identical(out0, dataset) assert_identical(out1, dataset) out0, out1 = twice(data_array.groupby("x")) assert_identical(out0, data_array) assert_identical(out1, data_array) out0, out1 = twice(dataset.groupby("x")) assert_identical(out0, dataset) assert_identical(out1, dataset) def test_apply_missing_dims() -> None: ## Single arg def add_one(a, core_dims, on_missing_core_dim): return apply_ufunc( lambda x: x + 1, a, input_core_dims=core_dims, output_core_dims=core_dims, on_missing_core_dim=on_missing_core_dim, ) array = np.arange(6).reshape(2, 3) variable = xr.Variable(["x", "y"], array) variable_no_y = xr.Variable(["x", "z"], array) ds = xr.Dataset({"x_y": variable, "x_z": variable_no_y}) # Check the standard stuff works OK assert_identical( add_one(ds[["x_y"]], core_dims=[["y"]], on_missing_core_dim="raise"), ds[["x_y"]] + 1, ) # `raise` โ€”ย should raise on a missing dim with pytest.raises(ValueError): add_one(ds, core_dims=[["y"]], on_missing_core_dim="raise") # `drop` โ€” should drop the var with the missing dim assert_identical( add_one(ds, core_dims=[["y"]], on_missing_core_dim="drop"), (ds + 1).drop_vars("x_z"), ) # `copy` โ€” should not add one to the missing with `copy` copy_result = add_one(ds, core_dims=[["y"]], on_missing_core_dim="copy") assert_identical(copy_result["x_y"], (ds + 1)["x_y"]) assert_identical(copy_result["x_z"], ds["x_z"]) ## 
Multiple args def sum_add(a, b, core_dims, on_missing_core_dim): return apply_ufunc( lambda a, b, axis=None: a.sum(axis) + b.sum(axis), a, b, input_core_dims=core_dims, on_missing_core_dim=on_missing_core_dim, ) # Check the standard stuff works OK assert_identical( sum_add( ds[["x_y"]], ds[["x_y"]], core_dims=[["x", "y"], ["x", "y"]], on_missing_core_dim="raise", ), ds[["x_y"]].sum() * 2, ) # `raise` โ€”ย should raise on a missing dim with pytest.raises( ValueError, match=r".*Missing core dims \{'y'\} from arg number 1 on a variable named `x_z`:\n.* None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) def twice(obj): def func(x): return (x, x) return apply_ufunc(func, obj, output_core_dims=[[], []], dask="parallelized") out0, out1 = twice(data_array.chunk({"x": 1})) assert_identical(data_array, out0) assert_identical(data_array, out1) def test_apply_input_core_dimension() -> None: def first_element(obj, dim): def func(x): return x[..., 0] return apply_ufunc(func, obj, input_core_dims=[[dim]]) array = np.array([[1, 2], [3, 4]]) variable = xr.Variable(["x", "y"], array) data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]}) dataset = xr.Dataset({"data": data_array}) expected_variable_x = xr.Variable(["y"], [1, 2]) expected_data_array_x = xr.DataArray(expected_variable_x, {"y": [-1, -2]}) expected_dataset_x = xr.Dataset({"data": expected_data_array_x}) expected_variable_y = xr.Variable(["x"], [1, 3]) expected_data_array_y = xr.DataArray(expected_variable_y, {"x": ["a", "b"]}) expected_dataset_y = xr.Dataset({"data": expected_data_array_y}) assert_identical(expected_variable_x, first_element(variable, "x")) assert_identical(expected_variable_y, first_element(variable, "y")) assert_identical(expected_data_array_x, first_element(data_array, "x")) assert_identical(expected_data_array_y, first_element(data_array, "y")) assert_identical(expected_dataset_x, first_element(dataset, "x")) assert_identical(expected_dataset_y, first_element(dataset, "y")) assert_identical(expected_data_array_x, first_element(data_array.groupby("y"), "x")) assert_identical(expected_dataset_x, first_element(dataset.groupby("y"), "x")) def multiply(*args): val = args[0] for arg in args[1:]: val = val * arg return val # regression test for GH:2341 with pytest.raises(ValueError): apply_ufunc( multiply, data_array, data_array["y"].values, input_core_dims=[["y"]], output_core_dims=[["y"]], ) expected = xr.DataArray( multiply(data_array, data_array["y"]), dims=["x", "y"], coords=data_array.coords ) actual = apply_ufunc( multiply, data_array, data_array["y"].values, input_core_dims=[["y"], []], output_core_dims=[["y"]], ) assert_identical(expected, actual) def test_apply_output_core_dimension() -> None: def stack_negative(obj): def func(x): return np.stack([x, -x], axis=-1) result = apply_ufunc(func, obj, output_core_dims=[["sign"]]) if isinstance(result, xr.Dataset | xr.DataArray): result.coords["sign"] = [1, -1] return result array = np.array([[1, 2], [3, 4]]) variable = xr.Variable(["x", "y"], array) data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]}) dataset = xr.Dataset({"data": data_array}) stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]]) stacked_variable = xr.Variable(["x", "y", "sign"], stacked_array) stacked_coords = {"x": ["a", "b"], "y": [-1, -2], "sign": [1, -1]} stacked_data_array = xr.DataArray(stacked_variable, stacked_coords) stacked_dataset = xr.Dataset({"data": stacked_data_array}) assert_identical(stacked_array, stack_negative(array)) 
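# --- Editor's note (illustrative sketch, not part of the upstream test suite) ---
# Companion sketch to the output_core_dims tests around it: a function that
# appends one trailing axis gets that axis named via output_core_dims. The
# names below are hypothetical and chosen only for this illustration.
import numpy as np
import xarray as xr

_da = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"))

_stacked = xr.apply_ufunc(
    lambda arr: np.stack([arr, -arr], axis=-1),  # adds a trailing axis of size 2
    _da,
    output_core_dims=[["sign"]],  # name that new trailing axis "sign"
)
assert _stacked.dims == ("x", "y", "sign")
assert _stacked.sizes["sign"] == 2
# --- end editor's note ---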
assert_identical(stacked_variable, stack_negative(variable)) assert_identical(stacked_data_array, stack_negative(data_array)) assert_identical(stacked_dataset, stack_negative(dataset)) assert_identical(stacked_data_array, stack_negative(data_array.groupby("x"))) assert_identical(stacked_dataset, stack_negative(dataset.groupby("x"))) def original_and_stack_negative(obj): def func(x): return (x, np.stack([x, -x], axis=-1)) result = apply_ufunc(func, obj, output_core_dims=[[], ["sign"]]) if isinstance(result[1], xr.Dataset | xr.DataArray): result[1].coords["sign"] = [1, -1] return result out0, out1 = original_and_stack_negative(array) assert_identical(array, out0) assert_identical(stacked_array, out1) out0, out1 = original_and_stack_negative(variable) assert_identical(variable, out0) assert_identical(stacked_variable, out1) out0, out1 = original_and_stack_negative(data_array) assert_identical(data_array, out0) assert_identical(stacked_data_array, out1) out0, out1 = original_and_stack_negative(dataset) assert_identical(dataset, out0) assert_identical(stacked_dataset, out1) out0, out1 = original_and_stack_negative(data_array.groupby("x")) assert_identical(data_array, out0) assert_identical(stacked_data_array, out1) out0, out1 = original_and_stack_negative(dataset.groupby("x")) assert_identical(dataset, out0) assert_identical(stacked_dataset, out1) def test_apply_exclude() -> None: def concatenate(objects, dim="x"): def func(*x): return np.concatenate(x, axis=-1) result = apply_ufunc( func, *objects, input_core_dims=[[dim]] * len(objects), output_core_dims=[[dim]], exclude_dims={dim}, ) if isinstance(result, xr.Dataset | xr.DataArray): # note: this will fail if dim is not a coordinate on any input new_coord = np.concatenate([obj.coords[dim] for obj in objects]) result.coords[dim] = new_coord return result arrays = [np.array([1]), np.array([2, 3])] variables = [xr.Variable("x", a) for a in arrays] data_arrays = [ xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))}) for v, c in zip(variables, [["a"], ["b", "c"]], strict=True) ] datasets = [xr.Dataset({"data": data_array}) for data_array in data_arrays] expected_array = np.array([1, 2, 3]) expected_variable = xr.Variable("x", expected_array) expected_data_array = xr.DataArray(expected_variable, [("x", list("abc"))]) expected_dataset = xr.Dataset({"data": expected_data_array}) assert_identical(expected_array, concatenate(arrays)) assert_identical(expected_variable, concatenate(variables)) assert_identical(expected_data_array, concatenate(data_arrays)) assert_identical(expected_dataset, concatenate(datasets)) # must also be a core dimension with pytest.raises(ValueError): apply_ufunc(identity, variables[0], exclude_dims={"x"}) def test_apply_groupby_add() -> None: array = np.arange(5) variable = xr.Variable("x", array) coords = {"x": -array, "y": ("x", [0, 0, 1, 1, 2])} data_array = xr.DataArray(variable, coords, dims="x") dataset = xr.Dataset({"z": variable}, coords) other_variable = xr.Variable("y", [0, 10]) other_data_array = xr.DataArray(other_variable, dims="y") other_dataset = xr.Dataset({"z": other_variable}) expected_variable = xr.Variable("x", [0, 1, 12, 13, np.nan]) expected_data_array = xr.DataArray(expected_variable, coords, dims="x") expected_dataset = xr.Dataset({"z": expected_variable}, coords) assert_identical( expected_data_array, add(data_array.groupby("y"), other_data_array) ) assert_identical(expected_dataset, add(data_array.groupby("y"), other_dataset)) assert_identical(expected_dataset, add(dataset.groupby("y"), 
other_data_array)) assert_identical(expected_dataset, add(dataset.groupby("y"), other_dataset)) # cannot be performed with xarray.Variable objects that share a dimension with pytest.raises(ValueError): add(data_array.groupby("y"), other_variable) # if they are all grouped the same way with pytest.raises(ValueError): add(data_array.groupby("y"), data_array[:4].groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), data_array[1:].groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), other_data_array.groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), data_array.groupby("x")) @pytest.mark.filterwarnings("ignore:Duplicate dimension names present") def test_unified_dim_sizes() -> None: assert unified_dim_sizes([xr.Variable((), 0)]) == {} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1])]) == {"x": 1} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("y", [1, 2])]) == { "x": 1, "y": 2, } assert unified_dim_sizes( [xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])], exclude_dims={"z"}, ) == {"x": 1, "y": 2} with pytest.raises(ValueError, match="broadcasting cannot handle"): with pytest.warns(UserWarning, match="Duplicate dimension names"): unified_dim_sizes([xr.Variable(("x", "x"), [[1]])]) # mismatched lengths with pytest.raises(ValueError): unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1, 2])]) def test_broadcast_compat_data_1d() -> None: data = np.arange(5) var = xr.Variable("x", data) assert_identical(data, broadcast_compat_data(var, ("x",), ())) assert_identical(data, broadcast_compat_data(var, (), ("x",))) assert_identical(data[:], broadcast_compat_data(var, ("w",), ("x",))) assert_identical(data[:, None], broadcast_compat_data(var, ("w", "x", "y"), ())) with pytest.raises(ValueError): broadcast_compat_data(var, ("x",), ("w",)) with pytest.raises(ValueError): broadcast_compat_data(var, (), ()) def test_broadcast_compat_data_2d() -> None: data = np.arange(12).reshape(3, 4) var = xr.Variable(["x", "y"], data) assert_identical(data, broadcast_compat_data(var, ("x", "y"), ())) assert_identical(data, broadcast_compat_data(var, ("x",), ("y",))) assert_identical(data, broadcast_compat_data(var, (), ("x", "y"))) assert_identical(data.T, broadcast_compat_data(var, ("y", "x"), ())) assert_identical(data.T, broadcast_compat_data(var, ("y",), ("x",))) assert_identical(data, broadcast_compat_data(var, ("w", "x"), ("y",))) assert_identical(data, broadcast_compat_data(var, ("w",), ("x", "y"))) assert_identical(data.T, broadcast_compat_data(var, ("w",), ("y", "x"))) assert_identical( data[:, :, None], broadcast_compat_data(var, ("w", "x", "y", "z"), ()) ) assert_identical( data[None, :, :].T, broadcast_compat_data(var, ("w", "y", "x", "z"), ()) ) def test_keep_attrs() -> None: def add(a, b, keep_attrs): return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs) a = xr.DataArray([0, 1], [("x", [0, 1])]) a.attrs["attr"] = "da" a["x"].attrs["attr"] = "da_coord" b = xr.DataArray([1, 2], [("x", [0, 1])]) actual = add(a, b, keep_attrs=False) assert not actual.attrs actual = add(a, b, keep_attrs=True) assert_identical(actual.attrs, a.attrs) assert_identical(actual["x"].attrs, a["x"].attrs) actual = add(a.variable, b.variable, keep_attrs=False) assert not actual.attrs actual = add(a.variable, b.variable, keep_attrs=True) assert_identical(actual.attrs, a.attrs) ds_a = xr.Dataset({"x": [0, 1]}) ds_a.attrs["attr"] = "ds" ds_a.x.attrs["attr"] = "da" ds_b = xr.Dataset({"x": [0, 1]}) 
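# --- Editor's note (illustrative sketch, not part of the upstream test suite) ---
# The keep_attrs tests that follow parametrize several attribute-merge
# strategies; this hypothetical example shows one of them ("drop_conflicts")
# in isolation: attributes that agree across inputs survive, attributes that
# conflict are dropped.
import operator
import xarray as xr

_a = xr.DataArray([1, 2], dims="x", attrs={"units": "m", "source": "sensor_a"})
_b = xr.DataArray([3, 4], dims="x", attrs={"units": "m", "source": "sensor_b"})

_summed = xr.apply_ufunc(operator.add, _a, _b, keep_attrs="drop_conflicts")
assert _summed.attrs == {"units": "m"}  # "source" conflicted and was dropped
# --- end editor's note ---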
actual = add(ds_a, ds_b, keep_attrs=False) assert not actual.attrs actual = add(ds_a, ds_b, keep_attrs=True) assert_identical(actual.attrs, ds_a.attrs) assert_identical(actual.x.attrs, ds_a.x.attrs) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_variable(strategy, attrs, expected, error) -> None: a = xr.Variable("x", [0, 1], attrs=attrs[0]) b = xr.Variable("x", [0, 1], attrs=attrs[1]) c = xr.Variable("x", [0, 1], attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.Variable("x", [0, 3], attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataarray(strategy, attrs, expected, error) -> None: a = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[0]) b = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[1]) c = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.DataArray(dims="x", data=[0, 3], attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize("variant", ("dim", "coord")) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def 
test_keep_attrs_strategies_dataarray_variables( variant, strategy, attrs, expected, error ): compute_attrs = { "dim": lambda attrs, default: (attrs, default), "coord": lambda attrs, default: (default, attrs), }[variant] dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}]) a = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[0]), "u": ("x", [0, 1], coord_attrs[0])}, ) b = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[1]), "u": ("x", [0, 1], coord_attrs[1])}, ) c = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[2]), "u": ("x", [0, 1], coord_attrs[2])}, ) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: dim_attrs, coord_attrs = compute_attrs(expected, {}) expected = xr.DataArray( dims="x", data=[0, 3], coords={"x": ("x", [0, 1], dim_attrs), "u": ("x", [0, 1], coord_attrs)}, ) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataset(strategy, attrs, expected, error) -> None: a = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[0]) b = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[1]) c = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.Dataset({"a": ("x", [0, 3])}, attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize("variant", ("data", "dim", "coord")) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataset_variables( variant, strategy, attrs, expected, error ): compute_attrs = { "data": lambda attrs, default: (attrs, default, default), "dim": lambda attrs, default: (default, attrs, default), "coord": lambda attrs, default: (default, default, attrs), }[variant] data_attrs, dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}]) a = xr.Dataset( {"a": ("x", [], data_attrs[0])}, coords={"x": 
("x", [], dim_attrs[0]), "u": ("x", [], coord_attrs[0])}, ) b = xr.Dataset( {"a": ("x", [], data_attrs[1])}, coords={"x": ("x", [], dim_attrs[1]), "u": ("x", [], coord_attrs[1])}, ) c = xr.Dataset( {"a": ("x", [], data_attrs[2])}, coords={"x": ("x", [], dim_attrs[2]), "u": ("x", [], coord_attrs[2])}, ) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: data_attrs, dim_attrs, coord_attrs = compute_attrs(expected, {}) expected = xr.Dataset( {"a": ("x", [], data_attrs)}, coords={"x": ("x", [], dim_attrs), "u": ("x", [], coord_attrs)}, ) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) def test_dataset_join() -> None: ds0 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds1 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]}) # by default, cannot have different labels with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"): apply_ufunc(operator.add, ds0, ds1) with pytest.raises(TypeError, match=r"must supply"): apply_ufunc(operator.add, ds0, ds1, dataset_join="outer") def add(a, b, join, dataset_join): return apply_ufunc( operator.add, a, b, join=join, dataset_join=dataset_join, dataset_fill_value=np.nan, ) actual = add(ds0, ds1, "outer", "inner") expected = xr.Dataset({"a": ("x", [np.nan, 101, np.nan]), "x": [0, 1, 2]}) assert_identical(actual, expected) actual = add(ds0, ds1, "outer", "outer") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"data variable names"): apply_ufunc(operator.add, ds0, xr.Dataset({"b": 1})) ds2 = xr.Dataset({"b": ("x", [99, 3]), "x": [1, 2]}) actual = add(ds0, ds2, "outer", "inner") expected = xr.Dataset({"x": [0, 1, 2]}) assert_identical(actual, expected) # we used np.nan as the fill_value in add() above actual = add(ds0, ds2, "outer", "outer") expected = xr.Dataset( { "a": ("x", [np.nan, np.nan, np.nan]), "b": ("x", [np.nan, np.nan, np.nan]), "x": [0, 1, 2], } ) assert_identical(actual, expected) @requires_dask def test_apply_dask() -> None: import dask.array as da array = da.ones((2,), chunks=2) variable = xr.Variable("x", array) coords = xr.DataArray(variable).coords.variables data_array = xr.DataArray(variable, dims=["x"], coords=coords) dataset = xr.Dataset({"y": variable}) # encountered dask array, but did not set dask='allowed' with pytest.raises(ValueError): apply_ufunc(identity, array) with pytest.raises(ValueError): apply_ufunc(identity, variable) with pytest.raises(ValueError): apply_ufunc(identity, data_array) with pytest.raises(ValueError): apply_ufunc(identity, dataset) # unknown setting for dask array handling with pytest.raises(ValueError): apply_ufunc(identity, array, dask="unknown") # type: ignore[arg-type] def dask_safe_identity(x): return apply_ufunc(identity, x, dask="allowed") assert array is dask_safe_identity(array) actual = dask_safe_identity(variable) assert isinstance(actual.data, da.Array) assert_identical(variable, actual) actual = dask_safe_identity(data_array) assert isinstance(actual.data, da.Array) assert_identical(data_array, actual) actual = dask_safe_identity(dataset) assert isinstance(actual["y"].data, da.Array) assert_identical(dataset, actual) @requires_dask def test_apply_dask_parallelized_one_arg() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) def parallel_identity(x): return apply_ufunc(identity, x, dask="parallelized", output_dtypes=[x.dtype]) actual = parallel_identity(data_array) assert 
isinstance(actual.data, da.Array) assert actual.data.chunks == array.chunks assert_identical(data_array, actual) computed = data_array.compute() actual = parallel_identity(computed) assert_identical(computed, actual) @requires_dask def test_apply_dask_parallelized_two_args() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64) data_array = xr.DataArray(array, dims=("x", "y")) data_array.name = None def parallel_add(x, y): return apply_ufunc( operator.add, x, y, dask="parallelized", output_dtypes=[np.int64] ) def check(x, y): actual = parallel_add(x, y) assert isinstance(actual.data, da.Array) assert actual.data.chunks == array.chunks assert_identical(data_array, actual) check(data_array, 0) check(0, data_array) check(data_array, xr.DataArray(0)) check(data_array, 0 * data_array) check(data_array, 0 * data_array[0]) check(data_array[:, 0], 0 * data_array[0]) check(data_array, 0 * data_array.compute()) @requires_dask def test_apply_dask_parallelized_errors() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) # from apply_array_ufunc with pytest.raises(ValueError, match=r"at least one input is an xarray object"): apply_ufunc(identity, array, dask="parallelized") # formerly from _apply_blockwise, now from apply_variable_ufunc with pytest.raises(ValueError, match=r"consists of multiple chunks"): apply_ufunc( identity, data_array, dask="parallelized", output_dtypes=[float], input_core_dims=[("y",)], output_core_dims=[("y",)], ) # it's currently impossible to silence these warnings from inside dask.array: # https://github.com/dask/dask/issues/3245 @requires_dask @pytest.mark.filterwarnings("ignore:Mean of empty slice") def test_apply_dask_multiple_inputs() -> None: import dask.array as da def covariance(x, y): return ( (x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True)) ).mean(axis=-1) rs = np.random.default_rng(42) array1 = da.from_array(rs.random((4, 4)), chunks=(2, 4)) array2 = da.from_array(rs.random((4, 4)), chunks=(2, 4)) data_array_1 = xr.DataArray(array1, dims=("x", "z")) data_array_2 = xr.DataArray(array2, dims=("y", "z")) expected = apply_ufunc( covariance, data_array_1.compute(), data_array_2.compute(), input_core_dims=[["z"], ["z"]], ) allowed = apply_ufunc( covariance, data_array_1, data_array_2, input_core_dims=[["z"], ["z"]], dask="allowed", ) assert isinstance(allowed.data, da.Array) xr.testing.assert_allclose(expected, allowed.compute()) parallelized = apply_ufunc( covariance, data_array_1, data_array_2, input_core_dims=[["z"], ["z"]], dask="parallelized", output_dtypes=[float], ) assert isinstance(parallelized.data, da.Array) xr.testing.assert_allclose(expected, parallelized.compute()) @requires_dask def test_apply_dask_new_output_dimension() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) def stack_negative(obj): def func(x): return np.stack([x, -x], axis=-1) return apply_ufunc( func, obj, output_core_dims=[["sign"]], dask="parallelized", output_dtypes=[obj.dtype], dask_gufunc_kwargs=dict(output_sizes={"sign": 2}), ) expected = stack_negative(data_array.compute()) actual = stack_negative(data_array) assert actual.dims == ("x", "y", "sign") assert actual.shape == (2, 2, 2) assert isinstance(actual.data, da.Array) assert_identical(expected, actual) @requires_dask def test_apply_dask_new_output_sizes() -> None: ds = xr.Dataset({"foo": (["lon", "lat"], np.arange(10 * 
10).reshape((10, 10)))}) ds["bar"] = ds["foo"] newdims = {"lon_new": 3, "lat_new": 6} def extract(obj): def func(da): return da[1:4, 1:7] return apply_ufunc( func, obj, dask="parallelized", input_core_dims=[["lon", "lat"]], output_core_dims=[["lon_new", "lat_new"]], dask_gufunc_kwargs=dict(output_sizes=newdims), ) expected = extract(ds) actual = extract(ds.chunk()) assert actual.sizes == {"lon_new": 3, "lat_new": 6} assert_identical(expected.chunk(), actual) @requires_dask def test_apply_dask_new_output_sizes_not_supplied_same_dim_names() -> None: # test for missing output_sizes kwarg sneaking through # see GH discussion 7503 data = np.random.randn(4, 4, 3, 2) da = xr.DataArray(data=data, dims=("x", "y", "i", "j")).chunk(x=1, y=1) with pytest.raises(ValueError, match="output_sizes"): xr.apply_ufunc( np.linalg.pinv, da, input_core_dims=[["i", "j"]], output_core_dims=[["i", "j"]], exclude_dims={"i", "j"}, dask="parallelized", ) def pandas_median(x): return pd.Series(x).median() def test_vectorize() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array, input_core_dims=[["y"]], vectorize=True ) assert_identical(expected, actual) @requires_dask def test_vectorize_dask() -> None: # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", output_dtypes=[float], ) assert_identical(expected, actual) @requires_dask def test_vectorize_dask_dtype() -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 # integer data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", output_dtypes=[int], ) assert_identical(expected, actual) assert expected.dtype == actual.dtype # complex data_array = xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")) expected = data_array.copy() actual = apply_ufunc( identity, data_array.chunk({"x": 1}), vectorize=True, dask="parallelized", output_dtypes=[complex], ) assert_identical(expected, actual) assert expected.dtype == actual.dtype @requires_dask @pytest.mark.parametrize( "data_array", [ xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")), xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")), ], ) def test_vectorize_dask_dtype_without_output_dtypes(data_array) -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 expected = data_array.copy() actual = apply_ufunc( identity, data_array.chunk({"x": 1}), vectorize=True, dask="parallelized", ) assert_identical(expected, actual) assert expected.dtype == actual.dtype @requires_dask def test_vectorize_dask_dtype_meta() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", dask_gufunc_kwargs=dict(meta=np.ndarray((0, 0), dtype=float)), ) assert_identical(expected, actual) assert float == actual.dtype def pandas_median_add(x, y): # function which can consume input of unequal length return pd.Series(x).median() + pd.Series(y).median() def 
test_vectorize_exclude_dims() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) expected = xr.DataArray([3, 5], dims=["x"]) actual = apply_ufunc( pandas_median_add, data_array_a, data_array_b, input_core_dims=[["y"], ["y"]], vectorize=True, exclude_dims=set("y"), ) assert_identical(expected, actual) @requires_dask def test_vectorize_exclude_dims_dask() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) expected = xr.DataArray([3, 5], dims=["x"]) actual = apply_ufunc( pandas_median_add, data_array_a.chunk({"x": 1}), data_array_b.chunk({"x": 1}), input_core_dims=[["y"], ["y"]], exclude_dims=set("y"), vectorize=True, dask="parallelized", output_dtypes=[float], ) assert_identical(expected, actual) def test_corr_only_dataarray() -> None: with pytest.raises(TypeError, match=r"Only xr.DataArray is supported"): xr.corr(xr.Dataset(), xr.Dataset()) # type: ignore[type-var] @pytest.fixture(scope="module") def arrays(): da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) return [ da.isel(time=range(18)), da.isel(time=range(2, 20)).rolling(time=3, center=True).mean(), xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]), xr.DataArray([[1, 2], [np.nan, np.nan]], dims=["x", "time"]), xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"]), ] @pytest.fixture(scope="module") def array_tuples(arrays): return [ (arrays[0], arrays[0]), (arrays[0], arrays[1]), (arrays[1], arrays[1]), (arrays[2], arrays[2]), (arrays[2], arrays[3]), (arrays[2], arrays[4]), (arrays[4], arrays[2]), (arrays[3], arrays[3]), (arrays[4], arrays[4]), ] @pytest.mark.parametrize("ddof", [0, 1]) @pytest.mark.parametrize("n", [3, 4, 5, 6, 7, 8]) @pytest.mark.parametrize("dim", [None, "x", "time"]) @requires_dask def test_lazy_corrcov( n: int, dim: str | None, ddof: int, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: # GH 5284 from dask import is_dask_collection da_a, da_b = array_tuples[n] with raise_if_dask_computes(): cov = xr.cov(da_a.chunk(), da_b.chunk(), dim=dim, ddof=ddof) assert is_dask_collection(cov) corr = xr.corr(da_a.chunk(), da_b.chunk(), dim=dim) assert is_dask_collection(corr) @pytest.mark.parametrize("ddof", [0, 1]) @pytest.mark.parametrize("n", [0, 1, 2]) @pytest.mark.parametrize("dim", [None, "time"]) def test_cov( n: int, dim: str | None, ddof: int, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] if dim is not None: def np_cov_ind(ts1, ts2, a, x): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() # While dropping isn't ideal here, numpy will return nan # if any segment contains a NaN. 
ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.cov( np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()), np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()), ddof=ddof, )[0, 1] expected = np.zeros((3, 4)) for a in [0, 1, 2]: for x in [0, 1, 2, 3]: expected[a, x] = np_cov_ind(da_a, da_b, a=a, x=x) actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof) assert_allclose(actual, expected) else: def np_cov(ts1, ts2): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.cov( np.ma.masked_invalid(ts1.data.flatten()), np.ma.masked_invalid(ts2.data.flatten()), ddof=ddof, )[0, 1] expected = np_cov(da_a, da_b) actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof) assert_allclose(actual, expected) @pytest.mark.parametrize("n", [0, 1, 2]) @pytest.mark.parametrize("dim", [None, "time"]) def test_corr( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] if dim is not None: def np_corr_ind(ts1, ts2, a, x): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.corrcoef( np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()), np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()), )[0, 1] expected = np.zeros((3, 4)) for a in [0, 1, 2]: for x in [0, 1, 2, 3]: expected[a, x] = np_corr_ind(da_a, da_b, a=a, x=x) actual = xr.corr(da_a, da_b, dim) assert_allclose(actual, expected) else: def np_corr(ts1, ts2): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.corrcoef( np.ma.masked_invalid(ts1.data.flatten()), np.ma.masked_invalid(ts2.data.flatten()), )[0, 1] expected = np_corr(da_a, da_b) actual = xr.corr(da_a, da_b, dim) assert_allclose(actual, expected) @pytest.mark.parametrize("n", range(9)) @pytest.mark.parametrize("dim", [None, "time", "x"]) def test_covcorr_consistency( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] # Testing that xr.corr and xr.cov are consistent with each other # 1. Broadcast the two arrays da_a, da_b = broadcast(da_a, da_b) # 2. 
Ignore the nans valid_values = da_a.notnull() & da_b.notnull() da_a = da_a.where(valid_values) da_b = da_b.where(valid_values) expected = xr.cov(da_a, da_b, dim=dim, ddof=0) / ( da_a.std(dim=dim) * da_b.std(dim=dim) ) actual = xr.corr(da_a, da_b, dim=dim) assert_allclose(actual, expected) @requires_dask @pytest.mark.parametrize("n", range(9)) @pytest.mark.parametrize("dim", [None, "time", "x"]) @pytest.mark.filterwarnings("ignore:invalid value encountered in .*divide") def test_corr_lazycorr_consistency( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] da_al = da_a.chunk() da_bl = da_b.chunk() c_abl = xr.corr(da_al, da_bl, dim=dim) c_ab = xr.corr(da_a, da_b, dim=dim) c_ab_mixed = xr.corr(da_a, da_bl, dim=dim) assert_allclose(c_ab, c_abl) assert_allclose(c_ab, c_ab_mixed) @requires_dask def test_corr_dtype_error(): da_a = xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"]) da_b = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]) xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a.chunk(), da_b.chunk())) xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a, da_b.chunk())) @pytest.mark.parametrize("n", range(5)) @pytest.mark.parametrize("dim", [None, "time", "x", ["time", "x"]]) def test_autocov(n: int, dim: str | None, arrays) -> None: da = arrays[n] # Testing that the autocovariance*(N-1) is ~=~ to the variance matrix # 1. Ignore the nans valid_values = da.notnull() # Because we're using ddof=1, this requires > 1 value in each sample da = da.where(valid_values.sum(dim=dim) > 1) expected = ((da - da.mean(dim=dim)) ** 2).sum(dim=dim, skipna=True, min_count=1) actual = xr.cov(da, da, dim=dim) * (valid_values.sum(dim) - 1) assert_allclose(actual, expected) def test_complex_cov() -> None: da = xr.DataArray([1j, -1j]) actual = xr.cov(da, da) assert abs(actual.item()) == 2 @pytest.mark.parametrize("weighted", [True, False]) def test_bilinear_cov_corr(weighted: bool) -> None: # Test the bilinear properties of covariance and correlation da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) db = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) dc = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) if weighted: weights = xr.DataArray( np.abs(np.random.random(4)), dims=("x"), ) else: weights = None k = np.random.random(1)[0] # Test covariance properties assert_allclose( xr.cov(da + k, db, weights=weights), xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da, db + k, weights=weights), xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da + dc, db, weights=weights), xr.cov(da, db, weights=weights) + xr.cov(dc, db, weights=weights), ) assert_allclose( xr.cov(da, db + dc, weights=weights), xr.cov(da, db, weights=weights) + xr.cov(da, dc, weights=weights), ) assert_allclose( xr.cov(k * da, db, weights=weights), k * xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da, k * db, weights=weights), k * xr.cov(da, db, weights=weights) ) # Test correlation properties assert_allclose( xr.corr(da + k, db, weights=weights), xr.corr(da, db, weights=weights) ) assert_allclose( xr.corr(da, db + k, weights=weights), xr.corr(da, db, weights=weights) ) assert_allclose( xr.corr(k * da, db, weights=weights), xr.corr(da, db, weights=weights) ) 
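# --- Editor's note (illustrative sketch, not part of the upstream test suite) ---
# The surrounding test checks bilinearity/invariance properties of xr.cov and
# xr.corr; this hypothetical snippet shows the weighted form directly:
# correlation is unchanged when one input is shifted by a constant.
import numpy as np
import xarray as xr

_rng = np.random.default_rng(0)
_u = xr.DataArray(_rng.random((5, 8)), dims=("space", "time"))
_v = xr.DataArray(_rng.random((5, 8)), dims=("space", "time"))
_w = xr.DataArray(np.abs(_rng.random(8)), dims="time")

_corr = xr.corr(_u, _v, dim="time", weights=_w)
_corr_shifted = xr.corr(_u + 10.0, _v, dim="time", weights=_w)
xr.testing.assert_allclose(_corr, _corr_shifted)
# --- end editor's note ---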
assert_allclose( xr.corr(da, k * db, weights=weights), xr.corr(da, db, weights=weights) ) def test_equally_weighted_cov_corr() -> None: # Test that equal weights for all values produces same results as weights=None da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) db = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) assert_allclose( xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(1)) ) assert_allclose( xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(2)) ) assert_allclose( xr.corr(da, db, weights=None), xr.corr(da, db, weights=xr.DataArray(1)) ) assert_allclose( xr.corr(da, db, weights=None), xr.corr(da, db, weights=xr.DataArray(2)) ) @requires_dask def test_vectorize_dask_new_output_dims() -> None: # regression test for GH3574 # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) func = lambda x: x[np.newaxis, ...] expected = data_array.expand_dims("z") actual = apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], dask_gufunc_kwargs=dict(output_sizes={"z": 1}), ).transpose(*expected.dims) assert_identical(expected, actual) with pytest.raises( ValueError, match=r"dimension 'z1' in 'output_sizes' must correspond" ): apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], dask_gufunc_kwargs=dict(output_sizes={"z1": 1}), ) with pytest.raises( ValueError, match=r"dimension 'z' in 'output_core_dims' needs corresponding" ): apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], ) def test_output_wrong_number() -> None: variable = xr.Variable("x", np.arange(10)) def identity(x): return x def tuple3x(x): return (x, x, x) with pytest.raises( ValueError, match=r"number of outputs.* Received a with 10 elements. 
Expected a tuple of 2 elements:\n\narray\(\[0", ): apply_ufunc(identity, variable, output_core_dims=[(), ()]) with pytest.raises(ValueError, match=r"number of outputs"): apply_ufunc(tuple3x, variable, output_core_dims=[(), ()]) def test_output_wrong_dims() -> None: variable = xr.Variable("x", np.arange(10)) def add_dim(x): return x[..., np.newaxis] def remove_dim(x): return x[..., 0] with pytest.raises( ValueError, match=r"unexpected number of dimensions.*from:\n\n.*array\(\[\[0", ): apply_ufunc(add_dim, variable, output_core_dims=[("y", "z")]) with pytest.raises(ValueError, match=r"unexpected number of dimensions"): apply_ufunc(add_dim, variable) with pytest.raises(ValueError, match=r"unexpected number of dimensions"): apply_ufunc(remove_dim, variable) def test_output_wrong_dim_size() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) def truncate(array): return array[:5] def apply_truncate_broadcast_invalid(obj): return apply_ufunc(truncate, obj) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(variable) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(data_array) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(dataset) def apply_truncate_x_x_invalid(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]] ) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(variable) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(data_array) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(dataset) def apply_truncate_x_z(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["z"]] ) assert_identical(xr.Variable("z", array[:5]), apply_truncate_x_z(variable)) assert_identical( xr.DataArray(array[:5], dims=["z"]), apply_truncate_x_z(data_array) ) assert_identical(xr.Dataset({"y": ("z", array[:5])}), apply_truncate_x_z(dataset)) def apply_truncate_x_x_valid(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]], exclude_dims={"x"}, ) assert_identical(xr.Variable("x", array[:5]), apply_truncate_x_x_valid(variable)) assert_identical( xr.DataArray(array[:5], dims=["x"]), apply_truncate_x_x_valid(data_array) ) assert_identical( xr.Dataset({"y": ("x", array[:5])}), apply_truncate_x_x_valid(dataset) ) @pytest.mark.parametrize("use_dask", [True, False]) def test_dot(use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("test for dask.") a = np.arange(30 * 4).reshape(30, 4) b = np.arange(30 * 4 * 5).reshape(30, 4, 5) c = np.arange(5 * 60).reshape(5, 60) da_a = xr.DataArray(a, dims=["a", "b"], coords={"a": np.linspace(0, 1, 30)}) da_b = xr.DataArray(b, dims=["a", "b", "c"], coords={"a": np.linspace(0, 1, 30)}) da_c = xr.DataArray(c, dims=["c", "e"]) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) da_c = da_c.chunk({"c": 3}) actual = xr.dot(da_a, da_b, dim=["a", "b"]) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) actual = xr.dot(da_a, da_b) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) # for only a single array 
is passed without dims argument, just return # as is actual = xr.dot(da_a) assert_identical(da_a, actual) # test for variable actual = xr.dot(da_a.variable, da_b.variable) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.data, type(da_a.variable.data)) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) actual = xr.dot(da_a, da_b, dim=["b"]) assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) actual = xr.dot(da_a, da_b, dim=["b"]) assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() actual = xr.dot(da_a, da_b, dim="b") assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() actual = xr.dot(da_a, da_b, dim="a") assert actual.dims == ("b", "c") assert (actual.data == np.einsum("ij,ijk->jk", a, b)).all() actual = xr.dot(da_a, da_b, dim="c") assert actual.dims == ("a", "b") assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all() actual = xr.dot(da_a, da_b, da_c, dim=["a", "b"]) assert actual.dims == ("c", "e") assert (actual.data == np.einsum("ij,ijk,kl->kl ", a, b, c)).all() # should work with tuple actual = xr.dot(da_a, da_b, dim=("c",)) assert actual.dims == ("a", "b") assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all() # default dims actual = xr.dot(da_a, da_b, da_c) assert actual.dims == ("e",) assert (actual.data == np.einsum("ij,ijk,kl->l ", a, b, c)).all() # 1 array summation actual = xr.dot(da_a, dim="a") assert actual.dims == ("b",) assert (actual.data == np.einsum("ij->j ", a)).all() # empty dim actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dim="a") assert actual.dims == ("b",) assert (actual.data == np.zeros(actual.shape)).all() # Ellipsis (...) sums over all dimensions actual = xr.dot(da_a, da_b, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij,ijk->", a, b)).all() actual = xr.dot(da_a, da_b, da_c, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij,ijk,kl-> ", a, b, c)).all() actual = xr.dot(da_a, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij-> ", a)).all() actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dim=...) assert actual.dims == () assert (actual.data == np.zeros(actual.shape)).all() # Invalid cases if not use_dask: with pytest.raises(TypeError): xr.dot(da_a, dim="a", invalid=None) with pytest.raises(TypeError): xr.dot(da_a.to_dataset(name="da"), dim="a") with pytest.raises(TypeError): xr.dot(dim="a") # einsum parameters actual = xr.dot(da_a, da_b, dim=["b"], order="C") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() assert actual.values.flags["C_CONTIGUOUS"] assert not actual.values.flags["F_CONTIGUOUS"] actual = xr.dot(da_a, da_b, dim=["b"], order="F") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() # dask converts Fortran arrays to C order when merging the final array if not use_dask: assert not actual.values.flags["C_CONTIGUOUS"] assert actual.values.flags["F_CONTIGUOUS"] # einsum has a constant string as of the first parameter, which makes # it hard to pass to xarray.apply_ufunc. # make sure dot() uses functools.partial(einsum, subscripts), which # can be pickled, and not a lambda, which can't. 
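# --- Editor's note (illustrative sketch, not part of the upstream test suite) ---
# For orientation: xr.dot is a named-dimension einsum, which is what the
# surrounding comparisons against np.einsum verify. Names below are
# hypothetical. (The pickle round-trip referred to in the comment above
# continues immediately after this note.)
import numpy as np
import xarray as xr

_a = np.arange(6).reshape(2, 3)
_b = np.arange(12).reshape(2, 3, 2)
_da_a = xr.DataArray(_a, dims=["i", "j"])
_da_b = xr.DataArray(_b, dims=["i", "j", "k"])

# Contract over the shared dims, like np.einsum("ij,ijk->k", a, b).
_res = xr.dot(_da_a, _da_b, dim=["i", "j"])
np.testing.assert_array_equal(_res.values, np.einsum("ij,ijk->k", _a, _b))

# dim=... contracts over every dimension, giving a 0-d result.
assert xr.dot(_da_a, _da_b, dim=...).ndim == 0
# --- end editor's note ---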
pickle.loads(pickle.dumps(xr.dot(da_a))) @pytest.mark.parametrize("use_dask", [True, False]) def test_dot_align_coords(use_dask: bool) -> None: # GH 3694 if use_dask and not has_dask: pytest.skip("test for dask.") a = np.arange(30 * 4).reshape(30, 4) b = np.arange(30 * 4 * 5).reshape(30, 4, 5) # use partially overlapping coords coords_a = {"a": np.arange(30), "b": np.arange(4)} coords_b = {"a": np.arange(5, 35), "b": np.arange(1, 5)} da_a = xr.DataArray(a, dims=["a", "b"], coords=coords_a) da_b = xr.DataArray(b, dims=["a", "b", "c"], coords=coords_b) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) # join="inner" is the default actual = xr.dot(da_a, da_b) # `dot` sums over the common dimensions of the arguments expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) actual = xr.dot(da_a, da_b, dim=...) expected = (da_a * da_b).sum() xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="exact"): with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"): xr.dot(da_a, da_b) # NOTE: dot always uses `join="inner"` because `(a * b).sum()` yields the same for all # join method (except "exact") with xr.set_options(arithmetic_join="left"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="right"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="outer"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) def test_where() -> None: cond = xr.DataArray([True, False], dims="x") actual = xr.where(cond, 1, 0) expected = xr.DataArray([1, 0], dims="x") assert_identical(expected, actual) def test_where_attrs() -> None: cond = xr.DataArray([True, False], coords={"a": [0, 1]}, attrs={"attr": "cond_da"}) cond["a"].attrs = {"attr": "cond_coord"} input_cond = cond.copy() x = xr.DataArray([1, 1], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) x["a"].attrs = {"attr": "x_coord"} y = xr.DataArray([0, 0], coords={"a": [0, 1]}, attrs={"attr": "y_da"}) y["a"].attrs = {"attr": "y_coord"} # 3 DataArrays, takes attrs from x actual = xr.where(cond, x, y, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # Check also that input coordinate attributes weren't modified by reference assert x["a"].attrs == {"attr": "x_coord"} assert y["a"].attrs == {"attr": "y_coord"} assert cond["a"].attrs == {"attr": "cond_coord"} assert_identical(cond, input_cond) # 3 DataArrays, drop attrs actual = xr.where(cond, x, y, keep_attrs=False) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) assert_identical(expected.coords["a"], actual.coords["a"]) # Check also that input coordinate attributes weren't modified by reference assert x["a"].attrs == {"attr": "x_coord"} assert y["a"].attrs == {"attr": "y_coord"} assert cond["a"].attrs == {"attr": "cond_coord"} assert_identical(cond, input_cond) # x as a scalar, takes no attrs actual = xr.where(cond, 0, y, keep_attrs=True) expected = xr.DataArray([0, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) # y as a scalar, takes attrs from x actual = xr.where(cond, x, 0, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) 
expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # x and y as a scalar, takes no attrs actual = xr.where(cond, 1, 0, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) # cond and y as a scalar, takes attrs from x actual = xr.where(True, x, y, keep_attrs=True) expected = xr.DataArray([1, 1], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # no xarray objects, handle no attrs actual_np = xr.where(True, 0, 1, keep_attrs=True) expected_np = np.array(0) assert_identical(expected_np, actual_np) # DataArray and 2 Datasets, takes attrs from x ds_x = xr.Dataset(data_vars={"x": x}, attrs={"attr": "x_ds"}) ds_y = xr.Dataset(data_vars={"x": y}, attrs={"attr": "y_ds"}) ds_actual = xr.where(cond, ds_x, ds_y, keep_attrs=True) ds_expected = xr.Dataset( data_vars={ "x": xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) }, attrs={"attr": "x_ds"}, ) ds_expected["a"].attrs = {"attr": "x_coord"} assert_identical(ds_expected, ds_actual) # 2 DataArrays and 1 Dataset, takes attrs from x ds_actual = xr.where(cond, x.rename("x"), ds_y, keep_attrs=True) ds_expected = xr.Dataset( data_vars={ "x": xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) }, ) ds_expected["a"].attrs = {"attr": "x_coord"} assert_identical(ds_expected, ds_actual) @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize( ["x", "coeffs", "expected"], [ pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([2, 3, 4], dims="degree", coords={"degree": [0, 1, 2]}), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="simple", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [[0, 1], [0, 1]], dims=("y", "degree"), coords={"degree": [0, 1]} ), xr.DataArray([[1, 1], [2, 2], [3, 3]], dims=("x", "y")), id="broadcast-x", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [[0, 1], [1, 0], [1, 1]], dims=("x", "degree"), coords={"degree": [0, 1]}, ), xr.DataArray([1, 1, 1 + 3], dims="x"), id="shared-dim", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([1, 0, 0], dims="degree", coords={"degree": [2, 1, 0]}), xr.DataArray([1, 2**2, 3**2], dims="x"), id="reordered-index", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([5], dims="degree", coords={"degree": [3]}), xr.DataArray([5, 5 * 2**3, 5 * 3**3], dims="x"), id="sparse-index", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.Dataset( {"a": ("degree", [0, 1]), "b": ("degree", [1, 0])}, coords={"degree": [0, 1]}, ), xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [1, 1, 1])}), id="array-dataset", ), pytest.param( xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [2, 3, 4])}), xr.DataArray([1, 1], dims="degree", coords={"degree": [0, 1]}), xr.Dataset({"a": ("x", [2, 3, 4]), "b": ("x", [3, 4, 5])}), id="dataset-array", ), pytest.param( xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("y", [2, 3, 4])}), xr.Dataset( {"a": ("degree", [0, 1]), "b": ("degree", [1, 1])}, coords={"degree": [0, 1]}, ), xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("y", [3, 4, 5])}), id="dataset-dataset", ), pytest.param( xr.DataArray(pd.date_range("1970-01-01", freq="s", periods=3), dims="x"), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": pd.date_range("1970-01-01", freq="s", periods=3)}, ), id="datetime", ), pytest.param( # Force a non-ns unit 
for the coordinate, make sure we convert to `ns` # for backwards compatibility at the moment. This can be relaxed in the future. xr.DataArray( pd.date_range("1970-01-01", freq="s", periods=3, unit="s"), dims="x" ), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": pd.date_range("1970-01-01", freq="s", periods=3)}, ), id="datetime-non-ns", ), pytest.param( xr.DataArray( np.array([1000, 2000, 3000], dtype="timedelta64[ns]"), dims="x" ), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray([1000.0, 2000.0, 3000.0], dims="x"), id="timedelta", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.int64)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="int64-degree", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.int32)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="int32-degree", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.uint8)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="uint8-degree", ), ], ) def test_polyval( use_dask: bool, x: xr.DataArray | xr.Dataset, coeffs: xr.DataArray | xr.Dataset, expected: xr.DataArray | xr.Dataset, ) -> None: if use_dask: if not has_dask: pytest.skip("requires dask") coeffs = coeffs.chunk({"degree": 2}) x = x.chunk({"x": 2}) with raise_if_dask_computes(): actual = xr.polyval(coord=x, coeffs=coeffs) xr.testing.assert_allclose(actual, expected) @requires_cftime @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize("date", ["1970-01-01", "0753-04-21"]) def test_polyval_cftime(use_dask: bool, date: str) -> None: import cftime x = xr.DataArray( xr.date_range(date, freq="1s", periods=3, use_cftime=True), dims="x", ) coeffs = xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}) if use_dask: if not has_dask: pytest.skip("requires dask") coeffs = coeffs.chunk({"degree": 2}) x = x.chunk({"x": 2}) with raise_if_dask_computes(max_computes=1): actual = xr.polyval(coord=x, coeffs=coeffs) t0 = xr.date_range(date, periods=1)[0] offset = (t0 - cftime.DatetimeGregorian(1970, 1, 1)).total_seconds() * 1e9 expected = ( xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": xr.date_range(date, freq="1s", periods=3, use_cftime=True)}, ) + offset ) xr.testing.assert_allclose(actual, expected) def test_polyval_degree_dim_checks() -> None: x = xr.DataArray([1, 2, 3], dims="x") coeffs = xr.DataArray([2, 3, 4], dims="degree", coords={"degree": [0, 1, 2]}) with pytest.raises(ValueError): xr.polyval(x, coeffs.drop_vars("degree")) with pytest.raises(ValueError): xr.polyval(x, coeffs.assign_coords(degree=coeffs.degree.astype(float))) @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize( "x", [ pytest.param(xr.DataArray([0, 1, 2], dims="x"), id="simple"), pytest.param( xr.DataArray(pd.date_range("1970-01-01", freq="ns", periods=3), dims="x"), id="datetime", ), # Force a non-ns unit for the coordinate, make sure we convert to `ns` in both polyfit & polval # for backwards compatibility at the moment. This can be relaxed in the future. 
pytest.param( xr.DataArray( pd.date_range("1970-01-01", freq="s", unit="s", periods=3), dims="x" ), id="datetime-non-ns", ), pytest.param( xr.DataArray(np.array([0, 1, 2], dtype="timedelta64[ns]"), dims="x"), id="timedelta", ), ], ) @pytest.mark.parametrize( "y", [ pytest.param(xr.DataArray([1, 6, 17], dims="x"), id="1D"), pytest.param( xr.DataArray([[1, 6, 17], [34, 57, 86]], dims=("y", "x")), id="2D" ), ], ) def test_polyfit_polyval_integration( use_dask: bool, x: xr.DataArray, y: xr.DataArray ) -> None: y.coords["x"] = x if use_dask: if not has_dask: pytest.skip("requires dask") y = y.chunk({"x": 2}) fit = y.polyfit(dim="x", deg=2) evaluated = xr.polyval(y.x, fit.polyfit_coefficients) expected = y.transpose(*evaluated.dims) xr.testing.assert_allclose(evaluated.variable, expected.variable) @pytest.mark.parametrize("use_dask", [False, True]) @pytest.mark.parametrize( "a, b, ae, be, dim, axis", [ [ xr.DataArray([1, 2, 3]), xr.DataArray([4, 5, 6]), np.array([1, 2, 3]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.DataArray([1, 2]), xr.DataArray([4, 5, 6]), np.array([1, 2, 0]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.Variable(dims=["dim_0"], data=[1, 2, 3]), xr.Variable(dims=["dim_0"], data=[4, 5, 6]), np.array([1, 2, 3]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.Variable(dims=["dim_0"], data=[1, 2]), xr.Variable(dims=["dim_0"], data=[4, 5, 6]), np.array([1, 2, 0]), np.array([4, 5, 6]), "dim_0", -1, ], [ # Test dim in the middle: xr.DataArray( np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)), dims=["time", "cartesian", "var"], coords=dict( time=(["time"], np.arange(0, 5)), cartesian=(["cartesian"], ["x", "y", "z"]), var=(["var"], [1, 1.5, 2, 2.5]), ), ), xr.DataArray( np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1, dims=["time", "cartesian", "var"], coords=dict( time=(["time"], np.arange(0, 5)), cartesian=(["cartesian"], ["x", "y", "z"]), var=(["var"], [1, 1.5, 2, 2.5]), ), ), np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)), np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1, "cartesian", 1, ], # Test 1 sized arrays with coords: pytest.param( xr.DataArray( np.array([1]), dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["z"])), ), xr.DataArray( np.array([4, 5, 6]), dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])), ), np.array([0, 0, 1]), np.array([4, 5, 6]), "cartesian", -1, marks=(pytest.mark.xfail(),), ), # Test filling in between with coords: pytest.param( xr.DataArray( [1, 2], dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "z"])), ), xr.DataArray( [4, 5, 6], dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])), ), np.array([1, 0, 2]), np.array([4, 5, 6]), "cartesian", -1, marks=(pytest.mark.xfail(),), ), ], ) def test_cross(a, b, ae, be, dim: str, axis: int, use_dask: bool) -> None: expected = np.cross(ae, be, axis=axis) if use_dask: if not has_dask: pytest.skip("test for dask.") a = a.chunk() b = b.chunk() actual = xr.cross(a, b, dim=dim) xr.testing.assert_duckarray_allclose(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg"], indirect=True) def test_complex_number_reduce(compute_backend): da = xr.DataArray(np.ones((2,), dtype=np.complex64), dims=["x"]) # Check that xarray doesn't call into numbagg, which doesn't compile for complex # numbers at the moment (but will when numba supports dynamic compilation) da.min() def test_fix() -> None: val = 3.0 val_fixed = np.fix(val) da = xr.DataArray([val]) expected = xr.DataArray([val_fixed]) actual = np.fix(da) assert_identical(expected, actual) 
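# Illustrative sketch (not one of the tests above): a minimal, self-contained
# walk through the computation APIs exercised in this module -- xr.dot,
# xr.polyval, xr.where and xr.cross.  Every name in the body is invented for
# the example; the calls themselves mirror the usage in the tests above.
def _example_computation_api_usage() -> None:
    # xr.dot: sum-product over a shared dimension, like np.einsum("xy,y->x", ...)
    da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=("x", "y"))
    weights = xr.DataArray([1.0, 2.0, 3.0], dims="y")
    summed = xr.dot(da, weights, dim="y")
    assert summed.dims == ("x",)
    assert (summed.data == np.array([8.0, 26.0])).all()

    # xr.polyval: evaluate c0 + c1 * t along the "degree" coordinate of coeffs
    coeffs = xr.DataArray([1.0, 2.0], dims="degree", coords={"degree": [0, 1]})
    t = xr.DataArray([0.0, 1.0, 2.0], dims="t")
    evaluated = xr.polyval(coord=t, coeffs=coeffs)
    assert (evaluated.data == np.array([1.0, 3.0, 5.0])).all()

    # xr.where: element-wise selection; keep_attrs=True keeps attrs from the
    # second argument (a scalar here, so there are none to keep)
    cond = xr.DataArray([True, False], dims="x")
    chosen = xr.where(cond, 1, 0, keep_attrs=True)
    assert (chosen.data == np.array([1, 0])).all()

    # xr.cross: cross product along a length-3 dimension (e_x x e_y == e_z)
    u = xr.DataArray([1.0, 0.0, 0.0], dims="cartesian")
    v = xr.DataArray([0.0, 1.0, 0.0], dims="cartesian")
    w = xr.cross(u, v, dim="cartesian")
    assert (w.data == np.array([0.0, 0.0, 1.0])).all()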
xarray-2025.12.0/xarray/tests/test_concat.py000066400000000000000000002050111511464676000206720ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable from contextlib import AbstractContextManager, nullcontext from copy import deepcopy from typing import TYPE_CHECKING, Any, Literal import numpy as np import pandas as pd import pytest from xarray import ( AlignmentError, DataArray, Dataset, Variable, concat, open_dataset, set_options, ) from xarray.core import dtypes, types from xarray.core.coordinates import Coordinates from xarray.core.datatree import DataTree from xarray.core.indexes import PandasIndex from xarray.structure import merge from xarray.tests import ( ConcatenatableArray, InaccessibleArray, UnexpectedDataAccess, assert_array_equal, assert_equal, assert_identical, requires_dask, requires_pyarrow, requires_scipy_or_netCDF4, ) from xarray.tests.indexes import XYIndex from xarray.tests.test_dataset import create_test_data if TYPE_CHECKING: from xarray.core.types import CombineAttrsOptions, JoinOptions # helper method to create multiple tests datasets to concat def create_concat_datasets( num_datasets: int = 2, seed: int | None = None, include_day: bool = True ) -> list[Dataset]: rng = np.random.default_rng(seed) lat = rng.standard_normal(size=(1, 4)) lon = rng.standard_normal(size=(1, 4)) result = [] variables = ["temperature", "pressure", "humidity", "precipitation", "cloud_cover"] for i in range(num_datasets): if include_day: data_tuple = ( ["x", "y", "day"], rng.standard_normal(size=(1, 4, 2)), ) data_vars = dict.fromkeys(variables, data_tuple) result.append( Dataset( data_vars=data_vars, coords={ "lat": (["x", "y"], lat), "lon": (["x", "y"], lon), "day": ["day" + str(i * 2 + 1), "day" + str(i * 2 + 2)], }, ) ) else: data_tuple = ( ["x", "y"], rng.standard_normal(size=(1, 4)), ) data_vars = dict.fromkeys(variables, data_tuple) result.append( Dataset( data_vars=data_vars, coords={"lat": (["x", "y"], lat), "lon": (["x", "y"], lon)}, ) ) return result # helper method to create multiple tests datasets to concat with specific types def create_typed_datasets( num_datasets: int = 2, seed: int | None = None ) -> list[Dataset]: var_strings = ["a", "b", "c", "d", "e", "f", "g", "h"] rng = np.random.default_rng(seed) lat = rng.standard_normal(size=(1, 4)) lon = rng.standard_normal(size=(1, 4)) return [ Dataset( data_vars={ "float": (["x", "y", "day"], rng.standard_normal(size=(1, 4, 2))), "float2": (["x", "y", "day"], rng.standard_normal(size=(1, 4, 2))), "string": ( ["x", "y", "day"], rng.choice(var_strings, size=(1, 4, 2)), ), "int": (["x", "y", "day"], rng.integers(0, 10, size=(1, 4, 2))), "datetime64": ( ["x", "y", "day"], np.arange( np.datetime64("2017-01-01"), np.datetime64("2017-01-09") ).reshape(1, 4, 2), ), "timedelta64": ( ["x", "y", "day"], np.reshape([pd.Timedelta(days=i) for i in range(8)], [1, 4, 2]), ), }, coords={ "lat": (["x", "y"], lat), "lon": (["x", "y"], lon), "day": ["day" + str(i * 2 + 1), "day" + str(i * 2 + 2)], }, ) for i in range(num_datasets) ] def test_concat_compat() -> None: ds1 = Dataset( { "has_x_y": (("y", "x"), [[1, 2]]), "has_x": ("x", [1, 2]), "no_x_y": ("z", [1, 2]), }, coords={"x": [0, 1], "y": [0], "z": [-1, -2]}, ) ds2 = Dataset( { "has_x_y": (("y", "x"), [[3, 4]]), "has_x": ("x", [1, 2]), "no_x_y": (("q", "z"), [[1, 2]]), }, coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]}, ) result = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals") assert_equal(ds2.no_x_y, 
result.no_x_y.transpose()) for var in ["has_x", "no_x_y"]: assert "y" not in result[var].dims and "y" not in result[var].coords with pytest.raises(ValueError, match=r"'q' not present in all datasets"): concat([ds1, ds2], dim="q", data_vars="all", join="outer") with pytest.raises(ValueError, match=r"'q' not present in all datasets"): concat([ds2, ds1], dim="q", data_vars="all", join="outer") def test_concat_missing_var() -> None: datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["humidity", "precipitation", "cloud_cover"] expected = expected.drop_vars(vars_to_drop) expected["pressure"][..., 2:] = np.nan datasets[0] = datasets[0].drop_vars(vars_to_drop) datasets[1] = datasets[1].drop_vars(vars_to_drop + ["pressure"]) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == ["temperature", "pressure"] assert_identical(actual, expected) @pytest.mark.parametrize("var", ["var4", pytest.param("var5", marks=requires_pyarrow)]) def test_concat_extension_array(var) -> None: data1 = create_test_data(use_extension_array=True) data2 = create_test_data(use_extension_array=True) concatenated = concat([data1, data2], dim="dim1") assert pd.Series( concatenated[var] == type(data2[var].variable.data)._concat_same_type( [ data1[var].variable.data, data2[var].variable.data, ] ) ).all() # need to wrap in series because pyarrow bool does not support `all` def test_concat_missing_multiple_consecutive_var() -> None: datasets = create_concat_datasets(3, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["humidity", "pressure"] expected["pressure"][..., :4] = np.nan expected["humidity"][..., :4] = np.nan datasets[0] = datasets[0].drop_vars(vars_to_drop) datasets[1] = datasets[1].drop_vars(vars_to_drop) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "precipitation", "cloud_cover", "pressure", "humidity", ] assert_identical(actual, expected) def test_concat_all_empty() -> None: ds1 = Dataset() ds2 = Dataset() expected = Dataset() actual = concat([ds1, ds2], dim="new_dim") assert_identical(actual, expected) def test_concat_second_empty() -> None: ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1}) ds2 = Dataset(coords={"x": 0.1}) expected = Dataset(data_vars={"a": ("y", [0.1, np.nan])}, coords={"x": 0.1}) actual = concat([ds1, ds2], dim="y") assert_identical(actual, expected) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan])}, coords={"x": ("y", [0.1, 0.1])} ) actual = concat([ds1, ds2], dim="y", coords="all") assert_identical(actual, expected) def test_concat_second_empty_with_scalar_data_var_only_on_first() -> None: # Check concatenating scalar data_var only present in ds1 ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1}) ds2 = Dataset(coords={"x": 0.1}) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": ("y", [0.1, np.nan])}, coords={"x": ("y", [0.1, 0.1])}, ) actual = concat([ds1, ds2], dim="y", coords="all", data_vars="all") assert_identical(actual, expected) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1} ) actual = concat( [ds1, ds2], dim="y", coords="different", data_vars="different", compat="equals" ) assert_identical(actual, expected) def test_concat_multiple_missing_variables() -> None: datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["pressure", "cloud_cover"] expected["pressure"][..., 2:] = np.nan expected["cloud_cover"][..., 2:] = 
np.nan datasets[1] = datasets[1].drop_vars(vars_to_drop) actual = concat(datasets, dim="day") # check the variables orders are the same assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] assert_identical(actual, expected) @pytest.mark.parametrize("include_day", [True, False]) def test_concat_multiple_datasets_missing_vars(include_day: bool) -> None: vars_to_drop = [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] # must specify if concat_dim='day' is not part of the vars kwargs = {"data_vars": "all"} if not include_day else {} datasets = create_concat_datasets( len(vars_to_drop), seed=123, include_day=include_day ) expected = concat(datasets, dim="day", **kwargs) # type: ignore[call-overload] for i, name in enumerate(vars_to_drop): if include_day: expected[name][..., i * 2 : (i + 1) * 2] = np.nan else: expected[name][i : i + 1, ...] = np.nan # set up the test data datasets = [ ds.drop_vars(varname) for ds, varname in zip(datasets, vars_to_drop, strict=True) ] actual = concat(datasets, dim="day", **kwargs) # type: ignore[call-overload] assert list(actual.data_vars.keys()) == [ "pressure", "humidity", "precipitation", "cloud_cover", "temperature", ] assert_identical(actual, expected) def test_concat_multiple_datasets_with_multiple_missing_variables() -> None: vars_to_drop_in_first = ["temperature", "pressure"] vars_to_drop_in_second = ["humidity", "precipitation", "cloud_cover"] datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") for name in vars_to_drop_in_first: expected[name][..., :2] = np.nan for name in vars_to_drop_in_second: expected[name][..., 2:] = np.nan # set up the test data datasets[0] = datasets[0].drop_vars(vars_to_drop_in_first) datasets[1] = datasets[1].drop_vars(vars_to_drop_in_second) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == [ "humidity", "precipitation", "cloud_cover", "temperature", "pressure", ] assert_identical(actual, expected) def test_concat_type_of_missing_fill() -> None: datasets = create_typed_datasets(2, seed=123) expected1 = concat(datasets, dim="day", fill_value=dtypes.NA) expected2 = concat(datasets[::-1], dim="day", fill_value=dtypes.NA) vars = ["float", "float2", "string", "int", "datetime64", "timedelta64"] expected = [expected2, expected1] for i, exp in enumerate(expected): sl = slice(i * 2, (i + 1) * 2) exp["float2"][..., sl] = np.nan exp["datetime64"][..., sl] = np.nan exp["timedelta64"][..., sl] = np.nan var = exp["int"] * 1.0 var[..., sl] = np.nan exp["int"] = var var = exp["string"].astype(object) var[..., sl] = np.nan exp["string"] = var # set up the test data datasets[1] = datasets[1].drop_vars(vars[1:]) actual = concat(datasets, dim="day", fill_value=dtypes.NA) assert_identical(actual, expected[1]) # reversed actual = concat(datasets[::-1], dim="day", fill_value=dtypes.NA) assert_identical(actual, expected[0]) def test_concat_order_when_filling_missing() -> None: vars_to_drop_in_first: list[str] = [] # drop middle vars_to_drop_in_second = ["humidity"] datasets = create_concat_datasets(2, seed=123) expected1 = concat(datasets, dim="day") for name in vars_to_drop_in_second: expected1[name][..., 2:] = np.nan expected2 = concat(datasets[::-1], dim="day") for name in vars_to_drop_in_second: expected2[name][..., :2] = np.nan # set up the test data datasets[0] = datasets[0].drop_vars(vars_to_drop_in_first) datasets[1] = datasets[1].drop_vars(vars_to_drop_in_second) actual = concat(datasets, 
dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] assert_identical(actual, expected1) actual = concat(datasets[::-1], dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "precipitation", "cloud_cover", "humidity", ] assert_identical(actual, expected2) @pytest.fixture def concat_var_names() -> Callable: # create var names list with one missing value def get_varnames(var_cnt: int = 10, list_cnt: int = 10) -> list[list[str]]: orig = [f"d{i:02d}" for i in range(var_cnt)] var_names = [] for _i in range(list_cnt): l1 = orig.copy() var_names.append(l1) return var_names return get_varnames @pytest.fixture def create_concat_ds() -> Callable: def create_ds( var_names: list[list[str]], dim: bool = False, coord: bool = False, drop_idx: list[int] | None = None, ) -> list[Dataset]: out_ds = [] ds = Dataset() ds = ds.assign_coords({"x": np.arange(2)}) ds = ds.assign_coords({"y": np.arange(3)}) ds = ds.assign_coords({"z": np.arange(4)}) for i, dsl in enumerate(var_names): vlist = dsl.copy() if drop_idx is not None: vlist.pop(drop_idx[i]) foo_data = np.arange(48, dtype=float).reshape(2, 2, 3, 4) dsi = ds.copy() if coord: dsi = ds.assign({"time": (["time"], [i * 2, i * 2 + 1])}) for k in vlist: dsi = dsi.assign({k: (["time", "x", "y", "z"], foo_data.copy())}) if not dim: dsi = dsi.isel(time=0) out_ds.append(dsi) return out_ds return create_ds @pytest.mark.parametrize("dim", [True, False]) @pytest.mark.parametrize("coord", [True, False]) def test_concat_fill_missing_variables( concat_var_names, create_concat_ds, dim: bool, coord: bool ) -> None: var_names = concat_var_names() drop_idx = [0, 7, 6, 4, 4, 8, 0, 6, 2, 0] expected = concat( create_concat_ds(var_names, dim=dim, coord=coord), dim="time", data_vars="all" ) for i, idx in enumerate(drop_idx): if dim: expected[var_names[0][idx]][i * 2 : i * 2 + 2] = np.nan else: expected[var_names[0][idx]][i] = np.nan concat_ds = create_concat_ds(var_names, dim=dim, coord=coord, drop_idx=drop_idx) actual = concat(concat_ds, dim="time", data_vars="all") assert list(actual.data_vars.keys()) == [ "d01", "d02", "d03", "d04", "d05", "d06", "d07", "d08", "d09", "d00", ] assert_identical(actual, expected) class TestConcatDataset: @pytest.fixture def data(self, request) -> Dataset: use_extension_array = request.param if hasattr(request, "param") else False return create_test_data(use_extension_array=use_extension_array).drop_dims( "dim3" ) def rectify_dim_order(self, data: Dataset, dataset) -> Dataset: # return a new dataset with all variable dimensions transposed into # the order in which they are found in `data` return Dataset( {k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()}, dataset.coords, attrs=dataset.attrs, ) @pytest.mark.parametrize("coords", ["different", "minimal"]) @pytest.mark.parametrize( "dim,data", [["dim1", True], ["dim2", False]], indirect=["data"] ) def test_concat_simple(self, data: Dataset, dim, coords) -> None: datasets = [g for _, g in data.groupby(dim)] assert_identical(data, concat(datasets, dim, coords=coords, compat="equals")) def test_concat_merge_variables_present_in_some_datasets( self, data: Dataset ) -> None: # coordinates present in some datasets but not others ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1}) ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2}) actual = concat([ds1, ds2], dim="y", coords="minimal") expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2}) 
assert_identical(expected, actual) # data variables present in some datasets but not others split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] data0, data1 = deepcopy(split_data) data1["foo"] = ("bar", np.random.randn(10)) actual = concat([data0, data1], "dim1", data_vars="minimal") expected = data.copy().assign(foo=data1.foo) assert_identical(expected, actual) # expand foo actual = concat([data0, data1], "dim1", data_vars="all") foo = np.ones((8, 10), dtype=data1.foo.dtype) * np.nan foo[3:] = data1.foo.values[None, ...] expected = data.copy().assign(foo=(["dim1", "bar"], foo)) assert_identical(expected, actual) @pytest.mark.parametrize("data", [False], indirect=["data"]) def test_concat_2(self, data: Dataset) -> None: dim = "dim2" datasets = [g.squeeze(dim) for _, g in data.groupby(dim, squeeze=False)] concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim] actual = concat(datasets, data[dim], coords=concat_over) assert_identical(data, self.rectify_dim_order(data, actual)) @pytest.mark.parametrize("coords", ["different", "minimal", "all"]) @pytest.mark.parametrize("dim", ["dim1", "dim2"]) def test_concat_coords_kwarg( self, data: Dataset, dim: str, coords: Literal["all", "minimal", "different"] ) -> None: data = data.copy(deep=True) # make sure the coords argument behaves as expected data.coords["extra"] = ("dim4", np.arange(3)) datasets = [g for _, g in data.groupby(dim)] actual = concat( datasets, data[dim], coords=coords, data_vars="all", compat="equals" ) if coords == "all": expected = np.array([data["extra"].values for _ in range(data.sizes[dim])]) assert_array_equal(actual["extra"].values, expected) else: assert_equal(data["extra"], actual["extra"]) def test_concat(self, data: Dataset) -> None: split_data = [ data.isel(dim1=slice(3)), data.isel(dim1=3), data.isel(dim1=slice(4, None)), ] assert_identical(data, concat(split_data, "dim1")) def test_concat_dim_precedence(self, data: Dataset) -> None: # verify that the dim argument takes precedence over # concatenating dataset variables of the same name dim = (2 * data["dim1"]).rename("dim1") datasets = [g for _, g in data.groupby("dim1", squeeze=False)] expected = data.copy() expected["dim1"] = dim assert_identical(expected, concat(datasets, dim)) def test_concat_data_vars_typing(self) -> None: # Testing typing, can be removed if the next function works with annotations. 
data = Dataset({"foo": ("x", np.random.randn(10))}) objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] actual = concat(objs, dim="x", data_vars="minimal") assert_identical(data, actual) @pytest.mark.parametrize("data_vars", ["minimal", "different", "all", [], ["foo"]]) def test_concat_data_vars(self, data_vars) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] actual = concat(objs, dim="x", data_vars=data_vars, compat="equals") assert_identical(data, actual) @pytest.mark.parametrize("coords", ["different", "all", ["c"]]) def test_concat_coords(self, coords) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5)) objs = [ data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1), ] if coords == "different": actual = concat(objs, dim="x", coords=coords, compat="equals") else: actual = concat(objs, dim="x", coords=coords) assert_identical(expected, actual) @pytest.mark.parametrize("coords", ["minimal", []]) def test_concat_coords_raises_merge_error(self, coords) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) objs = [ data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1), ] with pytest.raises(merge.MergeError, match="conflicting values"): concat(objs, dim="x", coords=coords, compat="equals") @pytest.mark.parametrize("data_vars", ["different", "all", ["foo"]]) def test_concat_constant_index(self, data_vars) -> None: # GH425 ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]}) if data_vars == "different": actual = concat([ds1, ds2], "y", data_vars=data_vars, compat="equals") else: actual = concat([ds1, ds2], "y", data_vars=data_vars) assert_identical(expected, actual) def test_concat_constant_index_None(self) -> None: ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) actual = concat([ds1, ds2], "new_dim", data_vars=None, compat="equals") expected = Dataset( {"foo": ("new_dim", [1.5, 2.5])}, coords={"y": 1}, ) assert_identical(actual, expected) def test_concat_constant_index_minimal(self) -> None: ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.raises(merge.MergeError, match="conflicting values"): concat([ds1, ds2], dim="new_dim", data_vars="minimal") with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises( ValueError, match="data_vars='minimal' and coords='minimal'" ): concat([ds1, ds2], dim="new_dim", data_vars="minimal") def test_concat_size0(self) -> None: data = create_test_data() split_data = [data.isel(dim1=slice(0, 0)), data] actual = concat(split_data, "dim1") assert_identical(data, actual) actual = concat(split_data[::-1], "dim1") assert_identical(data, actual) def test_concat_autoalign(self) -> None: ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])}) ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])}) actual = concat([ds1, ds2], "y", data_vars="all", join="outer") expected = Dataset( { "foo": DataArray( [[1, 2, np.nan], [1, np.nan, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) } ) assert_identical(expected, actual) def test_concat_errors(self) -> None: data = create_test_data() split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] with pytest.raises(ValueError, 
match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"Cannot specify both .*='different'"): concat( [data, data], dim="concat_dim", data_vars="different", compat="override" ) with pytest.raises(ValueError, match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"are not found in the coordinates"): concat([data, data], "new_dim", coords=["not_found"]) with pytest.raises(ValueError, match=r"are not found in the data variables"): concat([data, data], "new_dim", data_vars=["not_found"]) with pytest.raises(ValueError, match=r"global attributes not"): # call deepcopy separately to get unique attrs data0 = deepcopy(split_data[0]) data1 = deepcopy(split_data[1]) data1.attrs["foo"] = "bar" concat([data0, data1], "dim1", compat="identical") assert_identical(data, concat([data0, data1], "dim1", compat="equals")) with pytest.raises(ValueError, match=r"compat.* invalid"): concat(split_data, "dim1", compat="foobar") # type: ignore[call-overload] with pytest.raises(ValueError, match=r"compat.* invalid"): concat(split_data, "dim1", compat="minimal") with pytest.raises(ValueError, match=r"unexpected value for"): concat([data, data], "new_dim", coords="foobar") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z") def test_concat_join_kwarg(self) -> None: ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}) ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}) expected: dict[JoinOptions, Any] = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join, expected_item in expected.items(): actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected_item) # regression test for #3681 actual = concat( [ds1.drop_vars("x"), ds2.drop_vars("x")], join="override", dim="y" ) expected2 = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]} ) assert_identical(actual, expected2) @pytest.mark.parametrize( "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, 
"b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg( self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception ): ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs=var1_attrs) ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs=var2_attrs) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0])}, {"x": [0, 1]}, attrs=expected_attrs ) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" ds1 = Dataset({"a": ("x", [0], attrs1)}, coords={"x": ("x", [0], attrs1)}) ds2 = Dataset({"a": ("x", [0], attrs2)}, coords={"x": ("x", [1], attrs2)}) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0], expected_attrs)}, {"x": ("x", [0, 1], expected_attrs)}, ) assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_within_variables(self) -> None: objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}) assert_identical(actual, expected) objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})] actual = concat(objs, "x") assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_between_variables(self) -> None: objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})] actual = concat(objs, "x", data_vars="all") expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])}) assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_in_coord_variable(self) -> None: objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])}) assert_identical(actual, expected) def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim( self, ) -> None: # values should repeat 
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})] actual = concat(objs, "x", coords="different", compat="equals") expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])}) assert_identical(actual, expected) actual = concat(objs, "x", coords="all") assert_identical(actual, expected) def test_concat_promote_shape_broadcast_1d_x_1d_goes_to_2d(self) -> None: objs = [ Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}), Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}), ] actual = concat(objs, "x") expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]}) assert_identical(actual, expected) def test_concat_promote_shape_with_scalar_coordinates(self) -> None: # regression GH6384 objs = [ Dataset({}, {"x": pd.Interval(-1, 0, closed="right")}), Dataset({"x": [pd.Interval(0, 1, closed="right")]}), ] actual = concat(objs, "x") expected = Dataset( { "x": [ pd.Interval(-1, 0, closed="right"), pd.Interval(0, 1, closed="right"), ] } ) assert_identical(actual, expected) def test_concat_promote_shape_with_coordinates_of_particular_dtypes(self) -> None: # regression GH6416 (coord dtype) and GH6434 time_data1 = np.array(["2022-01-01", "2022-02-01"], dtype="datetime64[ns]") time_data2 = np.array("2022-03-01", dtype="datetime64[ns]") time_expected = np.array( ["2022-01-01", "2022-02-01", "2022-03-01"], dtype="datetime64[ns]" ) objs = [Dataset({}, {"time": time_data1}), Dataset({}, {"time": time_data2})] actual = concat(objs, "time") expected = Dataset({}, {"time": time_expected}) assert_identical(actual, expected) assert isinstance(actual.indexes["time"], pd.DatetimeIndex) def test_concat_do_not_promote(self) -> None: # GH438 objs = [ Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}), Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}), ] expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]}) actual = concat(objs, "t") assert_identical(expected, actual) objs = [ Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}), Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}), ] with pytest.raises(ValueError): concat(objs, "t", coords="minimal") def test_concat_dim_is_variable(self) -> None: objs = [Dataset({"x": 0}), Dataset({"x": 1})] coord = Variable("y", [3, 4], attrs={"foo": "bar"}) expected = Dataset({"x": ("y", [0, 1]), "y": coord}) actual = concat(objs, coord, data_vars="all") assert_identical(actual, expected) def test_concat_dim_is_dataarray(self) -> None: objs = [Dataset({"x": 0}), Dataset({"x": 1})] coord = DataArray([3, 4], dims="y", attrs={"foo": "bar"}) expected = Dataset({"x": ("y", [0, 1]), "y": coord}) actual = concat(objs, coord, data_vars="all") assert_identical(actual, expected) def test_concat_multiindex(self) -> None: midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") expected = Dataset(coords=midx_coords) actual = concat( [expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x" ) assert expected.equals(actual) assert isinstance(actual.x.to_index(), pd.MultiIndex) def test_concat_along_new_dim_multiindex(self) -> None: # see https://github.com/pydata/xarray/issues/6881 level_names = ["x_level_0", "x_level_1"] midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]], names=level_names) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords=midx_coords) concatenated = concat([ds], "new") actual = list(concatenated.xindexes.get_all_coords("x")) expected = ["x"] + level_names assert actual == expected @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, 
{"a": 2, "b": 1}]) def test_concat_fill_value(self, fill_value) -> None: datasets = [ Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1])}, {"x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1])}, {"x": [0, 1]}), ] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = Dataset( { "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]), }, {"x": [0, 1, 2]}, ) actual = concat( datasets, dim="t", fill_value=fill_value, data_vars="all", join="outer" ) assert_identical(actual, expected) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = Dataset( { "data": (["x1", "x2"], data), "x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype), } ) da2 = Dataset( { "data": (["x1", "x2"], data), "x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype), } ) actual = concat([da1, da2], dim=dim, join="outer") assert np.issubdtype(actual.x2.dtype, dtype) def test_concat_avoids_index_auto_creation(self) -> None: # TODO once passing indexes={} directly to Dataset constructor is allowed then no need to create coords first coords = Coordinates( {"x": ConcatenatableArray(np.array([1, 2, 3]))}, indexes={} ) datasets = [ Dataset( {"a": (["x", "y"], ConcatenatableArray(np.zeros((3, 3))))}, coords=coords, ) for _ in range(2) ] # should not raise on concat combined = concat(datasets, dim="x") assert combined["a"].shape == (6, 3) assert combined["a"].dims == ("x", "y") # nor have auto-created any indexes assert combined.indexes == {} # should not raise on stack combined = concat(datasets, dim="z", data_vars="all") assert combined["a"].shape == (2, 3, 3) assert combined["a"].dims == ("z", "x", "y") # nor have auto-created any indexes assert combined.indexes == {} def test_concat_avoids_index_auto_creation_new_1d_coord(self) -> None: # create 0D coordinates (without indexes) datasets = [ Dataset( coords={"x": ConcatenatableArray(np.array(10))}, ) for _ in range(2) ] with pytest.raises(UnexpectedDataAccess): concat(datasets, dim="x", create_index_for_new_dim=True) # should not raise on concat iff create_index_for_new_dim=False combined = concat(datasets, dim="x", create_index_for_new_dim=False) assert combined["x"].shape == (2,) assert combined["x"].dims == ("x",) # nor have auto-created any indexes assert combined.indexes == {} def test_concat_promote_shape_without_creating_new_index(self) -> None: # different shapes but neither have indexes ds1 = Dataset(coords={"x": 0}) ds2 = Dataset(data_vars={"x": [1]}).drop_indexes("x") actual = concat([ds1, ds2], dim="x", create_index_for_new_dim=False) expected = Dataset(data_vars={"x": [0, 1]}).drop_indexes("x") assert_identical(actual, expected, check_default_indexes=False) assert actual.indexes == {} @requires_scipy_or_netCDF4 def test_concat_combine_attrs_nan_after_netcdf_roundtrip(self, tmp_path) -> None: # Test for issue #10833: NaN attributes should be preserved # with combine_attrs="drop_conflicts" after NetCDF roundtrip import numpy as np # Create arrays with matching NaN fill_value attribute ds1 = Dataset( {"a": ("x", [0, 1])}, attrs={"fill_value": np.nan, "sensor": "G18", "field": "CTH"}, ) ds2 = Dataset( {"a": 
("x", [2, 3])}, attrs={"fill_value": np.nan, "sensor": "G16", "field": "CTH"}, ) # Save to NetCDF and reload (converts Python float NaN to NumPy scalar NaN) path1 = tmp_path / "ds1.nc" path2 = tmp_path / "ds2.nc" ds1.to_netcdf(path1) ds2.to_netcdf(path2) ds1_loaded = open_dataset(path1) ds2_loaded = open_dataset(path2) # Verify that NaN attributes are preserved after concat actual = concat( [ds1_loaded, ds2_loaded], dim="y", combine_attrs="drop_conflicts" ) # fill_value should be preserved (not dropped) since both have NaN assert "fill_value" in actual.attrs assert np.isnan(actual.attrs["fill_value"]) # field should be preserved (identical in both) assert actual.attrs["field"] == "CTH" # sensor should be dropped (conflicts) assert "sensor" not in actual.attrs ds1_loaded.close() ds2_loaded.close() class TestConcatDataArray: def test_concat(self) -> None: ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] bar = ds["bar"] # from dataset array: expected = DataArray( np.array([foo.values, bar.values]), dims=["w", "x", "y"], coords={"x": [0, 1]}, ) actual = concat([foo, bar], "w") assert_equal(expected, actual) # from iteration: grouped = [g.squeeze() for _, g in foo.groupby("x", squeeze=False)] stacked = concat(grouped, ds["x"]) assert_identical(foo, stacked) # with an index as the 'dim' argument stacked = concat(grouped, pd.Index(ds["x"], name="x")) assert_identical(foo, stacked) actual2 = concat( [foo.isel(x=0), foo.isel(x=1)], pd.Index([0, 1]), coords="all" ).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual2) actual3 = concat( [foo.isel(x=0), foo.isel(x=1)], [0, 1], coords="all" ).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual3) with pytest.raises(ValueError, match=r"not identical"): concat([foo, bar], dim="w", compat="identical") with pytest.raises(ValueError, match=r"not a valid argument"): concat([foo, bar], dim="w", data_vars="different") def test_concat_encoding(self) -> None: # Regression test for GH1297 ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] foo.encoding = {"complevel": 5} ds.encoding = {"unlimited_dims": "x"} assert concat([foo, foo], dim="x").encoding == foo.encoding assert concat([ds, ds], dim="x").encoding == ds.encoding @requires_dask def test_concat_lazy(self) -> None: import dask.array as da arrays = [ DataArray( da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"] ) for _ in range(2) ] # should not raise combined = concat(arrays, dim="z") assert combined.shape == (2, 3, 3) assert combined.dims == ("z", "x", "y") def test_concat_avoids_index_auto_creation(self) -> None: # TODO once passing indexes={} directly to DataArray constructor is allowed then no need to create coords first coords = Coordinates( {"x": ConcatenatableArray(np.array([1, 2, 3]))}, indexes={} ) arrays = [ DataArray( ConcatenatableArray(np.zeros((3, 3))), dims=["x", "y"], coords=coords, ) for _ in range(2) ] # should not raise on concat combined = concat(arrays, dim="x") assert combined.shape == (6, 3) assert combined.dims == ("x", "y") # nor have auto-created any indexes assert combined.indexes == {} # should not raise on stack combined = concat(arrays, dim="z") assert combined.shape == (2, 3, 3) assert combined.dims == ("z", "x", "y") # nor have auto-created any indexes assert 
combined.indexes == {} @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0]) def test_concat_fill_value(self, fill_value) -> None: foo = DataArray([1, 2], coords=[("x", [1, 2])]) bar = DataArray([1, 2], coords=[("x", [1, 3])]) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan expected = DataArray( [[1, 2, fill_value], [1, fill_value, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) actual = concat((foo, bar), dim="y", fill_value=fill_value, join="outer") assert_identical(actual, expected) def test_concat_join_kwarg(self) -> None: ds1 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]} ).to_dataarray() ds2 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]} ).to_dataarray() expected: dict[JoinOptions, Any] = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join, expected_item in expected.items(): actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected_item.to_dataarray()) def test_concat_combine_attrs_kwarg(self) -> None: da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42}) da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43}) expected: dict[CombineAttrsOptions, Any] = {} expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])]) expected["no_conflicts"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43} ) expected["override"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42} ) with pytest.raises(ValueError, match=r"combine_attrs='identical'"): actual = concat([da1, da2], dim="x", combine_attrs="identical") with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"): da3 = da2.copy(deep=True) da3.attrs["b"] = 44 actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts") for combine_attrs, expected_item in expected.items(): actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs) assert_identical(actual, expected_item) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype)}, ) da2 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype)}, ) actual = concat([da1, da2], dim=dim, join="outer") assert np.issubdtype(actual.x2.dtype, dtype) def test_concat_coord_name(self) -> None: da = DataArray([0], dims="a") da_concat = concat([da, da], dim=DataArray([0, 1], dims="b")) assert list(da_concat.coords) == ["b"] da_concat_std = concat([da, da], dim=DataArray([0, 1])) assert list(da_concat_std.coords) == ["dim_0"] @pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {})) @pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {})) def 
test_concat_attrs_first_variable(attr1, attr2) -> None: arrs = [ DataArray([[1], [2]], dims=["x", "y"], attrs=attr1), DataArray([[3], [4]], dims=["x", "y"], attrs=attr2), ] concat_attrs = concat(arrs, "y").attrs assert concat_attrs == attr1 def test_concat_merge_single_non_dim_coord() -> None: da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1}) actual = concat([da1, da2], "x", coords="minimal", compat="override") assert_identical(actual, expected) actual = concat([da1, da2], "x", coords="different", compat="equals") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"'y' not present in all datasets."): concat([da1, da2], dim="x", coords="all") da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1}) with pytest.raises(ValueError, match=r"'y' not present in all datasets"): concat([da1, da2, da3], dim="x", coords="all") with pytest.raises(ValueError, match=r"'y' not present in all datasets"): concat([da1, da2, da3], dim="x", coords="different", compat="equals") def test_concat_preserve_coordinate_order() -> None: x = np.arange(0, 5) y = np.arange(0, 10) time = np.arange(0, 4) data = np.zeros((4, 10, 5), dtype=bool) ds1 = Dataset( {"data": (["time", "y", "x"], data[0:2])}, coords={"time": time[0:2], "y": y, "x": x}, ) ds2 = Dataset( {"data": (["time", "y", "x"], data[2:4])}, coords={"time": time[2:4], "y": y, "x": x}, ) expected = Dataset( {"data": (["time", "y", "x"], data)}, coords={"time": time, "y": y, "x": x}, ) actual = concat([ds1, ds2], dim="time") # check dimension order for act, exp in zip(actual.dims, expected.dims, strict=True): assert act == exp assert actual.sizes[act] == expected.sizes[exp] # check coordinate order for act, exp in zip(actual.coords, expected.coords, strict=True): assert act == exp assert_identical(actual.coords[act], expected.coords[exp]) def test_concat_typing_check() -> None: ds = Dataset({"foo": 1}, {"bar": 2}) da = Dataset({"foo": 3}, {"bar": 4}).to_dataarray(dim="foo") # concatenate a list of non-homogeneous types must raise TypeError with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([ds, da], dim="foo") # type: ignore[list-item] with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([da, ds], dim="foo") # type: ignore[list-item] def test_concat_not_all_indexes() -> None: ds1 = Dataset(coords={"x": ("x", [1, 2])}) # ds2.x has no default index ds2 = Dataset(coords={"x": ("y", [3, 4])}) with pytest.raises( ValueError, match=r"'x' must have either an index or no index in all datasets.*" ): concat([ds1, ds2], dim="x") def test_concat_index_not_same_dim() -> None: ds1 = Dataset(coords={"x": ("x", [1, 2])}) ds2 = Dataset(coords={"x": ("y", [3, 4])}) # TODO: use public API for setting a non-default index, when available ds2._indexes["x"] = PandasIndex([3, 4], "y") with pytest.raises( ValueError, match=r"Cannot concatenate along dimension 'x' indexes with dimensions.*", ): concat([ds1, ds2], dim="x") class TestNewDefaults: def test_concat_second_empty_with_scalar_data_var_only_on_first(self) -> None: ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1}) ds2 = 
Dataset(coords={"x": 0.1}) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1} ) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='equals' to compat='override'", ): actual = concat( [ds1, ds2], dim="y", coords="different", data_vars="different" ) assert_identical(actual, expected) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): concat([ds1, ds2], dim="y", coords="different", data_vars="different") def test_concat_multiple_datasets_missing_vars(self) -> None: vars_to_drop = [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] datasets = create_concat_datasets( len(vars_to_drop), seed=123, include_day=False ) # set up the test data datasets = [ ds.drop_vars(varname) for ds, varname in zip(datasets, vars_to_drop, strict=True) ] with set_options(use_new_combine_kwarg_defaults=False): old = concat(datasets, dim="day") with set_options(use_new_combine_kwarg_defaults=True): new = concat(datasets, dim="day") assert_identical(old, new) @pytest.mark.parametrize("coords", ["different", "minimal", "all"]) def test_concat_coords_kwarg( self, coords: Literal["all", "minimal", "different"] ) -> None: data = create_test_data().drop_dims("dim3") # make sure the coords argument behaves as expected data.coords["extra"] = ("dim4", np.arange(3)) datasets = [g for _, g in data.groupby("dim1")] with set_options(use_new_combine_kwarg_defaults=False): expectation: AbstractContextManager = ( pytest.warns( FutureWarning, match="will change from compat='equals' to compat='override'", ) if coords == "different" else nullcontext() ) with expectation: old = concat(datasets, data["dim1"], coords=coords) with set_options(use_new_combine_kwarg_defaults=True): if coords == "different": with pytest.raises(ValueError): concat(datasets, data["dim1"], coords=coords) else: new = concat(datasets, data["dim1"], coords=coords) assert_identical(old, new) def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim( self, ) -> None: # values should repeat objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})] expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from coords='different' to coords='minimal'", ): old = concat(objs, "x") assert_identical(old, expected) with set_options(use_new_combine_kwarg_defaults=True): new = concat(objs, "x") with pytest.raises(AssertionError): assert_identical(new, old) with pytest.raises(ValueError, match="might be related to new default"): concat(objs, "x", coords="different") with pytest.raises(merge.MergeError, match="conflicting values"): concat(objs, "x", compat="equals") new = concat(objs, "x", coords="different", compat="equals") assert_identical(old, new) def test_concat_multi_dim_index() -> None: ds1 = ( Dataset( {"foo": (("x", "y"), np.random.randn(2, 2))}, coords={"x": [1, 2], "y": [3, 4]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) ds2 = ( Dataset( {"foo": (("x", "y"), np.random.randn(2, 2))}, coords={"x": [1, 2], "y": [5, 6]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) expected = ( Dataset( { "foo": ( ("x", "y"), np.concatenate([ds1.foo.data, ds2.foo.data], axis=-1), ) }, coords={"x": [1, 2], "y": [3, 4, 5, 6]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) # note: 
missing 'override' joins: list[types.JoinOptions] = ["inner", "outer", "exact", "left", "right"] for join in joins: actual = concat([ds1, ds2], dim="y", join=join) assert_identical(actual, expected, check_default_indexes=False) with pytest.raises(AlignmentError): actual = concat([ds1, ds2], dim="x", join="exact") # TODO: fix these, or raise better error message with pytest.raises(AssertionError): joins_lr: list[types.JoinOptions] = ["left", "right"] for join in joins_lr: actual = concat([ds1, ds2], dim="x", join=join) class TestConcatDataTree: def test_concat_datatree_along_existing_dim(self): dt1 = DataTree.from_dict(data={"/a": ("x", [1]), "/b": 3}, coords={"/x": [0]}) dt2 = DataTree.from_dict(data={"/a": ("x", [2]), "/b": 3}, coords={"/x": [1]}) expected = DataTree.from_dict( data={"/a": ("x", [1, 2]), "/b": 3}, coords={"/x": [0, 1]} ) actual = concat([dt1, dt2], dim="x", data_vars="minimal", coords="minimal") assert actual.identical(expected) def test_concat_datatree_along_existing_dim_defaults(self): # scalar coordinate dt1 = DataTree.from_dict(data={"/a": ("x", [1])}, coords={"/x": [0], "/b": 3}) dt2 = DataTree.from_dict(data={"/a": ("x", [2])}, coords={"/x": [1], "/b": 3}) expected = DataTree.from_dict( data={"/a": ("x", [1, 2])}, coords={"/x": [0, 1], "b": 3} ) actual = concat([dt1, dt2], dim="x") assert actual.identical(expected) # scalar data variable dt1 = DataTree.from_dict(data={"/a": ("x", [1]), "/b": 3}, coords={"/x": [0]}) dt2 = DataTree.from_dict(data={"/a": ("x", [2]), "/b": 3}, coords={"/x": [1]}) expected = DataTree.from_dict( data={"/a": ("x", [1, 2]), "/b": ("x", [3, 3])}, coords={"/x": [0, 1]} ) with pytest.warns( FutureWarning, match="will change from data_vars='all' to data_vars=None" ): actual = concat([dt1, dt2], dim="x") assert actual.identical(expected) def test_concat_datatree_isomorphic_error(self): dt1 = DataTree.from_dict(data={"/data": ("x", [1]), "/a": None}) dt2 = DataTree.from_dict(data={"/data": ("x", [2]), "/b": None}) with pytest.raises( ValueError, match="All trees must be isomorphic to be concatenated" ): concat([dt1, dt2], dim="x", data_vars="minimal", coords="minimal") def test_concat_datatree_datavars_all(self): dt1 = DataTree.from_dict(data={"/a": 1, "/c/b": ("y", [10])}) dt2 = DataTree.from_dict(data={"/a": 2, "/c/b": ("y", [20])}) dim = pd.Index([100, 200], name="x") actual = concat([dt1, dt2], dim=dim, data_vars="all", coords="minimal") expected = DataTree.from_dict( data={ "/a": (("x",), [1, 2]), "/c/b": (("x", "y"), [[10], [20]]), }, coords={"/x": dim}, ) assert actual.identical(expected) def test_concat_datatree_coords_all(self): dt1 = DataTree.from_dict(data={"/child/d": ("y", [10])}, coords={"/c": 1}) dt2 = DataTree.from_dict(data={"/child/d": ("y", [10])}, coords={"/c": 2}) dim = pd.Index([0, 1], name="x") actual = concat( [dt1, dt2], dim=dim, data_vars="minimal", coords="all", compat="equals" ) expected = DataTree.from_dict( data={"/child/d": ("y", [10])}, coords={ "/c": (("x",), [1, 2]), "/x": dim, "/child/x": dim, }, ) assert actual.identical(expected) def test_concat_datatree_datavars_different(self): dt1 = DataTree.from_dict(data={"/a": 0, "/b": 1}) dt2 = DataTree.from_dict(data={"/a": 0, "/b": 2}) dim = pd.Index([0, 1], name="x") actual = concat( [dt1, dt2], dim=dim, data_vars="different", coords="minimal", compat="equals", ) expected = DataTree.from_dict( data={"/a": 0, "/b": (("x",), [1, 2])}, coords={"/x": dim} ) assert actual.identical(expected) def test_concat_datatree_nodes(self): dt1 = DataTree.from_dict(data={"/a/d": 
("x", [1])}, coords={"/x": [0]}) dt2 = DataTree.from_dict(data={"/a/d": ("x", [2])}, coords={"/x": [1]}) actual = concat([dt1, dt2], dim="x", data_vars="minimal", coords="minimal") expected = DataTree.from_dict( data={"/a/d": ("x", [1, 2])}, coords={"/x": [0, 1]} ) assert actual.identical(expected) def test_concat_datatree_names(self): dt1 = DataTree(Dataset({"a": ("x", [1])}), name="a") dt2 = DataTree(Dataset({"a": ("x", [2])}), name="b") result = concat( [dt1, dt2], dim="x", data_vars="minimal", coords="minimal", compat="equals" ) assert result.name == "a" expected = DataTree(Dataset({"a": ("x", [1, 2])}), name="a") assert result.identical(expected) with pytest.raises(ValueError, match="DataTree names not identical"): concat( [dt1, dt2], dim="x", data_vars="minimal", coords="minimal", compat="identical", ) def test_concat_along_new_dim_raises_for_minimal(self): dt1 = DataTree.from_dict({"/a/d": 1}) dt2 = DataTree.from_dict({"/a/d": 2}) with pytest.raises( ValueError, match="data_vars='minimal' and coords='minimal'" ): concat([dt1, dt2], dim="y", data_vars="minimal", coords="minimal") def test_concat_data_in_child_only(self): dt1 = DataTree.from_dict( data={"/child/a": ("x", [1])}, coords={"/child/x": [0]} ) dt2 = DataTree.from_dict( data={"/child/a": ("x", [2])}, coords={"/child/x": [1]} ) actual = concat([dt1, dt2], dim="x", data_vars="minimal", coords="minimal") expected = DataTree.from_dict( data={"/child/a": ("x", [1, 2])}, coords={"/child/x": [0, 1]} ) assert actual.identical(expected) def test_concat_data_in_child_only_defaults(self): dt1 = DataTree.from_dict( data={"/child/a": ("x", [1])}, coords={"/child/x": [0]} ) dt2 = DataTree.from_dict( data={"/child/a": ("x", [2])}, coords={"/child/x": [1]} ) actual = concat([dt1, dt2], dim="x") expected = DataTree.from_dict( data={"/child/a": ("x", [1, 2])}, coords={"/child/x": [0, 1]} ) assert actual.identical(expected) def test_concat_data_in_child_new_dim(self): dt1 = DataTree.from_dict(data={"/child/a": 1}, coords={"/child/x": 0}) dt2 = DataTree.from_dict(data={"/child/a": 2}, coords={"/child/x": 1}) actual = concat([dt1, dt2], dim="x") expected = DataTree.from_dict( data={"/child/a": ("x", [1, 2])}, coords={"/child/x": [0, 1]} ) assert actual.identical(expected) def test_concat_different_dims_in_different_child(self): dt1 = DataTree.from_dict(coords={"/first/x": [1], "/second/x": [2]}) dt2 = DataTree.from_dict(coords={"/first/x": [3], "/second/x": [4]}) actual = concat([dt1, dt2], dim="x") expected = DataTree.from_dict(coords={"/first/x": [1, 3], "/second/x": [2, 4]}) assert actual.identical(expected) xarray-2025.12.0/xarray/tests/test_conventions.py000066400000000000000000000647411511464676000220050ustar00rootroot00000000000000from __future__ import annotations import contextlib import warnings import numpy as np import pandas as pd import pytest from xarray import ( Dataset, SerializationWarning, Variable, coding, conventions, date_range, open_dataset, ) from xarray.backends.common import WritableCFDataStore from xarray.backends.memory import InMemoryDataStore from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.conventions import decode_cf from xarray.testing import assert_identical from xarray.tests import ( assert_array_equal, requires_cftime, requires_dask, requires_netCDF4, ) from xarray.tests.test_backends import CFEncodedBase class TestBoolTypeArray: def test_booltype_array(self) -> None: x = np.array([1, 0, 1, 1, 0], dtype="i1") bx = coding.variables.BoolTypeArray(x) assert bx.dtype == bool 
assert_array_equal(bx, np.array([True, False, True, True, False], dtype=bool)) x = np.array([[1, 0, 1], [0, 1, 0]], dtype="i1") bx = coding.variables.BoolTypeArray(x) assert_array_equal(bx.transpose((1, 0)), x.transpose((1, 0))) class TestNativeEndiannessArray: def test(self) -> None: x = np.arange(5, dtype=">i8") expected = np.arange(5, dtype="int64") a = coding.variables.NativeEndiannessArray(x) assert a.dtype == expected.dtype assert a.dtype == expected[:].dtype assert_array_equal(a, expected) y = np.arange(6, dtype=">i8").reshape((2, 3)) b = coding.variables.NativeEndiannessArray(y) expected2 = np.arange(6, dtype="int64").reshape((2, 3)) assert_array_equal(b.transpose((1, 0)), expected2.transpose((1, 0))) def test_decode_cf_with_conflicting_fill_missing_value() -> None: expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"}) var = Variable( ["t"], np.arange(3), {"units": "foobar", "missing_value": 0, "_FillValue": 1} ) with pytest.warns(SerializationWarning, match="has multiple fill"): actual = conventions.decode_cf_variable("t", var) assert_identical(actual, expected) expected = Variable(["t"], np.arange(10), {"units": "foobar"}) var = Variable( ["t"], np.arange(10), {"units": "foobar", "missing_value": np.nan, "_FillValue": np.nan}, ) # the following code issues two warnings, so we need to check for both with pytest.warns(SerializationWarning) as winfo: actual = conventions.decode_cf_variable("t", var) for aw in winfo: assert "non-conforming" in str(aw.message) assert_identical(actual, expected) var = Variable( ["t"], np.arange(10), { "units": "foobar", "missing_value": np.float32(np.nan), "_FillValue": np.float32(np.nan), }, ) # the following code issues two warnings, so we need to check for both with pytest.warns(SerializationWarning) as winfo: actual = conventions.decode_cf_variable("t", var) for aw in winfo: assert "non-conforming" in str(aw.message) assert_identical(actual, expected) def test_decode_cf_variable_with_mismatched_coordinates() -> None: # tests for decoding mismatched coordinates attributes # see GH #1809 zeros1 = np.zeros((1, 5, 3)) orig = Dataset( { "XLONG": (["x", "y"], zeros1.squeeze(0), {}), "XLAT": (["x", "y"], zeros1.squeeze(0), {}), "foo": (["time", "x", "y"], zeros1, {"coordinates": "XTIME XLONG XLAT"}), "time": ("time", [0.0], {"units": "hours since 2017-01-01"}), } ) decoded = conventions.decode_cf(orig, decode_coords=True) assert decoded["foo"].encoding["coordinates"] == "XTIME XLONG XLAT" assert list(decoded.coords.keys()) == ["XLONG", "XLAT", "time"] decoded = conventions.decode_cf(orig, decode_coords=False) assert "coordinates" not in decoded["foo"].encoding assert decoded["foo"].attrs.get("coordinates") == "XTIME XLONG XLAT" assert list(decoded.coords.keys()) == ["time"] @requires_cftime class TestEncodeCFVariable: def test_incompatible_attributes(self) -> None: invalid_vars = [ Variable( ["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"} ), Variable(["t"], pd.to_timedelta(["1 day"]), {"units": "foobar"}), # type: ignore[arg-type, unused-ignore] Variable(["t"], [0, 1, 2], {"add_offset": 0}, {"add_offset": 2}), Variable(["t"], [0, 1, 2], {"_FillValue": 0}, {"_FillValue": 2}), ] for var in invalid_vars: with pytest.raises(ValueError): conventions.encode_cf_variable(var) def test_missing_fillvalue(self) -> None: v = Variable(["x"], np.array([np.nan, 1, 2, 3])) v.encoding = {"dtype": "int16"} # Expect both the SerializationWarning and the RuntimeWarning from numpy with pytest.warns(Warning) as record: 
conventions.encode_cf_variable(v) # Check we got the expected warnings warning_messages = [str(w.message) for w in record] assert any( "floating point data as an integer" in msg for msg in warning_messages ) assert any( "invalid value encountered in cast" in msg for msg in warning_messages ) def test_multidimensional_coordinates(self) -> None: # regression test for GH1763 # Set up test case with coordinates that have overlapping (but not # identical) dimensions. zeros1 = np.zeros((1, 5, 3)) zeros2 = np.zeros((1, 6, 3)) zeros3 = np.zeros((1, 5, 4)) orig = Dataset( { "lon1": (["x1", "y1"], zeros1.squeeze(0), {}), "lon2": (["x2", "y1"], zeros2.squeeze(0), {}), "lon3": (["x1", "y2"], zeros3.squeeze(0), {}), "lat1": (["x1", "y1"], zeros1.squeeze(0), {}), "lat2": (["x2", "y1"], zeros2.squeeze(0), {}), "lat3": (["x1", "y2"], zeros3.squeeze(0), {}), "foo1": (["time", "x1", "y1"], zeros1, {"coordinates": "lon1 lat1"}), "foo2": (["time", "x2", "y1"], zeros2, {"coordinates": "lon2 lat2"}), "foo3": (["time", "x1", "y2"], zeros3, {"coordinates": "lon3 lat3"}), "time": ("time", [0.0], {"units": "hours since 2017-01-01"}), } ) orig = conventions.decode_cf(orig) # Encode the coordinates, as they would be in a netCDF output file. enc, attrs = conventions.encode_dataset_coordinates(orig) # Make sure we have the right coordinates for each variable. foo1_coords = enc["foo1"].attrs.get("coordinates", "") foo2_coords = enc["foo2"].attrs.get("coordinates", "") foo3_coords = enc["foo3"].attrs.get("coordinates", "") assert foo1_coords == "lon1 lat1" assert foo2_coords == "lon2 lat2" assert foo3_coords == "lon3 lat3" # Should not have any global coordinates. assert "coordinates" not in attrs def test_var_with_coord_attr(self) -> None: # regression test for GH6310 # don't overwrite user-defined "coordinates" attributes orig = Dataset( {"values": ("time", np.zeros(2), {"coordinates": "time lon lat"})}, coords={ "time": ("time", np.zeros(2)), "lat": ("time", np.zeros(2)), "lon": ("time", np.zeros(2)), }, ) # Encode the coordinates, as they would be in a netCDF output file. enc, attrs = conventions.encode_dataset_coordinates(orig) # Make sure we have the right coordinates for each variable. values_coords = enc["values"].attrs.get("coordinates", "") assert values_coords == "time lon lat" # Should not have any global coordinates. assert "coordinates" not in attrs def test_do_not_overwrite_user_coordinates(self) -> None: # don't overwrite user-defined "coordinates" encoding orig = Dataset( coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])}, data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])}, ) orig["a"].encoding["coordinates"] = "y" orig["b"].encoding["coordinates"] = "z" enc, _ = conventions.encode_dataset_coordinates(orig) assert enc["a"].attrs["coordinates"] == "y" assert enc["b"].attrs["coordinates"] == "z" orig["a"].attrs["coordinates"] = "foo" with pytest.raises(ValueError, match=r"'coordinates' found in both attrs"): conventions.encode_dataset_coordinates(orig) def test_deterministic_coords_encoding(self) -> None: # the coordinates attribute is sorted when set by xarray.conventions ... # ... on a variable's coordinates attribute ds = Dataset({"foo": 0}, coords={"baz": 0, "bar": 0}) vars, attrs = conventions.encode_dataset_coordinates(ds) assert vars["foo"].attrs["coordinates"] == "bar baz" assert attrs.get("coordinates") is None # ... 
on the global coordinates attribute ds = ds.drop_vars("foo") vars, attrs = conventions.encode_dataset_coordinates(ds) assert attrs["coordinates"] == "bar baz" def test_emit_coordinates_attribute_in_attrs(self) -> None: orig = Dataset( {"a": 1, "b": 1}, coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)}, ) orig["a"].attrs["coordinates"] = None enc, _ = conventions.encode_dataset_coordinates(orig) # check coordinate attribute emitted for 'a' assert "coordinates" not in enc["a"].attrs assert "coordinates" not in enc["a"].encoding # check coordinate attribute not emitted for 'b' assert enc["b"].attrs.get("coordinates") == "t" assert "coordinates" not in enc["b"].encoding def test_emit_coordinates_attribute_in_encoding(self) -> None: orig = Dataset( {"a": 1, "b": 1}, coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)}, ) orig["a"].encoding["coordinates"] = None enc, _ = conventions.encode_dataset_coordinates(orig) # check coordinate attribute emitted for 'a' assert "coordinates" not in enc["a"].attrs assert "coordinates" not in enc["a"].encoding # check coordinate attribute not emitted for 'b' assert enc["b"].attrs.get("coordinates") == "t" assert "coordinates" not in enc["b"].encoding @requires_cftime class TestDecodeCF: def test_dataset(self) -> None: original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ) expected = Dataset( {"foo": ("t", [0, 0, 0], {"units": "bar"})}, { "t": pd.date_range("2000-01-01", periods=3), "y": ("t", [5.0, 10.0, np.nan]), }, ) actual = conventions.decode_cf(original) assert_identical(expected, actual) def test_invalid_coordinates(self) -> None: # regression test for GH308, GH1809 original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})}) decoded = Dataset({"foo": ("t", [1, 2], {}, {"coordinates": "invalid"})}) actual = conventions.decode_cf(original) assert_identical(decoded, actual) actual = conventions.decode_cf(original, decode_coords=False) assert_identical(original, actual) def test_decode_coordinates(self) -> None: # regression test for GH610 original = Dataset( {"foo": ("t", [1, 2], {"coordinates": "x"}), "x": ("t", [4, 5])} ) actual = conventions.decode_cf(original) assert actual.foo.encoding["coordinates"] == "x" def test_decode_coordinates_with_key_values(self) -> None: # regression test for GH9761 original = Dataset( { "temp": ( ("y", "x"), np.random.rand(2, 2), { "long_name": "temperature", "units": "K", "coordinates": "lat lon", "grid_mapping": "crs", }, ), "x": ( ("x"), np.arange(2), {"standard_name": "projection_x_coordinate", "units": "m"}, ), "y": ( ("y"), np.arange(2), {"standard_name": "projection_y_coordinate", "units": "m"}, ), "lat": ( ("y", "x"), np.random.rand(2, 2), {"standard_name": "latitude", "units": "degrees_north"}, ), "lon": ( ("y", "x"), np.random.rand(2, 2), {"standard_name": "longitude", "units": "degrees_east"}, ), "crs": ( (), None, { "grid_mapping_name": "transverse_mercator", "longitude_of_central_meridian": -2.0, }, ), "crs2": ( (), None, { "grid_mapping_name": "longitude_latitude", "longitude_of_central_meridian": -2.0, }, ), }, ) original.temp.attrs["grid_mapping"] = "crs: x y" _vars, _attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs"} original.temp.attrs["grid_mapping"] = "crs: x y crs2: lat lon" _vars, _attrs, coords = conventions.decode_cf_variables( 
original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} # stray colon original.temp.attrs["grid_mapping"] = "crs: x y crs2 : lat lon" _vars, _attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} original.temp.attrs["grid_mapping"] = "crs x y crs2: lat lon" with pytest.raises(ValueError, match="misses ':'"): conventions.decode_cf_variables(original.variables, {}, decode_coords="all") del original.temp.attrs["grid_mapping"] original.temp.attrs["formula_terms"] = "A: lat D: lon E: crs2" _vars, _attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs2"} original.temp.attrs["formula_terms"] = "A: lat lon D: crs E: crs2" with pytest.warns(UserWarning, match="has malformed content"): _vars, _attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} def test_0d_int32_encoding(self) -> None: original = Variable((), np.int32(0), encoding={"dtype": "int64"}) expected = Variable((), np.int64(0)) actual = coding.variables.NonStringCoder().encode(original) assert_identical(expected, actual) def test_decode_cf_with_multiple_missing_values(self) -> None: original = Variable(["t"], [0, 1, 2], {"missing_value": np.array([0, 1])}) expected = Variable(["t"], [np.nan, np.nan, 2], {}) with pytest.warns(SerializationWarning, match="has multiple fill"): actual = conventions.decode_cf_variable("t", original) assert_identical(expected, actual) def test_decode_cf_with_drop_variables(self) -> None: original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "x": ("x", [9, 8, 7], {"units": "km"}), "foo": ( ("t", "x"), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {"units": "bar"}, ), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ) expected = Dataset( { "t": pd.date_range("2000-01-01", periods=3), "foo": ( ("t", "x"), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {"units": "bar"}, ), "y": ("t", [5, 10, np.nan]), } ) actual = conventions.decode_cf(original, drop_variables=("x",)) actual2 = conventions.decode_cf(original, drop_variables="x") assert_identical(expected, actual) assert_identical(expected, actual2) @pytest.mark.filterwarnings("ignore:Ambiguous reference date string") def test_invalid_time_units_raises_eagerly(self) -> None: ds = Dataset({"time": ("time", [0, 1], {"units": "foobar since 123"})}) with pytest.raises(ValueError, match=r"unable to decode time"): decode_cf(ds) @pytest.mark.parametrize("decode_times", [True, False]) def test_invalid_timedelta_units_do_not_decode(self, decode_times) -> None: # regression test for #8269 ds = Dataset( {"time": ("time", [0, 1, 20], {"units": "days invalid", "_FillValue": 20})} ) expected = Dataset( {"time": ("time", [0.0, 1.0, np.nan], {"units": "days invalid"})} ) assert_identical(expected, decode_cf(ds, decode_times=decode_times)) @requires_cftime @pytest.mark.parametrize("time_unit", ["s", "ms", "us", "ns"]) def test_dataset_repr_with_netcdf4_datetimes(self, time_unit) -> None: # regression test for #347 attrs = {"units": "days since 0001-01-01", "calendar": "noleap"} with warnings.catch_warnings(): warnings.filterwarnings("ignore", "unable to decode time") ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)})) assert "(time) object" in repr(ds) attrs = {"units": "days since 1900-01-01"} ds = decode_cf( Dataset({"time": ("time", [0, 1], attrs)}), 
decode_times=CFDatetimeCoder(time_unit=time_unit), ) assert f"(time) datetime64[{time_unit}]" in repr(ds) @requires_cftime def test_decode_cf_datetime_transition_to_invalid(self) -> None: # manually create dataset with not-decoded date from datetime import datetime ds = Dataset(coords={"time": [0, 266 * 365]}) units = "days since 2000-01-01 00:00:00" ds.time.attrs = dict(units=units) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "unable to decode time") ds_decoded = conventions.decode_cf(ds) expected = np.array([datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]) assert_array_equal(ds_decoded.time.values, expected) @requires_dask def test_decode_cf_with_dask(self) -> None: import dask.array as da original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}), "bar": ("string2", [b"a", b"b"]), "baz": (("x"), [b"abc"], {"_Encoding": "utf-8"}), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ).chunk() decoded = conventions.decode_cf(original) assert all( isinstance(var.data, da.Array) for name, var in decoded.variables.items() if name not in decoded.xindexes ) assert_identical(decoded, conventions.decode_cf(original).compute()) @requires_dask def test_decode_dask_times(self) -> None: original = Dataset.from_dict( { "coords": {}, "dims": {"time": 5}, "data_vars": { "average_T1": { "dims": ("time",), "attrs": {"units": "days since 1958-01-01 00:00:00"}, "data": [87659.0, 88024.0, 88389.0, 88754.0, 89119.0], } }, } ) assert_identical( conventions.decode_cf(original.chunk()), conventions.decode_cf(original).chunk(), ) @pytest.mark.parametrize("time_unit", ["s", "ms", "us", "ns"]) def test_decode_cf_time_kwargs(self, time_unit) -> None: ds = Dataset.from_dict( { "coords": { "timedelta": { "data": np.array([1, 2, 3], dtype="int64"), "dims": "timedelta", "attrs": {"units": "days"}, }, "time": { "data": np.array([1, 2, 3], dtype="int64"), "dims": "time", "attrs": {"units": "days since 2000-01-01"}, }, }, "dims": {"time": 3, "timedelta": 3}, "data_vars": { "a": {"dims": ("time", "timedelta"), "data": np.ones((3, 3))}, }, } ) dsc = conventions.decode_cf( ds, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=CFTimedeltaCoder(time_unit=time_unit), ) assert dsc.timedelta.dtype == np.dtype(f"m8[{time_unit}]") assert dsc.time.dtype == np.dtype(f"M8[{time_unit}]") dsc = conventions.decode_cf(ds, decode_times=False) assert dsc.timedelta.dtype == np.dtype("int64") assert dsc.time.dtype == np.dtype("int64") dsc = conventions.decode_cf( ds, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=False, ) assert dsc.timedelta.dtype == np.dtype("int64") assert dsc.time.dtype == np.dtype(f"M8[{time_unit}]") dsc = conventions.decode_cf(ds, decode_times=False, decode_timedelta=True) assert dsc.timedelta.dtype == np.dtype("m8[ns]") assert dsc.time.dtype == np.dtype("int64") class CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore): def encode_variable(self, var, name=None): """encode one variable""" coder = coding.strings.EncodedStringCoder(allows_unicode=True) var = coder.encode(var, name=name) return var @requires_netCDF4 class TestCFEncodedDataStore(CFEncodedBase): @contextlib.contextmanager def create_store(self): yield CFEncodedInMemoryStore() @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} store = 
CFEncodedInMemoryStore()
        data.dump_to_store(store, **save_kwargs)
        yield open_dataset(store, **open_kwargs)

    @pytest.mark.skip("cannot roundtrip coordinates yet for CFEncodedInMemoryStore")
    def test_roundtrip_coordinates(self) -> None:
        pass

    def test_invalid_dataarray_names_raise(self) -> None:
        # only relevant for on-disk file formats
        pass

    def test_encoding_kwarg(self) -> None:
        # we haven't bothered to raise errors yet for unexpected encodings in
        # this test dummy
        pass

    def test_encoding_kwarg_fixed_width_string(self) -> None:
        # CFEncodedInMemoryStore doesn't support explicit string encodings.
        pass

    def test_encoding_unlimited_dims(self) -> None:
        # CFEncodedInMemoryStore doesn't support unlimited_dims.
        pass


class TestDecodeCFVariableWithArrayUnits:
    def test_decode_cf_variable_with_array_units(self) -> None:
        v = Variable(["t"], [1, 2, 3], {"units": np.array(["foobar"], dtype=object)})
        v_decoded = conventions.decode_cf_variable("test2", v)
        assert_identical(v, v_decoded)


def test_decode_cf_variable_timedelta64():
    variable = Variable(["time"], pd.timedelta_range("1D", periods=2))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


def test_decode_cf_variable_datetime64():
    variable = Variable(["time"], pd.date_range("2000", periods=2))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


@requires_cftime
def test_decode_cf_variable_cftime():
    variable = Variable(["time"], date_range("2000", periods=2, use_cftime=True))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


def test_scalar_units() -> None:
    # test that scalar units does not raise an exception
    var = Variable(["t"], [np.nan, np.nan, 2], {"units": np.nan})
    actual = conventions.decode_cf_variable("t", var)
    assert_identical(actual, var)


def test_decode_cf_error_includes_variable_name():
    ds = Dataset({"my_invalid_var": ([], 1e36, {"units": "days since 2000-01-01"})})
    with pytest.raises(
        ValueError,
        match=r"unable to decode(?s:.*)my_invalid_var",
    ):
        decode_cf(ds)


def test_encode_cf_variable_with_vlen_dtype() -> None:
    v = Variable(
        ["x"], np.array(["a", "b"], dtype=coding.strings.create_vlen_dtype(str))
    )
    encoded_v = conventions.encode_cf_variable(v)
    assert encoded_v.data.dtype.kind == "O"
    assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str

    # empty array
    v = Variable(["x"], np.array([], dtype=coding.strings.create_vlen_dtype(str)))
    encoded_v = conventions.encode_cf_variable(v)
    assert encoded_v.data.dtype.kind == "O"
    assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str


def test_decode_cf_variables_decode_timedelta_warning() -> None:
    v = Variable(["time"], [1, 2], attrs={"units": "seconds"})
    variables = {"a": v}

    with warnings.catch_warnings():
        warnings.filterwarnings("error", "decode_timedelta", FutureWarning)
        conventions.decode_cf_variables(variables, {}, decode_timedelta=True)

    with pytest.warns(FutureWarning, match="decode_timedelta"):
        conventions.decode_cf_variables(variables, {})
xarray-2025.12.0/xarray/tests/test_coordinate_transform.py000066400000000000000000000202521511464676000236470ustar00rootroot00000000000000
from collections.abc import Hashable
from typing import Any

import numpy as np
import pytest

import xarray as xr
from xarray.core.coordinate_transform import CoordinateTransform
from xarray.core.indexes import CoordinateTransformIndex
from xarray.tests import assert_equal, assert_identical
class SimpleCoordinateTransform(CoordinateTransform): """Simple uniform scale transform in a 2D space (x/y coordinates).""" def __init__(self, shape: tuple[int, int], scale: float, dtype: Any = None): super().__init__(("x", "y"), {"x": shape[1], "y": shape[0]}, dtype=dtype) self.scale = scale # array dimensions in reverse order (y = rows, x = cols) self.xy_dims = tuple(self.dims) self.dims = (self.dims[1], self.dims[0]) def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]: assert set(dim_positions) == set(self.dims) return { name: dim_positions[dim] * self.scale for name, dim in zip(self.coord_names, self.xy_dims, strict=False) } def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]: return {dim: coord_labels[dim] / self.scale for dim in self.xy_dims} def equals( self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, SimpleCoordinateTransform): return False return self.scale == other.scale def __repr__(self) -> str: return f"Scale({self.scale})" def test_abstract_coordinate_transform() -> None: tr = CoordinateTransform(["x"], {"x": 5}) with pytest.raises(NotImplementedError): tr.forward({"x": [1, 2]}) with pytest.raises(NotImplementedError): tr.reverse({"x": [3.0, 4.0]}) with pytest.raises(NotImplementedError): tr.equals(CoordinateTransform(["x"], {"x": 5})) def test_coordinate_transform_init() -> None: tr = SimpleCoordinateTransform((4, 4), 2.0) assert tr.coord_names == ("x", "y") # array dimensions in reverse order (y = rows, x = cols) assert tr.dims == ("y", "x") assert tr.dim_size == {"x": 4, "y": 4} assert tr.dtype == np.dtype(np.float64) tr2 = SimpleCoordinateTransform((4, 4), 2.0, dtype=np.int64) assert tr2.dtype == np.dtype(np.int64) @pytest.mark.parametrize("dims", [None, ("y", "x")]) def test_coordinate_transform_generate_coords(dims) -> None: tr = SimpleCoordinateTransform((2, 2), 2.0) actual = tr.generate_coords(dims) expected = {"x": [[0.0, 2.0], [0.0, 2.0]], "y": [[0.0, 0.0], [2.0, 2.0]]} assert set(actual) == set(expected) np.testing.assert_array_equal(actual["x"], expected["x"]) np.testing.assert_array_equal(actual["y"], expected["y"]) def create_coords(scale: float, shape: tuple[int, int]) -> xr.Coordinates: """Create x/y Xarray coordinate variables from a simple coordinate transform.""" tr = SimpleCoordinateTransform(shape, scale) index = CoordinateTransformIndex(tr) return xr.Coordinates.from_xindex(index) def test_coordinate_transform_variable() -> None: coords = create_coords(scale=2.0, shape=(2, 2)) assert coords["x"].dtype == np.dtype(np.float64) assert coords["y"].dtype == np.dtype(np.float64) assert coords["x"].shape == (2, 2) assert coords["y"].shape == (2, 2) np.testing.assert_array_equal(np.array(coords["x"]), [[0.0, 2.0], [0.0, 2.0]]) np.testing.assert_array_equal(np.array(coords["y"]), [[0.0, 0.0], [2.0, 2.0]]) def assert_repr(var: xr.Variable): assert ( repr(var._data) == "CoordinateTransformIndexingAdapter(transform=Scale(2.0))" ) assert_repr(coords["x"].variable) assert_repr(coords["y"].variable) def test_coordinate_transform_variable_repr_inline() -> None: var = create_coords(scale=2.0, shape=(2, 2))["x"].variable actual = var._data._repr_inline_(70) # type: ignore[union-attr] assert actual == "0.0 2.0 0.0 2.0" # truncated inline repr var2 = create_coords(scale=2.0, shape=(10, 10))["x"].variable actual2 = var2._data._repr_inline_(70) # type: ignore[union-attr] assert ( actual2 == "0.0 2.0 4.0 6.0 8.0 10.0 12.0 ... 
6.0 8.0 10.0 12.0 14.0 16.0 18.0" ) def test_coordinate_transform_variable_repr() -> None: var = create_coords(scale=2.0, shape=(2, 2))["x"].variable actual = repr(var) expected = """ Size: 32B [4 values with dtype=float64] """.strip() assert actual == expected def test_coordinate_transform_variable_basic_outer_indexing() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable assert var[0, 0] == 0.0 assert var[0, 1] == 2.0 assert var[0, -1] == 6.0 np.testing.assert_array_equal(var[:, 0:2], [[0.0, 2.0]] * 4) with pytest.raises(IndexError, match="out of bounds index"): var[5] with pytest.raises(IndexError, match="out of bounds index"): var[-5] def test_coordinate_transform_variable_vectorized_indexing() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable actual = var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}] expected = xr.Variable("z", [0.0]) assert_equal(actual, expected) with pytest.raises(IndexError, match="out of bounds index"): var[{"x": xr.Variable("z", [5]), "y": xr.Variable("z", [5])}] def test_coordinate_transform_setitem_error() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable # basic indexing with pytest.raises(TypeError, match="setting values is not supported"): var[0, 0] = 1.0 # outer indexing with pytest.raises(TypeError, match="setting values is not supported"): var[[0, 2], 0] = [1.0, 2.0] # vectorized indexing with pytest.raises(TypeError, match="setting values is not supported"): var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}] = 1.0 def test_coordinate_transform_transpose() -> None: coords = create_coords(scale=2.0, shape=(2, 2)) actual = coords["x"].transpose().values expected = [[0.0, 0.0], [2.0, 2.0]] np.testing.assert_array_equal(actual, expected) def test_coordinate_transform_equals() -> None: ds1 = create_coords(scale=2.0, shape=(2, 2)).to_dataset() ds2 = create_coords(scale=2.0, shape=(2, 2)).to_dataset() ds3 = create_coords(scale=4.0, shape=(2, 2)).to_dataset() # cannot use `assert_equal()` test utility function here yet # (indexes invariant check are still based on IndexVariable, which # doesn't work with coordinate transform index coordinate variables) assert ds1.equals(ds2) assert not ds1.equals(ds3) def test_coordinate_transform_sel() -> None: ds = create_coords(scale=2.0, shape=(4, 4)).to_dataset() data = [ [0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0], ] ds["data"] = (("y", "x"), data) actual = ds.sel( x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5]), method="nearest" ) expected = ds.isel(x=xr.Variable("z", [0, 3]), y=xr.Variable("z", [0, 0])) # cannot use `assert_equal()` test utility function here yet # (indexes invariant check are still based on IndexVariable, which # doesn't work with coordinate transform index coordinate variables) assert actual.equals(expected) with pytest.raises(ValueError, match=r".*only supports selection.*nearest"): ds.sel(x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5])) with pytest.raises(ValueError, match=r"missing labels for coordinate.*y"): ds.sel(x=[0.5, 5.5], method="nearest") with pytest.raises(TypeError, match=r".*only supports advanced.*indexing"): ds.sel(x=[0.5, 5.5], y=[0.0, 0.5], method="nearest") with pytest.raises(ValueError, match=r".*only supports advanced.*indexing"): ds.sel( x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5, 1.5]), method="nearest", ) def test_coordinate_transform_rename() -> None: ds = xr.Dataset(coords=create_coords(scale=2.0, shape=(2, 
2))) roundtripped = ds.rename(x="u", y="v").rename(u="x", v="y") assert_identical(ds, roundtripped, check_default_indexes=False) xarray-2025.12.0/xarray/tests/test_coordinates.py000066400000000000000000000253671511464676000217530ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Mapping import numpy as np import pandas as pd import pytest from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index, PandasIndex, PandasMultiIndex from xarray.core.variable import IndexVariable, Variable from xarray.structure.alignment import align from xarray.tests import assert_identical, source_ndarray class TestCoordinates: def test_init_noindex(self) -> None: coords = Coordinates(coords={"foo": ("x", [0, 1, 2])}) expected = Dataset(coords={"foo": ("x", [0, 1, 2])}) assert_identical(coords.to_dataset(), expected) def test_init_default_index(self) -> None: coords = Coordinates(coords={"x": [1, 2]}) expected = Dataset(coords={"x": [1, 2]}) assert_identical(coords.to_dataset(), expected) assert "x" in coords.xindexes @pytest.mark.filterwarnings("error:IndexVariable") def test_init_no_default_index(self) -> None: # dimension coordinate with no default index (explicit) coords = Coordinates(coords={"x": [1, 2]}, indexes={}) assert "x" not in coords.xindexes assert not isinstance(coords["x"], IndexVariable) def test_init_from_coords(self) -> None: expected = Dataset(coords={"foo": ("x", [0, 1, 2])}) coords = Coordinates(coords=expected.coords) assert_identical(coords.to_dataset(), expected) # test variables copied assert coords.variables["foo"] is not expected.variables["foo"] # test indexes are extracted expected = Dataset(coords={"x": [0, 1, 2]}) coords = Coordinates(coords=expected.coords) assert_identical(coords.to_dataset(), expected) assert expected.xindexes == coords.xindexes # coords + indexes not supported with pytest.raises( ValueError, match=r"passing both.*Coordinates.*indexes.*not allowed" ): coords = Coordinates( coords=expected.coords, indexes={"x": PandasIndex([0, 1, 2], "x")} ) def test_init_empty(self) -> None: coords = Coordinates() assert len(coords) == 0 def test_init_index_error(self) -> None: idx = PandasIndex([1, 2, 3], "x") with pytest.raises(ValueError, match="no coordinate variables found"): Coordinates(indexes={"x": idx}) with pytest.raises(TypeError, match=r".* is not an `xarray.indexes.Index`"): Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": "not_an_xarray_index"}, # type: ignore[dict-item] ) def test_init_dim_sizes_conflict(self) -> None: with pytest.raises(ValueError): Coordinates(coords={"foo": ("x", [1, 2]), "bar": ("x", [1, 2, 3, 4])}) def test_from_xindex(self) -> None: idx = PandasIndex([1, 2, 3], "x") coords = Coordinates.from_xindex(idx) assert isinstance(coords.xindexes["x"], PandasIndex) assert coords.xindexes["x"].equals(idx) expected = PandasIndex(idx, "x").create_variables() assert list(coords.variables) == list(expected) assert_identical(expected["x"], coords.variables["x"]) def test_from_xindex_error(self) -> None: class CustomIndexNoCoordsGenerated(Index): def create_variables(self, variables: Mapping | None = None): return {} idx = CustomIndexNoCoordsGenerated() with pytest.raises(ValueError, match=r".*index.*did not create any coordinate"): Coordinates.from_xindex(idx) def test_from_pandas_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = 
Coordinates.from_pandas_multiindex(midx, "x") assert isinstance(coords.xindexes["x"], PandasMultiIndex) assert coords.xindexes["x"].index.equals(midx) assert coords.xindexes["x"].dim == "x" expected = PandasMultiIndex(midx, "x").create_variables() assert list(coords.variables) == list(expected) for name in ("x", "one", "two"): assert_identical(expected[name], coords.variables[name]) @pytest.mark.filterwarnings("ignore:return type") def test_dims(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert set(coords.dims) == {"x"} def test_sizes(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.sizes == {"x": 3} def test_dtypes(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.dtypes == {"x": int} def test_getitem(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert_identical( coords["x"], DataArray([0, 1, 2], coords={"x": [0, 1, 2]}, name="x"), ) def test_delitem(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) del coords["x"] assert "x" not in coords with pytest.raises( KeyError, match="'nonexistent' is not in coordinate variables" ): del coords["nonexistent"] def test_update(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) coords.update({"y": ("y", [4, 5, 6])}) assert "y" in coords assert "y" in coords.xindexes expected = DataArray([4, 5, 6], coords={"y": [4, 5, 6]}, name="y") assert_identical(coords["y"], expected) def test_equals(self): coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.equals(coords) # Test with a different Coordinates object instead of a string other_coords = Coordinates(coords={"x": [3, 4, 5]}) assert not coords.equals(other_coords) def test_identical(self): coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.identical(coords) # Test with a different Coordinates object instead of a string other_coords = Coordinates(coords={"x": [3, 4, 5]}) assert not coords.identical(other_coords) def test_assign(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) expected = Coordinates(coords={"x": [0, 1, 2], "y": [3, 4]}) actual = coords.assign(y=[3, 4]) assert_identical(actual, expected) actual = coords.assign({"y": [3, 4]}) assert_identical(actual, expected) def test_copy(self) -> None: no_index_coords = Coordinates({"foo": ("x", [1, 2, 3])}) copied = no_index_coords.copy() assert_identical(no_index_coords, copied) v0 = no_index_coords.variables["foo"] v1 = copied.variables["foo"] assert v0 is not v1 assert source_ndarray(v0.data) is source_ndarray(v1.data) deep_copied = no_index_coords.copy(deep=True) assert_identical(no_index_coords.to_dataset(), deep_copied.to_dataset()) v0 = no_index_coords.variables["foo"] v1 = deep_copied.variables["foo"] assert v0 is not v1 assert source_ndarray(v0.data) is not source_ndarray(v1.data) def test_align(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) left = coords # test Coordinates._reindex_callback right = coords.to_dataset().isel(x=[0, 1]).coords left2, right2 = align(left, right, join="inner") assert_identical(left2, right2) # test Coordinates._overwrite_indexes right.update({"x": ("x", [4, 5, 6])}) left2, right2 = align(left, right, join="override") assert_identical(left2, left) assert_identical(left2, right2) def test_dataset_from_coords_with_multidim_var_same_name(self): # regression test for GH #8883 var = Variable(data=np.arange(6).reshape(2, 3), dims=["x", "y"]) coords = Coordinates(coords={"x": var}, indexes={}) ds = Dataset(coords=coords) assert ds.coords["x"].dims == ("x", "y") def 
test_drop_vars(self):
        coords = Coordinates(
            coords={
                "x": Variable("x", range(3)),
                "y": Variable("y", list("ab")),
                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
            },
            indexes={},
        )

        actual = coords.drop_vars("x")
        assert isinstance(actual, Coordinates)
        assert set(actual.variables) == {"a", "y"}

        actual = coords.drop_vars(["x", "y"])
        assert isinstance(actual, Coordinates)
        assert set(actual.variables) == {"a"}

    def test_drop_dims(self) -> None:
        coords = Coordinates(
            coords={
                "x": Variable("x", range(3)),
                "y": Variable("y", list("ab")),
                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
            },
            indexes={},
        )

        actual = coords.drop_dims("x")
        assert isinstance(actual, Coordinates)
        assert set(actual.variables) == {"y"}

        actual = coords.drop_dims(["x", "y"])
        assert isinstance(actual, Coordinates)
        assert set(actual.variables) == set()

    def test_rename_dims(self) -> None:
        coords = Coordinates(
            coords={
                "x": Variable("x", range(3)),
                "y": Variable("y", list("ab")),
                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
            },
            indexes={},
        )

        actual = coords.rename_dims({"x": "X"})
        assert isinstance(actual, Coordinates)
        assert set(actual.dims) == {"X", "y"}
        assert set(actual.variables) == {"a", "x", "y"}

        actual = coords.rename_dims({"x": "u", "y": "v"})
        assert isinstance(actual, Coordinates)
        assert set(actual.dims) == {"u", "v"}
        assert set(actual.variables) == {"a", "x", "y"}

    def test_rename_vars(self) -> None:
        coords = Coordinates(
            coords={
                "x": Variable("x", range(3)),
                "y": Variable("y", list("ab")),
                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
            },
            indexes={},
        )

        actual = coords.rename_vars({"x": "X"})
        assert isinstance(actual, Coordinates)
        assert set(actual.dims) == {"x", "y"}
        assert set(actual.variables) == {"a", "X", "y"}

        actual = coords.rename_vars({"x": "u", "y": "v"})
        assert isinstance(actual, Coordinates)
        assert set(actual.dims) == {"x", "y"}
        assert set(actual.variables) == {"a", "u", "v"}

    def test_operator_merge(self) -> None:
        coords1 = Coordinates({"x": ("x", [0, 1, 2])})
        coords2 = Coordinates({"y": ("y", [3, 4, 5])})
        expected = Dataset(coords={"x": [0, 1, 2], "y": [3, 4, 5]})

        actual = coords1 | coords2
        assert_identical(Dataset(coords=actual), expected)
xarray-2025.12.0/xarray/tests/test_cupy.py000066400000000000000000000032211511464676000204020ustar00rootroot00000000000000
from __future__ import annotations

import numpy as np
import pandas as pd
import pytest

import xarray as xr

cp = pytest.importorskip("cupy")


@pytest.fixture
def toy_weather_data():
    """Construct the example DataSet from the Toy weather data example.

    https://docs.xarray.dev/en/stable/examples/weather-data.html

    Here we construct the DataSet exactly as shown in the example and then
    convert the numpy arrays to cupy.
    """
    np.random.seed(123)
    times = pd.date_range("2000-01-01", "2001-12-31", name="time")
    annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))

    base = 10 + 15 * annual_cycle.reshape(-1, 1)
    tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)
    tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)

    ds = xr.Dataset(
        {
            "tmin": (("time", "location"), tmin_values),
            "tmax": (("time", "location"), tmax_values),
        },
        {"time": times, "location": ["IA", "IN", "IL"]},
    )

    ds.tmax.data = cp.asarray(ds.tmax.data)
    ds.tmin.data = cp.asarray(ds.tmin.data)

    return ds


def test_cupy_import() -> None:
    """Check the import worked."""
    assert cp


def test_check_data_stays_on_gpu(toy_weather_data) -> None:
    """Perform some operations and check the data stays on the GPU."""
    freeze = (toy_weather_data["tmin"] <= 0).groupby("time.month").mean("time")
    assert isinstance(freeze.data, cp.ndarray)


def test_where() -> None:
    from xarray.core.duck_array_ops import where

    data = cp.zeros(10)
    output = where(data < 1, 1, data).all()
    assert output
    assert isinstance(output, cp.ndarray)
xarray-2025.12.0/xarray/tests/test_dask.py000066400000000000000000002013321511464676000203470ustar00rootroot00000000000000
from __future__ import annotations

import operator
import pickle
import sys
from contextlib import suppress
from textwrap import dedent
from typing import Any

import numpy as np
import pandas as pd
import pytest

import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Dataset, Variable
from xarray.core import duck_array_ops
from xarray.core.duck_array_ops import lazy_array_equiv
from xarray.core.indexes import PandasIndex
from xarray.testing import assert_chunks_equal
from xarray.tests import (
    assert_allclose,
    assert_array_equal,
    assert_equal,
    assert_frame_equal,
    assert_identical,
    mock,
    raise_if_dask_computes,
    requires_pint,
    requires_scipy_or_netCDF4,
)
from xarray.tests.test_backends import create_tmp_file

dask = pytest.importorskip("dask")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")

ON_WINDOWS = sys.platform == "win32"


def test_raise_if_dask_computes():
    data = da.from_array(np.random.default_rng(0).random((4, 6)), chunks=(2, 2))
    with pytest.raises(RuntimeError, match=r"Too many computes"):
        with raise_if_dask_computes():
            data.compute()


class DaskTestCase:
    def assertLazyAnd(self, expected, actual, test):
        with dask.config.set(scheduler="synchronous"):
            test(actual, expected)

        if isinstance(actual, Dataset):
            for k, v in actual.variables.items():
                if k in actual.xindexes:
                    assert isinstance(v.data, np.ndarray)
                else:
                    assert isinstance(v.data, da.Array)
        elif isinstance(actual, DataArray):
            assert isinstance(actual.data, da.Array)
            for k, v in actual.coords.items():
                if k in actual.xindexes:
                    assert isinstance(v.data, np.ndarray)
                else:
                    assert isinstance(v.data, da.Array)
        elif isinstance(actual, Variable):
            assert isinstance(actual.data, da.Array)
        else:
            raise AssertionError()


class TestVariable(DaskTestCase):
    def assertLazyAndIdentical(self, expected, actual):
        self.assertLazyAnd(expected, actual, assert_identical)

    def assertLazyAndAllClose(self, expected, actual):
        self.assertLazyAnd(expected, actual, assert_allclose)

    @pytest.fixture(autouse=True)
    def setUp(self):
        self.values = np.random.default_rng(0).random((4, 6))
        self.data = da.from_array(self.values, chunks=(2, 2))

        self.eager_var = Variable(("x", "y"), self.values)
        self.lazy_var = Variable(("x", "y"), self.data)

    def test_basics(self):
        v = self.lazy_var
        assert self.data is v.data
        assert
self.data.chunks == v.chunks assert_array_equal(self.values, v) def test_copy(self): self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy()) self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True)) def test_chunk(self): test_cases: list[tuple[int | dict[str, Any], tuple[tuple[int, ...], ...]]] = [ ({}, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({"x": 3, "y": 3}, ((3, 1), (3, 3))), ({"x": 3}, ((3, 1), (2, 2, 2))), ({"x": (3, 1)}, ((3, 1), (2, 2, 2))), ] for chunks, expected in test_cases: rechunked = self.lazy_var.chunk(chunks) assert rechunked.chunks == expected self.assertLazyAndIdentical(self.eager_var, rechunked) expected_chunksizes = dict(zip(self.lazy_var.dims, expected, strict=True)) assert rechunked.chunksizes == expected_chunksizes def test_indexing(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0], v[0]) self.assertLazyAndIdentical(u[:1], v[:1]) self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]]) @pytest.mark.parametrize( "expected_data, index", [ (da.array([99, 2, 3, 4]), 0), (da.array([99, 99, 99, 4]), slice(2, None, -1)), (da.array([99, 99, 3, 99]), [0, -1, 1]), (da.array([99, 99, 99, 4]), np.arange(3)), (da.array([1, 99, 99, 99]), [False, True, True, True]), (da.array([1, 99, 99, 99]), np.array([False, True, True, True])), (da.array([99, 99, 99, 99]), Variable(("x"), np.array([True] * 4))), ], ) def test_setitem_dask_array(self, expected_data, index): arr = Variable(("x"), da.array([1, 2, 3, 4])) expected = Variable(("x"), expected_data) with raise_if_dask_computes(): arr[index] = 99 assert_identical(arr, expected) def test_squeeze(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze()) def test_equals(self): v = self.lazy_var assert v.equals(v) assert isinstance(v.data, da.Array) assert v.identical(v) assert isinstance(v.data, da.Array) def test_transpose(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.T, v.T) def test_shift(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2)) self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2)) assert v.data.chunks == v.shift(x=1).data.chunks def test_roll(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2)) assert v.data.chunks == v.roll(x=1).data.chunks def test_unary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(-u, -v) self.assertLazyAndIdentical(abs(u), abs(v)) self.assertLazyAndIdentical(u.round(), v.round()) def test_binary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(2 * u, 2 * v) self.assertLazyAndIdentical(u + u, v + v) self.assertLazyAndIdentical(u[0] + u, v[0] + v) def test_binary_op_bitshift(self) -> None: # bit shifts only work on ints so we need to generate # new eager and lazy vars rng = np.random.default_rng(0) values = rng.integers(low=-10000, high=10000, size=(4, 6)) data = da.from_array(values, chunks=(2, 2)) u = Variable(("x", "y"), values) v = Variable(("x", "y"), data) self.assertLazyAndIdentical(u << 2, v << 2) self.assertLazyAndIdentical(u << 5, v << 5) self.assertLazyAndIdentical(u >> 2, v >> 2) self.assertLazyAndIdentical(u >> 5, v >> 5) def test_repr(self): expected = dedent( f"""\ Size: 192B {self.lazy_var.data!r}""" ) assert expected == repr(self.lazy_var) def test_pickle(self): # Test that pickling/unpickling does not convert the dask # backend to numpy a1 = Variable(["x"], build_dask_array("x")) a1.compute() 
assert not a1._in_memory assert kernel_call_count == 1 a2 = pickle.loads(pickle.dumps(a1)) assert kernel_call_count == 1 assert_identical(a1, a2) assert not a1._in_memory assert not a2._in_memory def test_reduce(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(u.std(), v.std()) with raise_if_dask_computes(): actual = v.argmax(dim="x") self.assertLazyAndAllClose(u.argmax(dim="x"), actual) with raise_if_dask_computes(): actual = v.argmin(dim="x") self.assertLazyAndAllClose(u.argmin(dim="x"), actual) self.assertLazyAndAllClose((u > 1).any(), (v > 1).any()) self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x")) with pytest.raises(NotImplementedError, match=r"only works along an axis"): v.median() with pytest.raises(NotImplementedError, match=r"only works along an axis"): v.median(v.dims) with raise_if_dask_computes(): v.reduce(duck_array_ops.mean) def test_missing_values(self): values = np.array([0, 1, np.nan, 3]) data = da.from_array(values, chunks=(2,)) eager_var = Variable("x", values) lazy_var = Variable("x", data) self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var)) self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2)) self.assertLazyAndIdentical(eager_var.count(), lazy_var.count()) def test_concat(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x")) self.assertLazyAndIdentical( u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]]) ) def test_missing_methods(self): v = self.lazy_var with pytest.raises(NotImplementedError, match="dask"): v.argsort() with pytest.raises(NotImplementedError, match="dask"): v[0].item() # type: ignore[attr-defined] def test_univariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.sin(u), np.sin(v)) def test_bivariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(v, 0)) self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(0, v)) def test_univariate_xufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.sin(u), xu.sin(v)) def test_bivariate_xufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0)) self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v)) def test_compute(self): u = self.eager_var v = self.lazy_var assert dask.is_dask_collection(v) (v2,) = dask.compute(v + 1) assert not dask.is_dask_collection(v2) assert ((u + 1).data == v2.data).all() def test_persist(self): u = self.eager_var v = self.lazy_var + 1 (v2,) = dask.persist(v) assert v is not v2 assert len(v2.__dask_graph__()) < len(v.__dask_graph__()) # type: ignore[arg-type] assert v2.__dask_keys__() == v.__dask_keys__() assert dask.is_dask_collection(v) assert dask.is_dask_collection(v2) self.assertLazyAndAllClose(u + 1, v) self.assertLazyAndAllClose(u + 1, v2) @requires_pint def test_tokenize_duck_dask_array(self): import pint unit_registry: pint.UnitRegistry = pint.UnitRegistry() q = unit_registry.Quantity(self.data, "meter") variable = xr.Variable(("x", "y"), q) token = dask.base.tokenize(variable) post_op = variable + 5 * unit_registry.meter assert dask.base.tokenize(variable) != dask.base.tokenize(post_op) # 
Immutability check assert dask.base.tokenize(variable) == token class TestDataArrayAndDataset(DaskTestCase): def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, assert_identical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, assert_allclose) def assertLazyAndEqual(self, expected, actual): self.assertLazyAnd(expected, actual, assert_equal) @pytest.fixture(autouse=True) def setUp(self): self.values = np.random.randn(4, 6) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_array = DataArray( self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) self.lazy_array = DataArray( self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) def test_chunk(self) -> None: test_cases: list[ tuple[int | str | dict[str, Any], tuple[tuple[int, ...], ...]] ] = [ ({}, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({"x": 3, "y": 3}, ((3, 1), (3, 3))), ({"x": 3}, ((3, 1), (2, 2, 2))), ({"x": (3, 1)}, ((3, 1), (2, 2, 2))), ({"x": "16B"}, ((1, 1, 1, 1), (2, 2, 2))), ("16B", ((1, 1, 1, 1), (1,) * 6)), ("16MB", ((4,), (6,))), ] for chunks, expected in test_cases: # Test DataArray rechunked = self.lazy_array.chunk(chunks) assert rechunked.chunks == expected self.assertLazyAndIdentical(self.eager_array, rechunked) expected_chunksizes = dict(zip(self.lazy_array.dims, expected, strict=True)) assert rechunked.chunksizes == expected_chunksizes # Test Dataset lazy_dataset = self.lazy_array.to_dataset() eager_dataset = self.eager_array.to_dataset() expected_chunksizes = dict(zip(lazy_dataset.dims, expected, strict=True)) rechunked = lazy_dataset.chunk(chunks) # type: ignore[assignment] # Dataset.chunks has a different return type to DataArray.chunks - see issue #5843 assert rechunked.chunks == expected_chunksizes self.assertLazyAndIdentical(eager_dataset, rechunked) assert rechunked.chunksizes == expected_chunksizes def test_rechunk(self): chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2}) assert chunked.chunks == ((2,) * 2, (2,) * 3) self.assertLazyAndIdentical(self.lazy_array, chunked) def test_new_chunk(self): chunked = self.eager_array.chunk() assert chunked.data.name.startswith("xarray-") def test_lazy_dataset(self): lazy_ds = Dataset({"foo": (("x", "y"), self.data)}) assert isinstance(lazy_ds.foo.variable.data, da.Array) def test_lazy_array(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(u, v) self.assertLazyAndAllClose(-u, -v) self.assertLazyAndAllClose(u.T, v.T) self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(1 + u, 1 + v) actual = xr.concat([v[:2], v[2:]], "x") self.assertLazyAndAllClose(u, actual) def test_compute(self): u = self.eager_array v = self.lazy_array assert dask.is_dask_collection(v) (v2,) = dask.compute(v + 1) assert not dask.is_dask_collection(v2) assert ((u + 1).data == v2.data).all() def test_persist(self): u = self.eager_array v = self.lazy_array + 1 (v2,) = dask.persist(v) assert v is not v2 assert len(v2.__dask_graph__()) < len(v.__dask_graph__()) assert v2.__dask_keys__() == v.__dask_keys__() assert dask.is_dask_collection(v) assert dask.is_dask_collection(v2) self.assertLazyAndAllClose(u + 1, v) self.assertLazyAndAllClose(u + 1, v2) def test_concat_loads_variables(self): # Test that concat() computes not-in-memory variables at most once # and loads them in the output, while leaving the input unaltered. 
d1 = build_dask_array("d1") c1 = build_dask_array("c1") d2 = build_dask_array("d2") c2 = build_dask_array("c2") d3 = build_dask_array("d3") c3 = build_dask_array("c3") # Note: c is a non-index coord. # Index coords are loaded by IndexVariable.__init__. ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)}) ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)}) ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)}) assert kernel_call_count == 0 out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # each kernel is computed exactly once assert kernel_call_count == 6 # variables are loaded in the output assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all") # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"]) # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[]) # variables are loaded once as we are validating that they're identical assert kernel_call_count == 12 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="identical", ) # compat=identical doesn't do any more kernel calls than compat=equals assert kernel_call_count == 18 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) # When the test for "different" turns true halfway through, # stop computing variables, as doing so would not have any benefit ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])}) out = xr.concat( [ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # the variables of ds1 and ds2 were computed, but those of ds3 were not assert kernel_call_count == 22 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) # the data of ds1 and ds2 was loaded into numpy and then # concatenated to the data of ds3. Thus, only ds3 is computed now.
out.compute() assert kernel_call_count == 24 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 # now check that concat() is correctly using dask name equality to skip loads out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars="different", coords="different", compat="equals", ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical" ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="different", compat="identical", ) # c1,c3 must be computed for comparison since c2 is numpy; # d2 is computed too assert kernel_call_count == 28 out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="all", compat="identical", ) # no extra computes assert kernel_call_count == 30 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 def test_groupby(self): u = self.eager_array v = self.lazy_array expected = u.groupby("x").mean(...) with raise_if_dask_computes(): actual = v.groupby("x").mean(...) self.assertLazyAndAllClose(expected, actual) def test_rolling(self): u = self.eager_array v = self.lazy_array expected = u.rolling(x=2).mean() with raise_if_dask_computes(): actual = v.rolling(x=2).mean() self.assertLazyAndAllClose(expected, actual) @pytest.mark.parametrize("func", ["first", "last"]) def test_groupby_first_last(self, func): method = operator.methodcaller(func) u = self.eager_array v = self.lazy_array for coords in [u.coords, v.coords]: coords["ab"] = ("x", ["a", "a", "b", "b"]) expected = method(u.groupby("ab")) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) def test_reindex(self): u = self.eager_array.assign_coords(y=range(6)) v = self.lazy_array.assign_coords(y=range(6)) kwargs_list: list[dict[str, Any]] = [ {"x": [2, 3, 4]}, {"x": [1, 100, 2, 101, 3]}, {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]}, ] for kwargs in kwargs_list: expected = u.reindex(**kwargs) actual = v.reindex(**kwargs) self.assertLazyAndAllClose(expected, actual) def test_to_dataset_roundtrip(self): u = self.eager_array v = self.lazy_array expected = u.assign_coords(x=u["x"]) self.assertLazyAndEqual(expected, v.to_dataset("x").to_dataarray("x")) def test_merge(self): def duplicate_and_merge(array): return xr.merge([array, array.rename("bar")]).to_dataarray() expected = duplicate_and_merge(self.eager_array) actual = duplicate_and_merge(self.lazy_array) self.assertLazyAndEqual(expected, actual) def test_ufuncs(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(np.sin(u), np.sin(v)) def test_where_dispatching(self): a = np.arange(10) b = a > 3 x = da.from_array(a, 5) y = da.from_array(b, 5) expected = DataArray(a).where(b) self.assertLazyAndEqual(expected, DataArray(a).where(y)) self.assertLazyAndEqual(expected, 
DataArray(x).where(b)) self.assertLazyAndEqual(expected, DataArray(x).where(y)) def test_simultaneous_compute(self): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() count = [0] def counting_get(*args, **kwargs): count[0] += 1 return dask.get(*args, **kwargs) ds.load(scheduler=counting_get) assert count[0] == 1 def test_duplicate_dims(self): data = np.random.normal(size=(4, 4)) with pytest.warns(UserWarning, match="Duplicate dimension"): arr = DataArray(data, dims=("x", "x")) with pytest.warns(UserWarning, match="Duplicate dimension"): chunked_array = arr.chunk({"x": 2}) assert chunked_array.chunks == ((2, 2), (2, 2)) assert chunked_array.chunksizes == {"x": (2, 2)} def test_stack(self): data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4)) arr = DataArray(data, dims=("w", "x", "y")) stacked = arr.stack(z=("x", "y")) z = pd.MultiIndex.from_product( [list(range(3)), list(range(4))], names=["x", "y"] ) expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"]) assert stacked.data.chunks == expected.data.chunks self.assertLazyAndEqual(expected, stacked) def test_dot(self): eager = self.eager_array.dot(self.eager_array[0]) lazy = self.lazy_array.dot(self.lazy_array[0]) self.assertLazyAndAllClose(eager, lazy) def test_dataarray_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) expected = dedent( f"""\ Size: 8B {data!r} Coordinates: y (x) int64 8B dask.array Dimensions without coordinates: x""" ) assert expected == repr(a) assert kernel_call_count == 0 # should not evaluate dask array def test_dataset_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) expected = dedent( """\ Size: 16B Dimensions: (x: 1) Coordinates: y (x) int64 8B dask.array Dimensions without coordinates: x Data variables: a (x) int64 8B dask.array""" ) assert expected == repr(ds) assert kernel_call_count == 0 # should not evaluate dask array def test_dataarray_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variable nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) a1.compute() assert not a1._in_memory assert not a1.coords["y"]._in_memory assert kernel_call_count == 2 a2 = pickle.loads(pickle.dumps(a1)) assert kernel_call_count == 2 assert_identical(a1, a2) assert not a1._in_memory assert not a2._in_memory assert not a1.coords["y"]._in_memory assert not a2.coords["y"]._in_memory def test_dataset_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variables nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) ds1.compute() assert not ds1["a"]._in_memory assert not ds1["y"]._in_memory assert kernel_call_count == 2 ds2 = pickle.loads(pickle.dumps(ds1)) assert kernel_call_count == 2 assert_identical(ds1, ds2) assert not ds1["a"]._in_memory assert not ds2["a"]._in_memory assert not ds1["y"]._in_memory assert not ds2["y"]._in_memory def test_dataarray_getattr(self): # ipython/jupyter does a long list of getattr() calls to when trying to # represent an object. # Make sure we're not accidentally computing dask variables. 
data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = a.NOTEXIST assert kernel_call_count == 0 def test_dataset_getattr(self): # Test that accessing a missing attribute does not trigger computation of # the dask-backed data variables or the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = ds.NOTEXIST assert kernel_call_count == 0 def test_values(self): # Test that invoking the values property does not convert the dask # backend to numpy a = DataArray([1, 2]).chunk() assert not a._in_memory assert a.values.tolist() == [1, 2] assert not a._in_memory def test_from_dask_variable(self): # Test array creation from Variable with dask backend. # This is used e.g. in broadcast() a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo") self.assertLazyAndIdentical(self.lazy_array, a) @requires_pint def test_tokenize_duck_dask_array(self): import pint unit_registry: pint.UnitRegistry = pint.UnitRegistry() q = unit_registry.Quantity(self.data, unit_registry.meter) data_array = xr.DataArray( data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) token = dask.base.tokenize(data_array) post_op = data_array + 5 * unit_registry.meter assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op) # Immutability check assert dask.base.tokenize(data_array) == token class TestToDaskDataFrame: @pytest.mark.xfail(reason="https://github.com/dask/dask/issues/11584") def test_to_dask_dataframe(self): # Test conversion of Datasets to dask DataFrames x = np.random.randn(10) y = np.arange(10, dtype="uint8") t = list("abcdefghij") ds = Dataset( {"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)} ) expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t")) # test if 1-D index is correctly set up expected = dd.from_pandas(expected_pd, chunksize=4) actual = ds.to_dask_dataframe(set_index=True) # test if we have dask dataframes assert isinstance(actual, dd.DataFrame) # use the .equals from pandas to check that dataframes are equivalent assert_frame_equal(actual.compute(), expected.compute()) # test if no index is given expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4) actual = ds.to_dask_dataframe(set_index=False) assert isinstance(actual, dd.DataFrame) assert_frame_equal(actual.compute(), expected.compute()) @pytest.mark.xfail( reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, " "which causes the `y` column to have a different type depending on whether pyarrow is installed" ) def test_to_dask_dataframe_2D(self): # Test if 2-D dataset is supplied w = np.random.randn(2, 3) ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))}) ds["x"] = ("x", np.array([0, 1], np.int64)) ds["y"] = ("y", list("abc")) # dask dataframes do not (yet) support multiindex, # but when they do, this would be the expected index: exp_index = pd.MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"] ) expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index) # so for now, reset the index expected = expected.reset_index(drop=False) actual = ds.to_dask_dataframe(set_index=False) assert isinstance(actual, dd.DataFrame) assert_frame_equal(actual.compute(), expected)
@pytest.mark.xfail(raises=NotImplementedError) def test_to_dask_dataframe_2D_set_index(self): # This will fail until dask implements MultiIndex support w = da.from_array(np.random.randn(2, 3), chunks=(1, 2)) ds = Dataset({"w": (("x", "y"), w)}) ds["x"] = ("x", np.array([0, 1], np.int64)) ds["y"] = ("y", list("abc")) expected = ds.compute().to_dataframe() actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_coordinates(self): # Test if coordinate is also a dask array x = np.random.randn(10) t = np.arange(10) * 2 ds = Dataset( { "a": ("t", da.from_array(x, chunks=4)), "t": ("t", da.from_array(t, chunks=4)), } ) expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t")) expected = dd.from_pandas(expected_pd, chunksize=4) actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected.compute(), actual.compute()) @pytest.mark.xfail( reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, " "which causes the index to have a different type depending on whether pyarrow is installed" ) def test_to_dask_dataframe_not_daskarray(self): # Test if DataArray is not a dask array x = np.random.randn(10) y = np.arange(10, dtype="uint8") t = list("abcdefghij") ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)}) expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t")) actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_no_coordinate(self): x = da.from_array(np.random.randn(10), chunks=4) ds = Dataset({"x": ("dim_0", x)}) expected = ds.compute().to_dataframe().reset_index() actual = ds.to_dask_dataframe() assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) expected = ds.compute().to_dataframe() actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_dim_order(self): values = np.array([[1, 2], [3, 4]], dtype=np.int64) ds = Dataset({"w": (("x", "y"), values)}).chunk(1) expected = ds["w"].to_series().reset_index() actual = ds.to_dask_dataframe(dim_order=["x", "y"]) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) expected = ds["w"].T.to_series().reset_index() actual = ds.to_dask_dataframe(dim_order=["y", "x"]) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) with pytest.raises(ValueError, match=r"does not match the set of dimensions"): ds.to_dask_dataframe(dim_order=["x"]) @pytest.mark.parametrize("method", ["load", "compute"]) def test_dask_kwargs_variable(method): chunked_array = da.from_array(np.arange(3), chunks=(2,)) x = Variable("y", chunked_array) # args should be passed on to dask.compute() (via DaskManager.compute()) with mock.patch.object(da, "compute", return_value=(np.arange(3),)) as mock_compute: getattr(x, method)(foo="bar") mock_compute.assert_called_with(chunked_array, foo="bar") @pytest.mark.parametrize("method", ["load", "compute", "persist"]) def test_dask_kwargs_dataarray(method): data = da.from_array(np.arange(3), chunks=(2,)) x = DataArray(data) if method in ["load", "compute"]: dask_func = "dask.array.compute" else: dask_func = "dask.persist" # args should be passed on to "dask_func" with mock.patch(dask_func) as mock_func: getattr(x, method)(foo="bar") 
mock_func.assert_called_with(data, foo="bar") @pytest.mark.parametrize("method", ["load", "compute", "persist"]) def test_dask_kwargs_dataset(method): data = da.from_array(np.arange(3), chunks=(2,)) x = Dataset({"x": (("y"), data)}) if method in ["load", "compute"]: dask_func = "dask.array.compute" else: dask_func = "dask.persist" # args should be passed on to "dask_func" with mock.patch(dask_func) as mock_func: getattr(x, method)(foo="bar") mock_func.assert_called_with(data, foo="bar") kernel_call_count = 0 def kernel(name): """Dask kernel to test pickling/unpickling and __repr__. Must be global to make it pickleable. """ global kernel_call_count kernel_call_count += 1 return np.ones(1, dtype=np.int64) def build_dask_array(name): global kernel_call_count kernel_call_count = 0 return dask.array.Array( dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64 ) @pytest.mark.parametrize( "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]] ) def test_persist_Dataset(persist): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() ds = ds + 1 n = len(ds.foo.data.dask) ds2 = persist(ds) assert len(ds2.foo.data.dask) == 1 assert len(ds.foo.data.dask) == n # doesn't mutate in place @pytest.mark.parametrize( "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]] ) def test_persist_DataArray(persist): x = da.arange(10, chunks=(5,)) y = DataArray(x) z = y + 1 n = len(z.data.dask) zz = persist(z) assert len(z.data.dask) == n assert len(zz.data.dask) == zz.data.npartitions def test_dataarray_with_dask_coords(): import toolz x = xr.Variable("x", da.arange(8, chunks=(4,))) y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2) data = da.random.random((8, 8), chunks=(4, 4)) + 1 array = xr.DataArray(data, dims=["x", "y"]) array.coords["xx"] = x array.coords["yy"] = y assert dict(array.__dask_graph__()) == toolz.merge( data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__() ) (array2,) = dask.compute(array) assert not dask.is_dask_collection(array2) assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values()) def test_basic_compute(): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2}) for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]: with dask.config.set(scheduler=get): ds.compute() ds.foo.compute() ds.foo.variable.compute() def test_dataset_as_delayed(): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() assert dask.delayed(ds).compute() == ds.compute() def make_da(): da = xr.DataArray( np.ones((10, 20)), dims=["x", "y"], coords={"x": np.arange(10), "y": np.arange(100, 120)}, name="a", ).chunk({"x": 4, "y": 5}) da.x.attrs["long_name"] = "x" da.attrs["test"] = "test" da.coords["c2"] = 0.5 da.coords["ndcoord"] = da.x * 2 da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5}) return da def make_ds(): map_ds = xr.Dataset() map_ds["a"] = make_da() map_ds["b"] = map_ds.a + 50 map_ds["c"] = map_ds.x + 20 map_ds = map_ds.chunk({"x": 4, "y": 5}) map_ds["d"] = ("z", [1, 1, 1, 1]) map_ds["z"] = [0, 1, 2, 3] map_ds["e"] = map_ds.x + map_ds.y map_ds.coords["c1"] = 0.5 map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x))) map_ds.coords["cx"].attrs["test2"] = "test2" map_ds.attrs["test"] = "test" map_ds.coords["xx"] = map_ds["a"] * map_ds.y map_ds.x.attrs["long_name"] = "x" map_ds.y.attrs["long_name"] = "y" return map_ds # fixtures cannot be used in parametrize statements # instead use this workaround # 
https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly @pytest.fixture def map_da(): return make_da() @pytest.fixture def map_ds(): return make_ds() def test_unify_chunks(map_ds): ds_copy = map_ds.copy() ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10}) with pytest.raises(ValueError, match=r"inconsistent chunks"): _ = ds_copy.chunks expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)} with raise_if_dask_computes(): actual_chunks = ds_copy.unify_chunks().chunks assert actual_chunks == expected_chunks assert_identical(map_ds, ds_copy.unify_chunks()) out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy")) assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5)) assert out_b.chunks == expected_chunks # Test unordered dims da = ds_copy["cxy"] out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1})) assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5)) assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2)) # Test mismatch with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"): xr.unify_chunks(da, da.isel(x=slice(2))) @pytest.mark.parametrize("obj", [make_ds(), make_da()]) @pytest.mark.parametrize( "transform", [lambda x: x.compute(), lambda x: x.unify_chunks()] ) def test_unify_chunks_shallow_copy(obj, transform): obj = transform(obj) unified = obj.unify_chunks() assert_identical(obj, unified) # assert obj is not unified @pytest.mark.parametrize("obj", [make_da()]) def test_auto_chunk_da(obj): actual = obj.chunk("auto").data expected = obj.data.rechunk("auto") np.testing.assert_array_equal(actual, expected) assert actual.chunks == expected.chunks def test_auto_chunk_da_cftime(): yrs = np.arange(2000, 2120) cftime_dates = xr.date_range( start=f"{yrs[0]}-01-01", end=f"{yrs[-1]}-12-31", freq="1YE", use_cftime=True ) yr_array = np.tile(cftime_dates.values, (10, 1)) da = xr.DataArray( yr_array, dims=["x", "t"], coords={"x": np.arange(10), "t": cftime_dates} ).chunk({"x": 4, "t": 5}) actual = da.chunk("auto").data expected = da.data.rechunk({0: 10, 1: 120}) np.testing.assert_array_equal(actual, expected) assert actual.chunks == expected.chunks def test_map_blocks_error(map_da, map_ds): def bad_func(darray): return (darray * darray.x + 5 * darray.y)[:1, :1] with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"): xr.map_blocks(bad_func, map_da).compute() def returns_numpy(darray): return (darray * darray.x + 5 * darray.y).values with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"): xr.map_blocks(returns_numpy, map_da) with pytest.raises(TypeError, match=r"args must be"): xr.map_blocks(operator.add, map_da, args=10) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"kwargs must be"): xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20]) # type: ignore[arg-type] def really_bad_func(darray): raise ValueError("couldn't do anything.") with pytest.raises(Exception, match=r"Cannot infer"): xr.map_blocks(really_bad_func, map_da) ds_copy = map_ds.copy() ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10}) with pytest.raises(ValueError, match=r"inconsistent chunks"): xr.map_blocks(bad_func, ds_copy) with pytest.raises(TypeError, match=r"Cannot pass dask collections"): xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk())) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks(obj): def func(obj): result = obj + obj.x + 5 * obj.y return result with raise_if_dask_computes(): actual = xr.map_blocks(func, obj) expected = func(obj) assert_chunks_equal(expected.chunk(), 
actual) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_mixed_type_inputs(obj): def func(obj1, non_xarray_input, obj2): result = obj1 + obj1.x + 5 * obj1.y return result with raise_if_dask_computes(): actual = xr.map_blocks(func, obj, args=["non_xarray_input", obj]) expected = func(obj, "non_xarray_input", obj) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_convert_args_to_list(obj): expected = obj + 10 with raise_if_dask_computes(): actual = xr.map_blocks(operator.add, obj, [10]) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) def test_map_blocks_dask_args(): da1 = xr.DataArray( np.ones((10, 20)), dims=["x", "y"], coords={"x": np.arange(10), "y": np.arange(20)}, ).chunk({"x": 5, "y": 4}) # check that block shapes are the same def sumda(da1, da2): assert da1.shape == da2.shape return da1 + da2 da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks(sumda, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) # one dimension in common da2 = (da1 + 1).isel(x=1, drop=True) with raise_if_dask_computes(): mapped = xr.map_blocks(operator.add, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) # test that everything works when dimension names are different da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"}) with raise_if_dask_computes(): mapped = xr.map_blocks(operator.add, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"): xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})]) with pytest.raises(ValueError, match=r"cannot align.*index.*are not equal"): xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))]) # reduction da1 = da1.chunk({"x": -1}) da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2]) xr.testing.assert_equal((da1 + da2).sum("x"), mapped) # reduction with template da1 = da1.chunk({"x": -1}) da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks( lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x") ) xr.testing.assert_equal((da1 + da2).sum("x"), mapped) # bad template: not chunked with pytest.raises(ValueError, match="Provided template has no dask arrays"): xr.map_blocks( lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x").compute(), ) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_add_attrs(obj): def add_attrs(obj): obj = obj.copy(deep=True) obj.attrs["new"] = "new" obj.cxy.attrs["new2"] = "new2" return obj expected = add_attrs(obj) with raise_if_dask_computes(): actual = xr.map_blocks(add_attrs, obj) assert_identical(actual, expected) # when template is specified, attrs are copied from template, not set by function with raise_if_dask_computes(): actual = xr.map_blocks(add_attrs, obj, template=obj) assert_identical(actual, obj) def test_map_blocks_change_name(map_da): def change_name(obj): obj = obj.copy(deep=True) obj.name = "new" return obj expected = change_name(map_da) with raise_if_dask_computes(): actual = xr.map_blocks(change_name, map_da) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_kwargs(obj): expected = xr.full_like(obj, fill_value=np.nan) with raise_if_dask_computes(): actual = xr.map_blocks(xr.full_like, obj, 
kwargs=dict(fill_value=np.nan)) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) def test_map_blocks_to_dataarray(map_ds): with raise_if_dask_computes(): actual = xr.map_blocks(lambda x: x.to_dataarray(), map_ds) # to_dataarray does not preserve name, so cannot use assert_identical assert_equal(actual, map_ds.to_dataarray()) @pytest.mark.parametrize( "func", [ lambda x: x, lambda x: x.to_dataset(), lambda x: x.drop_vars("x"), lambda x: x.expand_dims(k=[1, 2, 3]), lambda x: x.expand_dims(k=3), lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)), lambda x: x.astype(np.int32), lambda x: x.x, ], ) def test_map_blocks_da_transformations(func, map_da): with raise_if_dask_computes(): actual = xr.map_blocks(func, map_da) assert_identical(actual, func(map_da)) @pytest.mark.parametrize( "func", [ lambda x: x, lambda x: x.drop_vars("cxy"), lambda x: x.drop_vars("a"), lambda x: x.drop_vars("x"), lambda x: x.expand_dims(k=[1, 2, 3]), lambda x: x.expand_dims(k=3), lambda x: x.rename({"a": "new1", "b": "new2"}), lambda x: x.x, ], ) def test_map_blocks_ds_transformations(func, map_ds): with raise_if_dask_computes(): actual = xr.map_blocks(func, map_ds) assert_identical(actual, func(map_ds)) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_da_ds_with_template(obj): func = lambda x: x.isel(x=[1]) # a simple .isel(x=[1, 5, 9]) puts all those in a single chunk. template = xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, obj, template=template) assert_identical(actual, template) # Check that indexes are written into the graph directly dsk = dict(actual.__dask_graph__()) assert {k for k in dsk if "x-coordinate" in k} assert all( isinstance(v, PandasIndex) for k, v in dsk.items() if "x-coordinate" in k ) with raise_if_dask_computes(): actual = obj.map_blocks(func, template=template) assert_identical(actual, template) def test_map_blocks_roundtrip_string_index(): ds = xr.Dataset( {"data": (["label"], [1, 2, 3])}, coords={"label": ["foo", "bar", "baz"]} ).chunk(label=1) assert ds.label.dtype == np.dtype("=U3") mapped = ds.map_blocks(lambda x: x, template=ds) assert mapped.label.dtype == ds.label.dtype mapped = ds.map_blocks(lambda x: x, template=None) assert mapped.label.dtype == ds.label.dtype mapped = ds.data.map_blocks(lambda x: x, template=ds.data) assert mapped.label.dtype == ds.label.dtype mapped = ds.data.map_blocks(lambda x: x, template=None) assert mapped.label.dtype == ds.label.dtype def test_map_blocks_template_convert_object(): da = make_da() ds = da.to_dataset() func = lambda x: x.to_dataset().isel(x=[1]) template = xr.concat([da.to_dataset().isel(x=[i]) for i in [1, 5, 9]], dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, da, template=template) assert_identical(actual, template) func = lambda x: x.to_dataarray().isel(x=[1]) template = xr.concat([ds.to_dataarray().isel(x=[i]) for i in [1, 5, 9]], dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, ds, template=template) assert_identical(actual, template) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_errors_bad_template(obj): with pytest.raises(ValueError, match=r"unexpected coordinate variables"): xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute() with pytest.raises(ValueError, match=r"does not contain coordinate variables"): xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute() with 
pytest.raises(ValueError, match=r"Dimensions {'x'} missing"): xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute() with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"): xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute() with pytest.raises(TypeError, match=r"must be a DataArray"): xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute() # type: ignore[arg-type] with pytest.raises(ValueError, match=r"map_blocks requires that one block"): xr.map_blocks( lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1]) ).compute() with pytest.raises(ValueError, match=r"Expected index 'x' to be"): xr.map_blocks( lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values obj, template=xr.concat( [obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x" ), ).compute() def test_map_blocks_errors_bad_template_2(map_ds): with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"): xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute() @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_object_method(obj): def func(obj): result = obj + obj.x + 5 * obj.y return result with raise_if_dask_computes(): expected = xr.map_blocks(func, obj) actual = obj.map_blocks(func) assert_identical(expected, actual) def test_map_blocks_hlg_layers(): # regression test for #3599 ds = xr.Dataset( { "x": (("a",), dask.array.ones(10, chunks=(5,))), "z": (("b",), dask.array.ones(10, chunks=(5,))), } ) mapped = ds.map_blocks(lambda x: x) xr.testing.assert_equal(mapped, ds) def test_make_meta(map_ds): from xarray.core.parallel import make_meta meta = make_meta(map_ds) for variable in map_ds._coord_names: assert variable in meta._coord_names assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim for variable in map_ds.data_vars: assert variable in meta.data_vars assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim def test_identical_coords_no_computes(): lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) a = xr.DataArray( da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2} ) b = xr.DataArray( da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2} ) with raise_if_dask_computes(): c = a + b assert_identical(c, a) @pytest.mark.parametrize( "obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()] ) @pytest.mark.parametrize( "transform", [ lambda x: x.reset_coords(), lambda x: x.reset_coords(drop=True), lambda x: x.isel(x=1), lambda x: x.attrs.update(new_attrs=1), lambda x: x.assign_coords(cxy=1), lambda x: x.rename({"x": "xnew"}), lambda x: x.rename({"cxy": "cxynew"}), ], ) def test_token_changes_on_transform(obj, transform): with raise_if_dask_computes(): assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj)) @pytest.mark.parametrize( "obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()] ) def test_token_changes_when_data_changes(obj): with raise_if_dask_computes(): t1 = dask.base.tokenize(obj) # Change data_var if isinstance(obj, DataArray): obj *= 2 else: obj["a"] *= 2 with raise_if_dask_computes(): t2 = dask.base.tokenize(obj) assert t2 != t1 # Change non-index coord obj.coords["ndcoord"] *= 2 with raise_if_dask_computes(): t3 = dask.base.tokenize(obj) assert t3 != t2 # Change IndexVariable obj = obj.assign_coords(x=obj.x * 2) with raise_if_dask_computes(): t4 = dask.base.tokenize(obj) assert t4 != t3 @pytest.mark.parametrize("obj", 
[make_da().compute(), make_ds().compute()]) def test_token_changes_when_buffer_changes(obj): with raise_if_dask_computes(): t1 = dask.base.tokenize(obj) if isinstance(obj, DataArray): obj[0, 0] = 123 else: obj["a"][0, 0] = 123 with raise_if_dask_computes(): t2 = dask.base.tokenize(obj) assert t2 != t1 obj.coords["ndcoord"][0] = 123 with raise_if_dask_computes(): t3 = dask.base.tokenize(obj) assert t3 != t2 @pytest.mark.parametrize( "transform", [lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)], ) @pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]]) def test_token_identical(obj, transform): with raise_if_dask_computes(): assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj)) assert dask.base.tokenize(obj.compute()) == dask.base.tokenize( transform(obj.compute()) ) @pytest.mark.parametrize( "obj", [ make_ds(), # Dataset make_ds().variables["c2"], # Variable make_ds().variables["x"], # IndexVariable ], ) def test_tokenize_empty_attrs(obj): """Issues #6970 and #8788""" obj.attrs = {} assert obj._attrs is None a = dask.base.tokenize(obj) assert obj.attrs == {} assert obj._attrs == {} # attrs getter changed None to dict b = dask.base.tokenize(obj) assert a == b obj2 = obj.copy() c = dask.base.tokenize(obj2) assert a == c def test_recursive_token(): """Test that tokenization is invoked recursively, and doesn't just rely on the output of str() """ a = np.ones(10000) b = np.ones(10000) b[5000] = 2 assert str(a) == str(b) assert dask.base.tokenize(a) != dask.base.tokenize(b) # Test DataArray and Variable da_a = DataArray(a) da_b = DataArray(b) assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b) # Test Dataset ds_a = da_a.to_dataset(name="x") ds_b = da_b.to_dataset(name="x") assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b) # Test IndexVariable da_a = DataArray(a, dims=["x"], coords={"x": a}) da_b = DataArray(a, dims=["x"], coords={"x": b}) assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b) @requires_scipy_or_netCDF4 def test_normalize_token_with_backend(map_ds): with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file: map_ds.to_netcdf(tmp_file) read = xr.open_dataset(tmp_file) assert dask.base.tokenize(map_ds) != dask.base.tokenize(read) read.close() @pytest.mark.parametrize( "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"] ) def test_lazy_array_equiv_variables(compat): var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2)) var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2)) var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2)) with raise_if_dask_computes(): assert getattr(var1, compat)(var2, equiv=lazy_array_equiv) # values are actually equal, but we don't know that till we compute, return None with raise_if_dask_computes(): assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None # shapes are not equal, return False without computes with raise_if_dask_computes(): assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False # if one or both arrays are numpy, return None assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None assert ( getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None ) with raise_if_dask_computes(): assert getattr(var1, compat)(var2.transpose("y", "x")) @pytest.mark.parametrize( "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"] ) def test_lazy_array_equiv_merge(compat): da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) da2 = 
xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x")) with raise_if_dask_computes(): xr.merge([da1, da2], compat=compat) # shapes are not equal; no computes necessary with raise_if_dask_computes(max_computes=0): with pytest.raises(ValueError): xr.merge([da1, da3], compat=compat) with raise_if_dask_computes(max_computes=2): xr.merge([da1, da2 / 2], compat=compat) @pytest.mark.filterwarnings("ignore::FutureWarning") # transpose_coords @pytest.mark.parametrize("obj", [make_da(), make_ds()]) @pytest.mark.parametrize( "transform", [ lambda a: a.assign_attrs(new_attr="anew"), lambda a: a.assign_coords(cxy=a.cxy), lambda a: a.copy(), lambda a: a.isel(x=slice(None)), lambda a: a.loc[dict(x=slice(None))], lambda a: a.transpose(...), lambda a: a.squeeze(), # no dimensions to squeeze lambda a: a.reindex(x=a.x), lambda a: a.reindex_like(a), lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}), lambda a: a.pipe(lambda x: x), lambda a: xr.align(a, xr.zeros_like(a))[0], # assign # swap_dims # set_index / reset_index ], ) def test_transforms_pass_lazy_array_equiv(obj, transform): with raise_if_dask_computes(): assert_equal(obj, transform(obj)) def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds): with raise_if_dask_computes(): assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy) assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy) assert_equal(map_ds.map(lambda x: x), map_ds) assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds) assert_equal(map_ds.assign({"a": map_ds.a}), map_ds) # fails because of index error # assert_equal( # map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds # ) assert_equal( map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds ) assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da) assert_equal(map_da.astype(map_da.dtype), map_da) assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy) def test_optimize(): # https://github.com/pydata/xarray/issues/3698 a = dask.array.ones((10, 4), chunks=(5, 2)) arr = xr.DataArray(a).chunk(5) (arr2,) = dask.optimize(arr) arr2.compute() def test_graph_manipulation(): """dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.
""" import dask.graph_manipulation as gm v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2 da = DataArray(v) ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])}) v2, da2, ds2 = gm.clone(v, da, ds) assert_equal(v2, v) assert_equal(da2, da) assert_equal(ds2, ds) for a, b in ((v, v2), (da, da2), (ds, ds2)): assert a.__dask_layers__() != b.__dask_layers__() assert len(a.__dask_layers__()) == len(b.__dask_layers__()) assert a.__dask_graph__().keys() != b.__dask_graph__().keys() # type: ignore[union-attr] assert len(a.__dask_graph__()) == len(b.__dask_graph__()) # type: ignore[arg-type] assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys() # type: ignore[union-attr] assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers) # type: ignore[union-attr] # Above we performed a slice operation; adding the two slices back together creates # a diamond-shaped dependency graph, which in turn will trigger a collision in layer # names if we were to use HighLevelGraph.cull() instead of # HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__(). assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2) def test_new_index_var_computes_once(): # regression test for GH1533 data = dask.array.from_array(np.array([100, 200])) with raise_if_dask_computes(max_computes=1): Dataset(coords={"z": ("z", data)}) def test_minimize_graph_size(): # regression test for https://github.com/pydata/xarray/issues/8409 ds = Dataset( { "foo": ( ("x", "y", "z"), dask.array.ones((120, 120, 120), chunks=(20, 20, 1)), ) }, coords={"x": np.arange(120), "y": np.arange(120), "z": np.arange(120)}, ) mapped = ds.map_blocks(lambda x: x) graph = dict(mapped.__dask_graph__()) numchunks = {k: len(v) for k, v in ds.chunksizes.items()} for var in "xyz": actual = len([key for key in graph if var in key[0]]) # assert that we only include each chunk of an index variable # is only included once, not the product of number of chunks of # all the other dimensions. # e.g. 
previously for 'x', actual == numchunks['y'] * numchunks['z'] assert actual == numchunks[var], (actual, numchunks[var]) def test_idxmin_chunking(): # GH9425 x, y, t = 100, 100, 10 rang = np.arange(t * x * y) da = xr.DataArray( rang.reshape(t, x, y), coords={"time": range(t), "x": range(x), "y": range(y)} ) da = da.chunk(dict(time=-1, x=25, y=25)) actual = da.idxmin("time") assert actual.chunksizes == {k: da.chunksizes[k] for k in ["x", "y"]} assert_identical(actual, da.compute().idxmin("time")) def test_conjugate(): # Test for https://github.com/pydata/xarray/issues/10302 z = 1j * da.arange(100) data = xr.DataArray(z, coords={"x": np.arange(100)}) conj_data = data.conjugate() assert dask.is_dask_collection(conj_data) assert_equal(conj_data, data.conj()) xarray-2025.12.0/xarray/tests/test_dataarray.py000066400000000000000000010377261511464676000214140ustar00rootroot00000000000000from __future__ import annotations import pickle import re import sys import warnings from collections.abc import Hashable from copy import deepcopy from textwrap import dedent from typing import Any, Final, Literal, cast import numpy as np import pandas as pd import pytest # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] import xarray as xr import xarray.core.missing from xarray import ( DataArray, Dataset, IndexVariable, Variable, align, broadcast, set_options, ) from xarray.coders import CFDatetimeCoder from xarray.core import dtypes from xarray.core.common import full_like from xarray.core.coordinates import Coordinates, CoordinateValidationError from xarray.core.indexes import Index, PandasIndex, filter_indexes_from_coords from xarray.core.types import QueryEngineOptions, QueryParserOptions from xarray.core.utils import is_scalar from xarray.testing import _assert_internal_invariants from xarray.tests import ( InaccessibleArray, ReturnItem, assert_allclose, assert_array_equal, assert_chunks_equal, assert_equal, assert_identical, assert_no_warnings, has_dask, has_dask_ge_2025_1_0, has_pyarrow, raise_if_dask_computes, requires_bottleneck, requires_cupy, requires_dask, requires_dask_expr, requires_iris, requires_numexpr, requires_pint, requires_pyarrow, requires_scipy, requires_sparse, source_ndarray, ) try: from pandas.errors import UndefinedVariableError except ImportError: # TODO: remove once we stop supporting pandas<1.4.3 from pandas.core.computation.ops import UndefinedVariableError pytestmark = [ pytest.mark.filterwarnings("error:Mean of empty slice"), pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"), ] class TestDataArray: @pytest.fixture(autouse=True) def setup(self): self.attrs = {"attr1": "value1", "attr2": 2929} self.x = np.random.random((10, 20)) self.v = Variable(["x", "y"], self.x) self.va = Variable(["x", "y"], self.x, self.attrs) self.ds = Dataset({"foo": self.v}) self.dv = self.ds["foo"] self.mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) self.mda = DataArray([0, 1, 2, 3], coords={"x": self.mindex}, dims="x").astype( np.uint64 ) def test_repr(self) -> None: v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"}) v = v.astype(np.uint64) coords = {"x": np.arange(3, dtype=np.uint64), "other": np.uint64(0)} data_array = DataArray(v, coords, name="my_variable") expected = dedent( """\ Size: 48B array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Coordinates: * x (x) uint64 24B 0 1 2 
other uint64 8B 0 Dimensions without coordinates: time Attributes: foo: bar""" ) assert expected == repr(data_array) def test_repr_multiindex(self) -> None: obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: 32B array([0, 1, 2, 3], dtype=uint64) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2""" ) assert expected == repr(self.mda) def test_repr_multiindex_long(self) -> None: mindex_long = pd.MultiIndex.from_product( [["a", "b", "c", "d"], [1, 2, 3, 4, 5, 6, 7, 8]], names=("level_1", "level_2"), ) mda_long = DataArray( list(range(32)), coords={"x": mindex_long}, dims="x" ).astype(np.uint64) obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: 256B array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) Coordinates: * x (x) object {32 * obj_size}B MultiIndex * level_1 (x) object {32 * obj_size}B 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' * level_2 (x) int64 256B 1 2 3 4 5 6 7 8 1 2 3 4 ... 5 6 7 8 1 2 3 4 5 6 7 8""" ) assert expected == repr(mda_long) def test_properties(self) -> None: assert_equal(self.dv.variable, self.v) assert_array_equal(self.dv.values, self.v.values) for attr in ["dims", "dtype", "shape", "size", "nbytes", "ndim", "attrs"]: assert getattr(self.dv, attr) == getattr(self.v, attr) assert len(self.dv) == len(self.v) assert_equal(self.dv.variable, self.v) assert set(self.dv.coords) == set(self.ds.coords) for k, v in self.dv.coords.items(): assert_array_equal(v, self.ds.coords[k]) with pytest.raises(AttributeError): _ = self.dv.dataset assert isinstance(self.ds["x"].to_index(), pd.Index) with pytest.raises(ValueError, match=r"must be 1-dimensional"): self.ds["foo"].to_index() with pytest.raises(AttributeError): self.dv.variable = self.v # type: ignore[misc] def test_data_property(self) -> None: array = DataArray(np.zeros((3, 4))) actual = array.copy() actual.values = np.ones((3, 4)) assert_array_equal(np.ones((3, 4)), actual.values) actual.data = 2 * np.ones((3, 4)) assert_array_equal(2 * np.ones((3, 4)), actual.data) assert_array_equal(actual.data, actual.values) def test_indexes(self) -> None: array = DataArray(np.zeros((2, 3)), [("x", [0, 1]), ("y", ["a", "b", "c"])]) expected_indexes = {"x": pd.Index([0, 1]), "y": pd.Index(["a", "b", "c"])} expected_xindexes = { k: PandasIndex(idx, k) for k, idx in expected_indexes.items() } assert array.xindexes.keys() == expected_xindexes.keys() assert array.indexes.keys() == expected_indexes.keys() assert all(isinstance(idx, pd.Index) for idx in array.indexes.values()) assert all(isinstance(idx, Index) for idx in array.xindexes.values()) for k in expected_indexes: assert array.xindexes[k].equals(expected_xindexes[k]) assert array.indexes[k].equals(expected_indexes[k]) def test_get_index(self) -> None: array = DataArray(np.zeros((2, 3)), coords={"x": ["a", "b"]}, dims=["x", "y"]) assert array.get_index("x").equals(pd.Index(["a", "b"])) assert array.get_index("y").equals(pd.Index([0, 1, 2])) with pytest.raises(KeyError): array.get_index("z") def test_get_index_size_zero(self) -> None: array = DataArray(np.zeros((0,)), dims=["x"]) actual = array.get_index("x") expected = pd.Index([], dtype=np.int64) assert actual.equals(expected) assert actual.dtype == expected.dtype def test_struct_array_dims(self) -> None: """ This test checks subtraction of two DataArrays for the case when dimension is a structured array. 
""" # GH837, GH861 # checking array subtraction when dims are the same p_data = np.array( [("Abe", 180), ("Stacy", 150), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_0 = DataArray( [80, 56, 120], dims=["participant"], coords={"participant": p_data} ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data} ) actual = weights_1 - weights_0 expected = DataArray( [1, -4, -5], dims=["participant"], coords={"participant": p_data} ) assert_identical(actual, expected) # checking array subtraction when dims are not the same p_data_alt = np.array( [("Abe", 180), ("Stacy", 151), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data_alt} ) actual = weights_1 - weights_0 expected = DataArray( [1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]} ) assert_identical(actual, expected) # checking array subtraction when dims are not the same and one # is np.nan p_data_nan = np.array( [("Abe", 180), ("Stacy", np.nan), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data_nan} ) actual = weights_1 - weights_0 expected = DataArray( [1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]} ) assert_identical(actual, expected) def test_name(self) -> None: arr = self.dv assert arr.name == "foo" copied = arr.copy() arr.name = "bar" assert arr.name == "bar" assert_equal(copied, arr) actual = DataArray(IndexVariable("x", [3])) actual.name = "y" expected = DataArray([3], [("x", [3])], name="y") assert_identical(actual, expected) def test_dims(self) -> None: arr = self.dv assert arr.dims == ("x", "y") with pytest.raises(AttributeError, match=r"you cannot assign"): arr.dims = ("w", "z") def test_sizes(self) -> None: array = DataArray(np.zeros((3, 4)), dims=["x", "y"]) assert array.sizes == {"x": 3, "y": 4} assert tuple(array.sizes) == array.dims with pytest.raises(TypeError): array.sizes["foo"] = 5 # type: ignore[index] def test_encoding(self) -> None: expected = {"foo": "bar"} self.dv.encoding["foo"] = "bar" assert expected == self.dv.encoding expected2 = {"baz": 0} self.dv.encoding = expected2 assert expected2 is not self.dv.encoding def test_drop_encoding(self) -> None: array = self.mda encoding = {"scale_factor": 10} array.encoding = encoding array["x"].encoding = encoding assert array.encoding == encoding assert array["x"].encoding == encoding actual = array.drop_encoding() # did not modify in place assert array.encoding == encoding assert array["x"].encoding == encoding # variable and coord encoding is empty assert actual.encoding == {} assert actual["x"].encoding == {} def test_constructor(self) -> None: data = np.random.random((2, 3)) # w/o coords, w/o dims actual = DataArray(data) expected = Dataset({None: (["dim_0", "dim_1"], data)})[None] assert_identical(expected, actual) actual = DataArray(data, [["a", "b"], [-1, -2, -3]]) expected = Dataset( { None: (["dim_0", "dim_1"], data), "dim_0": ("dim_0", ["a", "b"]), "dim_1": ("dim_1", [-1, -2, -3]), } )[None] assert_identical(expected, actual) # pd.Index coords, w/o dims actual = DataArray( data, [pd.Index(["a", "b"], name="x"), pd.Index([-1, -2, -3], name="y")] ) expected = Dataset( {None: (["x", "y"], data), "x": ("x", ["a", "b"]), "y": ("y", [-1, -2, -3])} )[None] assert_identical(expected, actual) # list coords, w dims coords1: list[Any] = [["a", "b"], [-1, -2, -3]] 
actual = DataArray(data, coords1, ["x", "y"]) assert_identical(expected, actual) # pd.Index coords, w dims coords2: list[pd.Index] = [ pd.Index(["a", "b"], name="A"), pd.Index([-1, -2, -3], name="B"), ] actual = DataArray(data, coords2, ["x", "y"]) assert_identical(expected, actual) # dict coords, w dims coords3 = {"x": ["a", "b"], "y": [-1, -2, -3]} actual = DataArray(data, coords3, ["x", "y"]) assert_identical(expected, actual) # dict coords, w/o dims actual = DataArray(data, coords3) assert_identical(expected, actual) # tuple[dim, list] coords, w/o dims coords4 = [("x", ["a", "b"]), ("y", [-1, -2, -3])] actual = DataArray(data, coords4) assert_identical(expected, actual) # partial dict coords, w dims expected = Dataset({None: (["x", "y"], data), "x": ("x", ["a", "b"])})[None] actual = DataArray(data, {"x": ["a", "b"]}, ["x", "y"]) assert_identical(expected, actual) # w/o coords, w dims actual = DataArray(data, dims=["x", "y"]) expected = Dataset({None: (["x", "y"], data)})[None] assert_identical(expected, actual) # w/o coords, w dims, w name actual = DataArray(data, dims=["x", "y"], name="foo") expected = Dataset({"foo": (["x", "y"], data)})["foo"] assert_identical(expected, actual) # w/o coords, w/o dims, w name actual = DataArray(data, name="foo") expected = Dataset({"foo": (["dim_0", "dim_1"], data)})["foo"] assert_identical(expected, actual) # w/o coords, w dims, w attrs actual = DataArray(data, dims=["x", "y"], attrs={"bar": 2}) expected = Dataset({None: (["x", "y"], data, {"bar": 2})})[None] assert_identical(expected, actual) # w/o coords, w dims (ds has attrs) actual = DataArray(data, dims=["x", "y"]) expected = Dataset({None: (["x", "y"], data, {}, {"bar": 2})})[None] assert_identical(expected, actual) # data is list, w coords actual = DataArray([1, 2, 3], coords={"x": [0, 1, 2]}) expected = DataArray([1, 2, 3], coords=[("x", [0, 1, 2])]) assert_identical(expected, actual) def test_constructor_invalid(self) -> None: data = np.random.randn(3, 2) with pytest.raises(ValueError, match=r"coords is not dict-like"): DataArray(data, [[0, 1, 2]], ["x", "y"]) with pytest.raises(ValueError, match=r"not a subset of the .* dim"): DataArray(data, {"x": [0, 1, 2]}, ["a", "b"]) with pytest.raises(ValueError, match=r"not a subset of the .* dim"): DataArray(data, {"x": [0, 1, 2]}) with pytest.raises(TypeError, match=r"is not hashable"): DataArray(data, dims=["x", []]) # type: ignore[list-item] with pytest.raises( CoordinateValidationError, match=r"conflicting sizes for dim" ): DataArray([1, 2, 3], coords=[("x", [0, 1])]) with pytest.raises( CoordinateValidationError, match=r"conflicting sizes for dim" ): DataArray([1, 2], coords={"x": [0, 1], "y": ("x", [1])}, dims="x") with pytest.raises(ValueError, match=r"conflicting MultiIndex"): DataArray(np.random.rand(4, 4), [("x", self.mindex), ("y", self.mindex)]) with pytest.raises(ValueError, match=r"conflicting MultiIndex"): DataArray(np.random.rand(4, 4), [("x", self.mindex), ("level_1", range(4))]) def test_constructor_from_self_described(self) -> None: data: list[list[float]] = [[-0.1, 21], [0, 2]] expected = DataArray( data, coords={"x": ["a", "b"], "y": [-1, -2]}, dims=["x", "y"], name="foobar", attrs={"bar": 2}, ) actual = DataArray(expected) assert_identical(expected, actual) actual = DataArray(expected.values, actual.coords) assert_equal(expected, actual) frame = pd.DataFrame( data, index=pd.Index(["a", "b"], name="x"), columns=pd.Index([-1, -2], name="y"), ) actual = DataArray(frame) assert_equal(expected, actual) series = pd.Series(data[0], 
index=pd.Index([-1, -2], name="y")) actual = DataArray(series) assert_equal(expected[0].reset_coords("x", drop=True), actual) expected = DataArray( data, coords={"x": ["a", "b"], "y": [-1, -2], "a": 0, "z": ("x", [-0.5, 0.5])}, dims=["x", "y"], ) actual = DataArray(expected) assert_identical(expected, actual) actual = DataArray(expected.values, expected.coords) assert_identical(expected, actual) expected = Dataset({"foo": ("foo", ["a", "b"])})["foo"] actual = DataArray(pd.Index(["a", "b"], name="foo")) assert_identical(expected, actual) actual = DataArray(IndexVariable("foo", ["a", "b"])) assert_identical(expected, actual) @requires_dask def test_constructor_from_self_described_chunked(self) -> None: expected = DataArray( [[-0.1, 21], [0, 2]], coords={"x": ["a", "b"], "y": [-1, -2]}, dims=["x", "y"], name="foobar", attrs={"bar": 2}, ).chunk() actual = DataArray(expected) assert_identical(expected, actual) assert_chunks_equal(expected, actual) def test_constructor_from_0d(self) -> None: expected = Dataset({None: ([], 0)})[None] actual = DataArray(0) assert_identical(expected, actual) @requires_dask def test_constructor_dask_coords(self) -> None: # regression test for GH1684 import dask.array as da coord = da.arange(8, chunks=(4,)) data = da.random.random((8, 8), chunks=(4, 4)) + 1 actual = DataArray(data, coords={"x": coord, "y": coord}, dims=["x", "y"]) ecoord = np.arange(8) expected = DataArray(data, coords={"x": ecoord, "y": ecoord}, dims=["x", "y"]) assert_equal(actual, expected) def test_constructor_no_default_index(self) -> None: # explicitly passing a Coordinates object skips the creation of default index da = DataArray(range(3), coords=Coordinates({"x": [1, 2, 3]}, indexes={})) assert "x" in da.coords assert "x" not in da.xindexes def test_constructor_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = Coordinates.from_pandas_multiindex(midx, "x") da = DataArray(range(4), coords=coords, dims="x") assert_identical(da.coords, coords) def test_constructor_custom_index(self) -> None: class CustomIndex(Index): ... 
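        # The checks below confirm two properties of constructing a DataArray
        # from a Coordinates object carrying a custom (non-pandas) index:
        # the custom index instance is preserved on the result, and the
        # coordinate variables are copied rather than shared with `coords`.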
        coords = Coordinates(
            coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
        )
        da = DataArray(range(3), coords=coords)
        assert isinstance(da.xindexes["x"], CustomIndex)

        # test coordinate variables copied
        assert da.coords["x"] is not coords.variables["x"]

    def test_constructor_extra_dim_index_coord(self) -> None:
        class AnyIndex(Index):
            def should_add_coord_to_array(self, name, var, dims):
                return True

        idx = AnyIndex()
        coords = Coordinates(
            coords={
                "x": ("x", [1, 2]),
                "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
            },
            indexes={"x": idx, "x_bounds": idx},
        )

        actual = DataArray([1.0, 2.0], coords=coords, dims="x")

        assert_identical(actual.coords, coords, check_default_indexes=False)
        assert "x_bnds" not in actual.dims

    def test_equals_and_identical(self) -> None:
        orig = DataArray(np.arange(5.0), {"a": 42}, dims="x")

        expected = orig
        actual = orig.copy()
        assert expected.equals(actual)
        assert expected.identical(actual)

        actual = expected.rename("baz")
        assert expected.equals(actual)
        assert not expected.identical(actual)

        actual = expected.rename({"x": "xxx"})
        assert not expected.equals(actual)
        assert not expected.identical(actual)

        actual = expected.copy()
        actual.attrs["foo"] = "bar"
        assert expected.equals(actual)
        assert not expected.identical(actual)

        actual = expected.copy()
        actual["x"] = ("x", -np.arange(5))
        assert not expected.equals(actual)
        assert not expected.identical(actual)

        actual = expected.reset_coords(drop=True)
        assert not expected.equals(actual)
        assert not expected.identical(actual)

        actual = orig.copy()
        actual[0] = np.nan
        expected = actual.copy()
        assert expected.equals(actual)
        assert expected.identical(actual)

        actual[:] = np.nan
        assert not expected.equals(actual)
        assert not expected.identical(actual)

        actual = expected.copy()
        actual["a"] = 100000
        assert not expected.equals(actual)
        assert not expected.identical(actual)

    def test_equals_failures(self) -> None:
        orig = DataArray(np.arange(5.0), {"a": 42}, dims="x")
        assert not orig.equals(np.arange(5))  # type: ignore[arg-type]
        assert not orig.identical(123)  # type: ignore[arg-type]
        assert not orig.broadcast_equals({1: 2})  # type: ignore[arg-type]

    def test_broadcast_equals(self) -> None:
        a = DataArray([0, 0], {"y": 0}, dims="x")
        b = DataArray([0, 0], {"y": ("x", [0, 0])}, dims="x")
        assert a.broadcast_equals(b)
        assert b.broadcast_equals(a)
        assert not a.equals(b)
        assert not a.identical(b)

        c = DataArray([0], coords={"x": 0}, dims="y")
        assert not a.broadcast_equals(c)
        assert not c.broadcast_equals(a)

    def test_getitem(self) -> None:
        # strings pull out dataarrays
        assert_identical(self.dv, self.ds["foo"])
        x = self.dv["x"]
        y = self.dv["y"]
        assert_identical(self.ds["x"], x)
        assert_identical(self.ds["y"], y)

        arr = ReturnItem()
        for i in [
            arr[:],
            arr[...],
            arr[x.values],
            arr[x.variable],
            arr[x],
            arr[x, y],
            arr[x.values > -1],
            arr[x.variable > -1],
            arr[x > -1],
            arr[x > -1, y > -1],
        ]:
            assert_equal(self.dv, self.dv[i])
        for i in [
            arr[0],
            arr[:, 0],
            arr[:3, :2],
            arr[x.values[:3]],
            arr[x.variable[:3]],
            arr[x[:3]],
            arr[x[:3], y[:4]],
            arr[x.values > 3],
            arr[x.variable > 3],
            arr[x > 3],
            arr[x > 3, y > 3],
        ]:
            assert_array_equal(self.v[i], self.dv[i])

    def test_getitem_dict(self) -> None:
        actual = self.dv[{"x": slice(3), "y": 0}]
        expected = self.dv.isel(x=slice(3), y=0)
        assert_identical(expected, actual)

    def test_getitem_coords(self) -> None:
        orig = DataArray(
            [[10], [20]],
            {
                "x": [1, 2],
                "y": [3],
                "z": 4,
                "x2": ("x", ["a", "b"]),
                "y2": ("y", ["c"]),
                "xy": (["y", "x"], [["d", "e"]]),
            },
            dims=["x", "y"],
        )

        assert_identical(orig, orig[:])
assert_identical(orig, orig[:, :]) assert_identical(orig, orig[...]) assert_identical(orig, orig[:2, :1]) assert_identical(orig, orig[[0, 1], [0]]) actual = orig[0, 0] expected = DataArray( 10, {"x": 1, "y": 3, "z": 4, "x2": "a", "y2": "c", "xy": "d"} ) assert_identical(expected, actual) actual = orig[0, :] expected = DataArray( [10], { "x": 1, "y": [3], "z": 4, "x2": "a", "y2": ("y", ["c"]), "xy": ("y", ["d"]), }, dims="y", ) assert_identical(expected, actual) actual = orig[:, 0] expected = DataArray( [10, 20], { "x": [1, 2], "y": 3, "z": 4, "x2": ("x", ["a", "b"]), "y2": "c", "xy": ("x", ["d", "e"]), }, dims="x", ) assert_identical(expected, actual) def test_getitem_dataarray(self) -> None: # It should not conflict da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"]) ind = DataArray([[0, 1], [0, 1]], dims=["x", "z"]) actual = da[ind] assert_array_equal(actual, da.values[[[0, 1], [0, 1]], :]) da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]}, ) ind = xr.DataArray([[0, 1], [0, 1]], dims=["X", "Y"]) actual = da[ind] expected = da.values[[[0, 1], [0, 1]], :] assert_array_equal(actual, expected) assert actual.dims == ("X", "Y", "y") # boolean indexing ind = xr.DataArray([True, True, False], dims=["x"]) assert_equal(da[ind], da[[0, 1], :]) assert_equal(da[ind], da[[0, 1]]) assert_equal(da[ind], da[ind.values]) def test_getitem_empty_index(self) -> None: da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"]) assert_identical(da[{"x": []}], DataArray(np.zeros((0, 4)), dims=["x", "y"])) assert_identical( da.loc[{"y": []}], DataArray(np.zeros((3, 0)), dims=["x", "y"]) ) assert_identical(da[[]], DataArray(np.zeros((0, 4)), dims=["x", "y"])) def test_getitem_typeerror(self) -> None: with pytest.raises(TypeError, match=r"unexpected indexer type"): self.dv[True] with pytest.raises(TypeError, match=r"unexpected indexer type"): self.dv[np.array(True)] with pytest.raises(TypeError, match=r"invalid indexer array"): self.dv[3.0] with pytest.raises(TypeError, match=r"invalid indexer array"): self.dv[None] def test_setitem(self) -> None: # basic indexing should work as numpy's indexing tuples: list[tuple[int | list[int] | slice, int | list[int] | slice]] = [ (0, 0), (0, slice(None, None)), (slice(None, None), slice(None, None)), (slice(None, None), 0), ([1, 0], slice(None, None)), (slice(None, None), [1, 0]), ] for t in tuples: expected = np.arange(6).reshape(3, 2) orig = DataArray( np.arange(6).reshape(3, 2), { "x": [1, 2, 3], "y": ["a", "b"], "z": 4, "x2": ("x", ["a", "b", "c"]), "y2": ("y", ["d", "e"]), }, dims=["x", "y"], ) orig[t] = 1 expected[t] = 1 assert_array_equal(orig.values, expected) def test_setitem_fancy(self) -> None: # vectorized indexing da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind = Variable(["a"], [0, 1]) da[dict(x=ind, y=ind)] = 0 expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # assign another 0d-variable da[dict(x=ind, y=ind)] = Variable((), 0) expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # assign another 1d-variable da[dict(x=ind, y=ind)] = Variable(["a"], [2, 3]) expected = DataArray([[2, 1], [1, 3], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # 2d-vectorized indexing da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind_x = DataArray([[0, 1]], dims=["a", "b"]) ind_y = DataArray([[1, 0]], dims=["a", "b"]) da[dict(x=ind_x, y=ind_y)] = 0 expected = DataArray([[1, 0], [0, 1], [1, 1]], 
dims=["x", "y"]) assert_identical(expected, da) da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind = Variable(["a"], [0, 1]) da[ind] = 0 expected = DataArray([[0, 0], [0, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) def test_setitem_dataarray(self) -> None: def get_data(): return DataArray( np.ones((4, 3, 2)), dims=["x", "y", "z"], coords={ "x": np.arange(4), "y": ["a", "b", "c"], "non-dim": ("x", [1, 3, 4, 2]), }, ) da = get_data() # indexer with inconsistent coordinates. ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.random.randn(3)}) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = 0 # indexer with consistent coordinates. ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)}) da[dict(x=ind)] = 0 # should not raise assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) da = get_data() # conflict in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])}, ) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = value # consistent coordinate in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) # Conflict in the non-dimension coordinate value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value # should not raise # conflict in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])}, ) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = value # consistent coordinate in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value # should not raise def test_setitem_vectorized(self) -> None: # Regression test for GH:7030 # Positional indexing v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) v[index] = w assert (v[index] == w).all() # Indexing with coordinates v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) v.coords["b"] = [2, 4, 6] b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) v.loc[index] = w assert (v.loc[index] == w).all() def test_contains(self) -> None: data_array = DataArray([1, 2]) assert 1 in data_array assert 3 not in data_array def test_pickle(self) -> None: data = DataArray(np.random.random((3, 3)), dims=("id", "time")) roundtripped = pickle.loads(pickle.dumps(data)) assert_identical(data, roundtripped) @requires_dask def test_chunk(self) -> None: unblocked = DataArray(np.ones((3, 4))) assert unblocked.chunks is None blocked = unblocked.chunk() assert blocked.chunks == ((3,), (4,)) first_dask_name = blocked.data.name with pytest.warns(DeprecationWarning): blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) 
# type: ignore[arg-type] assert blocked.chunks == ((2, 1), (2, 2)) assert blocked.data.name != first_dask_name blocked = unblocked.chunk(chunks=(3, 3)) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name with pytest.raises(ValueError): blocked.chunk(chunks=(3, 3, 3)) # name doesn't change when rechunking by same amount # this fails if ReprObject doesn't have __dask_tokenize__ defined assert unblocked.chunk(2).data.name == unblocked.chunk(2).data.name assert blocked.load().chunks is None # Check that kwargs are passed import dask.array as da blocked = unblocked.chunk(name_prefix="testname_") assert isinstance(blocked.data, da.Array) assert "testname_" in blocked.data.name # test kwargs form of chunks blocked = unblocked.chunk(dim_0=3, dim_1=3) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name def test_isel(self) -> None: assert_identical(self.dv[0], self.dv.isel(x=0)) assert_identical(self.dv, self.dv.isel(x=slice(None))) assert_identical(self.dv[:3], self.dv.isel(x=slice(3))) assert_identical(self.dv[:3, :5], self.dv.isel(x=slice(3), y=slice(5))) with pytest.raises( ValueError, match=r"Dimensions {'not_a_dim'} do not exist. Expected " r"one or more of \('x', 'y'\)", ): self.dv.isel(not_a_dim=0) with pytest.warns( UserWarning, match=r"Dimensions {'not_a_dim'} do not exist. " r"Expected one or more of \('x', 'y'\)", ): self.dv.isel(not_a_dim=0, missing_dims="warn") assert_identical(self.dv, self.dv.isel(not_a_dim=0, missing_dims="ignore")) def test_isel_types(self) -> None: # regression test for #1405 da = DataArray([1, 2, 3], dims="x") # uint64 assert_identical( da.isel(x=np.array([0], dtype="uint64")), da.isel(x=np.array([0])) ) # uint32 assert_identical( da.isel(x=np.array([0], dtype="uint32")), da.isel(x=np.array([0])) ) # int64 assert_identical( da.isel(x=np.array([0], dtype="int64")), da.isel(x=np.array([0])) ) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_isel_fancy(self) -> None: shape = (10, 7, 6) np_array = np.random.random(shape) da = DataArray( np_array, dims=["time", "y", "x"], coords={"time": np.arange(0, 100, 10)} ) y = [1, 3] x = [3, 0] expected = da.values[:, y, x] actual = da.isel(y=(("test_coord",), y), x=(("test_coord",), x)) assert actual.coords["test_coord"].shape == (len(y),) assert list(actual.coords) == ["time"] assert actual.dims == ("time", "test_coord") np.testing.assert_equal(actual, expected) # a few corner cases da.isel( time=(("points",), [1, 2]), x=(("points",), [2, 2]), y=(("points",), [3, 4]) ) np.testing.assert_allclose( da.isel( time=(("p",), [1]), x=(("p",), [2]), y=(("p",), [4]) ).values.squeeze(), np_array[1, 4, 2].squeeze(), ) da.isel(time=(("points",), [1, 2])) y = [-1, 0] x = [-2, 2] expected2 = da.values[:, y, x] actual2 = da.isel(x=(("points",), x), y=(("points",), y)).values np.testing.assert_equal(actual2, expected2) # test that the order of the indexers doesn't matter assert_identical( da.isel(y=(("points",), y), x=(("points",), x)), da.isel(x=(("points",), x), y=(("points",), y)), ) # make sure we're raising errors in the right places with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): da.isel(y=(("points",), [1, 2]), x=(("points",), [1, 2, 3])) # tests using index or DataArray as indexers stations = Dataset() stations["station"] = (("station",), ["A", "B", "C"]) stations["dim1s"] = (("station",), [1, 2, 3]) stations["dim2s"] = (("station",), [4, 5, 1]) actual3 = da.isel(x=stations["dim1s"], y=stations["dim2s"]) assert "station" in 
actual3.coords assert "station" in actual3.dims assert_identical(actual3["station"], stations["station"]) with pytest.raises(ValueError, match=r"conflicting values/indexes on "): da.isel( x=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 2]}), y=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 3]}), ) # multi-dimensional selection stations = Dataset() stations["a"] = (("a",), ["A", "B", "C"]) stations["b"] = (("b",), [0, 1]) stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]]) stations["dim2s"] = (("a",), [4, 5, 1]) actual4 = da.isel(x=stations["dim1s"], y=stations["dim2s"]) assert "a" in actual4.coords assert "a" in actual4.dims assert "b" in actual4.coords assert "b" in actual4.dims assert_identical(actual4["a"], stations["a"]) assert_identical(actual4["b"], stations["b"]) expected4 = da.variable[ :, stations["dim2s"].variable, stations["dim1s"].variable ] assert_array_equal(actual4, expected4) def test_sel(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] assert_identical(da, da.sel(x=slice(None))) assert_identical(da[1], da.sel(x="b")) assert_identical(da[:3], da.sel(x=slice("c"))) assert_identical(da[:3], da.sel(x=["a", "b", "c"])) assert_identical(da[:, :4], da.sel(y=(self.ds["y"] < 4))) # verify that indexing with a dataarray works b = DataArray("b") assert_identical(da[1], da.sel(x=b)) assert_identical(da[[1]], da.sel(x=slice(b, b))) def test_sel_dataarray(self) -> None: # indexing with DataArray self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] ind = DataArray(["a", "b", "c"], dims=["x"]) actual = da.sel(x=ind) assert_identical(actual, da.isel(x=[0, 1, 2])) # along new dimension ind = DataArray(["a", "b", "c"], dims=["new_dim"]) actual = da.sel(x=ind) assert_array_equal(actual, da.isel(x=[0, 1, 2])) assert "new_dim" in actual.dims # with coordinate ind = DataArray( ["a", "b", "c"], dims=["new_dim"], coords={"new_dim": [0, 1, 2]} ) actual = da.sel(x=ind) assert_array_equal(actual, da.isel(x=[0, 1, 2])) assert "new_dim" in actual.dims assert "new_dim" in actual.coords assert_equal(actual["new_dim"].drop_vars("x"), ind["new_dim"]) def test_sel_invalid_slice(self) -> None: array = DataArray(np.arange(10), [("x", np.arange(10))]) with pytest.raises(ValueError, match=r"cannot use non-scalar arrays"): array.sel(x=slice(array.x)) def test_sel_dataarray_datetime_slice(self) -> None: # regression test for GH1240 times = pd.date_range("2000-01-01", freq="D", periods=365) array = DataArray(np.arange(365), [("time", times)]) result = array.sel(time=slice(array.time[0], array.time[-1])) assert_equal(result, array) array = DataArray(np.arange(365), [("delta", times - times[0])]) result = array.sel(delta=slice(array.delta[0], array.delta[-1])) assert_equal(result, array) @pytest.mark.parametrize( ["coord_values", "indices"], ( pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float64"), slice(1, 3), id="float64", ), pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"), slice(1, 3), id="float32", ), pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"), [2], id="scalar" ), ), ) def test_sel_float(self, coord_values, indices) -> None: data_values = np.arange(4) arr = DataArray(data_values, coords={"x": coord_values}, dims="x") actual = arr.sel(x=coord_values[indices]) expected = DataArray( data_values[indices], coords={"x": coord_values[indices]}, dims="x" ) assert_equal(actual, expected) def test_sel_float16(self) -> None: data_values = np.arange(4) coord_values = 
np.array([0.0, 0.111, 0.222, 0.333], dtype="float16") indices = slice(1, 3) message = "`pandas.Index` does not support the `float16` dtype.*" with pytest.warns(DeprecationWarning, match=message): arr = DataArray(data_values, coords={"x": coord_values}, dims="x") with pytest.warns(DeprecationWarning, match=message): expected = DataArray( data_values[indices], coords={"x": coord_values[indices]}, dims="x" ) actual = arr.sel(x=coord_values[indices]) assert_equal(actual, expected) def test_sel_float_multiindex(self) -> None: # regression test https://github.com/pydata/xarray/issues/5691 # test multi-index created from coordinates, one with dtype=float32 lvl1 = ["a", "a", "b", "b"] lvl2 = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) da = xr.DataArray( [1, 2, 3, 4], dims="x", coords={"lvl1": ("x", lvl1), "lvl2": ("x", lvl2)} ) da = da.set_index(x=["lvl1", "lvl2"]) actual = da.sel(lvl1="a", lvl2=0.1) expected = da.isel(x=0) assert_equal(actual, expected) def test_sel_no_index(self) -> None: array = DataArray(np.arange(10), dims="x") assert_identical(array[0], array.sel(x=0)) assert_identical(array[:5], array.sel(x=slice(5))) assert_identical(array[[0, -1]], array.sel(x=[0, -1])) assert_identical(array[array < 5], array.sel(x=(array < 5))) def test_sel_method(self) -> None: data = DataArray(np.random.randn(3, 4), [("x", [0, 1, 2]), ("y", list("abcd"))]) with pytest.raises(KeyError, match="Try setting the `method`"): data.sel(y="ab") expected = data.sel(y=["a", "b"]) actual = data.sel(y=["ab", "ba"], method="pad") assert_identical(expected, actual) expected = data.sel(x=[1, 2]) actual = data.sel(x=[0.9, 1.9], method="backfill", tolerance=1) assert_identical(expected, actual) def test_sel_drop(self) -> None: data = DataArray([1, 2, 3], [("x", [0, 1, 2])]) expected = DataArray(1) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) expected = DataArray(1, {"x": 0}) selected = data.sel(x=0, drop=False) assert_identical(expected, selected) data = DataArray([1, 2, 3], dims=["x"]) expected = DataArray(1) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) def test_isel_drop(self) -> None: data = DataArray([1, 2, 3], [("x", [0, 1, 2])]) expected = DataArray(1) selected = data.isel(x=0, drop=True) assert_identical(expected, selected) expected = DataArray(1, {"x": 0}) selected = data.isel(x=0, drop=False) assert_identical(expected, selected) def test_head(self) -> None: assert_equal(self.dv.isel(x=slice(5)), self.dv.head(x=5)) assert_equal(self.dv.isel(x=slice(0)), self.dv.head(x=0)) assert_equal( self.dv.isel({dim: slice(6) for dim in self.dv.dims}), self.dv.head(6) ) assert_equal( self.dv.isel({dim: slice(5) for dim in self.dv.dims}), self.dv.head() ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.head([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): self.dv.head(x=3.1) with pytest.raises(ValueError, match=r"expected positive int"): self.dv.head(-3) def test_tail(self) -> None: assert_equal(self.dv.isel(x=slice(-5, None)), self.dv.tail(x=5)) assert_equal(self.dv.isel(x=slice(0)), self.dv.tail(x=0)) assert_equal( self.dv.isel({dim: slice(-6, None) for dim in self.dv.dims}), self.dv.tail(6), ) assert_equal( self.dv.isel({dim: slice(-5, None) for dim in self.dv.dims}), self.dv.tail() ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.tail([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): self.dv.tail(x=3.1) with 
pytest.raises(ValueError, match=r"expected positive int"): self.dv.tail(-3) def test_thin(self) -> None: assert_equal(self.dv.isel(x=slice(None, None, 5)), self.dv.thin(x=5)) assert_equal( self.dv.isel({dim: slice(None, None, 6) for dim in self.dv.dims}), self.dv.thin(6), ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.thin([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): self.dv.thin(x=3.1) with pytest.raises(ValueError, match=r"expected positive int"): self.dv.thin(-3) with pytest.raises(ValueError, match=r"cannot be zero"): self.dv.thin(time=0) def test_loc(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] # typing issue: see https://github.com/python/mypy/issues/2410 assert_identical(da[:3], da.loc[:"c"]) # type: ignore[misc] assert_identical(da[1], da.loc["b"]) assert_identical(da[1], da.loc[{"x": "b"}]) assert_identical(da[1], da.loc["b", ...]) assert_identical(da[:3], da.loc[["a", "b", "c"]]) assert_identical(da[:3, :4], da.loc[["a", "b", "c"], np.arange(4)]) assert_identical(da[:, :4], da.loc[:, self.ds["y"] < 4]) def test_loc_datetime64_value(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4283 t = np.array(["2017-09-05T12", "2017-09-05T15"], dtype="datetime64[ns]") array = DataArray(np.ones(t.shape), dims=("time",), coords=(t,)) assert_identical(array.loc[{"time": t[0]}], array[0]) def test_loc_assign(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] # assignment # typing issue: see https://github.com/python/mypy/issues/2410 da.loc["a":"j"] = 0 # type: ignore[misc] assert np.all(da.values == 0) da.loc[{"x": slice("a", "j")}] = 2 assert np.all(da.values == 2) da.loc[{"x": slice("a", "j")}] = 2 assert np.all(da.values == 2) # Multi dimensional case da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"]) da.loc[0, 0] = 0 assert da.values[0, 0] == 0 assert da.values[0, 1] != 0 da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"]) da.loc[0] = 0 assert np.all(da.values[0] == np.zeros(4)) assert da.values[1, 0] != 0 def test_loc_assign_dataarray(self) -> None: def get_data(): return DataArray( np.ones((4, 3, 2)), dims=["x", "y", "z"], coords={ "x": np.arange(4), "y": ["a", "b", "c"], "non-dim": ("x", [1, 3, 4, 2]), }, ) da = get_data() # indexer with inconsistent coordinates. ind = DataArray(np.arange(1, 4), dims=["y"], coords={"y": np.random.randn(3)}) with pytest.raises(IndexError, match=r"dimension coordinate 'y'"): da.loc[dict(x=ind)] = 0 # indexer with consistent coordinates. 
        ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)})
        da.loc[dict(x=ind)] = 0  # should not raise
        assert np.allclose(da[dict(x=ind)].values, 0)
        assert_identical(da["x"], get_data()["x"])
        assert_identical(da["non-dim"], get_data()["non-dim"])

        da = get_data()
        # conflict in the assigning values
        value = xr.DataArray(
            np.zeros((3, 3, 2)),
            dims=["x", "y", "z"],
            coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])},
        )
        with pytest.raises(IndexError, match=r"dimension coordinate 'x'"):
            da.loc[dict(x=ind)] = value

        # consistent coordinate in the assigning values
        value = xr.DataArray(
            np.zeros((3, 3, 2)),
            dims=["x", "y", "z"],
            coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])},
        )
        da.loc[dict(x=ind)] = value
        assert np.allclose(da[dict(x=ind)].values, 0)
        assert_identical(da["x"], get_data()["x"])
        assert_identical(da["non-dim"], get_data()["non-dim"])

    def test_loc_single_boolean(self) -> None:
        data = DataArray([0, 1], coords=[[True, False]])
        assert data.loc[True] == 0
        assert data.loc[False] == 1

    def test_loc_dim_name_collision_with_sel_params(self) -> None:
        da = xr.DataArray(
            [[0, 0], [1, 1]],
            dims=["dim1", "method"],
            coords={"dim1": ["x", "y"], "method": ["a", "b"]},
        )
        np.testing.assert_array_equal(
            da.loc[dict(dim1=["x", "y"], method=["a"])], [[0], [1]]
        )

    def test_selection_multiindex(self) -> None:
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
        )
        mdata = DataArray(range(8), [("x", mindex)])

        def test_sel(
            lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None
        ) -> None:
            da = mdata.sel(x=lab_indexer)
            expected_da = mdata.isel(x=pos_indexer)

            if not replaced_idx:
                assert_identical(da, expected_da)
            else:
                if renamed_dim:
                    assert da.dims[0] == renamed_dim
                    da = da.rename({renamed_dim: "x"})
                assert_identical(da.variable, expected_da.variable)
                assert not da["x"].equals(expected_da["x"])

        test_sel(("a", 1, -1), 0)
        test_sel(("b", 2, -2), -1)
        test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel(("a",), range(4), replaced_idx=True)
        test_sel("a", range(4), replaced_idx=True)
        test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_sel(slice("a", "b"), range(8))
        test_sel(slice(("a", 1), ("b", 1)), range(6))
        test_sel({"one": "a", "two": 1, "three": -1}, 0)
        test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel({"one": "a"}, range(4), replaced_idx=True)

        assert_identical(mdata.loc["a"], mdata.sel(x="a"))
        assert_identical(mdata.loc[("a", 1), ...], mdata.sel(x=("a", 1)))
        assert_identical(mdata.loc[{"one": "a"}, ...], mdata.sel(x={"one": "a"}))
        with pytest.raises(IndexError):
            mdata.loc[("a", 1)]

        assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1))

    def test_selection_multiindex_remove_unused(self) -> None:
        # GH2619. For MultiIndex, we need to call remove_unused.
ds = xr.DataArray( np.arange(40).reshape(8, 5), dims=["x", "y"], coords={"x": np.arange(8), "y": np.arange(5)}, ) ds = ds.stack(xy=["x", "y"]) ds_isel = ds.isel(xy=ds["x"] < 4) with pytest.raises(KeyError): ds_isel.sel(x=5) actual = ds_isel.unstack() expected = ds.reset_index("xy").isel(xy=ds["x"] < 4) expected = expected.set_index(xy=["x", "y"]).unstack() assert_identical(expected, actual) def test_selection_multiindex_from_level(self) -> None: # GH: 3512 da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"}) db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"}) data = xr.concat( [da, db], dim="x", coords="different", compat="equals" ).set_index(xy=["x", "y"]) assert data.dims == ("xy",) actual = data.sel(y="a") expected = data.isel(xy=[0, 1]).unstack("xy").squeeze("y") assert_equal(actual, expected) def test_concat_with_default_coords_warns(self) -> None: da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"}) db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"}) with pytest.warns(FutureWarning): original = xr.concat([da, db], dim="x") assert original.y.size == 4 with set_options(use_new_combine_kwarg_defaults=True): # default compat="override" will pick the first one new = xr.concat([da, db], dim="x") assert new.y.size == 1 def test_virtual_default_coords(self) -> None: array = DataArray(np.zeros((5,)), dims="x") expected = DataArray(range(5), dims="x", name="x") assert_identical(expected, array["x"]) assert_identical(expected, array.coords["x"]) def test_virtual_time_components(self) -> None: dates = pd.date_range("2000-01-01", periods=10) da = DataArray(np.arange(1, 11), [("time", dates)]) assert_array_equal(da["time.dayofyear"], da.values) assert_array_equal(da.coords["time.dayofyear"], da.values) def test_coords(self) -> None: # use int64 to ensure repr() consistency on windows coords = [ IndexVariable("x", np.array([-1, -2], "int64")), IndexVariable("y", np.array([0, 1, 2], "int64")), ] da = DataArray(np.random.randn(2, 3), coords, name="foo") # len assert len(da.coords) == 2 # iter assert list(da.coords) == ["x", "y"] assert coords[0].identical(da.coords["x"]) assert coords[1].identical(da.coords["y"]) assert "x" in da.coords assert 0 not in da.coords assert "foo" not in da.coords with pytest.raises(KeyError): da.coords[0] with pytest.raises(KeyError): da.coords["foo"] # repr expected_repr = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2""" ) actual = repr(da.coords) assert expected_repr == actual # dtypes assert da.coords.dtypes == {"x": np.dtype("int64"), "y": np.dtype("int64")} del da.coords["x"] da._indexes = filter_indexes_from_coords(da.xindexes, set(da.coords)) expected = DataArray(da.values, {"y": [0, 1, 2]}, dims=["x", "y"], name="foo") assert_identical(da, expected) with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda["level_1"] = ("x", np.arange(4)) self.mda.coords["level_1"] = ("x", np.arange(4)) def test_coords_to_index(self) -> None: da = DataArray(np.zeros((2, 3)), [("x", [1, 2]), ("y", list("abc"))]) with pytest.raises(ValueError, match=r"no valid index"): da[0, 0].coords.to_index() expected = pd.Index(["a", "b", "c"], name="y") actual = da[0].coords.to_index() assert expected.equals(actual) expected = pd.MultiIndex.from_product( [[1, 2], ["a", "b", "c"]], names=["x", "y"] ) actual = da.coords.to_index() assert expected.equals(actual) expected = pd.MultiIndex.from_product( [["a", "b", "c"], [1, 2]], names=["y", "x"] ) actual = 
da.coords.to_index(["y", "x"]) assert expected.equals(actual) with pytest.raises(ValueError, match=r"ordered_dims must match"): da.coords.to_index(["x"]) def test_coord_coords(self) -> None: orig = DataArray( [10, 20], {"x": [1, 2], "x2": ("x", ["a", "b"]), "z": 4}, dims="x" ) actual = orig.coords["x"] expected = DataArray( [1, 2], {"z": 4, "x2": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x" ) assert_identical(expected, actual) del actual.coords["x2"] assert_identical(expected.reset_coords("x2", drop=True), actual) actual.coords["x3"] = ("x", ["a", "b"]) expected = DataArray( [1, 2], {"z": 4, "x3": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x" ) assert_identical(expected, actual) def test_reset_coords(self) -> None: data = DataArray( np.zeros((3, 4)), {"bar": ("x", ["a", "b", "c"]), "baz": ("y", range(4)), "y": range(4)}, dims=["x", "y"], name="foo", ) actual1 = data.reset_coords() expected1 = Dataset( { "foo": (["x", "y"], np.zeros((3, 4))), "bar": ("x", ["a", "b", "c"]), "baz": ("y", range(4)), "y": range(4), } ) assert_identical(actual1, expected1) actual2 = data.reset_coords(["bar", "baz"]) assert_identical(actual2, expected1) actual3 = data.reset_coords("bar") expected3 = Dataset( {"foo": (["x", "y"], np.zeros((3, 4))), "bar": ("x", ["a", "b", "c"])}, {"baz": ("y", range(4)), "y": range(4)}, ) assert_identical(actual3, expected3) actual4 = data.reset_coords(["bar"]) assert_identical(actual4, expected3) actual5 = data.reset_coords(drop=True) expected5 = DataArray( np.zeros((3, 4)), coords={"y": range(4)}, dims=["x", "y"], name="foo" ) assert_identical(actual5, expected5) actual6 = data.copy().reset_coords(drop=True) assert_identical(actual6, expected5) actual7 = data.reset_coords("bar", drop=True) expected7 = DataArray( np.zeros((3, 4)), {"baz": ("y", range(4)), "y": range(4)}, dims=["x", "y"], name="foo", ) assert_identical(actual7, expected7) with pytest.raises(ValueError, match=r"cannot be found"): data.reset_coords("foo", drop=True) with pytest.raises(ValueError, match=r"cannot be found"): data.reset_coords("not_found") with pytest.raises(ValueError, match=r"cannot remove index"): data.reset_coords("y") # non-dimension index coordinate midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2")) data = DataArray([1, 2, 3, 4], coords={"x": midx}, dims="x", name="foo") with pytest.raises(ValueError, match=r"cannot remove index"): data.reset_coords("lvl1") def test_assign_coords(self) -> None: array = DataArray(10) actual = array.assign_coords(c=42) expected = DataArray(10, {"c": 42}) assert_identical(actual, expected) with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda.assign_coords(level_1=("x", range(4))) # GH: 2112 da = xr.DataArray([0, 1, 2], dims="x") with pytest.raises(CoordinateValidationError): da["x"] = [0, 1, 2, 3] # size conflict with pytest.raises(CoordinateValidationError): da.coords["x"] = [0, 1, 2, 3] # size conflict with pytest.raises(CoordinateValidationError): da.coords["x"] = ("y", [1, 2, 3]) # no new dimension to a DataArray def test_assign_coords_existing_multiindex(self) -> None: data = self.mda with pytest.warns( FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent" ): data.assign_coords(x=range(4)) def test_assign_coords_custom_index(self) -> None: class CustomIndex(Index): pass coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) da = xr.DataArray([0, 1, 2], dims="x") actual = da.assign_coords(coords) assert 
isinstance(actual.xindexes["x"], CustomIndex) def test_assign_coords_no_default_index(self) -> None: coords = Coordinates({"y": [1, 2, 3]}, indexes={}) da = DataArray([1, 2, 3], dims="y") actual = da.assign_coords(coords) assert_identical(actual.coords, coords, check_default_indexes=False) assert "y" not in actual.xindexes def test_assign_coords_extra_dim_index_coord(self) -> None: class AnyIndex(Index): def should_add_coord_to_array(self, name, var, dims): return True idx = AnyIndex() coords = Coordinates( coords={ "x": ("x", [1, 2]), "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]), }, indexes={"x": idx, "x_bounds": idx}, ) da = DataArray([1.0, 2.0], dims="x") actual = da.assign_coords(coords) expected = DataArray([1.0, 2.0], coords=coords, dims="x") assert_identical(actual, expected, check_default_indexes=False) assert "x_bnds" not in actual.dims def test_assign_coords_uses_base_variable_class(self) -> None: a = DataArray([0, 1, 3], dims=["x"], coords={"x": [0, 1, 2]}) a = a.assign_coords(foo=a.x) # explicit check assert isinstance(a["x"].variable, IndexVariable) assert not isinstance(a["foo"].variable, IndexVariable) # test internal invariant checks when comparing the datasets expected = DataArray( [0, 1, 3], dims=["x"], coords={"x": [0, 1, 2], "foo": ("x", [0, 1, 2])} ) assert_identical(a, expected) def test_coords_alignment(self) -> None: lhs = DataArray([1, 2, 3], [("x", [0, 1, 2])]) rhs = DataArray([2, 3, 4], [("x", [1, 2, 3])]) lhs.coords["rhs"] = rhs expected = DataArray( [1, 2, 3], coords={"rhs": ("x", [np.nan, 2, 3]), "x": [0, 1, 2]}, dims="x" ) assert_identical(lhs, expected) def test_set_coords_update_index(self) -> None: actual = DataArray([1, 2, 3], [("x", [1, 2, 3])]) actual.coords["x"] = ["a", "b", "c"] assert actual.xindexes["x"].to_pandas_index().equals(pd.Index(["a", "b", "c"])) def test_set_coords_multiindex_level(self) -> None: with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda["level_1"] = range(4) def test_coords_replacement_alignment(self) -> None: # regression test for GH725 arr = DataArray([0, 1, 2], dims=["abc"]) new_coord = DataArray([1, 2, 3], dims=["abc"], coords=[[1, 2, 3]]) arr["abc"] = new_coord expected = DataArray([0, 1, 2], coords=[("abc", [1, 2, 3])]) assert_identical(arr, expected) def test_coords_non_string(self) -> None: arr = DataArray(0, coords={1: 2}) actual = arr.coords[1] expected = DataArray(2, coords={1: 2}, name=1) assert_identical(actual, expected) def test_coords_delitem_delete_indexes(self) -> None: # regression test for GH3746 arr = DataArray(np.ones((2,)), dims="x", coords={"x": [0, 1]}) del arr.coords["x"] assert "x" not in arr.xindexes def test_coords_delitem_multiindex_level(self) -> None: with pytest.raises( ValueError, match=r"cannot remove coordinate.*corrupt.*index " ): del self.mda.coords["level_1"] def test_broadcast_like(self) -> None: arr1 = DataArray( np.ones((2, 3)), dims=["x", "y"], coords={"x": ["a", "b"], "y": ["a", "b", "c"]}, ) arr2 = DataArray( np.ones((3, 2)), dims=["x", "y"], coords={"x": ["a", "b", "c"], "y": ["a", "b"]}, ) orig1, orig2 = broadcast(arr1, arr2) new1 = arr1.broadcast_like(arr2) new2 = arr2.broadcast_like(arr1) assert_identical(orig1, new1) assert_identical(orig2, new2) orig3 = DataArray(np.random.randn(5), [("x", range(5))]) orig4 = DataArray(np.random.randn(6), [("y", range(6))]) new3, new4 = broadcast(orig3, orig4) assert_identical(orig3.broadcast_like(orig4), new3.transpose("y", "x")) assert_identical(orig4.broadcast_like(orig3), new4) 
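    # A small, self-contained sketch of the `broadcast_like` behaviour exercised
    # in `test_broadcast_like` above, shown on a toy example. The helper name and
    # the sample values are illustrative additions, not part of the upstream
    # suite; the underscore prefix keeps pytest from collecting it as a test.
    def _broadcast_like_sketch(self) -> None:
        a = DataArray(np.arange(3.0), dims="x", coords={"x": [0, 1, 2]})
        b = DataArray(np.arange(4.0), dims="y", coords={"y": list("abcd")})

        # Broadcasting `a` against `b` adds the missing "y" dimension (and its
        # coordinate) while repeating `a`'s values along it.
        expanded = a.broadcast_like(b)
        assert set(expanded.dims) == {"x", "y"}
        assert expanded.sizes == {"x": 3, "y": 4}

        # Any slice along the new dimension recovers the original array.
        assert_identical(expanded.isel(y=0, drop=True), a)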
def test_reindex_like(self) -> None: foo = DataArray(np.random.randn(5, 6), [("x", range(5)), ("y", range(6))]) bar = foo[:2, :2] assert_identical(foo.reindex_like(bar), bar) expected = foo.copy() expected[:] = np.nan expected[:2, :2] = bar assert_identical(bar.reindex_like(foo), expected) def test_reindex_like_no_index(self) -> None: foo = DataArray(np.random.randn(5, 6), dims=["x", "y"]) assert_identical(foo, foo.reindex_like(foo)) bar = foo[:4] with pytest.raises(ValueError, match=r"different size for unlabeled"): foo.reindex_like(bar) def test_reindex_regressions(self) -> None: da = DataArray(np.random.randn(5), coords=[("time", range(5))]) time2 = DataArray(np.arange(5), dims="time2") with pytest.raises(ValueError): da.reindex(time=time2) # regression test for #736, reindex can not change complex nums dtype xnp = np.array([1, 2, 3], dtype=complex) x = DataArray(xnp, coords=[[0.1, 0.2, 0.3]]) y = DataArray([2, 5, 6, 7, 8], coords=[[-1.1, 0.21, 0.31, 0.41, 0.51]]) re_dtype = x.reindex_like(y, method="pad").dtype assert x.dtype == re_dtype def test_reindex_method(self) -> None: x = DataArray([10, 20], dims="y", coords={"y": [0, 1]}) y = [-0.1, 0.5, 1.1] actual = x.reindex(y=y, method="backfill", tolerance=0.2) expected = DataArray([10, np.nan, np.nan], coords=[("y", y)]) assert_identical(expected, actual) actual = x.reindex(y=y, method="backfill", tolerance=[0.1, 0.1, 0.01]) expected = DataArray([10, np.nan, np.nan], coords=[("y", y)]) assert_identical(expected, actual) alt = Dataset({"y": y}) actual = x.reindex_like(alt, method="backfill") expected = DataArray([10, 20, np.nan], coords=[("y", y)]) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {None: 2, "u": 1}]) def test_reindex_fill_value(self, fill_value) -> None: x = DataArray([10, 20], dims="y", coords={"y": [0, 1], "u": ("y", [1, 2])}) y = [0, 1, 2] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_var = fill_value_u = np.nan elif isinstance(fill_value, dict): fill_value_var = fill_value[None] fill_value_u = fill_value["u"] else: fill_value_var = fill_value_u = fill_value actual = x.reindex(y=y, fill_value=fill_value) expected = DataArray( [10, 20, fill_value_var], dims="y", coords={"y": y, "u": ("y", [1, 2, fill_value_u])}, ) assert_identical(expected, actual) @pytest.mark.parametrize("dtype", [str, bytes]) def test_reindex_str_dtype(self, dtype) -> None: data = DataArray( [1, 2], dims="x", coords={"x": np.array(["a", "b"], dtype=dtype)} ) actual = data.reindex(x=data.x) expected = data assert_identical(expected, actual) assert actual.dtype == expected.dtype def test_reindex_empty_array_dtype(self) -> None: # Dtype of reindex result should match dtype of the original DataArray. 
# See GH issue #7299 x = xr.DataArray([], dims=("x",), coords={"x": []}).astype("float32") y = x.reindex(x=[1.0, 2.0]) assert x.dtype == y.dtype, ( "Dtype of reindexed DataArray should match dtype of the original DataArray" ) assert y.dtype == np.float32, ( "Dtype of reindexed DataArray should remain float32" ) @pytest.mark.parametrize( "extension_array", [ pytest.param(pd.Categorical(["a", "b", "c"]), id="categorical"), ] + ( [ pytest.param( pd.array([1, 2, 3], dtype="int64[pyarrow]"), id="int64[pyarrow]", ) ] if has_pyarrow else [] ), ) def test_reindex_extension_array(self, extension_array) -> None: srs = pd.Series(index=["e", "f", "g"], data=extension_array) x = srs.to_xarray() y = x.reindex(index=["f", "g", "z"]) assert_array_equal(x, extension_array) pd.testing.assert_extension_array_equal( y.data, extension_array._from_sequence( [extension_array[1], extension_array[2], pd.NA], dtype=extension_array.dtype, ), ) assert x.dtype == y.dtype == extension_array.dtype @pytest.mark.parametrize( "fill_value,extension_array", [ pytest.param("a", pd.Categorical([pd.NA, "a", "b"]), id="categorical"), ] + ( [ pytest.param( 0, pd.array([pd.NA, 1, 1], dtype="int64[pyarrow]"), id="int64[pyarrow]", ) ] if has_pyarrow else [] ), ) def test_fillna_extension_array(self, fill_value, extension_array) -> None: srs: pd.Series = pd.Series(index=np.array([1, 2, 3]), data=extension_array) da = srs.to_xarray() filled = da.fillna(fill_value) assert filled.dtype == srs.dtype assert (filled.values == np.array([fill_value, *(srs.values[1:])])).all() @requires_pyarrow def test_fillna_extension_array_bad_val(self) -> None: srs: pd.Series = pd.Series( index=np.array([1, 2, 3]), data=pd.array([pd.NA, 1, 1], dtype="int64[pyarrow]"), ) da = srs.to_xarray() with pytest.raises(ValueError): da.fillna("a") @pytest.mark.parametrize( "extension_array", [ pytest.param(pd.Categorical([pd.NA, "a", "b"]), id="categorical"), ] + ( [ pytest.param( pd.array([pd.NA, 1, 1], dtype="int64[pyarrow]"), id="int64[pyarrow]" ) ] if has_pyarrow else [] ), ) def test_dropna_extension_array(self, extension_array) -> None: srs: pd.Series = pd.Series(index=np.array([1, 2, 3]), data=extension_array) da = srs.to_xarray() filled = da.dropna("index") assert filled.dtype == srs.dtype assert (filled.values == srs.values[1:]).all() def test_rename(self) -> None: da = xr.DataArray( [1, 2, 3], dims="dim", name="name", coords={"coord": ("dim", [5, 6, 7])} ) # change name renamed_name = da.rename("name_new") assert renamed_name.name == "name_new" expected_name = da.copy() expected_name.name = "name_new" assert_identical(renamed_name, expected_name) # change name to None? 
renamed_noname = da.rename(None) assert renamed_noname.name is None expected_noname = da.copy() expected_noname.name = None assert_identical(renamed_noname, expected_noname) renamed_noname = da.rename() assert renamed_noname.name is None assert_identical(renamed_noname, expected_noname) # change dim renamed_dim = da.rename({"dim": "dim_new"}) assert renamed_dim.dims == ("dim_new",) expected_dim = xr.DataArray( [1, 2, 3], dims="dim_new", name="name", coords={"coord": ("dim_new", [5, 6, 7])}, ) assert_identical(renamed_dim, expected_dim) # change dim with kwargs renamed_dimkw = da.rename(dim="dim_new") assert renamed_dimkw.dims == ("dim_new",) assert_identical(renamed_dimkw, expected_dim) # change coords renamed_coord = da.rename({"coord": "coord_new"}) assert "coord_new" in renamed_coord.coords expected_coord = xr.DataArray( [1, 2, 3], dims="dim", name="name", coords={"coord_new": ("dim", [5, 6, 7])} ) assert_identical(renamed_coord, expected_coord) # change coords with kwargs renamed_coordkw = da.rename(coord="coord_new") assert "coord_new" in renamed_coordkw.coords assert_identical(renamed_coordkw, expected_coord) # change coord and dim renamed_both = da.rename({"dim": "dim_new", "coord": "coord_new"}) assert renamed_both.dims == ("dim_new",) assert "coord_new" in renamed_both.coords expected_both = xr.DataArray( [1, 2, 3], dims="dim_new", name="name", coords={"coord_new": ("dim_new", [5, 6, 7])}, ) assert_identical(renamed_both, expected_both) # change coord and dim with kwargs renamed_bothkw = da.rename(dim="dim_new", coord="coord_new") assert renamed_bothkw.dims == ("dim_new",) assert "coord_new" in renamed_bothkw.coords assert_identical(renamed_bothkw, expected_both) # change all renamed_all = da.rename("name_new", dim="dim_new", coord="coord_new") assert renamed_all.name == "name_new" assert renamed_all.dims == ("dim_new",) assert "coord_new" in renamed_all.coords expected_all = xr.DataArray( [1, 2, 3], dims="dim_new", name="name_new", coords={"coord_new": ("dim_new", [5, 6, 7])}, ) assert_identical(renamed_all, expected_all) def test_rename_dimension_coord_warnings(self) -> None: # create a dimension coordinate by renaming a dimension or coordinate # should raise a warning (no index created) da = DataArray([0, 0], coords={"x": ("y", [0, 1])}, dims="y") with pytest.warns( UserWarning, match=r"rename 'x' to 'y' does not create an index.*" ): da.rename(x="y") da = xr.DataArray([0, 0], coords={"y": ("x", [0, 1])}, dims="x") with pytest.warns( UserWarning, match=r"rename 'x' to 'y' does not create an index.*" ): da.rename(x="y") # No operation should not raise a warning da = xr.DataArray( data=np.ones((2, 3)), dims=["x", "y"], coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ) with warnings.catch_warnings(): warnings.simplefilter("error") da.rename(x="x") def test_replace(self) -> None: # Tests the `attrs` replacement and whether it interferes with a # `variable` replacement da = self.mda attrs1 = {"a1": "val1", "a2": 161} x = np.ones((10, 20)) v = Variable(["x", "y"], x) assert da._replace(variable=v, attrs=attrs1).attrs == attrs1 attrs2 = {"b1": "val2", "b2": 1312} va = Variable(["x", "y"], x, attrs2) # assuming passed `attrs` should prevail assert da._replace(variable=va, attrs=attrs1).attrs == attrs1 # assuming `va.attrs` should be adopted assert da._replace(variable=va).attrs == attrs2 def test_init_value(self) -> None: expected = DataArray( np.full((3, 4), 3), dims=["x", "y"], coords=[range(3), range(4)] ) actual = DataArray(3, dims=["x", "y"], coords=[range(3), 
range(4)]) assert_identical(expected, actual) expected = DataArray( np.full((1, 10, 2), 0), dims=["w", "x", "y"], coords={"x": np.arange(10), "y": ["north", "south"]}, ) actual = DataArray(0, dims=expected.dims, coords=expected.coords) assert_identical(expected, actual) expected = DataArray( np.full((10, 2), np.nan), coords=[("x", np.arange(10)), ("y", ["a", "b"])] ) actual = DataArray(coords=[("x", np.arange(10)), ("y", ["a", "b"])]) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"different number of dim"): DataArray(np.array(1), coords={"x": np.arange(10)}, dims=["x"]) with pytest.raises(ValueError, match=r"does not match the 0 dim"): DataArray(np.array(1), coords=[("x", np.arange(10))]) def test_swap_dims(self) -> None: array = DataArray(np.random.randn(3), {"x": list("abc")}, "x") expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y") actual = array.swap_dims({"x": "y"}) assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) # as kwargs array = DataArray(np.random.randn(3), {"x": list("abc")}, "x") expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y") actual = array.swap_dims(x="y") assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) # multiindex case idx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"]) array = DataArray(np.random.randn(3), {"y": ("x", idx)}, "x") expected = DataArray(array.values, {"y": idx}, "y") actual = array.swap_dims({"x": "y"}) assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) def test_expand_dims_error(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) with pytest.raises(TypeError, match=r"dim should be Hashable or"): array.expand_dims(0) with pytest.raises(ValueError, match=r"lengths of dim and axis"): # dims and axis argument should be the same length array.expand_dims(dim=["a", "b"], axis=[1, 2, 3]) with pytest.raises(ValueError, match=r"Dimension x already"): # Should not pass the already existing dimension. 
array.expand_dims(dim=["x"]) # raise if duplicate with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "y"]) with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "z"], axis=[1, 1]) with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "z"], axis=[2, -2]) # out of bounds error, axis must be in [-4, 3] with pytest.raises(IndexError): array.expand_dims(dim=["y", "z"], axis=[2, 4]) with pytest.raises(IndexError): array.expand_dims(dim=["y", "z"], axis=[2, -5]) # Does not raise an IndexError array.expand_dims(dim=["y", "z"], axis=[2, -4]) array.expand_dims(dim=["y", "z"], axis=[2, 3]) array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) with pytest.raises(TypeError): array.expand_dims({"new_dim": 3.2}) # Attempt to use both dim and kwargs with pytest.raises(ValueError): array.expand_dims({"d": 4}, e=4) def test_expand_dims(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) # pass only dim label actual = array.expand_dims(dim="y") expected = DataArray( np.expand_dims(array.values, 0), dims=["y", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze("y", drop=True) assert_identical(array, roundtripped) # pass multiple dims actual = array.expand_dims(dim=["y", "z"]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, 0), 0), dims=["y", "z", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze(["y", "z"], drop=True) assert_identical(array, roundtripped) # pass multiple dims and axis. 
Axis is out of order actual = array.expand_dims(dim=["z", "y"], axis=[2, 1]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, 1), 2), dims=["x", "y", "z", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure the attrs are tracked assert actual.attrs["key"] == "entry" roundtripped = actual.squeeze(["z", "y"], drop=True) assert_identical(array, roundtripped) # Negative axis and they are out of order actual = array.expand_dims(dim=["y", "z"], axis=[-1, -2]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, -1), -1), dims=["x", "dim_0", "z", "y"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) assert actual.attrs["key"] == "entry" roundtripped = actual.squeeze(["y", "z"], drop=True) assert_identical(array, roundtripped) def test_expand_dims_with_scalar_coordinate(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0}, attrs={"key": "entry"}, ) actual = array.expand_dims(dim="z") expected = DataArray( np.expand_dims(array.values, 0), dims=["z", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": np.ones(1)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze(["z"], drop=False) assert_identical(array, roundtripped) def test_expand_dims_with_greater_dim_size(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0}, attrs={"key": "entry"}, ) actual = array.expand_dims({"y": 2, "z": 1, "dim_1": ["a", "b", "c"]}) expected_coords = { "y": [0, 1], "z": [1.0], "dim_1": ["a", "b", "c"], "x": np.linspace(0, 1, 3), "dim_0": range(4), } expected = DataArray( array.values * np.ones([2, 1, 3, 3, 4]), coords=expected_coords, dims=list(expected_coords.keys()), attrs={"key": "entry"}, ).drop_vars(["y", "dim_0"]) assert_identical(expected, actual) # Test with kwargs instead of passing dict to dim arg. other_way = array.expand_dims(dim_1=["a", "b", "c"]) other_way_expected = DataArray( array.values * np.ones([3, 3, 4]), coords={ "dim_1": ["a", "b", "c"], "x": np.linspace(0, 1, 3), "dim_0": range(4), "z": 1.0, }, dims=["dim_1", "x", "dim_0"], attrs={"key": "entry"}, ).drop_vars("dim_0") assert_identical(other_way_expected, other_way) def test_set_index(self) -> None: indexes = [self.mindex.get_level_values(n) for n in self.mindex.names] # type: ignore[arg-type,unused-ignore] # pandas-stubs varies coords = {idx.name: ("x", idx) for idx in indexes} array = DataArray(self.mda.values, coords=coords, dims="x") expected = self.mda.copy() level_3 = ("x", [1, 2, 3, 4]) array["level_3"] = level_3 expected["level_3"] = level_3 obj = array.set_index(x=self.mindex.names) assert_identical(obj, expected) obj = obj.set_index(x="level_3", append=True) expected = array.set_index(x=["level_1", "level_2", "level_3"]) assert_identical(obj, expected) array = array.set_index(x=["level_1", "level_2", "level_3"]) assert_identical(array, expected) array2d = DataArray( np.random.rand(2, 2), coords={"x": ("x", [0, 1]), "level": ("y", [1, 2])}, dims=("x", "y"), ) with pytest.raises(ValueError, match=r"dimension mismatch"): array2d.set_index(x="level") # Issue 3176: Ensure clear error message on key error. 
with pytest.raises(ValueError, match=r".*variable\(s\) do not exist"): obj.set_index(x="level_4") def test_reset_index(self) -> None: indexes = [self.mindex.get_level_values(n) for n in self.mindex.names] # type: ignore[arg-type,unused-ignore] # pandas-stubs varies coords = {idx.name: ("x", idx) for idx in indexes} expected = DataArray(self.mda.values, coords=coords, dims="x") obj = self.mda.reset_index("x") assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 obj = self.mda.reset_index(self.mindex.names) assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 obj = self.mda.reset_index(["x", "level_1"]) assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 coords = { "x": ("x", self.mindex.droplevel("level_1")), "level_1": ("x", self.mindex.get_level_values("level_1")), } expected = DataArray(self.mda.values, coords=coords, dims="x") obj = self.mda.reset_index(["level_1"]) assert_identical(obj, expected, check_default_indexes=False) assert list(obj.xindexes) == ["x"] assert type(obj.xindexes["x"]) is PandasIndex expected = DataArray(self.mda.values, dims="x") obj = self.mda.reset_index("x", drop=True) assert_identical(obj, expected, check_default_indexes=False) array = self.mda.copy() array = array.reset_index(["x"], drop=True) assert_identical(array, expected, check_default_indexes=False) # single index array = DataArray([1, 2], coords={"x": ["a", "b"]}, dims="x") obj = array.reset_index("x") print(obj.x.variable) print(array.x.variable) assert_equal(obj.x.variable, array.x.variable.to_base_variable()) assert len(obj.xindexes) == 0 def test_reset_index_keep_attrs(self) -> None: coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True}) da = DataArray([1, 0], [coord_1]) obj = da.reset_index("coord_1") assert obj.coord_1.attrs == da.coord_1.attrs assert len(obj.xindexes) == 0 def test_reorder_levels(self) -> None: midx = self.mindex.reorder_levels(["level_2", "level_1"]) expected = DataArray(self.mda.values, coords={"x": midx}, dims="x") obj = self.mda.reorder_levels(x=["level_2", "level_1"]) assert_identical(obj, expected) array = DataArray([1, 2], dims="x") with pytest.raises(KeyError): array.reorder_levels(x=["level_1", "level_2"]) array["x"] = [0, 1] with pytest.raises(ValueError, match=r"has no MultiIndex"): array.reorder_levels(x=["level_1", "level_2"]) def test_set_xindex(self) -> None: da = DataArray( [1, 2, 3, 4], coords={"foo": ("x", ["a", "a", "b", "b"])}, dims="x" ) class IndexWithOptions(Index): def __init__(self, opt): self.opt = opt @classmethod def from_variables(cls, variables, options): return cls(options["opt"]) indexed = da.set_xindex("foo", IndexWithOptions, opt=1) assert "foo" in indexed.xindexes assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_dataset_getitem(self) -> None: dv = self.ds["foo"] assert_identical(dv, self.dv) def test_array_interface(self) -> None: assert_array_equal(np.asarray(self.dv), self.x) # test patched in methods assert_array_equal(self.dv.astype(float), self.v.astype(float)) assert_array_equal(self.dv.argsort(), self.v.argsort()) assert_array_equal(self.dv.clip(2, 3), self.v.clip(2, 3)) # test ufuncs expected = deepcopy(self.ds) expected["foo"][:] = np.sin(self.x) assert_equal(expected["foo"], np.sin(self.dv)) assert_array_equal(self.dv, np.maximum(self.v, self.dv)) bar = Variable(["x", "y"], np.zeros((10, 20))) assert_equal(self.dv, np.maximum(self.dv, bar)) def test_astype_attrs(self) -> None: # 
Split into two loops for mypy - Variable, DataArray, and Dataset # don't share a common base class, so mypy infers type object for v, # which doesn't have the attrs or astype methods for v in [self.mda.copy(), self.ds.copy()]: v.attrs["foo"] = "bar" assert v.attrs == v.astype(float).attrs assert not v.astype(float, keep_attrs=False).attrs # Test Variable separately to avoid mypy inferring object type va = self.va.copy() va.attrs["foo"] = "bar" assert va.attrs == va.astype(float).attrs assert not va.astype(float, keep_attrs=False).attrs def test_astype_dtype(self) -> None: original = DataArray([-1, 1, 2, 3, 1000]) converted = original.astype(float) assert_array_equal(original, converted) assert np.issubdtype(original.dtype, np.integer) assert np.issubdtype(converted.dtype, np.floating) def test_astype_order(self) -> None: original = DataArray([[1, 2], [3, 4]]) converted = original.astype("d", order="F") assert_equal(original, converted) assert original.values.flags["C_CONTIGUOUS"] assert converted.values.flags["F_CONTIGUOUS"] def test_astype_subok(self) -> None: class NdArraySubclass(np.ndarray): pass original = DataArray(NdArraySubclass(np.arange(3))) converted_not_subok = original.astype("d", subok=False) converted_subok = original.astype("d", subok=True) if not isinstance(original.data, NdArraySubclass): pytest.xfail("DataArray cannot be backed yet by a subclasses of np.ndarray") assert isinstance(converted_not_subok.data, np.ndarray) assert not isinstance(converted_not_subok.data, NdArraySubclass) assert isinstance(converted_subok.data, NdArraySubclass) def test_is_null(self) -> None: x = np.random.default_rng(42).random((5, 6)) x[x < 0] = np.nan original = DataArray(x, [-np.arange(5), np.arange(6)], ["x", "y"]) expected = DataArray(pd.isnull(x), [-np.arange(5), np.arange(6)], ["x", "y"]) assert_identical(expected, original.isnull()) assert_identical(~expected, original.notnull()) def test_math(self) -> None: x = self.x v = self.v a = self.dv # variable math was already tested extensively, so let's just make sure # that all types are properly converted here assert_equal(a, +a) assert_equal(a, a + 0) assert_equal(a, 0 + a) assert_equal(a, a + 0 * v) assert_equal(a, 0 * v + a) assert_equal(a, a + 0 * x) assert_equal(a, 0 * x + a) assert_equal(a, a + 0 * a) assert_equal(a, 0 * a + a) def test_math_automatic_alignment(self) -> None: a = DataArray(range(5), [("x", range(5))]) b = DataArray(range(5), [("x", range(1, 6))]) expected = DataArray(np.ones(4), [("x", [1, 2, 3, 4])]) assert_identical(a - b, expected) def test_non_overlapping_dataarrays_return_empty_result(self) -> None: a = DataArray(range(5), [("x", range(5))]) result = a.isel(x=slice(2)) + a.isel(x=slice(2, None)) assert len(result["x"]) == 0 def test_empty_dataarrays_return_empty_result(self) -> None: a = DataArray(data=[]) result = a * a assert len(result["dim_0"]) == 0 def test_inplace_math_basics(self) -> None: x = self.x a = self.dv v = a.variable b = a b += 1 assert b is a assert b.variable is v assert_array_equal(b.values, x) assert source_ndarray(b.values) is x def test_inplace_math_error(self) -> None: data = np.random.rand(4) times = np.arange(4) foo = DataArray(data, coords=[times], dims=["time"]) b = times.copy() with pytest.raises( TypeError, match=r"Values of an IndexVariable are immutable" ): foo.coords["time"] += 1 # Check error throwing prevented inplace operation assert_array_equal(foo.coords["time"], b) def test_inplace_math_automatic_alignment(self) -> None: a = DataArray(range(5), [("x", range(5))]) b = 
DataArray(range(1, 6), [("x", range(1, 6))]) with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"): a += b with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"): b += a def test_math_name(self) -> None: # Verify that name is preserved only when it can be done unambiguously. # The rule (copied from pandas.Series) is keep the current name only if # the other object has the same name or no name attribute and this # object isn't a coordinate; otherwise reset to None. a = self.dv assert (+a).name == "foo" assert (a + 0).name == "foo" assert (a + a.rename(None)).name is None assert (a + a.rename("bar")).name is None assert (a + a).name == "foo" assert (+a["x"]).name == "x" assert (a["x"] + 0).name == "x" assert (a + a["x"]).name is None def test_math_with_coords(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray(np.random.randn(2, 3), coords, dims=["x", "y"]) actual = orig + 1 expected = DataArray(orig.values + 1, orig.coords) assert_identical(expected, actual) actual = 1 + orig assert_identical(expected, actual) actual = orig + orig[0, 0] exp_coords = {k: v for k, v in coords.items() if k != "lat"} expected = DataArray( orig.values + orig.values[0, 0], exp_coords, dims=["x", "y"] ) assert_identical(expected, actual) actual = orig[0, 0] + orig assert_identical(expected, actual) actual = orig[0, 0] + orig[-1, -1] expected = DataArray(orig.values[0, 0] + orig.values[-1, -1], {"c": -999}) assert_identical(expected, actual) actual = orig[:, 0] + orig[0, :] exp_values = orig[:, 0].values[:, None] + orig[0, :].values[None, :] expected = DataArray(exp_values, exp_coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig[0, :] + orig[:, 0] assert_identical(expected.transpose(transpose_coords=True), actual) actual = orig - orig.transpose(transpose_coords=True) expected = DataArray(np.zeros((2, 3)), orig.coords) assert_identical(expected, actual) actual = orig.transpose(transpose_coords=True) - orig assert_identical(expected.transpose(transpose_coords=True), actual) alt = DataArray([1, 1], {"x": [-1, -2], "c": "foo", "d": 555}, "x") actual = orig + alt expected = orig + 1 expected.coords["d"] = 555 del expected.coords["c"] assert_identical(expected, actual) actual = alt + orig assert_identical(expected, actual) def test_index_math(self) -> None: orig = DataArray(range(3), dims="x", name="x") actual = orig + 1 expected = DataArray(1 + np.arange(3), dims="x", name="x") assert_identical(expected, actual) # regression tests for #254 actual = orig[0] < orig expected = DataArray([False, True, True], dims="x", name="x") assert_identical(expected, actual) actual = orig > orig[0] assert_identical(expected, actual) def test_dataset_math(self) -> None: # more comprehensive tests with multiple dataset variables obs = Dataset( {"tmin": ("x", np.arange(5)), "tmax": ("x", 10 + np.arange(5))}, {"x": ("x", 0.5 * np.arange(5)), "loc": ("x", range(-2, 3))}, ) actual1 = 2 * obs["tmax"] expected1 = DataArray(2 * (10 + np.arange(5)), obs.coords, name="tmax") assert_identical(actual1, expected1) actual2 = obs["tmax"] - obs["tmin"] expected2 = DataArray(10 * np.ones(5), obs.coords) assert_identical(actual2, expected2) sim = Dataset( { "tmin": ("x", 1 + np.arange(5)), "tmax": ("x", 11 + np.arange(5)), # does *not* include 'loc' as a coordinate "x": ("x", 0.5 * np.arange(5)), } ) actual3 = sim["tmin"] - obs["tmin"] expected3 = DataArray(np.ones(5), obs.coords, name="tmin") 
assert_identical(actual3, expected3) actual4 = -obs["tmin"] + sim["tmin"] assert_identical(actual4, expected3) actual5 = sim["tmin"].copy() actual5 -= obs["tmin"] assert_identical(actual5, expected3) actual6 = sim.copy() actual6["tmin"] = sim["tmin"] - obs["tmin"] expected6 = Dataset( {"tmin": ("x", np.ones(5)), "tmax": ("x", sim["tmax"].values)}, obs.coords ) assert_identical(actual6, expected6) actual7 = sim.copy() actual7["tmin"] -= obs["tmin"] assert_identical(actual7, expected6) def test_stack_unstack(self) -> None: orig = DataArray( [[0, 1], [2, 3]], dims=["x", "y"], attrs={"foo": 2}, ) assert_identical(orig, orig.unstack()) # test GH3000 a = orig[:0, :1].stack(new_dim=("x", "y")).indexes["new_dim"] b = pd.MultiIndex( levels=[ pd.Index([], dtype=np.int64), # type: ignore[list-item,unused-ignore] pd.Index([0], dtype=np.int64), # type: ignore[list-item,unused-ignore] ], codes=[[], []], names=["x", "y"], ) pd.testing.assert_index_equal(a, b) actual = orig.stack(z=["x", "y"]).unstack("z").drop_vars(["x", "y"]) assert_identical(orig, actual) actual = orig.stack(z=[...]).unstack("z").drop_vars(["x", "y"]) assert_identical(orig, actual) dims = ["a", "b", "c", "d", "e"] coords = { "a": [0], "b": [1, 2], "c": [3, 4, 5], "d": [6, 7], "e": [8], } orig = xr.DataArray(np.random.rand(1, 2, 3, 2, 1), coords=coords, dims=dims) stacked = orig.stack(ab=["a", "b"], cd=["c", "d"]) unstacked = stacked.unstack(["ab", "cd"]) assert_identical(orig, unstacked.transpose(*dims)) unstacked = stacked.unstack() assert_identical(orig, unstacked.transpose(*dims)) def test_stack_unstack_decreasing_coordinate(self) -> None: # regression test for GH980 orig = DataArray( np.random.rand(3, 4), dims=("y", "x"), coords={"x": np.arange(4), "y": np.arange(3, 0, -1)}, ) stacked = orig.stack(allpoints=["y", "x"]) actual = stacked.unstack("allpoints") assert_identical(orig, actual) def test_unstack_pandas_consistency(self) -> None: df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]}) s = df.set_index(["x", "y"])["foo"] expected = DataArray(s.unstack(), name="foo") actual = DataArray(s, dims="z").unstack("z") assert_identical(expected, actual) def test_unstack_requires_unique(self) -> None: df = pd.DataFrame({"foo": range(2), "x": ["a", "a"], "y": [0, 0]}) s = df.set_index(["x", "y"])["foo"] with pytest.raises( ValueError, match="Cannot unstack MultiIndex containing duplicates" ): DataArray(s, dims="z").unstack("z") @pytest.mark.filterwarnings("error") def test_unstack_roundtrip_integer_array(self) -> None: arr = xr.DataArray( np.arange(6).reshape(2, 3), coords={"x": ["a", "b"], "y": [0, 1, 2]}, dims=["x", "y"], ) stacked = arr.stack(z=["x", "y"]) roundtripped = stacked.unstack() assert_identical(arr, roundtripped) def test_stack_nonunique_consistency(self, da) -> None: da = da.isel(time=0, drop=True) # 2D actual = da.stack(z=["a", "x"]) expected = DataArray(da.to_pandas().stack(), dims="z") assert_identical(expected, actual) def test_to_unstacked_dataset_raises_value_error(self) -> None: data = DataArray([0, 1], dims="x", coords={"x": [0, 1]}) with pytest.raises(ValueError, match="'x' is not a stacked coordinate"): data.to_unstacked_dataset("x", 0) def test_transpose(self) -> None: da = DataArray( np.random.randn(3, 4, 5), dims=("x", "y", "z"), coords={ "x": range(3), "y": range(4), "z": range(5), "xy": (("x", "y"), np.random.randn(3, 4)), }, ) actual = da.transpose(transpose_coords=False) expected = DataArray(da.values.T, dims=("z", "y", "x"), coords=da.coords) assert_equal(expected, actual) actual = 
da.transpose("z", "y", "x", transpose_coords=True) expected = DataArray( da.values.T, dims=("z", "y", "x"), coords={ "x": da.x.values, "y": da.y.values, "z": da.z.values, "xy": (("y", "x"), da.xy.values.T), }, ) assert_equal(expected, actual) # same as previous but with ellipsis actual = da.transpose("z", ..., "x", transpose_coords=True) assert_equal(expected, actual) # same as previous but with a missing dimension actual = da.transpose( "z", "y", "x", "not_a_dim", transpose_coords=True, missing_dims="ignore" ) assert_equal(expected, actual) with pytest.raises(ValueError): da.transpose("x", "y") with pytest.raises(ValueError): da.transpose("not_a_dim", "z", "x", ...) with pytest.warns(UserWarning): da.transpose("not_a_dim", "y", "x", ..., missing_dims="warn") def test_squeeze(self) -> None: assert_equal(self.dv.variable.squeeze(), self.dv.squeeze().variable) def test_squeeze_drop(self) -> None: array = DataArray([1], [("x", [0])]) expected = DataArray(1) actual = array.squeeze(drop=True) assert_identical(expected, actual) expected = DataArray(1, {"x": 0}) actual = array.squeeze(drop=False) assert_identical(expected, actual) array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"]) expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_2"]) actual = array.squeeze(axis=0) assert_identical(expected, actual) array = DataArray([[[[0.0, 1.0]]]], dims=["dim_0", "dim_1", "dim_2", "dim_3"]) expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_3"]) actual = array.squeeze(axis=(0, 2)) assert_identical(expected, actual) array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"]) with pytest.raises(ValueError): array.squeeze(axis=0, dim="dim_1") def test_drop_coordinates(self) -> None: expected = DataArray(np.random.randn(2, 3), dims=["x", "y"]) arr = expected.copy() arr.coords["z"] = 2 actual = arr.drop_vars("z") assert_identical(expected, actual) with pytest.raises(ValueError): arr.drop_vars("not found") actual = expected.drop_vars("not found", errors="ignore") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"cannot be found"): arr.drop_vars("w") actual = expected.drop_vars("w", errors="ignore") assert_identical(actual, expected) renamed = arr.rename("foo") with pytest.raises(ValueError, match=r"cannot be found"): renamed.drop_vars("foo") actual = renamed.drop_vars("foo", errors="ignore") assert_identical(actual, renamed) def test_drop_vars_callable(self) -> None: A = DataArray( np.random.randn(2, 3), dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4, 5]} ) expected = A.drop_vars(["x", "y"]) actual = A.drop_vars(lambda x: x.indexes) assert_identical(expected, actual) def test_drop_multiindex_level(self) -> None: # GH6505 expected = self.mda.drop_vars(["x", "level_1", "level_2"]) with pytest.warns(DeprecationWarning): actual = self.mda.drop_vars("level_1") assert_identical(expected, actual) def test_drop_all_multiindex_levels(self) -> None: dim_levels = ["x", "level_1", "level_2"] actual = self.mda.drop_vars(dim_levels) # no error, multi-index dropped for key in dim_levels: assert key not in actual.xindexes def test_drop_index_labels(self) -> None: arr = DataArray(np.random.randn(2, 3), coords={"y": [0, 1, 2]}, dims=["x", "y"]) actual = arr.drop_sel(y=[0, 1]) expected = arr[:, 2:] assert_identical(actual, expected) with pytest.raises((KeyError, ValueError), match=r"not .* in axis"): actual = arr.drop_sel(y=[0, 1, 3]) actual = arr.drop_sel(y=[0, 1, 3], errors="ignore") assert_identical(actual, expected) with pytest.warns(DeprecationWarning): arr.drop([0, 1, 
3], dim="y", errors="ignore") # type: ignore[arg-type] def test_drop_index_positions(self) -> None: arr = DataArray(np.random.randn(2, 3), dims=["x", "y"]) actual = arr.drop_isel(y=[0, 1]) expected = arr[:, 2:] assert_identical(actual, expected) def test_drop_indexes(self) -> None: arr = DataArray([1, 2, 3], coords={"x": ("x", [1, 2, 3])}, dims="x") actual = arr.drop_indexes("x") assert "x" not in actual.xindexes actual = arr.drop_indexes("not_a_coord", errors="ignore") assert_identical(actual, arr) def test_dropna(self) -> None: x = np.random.randn(4, 4) x[::2, 0] = np.nan arr = DataArray(x, dims=["a", "b"]) actual = arr.dropna("a") expected = arr[1::2] assert_identical(actual, expected) actual = arr.dropna("b", how="all") assert_identical(actual, arr) actual = arr.dropna("a", thresh=1) assert_identical(actual, arr) actual = arr.dropna("b", thresh=3) expected = arr[:, 1:] assert_identical(actual, expected) def test_where(self) -> None: arr = DataArray(np.arange(4), dims="x") expected = arr.sel(x=slice(2)) actual = arr.where(arr.x < 2, drop=True) assert_identical(actual, expected) def test_where_lambda(self) -> None: arr = DataArray(np.arange(4), dims="y") expected = arr.sel(y=slice(2)) actual = arr.where(lambda x: x.y < 2, drop=True) assert_identical(actual, expected) def test_where_other_lambda(self) -> None: arr = DataArray(np.arange(4), dims="y") expected = xr.concat( [arr.sel(y=slice(2)), arr.sel(y=slice(2, None)) + 1], dim="y" ) actual = arr.where(lambda x: x.y < 2, lambda x: x + 1) assert_identical(actual, expected) def test_where_string(self) -> None: array = DataArray(["a", "b"]) expected = DataArray(np.array(["a", np.nan], dtype=object)) actual = array.where([True, False]) assert_identical(actual, expected) def test_cumops(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) actual = orig.cumsum() expected = DataArray([[-1, -1, 0], [-4, -4, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumsum("x") expected = DataArray([[-1, 0, 1], [-4, 0, 4]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumsum("y") expected = DataArray([[-1, -1, 0], [-3, -3, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumprod("x") expected = DataArray([[-1, 0, 1], [3, 0, 3]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumprod("y") expected = DataArray([[-1, 0, 0], [-3, 0, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) def test_reduce(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) actual = orig.mean() expected = DataArray(0, {"c": -999}) assert_identical(expected, actual) actual = orig.mean(["x", "y"]) assert_identical(expected, actual) actual = orig.mean("x") expected = DataArray([-2, 0, 2], {"y": coords["y"], "c": -999}, "y") assert_identical(expected, actual) actual = orig.mean(["x"]) assert_identical(expected, actual) actual = orig.mean("y") expected = DataArray([0, 0], {"x": coords["x"], "c": -999}, "x") assert_identical(expected, actual) assert_equal(self.dv.reduce(np.mean, "x").variable, self.v.reduce(np.mean, "x")) orig = DataArray([[1, 0, np.nan], [3, 0, 3]], coords, dims=["x", "y"]) actual = orig.count() expected = DataArray(5, {"c": -999}) assert_identical(expected, actual) 
# uint support orig = DataArray(np.arange(6).reshape(3, 2).astype("uint"), dims=["x", "y"]) assert orig.dtype.kind == "u" actual = orig.mean(dim="x", skipna=True) expected = DataArray(orig.values.astype(int), dims=["x", "y"]).mean("x") assert_equal(actual, expected) def test_reduce_keepdims(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) # Mean on all axes loses non-constant coordinates actual = orig.mean(keepdims=True) expected = DataArray( orig.data.mean(keepdims=True), dims=orig.dims, coords={k: v for k, v in coords.items() if k == "c"}, ) assert_equal(actual, expected) assert actual.sizes["x"] == 1 assert actual.sizes["y"] == 1 # Mean on specific axes loses coordinates not involving that axis actual = orig.mean("y", keepdims=True) expected = DataArray( orig.data.mean(axis=1, keepdims=True), dims=orig.dims, coords={k: v for k, v in coords.items() if k not in ["y", "lat"]}, ) assert_equal(actual, expected) @requires_bottleneck def test_reduce_keepdims_bottleneck(self) -> None: import bottleneck coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) # Bottleneck does not have its own keepdims implementation actual = orig.reduce(bottleneck.nanmean, keepdims=True) expected = orig.mean(keepdims=True) assert_equal(actual, expected) def test_reduce_dtype(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) for dtype in [np.float16, np.float32, np.float64]: assert orig.astype(float).mean(dtype=dtype).dtype == dtype def test_reduce_out(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) with pytest.raises(TypeError): orig.mean(out=np.ones(orig.shape)) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize( "axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True), ) def test_quantile(self, q, axis, dim, skipna, compute_backend) -> None: va = self.va.copy(deep=True) va[0, 0] = np.nan actual = DataArray(va).quantile(q, dim=dim, keep_attrs=True, skipna=skipna) _percentile_func = np.nanpercentile if skipna in (True, None) else np.percentile expected = _percentile_func(va.values, np.array(q) * 100, axis=axis) np.testing.assert_allclose(actual.values, expected) if is_scalar(q): assert "quantile" not in actual.dims else: assert "quantile" in actual.dims assert actual.attrs == self.attrs @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_method(self, method) -> None: q = [0.25, 0.5, 0.75] actual = DataArray(self.va).quantile(q, method=method) expected = np.nanquantile(self.dv.values, np.array(q), method=method) np.testing.assert_allclose(actual.values, expected) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_interpolation_deprecated(self, method) -> None: da = DataArray(self.va) q = [0.25, 0.5, 
0.75] with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): actual = da.quantile(q, interpolation=method) expected = da.quantile(q, method=method) np.testing.assert_allclose(actual.values, expected.values) with warnings.catch_warnings(record=True): with pytest.raises(TypeError, match="interpolation and method keywords"): da.quantile(q, method=method, interpolation=method) def test_reduce_keep_attrs(self) -> None: # Test default behavior (keeps attrs for reduction operations) vm = self.va.mean() assert len(vm.attrs) == len(self.attrs) assert vm.attrs == self.attrs # Test explicitly keeping attrs vm = self.va.mean(keep_attrs=True) assert len(vm.attrs) == len(self.attrs) assert vm.attrs == self.attrs # Test explicitly dropping attrs vm = self.va.mean(keep_attrs=False) assert len(vm.attrs) == 0 assert vm.attrs == {} def test_assign_attrs(self) -> None: expected = DataArray([], attrs=dict(a=1, b=2)) expected.attrs["a"] = 1 expected.attrs["b"] = 2 new = DataArray([]) actual = DataArray([]).assign_attrs(a=1, b=2) assert_identical(actual, expected) assert new.attrs == {} expected.attrs["c"] = 3 new_actual = actual.assign_attrs({"c": 3}) assert_identical(new_actual, expected) assert actual.attrs == {"a": 1, "b": 2} def test_drop_attrs(self) -> None: # Mostly tested in test_dataset.py, but adding a very small test here coord_ = DataArray([], attrs=dict(d=3, e=4)) da = DataArray([], attrs=dict(a=1, b=2)).assign_coords(dict(coord_=coord_)) assert da.drop_attrs().attrs == {} assert da.drop_attrs().coord_.attrs == {} assert da.drop_attrs(deep=False).coord_.attrs == dict(d=3, e=4) @pytest.mark.parametrize( "func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs] ) def test_propagate_attrs(self, func) -> None: da = DataArray(self.va) # test defaults assert func(da).attrs == da.attrs with set_options(keep_attrs=False): assert func(da).attrs == {} with set_options(keep_attrs=True): assert func(da).attrs == da.attrs def test_fillna(self) -> None: a = DataArray([np.nan, 1, np.nan, 3], coords={"x": range(4)}, dims="x") actual = a.fillna(-1) expected = DataArray([-1, 1, -1, 3], coords={"x": range(4)}, dims="x") assert_identical(expected, actual) b = DataArray(range(4), coords={"x": range(4)}, dims="x") actual = a.fillna(b) expected = b.copy() assert_identical(expected, actual) actual = a.fillna(np.arange(4)) assert_identical(expected, actual) actual = a.fillna(b[:3]) assert_identical(expected, actual) actual = a.fillna(b[:0]) assert_identical(a, actual) with pytest.raises(TypeError, match=r"fillna on a DataArray"): a.fillna({0: 0}) with pytest.raises(ValueError, match=r"broadcast"): a.fillna(np.array([1, 2])) def test_align(self) -> None: array = DataArray( np.random.random((6, 8)), coords={"x": list("abcdef")}, dims=["x", "y"] ) array1, array2 = align(array, array[:5], join="inner") assert_identical(array1, array[:5]) assert_identical(array2, array[:5]) def test_align_dtype(self) -> None: # regression test for #264 x1 = np.arange(30) x2 = np.arange(5, 35) a = DataArray(np.random.random((30,)).astype(np.float32), [("x", x1)]) b = DataArray(np.random.random((30,)).astype(np.float32), [("x", x2)]) c, _d = align(a, b, join="outer") assert c.dtype == np.float32 def test_align_copy(self) -> None: x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])]) y = DataArray([1, 2], coords=[("a", [3, 1])]) expected_x2 = x expected_y2 = DataArray([2, np.nan, 1], coords=[("a", [1, 2, 3])]) x2, y2 = align(x, y, join="outer", copy=False) 
assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is source_ndarray(x.data) x2, y2 = align(x, y, join="outer", copy=True) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is not source_ndarray(x.data) # Trivial align - 1 element x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])]) (x2,) = align(x, copy=False) assert_identical(x, x2) assert source_ndarray(x2.data) is source_ndarray(x.data) (x2,) = align(x, copy=True) assert_identical(x, x2) assert source_ndarray(x2.data) is not source_ndarray(x.data) def test_align_override(self) -> None: left = DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]}) right = DataArray( np.arange(9).reshape((3, 3)), dims=["x", "y"], coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]}, ) expected_right = DataArray( np.arange(9).reshape(3, 3), dims=["x", "y"], coords={"x": [0, 1, 2], "y": [1, 2, 3]}, ) new_left, new_right = align(left, right, join="override") assert_identical(left, new_left) assert_identical(new_right, expected_right) new_left, new_right = align(left, right, exclude="x", join="override") assert_identical(left, new_left) assert_identical(right, new_right) new_left, new_right = xr.align( left.isel(x=0, drop=True), right, exclude="x", join="override" ) assert_identical(left.isel(x=0, drop=True), new_left) assert_identical(right, new_right) with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): align(left.isel(x=0).expand_dims("x"), right, join="override") @pytest.mark.parametrize( "darrays", [ [ DataArray(0), DataArray([1], [("x", [1])]), DataArray([2, 3], [("x", [2, 3])]), ], [ DataArray([2, 3], [("x", [2, 3])]), DataArray([1], [("x", [1])]), DataArray(0), ], ], ) def test_align_override_error(self, darrays) -> None: with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): xr.align(*darrays, join="override") def test_align_exclude(self) -> None: x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, 20]), ("b", [5, 6])]) z = DataArray([1], dims=["a"], coords={"a": [20], "b": 7}) x2, y2, z2 = align(x, y, z, join="outer", exclude=["b"]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray( [[np.nan, np.nan], [1, 2], [3, 4]], coords=[("a", [-2, -1, 20]), ("b", [5, 6])], ) expected_z2 = DataArray( [np.nan, np.nan, 1], dims=["a"], coords={"a": [-2, -1, 20], "b": 7} ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert_identical(expected_z2, z2) def test_align_indexes(self) -> None: x = DataArray([1, 2, 3], coords=[("a", [-1, 10, -2])]) y = DataArray([1, 2], coords=[("a", [-2, -1])]) x2, y2 = align(x, y, join="outer", indexes={"a": [10, -1, -2]}) expected_x2 = DataArray([2, 1, 3], coords=[("a", [10, -1, -2])]) expected_y2 = DataArray([np.nan, 2, 1], coords=[("a", [10, -1, -2])]) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) (x2,) = align(x, join="outer", indexes={"a": [-2, 7, 10, -1]}) expected_x2 = DataArray([3, np.nan, 2, 1], coords=[("a", [-2, 7, 10, -1])]) assert_identical(expected_x2, x2) def test_align_without_indexes_exclude(self) -> None: arrays = [DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"])] result0, result1 = align(*arrays, exclude=["x"]) assert_identical(result0, arrays[0]) assert_identical(result1, arrays[1]) def test_align_mixed_indexes(self) -> None: array_no_coord = DataArray([1, 
2], dims=["x"]) array_with_coord = DataArray([1, 2], coords=[("x", ["a", "b"])]) result0, result1 = align(array_no_coord, array_with_coord) assert_identical(result0, array_with_coord) assert_identical(result1, array_with_coord) result0, result1 = align(array_no_coord, array_with_coord, exclude=["x"]) assert_identical(result0, array_no_coord) assert_identical(result1, array_with_coord) def test_align_without_indexes_errors(self) -> None: with pytest.raises( ValueError, match=r"cannot.*align.*dimension.*conflicting.*sizes.*", ): align(DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"])) with pytest.raises( ValueError, match=r"cannot.*align.*dimension.*conflicting.*sizes.*", ): align( DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], coords=[("x", [0, 1])]), ) def test_align_str_dtype(self) -> None: a = DataArray([0, 1], dims=["x"], coords={"x": ["a", "b"]}) b = DataArray([1, 2], dims=["x"], coords={"x": ["b", "c"]}) expected_a = DataArray( [0, 1, np.nan], dims=["x"], coords={"x": ["a", "b", "c"]} ) expected_b = DataArray( [np.nan, 1, 2], dims=["x"], coords={"x": ["a", "b", "c"]} ) actual_a, actual_b = xr.align(a, b, join="outer") assert_identical(expected_a, actual_a) assert expected_a.x.dtype == actual_a.x.dtype assert_identical(expected_b, actual_b) assert expected_b.x.dtype == actual_b.x.dtype def test_broadcast_on_vs_off_global_option_different_dims(self) -> None: xda_1 = xr.DataArray([1], dims="x1") xda_2 = xr.DataArray([1], dims="x2") with xr.set_options(arithmetic_broadcast=True): expected_xda = xr.DataArray([[1.0]], dims=("x1", "x2")) actual_xda = xda_1 / xda_2 assert_identical(actual_xda, expected_xda) with xr.set_options(arithmetic_broadcast=False): with pytest.raises( ValueError, match=re.escape( "Broadcasting is necessary but automatic broadcasting is disabled via " "global option `'arithmetic_broadcast'`. " "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting." ), ): xda_1 / xda_2 @pytest.mark.parametrize("arithmetic_broadcast", [True, False]) def test_broadcast_on_vs_off_global_option_same_dims( self, arithmetic_broadcast: bool ) -> None: # Ensure that no error is raised when arithmetic broadcasting is disabled, # when broadcasting is not needed. The two DataArrays have the same # dimensions of the same size. 
xda_1 = xr.DataArray([1], dims="x") xda_2 = xr.DataArray([1], dims="x") expected_xda = xr.DataArray([2.0], dims=("x",)) with xr.set_options(arithmetic_broadcast=arithmetic_broadcast): assert_identical(xda_1 + xda_2, expected_xda) assert_identical(xda_1 + np.array([1.0]), expected_xda) assert_identical(np.array([1.0]) + xda_1, expected_xda) def test_broadcast_arrays(self) -> None: x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") y = DataArray([1, 2], coords=[("b", [3, 4])], name="y") x2, y2 = broadcast(x, y) expected_coords = [("a", [-1, -2]), ("b", [3, 4])] expected_x2 = DataArray([[1, 1], [2, 2]], expected_coords, name="x") expected_y2 = DataArray([[1, 2], [1, 2]], expected_coords, name="y") assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) x = DataArray(np.random.randn(2, 3), dims=["a", "b"]) y = DataArray(np.random.randn(3, 2), dims=["b", "a"]) x2, y2 = broadcast(x, y) expected_x2 = x expected_y2 = y.T assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_arrays_misaligned(self) -> None: # broadcast on misaligned coords must auto-align x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([1, 2], coords=[("a", [-1, 20])]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray( [[np.nan, np.nan], [1, 1], [2, 2]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) x2, y2 = broadcast(x, y) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_arrays_nocopy(self) -> None: # Test that input data is not copied over in case # no alteration is needed x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") y = DataArray(3, name="y") expected_x2 = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") expected_y2 = DataArray([3, 3], coords=[("a", [-1, -2])], name="y") x2, y2 = broadcast(x, y) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is source_ndarray(x.data) # single-element broadcast (trivial case) (x2,) = broadcast(x) assert_identical(x, x2) assert source_ndarray(x2.data) is source_ndarray(x.data) def test_broadcast_arrays_exclude(self) -> None: x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([1, 2], coords=[("a", [-1, 20])]) z = DataArray(5, coords={"b": 5}) x2, y2, z2 = broadcast(x, y, z, exclude=["b"]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray([np.nan, 1, 2], coords=[("a", [-2, -1, 20])]) expected_z2 = DataArray( [5, 5, 5], dims=["a"], coords={"a": [-2, -1, 20], "b": 5} ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert_identical(expected_z2, z2) def test_broadcast_coordinates(self) -> None: # regression test for GH649 ds = Dataset({"a": (["x", "y"], np.ones((5, 6)))}) x_bc, y_bc, a_bc = broadcast(ds.x, ds.y, ds.a) assert_identical(ds.a, a_bc) X, Y = np.meshgrid(np.arange(5), np.arange(6), indexing="ij") exp_x = DataArray(X, dims=["x", "y"], name="x") exp_y = DataArray(Y, dims=["x", "y"], name="y") assert_identical(exp_x, x_bc) assert_identical(exp_y, y_bc) def test_to_pandas(self) -> None: # 0d actual_xr = DataArray(42).to_pandas() expected = np.array(42) assert_array_equal(actual_xr, expected) # 1d values = np.random.randn(3) index = pd.Index(["a", "b", "c"], name="x") da = DataArray(values, coords=[index]) actual_s = da.to_pandas() assert_array_equal(np.asarray(actual_s.values), 
values) assert_array_equal(actual_s.index, index) assert_array_equal(actual_s.index.name, "x") # 2d values = np.random.randn(3, 2) da = DataArray( values, coords=[("x", ["a", "b", "c"]), ("y", [0, 1])], name="foo" ) actual_df = da.to_pandas() assert_array_equal(np.asarray(actual_df.values), values) assert_array_equal(actual_df.index, ["a", "b", "c"]) assert_array_equal(actual_df.columns, [0, 1]) # roundtrips for shape in [(3,), (3, 4)]: dims = list("abc")[: len(shape)] da = DataArray(np.random.randn(*shape), dims=dims) roundtripped = DataArray(da.to_pandas()).drop_vars(dims) assert_identical(da, roundtripped) with pytest.raises(ValueError, match=r"Cannot convert"): DataArray(np.random.randn(1, 2, 3, 4, 5)).to_pandas() def test_to_dataframe(self) -> None: # regression test for #260 arr_np = np.random.randn(3, 4) arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo") expected_s = arr.to_series() actual_s = arr.to_dataframe()["foo"] assert_array_equal(np.asarray(expected_s.values), np.asarray(actual_s.values)) assert_array_equal(np.asarray(expected_s.name), np.asarray(actual_s.name)) assert_array_equal(expected_s.index.values, actual_s.index.values) actual_s = arr.to_dataframe(dim_order=["A", "B"])["foo"] assert_array_equal(arr_np.transpose().reshape(-1), np.asarray(actual_s.values)) # regression test for coords with different dimensions arr.coords["C"] = ("B", [-1, -2, -3]) expected_df = arr.to_series().to_frame() expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4 expected_df = expected_df[["C", "foo"]] actual_df = arr.to_dataframe() assert_array_equal(np.asarray(expected_df.values), np.asarray(actual_df.values)) assert_array_equal(expected_df.columns.values, actual_df.columns.values) assert_array_equal(expected_df.index.values, actual_df.index.values) with pytest.raises(ValueError, match="does not match the set of dimensions"): arr.to_dataframe(dim_order=["B", "A", "C"]) with pytest.raises(ValueError, match=r"cannot convert a scalar"): arr.sel(A="c", B=2).to_dataframe() arr.name = None # unnamed with pytest.raises(ValueError, match=r"unnamed"): arr.to_dataframe() def test_to_dataframe_multiindex(self) -> None: # regression test for #3008 arr_np = np.random.randn(4, 3) mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"]) arr = DataArray(arr_np, [("MI", mindex), ("C", [5, 6, 7])], name="foo") actual = arr.to_dataframe() index_pd = actual.index assert isinstance(index_pd, pd.MultiIndex) assert_array_equal(np.asarray(actual["foo"].values), arr_np.flatten()) assert_array_equal(index_pd.names, list("ABC")) assert_array_equal(index_pd.levels[0], [1, 2]) assert_array_equal(index_pd.levels[1], ["a", "b"]) assert_array_equal(index_pd.levels[2], [5, 6, 7]) # test converting a dataframe MultiIndexed along a single dimension mindex_single = pd.MultiIndex.from_product( [list(range(6)), list("ab")], names=["A", "B"] ) arr_multi_single = DataArray( arr_np.flatten(), [("MI", mindex_single)], dims="MI", name="test" ) actual_df = arr_multi_single.to_dataframe() expected_df = arr_multi_single.to_series().to_frame() assert expected_df.equals(actual_df) def test_to_dataframe_0length(self) -> None: # regression test for #3008 arr_np = np.random.randn(4, 0) mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"]) arr = DataArray(arr_np, [("MI", mindex), ("C", [])], name="foo") actual = arr.to_dataframe() assert len(actual) == 0 assert_array_equal(actual.index.names, list("ABC")) @pytest.mark.parametrize( "x_dtype,y_dtype,v_dtype", [ (np.uint32, 
np.float32, np.uint32), (np.int16, np.float64, np.int64), (np.uint8, np.float32, np.uint16), (np.int32, np.float32, np.int8), ], ) def test_to_dataframe_coord_dtypes_2d(self, x_dtype, y_dtype, v_dtype) -> None: x = np.array([1], dtype=x_dtype) y = np.array([1.0], dtype=y_dtype) v = np.array([[42]], dtype=v_dtype) da = DataArray(v, dims=["x", "y"], coords={"x": x, "y": y}) df = da.to_dataframe(name="v").reset_index() # Check that coordinate dtypes are preserved assert df["x"].dtype == np.dtype(x_dtype), ( f"x coord: expected {x_dtype}, got {df['x'].dtype}" ) assert df["y"].dtype == np.dtype(y_dtype), ( f"y coord: expected {y_dtype}, got {df['y'].dtype}" ) assert df["v"].dtype == np.dtype(v_dtype), ( f"v data: expected {v_dtype}, got {df['v'].dtype}" ) @requires_dask_expr @requires_dask @pytest.mark.xfail(not has_dask_ge_2025_1_0, reason="dask-expr is broken") def test_to_dask_dataframe(self) -> None: arr_np = np.arange(3 * 4).reshape(3, 4) arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo") expected_s = arr.to_series() actual = arr.to_dask_dataframe()["foo"] assert_array_equal(actual.values, np.asarray(expected_s.values)) actual = arr.to_dask_dataframe(dim_order=["A", "B"])["foo"] assert_array_equal(arr_np.transpose().reshape(-1), actual.values) # regression test for coords with different dimensions arr.coords["C"] = ("B", [-1, -2, -3]) expected_df = arr.to_series().to_frame() expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4 expected_df = expected_df[["C", "foo"]] actual = arr.to_dask_dataframe()[["C", "foo"]] assert_array_equal(expected_df.values, np.asarray(actual.values)) assert_array_equal( expected_df.columns.values, np.asarray(actual.columns.values) ) with pytest.raises(ValueError, match="does not match the set of dimensions"): arr.to_dask_dataframe(dim_order=["B", "A", "C"]) arr.name = None with pytest.raises( ValueError, match="Cannot convert an unnamed DataArray", ): arr.to_dask_dataframe() def test_to_pandas_name_matches_coordinate(self) -> None: # coordinate with same name as array arr = DataArray([1, 2, 3], dims="x", name="x") series = arr.to_series() assert_array_equal([1, 2, 3], list(series.values)) assert_array_equal([0, 1, 2], list(series.index.values)) assert "x" == series.name assert "x" == series.index.name frame = arr.to_dataframe() expected = series.to_frame() assert expected.equals(frame) def test_to_and_from_series(self) -> None: expected = self.dv.to_dataframe()["foo"] actual = self.dv.to_series() assert_array_equal(expected.values, actual.values) assert_array_equal(expected.index.values, actual.index.values) assert "foo" == actual.name # test roundtrip assert_identical(self.dv, DataArray.from_series(actual).drop_vars(["x", "y"])) # test name is None actual.name = None expected_da = self.dv.rename(None) assert_identical( expected_da, DataArray.from_series(actual).drop_vars(["x", "y"]) ) def test_from_series_multiindex(self) -> None: # GH:3951 df = pd.DataFrame({"B": [1, 2, 3], "A": [4, 5, 6]}) df = df.rename_axis("num").rename_axis("alpha", axis=1) actual = df.stack("alpha").to_xarray() assert (actual.sel(alpha="B") == [1, 2, 3]).all() assert (actual.sel(alpha="A") == [4, 5, 6]).all() @requires_sparse def test_from_series_sparse(self) -> None: import sparse series = pd.Series([1, 2], index=[("a", 1), ("b", 2)]) actual_sparse = DataArray.from_series(series, sparse=True) actual_dense = DataArray.from_series(series, sparse=False) assert isinstance(actual_sparse.data, sparse.COO) actual_sparse.data = actual_sparse.data.todense() 
assert_identical(actual_sparse, actual_dense) @requires_sparse def test_from_multiindex_series_sparse(self) -> None: # regression test for GH4019 import sparse idx = pd.MultiIndex.from_product( [list(np.arange(3)), list(np.arange(5))], names=["a", "b"] ) series: pd.Series = pd.Series( np.random.default_rng(0).random(len(idx)), index=idx ).sample(n=5, random_state=3) dense = DataArray.from_series(series, sparse=False) expected_coords = sparse.COO.from_numpy(dense.data, np.nan).coords actual_sparse = xr.DataArray.from_series(series, sparse=True) actual_coords = actual_sparse.data.coords np.testing.assert_equal(actual_coords, expected_coords) def test_nbytes_does_not_load_data(self) -> None: array = InaccessibleArray(np.zeros((3, 3), dtype="uint8")) da = xr.DataArray(array, dims=["x", "y"]) # If xarray tries to instantiate the InaccessibleArray to compute # nbytes, the following will raise an error. # However, it should still be able to accurately give us information # about the number of bytes from the metadata assert da.nbytes == 9 # Here we confirm that this does not depend on array having the # nbytes property, since it isn't really required by the array # interface. nbytes is more a property of arrays that have been # cast to numpy arrays. assert not hasattr(array, "nbytes") def test_to_and_from_empty_series(self) -> None: # GH697 expected: pd.Series[Any] = pd.Series([], dtype=np.float64) da = DataArray.from_series(expected) assert len(da) == 0 actual = da.to_series() assert len(actual) == 0 assert expected.equals(actual) def test_series_categorical_index(self) -> None: # regression test for GH700 if not hasattr(pd, "CategoricalIndex"): pytest.skip("requires pandas with CategoricalIndex") s = pd.Series(np.arange(5), index=pd.CategoricalIndex(list("aabbc"))) arr = DataArray(s) assert "a a b b" in repr(arr) # should not error @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("data", ["list", "array", True]) @pytest.mark.parametrize("encoding", [True, False]) def test_to_and_from_dict( self, encoding: bool, data: bool | Literal["list", "array"], use_dask: bool ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") encoding_data = {"bar": "spam"} array = DataArray( np.random.randn(2, 3), {"x": ["a", "b"]}, ["x", "y"], name="foo" ) array.encoding = encoding_data return_data = array.to_numpy() coords_data = np.array(["a", "b"]) if data == "list" or data is True: return_data = return_data.tolist() coords_data = coords_data.tolist() expected: dict[str, Any] = { "name": "foo", "dims": ("x", "y"), "data": return_data, "attrs": {}, "coords": {"x": {"dims": ("x",), "data": coords_data, "attrs": {}}}, } if encoding: expected["encoding"] = encoding_data if has_dask: da = array.chunk() else: da = array if data == "array" or data is False: with raise_if_dask_computes(): actual = da.to_dict(encoding=encoding, data=data) else: actual = da.to_dict(encoding=encoding, data=data) # check that they are identical np.testing.assert_equal(expected, actual) # check roundtrip assert_identical(da, DataArray.from_dict(actual)) # a more bare bones representation still roundtrips d = { "name": "foo", "dims": ("x", "y"), "data": da.values.tolist(), "coords": {"x": {"dims": "x", "data": ["a", "b"]}}, } assert_identical(da, DataArray.from_dict(d)) # and the most bare bones representation still roundtrips d = {"name": "foo", "dims": ("x", "y"), "data": da.values} assert_identical(da.drop_vars("x"), DataArray.from_dict(d)) # missing a dims in the coords d = { "dims": ("x", "y"), 
"data": da.values, "coords": {"x": {"data": ["a", "b"]}}, } with pytest.raises( ValueError, match=r"cannot convert dict when coords are missing the key 'dims'", ): DataArray.from_dict(d) # this one is missing some necessary information d = {"dims": "t"} with pytest.raises( ValueError, match=r"cannot convert dict without the key 'data'" ): DataArray.from_dict(d) # check the data=False option expected_no_data = expected.copy() del expected_no_data["data"] del expected_no_data["coords"]["x"]["data"] endiantype = "U1" expected_no_data["coords"]["x"].update({"dtype": endiantype, "shape": (2,)}) expected_no_data.update({"dtype": "float64", "shape": (2, 3)}) actual_no_data = da.to_dict(data=False, encoding=encoding) assert expected_no_data == actual_no_data def test_to_and_from_dict_with_time_dim(self) -> None: x = np.random.randn(10, 3) t = pd.date_range("20130101", periods=10) lat = [77.7, 83.2, 76] da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"]) roundtripped = DataArray.from_dict(da.to_dict()) assert_identical(da, roundtripped) def test_to_and_from_dict_with_nan_nat(self) -> None: y = np.random.randn(10, 3) y[2] = np.nan t = pd.Series(pd.date_range("20130101", periods=10)) t[2] = np.nan lat = [77.7, 83.2, 76] da = DataArray(y, {"t": t, "lat": lat}, dims=["t", "lat"]) roundtripped = DataArray.from_dict(da.to_dict()) assert_identical(da, roundtripped) def test_to_dict_with_numpy_attrs(self) -> None: # this doesn't need to roundtrip x = np.random.randn(10, 3) t = list("abcdefghij") lat = [77.7, 83.2, 76] attrs = { "created": np.float64(1998), "coords": np.array([37, -110.1, 100]), "maintainer": "bar", } da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"], attrs=attrs) expected_attrs = { "created": attrs["created"].item(), # type: ignore[attr-defined] "coords": attrs["coords"].tolist(), # type: ignore[attr-defined] "maintainer": "bar", } actual = da.to_dict() # check that they are identical assert expected_attrs == actual["attrs"] def test_to_masked_array(self) -> None: rs = np.random.default_rng(44) x = rs.random(size=(10, 20)) x_masked = np.ma.masked_where(x < 0.5, x) da = DataArray(x_masked) # Test round trip x_masked_2 = da.to_masked_array() da_2 = DataArray(x_masked_2) assert_array_equal(x_masked, x_masked_2) assert_equal(da, da_2) da_masked_array = da.to_masked_array(copy=True) assert isinstance(da_masked_array, np.ma.MaskedArray) # Test masks assert_array_equal(da_masked_array.mask, x_masked.mask) # Test that mask is unpacked correctly assert_array_equal(da.values, x_masked.filled(np.nan)) # Test that the underlying data (including nans) hasn't changed assert_array_equal(da_masked_array, x_masked.filled(np.nan)) # Test that copy=False gives access to values masked_array = da.to_masked_array(copy=False) masked_array[0, 0] = 10.0 assert masked_array[0, 0] == 10.0 assert da[0, 0].values == 10.0 assert masked_array.base is da.values assert isinstance(masked_array, np.ma.MaskedArray) # Test with some odd arrays for v in [4, np.nan, True, "4", "four"]: da = DataArray(v) ma = da.to_masked_array() assert isinstance(ma, np.ma.MaskedArray) # Fix GH issue 684 - masked arrays mask should be an array not a scalar N = 4 v = range(N) da = DataArray(v) ma = da.to_masked_array() assert isinstance(ma.mask, np.ndarray) and len(ma.mask) == N def test_to_dataset_whole(self) -> None: unnamed = DataArray([1, 2], dims="x") with pytest.raises(ValueError, match=r"unable to convert unnamed"): unnamed.to_dataset() actual = unnamed.to_dataset(name="foo") expected = Dataset({"foo": ("x", [1, 2])}) 
assert_identical(expected, actual) named = DataArray([1, 2], dims="x", name="foo", attrs={"y": "testattr"}) actual = named.to_dataset() expected = Dataset({"foo": ("x", [1, 2], {"y": "testattr"})}) assert_identical(expected, actual) # Test promoting attrs actual = named.to_dataset(promote_attrs=True) expected = Dataset( {"foo": ("x", [1, 2], {"y": "testattr"})}, attrs={"y": "testattr"} ) assert_identical(expected, actual) with pytest.raises(TypeError): actual = named.to_dataset("bar") def test_to_dataset_split(self) -> None: array = DataArray( [[1, 2], [3, 4], [5, 6]], coords=[("x", list("abc")), ("y", [0.0, 0.1])], attrs={"a": 1}, ) expected = Dataset( {"a": ("y", [1, 2]), "b": ("y", [3, 4]), "c": ("y", [5, 6])}, coords={"y": [0.0, 0.1]}, attrs={"a": 1}, ) actual = array.to_dataset("x") assert_identical(expected, actual) with pytest.raises(TypeError): array.to_dataset("x", name="foo") roundtripped = actual.to_dataarray(dim="x") assert_identical(array, roundtripped) array = DataArray([1, 2, 3], dims="x") expected = Dataset({0: 1, 1: 2, 2: 3}) actual = array.to_dataset("x") assert_identical(expected, actual) def test_to_dataset_retains_keys(self) -> None: # use dates as convenient non-str objects. Not a specific date test import datetime dates = [datetime.date(2000, 1, d) for d in range(1, 4)] array = DataArray([1, 2, 3], coords=[("x", dates)], attrs={"a": 1}) # convert to dataset and back again result = array.to_dataset("x").to_dataarray(dim="x") assert_equal(array, result) def test_to_dataset_coord_value_is_dim(self) -> None: # github issue #7823 array = DataArray( np.zeros((3, 3)), coords={ # 'a' is both a coordinate value and the name of a coordinate "x": ["a", "b", "c"], "a": [1, 2, 3], }, ) with pytest.raises( ValueError, match=( re.escape("dimension 'x' would produce the variables ('a',)") + ".*" + re.escape("DataArray.rename(a=...) or DataArray.assign_coords(x=...)") ), ): array.to_dataset("x") # test error message formatting when there are multiple ambiguous # values/coordinates array2 = DataArray( np.zeros((3, 3, 2)), coords={ "x": ["a", "b", "c"], "a": [1, 2, 3], "b": [0.0, 0.1], }, ) with pytest.raises( ValueError, match=( re.escape("dimension 'x' would produce the variables ('a', 'b')") + ".*" + re.escape( "DataArray.rename(a=..., b=...) 
or DataArray.assign_coords(x=...)" ) ), ): array2.to_dataset("x") def test__title_for_slice(self) -> None: array = DataArray( np.ones((4, 3, 2)), dims=["a", "b", "c"], coords={"a": range(4), "b": range(3), "c": range(2)}, ) assert "" == array._title_for_slice() assert "c = 0" == array.isel(c=0)._title_for_slice() title = array.isel(b=1, c=0)._title_for_slice() assert title in {"b = 1, c = 0", "c = 0, b = 1"} a2 = DataArray(np.ones((4, 1)), dims=["a", "b"]) assert "" == a2._title_for_slice() def test__title_for_slice_truncate(self) -> None: array = DataArray(np.ones(4)) array.coords["a"] = "a" * 100 array.coords["b"] = "b" * 100 nchar = 80 title = array._title_for_slice(truncate=nchar) assert nchar == len(title) assert title.endswith("...") def test_dataarray_diff_n1(self) -> None: da = DataArray(np.random.randn(3, 4), dims=["x", "y"]) actual = da.diff("y") expected = DataArray(np.diff(da.values, axis=1), dims=["x", "y"]) assert_equal(expected, actual) def test_coordinate_diff(self) -> None: # regression test for GH634 arr = DataArray(range(0, 20, 2), dims=["lon"], coords=[range(10)]) lon = arr.coords["lon"] expected = DataArray([1] * 9, dims=["lon"], coords=[range(1, 10)], name="lon") actual = lon.diff("lon") assert_equal(expected, actual) @pytest.mark.parametrize("offset", [-5, 0, 1, 2]) @pytest.mark.parametrize("fill_value, dtype", [(2, int), (dtypes.NA, float)]) def test_shift(self, offset, fill_value, dtype) -> None: arr = DataArray([1, 2, 3], dims="x") actual = arr.shift(x=1, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan expected = DataArray([fill_value, 1, 2], dims="x") assert_identical(expected, actual) assert actual.dtype == dtype arr = DataArray([1, 2, 3], [("x", ["a", "b", "c"])]) expected = DataArray(arr.to_pandas().shift(offset)) actual = arr.shift(x=offset) assert_identical(expected, actual) def test_roll_coords(self) -> None: arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x") actual = arr.roll(x=1, roll_coords=True) expected = DataArray([3, 1, 2], coords=[("x", [2, 0, 1])]) assert_identical(expected, actual) def test_roll_no_coords(self) -> None: arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x") actual = arr.roll(x=1) expected = DataArray([3, 1, 2], coords=[("x", [0, 1, 2])]) assert_identical(expected, actual) def test_copy_with_data(self) -> None: orig = DataArray( np.random.random(size=(2, 2)), dims=("x", "y"), attrs={"attr1": "value1"}, coords={"x": [4, 3]}, name="helloworld", ) new_data = np.arange(4).reshape(2, 2) actual = orig.copy(data=new_data) expected = orig.copy() expected.data = new_data assert_identical(expected, actual) @pytest.mark.xfail(raises=AssertionError) @pytest.mark.parametrize( "deep, expected_orig", [ [ True, xr.DataArray( xr.IndexVariable("a", np.array([1, 2])), coords={"a": [1, 2]}, dims=["a"], ), ], [ False, xr.DataArray( xr.IndexVariable("a", np.array([999, 2])), coords={"a": [999, 2]}, dims=["a"], ), ], ], ) def test_copy_coords(self, deep, expected_orig) -> None: """The test fails for the shallow copy, and apparently only on Windows for some reason. 
        On Windows, coords seem to be immutable unless the DataArray was
        deep-copied from another."""
        da = xr.DataArray(
            np.ones([2, 2, 2]),
            coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]},
            dims=["a", "b", "c"],
        )
        da_cp = da.copy(deep)
        new_a = np.array([999, 2])
        da_cp.coords["a"] = da_cp["a"].copy(data=new_a)

        expected_cp = xr.DataArray(
            xr.IndexVariable("a", np.array([999, 2])),
            coords={"a": [999, 2]},
            dims=["a"],
        )
        assert_identical(da_cp["a"], expected_cp)

        assert_identical(da["a"], expected_orig)

    def test_real_and_imag(self) -> None:
        array = DataArray(1 + 2j)
        assert_identical(array.real, DataArray(1))
        assert_identical(array.imag, DataArray(2))

    def test_setattr_raises(self) -> None:
        array = DataArray(0, coords={"scalar": 1}, attrs={"foo": "bar"})
        with pytest.raises(AttributeError, match=r"cannot set attr"):
            array.scalar = 2
        with pytest.raises(AttributeError, match=r"cannot set attr"):
            array.foo = 2
        with pytest.raises(AttributeError, match=r"cannot set attr"):
            array.other = 2

    def test_full_like(self) -> None:
        # For more thorough tests, see test_variable.py
        da = DataArray(
            np.random.random(size=(2, 2)),
            dims=("x", "y"),
            attrs={"attr1": "value1"},
            coords={"x": [4, 3]},
            name="helloworld",
        )
        actual = full_like(da, 2)
        expect = da.copy(deep=True)
        expect.values = np.array([[2.0, 2.0], [2.0, 2.0]])
        assert_identical(expect, actual)

        # override dtype
        actual = full_like(da, fill_value=True, dtype=bool)
        expect.values = np.array([[True, True], [True, True]])
        assert expect.dtype == bool
        assert_identical(expect, actual)

        with pytest.raises(ValueError, match="'dtype' cannot be dict-like"):
            full_like(da, fill_value=True, dtype={"x": bool})

    def test_dot(self) -> None:
        x = np.linspace(-3, 3, 6)
        y = np.linspace(-3, 3, 5)
        z = range(4)
        da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
        da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"])

        dm_vals1 = range(4)
        dm1 = DataArray(dm_vals1, coords=[z], dims=["z"])

        # nd dot 1d
        actual1 = da.dot(dm1)
        expected_vals1 = np.tensordot(da_vals, dm_vals1, (2, 0))
        expected1 = DataArray(expected_vals1, coords=[x, y], dims=["x", "y"])
        assert_equal(expected1, actual1)

        # all shared dims
        actual2 = da.dot(da)
        expected_vals2 = np.tensordot(da_vals, da_vals, axes=([0, 1, 2], [0, 1, 2]))
        expected2 = DataArray(expected_vals2)
        assert_equal(expected2, actual2)

        # multiple shared dims
        dm_vals3 = np.arange(20 * 5 * 4).reshape((20, 5, 4))
        j = np.linspace(-3, 3, 20)
        dm3 = DataArray(dm_vals3, coords=[j, y, z], dims=["j", "y", "z"])
        actual3 = da.dot(dm3)
        expected_vals3 = np.tensordot(da_vals, dm_vals3, axes=([1, 2], [1, 2]))
        expected3 = DataArray(expected_vals3, coords=[x, j], dims=["x", "j"])
        assert_equal(expected3, actual3)

        # Ellipsis: all dims are shared
        actual4 = da.dot(da, dim=...)
        expected4 = da.dot(da)
        assert_equal(expected4, actual4)

        # Ellipsis: not all dims are shared
        actual5 = da.dot(dm3, dim=...)
        expected5 = da.dot(dm3, dim=("j", "x", "y", "z"))
        assert_equal(expected5, actual5)

        with pytest.raises(NotImplementedError):
            da.dot(dm3.to_dataset(name="dm"))
        with pytest.raises(TypeError):
            da.dot(dm3.values)  # type: ignore[type-var]

    def test_dot_align_coords(self) -> None:
        # GH 3694
        x = np.linspace(-3, 3, 6)
        y = np.linspace(-3, 3, 5)
        z_a = range(4)
        da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
        da = DataArray(da_vals, coords=[x, y, z_a], dims=["x", "y", "z"])

        z_m = range(2, 6)
        dm_vals1 = range(4)
        dm1 = DataArray(dm_vals1, coords=[z_m], dims=["z"])

        with xr.set_options(arithmetic_join="exact"):
            with pytest.raises(
                ValueError, match=r"cannot align.*join.*exact.*not equal.*"
            ):
                da.dot(dm1)

        da_aligned, dm_aligned = xr.align(da, dm1, join="inner")

        # nd dot 1d
        actual1 = da.dot(dm1)
        expected_vals1 = np.tensordot(da_aligned.values, dm_aligned.values, (2, 0))
        expected1 = DataArray(expected_vals1, coords=[x, da_aligned.y], dims=["x", "y"])
        assert_equal(expected1, actual1)

        # multiple shared dims
        dm_vals2 = np.arange(20 * 5 * 4).reshape((20, 5, 4))
        j = np.linspace(-3, 3, 20)
        dm2 = DataArray(dm_vals2, coords=[j, y, z_m], dims=["j", "y", "z"])
        da_aligned, dm_aligned = xr.align(da, dm2, join="inner")
        actual2 = da.dot(dm2)
        expected_vals2 = np.tensordot(
            da_aligned.values, dm_aligned.values, axes=([1, 2], [1, 2])
        )
        expected2 = DataArray(expected_vals2, coords=[x, j], dims=["x", "j"])
        assert_equal(expected2, actual2)

    def test_matmul(self) -> None:
        # copied from above (could make a fixture)
        x = np.linspace(-3, 3, 6)
        y = np.linspace(-3, 3, 5)
        z = range(4)
        da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
        da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"])

        result = da @ da
        expected = da.dot(da)
        assert_identical(result, expected)

    def test_matmul_align_coords(self) -> None:
        # GH 3694
        x_a = np.arange(6)
        x_b = np.arange(2, 8)
        da_vals = np.arange(6)
        da_a = DataArray(da_vals, coords=[x_a], dims=["x"])
        da_b = DataArray(da_vals, coords=[x_b], dims=["x"])

        # only test arithmetic_join="inner" (=default)
        result = da_a @ da_b
        expected = da_a.dot(da_b)
        assert_identical(result, expected)

        with xr.set_options(arithmetic_join="exact"):
            with pytest.raises(
                ValueError, match=r"cannot align.*join.*exact.*not equal.*"
            ):
                da_a @ da_b

    def test_binary_op_propagate_indexes(self) -> None:
        # regression test for GH2227
        self.dv["x"] = np.arange(self.dv.sizes["x"])
        expected = self.dv.xindexes["x"]

        actual = (self.dv * 10).xindexes["x"]
        assert expected is actual

        actual = (self.dv > 10).xindexes["x"]
        assert expected is actual

        # use mda for bitshift test as it's type int
        actual = (self.mda << 2).xindexes["x"]
        expected = self.mda.xindexes["x"]
        assert expected is actual

    def test_binary_op_join_setting(self) -> None:
        dim = "x"
        align_type: Final = "outer"
        coords_l, coords_r = [0, 1, 2], [1, 2, 3]
        missing_3 = xr.DataArray(coords_l, [(dim, coords_l)])
        missing_0 = xr.DataArray(coords_r, [(dim, coords_r)])
        with xr.set_options(arithmetic_join=align_type):
            actual = missing_0 + missing_3
        _missing_0_aligned, _missing_3_aligned = xr.align(
            missing_0, missing_3, join=align_type
        )
        expected = xr.DataArray([np.nan, 2, 4, np.nan], [(dim, [0, 1, 2, 3])])
        assert_equal(actual, expected)

    def test_combine_first(self) -> None:
        ar0 = DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])])
        ar1 = DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])])
        ar2 = DataArray([2], [("x", ["d"])])

        actual = ar0.combine_first(ar1)
        expected = DataArray(
            [[0, 0, np.nan], [0, 0, 1], [np.nan, 1, 1]],
            [("x", ["a", "b", "c"]), ("y", [-1, 0, 1])],
        )
assert_equal(actual, expected) actual = ar1.combine_first(ar0) expected = DataArray( [[0, 0, np.nan], [0, 1, 1], [np.nan, 1, 1]], [("x", ["a", "b", "c"]), ("y", [-1, 0, 1])], ) assert_equal(actual, expected) actual = ar0.combine_first(ar2) expected = DataArray( [[0, 0], [0, 0], [2, 2]], [("x", ["a", "b", "d"]), ("y", [-1, 0])] ) assert_equal(actual, expected) def test_sortby(self) -> None: da = DataArray( [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])] ) sorted1d = DataArray( [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])] ) sorted2d = DataArray( [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])] ) expected = sorted1d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) actual = da.sortby(dax) assert_equal(actual, expected) # test descending order sort actual = da.sortby(dax, ascending=False) assert_equal(actual, da) # test alignment (fills in nan for 'c') dax_short = DataArray([98, 97], [("x", ["b", "a"])]) actual = da.sortby(dax_short) assert_equal(actual, expected) # test multi-dim sort by 1D dataarray values expected = sorted2d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) day = DataArray([90, 80], [("y", [1, 0])]) actual = da.sortby([day, dax]) assert_equal(actual, expected) expected = sorted1d actual = da.sortby("x") assert_equal(actual, expected) expected = sorted2d actual = da.sortby(["x", "y"]) assert_equal(actual, expected) @requires_bottleneck def test_rank(self) -> None: # floats ar = DataArray([[3, 4, np.nan, 1]]) expect_0 = DataArray([[1, 1, np.nan, 1]]) expect_1 = DataArray([[2, 3, np.nan, 1]]) assert_equal(ar.rank("dim_0"), expect_0) assert_equal(ar.rank("dim_1"), expect_1) # int x = DataArray([3, 2, 1]) assert_equal(x.rank("dim_0"), x) # str y = DataArray(["c", "b", "a"]) assert_equal(y.rank("dim_0"), x) x = DataArray([3.0, 1.0, np.nan, 2.0, 4.0], dims=("z",)) y = DataArray([0.75, 0.25, np.nan, 0.5, 1.0], dims=("z",)) assert_equal(y.rank("z", pct=True), y) @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("use_datetime", [True, False]) @pytest.mark.filterwarnings("ignore:overflow encountered in multiply") def test_polyfit(self, use_dask, use_datetime) -> None: if use_dask and not has_dask: pytest.skip("requires dask") xcoord = xr.DataArray( pd.date_range("1970-01-01", freq="D", periods=10), dims=("x",), name="x" ) x = xr.core.missing.get_clean_interp_index(xcoord, "x") if not use_datetime: xcoord = x da_raw = DataArray( np.stack((10 + 1e-15 * x + 2e-28 * x**2, 30 + 2e-14 * x + 1e-29 * x**2)), dims=("d", "x"), coords={"x": xcoord, "d": [0, 1]}, ) if use_dask: da = da_raw.chunk({"d": 1}) else: da = da_raw out = da.polyfit("x", 2) expected = DataArray( [[2e-28, 1e-15, 10], [1e-29, 2e-14, 30]], dims=("d", "degree"), coords={"degree": [2, 1, 0], "d": [0, 1]}, ).T assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) # Full output and deficient rank with warnings.catch_warnings(): warnings.simplefilter("ignore", RankWarning) out = da.polyfit("x", 12, full=True) assert out.polyfit_residuals.isnull().all() # With NaN da_raw[0, 1:3] = np.nan if use_dask: da = da_raw.chunk({"d": 1}) else: da = da_raw out = da.polyfit("x", 2, skipna=True, cov=True) assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) assert "polyfit_covariance" in out # Skipna + Full output out = da.polyfit("x", 2, skipna=True, full=True) assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) assert out.x_matrix_rank == 3 np.testing.assert_almost_equal(out.polyfit_residuals, [0, 0]) with 
warnings.catch_warnings(): warnings.simplefilter("ignore", RankWarning) out = da.polyfit("x", 8, full=True) np.testing.assert_array_equal(out.polyfit_residuals.isnull(), [True, False]) @requires_dask def test_polyfit_nd_dask(self) -> None: da = ( DataArray(np.arange(120), dims="time", coords={"time": np.arange(120)}) .chunk({"time": 20}) .expand_dims(lat=5, lon=5) .chunk({"lat": 2, "lon": 2}) ) actual = da.polyfit("time", 1, skipna=False) expected = da.compute().polyfit("time", 1, skipna=False) assert_allclose(actual, expected) def test_pad_constant(self) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad(dim_0=(1, 3)) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5).astype(np.float32), mode="constant", pad_width=((1, 3), (0, 0), (0, 0)), constant_values=np.nan, ) ) assert actual.shape == (7, 4, 5) assert_identical(actual, expected) ar = xr.DataArray([9], dims="x") actual = ar.pad(x=1) expected = xr.DataArray([np.nan, 9, np.nan], dims="x") assert_identical(actual, expected) actual = ar.pad(x=1, constant_values=1.23456) expected = xr.DataArray([1, 9, 1], dims="x") assert_identical(actual, expected) with pytest.raises(ValueError, match="cannot convert float NaN to integer"): ar.pad(x=1, constant_values=np.nan) def test_pad_coords(self) -> None: ar = DataArray( np.arange(3 * 4 * 5).reshape(3, 4, 5), [("x", np.arange(3)), ("y", np.arange(4)), ("z", np.arange(5))], ) actual = ar.pad(x=(1, 3), constant_values=1) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), mode="constant", pad_width=((1, 3), (0, 0), (0, 0)), constant_values=1, ), [ ( "x", np.pad( np.arange(3).astype(np.float32), mode="constant", pad_width=(1, 3), constant_values=np.nan, ), ), ("y", np.arange(4)), ("z", np.arange(5)), ], ) assert_identical(actual, expected) @pytest.mark.parametrize("mode", ("minimum", "maximum", "mean", "median")) @pytest.mark.parametrize( "stat_length", (None, 3, (1, 3), {"dim_0": (2, 1), "dim_2": (4, 2)}) ) def test_pad_stat_length(self, mode, stat_length) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad(dim_0=(1, 3), dim_2=(2, 2), mode=mode, stat_length=stat_length) if isinstance(stat_length, dict): stat_length = (stat_length["dim_0"], (4, 4), stat_length["dim_2"]) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), pad_width=((1, 3), (0, 0), (2, 2)), mode=mode, stat_length=stat_length, ) ) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize( "end_values", (None, 3, (3, 5), {"dim_0": (2, 1), "dim_2": (4, 2)}) ) def test_pad_linear_ramp(self, end_values) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad( dim_0=(1, 3), dim_2=(2, 2), mode="linear_ramp", end_values=end_values ) if end_values is None: end_values = 0 elif isinstance(end_values, dict): end_values = (end_values["dim_0"], (4, 4), end_values["dim_2"]) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), pad_width=((1, 3), (0, 0), (2, 2)), mode="linear_ramp", end_values=end_values, ) ) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize("mode", ("reflect", "symmetric")) @pytest.mark.parametrize("reflect_type", (None, "even", "odd")) def test_pad_reflect(self, mode, reflect_type) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad( dim_0=(1, 3), dim_2=(2, 2), mode=mode, reflect_type=reflect_type ) np_kwargs = { "array": np.arange(3 * 4 * 5).reshape(3, 4, 5), "pad_width": ((1, 3), (0, 0), 
(2, 2)), "mode": mode, } # numpy does not support reflect_type=None if reflect_type is not None: np_kwargs["reflect_type"] = reflect_type expected = DataArray(np.pad(**np_kwargs)) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None: arr = xr.DataArray( [1, 2], dims="x", coords={"c": ("x", [-1, 1], attrs)}, attrs=attrs ) expected = xr.DataArray( [0, 1, 2, 0], dims="x", coords={"c": ("x", [np.nan, -1, 1, np.nan], expected)}, attrs=expected, ) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = arr.pad({"x": (1, 1)}, mode="constant", constant_values=0) xr.testing.assert_identical(actual, expected) actual = arr.pad( {"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs ) xr.testing.assert_identical(actual, expected) @pytest.mark.parametrize("parser", ["pandas", "python"]) @pytest.mark.parametrize( "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])] ) @pytest.mark.parametrize( "backend", ["numpy", pytest.param("dask", marks=[requires_dask])] ) def test_query( self, backend, engine: QueryEngineOptions, parser: QueryParserOptions ) -> None: """Test querying a dataset.""" # setup test data np.random.seed(42) a = np.arange(0, 10, 1) b = np.random.randint(0, 100, size=10) c = np.linspace(0, 1, 20) d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype( object ) aa = DataArray(data=a, dims=["x"], name="a", coords={"a2": ("x", a)}) bb = DataArray(data=b, dims=["x"], name="b", coords={"b2": ("x", b)}) cc = DataArray(data=c, dims=["y"], name="c", coords={"c2": ("y", c)}) dd = DataArray(data=d, dims=["z"], name="d", coords={"d2": ("z", d)}) if backend == "dask": import dask.array as da aa = aa.copy(data=da.from_array(a, chunks=3)) bb = bb.copy(data=da.from_array(b, chunks=3)) cc = cc.copy(data=da.from_array(c, chunks=7)) dd = dd.copy(data=da.from_array(d, chunks=12)) # query single dim, single variable with raise_if_dask_computes(): actual = aa.query(x="a2 > 5", engine=engine, parser=parser) expect = aa.isel(x=(a > 5)) assert_identical(expect, actual) # query single dim, single variable, via dict with raise_if_dask_computes(): actual = aa.query(dict(x="a2 > 5"), engine=engine, parser=parser) expect = aa.isel(dict(x=(a > 5))) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = bb.query(x="b2 > 50", engine=engine, parser=parser) expect = bb.isel(x=(b > 50)) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = cc.query(y="c2 < .5", engine=engine, parser=parser) expect = cc.isel(y=(c < 0.5)) assert_identical(expect, actual) # query single dim, single string variable if parser == "pandas": # N.B., this query currently only works with the pandas parser # xref https://github.com/pandas-dev/pandas/issues/40436 with raise_if_dask_computes(): actual = dd.query(z='d2 == "bar"', engine=engine, parser=parser) expect = dd.isel(z=(d == "bar")) assert_identical(expect, actual) # test error handling with pytest.raises(ValueError): aa.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): aa.query(x=(a > 5)) 
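        # Editor's note (illustrative only): DataArray.query expects a mapping
        # (or kwargs) from dimension name to a *string* expression evaluated
        # against the named coordinates/variables, e.g. aa.query(x="a2 > 5");
        # the engine/parser options mirror pandas.eval, which is why the
        # numexpr engine is only exercised when that package is installed.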
# must be query string with pytest.raises(UndefinedVariableError): aa.query(x="spam > 50") # name not present @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit(self, use_dask) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def exp_decay(t, n0, tau=1): return n0 * np.exp(-t / tau) t = np.arange(0, 5, 0.5) da = DataArray( np.stack([exp_decay(t, 3, 3), exp_decay(t, 5, 4), np.nan * t], axis=-1), dims=("t", "x"), coords={"t": t, "x": [0, 1, 2]}, ) da[0, 0] = np.nan expected = DataArray( [[3, 3], [5, 4], [np.nan, np.nan]], dims=("x", "param"), coords={"x": [0, 1, 2], "param": ["n0", "tau"]}, ) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=exp_decay, p0={"n0": 4}, bounds={"tau": (2, 6)} ) assert_allclose(fit.curvefit_coefficients, expected, rtol=1e-3) da = da.compute() fit = da.curvefit(coords="t", func=np.power, reduce_dims="x", param_names=["a"]) assert "a" in fit.param assert "x" not in fit.dims def test_curvefit_helpers(self) -> None: def exp_decay(t, n0, tau=1): return n0 * np.exp(-t / tau) from xarray.computation.fit import _get_func_args, _initialize_curvefit_params params, func_args = _get_func_args(exp_decay, []) assert params == ["n0", "tau"] param_defaults, bounds_defaults = _initialize_curvefit_params( params, {"n0": 4}, {"tau": [5, np.inf]}, func_args ) assert param_defaults == {"n0": 4, "tau": 6} assert bounds_defaults == {"n0": (-np.inf, np.inf), "tau": (5, np.inf)} # DataArray as bound param_defaults, bounds_defaults = _initialize_curvefit_params( params=params, p0={"n0": 4}, bounds={"tau": [DataArray([3, 4], coords=[("x", [1, 2])]), np.inf]}, func_args=func_args, ) assert param_defaults["n0"] == 4 assert ( param_defaults["tau"] == xr.DataArray([4, 5], coords=[("x", [1, 2])]) ).all() assert bounds_defaults["n0"] == (-np.inf, np.inf) assert ( bounds_defaults["tau"][0] == DataArray([3, 4], coords=[("x", [1, 2])]) ).all() assert bounds_defaults["tau"][1] == np.inf param_names = ["a"] params, func_args = _get_func_args(np.power, param_names) assert params == param_names with pytest.raises(ValueError): _get_func_args(np.power, []) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_multidimensional_guess(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def sine(t, a, f, p): return a * np.sin(2 * np.pi * (f * t + p)) t = np.arange(0, 2, 0.02) da = DataArray( np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]), coords={"x": [0, 1], "t": t}, ) # Fitting to a sine curve produces a different result depending on the # initial guess: either the phase is zero and the amplitude is positive # or the phase is 0.5 * 2pi and the amplitude is negative. 
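        # Editor's note (hedged sketch): curvefit accepts DataArray-valued
        # entries in p0, so each point along a broadcast dimension can start
        # the optimizer from its own initial guess -- here a per-x amplitude
        # and phase -- which is what steers the two fits towards the two
        # equivalent (a, p) solutions listed in `expected` below.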
expected = DataArray( [[1, 2, 0], [-1, 2, 0.5]], coords={"x": [0, 1], "param": ["a", "f", "p"]}, ) # Different initial guesses for different values of x a_guess = DataArray([1, -1], coords=[da.x]) p_guess = DataArray([0, 0.5], coords=[da.x]) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=sine, p0={"a": a_guess, "p": p_guess, "f": 2}, ) assert_allclose(fit.curvefit_coefficients, expected) with pytest.raises( ValueError, match=r"Initial guess for 'a' has unexpected dimensions .* should only have " "dimensions that are in data dimensions", ): # initial guess with additional dimensions should be an error da.curvefit( coords=[da.t], func=sine, p0={"a": DataArray([1, 2], coords={"foo": [1, 2]})}, ) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_multidimensional_bounds(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def sine(t, a, f, p): return a * np.sin(2 * np.pi * (f * t + p)) t = np.arange(0, 2, 0.02) da = xr.DataArray( np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]), coords={"x": [0, 1], "t": t}, ) # Fit a sine with different bounds: positive amplitude should result in a fit with # phase 0 and negative amplitude should result in phase 0.5 * 2pi. expected = DataArray( [[1, 2, 0], [-1, 2, 0.5]], coords={"x": [0, 1], "param": ["a", "f", "p"]}, ) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=sine, p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result bounds={ "a": ( DataArray([0, -2], coords=[da.x]), DataArray([2, 0], coords=[da.x]), ), }, ) assert_allclose(fit.curvefit_coefficients, expected) # Scalar lower bound with array upper bound fit2 = da.curvefit( coords=[da.t], func=sine, p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result bounds={ "a": (-2, DataArray([2, 0], coords=[da.x])), }, ) assert_allclose(fit2.curvefit_coefficients, expected) with pytest.raises( ValueError, match=r"Upper bound for 'a' has unexpected dimensions .* should only have " "dimensions that are in data dimensions", ): # bounds with additional dimensions should be an error da.curvefit( coords=[da.t], func=sine, bounds={"a": (0, DataArray([1], coords={"foo": [1]}))}, ) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_ignore_errors(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") # nonsense function to make the optimization fail def line(x, a, b): if a > 10: return 0 return a * x + b da = DataArray( [[1, 3, 5], [0, 20, 40]], coords={"i": [1, 2], "x": [0.0, 1.0, 2.0]}, ) if use_dask: da = da.chunk({"i": 1}) expected = DataArray( [[2, 1], [np.nan, np.nan]], coords={"i": [1, 2], "param": ["a", "b"]} ) with pytest.raises(RuntimeError, match="calls to function has reached maxfev"): da.curvefit( coords="x", func=line, # limit maximum number of calls so the optimization fails kwargs=dict(maxfev=5), ).compute() # have to compute to raise the error fit = da.curvefit( coords="x", func=line, errors="ignore", # limit maximum number of calls so the optimization fails kwargs=dict(maxfev=5), ).compute() assert_allclose(fit.curvefit_coefficients, expected) class TestReduce: @pytest.fixture(autouse=True) def setup(self): self.attrs = {"attr1": "value1", "attr2": 2929} @pytest.mark.parametrize( ["x", "minindex", "maxindex", "nanindex"], [ pytest.param(np.array([0, 1, 2, 0, -2, -4, 2]), 5, 2, None, id="int"), pytest.param( np.array([0.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0]), 5, 2, None, 
id="float" ), pytest.param( np.array([1.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0]), 5, 2, 1, id="nan" ), pytest.param( np.array([1.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0]).astype("object"), 5, 2, 1, marks=pytest.mark.filterwarnings( "ignore:invalid value encountered in reduce:RuntimeWarning" ), id="obj", ), pytest.param(np.array([np.nan, np.nan]), np.nan, np.nan, 0, id="allnan"), pytest.param( np.array( ["2015-12-31", "2020-01-02", "2020-01-01", "2016-01-01"], dtype="datetime64[ns]", ), 0, 1, None, id="datetime", ), ], ) class TestReduce1D(TestReduce): def test_min( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if np.isnan(minindex): minindex = 0 expected0 = ar.isel(x=minindex, drop=True) result0 = ar.min(keep_attrs=True) assert_identical(result0, expected0) # Default keeps attrs for reduction operations result1 = ar.min() expected1 = expected0.copy() assert_identical(result1, expected1) result2 = ar.min(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = ar.isel(x=nanindex, drop=True) else: expected2 = expected1 assert_identical(result2, expected2) # Test explicitly dropping attrs result3 = ar.min(keep_attrs=False) expected3 = expected0.copy() expected3.attrs = {} assert_identical(result3, expected3) def test_max( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if np.isnan(minindex): maxindex = 0 expected0 = ar.isel(x=maxindex, drop=True) result0 = ar.max(keep_attrs=True) assert_identical(result0, expected0) # Default keeps attrs for reduction operations result1 = ar.max() expected1 = expected0.copy() assert_identical(result1, expected1) result2 = ar.max(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = ar.isel(x=nanindex, drop=True) else: expected2 = expected1 assert_identical(result2, expected2) # Test explicitly dropping attrs result3 = ar.max(keep_attrs=False) expected3 = expected0.copy() expected3.attrs = {} assert_identical(result3, expected3) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(minindex): with pytest.raises(ValueError): ar.argmin() return expected0 = indarr[minindex] expected0.attrs = self.attrs # argmin should preserve attrs from input result0 = ar.argmin() assert_identical(result0, expected0) result1 = ar.argmin(keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result1, expected1) result2 = ar.argmin(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = indarr.isel(x=nanindex, drop=True) expected2.attrs = self.attrs # Default keeps attrs for reduction operations else: expected2 = expected0 assert_identical(result2, expected2) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) 
indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(maxindex): with pytest.raises(ValueError): ar.argmax() return expected0 = indarr[maxindex] expected0.attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmax() assert_identical(result0, expected0) result1 = ar.argmax(keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result1, expected1) result2 = ar.argmax(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = indarr.isel(x=nanindex, drop=True) expected2.attrs = self.attrs # Default keeps attrs for reduction operations else: expected2 = expected0 assert_identical(result2, expected2) @pytest.mark.parametrize( "use_dask", [ pytest.param( True, marks=pytest.mark.skipif(not has_dask, reason="no dask") ), False, ], ) def test_idxmin( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, use_dask: bool, ) -> None: ar0_raw = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if use_dask: ar0 = ar0_raw.chunk() else: ar0 = ar0_raw with pytest.raises( KeyError, match=r"'spam' not found in array dimensions", ): ar0.idxmin(dim="spam") # Scalar Dataarray with pytest.raises(ValueError): xr.DataArray(5).idxmin() coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"]) coordarr1 = coordarr0.copy() hasna = np.isnan(minindex) if np.isnan(minindex): minindex = 0 if hasna: coordarr1[...] = 1 fill_value_0 = np.nan else: fill_value_0 = 1 expected0 = ( (coordarr1 * fill_value_0).isel(x=minindex, drop=True).astype("float") ) expected0.name = "x" expected0.attrs = self.attrs # Default keeps attrs for reduction operations # Default fill value (NaN) result0 = ar0.idxmin() assert_identical(result0, expected0) # Manually specify NaN fill_value result1 = ar0.idxmin(fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs result2 = ar0.idxmin(keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False if nanindex is not None and ar0.dtype.kind != "O": expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float") expected3.name = "x" expected3.attrs = self.attrs # Default keeps attrs for reduction operations else: expected3 = expected0.copy() result3 = ar0.idxmin(skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False result4 = ar0.idxmin(skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value if hasna: fill_value_5 = -1.1 else: fill_value_5 = 1 expected5 = (coordarr1 * fill_value_5).isel(x=minindex, drop=True) expected5.name = "x" expected5.attrs = self.attrs # Default keeps attrs for reduction operations result5 = ar0.idxmin(fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value if hasna: fill_value_6 = -1 else: fill_value_6 = 1 expected6 = (coordarr1 * fill_value_6).isel(x=minindex, drop=True) expected6.name = "x" expected6.attrs = self.attrs # Default keeps attrs for reduction operations result6 = ar0.idxmin(fill_value=-1) assert_identical(result6, expected6) # Complex fill_value if hasna: fill_value_7 = -1j else: fill_value_7 = 1 expected7 = (coordarr1 * fill_value_7).isel(x=minindex, drop=True) expected7.name = "x" expected7.attrs = self.attrs # Default keeps attrs for reduction operations result7 = ar0.idxmin(fill_value=-1j) assert_identical(result7, expected7) @pytest.mark.parametrize("use_dask", [True, False]) def test_idxmax( self, x: np.ndarray, 
minindex: int | float, maxindex: int | float, nanindex: int | None, use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)") ar0_raw = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw with pytest.raises( KeyError, match=r"'spam' not found in array dimensions", ): ar0.idxmax(dim="spam") # Scalar Dataarray with pytest.raises(ValueError): xr.DataArray(5).idxmax() coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"]) coordarr1 = coordarr0.copy() hasna = np.isnan(maxindex) if np.isnan(maxindex): maxindex = 0 if hasna: coordarr1[...] = 1 fill_value_0 = np.nan else: fill_value_0 = 1 expected0 = ( (coordarr1 * fill_value_0).isel(x=maxindex, drop=True).astype("float") ) expected0.name = "x" expected0.attrs = self.attrs # Default keeps attrs for reduction operations # Default fill value (NaN) result0 = ar0.idxmax() assert_identical(result0, expected0) # Manually specify NaN fill_value result1 = ar0.idxmax(fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs result2 = ar0.idxmax(keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False if nanindex is not None and ar0.dtype.kind != "O": expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float") expected3.name = "x" expected3.attrs = self.attrs # Default keeps attrs for reduction operations else: expected3 = expected0.copy() result3 = ar0.idxmax(skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False result4 = ar0.idxmax(skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value if hasna: fill_value_5 = -1.1 else: fill_value_5 = 1 expected5 = (coordarr1 * fill_value_5).isel(x=maxindex, drop=True) expected5.name = "x" expected5.attrs = self.attrs # Default keeps attrs for reduction operations result5 = ar0.idxmax(fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value if hasna: fill_value_6 = -1 else: fill_value_6 = 1 expected6 = (coordarr1 * fill_value_6).isel(x=maxindex, drop=True) expected6.name = "x" expected6.attrs = self.attrs # Default keeps attrs for reduction operations result6 = ar0.idxmax(fill_value=-1) assert_identical(result6, expected6) # Complex fill_value if hasna: fill_value_7 = -1j else: fill_value_7 = 1 expected7 = (coordarr1 * fill_value_7).isel(x=maxindex, drop=True) expected7.name = "x" expected7.attrs = self.attrs # Default keeps attrs for reduction operations result7 = ar0.idxmax(fill_value=-1j) assert_identical(result7, expected7) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin_dim( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(minindex): with pytest.raises(ValueError): ar.argmin() return expected0 = {"x": indarr[minindex]} for da in expected0.values(): da.attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmin(...) 
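        # Editor's note (hedged): calling argmin(...) with an Ellipsis reduces
        # over every dimension and returns a dict mapping each dimension name
        # to an index DataArray, which is why the assertions below iterate
        # over expected0's keys instead of comparing a single DataArray.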
for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmin(..., keep_attrs=True) expected1 = deepcopy(expected0) for da in expected1.values(): da.attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) result2 = ar.argmin(..., skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = {"x": indarr.isel(x=nanindex, drop=True)} expected2[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations else: expected2 = expected0 for key in expected2: assert_identical(result2[key], expected2[key]) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax_dim( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(maxindex): with pytest.raises(ValueError): ar.argmax() return expected0 = {"x": indarr[maxindex]} for da in expected0.values(): da.attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmax(...) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmax(..., keep_attrs=True) expected1 = deepcopy(expected0) for da in expected1.values(): da.attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) result2 = ar.argmax(..., skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = {"x": indarr.isel(x=nanindex, drop=True)} expected2[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations else: expected2 = expected0 for key in expected2: assert_identical(result2[key], expected2[key]) @pytest.mark.parametrize( ["x", "minindex", "maxindex", "nanindex"], [ pytest.param( np.array( [ [0, 1, 2, 0, -2, -4, 2], [1, 1, 1, 1, 1, 1, 1], [0, 0, -10, 5, 20, 0, 0], ] ), [5, 0, 2], [2, 0, 4], [None, None, None], id="int", ), pytest.param( np.array( [ [2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0], [-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0], [np.nan] * 7, ] ), [5, 0, np.nan], [0, 2, np.nan], [None, 1, 0], id="nan", ), pytest.param( np.array( [ [2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0], [-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0], [np.nan] * 7, ] ).astype("object"), [5, 0, np.nan], [0, 2, np.nan], [None, 1, 0], marks=pytest.mark.filterwarnings( "ignore:invalid value encountered in reduce:RuntimeWarning:" ), id="obj", ), pytest.param( np.array( [ ["2015-12-31", "2020-01-02", "2020-01-01", "2016-01-01"], ["2020-01-02", "2020-01-02", "2020-01-02", "2020-01-02"], ["1900-01-01", "1-02-03", "1900-01-02", "1-02-03"], ], dtype="datetime64[ns]", ), [0, 0, 1], [1, 0, 2], [None, None, None], id="datetime", ), ], ) class TestReduce2D(TestReduce): def test_min( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) minindex = [x if not np.isnan(x) else 0 for x in minindex] expected0list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.min(dim="x", keep_attrs=True) assert_identical(result0, expected0) # Default keeps attrs for reduction operations result1 = ar.min(dim="x") assert_identical(result1, expected0) # Test explicitly dropping attrs result1_no_attrs = 
ar.min(dim="x", keep_attrs=False) expected1 = expected0.copy() expected1.attrs = {} assert_identical(result1_no_attrs, expected1) result2 = ar.min(axis=1) assert_identical(result2, expected0) # Default keeps attrs minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = self.attrs # Default keeps attrs for reduction operations result3 = ar.min(dim="x", skipna=False) assert_identical(result3, expected2) def test_max( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) maxindex = [x if not np.isnan(x) else 0 for x in maxindex] expected0list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.max(dim="x", keep_attrs=True) assert_identical(result0, expected0) # Default keeps attrs for reduction operations result1 = ar.max(dim="x") assert_identical(result1, expected0) # Test explicitly dropping attrs result1_no_attrs = ar.max(dim="x", keep_attrs=False) expected1 = expected0.copy() expected1.attrs = {} assert_identical(result1_no_attrs, expected1) result2 = ar.max(axis=1) assert_identical(result2, expected0) # Default keeps attrs maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = self.attrs # Default keeps attrs for reduction operations result3 = ar.max(dim="x", skipna=False) assert_identical(result3, expected2) def test_argmin( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(minindex).any(): with pytest.raises(ValueError): ar.argmin(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = xr.concat(expected0list, dim="y") expected0.attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmin(dim="x") assert_identical(result0, expected0) result1 = ar.argmin(axis=1) assert_identical(result1, expected0) result2 = ar.argmin(dim="x", keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result2, expected1) minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = self.attrs # Default keeps attrs for reduction operations result3 = ar.argmin(dim="x", skipna=False) assert_identical(result3, expected2) def test_argmax( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 
- np.arange(x.shape[0])}, attrs=self.attrs, ) indarr_np = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarr_np, dims=ar.dims, coords=ar.coords) if np.isnan(maxindex).any(): with pytest.raises(ValueError): ar.argmax(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = xr.concat(expected0list, dim="y") expected0.attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmax(dim="x") assert_identical(result0, expected0) result1 = ar.argmax(axis=1) assert_identical(result1, expected0) result2 = ar.argmax(dim="x", keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result2, expected1) maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = self.attrs # Default keeps attrs for reduction operations result3 = ar.argmax(dim="x", skipna=False) assert_identical(result3, expected2) @pytest.mark.parametrize( "use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")] ) def test_idxmin( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmin' breaks when dtype is datetime64 (M)") if x.dtype.kind == "O": # TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices. max_computes = 1 else: max_computes = 0 ar0_raw = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw assert_identical(ar0, ar0) # No dimension specified with pytest.raises(ValueError): ar0.idxmin() # dim doesn't exist with pytest.raises(KeyError): ar0.idxmin(dim="Y") assert_identical(ar0, ar0) coordarr0 = xr.DataArray( np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords ) hasna = [np.isnan(x) for x in minindex] coordarr1 = coordarr0.copy() coordarr1[hasna, :] = 1 minindex0 = [x if not np.isnan(x) else 0 for x in minindex] nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None] expected0list = [ (coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected0 = xr.concat(expected0list, dim="y") expected0.name = "x" expected0.attrs = self.attrs # Default keeps attrs for reduction operations # Default fill value (NaN) with raise_if_dask_computes(max_computes=max_computes): result0 = ar0.idxmin(dim="x") assert_identical(result0, expected0) # Manually specify NaN fill_value with raise_if_dask_computes(max_computes=max_computes): result1 = ar0.idxmin(dim="x", fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs with raise_if_dask_computes(max_computes=max_computes): result2 = ar0.idxmin(dim="x", keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False minindex3 = [ x if y is None or ar0.dtype.kind == "O" else y for x, y in zip(minindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex3) ] expected3 = xr.concat(expected3list, dim="y") expected3.name = "x" 
expected3.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result3 = ar0.idxmin(dim="x", skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False with raise_if_dask_computes(max_computes=max_computes): result4 = ar0.idxmin(dim="x", skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None] expected5list = [ (coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected5 = xr.concat(expected5list, dim="y") expected5.name = "x" expected5.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result5 = ar0.idxmin(dim="x", fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None] expected6list = [ (coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected6 = xr.concat(expected6list, dim="y") expected6.name = "x" expected6.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result6 = ar0.idxmin(dim="x", fill_value=-1) assert_identical(result6, expected6) # Complex fill_value nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None] expected7list = [ (coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected7 = xr.concat(expected7list, dim="y") expected7.name = "x" expected7.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result7 = ar0.idxmin(dim="x", fill_value=-5j) assert_identical(result7, expected7) @pytest.mark.parametrize( "use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")] ) def test_idxmax( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)") if x.dtype.kind == "O": # TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices. 
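            # Editor's note (hedged): raise_if_dask_computes(max_computes=...)
            # below is the test-suite guard that fails if dask evaluates more
            # often than allowed, so idxmax/idxmin are checked to stay lazy
            # apart from the single compute that object dtypes need for the
            # all-NaN check mentioned in the TODO above.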
max_computes = 1 else: max_computes = 0 ar0_raw = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw # No dimension specified with pytest.raises(ValueError): ar0.idxmax() # dim doesn't exist with pytest.raises(KeyError): ar0.idxmax(dim="Y") ar1 = ar0.copy() del ar1.coords["y"] with pytest.raises(KeyError): ar1.idxmax(dim="y") coordarr0 = xr.DataArray( np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords ) hasna = [np.isnan(x) for x in maxindex] coordarr1 = coordarr0.copy() coordarr1[hasna, :] = 1 maxindex0 = [x if not np.isnan(x) else 0 for x in maxindex] nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None] expected0list = [ (coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected0 = xr.concat(expected0list, dim="y") expected0.name = "x" expected0.attrs = self.attrs # Default keeps attrs for reduction operations # Default fill value (NaN) with raise_if_dask_computes(max_computes=max_computes): result0 = ar0.idxmax(dim="x") assert_identical(result0, expected0) # Manually specify NaN fill_value with raise_if_dask_computes(max_computes=max_computes): result1 = ar0.idxmax(dim="x", fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs with raise_if_dask_computes(max_computes=max_computes): result2 = ar0.idxmax(dim="x", keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False maxindex3 = [ x if y is None or ar0.dtype.kind == "O" else y for x, y in zip(maxindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex3) ] expected3 = xr.concat(expected3list, dim="y") expected3.name = "x" expected3.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result3 = ar0.idxmax(dim="x", skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False with raise_if_dask_computes(max_computes=max_computes): result4 = ar0.idxmax(dim="x", skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None] expected5list = [ (coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected5 = xr.concat(expected5list, dim="y") expected5.name = "x" expected5.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result5 = ar0.idxmax(dim="x", fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None] expected6list = [ (coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected6 = xr.concat(expected6list, dim="y") expected6.name = "x" expected6.attrs = self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result6 = ar0.idxmax(dim="x", fill_value=-1) assert_identical(result6, expected6) # Complex fill_value nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None] expected7list = [ (coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected7 = xr.concat(expected7list, dim="y") expected7.name = "x" expected7.attrs = 
self.attrs # Default keeps attrs for reduction operations with raise_if_dask_computes(max_computes=max_computes): result7 = ar0.idxmax(dim="x", fill_value=-5j) assert_identical(result7, expected7) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin_dim( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(minindex).any(): with pytest.raises(ValueError): ar.argmin(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = {"x": xr.concat(expected0list, dim="y")} expected0[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmin(dim=["x"]) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmin(dim=["x"], keep_attrs=True) expected1 = deepcopy(expected0) expected1["x"].attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = {"x": xr.concat(expected2list, dim="y")} expected2[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations result2 = ar.argmin(dim=["x"], skipna=False) for key in expected2: assert_identical(result2[key], expected2[key]) result3 = ar.argmin(...) 
# TODO: remove cast once argmin typing is overloaded min_xind = cast(DataArray, ar.isel(expected0).argmin()) expected3 = { "y": DataArray(min_xind, attrs=self.attrs), "x": DataArray(minindex[min_xind.item()], attrs=self.attrs), } for key in expected3: assert_identical(result3[key], expected3[key]) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax_dim( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(maxindex).any(): with pytest.raises(ValueError): ar.argmax(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = {"x": xr.concat(expected0list, dim="y")} expected0[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations result0 = ar.argmax(dim=["x"]) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmax(dim=["x"], keep_attrs=True) expected1 = deepcopy(expected0) expected1["x"].attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected2 = {"x": xr.concat(expected2list, dim="y")} expected2[ "x" ].attrs = self.attrs # Default keeps attrs for reduction operations result2 = ar.argmax(dim=["x"], skipna=False) for key in expected2: assert_identical(result2[key], expected2[key]) result3 = ar.argmax(...) 
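        # Editor's note (hedged): with dim=..., argmax reduces over both "y"
        # and "x" at once, so result3 is a dict holding the 2-D position of
        # the overall maximum; the block below rebuilds that position from the
        # per-row maxindex fixture to compare against it.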
# TODO: remove cast once argmax typing is overloaded max_xind = cast(DataArray, ar.isel(expected0).argmax()) expected3 = { "y": DataArray(max_xind, attrs=self.attrs), "x": DataArray(maxindex[max_xind.item()], attrs=self.attrs), } for key in expected3: assert_identical(result3[key], expected3[key]) @pytest.mark.parametrize( "x, minindices_x, minindices_y, minindices_z, minindices_xy, " "minindices_xz, minindices_yz, minindices_xyz, maxindices_x, " "maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, " "maxindices_xyz, nanindices_x, nanindices_y, nanindices_z, nanindices_xy, " "nanindices_xz, nanindices_yz, nanindices_xyz", [ pytest.param( np.array( [ [[0, 1, 2, 0], [-2, -4, 2, 0]], [[1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, -10, 5], [20, 0, 0, 0]], ] ), {"x": np.array([[0, 2, 2, 0], [0, 0, 2, 0]])}, {"y": np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])}, {"z": np.array([[0, 1], [0, 0], [2, 1]])}, {"x": np.array([0, 0, 2, 0]), "y": np.array([1, 1, 0, 0])}, {"x": np.array([2, 0]), "z": np.array([2, 1])}, {"y": np.array([1, 0, 0]), "z": np.array([1, 0, 2])}, {"x": np.array(2), "y": np.array(0), "z": np.array(2)}, {"x": np.array([[1, 0, 0, 2], [2, 1, 0, 1]])}, {"y": np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]])}, {"z": np.array([[2, 2], [0, 0], [3, 0]])}, {"x": np.array([2, 0, 0, 2]), "y": np.array([1, 0, 0, 0])}, {"x": np.array([2, 2]), "z": np.array([3, 0])}, {"y": np.array([0, 0, 1]), "z": np.array([2, 0, 0])}, {"x": np.array(2), "y": np.array(1), "z": np.array(0)}, {"x": np.array([[None, None, None, None], [None, None, None, None]])}, { "y": np.array( [ [None, None, None, None], [None, None, None, None], [None, None, None, None], ] ) }, {"z": np.array([[None, None], [None, None], [None, None]])}, { "x": np.array([None, None, None, None]), "y": np.array([None, None, None, None]), }, {"x": np.array([None, None]), "z": np.array([None, None])}, {"y": np.array([None, None, None]), "z": np.array([None, None, None])}, {"x": np.array(None), "y": np.array(None), "z": np.array(None)}, id="int", ), pytest.param( np.array( [ [[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]], [[-4.0, np.nan, 2.0, np.nan], [-2.0, -4.0, 2.0, 0.0]], [[np.nan] * 4, [np.nan] * 4], ] ), {"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])}, {"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])}, {"x": np.array([1, 0]), "z": np.array([0, 1])}, {"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])}, {"x": np.array(0), "y": np.array(1), "z": np.array(1)}, {"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])}, {"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([0, 0]), "z": np.array([2, 2])}, {"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])}, {"x": np.array(0), "y": np.array(0), "z": np.array(0)}, {"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])}, { "y": np.array( [[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]] ) }, {"z": np.array([[None, None], [1, None], [0, 0]])}, {"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([1, 2]), "z": np.array([1, 0])}, {"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])}, {"x": np.array(1), "y": np.array(0), "z": np.array(1)}, id="nan", ), pytest.param( np.array( [ [[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]], [[-4.0, np.nan, 2.0, 
np.nan], [-2.0, -4.0, 2.0, 0.0]], [[np.nan] * 4, [np.nan] * 4], ] ).astype("object"), {"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])}, {"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])}, {"x": np.array([1, 0]), "z": np.array([0, 1])}, {"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])}, {"x": np.array(0), "y": np.array(1), "z": np.array(1)}, {"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])}, {"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([0, 0]), "z": np.array([2, 2])}, {"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])}, {"x": np.array(0), "y": np.array(0), "z": np.array(0)}, {"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])}, { "y": np.array( [[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]] ) }, {"z": np.array([[None, None], [1, None], [0, 0]])}, {"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([1, 2]), "z": np.array([1, 0])}, {"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])}, {"x": np.array(1), "y": np.array(0), "z": np.array(1)}, id="obj", ), pytest.param( np.array( [ [["2015-12-31", "2020-01-02"], ["2020-01-01", "2016-01-01"]], [["2020-01-02", "2020-01-02"], ["2020-01-02", "2020-01-02"]], [["1900-01-01", "1-02-03"], ["1900-01-02", "1-02-03"]], ], dtype="datetime64[ns]", ), {"x": np.array([[2, 2], [2, 2]])}, {"y": np.array([[0, 1], [0, 0], [0, 0]])}, {"z": np.array([[0, 1], [0, 0], [1, 1]])}, {"x": np.array([2, 2]), "y": np.array([0, 0])}, {"x": np.array([2, 2]), "z": np.array([1, 1])}, {"y": np.array([0, 0, 0]), "z": np.array([0, 0, 1])}, {"x": np.array(2), "y": np.array(0), "z": np.array(1)}, {"x": np.array([[1, 0], [1, 1]])}, {"y": np.array([[1, 0], [0, 0], [1, 0]])}, {"z": np.array([[1, 0], [0, 0], [0, 0]])}, {"x": np.array([1, 0]), "y": np.array([0, 0])}, {"x": np.array([0, 1]), "z": np.array([1, 0])}, {"y": np.array([0, 0, 1]), "z": np.array([1, 0, 0])}, {"x": np.array(0), "y": np.array(0), "z": np.array(1)}, {"x": np.array([[None, None], [None, None]])}, {"y": np.array([[None, None], [None, None], [None, None]])}, {"z": np.array([[None, None], [None, None], [None, None]])}, {"x": np.array([None, None]), "y": np.array([None, None])}, {"x": np.array([None, None]), "z": np.array([None, None])}, {"y": np.array([None, None, None]), "z": np.array([None, None, None])}, {"x": np.array(None), "y": np.array(None), "z": np.array(None)}, id="datetime", ), ], ) class TestReduce3D(TestReduce): def test_argmin_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = 
xr.DataArray( x, dims=["x", "y", "z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ minindices_x, minindices_y, minindices_z, minindices_xy, minindices_xz, minindices_yz, minindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmin(dim=list(inds)) return result0 = ar.argmin(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in minindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmin(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in minindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmin(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in minindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmin(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z"), attrs=self.attrs) for key, value in minindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmin(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y"), attrs=self.attrs) for key, value in minindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmin(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x"), attrs=self.attrs) for key, value in minindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmin(...) 
assert isinstance(result6, dict) expected6 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in minindices_xyz.items() } for key in expected6: assert_identical(result6[key], expected6[key]) minindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 minindices_x[key], nanindices_x[key], ) for key in minindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in minindices_x.items() } result7 = ar.argmin(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) minindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 minindices_y[key], nanindices_y[key], ) for key in minindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in minindices_y.items() } result8 = ar.argmin(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), expected8[key]) minindices_z = { key: xr.where( nanindices_z[key] == None, # noqa: E711 minindices_z[key], nanindices_z[key], ) for key in minindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in minindices_z.items() } result9 = ar.argmin(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) minindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 minindices_xy[key], nanindices_xy[key], ) for key in minindices_xy } expected10 = { key: xr.DataArray(value, dims="z", attrs=self.attrs) for key, value in minindices_xy.items() } result10 = ar.argmin(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) minindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 minindices_xz[key], nanindices_xz[key], ) for key in minindices_xz } expected11 = { key: xr.DataArray(value, dims="y", attrs=self.attrs) for key, value in minindices_xz.items() } result11 = ar.argmin(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) minindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 minindices_yz[key], nanindices_yz[key], ) for key in minindices_yz } expected12 = { key: xr.DataArray(value, dims="x", attrs=self.attrs) for key, value in minindices_yz.items() } result12 = ar.argmin(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) minindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 minindices_xyz[key], nanindices_xyz[key], ) for key in minindices_xyz } expected13 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in minindices_xyz.items() } result13 = ar.argmin(..., skipna=False) assert isinstance(result13, dict) for key in expected13: assert_identical(result13[key], expected13[key]) def test_argmax_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], 
maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = xr.DataArray( x, dims=["x", "y", "z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ maxindices_x, maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, maxindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmax(dim=list(inds)) return result0 = ar.argmax(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in maxindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmax(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in maxindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmax(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in maxindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmax(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z"), attrs=self.attrs) for key, value in maxindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmax(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y"), attrs=self.attrs) for key, value in maxindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmax(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x"), attrs=self.attrs) for key, value in maxindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmax(...) 
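        # Note: argmax follows the same dict-returning contract as argmin above. The
        # skipna=False blocks below build their expectations with xr.where by
        # substituting the NaN's position (from the nanindices_* fixtures) wherever
        # a NaN exists along the reduced dimensions, since with skipna=False the
        # returned index points at the NaN rather than at the largest finite value.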
assert isinstance(result6, dict) expected6 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in maxindices_xyz.items() } for key in expected6: assert_identical(result6[key], expected6[key]) maxindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 maxindices_x[key], nanindices_x[key], ) for key in maxindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z"), attrs=self.attrs) for key, value in maxindices_x.items() } result7 = ar.argmax(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) maxindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 maxindices_y[key], nanindices_y[key], ) for key in maxindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z"), attrs=self.attrs) for key, value in maxindices_y.items() } result8 = ar.argmax(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), expected8[key]) maxindices_z = { key: xr.where( nanindices_z[key] == None, # noqa: E711 maxindices_z[key], nanindices_z[key], ) for key in maxindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y"), attrs=self.attrs) for key, value in maxindices_z.items() } result9 = ar.argmax(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) maxindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 maxindices_xy[key], nanindices_xy[key], ) for key in maxindices_xy } expected10 = { key: xr.DataArray(value, dims="z", attrs=self.attrs) for key, value in maxindices_xy.items() } result10 = ar.argmax(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) maxindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 maxindices_xz[key], nanindices_xz[key], ) for key in maxindices_xz } expected11 = { key: xr.DataArray(value, dims="y", attrs=self.attrs) for key, value in maxindices_xz.items() } result11 = ar.argmax(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) maxindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 maxindices_yz[key], nanindices_yz[key], ) for key in maxindices_yz } expected12 = { key: xr.DataArray(value, dims="x", attrs=self.attrs) for key, value in maxindices_yz.items() } result12 = ar.argmax(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) maxindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 maxindices_xyz[key], nanindices_xyz[key], ) for key in maxindices_xyz } expected13 = { key: xr.DataArray(value, attrs=self.attrs) for key, value in maxindices_xyz.items() } result13 = ar.argmax(..., skipna=False) assert isinstance(result13, dict) for key in expected13: assert_identical(result13[key], expected13[key]) class TestReduceND(TestReduce): @pytest.mark.parametrize("op", ["idxmin", "idxmax"]) @pytest.mark.parametrize("ndim", [3, 5]) def test_idxminmax_dask(self, op: str, ndim: int) -> None: if not has_dask: pytest.skip("requires dask") ar0_raw = xr.DataArray( np.random.random_sample(size=[10] * ndim), dims=list("abcdefghij"[: ndim - 1]) + ["x"], coords={"x": np.arange(10)}, attrs=self.attrs, ) ar0_dsk = 
ar0_raw.chunk({}) # Assert idx is the same with dask and without assert_equal(getattr(ar0_dsk, op)(dim="x"), getattr(ar0_raw, op)(dim="x")) @pytest.mark.parametrize("da", ("repeating_ints",), indirect=True) def test_isin(da) -> None: expected = DataArray( np.asarray([[0, 0, 0], [1, 0, 0]]), dims=list("yx"), coords={"x": list("abc"), "y": list("de")}, ).astype("bool") result = da.isin([3]).sel(y=list("de"), z=0) assert_equal(result, expected) expected = DataArray( np.asarray([[0, 0, 1], [1, 0, 0]]), dims=list("yx"), coords={"x": list("abc"), "y": list("de")}, ).astype("bool") result = da.isin([2, 3]).sel(y=list("de"), z=0) assert_equal(result, expected) def test_raise_no_warning_for_nan_in_binary_ops() -> None: with assert_no_warnings(): _ = xr.DataArray([1, 2, np.nan]) > 0 def test_binary_ops_attrs_drop_conflicts() -> None: # Test that binary operations combine attrs with drop_conflicts behavior attrs_a = {"units": "meters", "long_name": "distance", "source": "sensor_a"} attrs_b = {"units": "feet", "resolution": "high", "source": "sensor_b"} da1 = xr.DataArray([1, 2, 3], attrs=attrs_a) da2 = xr.DataArray([4, 5, 6], attrs=attrs_b) # With keep_attrs=True (default), should combine attrs dropping conflicts result = da1 + da2 # "units" and "source" conflict, so they're dropped # "long_name" only in da1, "resolution" only in da2, so they're kept assert result.attrs == {"long_name": "distance", "resolution": "high"} # Test with identical values for some attrs attrs_c = {"units": "meters", "type": "data", "source": "sensor_c"} da3 = xr.DataArray([7, 8, 9], attrs=attrs_c) result2 = da1 + da3 # "units" has same value, so kept; "source" conflicts, so dropped # "long_name" from da1, "type" from da3 assert result2.attrs == {"units": "meters", "long_name": "distance", "type": "data"} # With keep_attrs=False, attrs should be empty with xr.set_options(keep_attrs=False): result3 = da1 + da2 assert result3.attrs == {} @pytest.mark.filterwarnings("error") def test_no_warning_for_all_nan() -> None: _ = xr.DataArray([np.nan, np.nan]).mean() def test_name_in_masking() -> None: name = "RingoStarr" da = xr.DataArray(range(10), coords=[("x", range(10))], name=name) assert da.where(da > 5).name == name assert da.where((da > 5).rename("YokoOno")).name == name assert da.where(da > 5, drop=True).name == name assert da.where((da > 5).rename("YokoOno"), drop=True).name == name class TestIrisConversion: @requires_iris def test_to_and_from_iris(self) -> None: import cf_units # iris requirement import iris # to iris coord_dict: dict[Hashable, Any] = {} coord_dict["distance"] = ("distance", [-2, 2], {"units": "meters"}) coord_dict["time"] = ("time", pd.date_range("2000-01-01", periods=3)) coord_dict["height"] = 10 coord_dict["distance2"] = ("distance", [0, 1], {"foo": "bar"}) coord_dict["time2"] = (("distance", "time"), [[0, 1, 2], [2, 3, 4]]) original = DataArray( np.arange(6, dtype="float").reshape(2, 3), coord_dict, name="Temperature", attrs={ "baz": 123, "units": "Kelvin", "standard_name": "fire_temperature", "long_name": "Fire Temperature", }, dims=("distance", "time"), ) # Set a bad value to test the masking logic original.data[0, 2] = np.nan original.attrs["cell_methods"] = "height: mean (comment: A cell method)" actual = original.to_iris() assert_array_equal(actual.data, original.data) assert actual.var_name == original.name assert tuple(d.var_name for d in actual.dim_coords) == original.dims assert actual.cell_methods == ( iris.coords.CellMethod( method="mean", coords=("height",), intervals=(), comments=("A cell 
method",), ), ) for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( coord.points, CFDatetimeCoder().encode(original_coord.variable).values ) assert actual.coord_dims(coord) == original.get_axis_num( original.coords[coord.var_name].dims ) assert ( actual.coord("distance2").attributes["foo"] == original.coords["distance2"].attrs["foo"] ) assert actual.coord("distance").units == cf_units.Unit( original.coords["distance"].units ) assert actual.attributes["baz"] == original.attrs["baz"] assert actual.standard_name == original.attrs["standard_name"] roundtripped = DataArray.from_iris(actual) assert_identical(original, roundtripped) actual.remove_coord("time") auto_time_dimension = DataArray.from_iris(actual) assert auto_time_dimension.dims == ("distance", "dim_1") @requires_iris @requires_dask def test_to_and_from_iris_dask(self) -> None: import cf_units # iris requirement import dask.array as da import iris coord_dict: dict[Hashable, Any] = {} coord_dict["distance"] = ("distance", [-2, 2], {"units": "meters"}) coord_dict["time"] = ("time", pd.date_range("2000-01-01", periods=3)) coord_dict["height"] = 10 coord_dict["distance2"] = ("distance", [0, 1], {"foo": "bar"}) coord_dict["time2"] = (("distance", "time"), [[0, 1, 2], [2, 3, 4]]) original = DataArray( da.from_array(np.arange(-1, 5, dtype="float").reshape(2, 3), 3), coord_dict, name="Temperature", attrs=dict( baz=123, units="Kelvin", standard_name="fire_temperature", long_name="Fire Temperature", ), dims=("distance", "time"), ) # Set a bad value to test the masking logic original.data = da.ma.masked_less(original.data, 0) original.attrs["cell_methods"] = "height: mean (comment: A cell method)" actual = original.to_iris() # Be careful not to trigger the loading of the iris data actual_data = ( actual.core_data() if hasattr(actual, "core_data") else actual.data ) assert_array_equal(actual_data, original.data) assert actual.var_name == original.name assert tuple(d.var_name for d in actual.dim_coords) == original.dims assert actual.cell_methods == ( iris.coords.CellMethod( method="mean", coords=("height",), intervals=(), comments=("A cell method",), ), ) for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( coord.points, CFDatetimeCoder().encode(original_coord.variable).values ) assert actual.coord_dims(coord) == original.get_axis_num( original.coords[coord.var_name].dims ) assert ( actual.coord("distance2").attributes["foo"] == original.coords["distance2"].attrs["foo"] ) assert actual.coord("distance").units == cf_units.Unit( original.coords["distance"].units ) assert actual.attributes["baz"] == original.attrs["baz"] assert actual.standard_name == original.attrs["standard_name"] roundtripped = DataArray.from_iris(actual) assert_identical(original, roundtripped) # If the Iris version supports it then we should have a dask array # at each stage of the conversion if hasattr(actual, "core_data"): assert isinstance(original.data, type(actual.core_data())) assert isinstance(original.data, type(roundtripped.data)) actual.remove_coord("time") auto_time_dimension = DataArray.from_iris(actual) assert auto_time_dimension.dims == ("distance", "dim_1") @requires_iris @pytest.mark.parametrize( "var_name, std_name, long_name, name, attrs", [ ( "var_name", "height", "Height", 
"var_name", {"standard_name": "height", "long_name": "Height"}, ), ( None, "height", "Height", "height", {"standard_name": "height", "long_name": "Height"}, ), (None, None, "Height", "Height", {"long_name": "Height"}), (None, None, None, None, {}), ], ) def test_da_name_from_cube( self, std_name, long_name, var_name, name, attrs ) -> None: from iris.cube import Cube cube = Cube([], var_name=var_name, standard_name=std_name, long_name=long_name) result = xr.DataArray.from_iris(cube) expected = xr.DataArray([], name=name, attrs=attrs) xr.testing.assert_identical(result, expected) @requires_iris @pytest.mark.parametrize( "var_name, std_name, long_name, name, attrs", [ ( "var_name", "height", "Height", "var_name", {"standard_name": "height", "long_name": "Height"}, ), ( None, "height", "Height", "height", {"standard_name": "height", "long_name": "Height"}, ), (None, None, "Height", "Height", {"long_name": "Height"}), (None, None, None, "unknown", {}), ], ) def test_da_coord_name_from_cube( self, std_name, long_name, var_name, name, attrs ) -> None: from iris.coords import DimCoord from iris.cube import Cube latitude = DimCoord( [-90, 0, 90], standard_name=std_name, var_name=var_name, long_name=long_name ) data = [0, 0, 0] cube = Cube(data, dim_coords_and_dims=[(latitude, 0)]) result = xr.DataArray.from_iris(cube) expected = xr.DataArray(data, coords=[(name, [-90, 0, 90], attrs)]) xr.testing.assert_identical(result, expected) @requires_iris def test_prevent_duplicate_coord_names(self) -> None: from iris.coords import DimCoord from iris.cube import Cube # Iris enforces unique coordinate names. Because we use a different # name resolution order a valid iris Cube with coords that have the # same var_name would lead to duplicate dimension names in the # DataArray longitude = DimCoord([0, 360], standard_name="longitude", var_name="duplicate") latitude = DimCoord( [-90, 0, 90], standard_name="latitude", var_name="duplicate" ) data = [[0, 0, 0], [0, 0, 0]] cube = Cube(data, dim_coords_and_dims=[(longitude, 0), (latitude, 1)]) with pytest.raises(ValueError): xr.DataArray.from_iris(cube) @requires_iris @pytest.mark.parametrize( "coord_values", [["IA", "IL", "IN"], [0, 2, 1]], # non-numeric values # non-monotonic values ) def test_fallback_to_iris_AuxCoord(self, coord_values) -> None: from iris.coords import AuxCoord from iris.cube import Cube data = [0, 0, 0] da = xr.DataArray(data, coords=[coord_values], dims=["space"]) result = xr.DataArray.to_iris(da) expected = Cube( data, aux_coords_and_dims=[(AuxCoord(coord_values, var_name="space"), 0)] ) assert result == expected def test_no_dict() -> None: d = DataArray() with pytest.raises(AttributeError): _ = d.__dict__ def test_subclass_slots() -> None: """Test that DataArray subclasses must explicitly define ``__slots__``. .. note:: As of 0.13.0, this is actually mitigated into a FutureWarning for any class defined outside of the xarray package. """ with pytest.raises(AttributeError) as e: class MyArray(DataArray): pass assert str(e.value) == "MyArray must explicitly define __slots__" def test_weakref() -> None: """Classes with __slots__ are incompatible with the weakref module unless they explicitly state __weakref__ among their slots """ from weakref import ref a = DataArray(1) r = ref(a) assert r() is a def test_delete_coords() -> None: """Make sure that deleting a coordinate doesn't corrupt the DataArray. See issue #3899. Also test that deleting succeeds and produces the expected output. 
""" a0 = DataArray( np.array([[1, 2, 3], [4, 5, 6]]), dims=["y", "x"], coords={"x": ["a", "b", "c"], "y": [-1, 1]}, ) assert_identical(a0, a0) a1 = a0.copy() del a1.coords["y"] # This test will detect certain sorts of corruption in the DataArray assert_identical(a0, a0) assert a0.dims == ("y", "x") assert a1.dims == ("y", "x") assert set(a0.coords.keys()) == {"x", "y"} assert set(a1.coords.keys()) == {"x"} def test_deepcopy_nested_attrs() -> None: """Check attrs deep copy, see :issue:`2835`""" da1 = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"), coords={"x": [10, 20]}) da1.attrs["flat"] = "0" da1.attrs["nested"] = {"level1a": "1", "level1b": "1"} da2 = da1.copy(deep=True) da2.attrs["new"] = "2" da2.attrs.update({"new2": "2"}) da2.attrs["flat"] = "2" da2.attrs["nested"]["level1a"] = "2" da2.attrs["nested"].update({"level1b": "2"}) # Coarse test assert not da1.identical(da2) # Check attrs levels assert da1.attrs["flat"] != da2.attrs["flat"] assert da1.attrs["nested"] != da2.attrs["nested"] assert "new" not in da1.attrs assert "new2" not in da1.attrs def test_deepcopy_obj_array() -> None: x0 = DataArray(np.array([object()])) x1 = deepcopy(x0) assert x0.values[0] is not x1.values[0] def test_deepcopy_recursive() -> None: # GH:issue:7111 # direct recursion da = xr.DataArray([1, 2], dims=["x"]) da.attrs["other"] = da # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError da.copy(deep=True) # indirect recursion da2 = xr.DataArray([5, 6], dims=["y"]) da.attrs["other"] = da2 da2.attrs["other"] = da # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError da.copy(deep=True) da2.copy(deep=True) def test_clip(da: DataArray) -> None: with raise_if_dask_computes(): result = da.clip(min=0.5) assert result.min() >= 0.5 result = da.clip(max=0.5) assert result.max() <= 0.5 result = da.clip(min=0.25, max=0.75) assert result.min() >= 0.25 assert result.max() <= 0.75 with raise_if_dask_computes(): result = da.clip(min=da.mean("x"), max=da.mean("a")) assert result.dims == da.dims assert_array_equal( result.data, np.clip(da.data, da.mean("x").data[:, :, np.newaxis], da.mean("a").data), ) with_nans = da.isel(time=[0, 1]).reindex_like(da) with raise_if_dask_computes(): result = da.clip(min=da.mean("x"), max=da.mean("a")) result = da.clip(with_nans) # The values should be the same where there were NaNs. assert_array_equal(result.isel(time=[0, 1]), with_nans.isel(time=[0, 1])) # Unclear whether we want this work, OK to adjust the test when we have decided. 
with pytest.raises(ValueError, match=r"cannot reindex or align along dimension.*"): result = da.clip(min=da.mean("x"), max=da.mean("a").isel(x=[0, 1])) class TestDropDuplicates: @pytest.mark.parametrize("keep", ["first", "last", False]) def test_drop_duplicates_1d(self, keep) -> None: da = xr.DataArray( [0, 5, 6, 7], dims="time", coords={"time": [0, 0, 1, 2]}, name="test" ) if keep == "first": data = [0, 6, 7] time = [0, 1, 2] elif keep == "last": data = [5, 6, 7] time = [0, 1, 2] else: data = [6, 7] time = [1, 2] expected = xr.DataArray(data, dims="time", coords={"time": time}, name="test") result = da.drop_duplicates("time", keep=keep) assert_equal(expected, result) with pytest.raises( ValueError, match=re.escape( "Dimensions ('space',) not found in data dimensions ('time',)" ), ): da.drop_duplicates("space", keep=keep) def test_drop_duplicates_2d(self) -> None: da = xr.DataArray( [[0, 5, 6, 7], [2, 1, 3, 4]], dims=["space", "time"], coords={"space": [10, 10], "time": [0, 0, 1, 2]}, name="test", ) expected = xr.DataArray( [[0, 6, 7]], dims=["space", "time"], coords={"time": ("time", [0, 1, 2]), "space": ("space", [10])}, name="test", ) result = da.drop_duplicates(["time", "space"], keep="first") assert_equal(expected, result) result = da.drop_duplicates(..., keep="first") assert_equal(expected, result) class TestNumpyCoercion: # TODO once flexible indexes refactor complete also test coercion of dimension coords def test_from_numpy(self) -> None: da = xr.DataArray([1, 2, 3], dims="x", coords={"lat": ("x", [4, 5, 6])}) assert_identical(da.as_numpy(), da) np.testing.assert_equal(da.to_numpy(), np.array([1, 2, 3])) np.testing.assert_equal(da["lat"].to_numpy(), np.array([4, 5, 6])) def test_to_numpy(self) -> None: arr = np.array([1, 2, 3]) da = xr.DataArray(arr, dims="x", coords={"lat": ("x", [4, 5, 6])}) with assert_no_warnings(): np.testing.assert_equal(np.asarray(da), arr) np.testing.assert_equal(np.array(da), arr) @requires_dask def test_from_dask(self) -> None: da = xr.DataArray([1, 2, 3], dims="x", coords={"lat": ("x", [4, 5, 6])}) da_chunked = da.chunk(1) assert_identical(da_chunked.as_numpy(), da.compute()) np.testing.assert_equal(da.to_numpy(), np.array([1, 2, 3])) np.testing.assert_equal(da["lat"].to_numpy(), np.array([4, 5, 6])) @requires_pint def test_from_pint(self) -> None: from pint import Quantity arr = np.array([1, 2, 3]) da = xr.DataArray( Quantity(arr, units="Pa"), dims="x", coords={"lat": ("x", Quantity(arr + 3, units="m"))}, ) expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr + 3)}) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) np.testing.assert_equal(da["lat"].to_numpy(), arr + 3) @requires_sparse def test_from_sparse(self) -> None: import sparse arr = np.diagflat([1, 2, 3]) sparr = sparse.COO.from_numpy(arr) da = xr.DataArray( sparr, dims=["x", "y"], coords={"elev": (("x", "y"), sparr + 3)} ) expected = xr.DataArray( arr, dims=["x", "y"], coords={"elev": (("x", "y"), arr + 3)} ) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) @requires_cupy def test_from_cupy(self) -> None: import cupy as cp arr = np.array([1, 2, 3]) da = xr.DataArray( cp.array(arr), dims="x", coords={"lat": ("x", cp.array(arr + 3))} ) expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr + 3)}) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) @requires_dask @requires_pint def test_from_pint_wrapping_dask(self) -> None: import dask from pint import Quantity arr = 
np.array([1, 2, 3]) d = dask.array.from_array(arr) da = xr.DataArray( Quantity(d, units="Pa"), dims="x", coords={"lat": ("x", Quantity(d, units="m") * 2)}, ) result = da.as_numpy() result.name = None # remove dask-assigned name expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr * 2)}) assert_identical(result, expected) np.testing.assert_equal(da.to_numpy(), arr) class TestStackEllipsis: # https://github.com/pydata/xarray/issues/6051 def test_result_as_expected(self) -> None: da = DataArray([[1, 2], [1, 2]], dims=("x", "y")) result = da.stack(flat=[...]) expected = da.stack(flat=da.dims) assert_identical(result, expected) def test_error_on_ellipsis_without_list(self) -> None: da = DataArray([[1, 2], [1, 2]], dims=("x", "y")) with pytest.raises(ValueError): da.stack(flat=...) # type: ignore[arg-type] def test_nD_coord_dataarray() -> None: # should succeed da = DataArray( np.ones((2, 4)), dims=("x", "y"), coords={ "x": (("x", "y"), np.arange(8).reshape((2, 4))), "y": ("y", np.arange(4)), }, ) _assert_internal_invariants(da, check_default_indexes=True) da2 = DataArray(np.ones(4), dims=("y"), coords={"y": ("y", np.arange(4))}) da3 = DataArray(np.ones(4), dims=("z")) _, actual = xr.align(da, da2) assert_identical(da2, actual) expected = da.drop_vars("x") _, actual = xr.broadcast(da, da2) assert_identical(expected, actual) actual, _ = xr.broadcast(da, da3) expected = da.expand_dims(z=4, axis=-1) assert_identical(actual, expected) da4 = DataArray(np.ones((2, 4)), coords={"x": 0}, dims=["x", "y"]) _assert_internal_invariants(da4, check_default_indexes=True) assert "x" not in da4.xindexes assert "x" in da4.coords def test_lazy_data_variable_not_loaded(): # GH8753 array = InaccessibleArray(np.array([1, 2, 3])) v = Variable(data=array, dims="x") # No data needs to be accessed, so no error should be raised da = xr.DataArray(v) # No data needs to be accessed, so no error should be raised xr.DataArray(da) def test_unstack_index_var() -> None: source = xr.DataArray(range(2), dims=["x"], coords=[["a", "b"]]) da = source.x da = da.assign_coords(y=("x", ["c", "d"]), z=("x", ["e", "f"])) da = da.set_index(x=["y", "z"]) actual = da.unstack("x") expected = xr.DataArray( np.array([["a", np.nan], [np.nan, "b"]], dtype=object), coords={"y": ["c", "d"], "z": ["e", "f"]}, name="x", ) assert_identical(actual, expected) xarray-2025.12.0/xarray/tests/test_dataarray_typing.yml000066400000000000000000000207251511464676000231450ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import DataArray da = DataArray().pipe(lambda data: data) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import DataArray da = DataArray().pipe(lambda data, arg: arg, "foo") reveal_type(da) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import DataArray answer = DataArray().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import DataArray # Call to pipe missing argument for lambda parameter `arg` da = DataArray().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: 
P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import DataArray # Call to pipe with extra argument for lambda da = DataArray().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import DataArray def f(da: DataArray, arg: int) -> DataArray: return da # Call to pipe missing argument for function parameter `arg` da = DataArray().pipe(f) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[DataArray, int], DataArray]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import DataArray def f(da: DataArray, arg: int) -> DataArray: return da # Call to pipe missing keyword for kwonly parameter `kwonly` da = DataArray().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int], DataArray]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe missing argument for kwonly parameter `kwonly` da = DataArray().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int, NamedArg(int, 'kwonly')], DataArray]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe missing keyword for kwonly parameter `kwonly` da = DataArray().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int, NamedArg(int, 'kwonly')], DataArray]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword skip: True # mypy 1.18+ outputs "defined here" notes without line numbers (e.g., "xarray/core/common.py: note:...") # pytest-mypy-plugins 
expects all lines to match "file:line: severity: message" format and can't parse these notes. # This is a mypy behavior, not a bug. The test would need pytest-mypy-plugins to support notes without line numbers. main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe using wrong keyword: `kw` instead of `kwonly` da = DataArray().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords" [call-arg] # Note: mypy 1.18.1 also outputs: xarray/core/common: note: "pipe" of "DataWithCoords" defined here - case: test_mypy_pipe_tuple_return_type_dataarray main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da da = DataArray().pipe((f, "da"), 42) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import DataArray def f(arg: int, da: DataArray) -> int: return arg answer = DataArray().pipe((f, "da"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. da = DataArray().pipe((f, "da")) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. da = DataArray().pipe(lambda data, arg: f(arg, data)) out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], DataArray]" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_tuple_extra_arg main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are providing too many args for `f`, so we get no error here. da = DataArray().pipe((f, "da"), 42, "foo") reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we passed too many arguments. 
da = DataArray().pipe(lambda data, arg: f(arg, data), 42, "foo") out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any, Any], DataArray]", "int", "str" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T xarray-2025.12.0/xarray/tests/test_dataset.py000066400000000000000000011275111511464676000210610ustar00rootroot00000000000000from __future__ import annotations import pickle import re import sys import warnings from collections.abc import Hashable from copy import copy, deepcopy from io import StringIO from textwrap import dedent from typing import Any, Literal, cast import numpy as np import pandas as pd import pytest from packaging.version import Version from pandas.core.indexes.datetimes import DatetimeIndex # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] import contextlib from pandas.errors import UndefinedVariableError import xarray as xr from xarray import ( AlignmentError, DataArray, Dataset, IndexVariable, MergeError, Variable, align, backends, broadcast, open_dataset, set_options, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core import dtypes, indexing, utils from xarray.core.common import duck_array_ops, full_like from xarray.core.coordinates import Coordinates, DatasetCoordinates from xarray.core.indexes import Index, PandasIndex from xarray.core.types import ArrayLike from xarray.core.utils import is_scalar from xarray.groupers import SeasonResampler, TimeResampler from xarray.namedarray.pycompat import array_type, integer_types from xarray.testing import _assert_internal_invariants from xarray.tests import ( DuckArrayWrapper, InaccessibleArray, UnexpectedDataAccess, assert_allclose, assert_array_equal, assert_equal, assert_identical, assert_no_warnings, assert_writeable, create_test_data, has_cftime, has_dask, has_pyarrow, raise_if_dask_computes, requires_bottleneck, requires_cftime, requires_cupy, requires_dask, requires_numexpr, requires_pint, requires_pyarrow, requires_scipy, requires_sparse, source_ndarray, ) from xarray.tests.indexes import ScalarIndex, XYIndex with contextlib.suppress(ImportError): import dask.array as da # from numpy version 2.0 trapz is deprecated and renamed to trapezoid # remove once numpy 2.0 is the oldest supported version try: from numpy import trapezoid # type: ignore[attr-defined,unused-ignore] except ImportError: from numpy import ( # type: ignore[arg-type,no-redef,attr-defined,unused-ignore] trapz as trapezoid, ) sparse_array_type = array_type("sparse") pytestmark = [ pytest.mark.filterwarnings("error:Mean of empty slice"), pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"), ] def create_append_test_data(seed=None) -> tuple[Dataset, Dataset, Dataset]: rs = np.random.default_rng(seed) lat = [2, 1, 0] lon = [0, 1, 2] nt1 = 3 nt2 = 2 time1 = pd.date_range("2000-01-01", periods=nt1).as_unit("ns") time2 = pd.date_range("2000-02-01", periods=nt2).as_unit("ns") string_var = np.array(["a", "bc", "def"], dtype=object) string_var_to_append = np.array(["asdf", "asdfg"], dtype=object) string_var_fixed_length = np.array(["aa", "bb", "cc"], dtype="|S2") string_var_fixed_length_to_append = 
np.array(["dd", "ee"], dtype="|S2") unicode_var = np.array(["รกรณ", "รกรณ", "รกรณ"]) datetime_var = np.array( ["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[ns]" ) datetime_var_to_append = np.array( ["2019-01-04", "2019-01-05"], dtype="datetime64[ns]" ) bool_var = np.array([True, False, True], dtype=bool) bool_var_to_append = np.array([False, True], dtype=bool) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Converting non-default") ds = xr.Dataset( data_vars={ "da": xr.DataArray( rs.random((3, 3, nt1)), coords=[lat, lon, time1], dims=["lat", "lon", "time"], ), "string_var": ("time", string_var), "string_var_fixed_length": ("time", string_var_fixed_length), "unicode_var": ("time", unicode_var), "datetime_var": ("time", datetime_var), "bool_var": ("time", bool_var), } ) ds_to_append = xr.Dataset( data_vars={ "da": xr.DataArray( rs.random((3, 3, nt2)), coords=[lat, lon, time2], dims=["lat", "lon", "time"], ), "string_var": ("time", string_var_to_append), "string_var_fixed_length": ("time", string_var_fixed_length_to_append), "unicode_var": ("time", unicode_var[:nt2]), "datetime_var": ("time", datetime_var_to_append), "bool_var": ("time", bool_var_to_append), } ) ds_with_new_var = xr.Dataset( data_vars={ "new_var": xr.DataArray( rs.random((3, 3, nt1 + nt2)), coords=[lat, lon, time1.append(time2)], dims=["lat", "lon", "time"], ) } ) assert_writeable(ds) assert_writeable(ds_to_append) assert_writeable(ds_with_new_var) return ds, ds_to_append, ds_with_new_var def create_append_string_length_mismatch_test_data(dtype) -> tuple[Dataset, Dataset]: def make_datasets(data, data_to_append) -> tuple[Dataset, Dataset]: ds = xr.Dataset( {"temperature": (["time"], data)}, coords={"time": [0, 1, 2]}, ) ds_to_append = xr.Dataset( {"temperature": (["time"], data_to_append)}, coords={"time": [0, 1, 2]} ) assert_writeable(ds) assert_writeable(ds_to_append) return ds, ds_to_append u2_strings = ["ab", "cd", "ef"] u5_strings = ["abc", "def", "ghijk"] s2_strings = np.array(["aa", "bb", "cc"], dtype="|S2") s3_strings = np.array(["aaa", "bbb", "ccc"], dtype="|S3") if dtype == "U": return make_datasets(u2_strings, u5_strings) elif dtype == "S": return make_datasets(s2_strings, s3_strings) else: raise ValueError(f"unsupported dtype {dtype}.") def create_test_multiindex() -> Dataset: mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) return Dataset({}, Coordinates.from_pandas_multiindex(mindex, "x")) def create_test_stacked_array() -> tuple[DataArray, DataArray]: x = DataArray(pd.Index(np.r_[:10], name="x")) y = DataArray(pd.Index(np.r_[:20], name="y")) a = x * y b = x * y * y return a, b class InaccessibleVariableDataStore(backends.InMemoryDataStore): """ Store that does not allow any data access. 
""" def __init__(self): super().__init__() self._indexvars = set() def store(self, variables, *args, **kwargs) -> None: super().store(variables, *args, **kwargs) for k, v in variables.items(): if isinstance(v, IndexVariable): self._indexvars.add(k) def get_variables(self): def lazy_inaccessible(k, v): if k in self._indexvars: return v data = indexing.LazilyIndexedArray(InaccessibleArray(v.values)) return Variable(v.dims, data, v.attrs) return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()} class DuckBackendArrayWrapper(backends.common.BackendArray): """Mimic a BackendArray wrapper around DuckArrayWrapper""" def __init__(self, array): self.array = DuckArrayWrapper(array) self.shape = array.shape self.dtype = array.dtype def get_array(self): return self.array def __getitem__(self, key): return self.array[key.tuple] class AccessibleAsDuckArrayDataStore(backends.InMemoryDataStore): """ Store that returns a duck array, not convertible to numpy array, on read. Modeled after nVIDIA's kvikio. """ def __init__(self): super().__init__() self._indexvars = set() def store(self, variables, *args, **kwargs) -> None: super().store(variables, *args, **kwargs) for k, v in variables.items(): if isinstance(v, IndexVariable): self._indexvars.add(k) def get_variables(self) -> dict[Any, xr.Variable]: def lazy_accessible(k, v) -> xr.Variable: if k in self._indexvars: return v data = indexing.LazilyIndexedArray(DuckBackendArrayWrapper(v.values)) return Variable(v.dims, data, v.attrs) return {k: lazy_accessible(k, v) for k, v in self._variables.items()} class TestDataset: def test_repr(self) -> None: data = create_test_data(seed=123, use_extension_array=True) data.attrs["foo"] = "bar" # need to insert str dtype at runtime to handle different endianness var5 = ( "\n var5 (dim1) int64[pyarrow] 64B 5 9 7 2 6 2 8 1" if has_pyarrow else "" ) expected = dedent( f"""\ Size: 2kB Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8) Coordinates: * dim2 (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 * dim3 (dim3) {data["dim3"].dtype} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' * time (time) datetime64[ns] 160B 2000-01-01 2000-01-02 ... 2000-01-20 numbers (dim3) int64 80B 0 1 2 0 0 1 1 2 2 3 Dimensions without coordinates: dim1 Data variables: var1 (dim1, dim2) float64 576B -0.9891 -0.3678 1.288 ... -0.2116 0.364 var2 (dim1, dim2) float64 576B 0.953 1.52 1.704 ... 0.1347 -0.6423 var3 (dim3, dim1) float64 640B 0.4107 0.9941 0.1665 ... 0.716 1.555 var4 (dim1) category 3{6 if Version(pd.__version__) >= Version("3.0.0dev0") else 2}B b c b a c a c a{var5} Attributes: foo: bar""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) assert expected == actual with set_options(display_width=100): max_len = max(map(len, repr(data).split("\n"))) assert 90 < max_len < 100 expected = dedent( """\ Size: 0B Dimensions: () Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(Dataset()).split("\n")) print(actual) assert expected == actual # verify that ... 
doesn't appear for scalar coordinates data = Dataset({"foo": ("x", np.ones(10))}).mean() expected = dedent( """\ Size: 8B Dimensions: () Data variables: foo float64 8B 1.0""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual # verify long attributes are truncated data = Dataset(attrs={"foo": "bar" * 1000}) assert len(repr(data)) < 1000 def test_repr_multiindex(self) -> None: data = create_test_multiindex() obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: {8 * obj_size + 32}B Dimensions: (x: 4) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2 Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual # verify that long level names are not truncated midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("a_quite_long_level_name", "level_2") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") data = Dataset({}, midx_coords) expected = dedent( f"""\ Size: {8 * obj_size + 32}B Dimensions: (x: 4) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * a_quite_long_level_name (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2 Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual def test_repr_period_index(self) -> None: data = create_test_data(seed=456) data.coords["time"] = pd.period_range("2000-01-01", periods=20, freq="D") # check that creating the repr doesn't raise an error #GH645 repr(data) def test_unicode_data(self) -> None: # regression test for GH834 data = Dataset({"foรธ": ["baยฎ"]}, attrs={"รฅ": "โˆ‘"}) repr(data) # should not raise byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Size: 12B Dimensions: (foรธ: 1) Coordinates: * foรธ (foรธ) {byteorder}U3 12B {"baยฎ"!r} Data variables: *empty* Attributes: รฅ: โˆ‘""" ) actual = str(data) assert expected == actual def test_repr_nep18(self) -> None: class Array: def __init__(self): self.shape = (2,) self.ndim = 1 self.dtype = np.dtype(np.float64) def __array_function__(self, *args, **kwargs): return NotImplemented def __array_ufunc__(self, *args, **kwargs): return NotImplemented def __repr__(self): return "Custom\nArray" dataset = Dataset({"foo": ("x", Array())}) expected = dedent( """\ Size: 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) float64 16B Custom Array""" ) assert expected == repr(dataset) def test_info(self) -> None: ds = create_test_data(seed=123) ds = ds.drop_vars("dim3") # string type prints differently in PY2 vs PY3 ds.attrs["unicode_attr"] = "baยฎ" ds.attrs["string_attr"] = "bar" buf = StringIO() ds.info(buf=buf) expected = dedent( """\ xarray.Dataset { dimensions: \tdim2 = 9 ; \ttime = 20 ; \tdim1 = 8 ; \tdim3 = 10 ; variables: \tfloat64 dim2(dim2) ; \tdatetime64[ns] time(time) ; \tfloat64 var1(dim1, dim2) ; \t\tvar1:foo = variable ; \tfloat64 var2(dim1, dim2) ; \t\tvar2:foo = variable ; \tfloat64 var3(dim3, dim1) ; \t\tvar3:foo = variable ; \tint64 numbers(dim3) ; // global attributes: \t:unicode_attr = baยฎ ; \t:string_attr = bar ; }""" ) actual = buf.getvalue() assert expected == actual buf.close() def test_constructor(self) -> None: x1 = ("x", 2 * np.arange(100)) x2 = ("x", np.arange(1000)) z = (["x", "y"], np.arange(1000).reshape(100, 10)) with pytest.raises(ValueError, match=r"conflicting sizes"): 
Dataset({"a": x1, "b": x2}) with pytest.raises(TypeError, match=r"tuple of form"): Dataset({"x": (1, 2, 3, 4, 5, 6, 7)}) with pytest.raises(ValueError, match=r"already exists as a scalar"): Dataset({"x": 0, "y": ("x", [1, 2, 3])}) # nD coordinate variable "x" sharing name with dimension actual = Dataset({"a": x1, "x": z}) assert "x" not in actual.xindexes _assert_internal_invariants(actual, check_default_indexes=True) # verify handling of DataArrays expected = Dataset({"x": x1, "z": z}) actual = Dataset({"z": expected["z"]}) assert_identical(expected, actual) def test_constructor_1d(self) -> None: expected = Dataset({"x": (["x"], 5.0 + np.arange(5))}) actual = Dataset({"x": 5.0 + np.arange(5)}) assert_identical(expected, actual) actual = Dataset({"x": [5, 6, 7, 8, 9]}) assert_identical(expected, actual) def test_constructor_0d(self) -> None: expected = Dataset({"x": ([], 1)}) for arg in [1, np.array(1), expected["x"]]: actual = Dataset({"x": arg}) assert_identical(expected, actual) class Arbitrary: pass d = pd.Timestamp("2000-01-01T12") args = [ True, None, 3.4, np.nan, "hello", b"raw", np.datetime64("2000-01-01"), d, d.to_pydatetime(), Arbitrary(), ] for arg in args: print(arg) expected = Dataset({"x": ([], arg)}) actual = Dataset({"x": arg}) assert_identical(expected, actual) def test_constructor_auto_align(self) -> None: a = DataArray([1, 2], [("x", [0, 1])]) b = DataArray([3, 4], [("x", [1, 2])]) # verify align uses outer join expected = Dataset( {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]} ) actual = Dataset({"a": a, "b": b}) assert_identical(expected, actual) # regression test for GH346 assert isinstance(actual.variables["x"], IndexVariable) # variable with different dimensions c = ("y", [3, 4]) expected2 = expected.merge({"c": c}) actual = Dataset({"a": a, "b": b, "c": c}) assert_identical(expected2, actual) # variable that is only aligned against the aligned variables d = ("x", [3, 2, 1]) expected3 = expected.merge({"d": d}) actual = Dataset({"a": a, "b": b, "d": d}) assert_identical(expected3, actual) e = ("x", [0, 0]) with pytest.raises(ValueError, match=r"conflicting sizes"): Dataset({"a": a, "b": b, "e": e}) def test_constructor_pandas_sequence(self) -> None: ds = self.make_example_math_dataset() pandas_objs = { var_name: ds[var_name].to_pandas() for var_name in ["foo", "bar"] } ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs) del ds_based_on_pandas["x"] assert_equal(ds, ds_based_on_pandas) # reindex pandas obj, check align works rearranged_index = reversed(pandas_objs["foo"].index) pandas_objs["foo"] = pandas_objs["foo"].reindex(rearranged_index) ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs) del ds_based_on_pandas["x"] assert_equal(ds, ds_based_on_pandas) def test_constructor_pandas_single(self) -> None: das = [ DataArray(np.random.rand(4), dims=["a"]), # series DataArray(np.random.rand(4, 3), dims=["a", "b"]), # df ] for a in das: pandas_obj = a.to_pandas() ds_based_on_pandas = Dataset(pandas_obj) # type: ignore[arg-type] # TODO: improve typing of __init__ for dim in ds_based_on_pandas.data_vars: assert isinstance(dim, int) assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim]) def test_constructor_compat(self) -> None: data = {"x": DataArray(0, coords={"y": 1}), "y": ("z", [1, 1, 1])} expected = Dataset({"x": 0}, {"y": ("z", [1, 1, 1])}) actual = Dataset(data) assert_identical(expected, actual) data = {"y": ("z", [1, 1, 1]), "x": DataArray(0, coords={"y": 1})} actual = Dataset(data) 
assert_identical(expected, actual) original = Dataset( {"a": (("x", "y"), np.ones((2, 3)))}, {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]}, ) expected = Dataset( {"a": ("x", np.ones(2)), "b": ("y", np.ones(3))}, {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]}, ) actual = Dataset( {"a": original["a"][:, 0], "b": original["a"][0].drop_vars("x")} ) assert_identical(expected, actual) data = {"x": DataArray(0, coords={"y": 3}), "y": ("z", [1, 1, 1])} with pytest.raises(MergeError): Dataset(data) data = {"x": DataArray(0, coords={"y": 1}), "y": [1, 1]} actual = Dataset(data) expected = Dataset({"x": 0}, {"y": [1, 1]}) assert_identical(expected, actual) def test_constructor_with_coords(self) -> None: with pytest.raises(ValueError, match=r"found in both data_vars and"): Dataset({"a": ("x", [1])}, {"a": ("x", [1])}) ds = Dataset({}, {"a": ("x", [1])}) assert not ds.data_vars assert list(ds.coords.keys()) == ["a"] mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) with pytest.raises(ValueError, match=r"conflicting MultiIndex"): with pytest.warns( FutureWarning, match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset({}, {"x": mindex, "y": mindex}) Dataset({}, {"x": mindex, "level_1": range(4)}) def test_constructor_no_default_index(self) -> None: # explicitly passing a Coordinates object skips the creation of default index ds = Dataset(coords=Coordinates({"x": [1, 2, 3]}, indexes={})) assert "x" in ds assert "x" not in ds.xindexes def test_constructor_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords=coords) assert_identical(ds, coords.to_dataset()) with pytest.warns( FutureWarning, match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset(data_vars={"x": midx}) with pytest.warns( FutureWarning, match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset(coords={"x": midx}) def test_constructor_custom_index(self) -> None: class CustomIndex(Index): ... 
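        # Note: constructing a Coordinates object with an explicit `indexes` mapping
        # is what lets a custom Index subclass be attached; passing the coords as a
        # plain dict would instead create a default PandasIndex for "x".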
coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) ds = Dataset(coords=coords) assert isinstance(ds.xindexes["x"], CustomIndex) # test coordinate variables copied assert ds.variables["x"] is not coords.variables["x"] @pytest.mark.filterwarnings("ignore:return type") def test_properties(self) -> None: ds = create_test_data() # dims / sizes # These exact types aren't public API, but this makes sure we don't # change them inadvertently: assert isinstance(ds.dims, utils.Frozen) # TODO change after deprecation cycle in GH #8500 is complete assert isinstance(ds.dims.mapping, dict) assert type(ds.dims.mapping) is dict with pytest.warns( FutureWarning, match=r" To access a mapping from dimension names to lengths, please use `Dataset.sizes`", ): assert ds.dims == ds.sizes assert ds.sizes == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20} # dtypes assert isinstance(ds.dtypes, utils.Frozen) assert isinstance(ds.dtypes.mapping, dict) assert ds.dtypes == { "var1": np.dtype("float64"), "var2": np.dtype("float64"), "var3": np.dtype("float64"), } # data_vars assert list(ds) == list(ds.data_vars) assert list(ds.keys()) == list(ds.data_vars) assert "aasldfjalskdfj" not in ds.variables assert "dim1" in repr(ds.variables) assert len(ds) == 3 assert bool(ds) assert list(ds.data_vars) == ["var1", "var2", "var3"] assert list(ds.data_vars.keys()) == ["var1", "var2", "var3"] assert "var1" in ds.data_vars assert "dim1" not in ds.data_vars assert "numbers" not in ds.data_vars assert len(ds.data_vars) == 3 # xindexes assert set(ds.xindexes) == {"dim2", "dim3", "time"} assert len(ds.xindexes) == 3 assert "dim2" in repr(ds.xindexes) assert all(isinstance(idx, Index) for idx in ds.xindexes.values()) # indexes assert set(ds.indexes) == {"dim2", "dim3", "time"} assert len(ds.indexes) == 3 assert "dim2" in repr(ds.indexes) assert all(isinstance(idx, pd.Index) for idx in ds.indexes.values()) # coords assert list(ds.coords) == ["dim2", "dim3", "time", "numbers"] assert "dim2" in ds.coords assert "numbers" in ds.coords assert "var1" not in ds.coords assert "dim1" not in ds.coords assert len(ds.coords) == 4 # nbytes assert ( Dataset({"x": np.int64(1), "y": np.array([1, 2], dtype=np.float32)}).nbytes == 16 ) def test_warn_ds_dims_deprecation(self) -> None: # TODO remove after deprecation cycle in GH #8500 is complete ds = create_test_data() with pytest.warns(FutureWarning, match="return type"): ds.dims["dim1"] with pytest.warns(FutureWarning, match="return type"): ds.dims.keys() with pytest.warns(FutureWarning, match="return type"): ds.dims.values() with pytest.warns(FutureWarning, match="return type"): ds.dims.items() with assert_no_warnings(): len(ds.dims) ds.dims.__iter__() _ = "dim1" in ds.dims def test_asarray(self) -> None: ds = Dataset({"x": 0}) with pytest.raises(TypeError, match=r"cannot directly convert"): np.asarray(ds) def test_get_index(self) -> None: ds = Dataset({"foo": (("x", "y"), np.zeros((2, 3)))}, coords={"x": ["a", "b"]}) assert ds.get_index("x").equals(pd.Index(["a", "b"])) assert ds.get_index("y").equals(pd.Index([0, 1, 2])) with pytest.raises(KeyError): ds.get_index("z") def test_attr_access(self) -> None: ds = Dataset( {"tmin": ("x", [42], {"units": "Celsius"})}, attrs={"title": "My test data"} ) assert_identical(ds.tmin, ds["tmin"]) assert_identical(ds.tmin.x, ds.x) assert ds.title == ds.attrs["title"] assert ds.tmin.units == ds["tmin"].attrs["units"] assert {"tmin", "title"} <= set(dir(ds)) assert "units" in set(dir(ds.tmin)) # should defer to variable of same name 
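        # Note: after storing an attribute under the same name, attribute-style
        # access (`ds.tmin`) must still resolve to the data variable, while the
        # attribute value itself stays reachable via `ds.attrs["tmin"]`.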
ds.attrs["tmin"] = -999 assert ds.attrs["tmin"] == -999 assert_identical(ds.tmin, ds["tmin"]) def test_variable(self) -> None: a = Dataset() d = np.random.random((10, 3)) a["foo"] = (("time", "x"), d) assert "foo" in a.variables assert "foo" in a a["bar"] = (("time", "x"), d) # order of creation is preserved assert list(a.variables) == ["foo", "bar"] assert_array_equal(a["foo"].values, d) # try to add variable with dim (10,3) with data that's (3,10) with pytest.raises(ValueError): a["qux"] = (("time", "x"), d.T) def test_modify_inplace(self) -> None: a = Dataset() vec = np.random.random((10,)) attributes = {"foo": "bar"} a["x"] = ("x", vec, attributes) assert "x" in a.coords assert isinstance(a.coords["x"].to_index(), pd.Index) assert_identical(a.coords["x"].variable, a.variables["x"]) b = Dataset() b["x"] = ("x", vec, attributes) assert_identical(a["x"], b["x"]) assert a.sizes == b.sizes # this should work a["x"] = ("x", vec[:5]) a["z"] = ("x", np.arange(5)) with pytest.raises(ValueError): # now it shouldn't, since there is a conflicting length a["x"] = ("x", vec[:4]) arr = np.random.random((10, 1)) scal = np.array(0) with pytest.raises(ValueError): a["y"] = ("y", arr) with pytest.raises(ValueError): a["y"] = ("y", scal) assert "y" not in a.dims def test_coords_properties(self) -> None: # use int64 for repr consistency on windows data = Dataset( { "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "foo": (["x", "y"], np.random.randn(2, 3)), }, {"a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10)}, ) coords = data.coords assert isinstance(coords, DatasetCoordinates) # len assert len(coords) == 4 # iter assert list(coords) == ["x", "y", "a", "b"] assert_identical(coords["x"].variable, data["x"].variable) assert_identical(coords["y"].variable, data["y"].variable) assert "x" in coords assert "a" in coords assert 0 not in coords assert "foo" not in coords with pytest.raises(KeyError): coords["foo"] with pytest.raises(KeyError): coords[0] # repr expected = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2 a (x) int64 16B 4 5 b int64 8B -10""" ) actual = repr(coords) assert expected == actual # dims assert coords.sizes == {"x": 2, "y": 3} # dtypes assert coords.dtypes == { "x": np.dtype("int64"), "y": np.dtype("int64"), "a": np.dtype("int64"), "b": np.dtype("int64"), } def test_coords_modify(self) -> None: data = Dataset( { "x": ("x", [-1, -2]), "y": ("y", [0, 1, 2]), "foo": (["x", "y"], np.random.randn(2, 3)), }, {"a": ("x", [4, 5]), "b": -10}, ) actual = data.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = data.copy(deep=True) actual.coords["z"] = ("z", ["a", "b"]) assert_array_equal(actual["z"], ["a", "b"]) actual = data.copy(deep=True) with pytest.raises(ValueError, match=r"conflicting dimension sizes"): actual.coords["x"] = ("x", [-1]) assert_identical(actual, data) # should not be modified actual = data.copy() del actual.coords["b"] expected = data.reset_coords("b", drop=True) assert_identical(expected, actual) with pytest.raises(KeyError): del data.coords["not_found"] with pytest.raises(KeyError): del data.coords["foo"] actual = data.copy(deep=True) actual.coords.update({"c": 11}) expected = data.merge({"c": 11}).set_coords("c") assert_identical(expected, actual) # regression test for GH3746 del actual.coords["x"] assert "x" not in actual.xindexes def test_update_index(self) -> None: actual = Dataset(coords={"x": [1, 2, 3]}) actual["x"] = ["a", "b", "c"] assert 
actual.xindexes["x"].to_pandas_index().equals(pd.Index(["a", "b", "c"])) def test_coords_setitem_with_new_dimension(self) -> None: actual = Dataset() actual.coords["foo"] = ("x", [1, 2, 3]) expected = Dataset(coords={"foo": ("x", [1, 2, 3])}) assert_identical(expected, actual) def test_coords_setitem_multiindex(self) -> None: data = create_test_multiindex() with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "): data.coords["level_1"] = range(4) def test_coords_set(self) -> None: one_coord = Dataset({"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])}) two_coords = Dataset({"zzz": ("x", [2])}, {"x": ("x", [0]), "yy": ("x", [1])}) all_coords = Dataset( coords={"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])} ) actual = one_coord.set_coords("x") assert_identical(one_coord, actual) actual = one_coord.set_coords(["x"]) assert_identical(one_coord, actual) actual = one_coord.set_coords("yy") assert_identical(two_coords, actual) actual = one_coord.set_coords(["yy", "zzz"]) assert_identical(all_coords, actual) actual = one_coord.reset_coords() assert_identical(one_coord, actual) actual = two_coords.reset_coords() assert_identical(one_coord, actual) actual = all_coords.reset_coords() assert_identical(one_coord, actual) actual = all_coords.reset_coords(["yy", "zzz"]) assert_identical(one_coord, actual) actual = all_coords.reset_coords("zzz") assert_identical(two_coords, actual) with pytest.raises(ValueError, match=r"cannot remove index"): one_coord.reset_coords("x") actual = all_coords.reset_coords("zzz", drop=True) expected = all_coords.drop_vars("zzz") assert_identical(expected, actual) expected = two_coords.drop_vars("zzz") assert_identical(expected, actual) def test_coords_to_dataset(self) -> None: orig = Dataset({"foo": ("y", [-1, 0, 1])}, {"x": 10, "y": [2, 3, 4]}) expected = Dataset(coords={"x": 10, "y": [2, 3, 4]}) actual = orig.coords.to_dataset() assert_identical(expected, actual) def test_coords_merge(self) -> None: orig_coords = Dataset(coords={"a": ("x", [1, 2]), "x": [0, 1]}).coords other_coords = Dataset(coords={"b": ("x", ["a", "b"]), "x": [0, 1]}).coords expected = Dataset( coords={"a": ("x", [1, 2]), "b": ("x", ["a", "b"]), "x": [0, 1]} ) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"x": ("x", ["a"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"x": ("x", ["a", "b"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"x": ("x", ["a", "b", "c"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"a": ("x", [8, 9])}).coords expected = Dataset(coords={"x": range(2)}) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"x": np.nan}).coords actual = orig_coords.merge(other_coords) assert_identical(orig_coords.to_dataset(), actual) actual = other_coords.merge(orig_coords) assert_identical(orig_coords.to_dataset(), actual) def test_coords_merge_mismatched_shape(self) -> None: orig_coords = Dataset(coords={"a": ("x", [1, 1])}).coords other_coords = Dataset(coords={"a": 1}).coords expected = orig_coords.to_dataset() actual = orig_coords.merge(other_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"a": ("y", 
[1])}).coords expected = Dataset(coords={"a": (["x", "y"], [[1], [1]])}) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected.transpose(), actual) orig_coords = Dataset(coords={"a": ("x", [np.nan])}).coords other_coords = Dataset(coords={"a": np.nan}).coords expected = orig_coords.to_dataset() actual = orig_coords.merge(other_coords) assert_identical(expected, actual) def test_data_vars_properties(self) -> None: ds = Dataset() ds["foo"] = (("x",), [1.0]) ds["bar"] = 2.0 # iter assert set(ds.data_vars) == {"foo", "bar"} assert "foo" in ds.data_vars assert "x" not in ds.data_vars assert_identical(ds["foo"], ds.data_vars["foo"]) # repr expected = dedent( """\ Data variables: foo (x) float64 8B 1.0 bar float64 8B 2.0""" ) actual = repr(ds.data_vars) assert expected == actual # dtypes assert ds.data_vars.dtypes == { "foo": np.dtype("float64"), "bar": np.dtype("float64"), } # len ds.coords["x"] = [1] assert len(ds.data_vars) == 2 # https://github.com/pydata/xarray/issues/7588 with pytest.raises( AssertionError, match=r"something is wrong with Dataset._coord_names" ): ds._coord_names = {"w", "x", "y", "z"} len(ds.data_vars) def test_equals_and_identical(self) -> None: data = create_test_data(seed=42) assert data.equals(data) assert data.identical(data) data2 = create_test_data(seed=42) data2.attrs["foobar"] = "baz" assert data.equals(data2) assert not data.identical(data2) del data2["time"] assert not data.equals(data2) data = create_test_data(seed=42).rename({"var1": None}) assert data.equals(data) assert data.identical(data) data2 = data.reset_coords() assert not data2.equals(data) assert not data2.identical(data) def test_equals_failures(self) -> None: data = create_test_data() assert not data.equals("foo") # type: ignore[arg-type] assert not data.identical(123) # type: ignore[arg-type] assert not data.broadcast_equals({1: 2}) # type: ignore[arg-type] def test_broadcast_equals(self) -> None: data1 = Dataset(coords={"x": 0}) data2 = Dataset(coords={"x": [0]}) assert data1.broadcast_equals(data2) assert not data1.equals(data2) assert not data1.identical(data2) def test_attrs(self) -> None: data = create_test_data(seed=42) data.attrs = {"foobar": "baz"} assert data.attrs["foobar"], "baz" assert isinstance(data.attrs, dict) def test_chunks_does_not_load_data(self) -> None: # regression test for GH6538 store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) ds = open_dataset(store) assert ds.chunks == {} @requires_dask @pytest.mark.parametrize( "use_cftime,calendar", [ (False, "standard"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "standard"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "noleap"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "360_day"), ], ) def test_chunk_by_season_resampler(self, use_cftime: bool, calendar: str) -> None: import dask.array N = 365 + 365 # 2 years - 1 day time = xr.date_range( "2000-01-01", periods=N, freq="D", use_cftime=use_cftime, calendar=calendar ) ds = Dataset( { "pr": ("time", dask.array.random.random((N), chunks=(20))), "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))), "ones": ("time", np.ones((N,))), }, coords={"time": time}, ) # Standard seasons rechunked = ds.chunk( {"x": 2, "time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])} ) assert rechunked.chunksizes["x"] == (2,) * 5 assert len(rechunked.chunksizes["time"]) == 9 assert rechunked.chunksizes["x"] == (2,) * 
5 assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"] if calendar == "standard": assert rechunked.chunksizes["time"] == (60, 92, 92, 91, 90, 92, 92, 91, 30) elif calendar == "noleap": assert rechunked.chunksizes["time"] == (59, 92, 92, 91, 90, 92, 92, 91, 31) elif calendar == "360_day": assert rechunked.chunksizes["time"] == (60, 90, 90, 90, 90, 90, 90, 90, 40) else: raise AssertionError("unreachable") # Custom seasons rechunked = ds.chunk( {"x": 2, "time": SeasonResampler(["DJFM", "AM", "JJA", "SON"])} ) assert len(rechunked.chunksizes["time"]) == 9 assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"] assert rechunked.chunksizes["x"] == (2,) * 5 if calendar == "standard": assert rechunked.chunksizes["time"] == (91, 61, 92, 91, 121, 61, 92, 91, 30) elif calendar == "noleap": assert rechunked.chunksizes["time"] == (90, 61, 92, 91, 121, 61, 92, 91, 31) elif calendar == "360_day": assert rechunked.chunksizes["time"] == (90, 60, 90, 90, 120, 60, 90, 90, 40) else: raise AssertionError("unreachable") # Test that drop_incomplete doesn't affect chunking rechunked_drop_true = ds.chunk( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=True) ) rechunked_drop_false = ds.chunk( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False) ) assert ( rechunked_drop_true.chunksizes["time"] == rechunked_drop_false.chunksizes["time"] ) @requires_dask def test_chunk_by_season_resampler_errors(self): """Test error handling for SeasonResampler chunking.""" # Test error on missing season (should fail with incomplete seasons) ds = Dataset( {"x": ("time", np.arange(12))}, coords={"time": pd.date_range("2000-01-01", periods=12, freq="MS")}, ) with pytest.raises(ValueError, match="does not cover all 12 months"): ds.chunk(time=SeasonResampler(["DJF", "MAM", "SON"])) ds = Dataset({"foo": ("x", [1, 2, 3])}) # Test error on virtual variable with pytest.raises(ValueError, match="virtual variable"): ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) # Test error on non-datetime variable ds["x"] = ("x", [1, 2, 3]) with pytest.raises(ValueError, match="datetime variables"): ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) # Test successful case with 1D datetime variable ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D")) # This should work result = ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) assert result.chunks is not None # Test error on missing season (should fail with incomplete seasons) with pytest.raises(ValueError): ds.chunk(x=SeasonResampler(["DJF", "MAM", "SON"])) @requires_dask def test_chunk(self) -> None: data = create_test_data() for v in data.variables.values(): assert isinstance(v.data, np.ndarray) assert data.chunks == {} reblocked = data.chunk() for k, v in reblocked.variables.items(): if k in reblocked.dims: assert isinstance(v.data, np.ndarray) else: assert isinstance(v.data, da.Array) expected_chunks: dict[Hashable, tuple[int, ...]] = { "dim1": (8,), "dim2": (9,), "dim3": (10,), } assert reblocked.chunks == expected_chunks # test kwargs form of chunks assert data.chunk(expected_chunks).chunks == expected_chunks def get_dask_names(ds): return {k: v.data.name for k, v in ds.items()} orig_dask_names = get_dask_names(reblocked) reblocked = data.chunk({"time": 5, "dim1": 5, "dim2": 5, "dim3": 5}) # time is not a dim in any of the data_vars, so it # doesn't get chunked expected_chunks = {"dim1": (5, 3), "dim2": (5, 4), "dim3": (5, 5)} assert reblocked.chunks == expected_chunks # make sure dask names change when 
rechunking by different amounts # regression test for GH3350 new_dask_names = get_dask_names(reblocked) for k, v in new_dask_names.items(): assert v != orig_dask_names[k] reblocked = data.chunk(expected_chunks) assert reblocked.chunks == expected_chunks # reblock on already blocked data orig_dask_names = get_dask_names(reblocked) reblocked = reblocked.chunk(expected_chunks) new_dask_names = get_dask_names(reblocked) assert reblocked.chunks == expected_chunks assert_identical(reblocked, data) # rechunking with same chunk sizes should not change names for k, v in new_dask_names.items(): assert v == orig_dask_names[k] with pytest.raises( ValueError, match=re.escape( "chunks keys ('foo',) not found in data dimensions ('dim2', 'dim3', 'time', 'dim1')" ), ): data.chunk({"foo": 10}) @requires_dask @pytest.mark.parametrize( "calendar", ( "standard", pytest.param( "gregorian", marks=pytest.mark.skipif(not has_cftime, reason="needs cftime"), ), ), ) @pytest.mark.parametrize("freq", ["D", "W", "5ME", "YE"]) @pytest.mark.parametrize("add_gap", [True, False]) def test_chunk_by_frequency(self, freq: str, calendar: str, add_gap: bool) -> None: import dask.array N = 365 * 2 ฮ”N = 28 # noqa: PLC2401 time = xr.date_range( "2001-01-01", periods=N + ฮ”N, freq="D", calendar=calendar ).to_numpy(copy=True) if add_gap: # introduce an empty bin time[31 : 31 + ฮ”N] = np.datetime64("NaT") time = time[~np.isnat(time)] else: time = time[:N] ds = Dataset( { "pr": ("time", dask.array.random.random((N), chunks=(20))), "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))), "ones": ("time", np.ones((N,))), }, coords={"time": time}, ) rechunked = ds.chunk(x=2, time=TimeResampler(freq)) expected = tuple( ds.ones.resample(time=freq).sum().dropna("time").astype(int).data.tolist() ) assert rechunked.chunksizes["time"] == expected assert rechunked.chunksizes["x"] == (2,) * 5 rechunked = ds.chunk({"x": 2, "time": TimeResampler(freq)}) assert rechunked.chunksizes["time"] == expected assert rechunked.chunksizes["x"] == (2,) * 5 def test_chunk_by_frequency_errors(self): ds = Dataset({"foo": ("x", [1, 2, 3])}) with pytest.raises(ValueError, match="virtual variable"): ds.chunk(x=TimeResampler("YE")) ds["x"] = ("x", [1, 2, 3]) with pytest.raises(ValueError, match="datetime variables"): ds.chunk(x=TimeResampler("YE")) ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D")) with pytest.raises(ValueError, match="Invalid frequency"): ds.chunk(x=TimeResampler("foo")) @requires_dask def test_dask_is_lazy(self) -> None: store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) ds = open_dataset(store).chunk() with pytest.raises(UnexpectedDataAccess): ds.load() with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: _ = ds.var1.data ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) ds.transpose() ds.mean() ds.fillna(0) ds.rename({"dim1": "foobar"}) ds.set_coords("var1") ds.drop_vars("var1") def test_isel(self) -> None: data = create_test_data() slicers: dict[Hashable, slice] = { "dim1": slice(None, None, 2), "dim2": slice(0, 2), } ret = data.isel(slicers) # Verify that only the specified dimension was altered assert list(data.dims) == list(ret.dims) for d in data.dims: if d in slicers: assert ret.sizes[d] == np.arange(data.sizes[d])[slicers[d]].size else: assert data.sizes[d] == ret.sizes[d] # Verify that the data is what we expect for v in data.variables: assert data[v].dims == ret[v].dims assert data[v].attrs == 
ret[v].attrs slice_list = [slice(None)] * data[v].values.ndim for d, s in slicers.items(): if d in data[v].dims: inds = np.nonzero(np.array(data[v].dims) == d)[0] for ind in inds: slice_list[ind] = s expected = data[v].values[tuple(slice_list)] actual = ret[v].values np.testing.assert_array_equal(expected, actual) with pytest.raises(ValueError): data.isel(not_a_dim=slice(0, 2)) with pytest.raises( ValueError, match=r"Dimensions {'not_a_dim'} do not exist. Expected " r"one or more of " r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*", ): data.isel(not_a_dim=slice(0, 2)) with pytest.warns( UserWarning, match=r"Dimensions {'not_a_dim'} do not exist. " r"Expected one or more of " r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*", ): data.isel(not_a_dim=slice(0, 2), missing_dims="warn") assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims="ignore")) ret = data.isel(dim1=0) assert {"time": 20, "dim2": 9, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(ret.xindexes) ret = data.isel(time=slice(2), dim1=0, dim2=slice(5)) assert {"time": 2, "dim2": 5, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(ret.xindexes) ret = data.isel(time=0, dim1=0, dim2=slice(5)) assert {"dim2": 5, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(list(ret.xindexes) + ["time"]) def test_isel_fancy(self) -> None: # isel with fancy indexing. data = create_test_data() pdim1 = [1, 2, 3] pdim2 = [4, 5, 1] pdim3 = [1, 2, 3] actual = data.isel( dim1=(("test_coord",), pdim1), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert "test_coord" in actual.dims assert actual.coords["test_coord"].shape == (len(pdim1),) # Should work with DataArray actual = data.isel( dim1=DataArray(pdim1, dims="test_coord"), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert "test_coord" in actual.dims assert actual.coords["test_coord"].shape == (len(pdim1),) expected = data.isel( dim1=(("test_coord",), pdim1), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert_identical(actual, expected) # DataArray with coordinate idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)}) idx2 = DataArray(pdim2, dims=["b"], coords={"b": np.random.randn(3)}) idx3 = DataArray(pdim3, dims=["c"], coords={"c": np.random.randn(3)}) # Should work with DataArray actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3) assert "a" in actual.dims assert "b" in actual.dims assert "c" in actual.dims assert "time" in actual.coords assert "dim2" in actual.coords assert "dim3" in actual.coords expected = data.isel( dim1=(("a",), pdim1), dim2=(("b",), pdim2), dim3=(("c",), pdim3) ) expected = expected.assign_coords(a=idx1["a"], b=idx2["b"], c=idx3["c"]) assert_identical(actual, expected) idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)}) idx2 = DataArray(pdim2, dims=["a"]) idx3 = DataArray(pdim3, dims=["a"]) # Should work with DataArray actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3) assert "a" in actual.dims assert "time" in actual.coords assert "dim2" in actual.coords assert "dim3" in actual.coords expected = data.isel( dim1=(("a",), pdim1), dim2=(("a",), pdim2), dim3=(("a",), pdim3) ) expected = expected.assign_coords(a=idx1["a"]) 
assert_identical(actual, expected) actual = data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)) assert "points" in actual.dims assert "dim3" in actual.dims assert "dim3" not in actual.data_vars np.testing.assert_array_equal(data["dim2"][pdim2], actual["dim2"]) # test that the order of the indexers doesn't matter assert_identical( data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)), data.isel(dim2=(("points",), pdim2), dim1=(("points",), pdim1)), ) # make sure we're raising errors in the right places with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): data.isel(dim1=(("points",), [1, 2]), dim2=(("points",), [1, 2, 3])) with pytest.raises(TypeError, match=r"cannot use a Dataset"): data.isel(dim1=Dataset({"points": [1, 2]})) # test to be sure we keep around variables that were not indexed ds = Dataset({"x": [1, 2, 3, 4], "y": 0}) actual = ds.isel(x=(("points",), [0, 1, 2])) assert_identical(ds["y"], actual["y"]) # tests using index or DataArray as indexers stations = Dataset() stations["station"] = (("station",), ["A", "B", "C"]) stations["dim1s"] = (("station",), [1, 2, 3]) stations["dim2s"] = (("station",), [4, 5, 1]) actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"]) assert "station" in actual.coords assert "station" in actual.dims assert_identical(actual["station"].drop_vars(["dim2"]), stations["station"]) with pytest.raises(ValueError, match=r"conflicting values/indexes on "): data.isel( dim1=DataArray( [0, 1, 2], dims="station", coords={"station": [0, 1, 2]} ), dim2=DataArray( [0, 1, 2], dims="station", coords={"station": [0, 1, 3]} ), ) # multi-dimensional selection stations = Dataset() stations["a"] = (("a",), ["A", "B", "C"]) stations["b"] = (("b",), [0, 1]) stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]]) stations["dim2s"] = (("a",), [4, 5, 1]) actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"]) assert "a" in actual.coords assert "a" in actual.dims assert "b" in actual.coords assert "b" in actual.dims assert "dim2" in actual.coords assert "a" in actual["dim2"].dims assert_identical(actual["a"].drop_vars(["dim2"]), stations["a"]) assert_identical(actual["b"], stations["b"]) expected_var1 = data["var1"].variable[ stations["dim1s"].variable, stations["dim2s"].variable ] expected_var2 = data["var2"].variable[ stations["dim1s"].variable, stations["dim2s"].variable ] expected_var3 = data["var3"].variable[slice(None), stations["dim1s"].variable] assert_equal(actual["a"].drop_vars("dim2"), stations["a"]) assert_array_equal(actual["var1"], expected_var1) assert_array_equal(actual["var2"], expected_var2) assert_array_equal(actual["var3"], expected_var3) # test that drop works ds = xr.Dataset({"a": (("x",), [1, 2, 3])}, coords={"b": (("x",), [5, 6, 7])}) actual = ds.isel({"x": 1}, drop=False) expected = xr.Dataset({"a": 2}, coords={"b": 6}) assert_identical(actual, expected) actual = ds.isel({"x": 1}, drop=True) expected = xr.Dataset({"a": 2}) assert_identical(actual, expected) actual = ds.isel({"x": DataArray(1)}, drop=False) expected = xr.Dataset({"a": 2}, coords={"b": 6}) assert_identical(actual, expected) actual = ds.isel({"x": DataArray(1)}, drop=True) expected = xr.Dataset({"a": 2}) assert_identical(actual, expected) def test_isel_dataarray(self) -> None: """Test for indexing by DataArray""" data = create_test_data() # indexing with DataArray with same-name coordinates. 
indexing_da = DataArray( np.arange(1, 4), dims=["dim1"], coords={"dim1": np.random.randn(3)} ) actual = data.isel(dim1=indexing_da) assert_identical(indexing_da["dim1"], actual["dim1"]) assert_identical(data["dim2"], actual["dim2"]) # Conflict in the dimension coordinate indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"dim2": np.random.randn(3)} ) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data.isel(dim2=indexing_da) # Also the case for DataArray with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data["var2"].isel(dim2=indexing_da) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data["dim2"].isel(dim2=indexing_da) # same name coordinate which does not conflict indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"dim2": data["dim2"].values[1:4]} ) actual = data.isel(dim2=indexing_da) assert_identical(actual["dim2"], indexing_da["dim2"]) # Silently drop conflicted (non-dimensional) coordinate of indexer indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={ "dim2": data["dim2"].values[1:4], "numbers": ("dim2", np.arange(2, 5)), }, ) actual = data.isel(dim2=indexing_da) assert_identical(actual["numbers"], data["numbers"]) # boolean data array with coordinate with the same name indexing_da = DataArray( np.arange(1, 10), dims=["dim2"], coords={"dim2": data["dim2"].values} ) indexing_da = indexing_da < 3 actual = data.isel(dim2=indexing_da) assert_identical(actual["dim2"], data["dim2"][:2]) # boolean data array with non-dimensioncoordinate indexing_da = DataArray( np.arange(1, 10), dims=["dim2"], coords={ "dim2": data["dim2"].values, "non_dim": (("dim2",), np.random.randn(9)), "non_dim2": 0, }, ) indexing_da = indexing_da < 3 actual = data.isel(dim2=indexing_da) assert_identical( actual["dim2"].drop_vars("non_dim").drop_vars("non_dim2"), data["dim2"][:2] ) assert_identical(actual["non_dim"], indexing_da["non_dim"][:2]) assert_identical(actual["non_dim2"], indexing_da["non_dim2"]) # non-dimension coordinate will be also attached indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"non_dim": (("dim2",), np.random.randn(3))}, ) actual = data.isel(dim2=indexing_da) assert "non_dim" in actual assert "non_dim" in actual.coords # Index by a scalar DataArray indexing_da = DataArray(3, dims=[], coords={"station": 2}) actual = data.isel(dim2=indexing_da) assert "station" in actual actual = data.isel(dim2=indexing_da["station"]) assert "station" in actual # indexer generated from coordinates indexing_ds = Dataset({}, coords={"dim2": [0, 1, 2]}) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): actual = data.isel(dim2=indexing_ds["dim2"]) def test_isel_fancy_convert_index_variable(self) -> None: # select index variable "x" with a DataArray of dim "z" # -> drop index and convert index variable to base variable ds = xr.Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]}) idxr = xr.DataArray([1], dims="z", name="x") actual = ds.isel(x=idxr) assert "x" not in actual.xindexes assert not isinstance(actual.x.variable, IndexVariable) def test_isel_multicoord_index(self) -> None: # regression test https://github.com/pydata/xarray/issues/10063 # isel on a multi-coordinate index should return a unique index associated # to each coordinate coords = xr.Coordinates(coords={"x": [0, 1], "y": [1, 2]}, indexes={}) ds = xr.Dataset(coords=coords).set_xindex(["x", "y"], XYIndex) ds2 = ds.isel(x=slice(None), y=slice(None)) assert ds2.xindexes["x"] is ds2.xindexes["y"] def 
test_sel(self) -> None: data = create_test_data() int_slicers = {"dim1": slice(None, None, 2), "dim2": slice(2), "dim3": slice(3)} loc_slicers = { "dim1": slice(None, None, 2), "dim2": slice(0, 0.5), "dim3": slice("a", "c"), } assert_equal(data.isel(int_slicers), data.sel(loc_slicers)) data["time"] = ("time", pd.date_range("2000-01-01", periods=20)) assert_equal(data.isel(time=0), data.sel(time="2000-01-01")) assert_equal( data.isel(time=slice(10)), data.sel(time=slice("2000-01-01", "2000-01-10")) ) assert_equal(data, data.sel(time=slice("1999", "2005"))) times = pd.date_range("2000-01-01", periods=3) assert_equal(data.isel(time=slice(3)), data.sel(time=times)) assert_equal( data.isel(time=slice(3)), data.sel(time=(data["time.dayofyear"] <= 3)) ) td = pd.to_timedelta(np.arange(3), unit="days") data = Dataset({"x": ("td", np.arange(3)), "td": td}) assert_equal(data, data.sel(td=td)) assert_equal(data, data.sel(td=slice("3 days"))) assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0 days"))) assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0h"))) assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice("1 days", "2 days"))) def test_sel_dataarray(self) -> None: data = create_test_data() ind = DataArray([0.0, 0.5, 1.0], dims=["dim2"]) actual = data.sel(dim2=ind) assert_equal(actual, data.isel(dim2=[0, 1, 2])) # with different dimension ind = DataArray([0.0, 0.5, 1.0], dims=["new_dim"]) actual = data.sel(dim2=ind) expected = data.isel(dim2=Variable("new_dim", [0, 1, 2])) assert "new_dim" in actual.dims assert_equal(actual, expected) # Multi-dimensional ind = DataArray([[0.0], [0.5], [1.0]], dims=["new_dim", "new_dim2"]) actual = data.sel(dim2=ind) expected = data.isel(dim2=Variable(("new_dim", "new_dim2"), [[0], [1], [2]])) assert "new_dim" in actual.dims assert "new_dim2" in actual.dims assert_equal(actual, expected) # with coordinate ind = DataArray( [0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"]} ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]).rename({"dim2": "new_dim"}) assert "new_dim" in actual.dims assert "new_dim" in actual.coords assert_equal( actual.drop_vars("new_dim").drop_vars("dim2"), expected.drop_vars("new_dim") ) assert_equal(actual["new_dim"].drop_vars("dim2"), ind["new_dim"]) # with conflicted coordinate (silently ignored) ind = DataArray( [0.0, 0.5, 1.0], dims=["dim2"], coords={"dim2": ["a", "b", "c"]} ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]) assert_equal(actual, expected) # with conflicted coordinate (silently ignored) ind = DataArray( [0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"], "dim2": 3}, ) actual = data.sel(dim2=ind) assert_equal( actual["new_dim"].drop_vars("dim2"), ind["new_dim"].drop_vars("dim2") ) expected = data.isel(dim2=[0, 1, 2]) expected["dim2"] = (("new_dim"), expected["dim2"].values) assert_equal(actual["dim2"].drop_vars("new_dim"), expected["dim2"]) assert actual["var1"].dims == ("dim1", "new_dim") # with non-dimensional coordinate ind = DataArray( [0.0, 0.5, 1.0], dims=["dim2"], coords={ "dim2": ["a", "b", "c"], "numbers": ("dim2", [0, 1, 2]), "new_dim": ("dim2", [1.1, 1.2, 1.3]), }, ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]) assert_equal(actual.drop_vars("new_dim"), expected) assert np.allclose(actual["new_dim"].values, ind["new_dim"].values) def test_sel_dataarray_mindex(self) -> None: midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") 
midx_coords["y"] = range(3) mds = xr.Dataset( {"var": (("x", "y"), np.random.rand(6, 3))}, coords=midx_coords ) actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="x")) actual_sel = mds.sel(x=DataArray(midx[:3], dims="x")) assert actual_isel["x"].dims == ("x",) assert actual_sel["x"].dims == ("x",) assert_identical(actual_isel, actual_sel) actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="z")) actual_sel = mds.sel(x=Variable("z", midx[:3])) assert actual_isel["x"].dims == ("z",) assert actual_sel["x"].dims == ("z",) assert_identical(actual_isel, actual_sel) # with coordinate actual_isel = mds.isel( x=xr.DataArray(np.arange(3), dims="z", coords={"z": [0, 1, 2]}) ) actual_sel = mds.sel( x=xr.DataArray(midx[:3], dims="z", coords={"z": [0, 1, 2]}) ) assert actual_isel["x"].dims == ("z",) assert actual_sel["x"].dims == ("z",) assert_identical(actual_isel, actual_sel) # Vectorized indexing with level-variables raises an error with pytest.raises(ValueError, match=r"Vectorized selection is "): mds.sel(one=["a", "b"]) with pytest.raises( ValueError, match=r"Vectorized selection is not available along coordinate 'x' with a multi-index", ): mds.sel( x=xr.DataArray( [np.array(midx[:2]), np.array(midx[-2:])], dims=["a", "b"] ) ) def test_sel_categorical(self) -> None: ind = pd.Series(["foo", "bar"], dtype="category") df = pd.DataFrame({"ind": ind, "values": [1, 2]}) ds = df.set_index("ind").to_xarray() actual = ds.sel(ind="bar") expected = ds.isel(ind=1) assert_identical(expected, actual) def test_sel_categorical_error(self) -> None: ind = pd.Series(["foo", "bar"], dtype="category") df = pd.DataFrame({"ind": ind, "values": [1, 2]}) ds = df.set_index("ind").to_xarray() with pytest.raises(ValueError): ds.sel(ind="bar", method="nearest") with pytest.raises(ValueError): ds.sel(ind="bar", tolerance="nearest") # type: ignore[arg-type] def test_categorical_index(self) -> None: cat = pd.CategoricalIndex( ["foo", "bar", "foo"], categories=["foo", "bar", "baz", "qux", "quux", "corge"], ) ds = xr.Dataset( {"var": ("cat", np.arange(3))}, coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 1])}, ) # test slice actual1 = ds.sel(cat="foo") expected1 = ds.isel(cat=[0, 2]) assert_identical(expected1, actual1) # make sure the conversion to the array works actual2 = ds.sel(cat="foo")["cat"].values assert (actual2 == np.array(["foo", "foo"])).all() ds = ds.set_index(index=["cat", "c"]) actual3 = ds.unstack("index") assert actual3["var"].shape == (2, 2) def test_categorical_index_reindex(self) -> None: cat = pd.CategoricalIndex( ["foo", "bar", "baz"], categories=["foo", "bar", "baz", "qux", "quux", "corge"], ) ds = xr.Dataset( {"var": ("cat", np.arange(3))}, coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 2])}, ) actual = ds.reindex(cat=["foo"])["cat"].values assert (actual == np.array(["foo"])).all() @pytest.mark.parametrize("fill_value", [np.nan, pd.NA, None]) @pytest.mark.parametrize( "extension_array", [ pytest.param( pd.Categorical( ["foo", "bar", "baz"], categories=["foo", "bar", "baz", "qux"], ), id="categorical", ), ] + ( [ pytest.param( pd.array([1, 1, None], dtype="int64[pyarrow]"), id="int64[pyarrow]" ) ] if has_pyarrow else [] ), ) def test_extensionarray_negative_reindex(self, fill_value, extension_array) -> None: ds = xr.Dataset( {"arr": ("index", extension_array)}, coords={"index": ("index", np.arange(3))}, ) kwargs = {} if fill_value is not None: kwargs["fill_value"] = fill_value reindexed_cat = cast( pd.api.extensions.ExtensionArray, (ds.reindex(index=[-1, 1, 1], 
**kwargs)["arr"].to_pandas().values), ) assert reindexed_cat.equals( # type: ignore[attr-defined] pd.array( [pd.NA, extension_array[1], extension_array[1]], dtype=extension_array.dtype, ) ) @requires_pyarrow def test_extension_array_reindex_same(self) -> None: series = pd.Series([1, 2, pd.NA, 3], dtype="int32[pyarrow]") test = xr.Dataset({"test": series}) res = test.reindex(dim_0=series.index) align(res, test, join="exact") def test_categorical_multiindex(self) -> None: i1 = pd.Series([0, 0]) cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"]) i2 = pd.Series(["baz", "bar"], dtype=cat) df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2]}).set_index( ["i1", "i2"] ) actual = df.to_xarray() assert actual["values"].shape == (1, 2) def test_sel_drop(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]}) expected = Dataset({"foo": 1}) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) expected = Dataset({"foo": 1}, {"x": 0}) selected = data.sel(x=0, drop=False) assert_identical(expected, selected) data = Dataset({"foo": ("x", [1, 2, 3])}) expected = Dataset({"foo": 1}) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) def test_sel_drop_mindex(self) -> None: midx = pd.MultiIndex.from_arrays([["a", "a"], [1, 2]], names=("foo", "bar")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") data = Dataset(coords=midx_coords) actual = data.sel(foo="a", drop=True) assert "foo" not in actual.coords actual = data.sel(foo="a", drop=False) assert_equal(actual.foo, DataArray("a", coords={"foo": "a"})) def test_isel_drop(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]}) expected = Dataset({"foo": 1}) selected = data.isel(x=0, drop=True) assert_identical(expected, selected) expected = Dataset({"foo": 1}, {"x": 0}) selected = data.isel(x=0, drop=False) assert_identical(expected, selected) def test_head(self) -> None: data = create_test_data() expected = data.isel(time=slice(5), dim2=slice(6)) actual = data.head(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel(time=slice(0)) actual = data.head(time=0) assert_equal(expected, actual) expected = data.isel({dim: slice(6) for dim in data.dims}) actual = data.head(6) assert_equal(expected, actual) expected = data.isel({dim: slice(5) for dim in data.dims}) actual = data.head() assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.head([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): data.head(dim2=3.1) with pytest.raises(ValueError, match=r"expected positive int"): data.head(time=-3) def test_tail(self) -> None: data = create_test_data() expected = data.isel(time=slice(-5, None), dim2=slice(-6, None)) actual = data.tail(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel(dim1=slice(0)) actual = data.tail(dim1=0) assert_equal(expected, actual) expected = data.isel({dim: slice(-6, None) for dim in data.dims}) actual = data.tail(6) assert_equal(expected, actual) expected = data.isel({dim: slice(-5, None) for dim in data.dims}) actual = data.tail() assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.tail([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): data.tail(dim2=3.1) with pytest.raises(ValueError, match=r"expected positive int"): data.tail(time=-3) def test_thin(self) -> None: data = create_test_data() expected = 
data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6)) actual = data.thin(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel({dim: slice(None, None, 6) for dim in data.dims}) actual = data.thin(6) assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.thin([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): data.thin(dim2=3.1) with pytest.raises(ValueError, match=r"cannot be zero"): data.thin(time=0) with pytest.raises(ValueError, match=r"expected positive int"): data.thin(time=-3) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_sel_fancy(self) -> None: data = create_test_data() # add in a range() index data["dim1"] = data.dim1 pdim1 = [1, 2, 3] pdim2 = [4, 5, 1] pdim3 = [1, 2, 3] expected = data.isel( dim1=Variable(("test_coord",), pdim1), dim2=Variable(("test_coord",), pdim2), dim3=Variable(("test_coord"), pdim3), ) actual = data.sel( dim1=Variable(("test_coord",), data.dim1[pdim1]), dim2=Variable(("test_coord",), data.dim2[pdim2]), dim3=Variable(("test_coord",), data.dim3[pdim3]), ) assert_identical(expected, actual) # DataArray Indexer idx_t = DataArray( data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_2 = DataArray( data["dim2"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_3 = DataArray( data["dim3"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3) expected = data.isel( time=Variable(("a",), [3, 2, 1]), dim2=Variable(("a",), [3, 2, 1]), dim3=Variable(("a",), [3, 2, 1]), ) expected = expected.assign_coords(a=idx_t["a"]) assert_identical(expected, actual) idx_t = DataArray( data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_2 = DataArray( data["dim2"][[2, 1, 3]].values, dims=["b"], coords={"b": [0, 1, 2]} ) idx_3 = DataArray( data["dim3"][[1, 2, 1]].values, dims=["c"], coords={"c": [0.0, 1.1, 2.2]} ) actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3) expected = data.isel( time=Variable(("a",), [3, 2, 1]), dim2=Variable(("b",), [2, 1, 3]), dim3=Variable(("c",), [1, 2, 1]), ) expected = expected.assign_coords(a=idx_t["a"], b=idx_2["b"], c=idx_3["c"]) assert_identical(expected, actual) # test from sel_points data = Dataset({"foo": (("x", "y"), np.arange(9).reshape(3, 3))}) data.coords.update({"x": [0, 1, 2], "y": [0, 1, 2]}) expected = Dataset( {"foo": ("points", [0, 4, 8])}, coords={ "x": Variable(("points",), [0, 1, 2]), "y": Variable(("points",), [0, 1, 2]), }, ) actual = data.sel( x=Variable(("points",), [0, 1, 2]), y=Variable(("points",), [0, 1, 2]) ) assert_identical(expected, actual) expected.coords.update({"x": ("points", [0, 1, 2]), "y": ("points", [0, 1, 2])}) actual = data.sel( x=Variable(("points",), [0.1, 1.1, 2.5]), y=Variable(("points",), [0, 1.2, 2.0]), method="pad", ) assert_identical(expected, actual) idx_x = DataArray([0, 1, 2], dims=["a"], coords={"a": ["a", "b", "c"]}) idx_y = DataArray([0, 2, 1], dims=["b"], coords={"b": [0, 3, 6]}) expected_ary = data["foo"][[0, 1, 2], [0, 2, 1]] actual = data.sel(x=idx_x, y=idx_y) assert_array_equal(expected_ary, actual["foo"]) assert_identical(actual["a"].drop_vars("x"), idx_x["a"]) assert_identical(actual["b"].drop_vars("y"), idx_y["b"]) with pytest.raises(KeyError): data.sel(x=[2.5], y=[2.0], method="pad", tolerance=1e-3) def test_sel_method(self) -> None: data = create_test_data() expected = data.sel(dim2=1) actual = data.sel(dim2=0.95, 
method="nearest") assert_identical(expected, actual) actual = data.sel(dim2=0.95, method="nearest", tolerance=1) assert_identical(expected, actual) with pytest.raises(KeyError): actual = data.sel(dim2=np.pi, method="nearest", tolerance=0) expected = data.sel(dim2=[1.5]) actual = data.sel(dim2=[1.45], method="backfill") assert_identical(expected, actual) with pytest.raises(NotImplementedError, match=r"slice objects"): data.sel(dim2=slice(1, 3), method="ffill") with pytest.raises(TypeError, match=r"``method``"): # this should not pass silently data.sel(dim2=1, method=data) # type: ignore[arg-type] # cannot pass method if there is no associated coordinate with pytest.raises(ValueError, match=r"cannot supply"): data.sel(dim1=0, method="nearest") def test_loc(self) -> None: data = create_test_data() expected = data.sel(dim3="a") actual = data.loc[dict(dim3="a")] assert_identical(expected, actual) with pytest.raises(TypeError, match=r"can only lookup dict"): data.loc["a"] # type: ignore[index] def test_selection_multiindex(self) -> None: midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") mdata = Dataset(data_vars={"var": ("x", range(8))}, coords=midx_coords) def test_sel( lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None ) -> None: ds = mdata.sel(x=lab_indexer) expected_ds = mdata.isel(x=pos_indexer) if not replaced_idx: assert_identical(ds, expected_ds) else: if renamed_dim: assert ds["var"].dims[0] == renamed_dim ds = ds.rename({renamed_dim: "x"}) assert_identical(ds["var"].variable, expected_ds["var"].variable) assert not ds["x"].equals(expected_ds["x"]) test_sel(("a", 1, -1), 0) test_sel(("b", 2, -2), -1) test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three") test_sel(("a",), range(4), replaced_idx=True) test_sel("a", range(4), replaced_idx=True) test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7]) test_sel(slice("a", "b"), range(8)) test_sel(slice(("a", 1), ("b", 1)), range(6)) test_sel({"one": "a", "two": 1, "three": -1}, 0) test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three") test_sel({"one": "a"}, range(4), replaced_idx=True) assert_identical(mdata.loc[{"x": {"one": "a"}}], mdata.sel(x={"one": "a"})) assert_identical(mdata.loc[{"x": "a"}], mdata.sel(x="a")) assert_identical(mdata.loc[{"x": ("a", 1)}], mdata.sel(x=("a", 1))) assert_identical(mdata.loc[{"x": ("a", 1, -1)}], mdata.sel(x=("a", 1, -1))) assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1)) def test_broadcast_like(self) -> None: original1 = DataArray( np.random.randn(5), [("x", range(5))], name="a" ).to_dataset() original2 = DataArray(np.random.randn(6), [("y", range(6))], name="b") expected1, expected2 = broadcast(original1, original2) assert_identical( original1.broadcast_like(original2), expected1.transpose("y", "x") ) assert_identical(original2.broadcast_like(original1), expected2) def test_to_pandas(self) -> None: # 0D -> series actual = Dataset({"a": 1, "b": 2}).to_pandas() expected = pd.Series([1, 2], ["a", "b"]) assert_array_equal(actual, expected) # 1D -> dataframe x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)}) actual_df = ds.to_pandas() expected_df = ds.to_dataframe() assert expected_df.equals(actual_df), (expected_df, actual_df) # 2D -> error x2d = np.random.randn(10, 10) y2d = np.random.randn(10, 10) with pytest.raises(ValueError, match=r"cannot convert 
Datasets"): Dataset({"a": (["t", "r"], x2d), "b": (["t", "r"], y2d)}).to_pandas() def test_reindex_like(self) -> None: data = create_test_data() data["letters"] = ("dim3", 10 * ["a"]) expected = data.isel(dim1=slice(10), time=slice(13)) actual = data.reindex_like(expected) assert_identical(actual, expected) expected = data.copy(deep=True) expected["dim3"] = ("dim3", list("cdefghijkl")) expected["var3"][:-2] = expected["var3"][2:].values expected["var3"][-2:] = np.nan expected["letters"] = expected["letters"].astype(object) expected["letters"][-2:] = np.nan expected["numbers"] = expected["numbers"].astype(float) expected["numbers"][:-2] = expected["numbers"][2:].values expected["numbers"][-2:] = np.nan actual = data.reindex_like(expected) assert_identical(actual, expected) def test_reindex(self) -> None: data = create_test_data() assert_identical(data, data.reindex()) expected = data.assign_coords(dim1=data["dim1"]) actual = data.reindex(dim1=data["dim1"]) assert_identical(actual, expected) actual = data.reindex(dim1=data["dim1"].values) assert_identical(actual, expected) actual = data.reindex(dim1=data["dim1"].to_index()) assert_identical(actual, expected) with pytest.raises( ValueError, match=r"cannot reindex or align along dimension" ): data.reindex(dim1=data["dim1"][:5]) expected = data.isel(dim2=slice(5)) actual = data.reindex(dim2=data["dim2"][:5]) assert_identical(actual, expected) # test dict-like argument actual = data.reindex({"dim2": data["dim2"]}) expected = data assert_identical(actual, expected) with pytest.raises(ValueError, match=r"cannot specify both"): data.reindex({"x": 0}, x=0) with pytest.raises(ValueError, match=r"dictionary"): data.reindex("foo") # type: ignore[arg-type] # invalid dimension # TODO: (benbovy - explicit indexes): uncomment? 
        # --> from reindex docstrings: "any mismatched dimension is simply ignored"
        # with pytest.raises(ValueError, match=r"indexer keys.*not correspond.*"):
        #     data.reindex(invalid=0)

        # out of order
        expected = data.sel(dim2=data["dim2"][:5:-1])
        actual = data.reindex(dim2=data["dim2"][:5:-1])
        assert_identical(actual, expected)

        # multiple fill values
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),
        )
        actual = data.reindex(
            dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10, "var2": -20}
        )
        assert_identical(actual, expected)

        # use the default value
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(
                data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)
            ),
        )
        actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10})
        assert_identical(actual, expected)

        # regression test for #279
        expected = Dataset({"x": ("time", np.random.randn(5))}, {"time": range(5)})
        time2 = DataArray(np.arange(5), dims="time2")
        with pytest.raises(ValueError):
            actual = expected.reindex(time=time2)

        # another regression test
        ds = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 4)))}, {"x": range(3), "y": range(4)}
        )
        expected = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 2)))}, {"x": [0, 1, 3], "y": [0, 1]}
        )
        expected["foo"][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        assert_identical(expected, actual)

    def test_reindex_attrs_encoding(self) -> None:
        ds = Dataset(
            {"data": ("x", [1, 2, 3])},
            {"x": ("x", [0, 1, 2], {"foo": "bar"}, {"bar": "baz"})},
        )
        actual = ds.reindex(x=[0, 1])
        expected = Dataset(
            {"data": ("x", [1, 2])},
            {"x": ("x", [0, 1], {"foo": "bar"}, {"bar": "baz"})},
        )
        assert_identical(actual, expected)
        assert actual.x.encoding == expected.x.encoding

    def test_reindex_warning(self) -> None:
        data = create_test_data()

        with pytest.raises(ValueError):
            # a DataArray indexer with a different dimension raises an error
            ind = xr.DataArray([0.0, 1.0], dims=["new_dim"], name="ind")
            data.reindex(dim2=ind)

        # Should not warn
        ind = xr.DataArray([0.0, 1.0], dims=["dim2"], name="ind")
        with warnings.catch_warnings(record=True) as ws:
            data.reindex(dim2=ind)
            assert len(ws) == 0

    def test_reindex_variables_copied(self) -> None:
        data = create_test_data()
        reindexed_data = data.reindex(copy=False)
        for k in data.variables:
            assert reindexed_data.variables[k] is not data.variables[k]

    def test_reindex_method(self) -> None:
        ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method="backfill")
        expected = Dataset({"x": ("y", [10, 20, np.nan]), "y": y})
        assert_identical(expected, actual)

        actual = ds.reindex(y=y, method="backfill", tolerance=0.1)
        expected = Dataset({"x": ("y", 3 * [np.nan]), "y": y})
        assert_identical(expected, actual)

        actual = ds.reindex(y=y, method="backfill", tolerance=[0.1, 0.5, 0.1])
        expected = Dataset({"x": ("y", [np.nan, 20, np.nan]), "y": y})
        assert_identical(expected, actual)

        actual = ds.reindex(y=[0.1, 0.1, 1], tolerance=[0, 0.1, 0], method="nearest")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": [0.1, 0.1, 1]})
        assert_identical(expected, actual)

        actual = ds.reindex(y=y, method="pad")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": y})
        assert_identical(expected, actual)

        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, method="pad")
        assert_identical(expected, actual)
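    # Usage sketch (illustrative only, mirroring the assertions in
    # test_reindex_method above rather than adding new behaviour): ``reindex``
    # with ``method`` fills new labels from neighbouring ones, and
    # ``tolerance`` bounds how far that fill may reach::
    #
    #     ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]})
    #     ds.reindex(y=[-0.5, 0.5, 1.5], method="backfill")
    #     # -> x == [10, 20, nan]
    #     ds.reindex(y=[-0.5, 0.5, 1.5], method="backfill", tolerance=0.1)
    #     # -> x == [nan, nan, nan]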
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}]) def test_reindex_fill_value(self, fill_value) -> None: ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]}) y = [0, 1, 2] actual = ds.reindex(y=y, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_x = fill_value_z = np.nan elif isinstance(fill_value, dict): fill_value_x = fill_value["x"] fill_value_z = fill_value["z"] else: fill_value_x = fill_value_z = fill_value expected = Dataset( { "x": ("y", [10, 20, fill_value_x]), "z": ("y", [-20, -10, fill_value_z]), "y": y, } ) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}]) def test_reindex_like_fill_value(self, fill_value) -> None: ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]}) y = [0, 1, 2] alt = Dataset({"y": y}) actual = ds.reindex_like(alt, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_x = fill_value_z = np.nan elif isinstance(fill_value, dict): fill_value_x = fill_value["x"] fill_value_z = fill_value["z"] else: fill_value_x = fill_value_z = fill_value expected = Dataset( { "x": ("y", [10, 20, fill_value_x]), "z": ("y", [-20, -10, fill_value_z]), "y": y, } ) assert_identical(expected, actual) @pytest.mark.parametrize("dtype", [str, bytes]) def test_reindex_str_dtype(self, dtype) -> None: data = Dataset({"data": ("x", [1, 2]), "x": np.array(["a", "b"], dtype=dtype)}) actual = data.reindex(x=data.x) expected = data assert_identical(expected, actual) assert actual.x.dtype == expected.x.dtype def test_reindex_with_multiindex_level(self) -> None: # test for https://github.com/pydata/xarray/issues/10347 mindex = pd.MultiIndex.from_product( [[100, 200, 300], [1, 2, 3, 4]], names=["x", "y"] ) y_idx = PandasIndex(mindex.levels[1], "y") ds1 = xr.Dataset(coords={"y": [1, 2, 3]}) ds2 = xr.Dataset(coords=xr.Coordinates.from_xindex(y_idx)) actual = ds1.reindex(y=ds2.y) assert_identical(actual, ds2) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}]) def test_align_fill_value(self, fill_value) -> None: x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})}) y = Dataset({"bar": DataArray([1, 2], dims=["x"], coords={"x": [1, 3]})}) x2, y2 = align(x, y, join="outer", fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_foo = fill_value_bar = np.nan elif isinstance(fill_value, dict): fill_value_foo = fill_value["foo"] fill_value_bar = fill_value["bar"] else: fill_value_foo = fill_value_bar = fill_value expected_x2 = Dataset( { "foo": DataArray( [1, 2, fill_value_foo], dims=["x"], coords={"x": [1, 2, 3]} ) } ) expected_y2 = Dataset( { "bar": DataArray( [1, fill_value_bar, 2], dims=["x"], coords={"x": [1, 2, 3]} ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align(self) -> None: left = create_test_data() right = left.copy(deep=True) right["dim3"] = ("dim3", list("cdefghijkl")) right["var3"][:-2] = right["var3"][2:].values right["var3"][-2:] = np.random.randn(*right["var3"][-2:].shape) right["numbers"][:-2] = right["numbers"][2:].values right["numbers"][-2:] = -10 intersection = list("cdefghij") union = list("abcdefghijkl") left2, right2 = align(left, right, join="inner") assert_array_equal(left2["dim3"], intersection) 
assert_identical(left2, right2) left2, right2 = align(left, right, join="outer") assert_array_equal(left2["dim3"], union) assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(left2["var3"][-2:]).all() assert np.isnan(right2["var3"][:2]).all() left2, right2 = align(left, right, join="left") assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_equal(left2["dim3"].variable, left["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(right2["var3"][:2]).all() left2, right2 = align(left, right, join="right") assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_equal(left2["dim3"].variable, right["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(left2["var3"][-2:]).all() with pytest.raises(ValueError, match=r"invalid value for join"): align(left, right, join="foobar") # type: ignore[call-overload] with pytest.raises(TypeError): align(left, right, foo="bar") # type: ignore[call-overload] def test_align_exact(self) -> None: left = xr.Dataset(coords={"x": [0, 1]}) right = xr.Dataset(coords={"x": [1, 2]}) left1, left2 = xr.align(left, left, join="exact") assert_identical(left1, left) assert_identical(left2, left) with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"): xr.align(left, right, join="exact") def test_align_override(self) -> None: left = xr.Dataset(coords={"x": [0, 1, 2]}) right = xr.Dataset(coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]}) expected_right = xr.Dataset(coords={"x": [0, 1, 2], "y": [1, 2, 3]}) new_left, new_right = xr.align(left, right, join="override") assert_identical(left, new_left) assert_identical(new_right, expected_right) new_left, new_right = xr.align(left, right, exclude="x", join="override") assert_identical(left, new_left) assert_identical(right, new_right) new_left, new_right = xr.align( left.isel(x=0, drop=True), right, exclude="x", join="override" ) assert_identical(left.isel(x=0, drop=True), new_left) assert_identical(right, new_right) with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): xr.align(left.isel(x=0).expand_dims("x"), right, join="override") def test_align_exclude(self) -> None: x = Dataset( { "foo": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]} ) } ) y = Dataset( { "bar": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 3], "y": [5, 6]} ) } ) x2, y2 = align(x, y, exclude=["y"], join="outer") expected_x2 = Dataset( { "foo": DataArray( [[1, 2], [3, 4], [np.nan, np.nan]], dims=["x", "y"], coords={"x": [1, 2, 3], "y": [3, 4]}, ) } ) expected_y2 = Dataset( { "bar": DataArray( [[1, 2], [np.nan, np.nan], [3, 4]], dims=["x", "y"], coords={"x": [1, 2, 3], "y": [5, 6]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align_nocopy(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [1, 2, 3])])}) y = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])}) expected_x2 = x expected_y2 = Dataset( {"foo": DataArray([1, 2, np.nan], coords=[("x", [1, 2, 3])])} ) x2, y2 = align(x, y, copy=False, join="outer") assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x["foo"].data) is source_ndarray(x2["foo"].data) x2, y2 = align(x, y, copy=True, join="outer") assert source_ndarray(x["foo"].data) is not 
source_ndarray(x2["foo"].data) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align_indexes(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], dims="x", coords=[("x", [1, 2, 3])])}) (x2,) = align(x, indexes={"x": [2, 3, 1]}) expected_x2 = Dataset( {"foo": DataArray([2, 3, 1], dims="x", coords={"x": [2, 3, 1]})} ) assert_identical(expected_x2, x2) def test_align_multiple_indexes_common_dim(self) -> None: a = Dataset(coords={"x": [1, 2], "xb": ("x", [3, 4])}).set_xindex("xb") b = Dataset(coords={"x": [1], "xb": ("x", [3])}).set_xindex("xb") (a2, b2) = align(a, b, join="inner") assert_identical(a2, b, check_default_indexes=False) assert_identical(b2, b, check_default_indexes=False) c = Dataset(coords={"x": [1, 3], "xb": ("x", [2, 4])}).set_xindex("xb") with pytest.raises(AlignmentError, match=r".*conflicting re-indexers"): align(a, c) def test_align_conflicting_indexes(self) -> None: class CustomIndex(PandasIndex): ... a = Dataset(coords={"xb": ("x", [3, 4])}).set_xindex("xb") b = Dataset(coords={"xb": ("x", [3])}).set_xindex("xb", CustomIndex) with pytest.raises(AlignmentError, match=r"cannot align.*conflicting indexes"): align(a, b) def test_align_non_unique(self) -> None: x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]}) x1, x2 = align(x, x) assert_identical(x1, x) assert_identical(x2, x) y = Dataset({"bar": ("x", [6, 7]), "x": [0, 1]}) with pytest.raises(ValueError, match=r"cannot reindex or align"): align(x, y) def test_align_str_dtype(self) -> None: a = Dataset({"foo": ("x", [0, 1])}, coords={"x": ["a", "b"]}) b = Dataset({"foo": ("x", [1, 2])}, coords={"x": ["b", "c"]}) expected_a = Dataset( {"foo": ("x", [0, 1, np.nan])}, coords={"x": ["a", "b", "c"]} ) expected_b = Dataset( {"foo": ("x", [np.nan, 1, 2])}, coords={"x": ["a", "b", "c"]} ) actual_a, actual_b = xr.align(a, b, join="outer") assert_identical(expected_a, actual_a) assert expected_a.x.dtype == actual_a.x.dtype assert_identical(expected_b, actual_b) assert expected_b.x.dtype == actual_b.x.dtype @pytest.mark.parametrize("join", ["left", "override"]) def test_align_index_var_attrs(self, join) -> None: # regression test https://github.com/pydata/xarray/issues/6852 # aligning two objects should have no side effect on their index variable # metadata. 
ds = Dataset(coords={"x": ("x", [1, 2, 3], {"units": "m"})}) ds_noattr = Dataset(coords={"x": ("x", [1, 2, 3])}) xr.align(ds_noattr, ds, join=join) assert ds.x.attrs == {"units": "m"} assert ds_noattr.x.attrs == {} def test_align_scalar_index(self) -> None: # ensure that indexes associated with scalar coordinates are not ignored # during alignment ds1 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex) ds2 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex) actual = xr.align(ds1, ds2, join="exact") assert_identical(actual[0], ds1, check_default_indexes=False) assert_identical(actual[1], ds2, check_default_indexes=False) ds3 = Dataset(coords={"x": 1}).set_xindex("x", ScalarIndex) with pytest.raises(AlignmentError, match="cannot align objects"): xr.align(ds1, ds3, join="exact") def test_align_multi_dim_index_exclude_dims(self) -> None: ds1 = ( Dataset(coords={"x": [1, 2], "y": [3, 4]}) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) ds2 = ( Dataset(coords={"x": [1, 2], "y": [5, 6]}) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) for join in ("outer", "exact"): actual = xr.align(ds1, ds2, join=join, exclude="y") assert_identical(actual[0], ds1, check_default_indexes=False) assert_identical(actual[1], ds2, check_default_indexes=False) with pytest.raises( AlignmentError, match=r"cannot align objects.*index.*not equal" ): xr.align(ds1, ds2, join="exact") with pytest.raises(AlignmentError, match="cannot exclude dimension"): xr.align(ds1, ds2, join="override", exclude="y") def test_align_index_equals_future_warning(self) -> None: # TODO: remove this test once the deprecation cycle is completed class DeprecatedEqualsSignatureIndex(PandasIndex): def equals(self, other: Index) -> bool: # type: ignore[override] return super().equals(other, exclude=None) ds = ( Dataset(coords={"x": [1, 2]}) .drop_indexes("x") .set_xindex("x", DeprecatedEqualsSignatureIndex) ) with pytest.warns(FutureWarning, match=r"signature.*deprecated"): xr.align(ds, ds.copy(), join="exact") def test_broadcast(self) -> None: ds = Dataset( {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])} ) expected = Dataset( { "foo": (("x", "y"), [[0, 0]]), "bar": (("x", "y"), [[1, 1]]), "baz": (("x", "y"), [[2, 3]]), }, {"c": ("x", [4])}, ) (actual,) = broadcast(ds) assert_identical(expected, actual) ds_x = Dataset({"foo": ("x", [1])}) ds_y = Dataset({"bar": ("y", [2, 3])}) expected_x = Dataset({"foo": (("x", "y"), [[1, 1]])}) expected_y = Dataset({"bar": (("x", "y"), [[2, 3]])}) actual_x, actual_y = broadcast(ds_x, ds_y) assert_identical(expected_x, actual_x) assert_identical(expected_y, actual_y) array_y = ds_y["bar"] expected_y2 = expected_y["bar"] actual_x2, actual_y2 = broadcast(ds_x, array_y) assert_identical(expected_x, actual_x2) assert_identical(expected_y2, actual_y2) def test_broadcast_nocopy(self) -> None: # Test that data is not copied if not needed x = Dataset({"foo": (("x", "y"), [[1, 1]])}) y = Dataset({"bar": ("y", [2, 3])}) (actual_x,) = broadcast(x) assert_identical(x, actual_x) assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data) actual_x, _actual_y = broadcast(x, y) assert_identical(x, actual_x) assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data) def test_broadcast_exclude(self) -> None: x = Dataset( { "foo": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]} ), "bar": DataArray(5), } ) y = Dataset( { "foo": DataArray( [[1, 2]], dims=["z", "y"], coords={"z": [1], "y": [5, 6]} ) } ) x2, y2 = 
broadcast(x, y, exclude=["y"]) expected_x2 = Dataset( { "foo": DataArray( [[[1, 2]], [[3, 4]]], dims=["x", "z", "y"], coords={"z": [1], "x": [1, 2], "y": [3, 4]}, ), "bar": DataArray( [[5], [5]], dims=["x", "z"], coords={"x": [1, 2], "z": [1]} ), } ) expected_y2 = Dataset( { "foo": DataArray( [[[1, 2]], [[1, 2]]], dims=["x", "z", "y"], coords={"z": [1], "x": [1, 2], "y": [5, 6]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_misaligned(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [-1, -2, -3])])}) y = Dataset( { "bar": DataArray( [[1, 2], [3, 4]], dims=["y", "x"], coords={"y": [1, 2], "x": [10, -3]}, ) } ) x2, y2 = broadcast(x, y) expected_x2 = Dataset( { "foo": DataArray( [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]], dims=["x", "y"], coords={"y": [1, 2], "x": [-3, -2, -1, 10]}, ) } ) expected_y2 = Dataset( { "bar": DataArray( [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]], dims=["x", "y"], coords={"y": [1, 2], "x": [-3, -2, -1, 10]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_multi_index(self) -> None: # GH6430 ds = Dataset( {"foo": (("x", "y", "z"), np.ones((3, 4, 2)))}, {"x": ["a", "b", "c"], "y": [1, 2, 3, 4]}, ) stacked = ds.stack(space=["x", "y"]) broadcasted, _ = broadcast(stacked, stacked.space) assert broadcasted.xindexes["x"] is broadcasted.xindexes["space"] assert broadcasted.xindexes["y"] is broadcasted.xindexes["space"] def test_variable_indexing(self) -> None: data = create_test_data() v = data["var1"] d1 = data["dim1"] d2 = data["dim2"] assert_equal(v, v[d1.values]) assert_equal(v, v[d1]) assert_equal(v[:3], v[d1 < 3]) assert_equal(v[:, 3:], v[:, d2 >= 1.5]) assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5]) assert_equal(v[:3, :2], v[range(3), range(2)]) assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]]) def test_drop_variables(self) -> None: data = create_test_data() assert_identical(data, data.drop_vars([])) expected = Dataset({k: data[k] for k in data.variables if k != "time"}) actual = data.drop_vars("time") assert_identical(expected, actual) actual = data.drop_vars(["time"]) assert_identical(expected, actual) with pytest.raises( ValueError, match=re.escape( "These variables cannot be found in this dataset: ['not_found_here']" ), ): data.drop_vars("not_found_here") actual = data.drop_vars("not_found_here", errors="ignore") assert_identical(data, actual) actual = data.drop_vars(["not_found_here"], errors="ignore") assert_identical(data, actual) actual = data.drop_vars(["time", "not_found_here"], errors="ignore") assert_identical(expected, actual) # deprecated approach with `drop` works (straight copy paste from above) with pytest.warns(DeprecationWarning): actual = data.drop("not_found_here", errors="ignore") assert_identical(data, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["not_found_here"], errors="ignore") assert_identical(data, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["time", "not_found_here"], errors="ignore") assert_identical(expected, actual) with pytest.warns(DeprecationWarning): actual = data.drop({"time", "not_found_here"}, errors="ignore") assert_identical(expected, actual) def test_drop_multiindex_level(self) -> None: data = create_test_multiindex() expected = data.drop_vars(["x", "level_1", "level_2"]) with pytest.warns(DeprecationWarning): actual = data.drop_vars("level_1") assert_identical(expected, actual) def test_drop_multiindex_labels(self) -> None: data = create_test_multiindex() mindex = 
pd.MultiIndex.from_tuples( [ ("a", 2), ("b", 1), ("b", 2), ], names=("level_1", "level_2"), ) expected = Dataset({}, Coordinates.from_pandas_multiindex(mindex, "x")) actual = data.drop_sel(x=("a", 1)) assert_identical(expected, actual) def test_drop_index_labels(self) -> None: data = Dataset({"A": (["x", "y"], np.random.randn(2, 3)), "x": ["a", "b"]}) with pytest.warns(DeprecationWarning): actual = data.drop(["a"], dim="x") expected = data.isel(x=[1]) assert_identical(expected, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["a", "b"], dim="x") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) with pytest.raises(KeyError): # not contained in axis with pytest.warns(DeprecationWarning): data.drop(["c"], dim="x") with pytest.warns(DeprecationWarning): actual = data.drop(["c"], dim="x", errors="ignore") assert_identical(data, actual) with pytest.raises(ValueError): data.drop(["c"], dim="x", errors="wrong_value") # type: ignore[arg-type] with pytest.warns(DeprecationWarning): actual = data.drop(["a", "b", "c"], "x", errors="ignore") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) # DataArrays as labels are a nasty corner case as they are not # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays. actual = data.drop_sel(x=DataArray(["a", "b", "c"]), errors="ignore") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) with pytest.warns(DeprecationWarning): data.drop(DataArray(["a", "b", "c"]), dim="x", errors="ignore") assert_identical(expected, actual) actual = data.drop_sel(y=[1]) expected = data.isel(y=[0, 2]) assert_identical(expected, actual) with pytest.raises(KeyError, match=r"not found in axis"): data.drop_sel(x=0) def test_drop_labels_by_keyword(self) -> None: data = Dataset( {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)} ) # Basic functionality. assert len(data.coords["x"]) == 2 with pytest.warns(DeprecationWarning): ds1 = data.drop(["a"], dim="x") ds2 = data.drop_sel(x="a") ds3 = data.drop_sel(x=["a"]) ds4 = data.drop_sel(x=["a", "b"]) ds5 = data.drop_sel(x=["a", "b"], y=range(0, 6, 2)) arr = DataArray(range(3), dims=["c"]) with pytest.warns(DeprecationWarning): data.drop(arr.coords) with pytest.warns(DeprecationWarning): data.drop(arr.xindexes) assert_array_equal(ds1.coords["x"], ["b"]) assert_array_equal(ds2.coords["x"], ["b"]) assert_array_equal(ds3.coords["x"], ["b"]) assert ds4.coords["x"].size == 0 assert ds5.coords["x"].size == 0 assert_array_equal(ds5.coords["y"], [1, 3, 5]) # Error handling if user tries both approaches. with pytest.raises(ValueError): data.drop(labels=["a"], x="a") with pytest.raises(ValueError): data.drop(labels=["a"], dim="x", x="a") warnings.filterwarnings("ignore", r"\W*drop") with pytest.raises(ValueError): data.drop(dim="x", x="a") def test_drop_labels_by_position(self) -> None: data = Dataset( {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)} ) # Basic functionality. 
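# drop_isel drops by integer position, so each call below should match the equivalent # label-based drop_sel call (e.g. drop_isel(x=0) vs drop_sel(x="a")).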
assert len(data.coords["x"]) == 2 actual = data.drop_isel(x=0) expected = data.drop_sel(x="a") assert_identical(expected, actual) actual = data.drop_isel(x=[0]) expected = data.drop_sel(x=["a"]) assert_identical(expected, actual) actual = data.drop_isel(x=[0, 1]) expected = data.drop_sel(x=["a", "b"]) assert_identical(expected, actual) assert actual.coords["x"].size == 0 actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2)) expected = data.drop_sel(x=["a", "b"], y=range(0, 6, 2)) assert_identical(expected, actual) assert actual.coords["x"].size == 0 with pytest.raises(KeyError): data.drop_isel(z=1) def test_drop_indexes(self) -> None: ds = Dataset( coords={ "x": ("x", [0, 1, 2]), "y": ("y", [3, 4, 5]), "foo": ("x", ["a", "a", "b"]), } ) actual = ds.drop_indexes("x") assert "x" not in actual.xindexes assert type(actual.x.variable) is Variable actual = ds.drop_indexes(["x", "y"]) assert "x" not in actual.xindexes assert "y" not in actual.xindexes assert type(actual.x.variable) is Variable assert type(actual.y.variable) is Variable with pytest.raises( ValueError, match=r"The coordinates \('not_a_coord',\) are not found in the dataset coordinates", ): ds.drop_indexes("not_a_coord") with pytest.raises(ValueError, match="those coordinates do not have an index"): ds.drop_indexes("foo") actual = ds.drop_indexes(["foo", "not_a_coord"], errors="ignore") assert_identical(actual, ds) # test index corrupted midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords=midx_coords) with pytest.raises(ValueError, match=r".*would corrupt the following index.*"): ds.drop_indexes("a") def test_drop_dims(self) -> None: data = xr.Dataset( { "A": (["x", "y"], np.random.randn(2, 3)), "B": ("x", np.random.randn(2)), "x": ["a", "b"], "z": np.pi, } ) actual = data.drop_dims("x") expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) actual = data.drop_dims("y") expected = data.drop_vars("A") assert_identical(expected, actual) actual = data.drop_dims(["x", "y"]) expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) with pytest.raises((ValueError, KeyError)): data.drop_dims("z") # not a dimension with pytest.raises((ValueError, KeyError)): data.drop_dims(None) # type:ignore[arg-type] actual = data.drop_dims("z", errors="ignore") assert_identical(data, actual) # should this be allowed? actual = data.drop_dims(None, errors="ignore") # type:ignore[arg-type] assert_identical(data, actual) with pytest.raises(ValueError): actual = data.drop_dims("z", errors="wrong_value") # type: ignore[arg-type] actual = data.drop_dims(["x", "y", "z"], errors="ignore") expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) def test_copy(self) -> None: data = create_test_data() data.attrs["Test"] = [1, 2, 3] for copied in [data.copy(deep=False), copy(data)]: assert_identical(data, copied) assert data.encoding == copied.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.indexes.safe_cast_to_index. # Limiting the test to data variables. 
for k in data.data_vars: v0 = data.variables[k] v1 = copied.variables[k] assert source_ndarray(v0.data) is source_ndarray(v1.data) copied["foo"] = ("z", np.arange(5)) assert "foo" not in data copied.attrs["foo"] = "bar" assert "foo" not in data.attrs assert data.attrs["Test"] is copied.attrs["Test"] for copied in [data.copy(deep=True), deepcopy(data)]: assert_identical(data, copied) for k, v0 in data.variables.items(): v1 = copied.variables[k] assert v0 is not v1 assert data.attrs["Test"] is not copied.attrs["Test"] def test_copy_with_data(self) -> None: orig = create_test_data() new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()} actual = orig.copy(data=new_data) expected = orig.copy() for k, v in new_data.items(): expected[k].data = v assert_identical(expected, actual) @pytest.mark.xfail(raises=AssertionError) @pytest.mark.parametrize( "deep, expected_orig", [ [ True, xr.DataArray( xr.IndexVariable("a", np.array([1, 2])), coords={"a": [1, 2]}, dims=["a"], ), ], [ False, xr.DataArray( xr.IndexVariable("a", np.array([999, 2])), coords={"a": [999, 2]}, dims=["a"], ), ], ], ) def test_copy_coords(self, deep, expected_orig) -> None: """The test fails for the shallow copy, and apparently only on Windows for some reason. In windows coords seem to be immutable unless it's one dataset deep copied from another.""" ds = xr.DataArray( np.ones([2, 2, 2]), coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]}, dims=["a", "b", "c"], name="value", ).to_dataset() ds_cp = ds.copy(deep=deep) new_a = np.array([999, 2]) ds_cp.coords["a"] = ds_cp.a.copy(data=new_a) expected_cp = xr.DataArray( xr.IndexVariable("a", new_a), coords={"a": [999, 2]}, dims=["a"], ) assert_identical(ds_cp.coords["a"], expected_cp) assert_identical(ds.coords["a"], expected_orig) def test_copy_with_data_errors(self) -> None: orig = create_test_data() new_var1 = np.arange(orig["var1"].size).reshape(orig["var1"].shape) with pytest.raises(ValueError, match=r"Data must be dict-like"): orig.copy(data=new_var1) # type: ignore[arg-type] with pytest.raises(ValueError, match=r"only contain variables in original"): orig.copy(data={"not_in_original": new_var1}) with pytest.raises(ValueError, match=r"contain all variables in original"): orig.copy(data={"var1": new_var1}) def test_drop_encoding(self) -> None: orig = create_test_data() vencoding = {"scale_factor": 10} orig.encoding = {"foo": "bar"} for k in orig.variables.keys(): orig[k].encoding = vencoding actual = orig.drop_encoding() assert actual.encoding == {} for v in actual.variables.values(): assert v.encoding == {} assert_equal(actual, orig) def test_rename(self) -> None: data = create_test_data() newnames = { "var1": "renamed_var1", "dim2": "renamed_dim2", } renamed = data.rename(newnames) variables = dict(data.variables) for nk, nv in newnames.items(): variables[nv] = variables.pop(nk) for k, v in variables.items(): dims = list(v.dims) for name, newname in newnames.items(): if name in dims: dims[dims.index(name)] = newname assert_equal( Variable(dims, v.values, v.attrs), renamed[k].variable.to_base_variable(), ) assert v.encoding == renamed[k].encoding assert type(v) is type(renamed.variables[k]) assert "var1" not in renamed assert "dim2" not in renamed with pytest.raises(ValueError, match=r"cannot rename 'not_a_var'"): data.rename({"not_a_var": "nada"}) with pytest.raises(ValueError, match=r"'var1' conflicts"): data.rename({"var2": "var1"}) # verify that we can rename a variable without accessing the data var1 = data["var1"] data["var1"] = (var1.dims, 
InaccessibleArray(var1.values)) renamed = data.rename(newnames) with pytest.raises(UnexpectedDataAccess): _ = renamed["renamed_var1"].values # https://github.com/python/mypy/issues/10008 renamed_kwargs = data.rename(**newnames) # type: ignore[arg-type] assert_identical(renamed, renamed_kwargs) def test_rename_old_name(self) -> None: # regression test for GH1477 data = create_test_data() with pytest.raises(ValueError, match=r"'samecol' conflicts"): data.rename({"var1": "samecol", "var2": "samecol"}) # This shouldn't cause any problems. data.rename({"var1": "var2", "var2": "var1"}) def test_rename_same_name(self) -> None: data = create_test_data() newnames = {"var1": "var1", "dim2": "dim2"} renamed = data.rename(newnames) assert_identical(renamed, data) def test_rename_dims(self) -> None: original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}) expected = Dataset( {"x": ("x_new", [0, 1, 2]), "y": ("x_new", [10, 11, 12]), "z": 42} ) # TODO: (benbovy - explicit indexes) update when set_index supports # setting index for non-dimension variables expected = expected.set_coords("x") actual = original.rename_dims({"x": "x_new"}) assert_identical(expected, actual, check_default_indexes=False) actual_2 = original.rename_dims(x="x_new") assert_identical(expected, actual_2, check_default_indexes=False) # Test to raise ValueError dims_dict_bad = {"x_bad": "x_new"} with pytest.raises(ValueError): original.rename_dims(dims_dict_bad) with pytest.raises(ValueError): original.rename_dims({"x": "z"}) def test_rename_vars(self) -> None: original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}) expected = Dataset( {"x_new": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42} ) # TODO: (benbovy - explicit indexes) update when set_index supports # setting index for non-dimension variables expected = expected.set_coords("x_new") actual = original.rename_vars({"x": "x_new"}) assert_identical(expected, actual, check_default_indexes=False) actual_2 = original.rename_vars(x="x_new") assert_identical(expected, actual_2, check_default_indexes=False) # Test to raise ValueError names_dict_bad = {"x_bad": "x_new"} with pytest.raises(ValueError): original.rename_vars(names_dict_bad) def test_rename_dimension_coord(self) -> None: # rename a dimension coordinate to a non-dimension coordinate # should preserve index original = Dataset(coords={"x": ("x", [0, 1, 2])}) actual = original.rename_vars({"x": "x_new"}) assert "x_new" in actual.xindexes actual_2 = original.rename_dims({"x": "x_new"}) assert "x" in actual_2.xindexes def test_rename_dimension_coord_warnings(self) -> None: # create a dimension coordinate by renaming a dimension or coordinate # should raise a warning (no index created) ds = Dataset(coords={"x": ("y", [0, 1])}) with pytest.warns( UserWarning, match=r"rename 'x' to 'y' does not create an index.*" ): ds.rename(x="y") ds = Dataset(coords={"y": ("x", [0, 1])}) with pytest.warns( UserWarning, match=r"rename 'x' to 'y' does not create an index.*" ): ds.rename(x="y") # a no-op rename should not raise a warning ds = Dataset( data_vars={"data": (("x", "y"), np.ones((2, 3)))}, coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ) with warnings.catch_warnings(): warnings.simplefilter("error") ds.rename(x="x") def test_rename_multiindex(self) -> None: midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") original = Dataset({}, midx_coords) midx_renamed = midx.rename(["a", "c"]) midx_coords_renamed =
Coordinates.from_pandas_multiindex(midx_renamed, "x") expected = Dataset({}, midx_coords_renamed) actual = original.rename({"b": "c"}) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"'a' conflicts"): with pytest.warns(UserWarning, match="does not create an index anymore"): original.rename({"x": "a"}) with pytest.raises(ValueError, match=r"'x' conflicts"): with pytest.warns(UserWarning, match="does not create an index anymore"): original.rename({"a": "x"}) with pytest.raises(ValueError, match=r"'b' conflicts"): original.rename({"a": "b"}) def test_rename_preserve_attrs_encoding(self) -> None: # test that attrs/encoding are propagated to new variable(s) created from an Index object original = Dataset(coords={"x": ("x", [0, 1, 2])}) expected = Dataset(coords={"y": ("y", [0, 1, 2])}) for ds, dim in zip([original, expected], ["x", "y"], strict=True): ds[dim].attrs = {"foo": "bar"} ds[dim].encoding = {"foo": "bar"} actual = original.rename({"x": "y"}) assert_identical(actual, expected) @requires_cftime def test_rename_does_not_change_CFTimeIndex_type(self) -> None: # make sure CFTimeIndex is not converted to DatetimeIndex #3522 time = xr.date_range( start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ) orig = Dataset(coords={"time": time}) renamed = orig.rename(time="time_new") assert "time_new" in renamed.xindexes # TODO: benbovy - flexible indexes: update when CFTimeIndex # inherits from xarray.Index assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), CFTimeIndex) assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new" # check original has not changed assert "time" in orig.xindexes assert isinstance(orig.xindexes["time"].to_pandas_index(), CFTimeIndex) assert orig.xindexes["time"].to_pandas_index().name == "time" # note: rename_dims(time="time_new") drops "ds.indexes" renamed = orig.rename_dims() assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex) renamed = orig.rename_vars() assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex) def test_rename_does_not_change_DatetimeIndex_type(self) -> None: # make sure the DatetimeIndex is preserved on rename time = pd.date_range(start="2000", periods=6, freq="2MS") orig = Dataset(coords={"time": time}) renamed = orig.rename(time="time_new") assert "time_new" in renamed.xindexes # TODO: benbovy - flexible indexes: update when DatetimeIndex # inherits from xarray.Index?
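# the rename should only relabel the index ("time" -> "time_new"); the underlying pandas # index must still be a DatetimeIndex, as asserted below.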
assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), DatetimeIndex) assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new" # check original has not changed assert "time" in orig.xindexes assert isinstance(orig.xindexes["time"].to_pandas_index(), DatetimeIndex) assert orig.xindexes["time"].to_pandas_index().name == "time" # note: rename_dims(time="time_new") drops "ds.indexes" renamed = orig.rename_dims() assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex) renamed = orig.rename_vars() assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex) def test_swap_dims(self) -> None: original = Dataset({"x": [1, 2, 3], "y": ("x", list("abc")), "z": 42}) expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": list("abc")}) actual = original.swap_dims({"x": "y"}) assert_identical(expected, actual) assert isinstance(actual.variables["y"], IndexVariable) assert isinstance(actual.variables["x"], Variable) assert actual.xindexes["y"].equals(expected.xindexes["y"]) roundtripped = actual.swap_dims({"y": "x"}) assert_identical(original.set_coords("y"), roundtripped) with pytest.raises(ValueError, match=r"cannot swap"): original.swap_dims({"y": "x"}) with pytest.raises(ValueError, match=r"replacement dimension"): original.swap_dims({"x": "z"}) expected = Dataset( {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])} ) actual = original.swap_dims({"x": "u"}) assert_identical(expected, actual) # as kwargs expected = Dataset( {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])} ) actual = original.swap_dims(x="u") assert_identical(expected, actual) # handle multiindex case midx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"]) original = Dataset({"x": [1, 2, 3], "y": ("x", midx), "z": 42}) midx_coords = Coordinates.from_pandas_multiindex(midx, "y") midx_coords["x"] = ("y", [1, 2, 3]) expected = Dataset({"z": 42}, midx_coords) actual = original.swap_dims({"x": "y"}) assert_identical(expected, actual) assert isinstance(actual.variables["y"], IndexVariable) assert isinstance(actual.variables["x"], Variable) assert actual.xindexes["y"].equals(expected.xindexes["y"]) def test_expand_dims_error(self) -> None: original = Dataset( { "x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3)), "z": ("a", np.random.randn(3)), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) with pytest.raises(ValueError, match=r"already exists"): original.expand_dims(dim=["x"]) # Make sure it raises true error also for non-dimensional coordinates # which has dimension. 
original = original.set_coords("z") with pytest.raises(ValueError, match=r"already exists"): original.expand_dims(dim=["z"]) original = Dataset( { "x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3)), "z": ("a", np.random.randn(3)), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) with pytest.raises(TypeError, match=r"value of new dimension"): original.expand_dims({"d": 3.2}) with pytest.raises(ValueError, match=r"both keyword and positional"): original.expand_dims({"d": 4}, e=4) def test_expand_dims_int(self) -> None: original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) actual = original.expand_dims(["z"], [1]) expected = Dataset( { "x": original["x"].expand_dims("z", 1), "y": original["y"].expand_dims("z", 1), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure squeeze restores the original data set. roundtripped = actual.squeeze("z") assert_identical(original, roundtripped) # another test with a negative axis actual = original.expand_dims(["z"], [-1]) expected = Dataset( { "x": original["x"].expand_dims("z", -1), "y": original["y"].expand_dims("z", -1), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure squeeze restores the original data set. roundtripped = actual.squeeze("z") assert_identical(original, roundtripped) def test_expand_dims_coords(self) -> None: original = Dataset({"x": ("a", np.array([1, 2, 3]))}) expected = Dataset( {"x": (("b", "a"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={"b": [1, 2]} ) actual = original.expand_dims(dict(b=[1, 2])) assert_identical(expected, actual) assert "b" not in original._coord_names def test_expand_dims_existing_scalar_coord(self) -> None: original = Dataset({"x": 1}, {"a": 2}) expected = Dataset({"x": (("a",), [1])}, {"a": [2]}) actual = original.expand_dims("a") assert_identical(expected, actual) def test_isel_expand_dims_roundtrip(self) -> None: original = Dataset({"x": (("a",), [1])}, {"a": [2]}) actual = original.isel(a=0).expand_dims("a") assert_identical(actual, original) def test_expand_dims_mixed_int_and_coords(self) -> None: # Test expanding one dimension to have size > 1 that doesn't have # coordinates, and also expanding another dimension to have size > 1 # that DOES have coordinates. 
original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, ) actual = original.expand_dims({"d": 4, "e": ["l", "m", "n"]}) expected = Dataset( { "x": xr.DataArray( original["x"].values * np.ones([4, 3, 3]), coords=dict(d=range(4), e=["l", "m", "n"], a=np.linspace(0, 1, 3)), dims=["d", "e", "a"], ).drop_vars("d"), "y": xr.DataArray( original["y"].values * np.ones([4, 3, 4, 3]), coords=dict( d=range(4), e=["l", "m", "n"], b=np.linspace(0, 1, 4), a=np.linspace(0, 1, 3), ), dims=["d", "e", "b", "a"], ).drop_vars("d"), }, coords={"c": np.linspace(0, 1, 5)}, ) assert_identical(actual, expected) def test_expand_dims_kwargs_python36plus(self) -> None: original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) other_way = original.expand_dims(e=["l", "m", "n"]) other_way_expected = Dataset( { "x": xr.DataArray( original["x"].values * np.ones([3, 3]), coords=dict(e=["l", "m", "n"], a=np.linspace(0, 1, 3)), dims=["e", "a"], ), "y": xr.DataArray( original["y"].values * np.ones([3, 4, 3]), coords=dict( e=["l", "m", "n"], b=np.linspace(0, 1, 4), a=np.linspace(0, 1, 3), ), dims=["e", "b", "a"], ), }, coords={"c": np.linspace(0, 1, 5)}, attrs={"key": "entry"}, ) assert_identical(other_way_expected, other_way) @pytest.mark.parametrize("create_index_for_new_dim_flag", [True, False]) def test_expand_dims_create_index_data_variable( self, create_index_for_new_dim_flag ): # data variables should not gain an index ever ds = Dataset({"x": 0}) if create_index_for_new_dim_flag: with pytest.warns(UserWarning, match="No index created"): expanded = ds.expand_dims( "x", create_index_for_new_dim=create_index_for_new_dim_flag ) else: expanded = ds.expand_dims( "x", create_index_for_new_dim=create_index_for_new_dim_flag ) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset({"x": ("x", [0])}).drop_indexes("x").reset_coords("x") assert_identical(expanded, expected, check_default_indexes=False) assert expanded.indexes == {} def test_expand_dims_create_index_coordinate_variable(self): # coordinate variables should gain an index only if create_index_for_new_dim is True (the default) ds = Dataset(coords={"x": 0}) expanded = ds.expand_dims("x") expected = Dataset({"x": ("x", [0])}) assert_identical(expanded, expected) expanded_no_index = ds.expand_dims("x", create_index_for_new_dim=False) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset(coords={"x": ("x", [0])}).drop_indexes("x") assert_identical(expanded_no_index, expected, check_default_indexes=False) assert expanded_no_index.indexes == {} def test_expand_dims_create_index_from_iterable(self): ds = Dataset(coords={"x": 0}) expanded = ds.expand_dims(x=[0, 1]) expected = Dataset({"x": ("x", [0, 1])}) assert_identical(expanded, expected) expanded_no_index = ds.expand_dims(x=[0, 1], create_index_for_new_dim=False) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset(coords={"x": ("x", [0, 1])}).drop_indexes("x") assert_identical(expanded, expected, check_default_indexes=False) assert expanded_no_index.indexes == {} def test_expand_dims_non_nanosecond_conversion(self) -> None: # Regression test for 
https://github.com/pydata/xarray/issues/7493#issuecomment-1953091000 # todo: test still needed? ds = Dataset().expand_dims({"time": [np.datetime64("2018-01-01", "m")]}) assert ds.time.dtype == np.dtype("datetime64[s]") def test_set_index(self) -> None: expected = create_test_multiindex() mindex = expected["x"].to_index() indexes = [mindex.get_level_values(str(n)) for n in mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} ds = Dataset({}, coords=coords) obj = ds.set_index(x=mindex.names) assert_identical(obj, expected) # ensure pre-existing indexes involved are removed # (level_2 should be a coordinate with no index) ds = create_test_multiindex() coords = {"x": coords["level_1"], "level_2": coords["level_2"]} expected = Dataset({}, coords=coords) obj = ds.set_index(x="level_1") assert_identical(obj, expected) # ensure set_index with no existing index and a single data var given # doesn't return multi-index ds = Dataset(data_vars={"x_var": ("x", [0, 1, 2])}) expected = Dataset(coords={"x": [0, 1, 2]}) assert_identical(ds.set_index(x="x_var"), expected) with pytest.raises(ValueError, match=r"bar variable\(s\) do not exist"): ds.set_index(foo="bar") with pytest.raises(ValueError, match=r"dimension mismatch.*"): ds.set_index(y="x_var") ds = Dataset(coords={"x": 1}) with pytest.raises( ValueError, match=r".*cannot set a PandasIndex.*scalar variable.*" ): ds.set_index(x="x") def test_set_index_deindexed_coords(self) -> None: # test de-indexed coordinates are converted to base variable # https://github.com/pydata/xarray/issues/6969 one = ["a", "a", "b", "b"] two = [1, 2, 1, 2] three = ["c", "c", "d", "d"] four = [3, 4, 3, 4] midx_12 = pd.MultiIndex.from_arrays([one, two], names=["one", "two"]) midx_34 = pd.MultiIndex.from_arrays([three, four], names=["three", "four"]) coords = Coordinates.from_pandas_multiindex(midx_12, "x") coords["three"] = ("x", three) coords["four"] = ("x", four) ds = xr.Dataset(coords=coords) actual = ds.set_index(x=["three", "four"]) coords_expected = Coordinates.from_pandas_multiindex(midx_34, "x") coords_expected["one"] = ("x", one) coords_expected["two"] = ("x", two) expected = xr.Dataset(coords=coords_expected) assert_identical(actual, expected) def test_reset_index(self) -> None: ds = create_test_multiindex() mindex = ds["x"].to_index() indexes = [mindex.get_level_values(str(n)) for n in mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} expected = Dataset({}, coords=coords) obj = ds.reset_index("x") assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 ds = Dataset(coords={"y": ("x", [1, 2, 3])}) with pytest.raises(ValueError, match=r".*not coordinates with an index"): ds.reset_index("y") def test_reset_index_keep_attrs(self) -> None: coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True}) ds = Dataset({}, {"coord_1": coord_1}) obj = ds.reset_index("coord_1") assert ds.coord_1.attrs == obj.coord_1.attrs assert len(obj.xindexes) == 0 def test_reset_index_drop_dims(self) -> None: ds = Dataset(coords={"x": [1, 2]}) reset = ds.reset_index("x", drop=True) assert len(reset.dims) == 0 @pytest.mark.parametrize( ["arg", "drop", "dropped", "converted", "renamed"], [ ("foo", False, [], [], {"bar": "x"}), ("foo", True, ["foo"], [], {"bar": "x"}), ("x", False, ["x"], ["foo", "bar"], {}), ("x", True, ["x", "foo", "bar"], [], {}), (["foo", "bar"], False, ["x"], ["foo", "bar"], {}), (["foo", "bar"], True, ["x", "foo", "bar"], [], {}), (["x", "foo"], False, ["x"], ["foo", "bar"], {}), (["foo", "x"], True, 
["x", "foo", "bar"], [], {}), ], ) def test_reset_index_drop_convert( self, arg: str | list[str], drop: bool, dropped: list[str], converted: list[str], renamed: dict[str, str], ) -> None: # regressions https://github.com/pydata/xarray/issues/6946 and # https://github.com/pydata/xarray/issues/6989 # check that multi-index dimension or level coordinates are dropped, converted # from IndexVariable to Variable or renamed to dimension as expected midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("foo", "bar")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = xr.Dataset(coords=midx_coords) reset = ds.reset_index(arg, drop=drop) for name in dropped: assert name not in reset.variables for name in converted: assert_identical(reset[name].variable, ds[name].variable.to_base_variable()) for old_name, new_name in renamed.items(): assert_identical(ds[old_name].variable, reset[new_name].variable) def test_reorder_levels(self) -> None: ds = create_test_multiindex() mindex = ds["x"].to_index() assert isinstance(mindex, pd.MultiIndex) midx = mindex.reorder_levels(["level_2", "level_1"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") expected = Dataset({}, coords=midx_coords) # check attrs propagated ds["level_1"].attrs["foo"] = "bar" expected["level_1"].attrs["foo"] = "bar" reindexed = ds.reorder_levels(x=["level_2", "level_1"]) assert_identical(reindexed, expected) ds = Dataset({}, coords={"x": [1, 2]}) with pytest.raises(ValueError, match=r"has no MultiIndex"): ds.reorder_levels(x=["level_1", "level_2"]) def test_set_xindex(self) -> None: ds = Dataset( coords={"foo": ("x", ["a", "a", "b", "b"]), "bar": ("x", [0, 1, 2, 3])} ) actual = ds.set_xindex("foo") expected = ds.set_index(x="foo").rename_vars(x="foo") assert_identical(actual, expected, check_default_indexes=False) actual_mindex = ds.set_xindex(["foo", "bar"]) expected_mindex = ds.set_index(x=["foo", "bar"]) assert_identical(actual_mindex, expected_mindex) class NotAnIndex: ... 
with pytest.raises(TypeError, match=r".*not a subclass of xarray.Index"): ds.set_xindex("foo", NotAnIndex) # type: ignore[arg-type] with pytest.raises(ValueError, match="those variables don't exist"): ds.set_xindex("not_a_coordinate", PandasIndex) ds["data_var"] = ("x", [1, 2, 3, 4]) with pytest.raises(ValueError, match="those variables are data variables"): ds.set_xindex("data_var", PandasIndex) ds2 = Dataset(coords={"x": ("x", [0, 1, 2, 3])}) with pytest.raises(ValueError, match="those coordinates already have an index"): ds2.set_xindex("x", PandasIndex) def test_set_xindex_options(self) -> None: ds = Dataset(coords={"foo": ("x", ["a", "a", "b", "b"])}) class IndexWithOptions(Index): def __init__(self, opt): self.opt = opt @classmethod def from_variables(cls, variables, options): return cls(options["opt"]) indexed = ds.set_xindex("foo", IndexWithOptions, opt=1) assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_stack(self) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ) midx_expected = pd.MultiIndex.from_product( [[0, 1], ["a", "b"]], names=["x", "y"] ) midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z") expected = Dataset( data_vars={"b": ("z", [0, 1, 2, 3])}, coords=midx_coords_expected ) # check attrs propagated ds["x"].attrs["foo"] = "bar" expected["x"].attrs["foo"] = "bar" actual = ds.stack(z=["x", "y"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "x", "y"] actual = ds.stack(z=[...]) assert_identical(expected, actual) # non list dims with ellipsis actual = ds.stack(z=(...,)) assert_identical(expected, actual) # ellipsis with given dim actual = ds.stack(z=[..., "y"]) assert_identical(expected, actual) midx_expected = pd.MultiIndex.from_product( [["a", "b"], [0, 1]], names=["y", "x"] ) midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z") expected = Dataset( data_vars={"b": ("z", [0, 2, 1, 3])}, coords=midx_coords_expected ) expected["x"].attrs["foo"] = "bar" actual = ds.stack(z=["y", "x"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "y", "x"] @pytest.mark.parametrize( "create_index,expected_keys", [ (True, ["z", "x", "y"]), (False, []), (None, ["z", "x", "y"]), ], ) def test_stack_create_index(self, create_index, expected_keys) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ) actual = ds.stack(z=["x", "y"], create_index=create_index) assert list(actual.xindexes) == expected_keys # TODO: benbovy (flexible indexes) - test error multiple indexes found # along dimension + create_index=True def test_stack_multi_index(self) -> None: # multi-index on a dimension to stack is discarded too midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2")) coords = Coordinates.from_pandas_multiindex(midx, "x") coords["y"] = [0, 1] ds = xr.Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3], [4, 5], [6, 7]])}, coords=coords, ) expected = Dataset( data_vars={"b": ("z", [0, 1, 2, 3, 4, 5, 6, 7])}, coords={ "x": ("z", np.repeat(midx.values, 2)), "lvl1": ("z", np.repeat(midx.get_level_values("lvl1"), 2)), "lvl2": ("z", np.repeat(midx.get_level_values("lvl2"), 2)), "y": ("z", [0, 1, 0, 1] * 2), }, ) actual = ds.stack(z=["x", "y"], create_index=False) assert_identical(expected, actual) assert len(actual.xindexes) == 0 with pytest.raises(ValueError, match=r"cannot create.*wraps a multi-index"): ds.stack(z=["x", 
"y"], create_index=True) def test_stack_non_dim_coords(self) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ).rename_vars(x="xx") exp_index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["xx", "y"]) exp_coords = Coordinates.from_pandas_multiindex(exp_index, "z") expected = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=exp_coords) actual = ds.stack(z=["x", "y"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "xx", "y"] def test_unstack(self) -> None: index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"]) coords = Coordinates.from_pandas_multiindex(index, "z") ds = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=coords) expected = Dataset( {"b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"]} ) # check attrs propagated ds["x"].attrs["foo"] = "bar" expected["x"].attrs["foo"] = "bar" for dim in ["z", ["z"], None]: actual = ds.unstack(dim) assert_identical(actual, expected) def test_unstack_errors(self) -> None: ds = Dataset({"x": [1, 2, 3]}) with pytest.raises( ValueError, match=re.escape("Dimensions ('foo',) not found in data dimensions ('x',)"), ): ds.unstack("foo") with pytest.raises(ValueError, match=r".*do not have exactly one multi-index"): ds.unstack("x") ds = Dataset({"da": [1, 2]}, coords={"y": ("x", [1, 1]), "z": ("x", [0, 0])}) ds = ds.set_index(x=("y", "z")) with pytest.raises( ValueError, match="Cannot unstack MultiIndex containing duplicates" ): ds.unstack("x") def test_unstack_fill_value(self) -> None: ds = xr.Dataset( {"var": (("x",), np.arange(6)), "other_var": (("x",), np.arange(3, 9))}, coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)}, ) # make ds incomplete ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"]) # test fill_value actual1 = ds.unstack("index", fill_value=-1) expected1 = ds.unstack("index").fillna(-1).astype(int) assert actual1["var"].dtype == int assert_equal(actual1, expected1) actual2 = ds["var"].unstack("index", fill_value=-1) expected2 = ds["var"].unstack("index").fillna(-1).astype(int) assert_equal(actual2, expected2) actual3 = ds.unstack("index", fill_value={"var": -1, "other_var": 1}) expected3 = ds.unstack("index").fillna({"var": -1, "other_var": 1}).astype(int) assert_equal(actual3, expected3) actual4 = ds.unstack("index", fill_value={"var": -1}) expected4 = ds.unstack("index").fillna({"var": -1, "other_var": np.nan}) assert_equal(actual4, expected4) @requires_sparse def test_unstack_sparse(self) -> None: ds = xr.Dataset( {"var": (("x",), np.arange(6))}, coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)}, ) # make ds incomplete ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"]) # test fill_value actual1 = ds.unstack("index", sparse=True) expected1 = ds.unstack("index") assert isinstance(actual1["var"].data, sparse_array_type) assert actual1["var"].variable._to_dense().equals(expected1["var"].variable) assert actual1["var"].data.density < 1.0 actual2 = ds["var"].unstack("index", sparse=True) expected2 = ds["var"].unstack("index") assert isinstance(actual2.data, sparse_array_type) assert actual2.variable._to_dense().equals(expected2.variable) assert actual2.data.density < 1.0 midx = pd.MultiIndex.from_arrays([np.arange(3), np.arange(3)], names=["a", "b"]) coords = Coordinates.from_pandas_multiindex(midx, "z") coords["foo"] = np.arange(4) coords["bar"] = np.arange(5) ds_eye = Dataset( {"var": (("z", "foo", "bar"), np.ones((3, 4, 5)))}, coords=coords ) actual3 = 
ds_eye.unstack(sparse=True, fill_value=0) assert isinstance(actual3["var"].data, sparse_array_type) expected3 = xr.Dataset( { "var": ( ("foo", "bar", "a", "b"), np.broadcast_to(np.eye(3, 3), (4, 5, 3, 3)), ) }, coords={ "foo": np.arange(4), "bar": np.arange(5), "a": np.arange(3), "b": np.arange(3), }, ) actual3["var"].data = actual3["var"].data.todense() assert_equal(expected3, actual3) def test_stack_unstack_fast(self) -> None: ds = Dataset( { "a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"], } ) actual = ds.stack(z=["x", "y"]).unstack("z") assert actual.broadcast_equals(ds) actual = ds[["b"]].stack(z=["x", "y"]).unstack("z") assert actual.identical(ds[["b"]]) def test_stack_unstack_slow(self) -> None: ds = Dataset( data_vars={ "a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), }, coords={"x": [0, 1], "y": ["a", "b"]}, ) stacked = ds.stack(z=["x", "y"]) actual = stacked.isel(z=slice(None, None, -1)).unstack("z") assert actual.broadcast_equals(ds) stacked = ds[["b"]].stack(z=["x", "y"]) actual = stacked.isel(z=slice(None, None, -1)).unstack("z") assert actual.identical(ds[["b"]]) def test_to_stacked_array_invalid_sample_dims(self) -> None: data = xr.Dataset( data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])}, coords={"y": ["u", "v", "w"]}, ) with pytest.raises( ValueError, match=r"Variables in the dataset must contain all ``sample_dims`` \(\['y'\]\) but 'b' misses \['y'\]", ): data.to_stacked_array("features", sample_dims=["y"]) def test_to_stacked_array_name(self) -> None: name = "adf9d" # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims, name=name) assert y.name == name def test_to_stacked_array_dtype_dims(self) -> None: # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims) mindex = y.xindexes["features"].to_pandas_index() assert isinstance(mindex, pd.MultiIndex) assert mindex.levels[1].dtype == D.y.dtype assert y.dims == ("x", "features") def test_to_stacked_array_to_unstacked_dataset(self) -> None: # single dimension: regression test for GH4049 arr = xr.DataArray(np.arange(3), coords=[("x", [0, 1, 2])]) data = xr.Dataset({"a": arr, "b": arr}) stacked = data.to_stacked_array("y", sample_dims=["x"]) unstacked = stacked.to_unstacked_dataset("y") assert_identical(unstacked, data) # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims).transpose("x", "features") x = y.to_unstacked_dataset("features") assert_identical(D, x) # test on just one sample x0 = y[0].to_unstacked_dataset("features") d0 = D.isel(x=0) assert_identical(d0, x0) def test_to_stacked_array_to_unstacked_dataset_different_dimension(self) -> None: # test when variables have different dimensionality a, b = create_test_stacked_array() sample_dims = ["x"] D = xr.Dataset({"a": a, "b": b.isel(y=0)}) y = D.to_stacked_array("features", sample_dims) x = y.to_unstacked_dataset("features") assert_identical(D, x) def test_to_stacked_array_preserves_dtype(self) -> None: # regression test for bug found in https://github.com/pydata/xarray/pull/8872#issuecomment-2081218616 ds = xr.Dataset( data_vars={ "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7]), }, coords={"y": ["u", "v", "w"]}, ) stacked = ds.to_stacked_array("z", 
sample_dims=["x"]) # coordinate created from variables names should be of string dtype data = np.array(["a", "a", "a", "b"], dtype=" None: # test that to_stacked_array uses updated dim order after transposition ds = xr.Dataset( data_vars=dict( v1=(["d1", "d2"], np.arange(6).reshape((2, 3))), ), coords=dict( d1=(["d1"], np.arange(2)), d2=(["d2"], np.arange(3)), ), ) da = ds.to_stacked_array( new_dim="new_dim", sample_dims=[], variable_dim="variable", ) dsT = ds.transpose() daT = dsT.to_stacked_array( new_dim="new_dim", sample_dims=[], variable_dim="variable", ) v1 = np.arange(6) v1T = np.arange(6).reshape((2, 3)).T.flatten() np.testing.assert_equal(da.to_numpy(), v1) np.testing.assert_equal(daT.to_numpy(), v1T) def test_update(self) -> None: data = create_test_data(seed=0) expected = data.copy() var2 = Variable("dim1", np.arange(8)) actual = data actual.update({"var2": var2}) expected["var2"] = var2 assert_identical(expected, actual) actual = data.copy() actual.update(data) assert_identical(expected, actual) other = Dataset(attrs={"new": "attr"}) actual = data.copy() actual.update(other) assert_identical(expected, actual) def test_update_overwrite_coords(self) -> None: data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update(Dataset(coords={"b": 4})) expected = Dataset({"a": ("x", [1, 2])}, {"b": 4}) assert_identical(data, expected) data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update(Dataset({"c": 5}, coords={"b": 4})) expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 4}) assert_identical(data, expected) data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update({"c": DataArray(5, coords={"b": 4})}) expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 3}) assert_identical(data, expected) def test_update_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises( ValueError, match=r"cannot set or update variable.*corrupt.*index " ): data.update({"level_1": range(4)}) def test_update_auto_align(self) -> None: ds = Dataset({"x": ("t", [3, 4])}, {"t": [0, 1]}) expected1 = Dataset( {"x": ("t", [3, 4]), "y": ("t", [np.nan, 5])}, {"t": [0, 1]} ) actual1 = ds.copy() other1 = {"y": ("t", [5]), "t": [1]} with pytest.raises(ValueError, match=r"conflicting sizes"): actual1.update(other1) actual1.update(Dataset(other1)) assert_identical(expected1, actual1) actual2 = ds.copy() other2 = Dataset({"y": ("t", [5]), "t": [100]}) actual2.update(other2) expected2 = Dataset( {"x": ("t", [3, 4]), "y": ("t", [np.nan] * 2)}, {"t": [0, 1]} ) assert_identical(expected2, actual2) def test_getitem(self) -> None: data = create_test_data() assert isinstance(data["var1"], DataArray) assert_equal(data["var1"].variable, data.variables["var1"]) with pytest.raises(KeyError): data["notfound"] with pytest.raises(KeyError): data[["var1", "notfound"]] with pytest.raises( KeyError, match=r"Hint: use a list to select multiple variables, for example `ds\[\['var1', 'var2'\]\]`", ): data["var1", "var2"] actual1 = data[["var1", "var2"]] expected1 = Dataset({"var1": data["var1"], "var2": data["var2"]}) assert_equal(expected1, actual1) actual2 = data["numbers"] expected2 = DataArray( data["numbers"].variable, {"dim3": data["dim3"], "numbers": data["numbers"]}, dims="dim3", name="numbers", ) assert_identical(expected2, actual2) actual3 = data[dict(dim1=0)] expected3 = data.isel(dim1=0) assert_identical(expected3, actual3) def test_getitem_hashable(self) -> None: data = create_test_data() data[(3, 4)] = data["var1"] + 1 expected = data["var1"] + 1 expected.name = (3, 4) assert_identical(expected, 
data[(3, 4)]) with pytest.raises(KeyError, match=r"('var1', 'var2')"): data[("var1", "var2")] def test_getitem_multiple_dtype(self) -> None: keys = ["foo", 1] dataset = Dataset({key: ("dim0", range(1)) for key in keys}) assert_identical(dataset, dataset[keys]) def test_getitem_extra_dim_index_coord(self) -> None: class AnyIndex(Index): def should_add_coord_to_array(self, name, var, dims): return True idx = AnyIndex() coords = Coordinates( coords={ "x": ("x", [1, 2]), "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]), }, indexes={"x": idx, "x_bounds": idx}, ) ds = Dataset({"foo": (("x"), [1.0, 2.0])}, coords=coords) actual = ds["foo"] assert_identical(actual.coords, coords, check_default_indexes=False) assert "x_bnds" not in actual.dims def test_virtual_variables_default_coords(self) -> None: dataset = Dataset({"foo": ("x", range(10))}) expected1 = DataArray(range(10), dims="x", name="x") actual1 = dataset["x"] assert_identical(expected1, actual1) assert isinstance(actual1.variable, IndexVariable) actual2 = dataset[["x", "foo"]] expected2 = dataset.assign_coords(x=range(10)) assert_identical(expected2, actual2) def test_virtual_variables_time(self) -> None: # access virtual variables data = create_test_data() index = data.variables["time"].to_index() assert isinstance(index, pd.DatetimeIndex) assert_array_equal(data["time.month"].values, index.month) assert_array_equal(data["time.season"].values, "DJF") # test virtual variable math assert_array_equal(data["time.dayofyear"] + 1, 2 + np.arange(20)) assert_array_equal(np.sin(data["time.dayofyear"]), np.sin(1 + np.arange(20))) # ensure they become coordinates expected = Dataset({}, {"dayofyear": data["time.dayofyear"]}) actual = data[["time.dayofyear"]] assert_equal(expected, actual) # non-coordinate variables ds = Dataset({"t": ("x", pd.date_range("2000-01-01", periods=3))}) assert (ds["t.year"] == 2000).all() def test_virtual_variable_same_name(self) -> None: # regression test for GH367 times = pd.date_range("2000-01-01", freq="h", periods=5) data = Dataset({"time": times}) actual = data["time.time"] expected = DataArray(times.time, [("time", times)], name="time") assert_identical(actual, expected) def test_time_season(self) -> None: time = xr.date_range("2000-01-01", periods=12, freq="ME", use_cftime=False) ds = Dataset({"t": time}) seas = ["DJF"] * 2 + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"] assert_array_equal(seas, ds["t.season"]) def test_slice_virtual_variable(self) -> None: data = create_test_data() assert_equal( data["time.dayofyear"][:10].variable, Variable(["time"], 1 + np.arange(10)) ) assert_equal(data["time.dayofyear"][0].variable, Variable([], 1)) def test_setitem(self) -> None: # assign a variable var = Variable(["dim1"], np.random.randn(8)) data1 = create_test_data() data1["A"] = var data2 = data1.copy() data2["A"] = var assert_identical(data1, data2) # assign a dataset array dv = 2 * data2["A"] data1["B"] = dv.variable data2["B"] = dv assert_identical(data1, data2) # can't assign an ND array without dimensions with pytest.raises(ValueError, match=r"without explicit dimension names"): data2["C"] = var.values.reshape(2, 4) # but can assign a 1D array data1["C"] = var.values data2["C"] = ("C", var.values) assert_identical(data1, data2) # can assign a scalar data1["scalar"] = 0 data2["scalar"] = ([], 0) assert_identical(data1, data2) # can't use the same dimension name as a scalar var with pytest.raises(ValueError, match=r"already exists as a scalar"): data1["newvar"] = ("scalar", [3, 4, 5]) # can't resize a used 
dimension with pytest.raises(ValueError, match=r"conflicting dimension sizes"): data1["dim1"] = data1["dim1"][:5] # override an existing value data1["A"] = 3 * data2["A"] assert_equal(data1["A"], 3 * data2["A"]) # can't assign a dataset to a single key with pytest.raises(TypeError, match="Cannot assign a Dataset to a single key"): data1["D"] = xr.Dataset() # test assignment with positional and label-based indexing data3 = data1[["var1", "var2"]] data3["var3"] = data3.var1.isel(dim1=0) data4 = data3.copy() err_msg = ( "can only set locations defined by dictionaries from Dataset.loc. Got: a" ) with pytest.raises(TypeError, match=err_msg): data1.loc["a"] = 0 err_msg = r"Variables \['A', 'B', 'scalar'\] in new values not available in original dataset:" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": 1}] = data1[{"dim2": 2}] err_msg = "Variable 'var3': indexer {'dim2': 0} not available" with pytest.raises(ValueError, match=err_msg): data1[{"dim2": 0}] = 0.0 err_msg = "Variable 'var1': indexer {'dim2': 10} not available" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": 10}] = data3[{"dim2": 2}] err_msg = "Variable 'var1': dimension 'dim2' appears in new values" with pytest.raises(KeyError, match=err_msg): data4[{"dim2": 2}] = data3[{"dim2": [2]}] err_msg = ( "Variable 'var2': dimension order differs between original and new data" ) data3["var2"] = data3["var2"].T with pytest.raises(ValueError, match=err_msg): data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3]}] data3["var2"] = data3["var2"].T err_msg = r"cannot align objects.*not equal along these coordinates.*" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3, 4]}] err_msg = "Dataset assignment only accepts DataArrays, Datasets, and scalars." 
with pytest.raises(TypeError, match=err_msg): data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values data5 = data4.astype(str) data5["var4"] = data4["var1"] # convert to `np.str_('a')` once `numpy<2.0` has been dropped err_msg = "could not convert string to float: .*'a'.*" with pytest.raises(ValueError, match=err_msg): data5[{"dim2": 1}] = "a" data4[{"dim2": 0}] = 0.0 data4[{"dim2": 1}] = data3[{"dim2": 2}] data4.loc[{"dim2": 1.5}] = 1.0 data4.loc[{"dim2": 2.0}] = data3.loc[{"dim2": 2.5}] for v, dat3 in data3.items(): dat4 = data4[v] assert_array_equal(dat4[{"dim2": 0}], 0.0) assert_array_equal(dat4[{"dim2": 1}], dat3[{"dim2": 2}]) assert_array_equal(dat4.loc[{"dim2": 1.5}], 1.0) assert_array_equal(dat4.loc[{"dim2": 2.0}], dat3.loc[{"dim2": 2.5}]) unchanged = [1.0, 2.5, 3.0, 3.5, 4.0] assert_identical( dat4.loc[{"dim2": unchanged}], dat3.loc[{"dim2": unchanged}] ) def test_setitem_pandas(self) -> None: ds = self.make_example_math_dataset() ds["x"] = np.arange(3) ds_copy = ds.copy() ds_copy["bar"] = ds["bar"].to_pandas() assert_equal(ds, ds_copy) def test_setitem_auto_align(self) -> None: ds = Dataset() ds["x"] = ("y", range(3)) ds["y"] = 1 + np.arange(3) expected = Dataset({"x": ("y", range(3)), "y": 1 + np.arange(3)}) assert_identical(ds, expected) ds["y"] = DataArray(range(3), dims="y") expected = Dataset({"x": ("y", range(3))}, {"y": range(3)}) assert_identical(ds, expected) ds["x"] = DataArray([1, 2], coords=[("y", [0, 1])]) expected = Dataset({"x": ("y", [1, 2, np.nan])}, {"y": range(3)}) assert_identical(ds, expected) ds["x"] = 42 expected = Dataset({"x": 42, "y": range(3)}) assert_identical(ds, expected) ds["x"] = DataArray([4, 5, 6, 7], coords=[("y", [0, 1, 2, 3])]) expected = Dataset({"x": ("y", [4, 5, 6])}, {"y": range(3)}) assert_identical(ds, expected) def test_setitem_dimension_override(self) -> None: # regression test for GH-3377 ds = xr.Dataset({"x": [0, 1, 2]}) ds["x"] = ds["x"][:2] expected = Dataset({"x": [0, 1]}) assert_identical(ds, expected) ds = xr.Dataset({"x": [0, 1, 2]}) ds["x"] = np.array([0, 1]) assert_identical(ds, expected) ds = xr.Dataset({"x": [0, 1, 2]}) ds.coords["x"] = [0, 1] assert_identical(ds, expected) def test_setitem_with_coords(self) -> None: # Regression test for GH:2068 ds = create_test_data() other = DataArray( np.arange(10), dims="dim3", coords={"numbers": ("dim3", np.arange(10))} ) expected = ds.copy() expected["var3"] = other.drop_vars("numbers") actual = ds.copy() actual["var3"] = other assert_identical(expected, actual) assert "numbers" in other.coords # should not change other # with alignment other = ds["var3"].isel(dim3=slice(1, -1)) other["numbers"] = ("dim3", np.arange(8)) actual = ds.copy() actual["var3"] = other assert "numbers" in other.coords # should not change other expected = ds.copy() expected["var3"] = ds["var3"].isel(dim3=slice(1, -1)) assert_identical(expected, actual) # with non-duplicate coords other = ds["var3"].isel(dim3=slice(1, -1)) other["numbers"] = ("dim3", np.arange(8)) other["position"] = ("dim3", np.arange(8)) actual = ds.copy() actual["var3"] = other assert "position" in actual assert "position" in other.coords # assigning a coordinate-only dataarray actual = ds.copy() other = actual["numbers"] other[0] = 10 actual["numbers"] = other assert actual["numbers"][0] == 10 # GH: 2099 ds = Dataset( {"var": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2], "z1": ("x", [1, 2, 3]), "z2": ("x", [1, 2, 3])}, ) ds["var"] = ds["var"] * 2 assert np.allclose(ds["var"], [2, 4, 6]) def test_setitem_align_new_indexes(self) -> 
None: ds = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]}) ds["bar"] = DataArray([2, 3, 4], [("x", [1, 2, 3])]) expected = Dataset( {"foo": ("x", [1, 2, 3]), "bar": ("x", [np.nan, 2, 3])}, {"x": [0, 1, 2]} ) assert_identical(ds, expected) def test_setitem_vectorized(self) -> None: # Regression test for GH:7030 # Positional indexing da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) ds = xr.Dataset({"da": da}) b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) ds[index] = xr.Dataset({"da": w}) assert (ds[index]["da"] == w).all() # Indexing with coordinates da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) ds = xr.Dataset({"da": da}) ds.coords["b"] = [2, 4, 6] b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) ds.loc[index] = xr.Dataset({"da": w}, coords={"b": ds.coords["b"]}) assert (ds.loc[index]["da"] == w).all() @pytest.mark.parametrize("dtype", [str, bytes]) def test_setitem_str_dtype(self, dtype) -> None: ds = xr.Dataset(coords={"x": np.array(["x", "y"], dtype=dtype)}) # test Dataset update ds["foo"] = xr.DataArray(np.array([0, 0]), dims=["x"]) assert np.issubdtype(ds.x.dtype, dtype) def test_setitem_using_list(self) -> None: # assign a list of variables var1 = Variable(["dim1"], np.random.randn(8)) var2 = Variable(["dim1"], np.random.randn(8)) actual = create_test_data() expected = actual.copy() expected["A"] = var1 expected["B"] = var2 actual[["A", "B"]] = [var1, var2] assert_identical(actual, expected) # assign a list of dataset arrays dv = 2 * expected[["A", "B"]] actual[["C", "D"]] = [d.variable for d in dv.data_vars.values()] expected[["C", "D"]] = dv assert_identical(actual, expected) @pytest.mark.parametrize( "var_list, data, error_regex", [ ( ["A", "B"], [Variable(["dim1"], np.random.randn(8))], r"Different lengths", ), ([], [Variable(["dim1"], np.random.randn(8))], r"Empty list of variables"), (["A", "B"], xr.DataArray([1, 2]), r"assign single DataArray"), ], ) def test_setitem_using_list_errors(self, var_list, data, error_regex) -> None: actual = create_test_data() with pytest.raises(ValueError, match=error_regex): actual[var_list] = data def test_setitem_uses_base_variable_class_even_for_index_variables(self) -> None: ds = Dataset(coords={"x": [1, 2, 3]}) ds["y"] = ds["x"] # explicit check assert isinstance(ds["x"].variable, IndexVariable) assert not isinstance(ds["y"].variable, IndexVariable) # test internal invariant checks when comparing the datasets expected = Dataset(data_vars={"y": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}) assert_identical(ds, expected) def test_assign(self) -> None: ds = Dataset() actual = ds.assign(x=[0, 1, 2], y=2) expected = Dataset({"x": [0, 1, 2], "y": 2}) assert_identical(actual, expected) assert list(actual.variables) == ["x", "y"] assert_identical(ds, Dataset()) actual = actual.assign(y=lambda ds: ds.x**2) expected = Dataset({"y": ("x", [0, 1, 4]), "x": [0, 1, 2]}) assert_identical(actual, expected) actual = actual.assign_coords(z=2) expected = Dataset({"y": ("x", [0, 1, 4])}, {"z": 2, "x": [0, 1, 2]}) assert_identical(actual, expected) def test_assign_coords(self) -> None: ds = Dataset() actual = ds.assign(x=[0, 1, 2], y=2) actual = actual.assign_coords(x=list("abc")) expected = Dataset({"x": list("abc"), "y": 2}) assert_identical(actual, expected) actual 
= ds.assign(x=[0, 1, 2], y=[2, 3]) actual = actual.assign_coords({"y": [2.0, 3.0]}) expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0]) assert_identical(actual, expected) def test_assign_attrs(self) -> None: expected = Dataset(attrs=dict(a=1, b=2)) new = Dataset() actual = new.assign_attrs(a=1, b=2) assert_identical(actual, expected) assert new.attrs == {} expected.attrs["c"] = 3 new_actual = actual.assign_attrs({"c": 3}) assert_identical(new_actual, expected) assert actual.attrs == dict(a=1, b=2) def test_drop_attrs(self) -> None: # Simple example ds = Dataset().assign_attrs(a=1, b=2) original = ds.copy() expected = Dataset() result = ds.drop_attrs() assert_identical(result, expected) # Doesn't change original assert_identical(ds, original) # Example with variables and coords with attrs, and a multiindex. (arguably # should have used a canonical dataset with all the features we're should # support...) var = Variable("x", [1, 2, 3], attrs=dict(x=1, y=2)) idx = IndexVariable("y", [1, 2, 3], attrs=dict(c=1, d=2)) mx = xr.Coordinates.from_pandas_multiindex( pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["d", "e"]), "z" ) ds = Dataset(dict(var1=var), coords=dict(y=idx, z=mx)).assign_attrs(a=1, b=2) assert ds.attrs != {} assert ds["var1"].attrs != {} assert ds["y"].attrs != {} assert ds.coords["y"].attrs != {} original = ds.copy(deep=True) result = ds.drop_attrs() assert result.attrs == {} assert result["var1"].attrs == {} assert result["y"].attrs == {} assert list(result.data_vars) == list(ds.data_vars) assert list(result.coords) == list(ds.coords) # Doesn't change original assert_identical(ds, original) # Specifically test that the attrs on the coords are still there. (The index # can't currently contain `attrs`, so we can't test those.) assert ds.coords["y"].attrs != {} # Test for deep=False result_shallow = ds.drop_attrs(deep=False) assert result_shallow.attrs == {} assert result_shallow["var1"].attrs != {} assert result_shallow["y"].attrs != {} assert list(result.data_vars) == list(ds.data_vars) assert list(result.coords) == list(ds.coords) def test_drop_attrs_custom_index(self): class CustomIndex(Index): @classmethod def from_variables(cls, variables, *, options=None): return cls() ds = xr.Dataset(coords={"y": ("x", [1, 2])}).set_xindex("y", CustomIndex) # should not raise a TypeError ds.drop_attrs() # make sure the index didn't disappear assert "y" in ds.xindexes def test_assign_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "): data.assign(level_1=range(4)) data.assign_coords(level_1=range(4)) def test_assign_new_multiindex(self) -> None: midx = pd.MultiIndex.from_arrays([["a", "a", "b", "b"], [0, 1, 0, 1]]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords={"x": [1, 2]}) expected = Dataset(coords=midx_coords) with pytest.warns( FutureWarning, match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): actual = ds.assign(x=midx) assert_identical(actual, expected) @pytest.mark.parametrize("orig_coords", [{}, {"x": range(4)}]) def test_assign_coords_new_multiindex(self, orig_coords) -> None: ds = Dataset(coords=orig_coords) midx = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], [0, 1, 0, 1]], names=("one", "two") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") expected = Dataset(coords=midx_coords) with pytest.warns( FutureWarning, match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): actual = ds.assign_coords({"x": midx}) 
        assert_identical(actual, expected)

        actual = ds.assign_coords(midx_coords)
        assert_identical(actual, expected)

    def test_assign_coords_existing_multiindex(self) -> None:
        data = create_test_multiindex()
        with pytest.warns(
            FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent"
        ):
            updated = data.assign_coords(x=range(4))
        # https://github.com/pydata/xarray/issues/7097 (coord names updated)
        assert len(updated.coords) == 1

        with pytest.warns(
            FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent"
        ):
            updated = data.assign(x=range(4))
        # https://github.com/pydata/xarray/issues/7097 (coord names updated)
        assert len(updated.coords) == 1

    def test_assign_all_multiindex_coords(self) -> None:
        data = create_test_multiindex()
        actual = data.assign(x=range(4), level_1=range(4), level_2=range(4))
        # no error but multi-index dropped in favor of single indexes for each level
        assert (
            actual.xindexes["x"]
            is not actual.xindexes["level_1"]
            is not actual.xindexes["level_2"]
        )

    def test_assign_coords_custom_index_side_effect(self) -> None:
        # test that assigning new coordinates do not reset other dimension coord indexes
        # to default (pandas) index (https://github.com/pydata/xarray/issues/7346)
        class CustomIndex(PandasIndex):
            pass

        ds = (
            Dataset(coords={"x": [1, 2, 3]})
            .drop_indexes("x")
            .set_xindex("x", CustomIndex)
        )
        actual = ds.assign_coords(y=[4, 5, 6])
        assert isinstance(actual.xindexes["x"], CustomIndex)

    def test_assign_coords_custom_index(self) -> None:
        class CustomIndex(Index):
            pass

        coords = Coordinates(
            coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
        )
        ds = Dataset()
        actual = ds.assign_coords(coords)
        assert isinstance(actual.xindexes["x"], CustomIndex)

    def test_assign_coords_no_default_index(self) -> None:
        coords = Coordinates({"y": [1, 2, 3]}, indexes={})
        ds = Dataset()
        actual = ds.assign_coords(coords)
        expected = coords.to_dataset()
        assert_identical(expected, actual, check_default_indexes=False)
        assert "y" not in actual.xindexes

    def test_merge_multiindex_level(self) -> None:
        data = create_test_multiindex()

        other = Dataset({"level_1": ("x", [0, 1])})
        with pytest.raises(ValueError, match=r".*conflicting dimension sizes.*"):
            data.merge(other)

        other = Dataset({"level_1": ("x", range(4))})
        with pytest.raises(
            ValueError, match=r"unable to determine.*coordinates or not.*"
        ):
            data.merge(other)

        # `other` Dataset coordinates are ignored (bug or feature?)
        other = Dataset(coords={"level_1": ("x", range(4))})
        assert_identical(data.merge(other), data)

    def test_setitem_original_non_unique_index(self) -> None:
        # regression test for GH943
        original = Dataset({"data": ("x", np.arange(5))}, coords={"x": [0, 1, 2, 0, 1]})
        expected = Dataset({"data": ("x", np.arange(5))}, {"x": range(5)})

        actual = original.copy()
        actual["x"] = list(range(5))
        assert_identical(actual, expected)

        actual = original.copy()
        actual["x"] = ("x", list(range(5)))
        assert_identical(actual, expected)

        actual = original.copy()
        actual.coords["x"] = list(range(5))
        assert_identical(actual, expected)

    def test_setitem_both_non_unique_index(self) -> None:
        # regression test for GH956
        names = ["joaquin", "manolo", "joaquin"]
        values = np.random.randint(0, 256, (3, 4, 4))
        array = DataArray(
            values, dims=["name", "row", "column"], coords=[names, range(4), range(4)]
        )
        expected = Dataset({"first": array, "second": array})
        actual = array.rename("first").to_dataset()
        actual["second"] = array
        assert_identical(expected, actual)

    def test_setitem_multiindex_level(self) -> None:
        data = create_test_multiindex()
        with pytest.raises(
            ValueError, match=r"cannot set or update variable.*corrupt.*index "
        ):
            data["level_1"] = range(4)

    def test_delitem(self) -> None:
        data = create_test_data()
        all_items = set(data.variables)
        assert set(data.variables) == all_items
        del data["var1"]
        assert set(data.variables) == all_items - {"var1"}
        del data["numbers"]
        assert set(data.variables) == all_items - {"var1", "numbers"}
        assert "numbers" not in data.coords

        expected = Dataset()
        actual = Dataset({"y": ("x", [1, 2])})
        del actual["y"]
        assert_identical(expected, actual)

    def test_delitem_multiindex_level(self) -> None:
        data = create_test_multiindex()
        with pytest.raises(
            ValueError, match=r"cannot remove coordinate.*corrupt.*index "
        ):
            del data["level_1"]

    def test_squeeze(self) -> None:
        data = Dataset({"foo": (["x", "y", "z"], [[[1], [2]]])})
        test_args: list[list] = [[], [["x"]], [["x", "z"]]]
        for args in test_args:

            def get_args(args, v):
                return [set(args[0]) & set(v.dims)] if args else []

            expected = Dataset(
                {k: v.squeeze(*get_args(args, v)) for k, v in data.variables.items()}
            )
            expected = expected.set_coords(data.coords)
            assert_identical(expected, data.squeeze(*args))
        # invalid squeeze
        with pytest.raises(ValueError, match=r"cannot select a dimension"):
            data.squeeze("y")

    def test_squeeze_drop(self) -> None:
        data = Dataset({"foo": ("x", [1])}, {"x": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)

        expected = Dataset({"foo": 1}, {"x": 0})
        selected = data.squeeze(drop=False)
        assert_identical(expected, selected)

        data = Dataset({"foo": (("x", "y"), [[1]])}, {"x": [0], "y": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)

        expected = Dataset({"foo": ("x", [1])}, {"x": [0]})
        selected = data.squeeze(dim="y", drop=True)
        assert_identical(expected, selected)

        data = Dataset({"foo": (("x",), [])}, {"x": []})
        selected = data.squeeze(drop=True)
        assert_identical(data, selected)

    def test_to_dataarray(self) -> None:
        ds = Dataset(
            {"a": 1, "b": ("x", [1, 2, 3])},
            coords={"c": 42},
            attrs={"Conventions": "None"},
        )
        data = [[1, 1, 1], [1, 2, 3]]
        coords = {"c": 42, "variable": ["a", "b"]}
        dims = ("variable", "x")
        expected = DataArray(data, coords, dims, attrs=ds.attrs)
        actual = ds.to_dataarray()
        assert_identical(expected, actual)

        actual = ds.to_dataarray("abc", name="foo")
        expected = expected.rename({"variable": "abc"}).rename("foo")
assert_identical(expected, actual) def test_to_and_from_dataframe(self) -> None: x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") cat = pd.Categorical(["a", "b"] * 5) ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t), "cat": ("t", cat)}) expected = pd.DataFrame( np.array([x, y]).T, columns=["a", "b"], index=pd.Index(t, name="t") ) expected["cat"] = cat actual = ds.to_dataframe() # use the .equals method to check all DataFrame metadata assert expected.equals(actual), (expected, actual) # verify coords are included actual = ds.set_coords("b").to_dataframe() assert expected.equals(actual), (expected, actual) # check roundtrip assert_identical(ds, Dataset.from_dataframe(actual)) assert isinstance(ds["cat"].variable.data.dtype, pd.CategoricalDtype) # test a case with a MultiIndex w = np.random.randn(2, 3) cat = pd.Categorical(["a", "a", "c"]) ds = Dataset({"w": (("x", "y"), w), "cat": ("y", cat)}) ds["y"] = ("y", list("abc")) exp_index = pd.MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"] ) expected = pd.DataFrame( {"w": w.reshape(-1), "cat": pd.Categorical(["a", "a", "c", "a", "a", "c"])}, index=exp_index, ) actual = ds.to_dataframe() assert expected.equals(actual) # check roundtrip # from_dataframe attempts to broadcast across because it doesn't know better, so cat must be converted ds["cat"] = (("x", "y"), np.stack((ds["cat"].to_numpy(), ds["cat"].to_numpy()))) assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual)) # Check multiindex reordering new_order = ["x", "y"] # revert broadcasting fix above for 1d arrays ds["cat"] = ("y", cat) actual = ds.to_dataframe(dim_order=new_order) assert expected.equals(actual) new_order = ["y", "x"] exp_index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b", "c", "c"], [0, 1, 0, 1, 0, 1]], names=["y", "x"] ) expected = pd.DataFrame( { "w": w.transpose().reshape(-1), "cat": pd.Categorical(["a", "a", "a", "a", "c", "c"]), }, index=exp_index, ) actual = ds.to_dataframe(dim_order=new_order) assert expected.equals(actual) invalid_order = ["x"] with pytest.raises( ValueError, match="does not match the set of dimensions of this" ): ds.to_dataframe(dim_order=invalid_order) invalid_order = ["x", "z"] with pytest.raises( ValueError, match="does not match the set of dimensions of this" ): ds.to_dataframe(dim_order=invalid_order) # test a case with a MultiIndex along a single dimension data_dict = dict( x=[1, 2, 1, 2, 1], y=["a", "a", "b", "b", "b"], z=[5, 10, 15, 20, 25] ) data_dict_w_dims = {k: ("single_dim", v) for k, v in data_dict.items()} # Dataset multi-indexed along "single_dim" by "x" and "y" ds = Dataset(data_dict_w_dims).set_coords(["x", "y"]).set_xindex(["x", "y"]) expected = pd.DataFrame(data_dict).set_index(["x", "y"]) actual = ds.to_dataframe() assert expected.equals(actual) # should be possible to reset index, as there should be no duplication # between index and columns, and dataframes should still be equal assert expected.reset_index().equals(actual.reset_index()) # MultiIndex deduplication should not affect other coordinates. 
mindex_single = pd.MultiIndex.from_product( [list(range(6)), list("ab")], names=["A", "B"] ) ds = DataArray( range(12), [("MI", mindex_single)], dims="MI", name="test" )._to_dataset_whole() ds.coords["C"] = "a single value" ds.coords["D"] = ds.coords["A"] ** 2 expected = pd.DataFrame( dict( test=range(12), C="a single value", D=[0, 0, 1, 1, 4, 4, 9, 9, 16, 16, 25, 25], ) ).set_index(mindex_single) actual = ds.to_dataframe() assert expected.equals(actual) assert expected.reset_index().equals(actual.reset_index()) # check pathological cases df = pd.DataFrame([1]) actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset({0: ("index", [1])}, {"index": [0]}) assert_identical(expected_ds, actual_ds) df = pd.DataFrame() actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset(coords={"index": []}) assert_identical(expected_ds, actual_ds) # GH697 df = pd.DataFrame({"A": []}) actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset({"A": DataArray([], dims=("index",))}, {"index": []}) assert_identical(expected_ds, actual_ds) # regression test for GH278 # use int64 to ensure consistent results for the pandas .equals method # on windows (which requires the same dtype) ds = Dataset({"x": pd.Index(["bar"]), "a": ("y", np.array([1], "int64"))}).isel( x=0 ) # use .loc to ensure consistent results on Python 3 actual = ds.to_dataframe().loc[:, ["a", "x"]] expected = pd.DataFrame( [[1, "bar"]], index=pd.Index([0], name="y"), columns=["a", "x"] ) assert expected.equals(actual), (expected, actual) ds = Dataset({"x": np.array([0], "int64"), "y": np.array([1], "int64")}) actual = ds.to_dataframe() idx = pd.MultiIndex.from_arrays([[0], [1]], names=["x", "y"]) expected = pd.DataFrame([[]], index=idx) assert expected.equals(actual), (expected, actual) def test_from_dataframe_categorical_dtype_index(self) -> None: cat = pd.CategoricalIndex(list("abcd")) df = pd.DataFrame({"f": [0, 1, 2, 3]}, index=cat) ds = df.to_xarray() restored = ds.to_dataframe() df.index.name = ( "index" # restored gets the name because it has the coord with the name ) pd.testing.assert_frame_equal(df, restored) def test_from_dataframe_categorical_index(self) -> None: cat = pd.CategoricalDtype( categories=["foo", "bar", "baz", "qux", "quux", "corge"] ) i1 = pd.Series(["foo", "bar", "foo"], dtype=cat) i2 = pd.Series(["bar", "bar", "baz"], dtype=cat) df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2, 3]}) ds = df.set_index("i1").to_xarray() assert len(ds["i1"]) == 3 ds = df.set_index(["i1", "i2"]).to_xarray() assert len(ds["i1"]) == 2 assert len(ds["i2"]) == 2 def test_from_dataframe_categorical_index_string_categories(self) -> None: cat = pd.CategoricalIndex( pd.Categorical.from_codes( np.array([1, 1, 0, 2], dtype=np.int64), # type: ignore[arg-type] categories=pd.Index(["foo", "bar", "baz"], dtype="string"), ) ) ser = pd.Series(1, index=cat) ds = ser.to_xarray() assert ds.coords.dtypes["index"] == ser.index.dtype @requires_sparse def test_from_dataframe_sparse(self) -> None: import sparse df_base = pd.DataFrame( {"x": range(10), "y": list("abcdefghij"), "z": np.arange(0, 100, 10)} ) ds_sparse = Dataset.from_dataframe(df_base.set_index("x"), sparse=True) ds_dense = Dataset.from_dataframe(df_base.set_index("x"), sparse=False) assert isinstance(ds_sparse["y"].data, sparse.COO) assert isinstance(ds_sparse["z"].data, sparse.COO) ds_sparse["y"].data = ds_sparse["y"].data.todense() ds_sparse["z"].data = ds_sparse["z"].data.todense() assert_identical(ds_dense, ds_sparse) ds_sparse = Dataset.from_dataframe(df_base.set_index(["x", "y"]), 
sparse=True) ds_dense = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=False) assert isinstance(ds_sparse["z"].data, sparse.COO) ds_sparse["z"].data = ds_sparse["z"].data.todense() assert_identical(ds_dense, ds_sparse) def test_to_and_from_empty_dataframe(self) -> None: # GH697 expected = pd.DataFrame({"foo": []}) ds = Dataset.from_dataframe(expected) assert len(ds["foo"]) == 0 actual = ds.to_dataframe() assert len(actual) == 0 assert expected.equals(actual) def test_from_dataframe_multiindex(self) -> None: index = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]], names=["x", "y"]) df = pd.DataFrame({"z": np.arange(6)}, index=index) expected = Dataset( {"z": (("x", "y"), [[0, 1, 2], [3, 4, 5]])}, coords={"x": ["a", "b"], "y": [1, 2, 3]}, ) actual = Dataset.from_dataframe(df) assert_identical(actual, expected) df2 = df.iloc[[3, 2, 1, 0, 4, 5], :] actual = Dataset.from_dataframe(df2) assert_identical(actual, expected) df3 = df.iloc[:4, :] expected3 = Dataset( {"z": (("x", "y"), [[0, 1, 2], [3, np.nan, np.nan]])}, coords={"x": ["a", "b"], "y": [1, 2, 3]}, ) actual = Dataset.from_dataframe(df3) assert_identical(actual, expected3) df_nonunique = df.iloc[[0, 0], :] with pytest.raises(ValueError, match=r"non-unique MultiIndex"): Dataset.from_dataframe(df_nonunique) def test_from_dataframe_unsorted_levels(self) -> None: # regression test for GH-4186 index = pd.MultiIndex( levels=[["b", "a"], ["foo"]], codes=[[0, 1], [0, 0]], names=["lev1", "lev2"] ) df = pd.DataFrame({"c1": [0, 2], "c2": [1, 3]}, index=index) expected = Dataset( { "c1": (("lev1", "lev2"), [[0], [2]]), "c2": (("lev1", "lev2"), [[1], [3]]), }, coords={"lev1": ["b", "a"], "lev2": ["foo"]}, ) actual = Dataset.from_dataframe(df) assert_identical(actual, expected) def test_from_dataframe_non_unique_columns(self) -> None: # regression test for GH449 df = pd.DataFrame(np.zeros((2, 2))) df.columns = ["foo", "foo"] # type: ignore[assignment,list-item,unused-ignore] with pytest.raises(ValueError, match=r"non-unique columns"): Dataset.from_dataframe(df) def test_convert_dataframe_with_many_types_and_multiindex(self) -> None: # regression test for GH737 df = pd.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("u1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.Categorical(list("abc")), "g": pd.date_range("20130101", periods=3), "h": pd.date_range("20130101", periods=3, tz="America/New_York"), } ) df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"]) roundtripped = Dataset.from_dataframe(df).to_dataframe() # we can't do perfectly, but we should be at least as faithful as # np.asarray expected = df.apply(np.asarray) assert roundtripped.equals(expected) @pytest.mark.parametrize("encoding", [True, False]) @pytest.mark.parametrize("data", [True, "list", "array"]) def test_to_and_from_dict( self, encoding: bool, data: bool | Literal["list", "array"] ) -> None: # # Dimensions: (t: 10) # Coordinates: # * t (t) U1" expected_no_data["coords"]["t"].update({"dtype": endiantype, "shape": (10,)}) expected_no_data["data_vars"]["a"].update({"dtype": "float64", "shape": (10,)}) expected_no_data["data_vars"]["b"].update({"dtype": "float64", "shape": (10,)}) actual_no_data = ds.to_dict(data=False, encoding=encoding) assert expected_no_data == actual_no_data # verify coords are included roundtrip expected_ds = ds.set_coords("b") actual2 = Dataset.from_dict(expected_ds.to_dict(data=data, encoding=encoding)) assert_identical(expected_ds, actual2) if encoding: 
assert set(expected_ds.variables) == set(actual2.variables) for vv in ds.variables: np.testing.assert_equal(expected_ds[vv].encoding, actual2[vv].encoding) # test some incomplete dicts: # this one has no attrs field, the dims are strings, and x, y are # np.arrays d = { "coords": {"t": {"dims": "t", "data": t}}, "dims": "t", "data_vars": {"a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}}, } assert_identical(ds, Dataset.from_dict(d)) # this is kind of a flattened version with no coords, or data_vars d = { "a": {"dims": "t", "data": x}, "t": {"data": t, "dims": "t"}, "b": {"dims": "t", "data": y}, } assert_identical(ds, Dataset.from_dict(d)) # this one is missing some necessary information d = { "a": {"data": x}, "t": {"data": t, "dims": "t"}, "b": {"dims": "t", "data": y}, } with pytest.raises( ValueError, match=r"cannot convert dict without the key 'dims'" ): Dataset.from_dict(d) def test_to_and_from_dict_with_time_dim(self) -> None: x = np.random.randn(10, 3) y = np.random.randn(10, 3) t = pd.date_range("20130101", periods=10) lat = [77.7, 83.2, 76] ds = Dataset( { "a": (["t", "lat"], x), "b": (["t", "lat"], y), "t": ("t", t), "lat": ("lat", lat), } ) roundtripped = Dataset.from_dict(ds.to_dict()) assert_identical(ds, roundtripped) @pytest.mark.parametrize("data", [True, "list", "array"]) def test_to_and_from_dict_with_nan_nat( self, data: bool | Literal["list", "array"] ) -> None: x = np.random.randn(10, 3) y = np.random.randn(10, 3) y[2] = np.nan t = pd.Series(pd.date_range("20130101", periods=10)) t[2] = np.nan lat = [77.7, 83.2, 76] ds = Dataset( { "a": (["t", "lat"], x), "b": (["t", "lat"], y), "t": ("t", t), "lat": ("lat", lat), } ) roundtripped = Dataset.from_dict(ds.to_dict(data=data)) assert_identical(ds, roundtripped) def test_to_dict_with_numpy_attrs(self) -> None: # this doesn't need to roundtrip x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") attrs = { "created": np.float64(1998), "coords": np.array([37, -110.1, 100]), "maintainer": "bar", } ds = Dataset({"a": ("t", x, attrs), "b": ("t", y, attrs), "t": ("t", t)}) expected_attrs = { "created": attrs["created"].item(), # type: ignore[attr-defined] "coords": attrs["coords"].tolist(), # type: ignore[attr-defined] "maintainer": "bar", } actual = ds.to_dict() # check that they are identical assert expected_attrs == actual["data_vars"]["a"]["attrs"] def test_pickle(self) -> None: data = create_test_data() roundtripped = pickle.loads(pickle.dumps(data)) assert_identical(data, roundtripped) # regression test for #167: assert data.sizes == roundtripped.sizes def test_lazy_load(self) -> None: store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) for decode_cf in [True, False]: ds = open_dataset(store, decode_cf=decode_cf) with pytest.raises(UnexpectedDataAccess): ds.load() with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) def test_lazy_load_duck_array(self) -> None: store = AccessibleAsDuckArrayDataStore() create_test_data().dump_to_store(store) for decode_cf in [True, False]: ds = open_dataset(store, decode_cf=decode_cf) with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: _ = ds.var1.data ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) repr(ds) # preserve the duck array type and don't cast to array assert isinstance(ds["var1"].load().data, DuckArrayWrapper) 
            assert isinstance(
                ds["var1"].isel(dim2=0, dim1=0).load().data, DuckArrayWrapper
            )

            ds.close()

    def test_dropna(self) -> None:
        x = np.random.randn(4, 4)
        x[::2, 0] = np.nan
        y = np.random.randn(4)
        y[-1] = np.nan
        ds = Dataset({"foo": (("a", "b"), x), "bar": (("b", y))})

        expected = ds.isel(a=slice(1, None, 2))
        actual = ds.dropna("a")
        assert_identical(actual, expected)

        expected = ds.isel(b=slice(1, 3))
        actual = ds.dropna("b")
        assert_identical(actual, expected)

        actual = ds.dropna("b", subset=["foo", "bar"])
        assert_identical(actual, expected)

        expected = ds.isel(b=slice(1, None))
        actual = ds.dropna("b", subset=["foo"])
        assert_identical(actual, expected)

        expected = ds.isel(b=slice(3))
        actual = ds.dropna("b", subset=["bar"])
        assert_identical(actual, expected)

        actual = ds.dropna("a", subset=[])
        assert_identical(actual, ds)

        actual = ds.dropna("a", subset=["bar"])
        assert_identical(actual, ds)

        actual = ds.dropna("a", how="all")
        assert_identical(actual, ds)

        actual = ds.dropna("b", how="all", subset=["bar"])
        expected = ds.isel(b=[0, 1, 2])
        assert_identical(actual, expected)

        actual = ds.dropna("b", thresh=1, subset=["bar"])
        assert_identical(actual, expected)

        actual = ds.dropna("b", thresh=2)
        assert_identical(actual, ds)

        actual = ds.dropna("b", thresh=4)
        expected = ds.isel(b=[1, 2, 3])
        assert_identical(actual, expected)

        actual = ds.dropna("a", thresh=3)
        expected = ds.isel(a=[1, 3])
        assert_identical(actual, ds)

        with pytest.raises(
            ValueError,
            match=r"'foo' not found in data dimensions \('a', 'b'\)",
        ):
            ds.dropna("foo")
        with pytest.raises(ValueError, match=r"invalid how"):
            ds.dropna("a", how="somehow")  # type: ignore[arg-type]
        with pytest.raises(TypeError, match=r"must specify how or thresh"):
            ds.dropna("a", how=None)  # type: ignore[arg-type]

    @pytest.mark.parametrize(
        "fill_value,extension_array",
        [
            pytest.param("a", pd.Categorical([pd.NA, "a", "b"]), id="category"),
        ]
        + (
            [
                pytest.param(
                    0,
                    pd.array([pd.NA, 1, 1], dtype="int64[pyarrow]"),
                    id="int64[pyarrow]",
                )
            ]
            if has_pyarrow
            else []
        ),
    )
    def test_fillna_extension_array(self, fill_value, extension_array) -> None:
        srs = pd.DataFrame({"data": extension_array}, index=np.array([1, 2, 3]))
        ds = srs.to_xarray()
        filled = ds.fillna(fill_value)
        assert filled["data"].dtype == extension_array.dtype
        assert (
            filled["data"].values
            == np.array([fill_value, *srs["data"].values[1:]], dtype="object")
        ).all()

    @pytest.mark.parametrize(
        "extension_array",
        [
            pytest.param(pd.Categorical([pd.NA, "a", "b"]), id="category"),
        ]
        + (
            [
                pytest.param(
                    pd.array([pd.NA, 1, 1], dtype="int64[pyarrow]"), id="int64[pyarrow]"
                )
            ]
            if has_pyarrow
            else []
        ),
    )
    def test_dropna_extension_array(self, extension_array) -> None:
        srs = pd.DataFrame({"data": extension_array}, index=np.array([1, 2, 3]))
        ds = srs.to_xarray()
        dropped = ds.dropna("index")
        assert dropped["data"].dtype == extension_array.dtype
        assert (dropped["data"].values == srs["data"].values[1:]).all()

    def test_fillna(self) -> None:
        ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]})

        # fill with -1
        actual1 = ds.fillna(-1)
        expected = Dataset({"a": ("x", [-1, 1, -1, 3])}, {"x": [0, 1, 2, 3]})
        assert_identical(expected, actual1)

        actual2 = ds.fillna({"a": -1})
        assert_identical(expected, actual2)

        other = Dataset({"a": -1})
        actual3 = ds.fillna(other)
        assert_identical(expected, actual3)

        actual4 = ds.fillna({"a": other.a})
        assert_identical(expected, actual4)

        # fill with range(4)
        b = DataArray(range(4), coords=[("x", range(4))])
        actual5 = ds.fillna(b)
        expected = b.rename("a").to_dataset()
        assert_identical(expected, actual5)
actual6 = ds.fillna(expected) assert_identical(expected, actual6) actual7 = ds.fillna(np.arange(4)) assert_identical(expected, actual7) actual8 = ds.fillna(b[:3]) assert_identical(expected, actual8) # okay to only include some data variables ds["b"] = np.nan actual9 = ds.fillna({"a": -1}) expected = Dataset( {"a": ("x", [-1, 1, -1, 3]), "b": np.nan}, {"x": [0, 1, 2, 3]} ) assert_identical(expected, actual9) # but new data variables is not okay with pytest.raises(ValueError, match=r"must be contained"): ds.fillna({"x": 0}) # empty argument should be OK result1 = ds.fillna({}) assert_identical(ds, result1) result2 = ds.fillna(Dataset(coords={"c": 42})) expected = ds.assign_coords(c=42) assert_identical(expected, result2) da = DataArray(range(5), name="a", attrs={"attr": "da"}) actual10 = da.fillna(1) assert actual10.name == "a" assert actual10.attrs == da.attrs ds = Dataset({"a": da}, attrs={"attr": "ds"}) actual11 = ds.fillna({"a": 1}) assert actual11.attrs == ds.attrs assert actual11.a.name == "a" assert actual11.a.attrs == ds.a.attrs @pytest.mark.parametrize( "func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs] ) def test_propagate_attrs(self, func) -> None: da = DataArray(range(5), name="a", attrs={"attr": "da"}) ds = Dataset({"a": da}, attrs={"attr": "ds"}) # test defaults assert func(ds).attrs == ds.attrs with set_options(keep_attrs=False): assert func(ds).attrs != ds.attrs assert func(ds).a.attrs != ds.a.attrs with set_options(keep_attrs=False): assert func(ds).attrs != ds.attrs assert func(ds).a.attrs != ds.a.attrs with set_options(keep_attrs=True): assert func(ds).attrs == ds.attrs assert func(ds).a.attrs == ds.a.attrs def test_where(self) -> None: ds = Dataset({"a": ("x", range(5))}) expected1 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])}) actual1 = ds.where(ds > 1) assert_identical(expected1, actual1) actual2 = ds.where(ds.a > 1) assert_identical(expected1, actual2) actual3 = ds.where(ds.a.values > 1) assert_identical(expected1, actual3) actual4 = ds.where(True) assert_identical(ds, actual4) expected5 = ds.copy(deep=True) expected5["a"].values = np.array([np.nan] * 5) actual5 = ds.where(False) assert_identical(expected5, actual5) # 2d ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])}) actual6 = ds.where(ds > 0) assert_identical(expected6, actual6) # attrs da = DataArray(range(5), name="a", attrs={"attr": "da"}) actual7 = da.where(da.values > 1) assert actual7.name == "a" assert actual7.attrs == da.attrs ds = Dataset({"a": da}, attrs={"attr": "ds"}) actual8 = ds.where(ds > 0) assert actual8.attrs == ds.attrs assert actual8.a.name == "a" assert actual8.a.attrs == ds.a.attrs # lambda ds = Dataset({"a": ("x", range(5))}) expected9 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])}) actual9 = ds.where(lambda x: x > 1) assert_identical(expected9, actual9) def test_where_other(self) -> None: ds = Dataset({"a": ("x", range(5))}, {"x": range(5)}) expected = Dataset({"a": ("x", [-1, -1, 2, 3, 4])}, {"x": range(5)}) actual = ds.where(ds > 1, -1) assert_equal(expected, actual) assert actual.a.dtype == int actual = ds.where(lambda x: x > 1, -1) assert_equal(expected, actual) actual = ds.where(ds > 1, other=-1, drop=True) expected_nodrop = ds.where(ds > 1, -1) _, expected = xr.align(actual, expected_nodrop, join="left") assert_equal(actual, expected) assert actual.a.dtype == int with pytest.raises(ValueError, match=r"cannot align .* are not equal"): ds.where(ds > 1, ds.isel(x=slice(3))) with 
pytest.raises(ValueError, match=r"exact match required"): ds.where(ds > 1, ds.assign(b=2)) def test_where_drop(self) -> None: # if drop=True # 1d # data array case array = DataArray(range(5), coords=[range(5)], dims=["x"]) expected1 = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=["x"]) actual1 = array.where(array > 1, drop=True) assert_identical(expected1, actual1) # dataset case ds = Dataset({"a": array}) expected2 = Dataset({"a": expected1}) actual2 = ds.where(ds > 1, drop=True) assert_identical(expected2, actual2) actual3 = ds.where(ds.a > 1, drop=True) assert_identical(expected2, actual3) with pytest.raises(TypeError, match=r"must be a"): ds.where(np.arange(5) > 1, drop=True) # 1d with odd coordinates array = DataArray( np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=["x"] ) expected4 = DataArray( np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=["x"] ) actual4 = array.where(array > 2, drop=True) assert_identical(expected4, actual4) # 1d multiple variables ds = Dataset({"a": (("x"), [0, 1, 2, 3]), "b": (("x"), [4, 5, 6, 7])}) expected5 = Dataset( {"a": (("x"), [np.nan, 1, 2, 3]), "b": (("x"), [4, 5, 6, np.nan])} ) actual5 = ds.where((ds > 0) & (ds < 7), drop=True) assert_identical(expected5, actual5) # 2d ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])}) actual6 = ds.where(ds > 0, drop=True) assert_identical(expected6, actual6) # 2d with odd coordinates ds = Dataset( {"a": (("x", "y"), [[0, 1], [2, 3]])}, coords={ "x": [4, 3], "y": [1, 2], "z": (["x", "y"], [[np.exp(1), np.pi], [np.pi * np.exp(1), np.pi * 3]]), }, ) expected7 = Dataset( {"a": (("x", "y"), [[3]])}, coords={"x": [3], "y": [2], "z": (["x", "y"], [[np.pi * 3]])}, ) actual7 = ds.where(ds > 2, drop=True) assert_identical(expected7, actual7) # 2d multiple variables ds = Dataset( {"a": (("x", "y"), [[0, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]])} ) expected8 = Dataset( { "a": (("x", "y"), [[np.nan, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]]), } ) actual8 = ds.where(ds > 0, drop=True) assert_identical(expected8, actual8) # mixed dimensions: PR#6690, Issue#6227 ds = xr.Dataset( { "a": ("x", [1, 2, 3]), "b": ("y", [2, 3, 4]), "c": (("x", "y"), np.arange(9).reshape((3, 3))), } ) expected9 = xr.Dataset( { "a": ("x", [np.nan, 3]), "b": ("y", [np.nan, 3, 4]), "c": (("x", "y"), np.arange(3.0, 9.0).reshape((2, 3))), } ) actual9 = ds.where(ds > 2, drop=True) assert actual9.sizes["x"] == 2 assert_identical(expected9, actual9) def test_where_drop_empty(self) -> None: # regression test for GH1341 array = DataArray(np.random.rand(100, 10), dims=["nCells", "nVertLevels"]) mask = DataArray(np.zeros((100,), dtype="bool"), dims="nCells") actual = array.where(mask, drop=True) expected = DataArray(np.zeros((0, 10)), dims=["nCells", "nVertLevels"]) assert_identical(expected, actual) def test_where_drop_no_indexes(self) -> None: ds = Dataset({"foo": ("x", [0.0, 1.0])}) expected = Dataset({"foo": ("x", [1.0])}) actual = ds.where(ds == 1, drop=True) assert_identical(expected, actual) def test_reduce(self) -> None: data = create_test_data() assert len(data.mean().coords) == 0 actual = data.max() expected = Dataset({k: v.max() for k, v in data.data_vars.items()}) assert_equal(expected, actual) assert_equal(data.min(dim=["dim1"]), data.min(dim="dim1")) for reduct, expected_dims in [ ("dim2", ["dim3", "time", "dim1"]), (["dim2", "time"], ["dim3", "dim1"]), (("dim2", "time"), ["dim3", "dim1"]), ((), ["dim2", "dim3", "time", "dim1"]), ]: 
actual_dims = list(data.min(dim=reduct).dims) assert actual_dims == expected_dims assert_equal(data.mean(dim=[]), data) with pytest.raises(ValueError): data.mean(axis=0) def test_reduce_coords(self) -> None: # regression test for GH1470 data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4}) expected = xr.Dataset({"a": 2}, coords={"b": 4}) actual = data.mean("x") assert_identical(actual, expected) # should be consistent actual = data["a"].mean("x").to_dataset() assert_identical(actual, expected) def test_mean_uint_dtype(self) -> None: data = xr.Dataset( { "a": (("x", "y"), np.arange(6).reshape(3, 2).astype("uint")), "b": (("x",), np.array([0.1, 0.2, np.nan])), } ) actual = data.mean("x", skipna=True) expected = xr.Dataset( {"a": data["a"].mean("x"), "b": data["b"].mean("x", skipna=True)} ) assert_identical(actual, expected) def test_reduce_bad_dim(self) -> None: data = create_test_data() with pytest.raises( ValueError, match=re.escape("Dimension(s) 'bad_dim' do not exist"), ): data.mean(dim="bad_dim") def test_reduce_cumsum(self) -> None: data = xr.Dataset( {"a": 1, "b": ("x", [1, 2]), "c": (("x", "y"), [[np.nan, 3], [0, 4]])} ) assert_identical(data.fillna(0), data.cumsum("y")) expected = xr.Dataset( {"a": 1, "b": ("x", [1, 3]), "c": (("x", "y"), [[0, 3], [0, 7]])} ) assert_identical(expected, data.cumsum()) @pytest.mark.parametrize( "reduct, expected", [ ("dim1", ["dim2", "dim3", "time", "dim1"]), ("dim2", ["dim3", "time", "dim1", "dim2"]), ("dim3", ["dim2", "time", "dim1", "dim3"]), ("time", ["dim2", "dim3", "dim1"]), ], ) @pytest.mark.parametrize("func", ["cumsum", "cumprod"]) def test_reduce_cumsum_test_dims(self, reduct, expected, func) -> None: data = create_test_data() with pytest.raises( ValueError, match=re.escape("Dimension(s) 'bad_dim' do not exist"), ): getattr(data, func)(dim="bad_dim") # ensure dimensions are correct actual = getattr(data, func)(dim=reduct).dims assert list(actual) == expected def test_reduce_non_numeric(self) -> None: data1 = create_test_data(seed=44, use_extension_array=True) data2 = create_test_data(seed=44) add_vars = {"var6": ["dim1", "dim2"], "var7": ["dim1"]} for v, dims in sorted(add_vars.items()): size = tuple(data1.sizes[d] for d in dims) data = np.random.randint(0, 100, size=size).astype(np.str_) data1[v] = (dims, data, {"foo": "variable"}) # var4 and var5 are extension arrays and should be dropped assert ( "var4" not in data1.mean() and "var5" not in data1.mean() and "var6" not in data1.mean() and "var7" not in data1.mean() ) assert_equal(data1.mean(), data2.mean()) assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1")) assert "var6" not in data1.mean(dim="dim2") and "var7" in data1.mean(dim="dim2") @pytest.mark.filterwarnings( "ignore:Once the behaviour of DataArray:DeprecationWarning" ) def test_reduce_strings(self) -> None: expected = Dataset({"x": "a"}) ds = Dataset({"x": ("y", ["a", "b"])}) ds.coords["y"] = [-10, 10] actual = ds.min() assert_identical(expected, actual) expected = Dataset({"x": "b"}) actual = ds.max() assert_identical(expected, actual) expected = Dataset({"x": 0}) actual = ds.argmin() assert_identical(expected, actual) expected = Dataset({"x": 1}) actual = ds.argmax() assert_identical(expected, actual) expected = Dataset({"x": -10}) actual = ds.idxmin() assert_identical(expected, actual) expected = Dataset({"x": 10}) actual = ds.idxmax() assert_identical(expected, actual) expected = Dataset({"x": b"a"}) ds = Dataset({"x": ("y", np.array(["a", "b"], "S1"))}) actual = ds.min() assert_identical(expected, actual) 
expected = Dataset({"x": "a"}) ds = Dataset({"x": ("y", np.array(["a", "b"], "U1"))}) actual = ds.min() assert_identical(expected, actual) def test_reduce_dtypes(self) -> None: # regression test for GH342 expected = Dataset({"x": 1}) actual = Dataset({"x": True}).sum() assert_identical(expected, actual) # regression test for GH505 expected = Dataset({"x": 3}) actual = Dataset({"x": ("y", np.array([1, 2], "uint16"))}).sum() assert_identical(expected, actual) expected = Dataset({"x": 1 + 1j}) actual = Dataset({"x": ("y", [1, 1j])}).sum() assert_identical(expected, actual) def test_reduce_keep_attrs(self) -> None: data = create_test_data() _attrs = {"attr1": "value1", "attr2": 2929} attrs = dict(_attrs) data.attrs = attrs # Test default behavior (keeps attrs for reduction operations) ds = data.mean() assert ds.attrs == attrs for k, v in ds.data_vars.items(): assert v.attrs == data[k].attrs # Test explicitly keeping attrs ds = data.mean(keep_attrs=True) assert ds.attrs == attrs for k, v in ds.data_vars.items(): assert v.attrs == data[k].attrs # Test explicitly dropping attrs ds = data.mean(keep_attrs=False) assert ds.attrs == {} for v in ds.data_vars.values(): assert v.attrs == {} @pytest.mark.filterwarnings( "ignore:Once the behaviour of DataArray:DeprecationWarning" ) def test_reduce_argmin(self) -> None: # regression test for #205 ds = Dataset({"a": ("x", [0, 1])}) expected = Dataset({"a": ([], 0)}) actual = ds.argmin() assert_identical(expected, actual) actual = ds.argmin("x") assert_identical(expected, actual) def test_reduce_scalars(self) -> None: ds = Dataset({"x": ("a", [2, 2]), "y": 2, "z": ("b", [2])}) expected = Dataset({"x": 0, "y": 0, "z": 0}) actual = ds.var() assert_identical(expected, actual) expected = Dataset({"x": 0, "y": 0, "z": ("b", [0])}) actual = ds.var("a") assert_identical(expected, actual) def test_reduce_only_one_axis(self) -> None: def mean_only_one_axis(x, axis): if not isinstance(axis, integer_types): raise TypeError("non-integer axis") return x.mean(axis) ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])}) expected = Dataset({"a": ("x", [2])}) actual = ds.reduce(mean_only_one_axis, "y") assert_identical(expected, actual) with pytest.raises( TypeError, match=r"missing 1 required positional argument: 'axis'" ): ds.reduce(mean_only_one_axis) def test_reduce_no_axis(self) -> None: def total_sum(x): return np.sum(x.flatten()) ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])}) expected = Dataset({"a": ((), 10)}) actual = ds.reduce(total_sum) assert_identical(expected, actual) with pytest.raises(TypeError, match=r"unexpected keyword argument 'axis'"): ds.reduce(total_sum, dim="x") def test_reduce_keepdims(self) -> None: ds = Dataset( {"a": (["x", "y"], [[0, 1, 2, 3, 4]])}, coords={ "y": [0, 1, 2, 3, 4], "x": [0], "lat": (["x", "y"], [[0, 1, 2, 3, 4]]), "c": -999.0, }, ) # Shape should match behaviour of numpy reductions with keepdims=True # Coordinates involved in the reduction should be removed actual = ds.mean(keepdims=True) expected = Dataset( {"a": (["x", "y"], np.mean(ds.a, keepdims=True).data)}, coords={"c": ds.c} ) assert_identical(expected, actual) actual = ds.mean("x", keepdims=True) expected = Dataset( {"a": (["x", "y"], np.mean(ds.a, axis=0, keepdims=True).data)}, coords={"y": ds.y, "c": ds.c}, ) assert_identical(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) def test_quantile(self, q, skipna, 
compute_backend) -> None: ds = create_test_data(seed=123) ds.var1.data[0, 0] = np.nan for dim in [None, "dim1", ["dim1"]]: ds_quantile = ds.quantile(q, dim=dim, skipna=skipna) if is_scalar(q): assert "quantile" not in ds_quantile.dims else: assert "quantile" in ds_quantile.dims for var, dar in ds.data_vars.items(): assert var in ds_quantile assert_identical( ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna) ) dim = ["dim1", "dim2"] ds_quantile = ds.quantile(q, dim=dim, skipna=skipna) assert "dim3" in ds_quantile.dims assert all(d not in ds_quantile.dims for d in dim) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False]) def test_quantile_skipna(self, skipna, compute_backend) -> None: q = 0.1 dim = "time" ds = Dataset({"a": ([dim], np.arange(0, 11))}) ds = ds.where(ds >= 1) result = ds.quantile(q=q, dim=dim, skipna=skipna) value = 1.9 if skipna else np.nan expected = Dataset({"a": value}, coords={"quantile": q}) assert_identical(result, expected) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_method(self, method) -> None: ds = create_test_data(seed=123) q = [0.25, 0.5, 0.75] result = ds.quantile(q, method=method) assert_identical(result.var1, ds.var1.quantile(q, method=method)) assert_identical(result.var2, ds.var2.quantile(q, method=method)) assert_identical(result.var3, ds.var3.quantile(q, method=method)) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_interpolation_deprecated(self, method) -> None: ds = create_test_data(seed=123) q = [0.25, 0.5, 0.75] with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): ds.quantile(q, interpolation=method) with warnings.catch_warnings(record=True): with pytest.raises(TypeError, match="interpolation and method keywords"): ds.quantile(q, method=method, interpolation=method) @requires_bottleneck def test_rank(self) -> None: ds = create_test_data(seed=1234) # only ds.var3 depends on dim3 z = ds.rank("dim3") assert ["var3"] == list(z.data_vars) # same as dataarray version x = z.var3 y = ds.var3.rank("dim3") assert_equal(x, y) # coordinates stick assert list(z.coords) == list(ds.coords) assert list(x.coords) == list(y.coords) # invalid dim with pytest.raises( ValueError, match=re.escape( "Dimension 'invalid_dim' not found in data dimensions ('dim3', 'dim1')" ), ): x.rank("invalid_dim") def test_rank_use_bottleneck(self) -> None: ds = Dataset({"a": ("x", [0, np.nan, 2]), "b": ("y", [4, 6, 3, 4])}) with xr.set_options(use_bottleneck=False): with pytest.raises(RuntimeError): ds.rank("x") def test_count(self) -> None: ds = Dataset({"x": ("a", [np.nan, 1]), "y": 0, "z": np.nan}) expected = Dataset({"x": 1, "y": 1, "z": 0}) actual = ds.count() assert_identical(expected, actual) def test_map(self) -> None: data = create_test_data() data.attrs["foo"] = "bar" # data.map keeps all attrs by default assert_identical(data.map(np.mean), data.mean()) expected = data.mean(keep_attrs=True) actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True) assert_identical(expected, actual) assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars("time")) def scale(x, multiple=1): return multiple * x actual = data.map(scale, multiple=2) assert_equal(actual["var1"], 2 * data["var1"]) assert_identical(actual["numbers"], data["numbers"]) actual = 
data.map(np.asarray) expected = data.drop_vars("time") # time is not used on a data var assert_equal(expected, actual) def test_map_coords_attrs(self) -> None: ds = xr.Dataset( { "a": ( ["x", "y", "z"], np.arange(24).reshape(3, 4, 2), {"attr1": "value1"}, ), "b": ("y", np.arange(4), {"attr2": "value2"}), }, coords={ "x": ("x", np.array([-1, 0, 1]), {"attr3": "value3"}), "z": ("z", list("ab"), {"attr4": "value4"}), }, ) def func(arr): if "y" not in arr.dims: return arr # drop attrs from coords return arr.mean(dim="y").drop_attrs() expected = ds.mean(dim="y", keep_attrs=True) actual = ds.map(func, keep_attrs=True) assert_identical(actual, expected) assert actual["x"].attrs ds["x"].attrs["y"] = "x" assert ds["x"].attrs != actual["x"].attrs def test_map_non_dataarray_outputs(self) -> None: # Test that map handles non-DataArray outputs by converting them # Regression test for GH10835 ds = xr.Dataset({"foo": ("x", [1, 2, 3]), "bar": ("y", [4, 5])}) # Scalar output result = ds.map(lambda x: 1) expected = xr.Dataset({"foo": 1, "bar": 1}) assert_identical(result, expected) # Numpy array output with same shape result = ds.map(lambda x: x.values) expected = ds.copy() assert_identical(result, expected) # Mixed: some return scalars, some return arrays def mixed_func(x): if "x" in x.dims: return 42 return x result = ds.map(mixed_func) expected = xr.Dataset({"foo": 42, "bar": ("y", [4, 5])}) assert_identical(result, expected) def test_apply_pending_deprecated_map(self) -> None: data = create_test_data() data.attrs["foo"] = "bar" with pytest.warns(PendingDeprecationWarning): # data.apply keeps all attrs by default assert_identical(data.apply(np.mean), data.mean()) def make_example_math_dataset(self): variables = { "bar": ("x", np.arange(100, 400, 100)), "foo": (("x", "y"), 1.0 * np.arange(12).reshape(3, 4)), } coords = {"abc": ("x", ["a", "b", "c"]), "y": 10 * np.arange(4)} ds = Dataset(variables, coords) ds["foo"][0, 0] = np.nan return ds def test_dataset_number_math(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds, +ds) assert_identical(ds, ds + 0) assert_identical(ds, 0 + ds) assert_identical(ds, ds + np.array(0)) assert_identical(ds, np.array(0) + ds) actual = ds.copy(deep=True) actual += 0 assert_identical(ds, actual) # casting nan warns @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") def test_unary_ops(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds.map(abs), abs(ds)) assert_identical(ds.map(lambda x: x + 4), ds + 4) for func in [ lambda x: x.isnull(), lambda x: x.round(), lambda x: x.astype(int), ]: assert_identical(ds.map(func), func(ds)) assert_identical(ds.isnull(), ~ds.notnull()) # don't actually patch these methods in with pytest.raises(AttributeError): _ = ds.item with pytest.raises(AttributeError): _ = ds.searchsorted def test_dataset_array_math(self) -> None: ds = self.make_example_math_dataset() expected = ds.map(lambda x: x - ds["foo"]) assert_identical(expected, ds - ds["foo"]) assert_identical(expected, -ds["foo"] + ds) assert_identical(expected, ds - ds["foo"].variable) assert_identical(expected, -ds["foo"].variable + ds) actual = ds.copy(deep=True) actual -= ds["foo"] assert_identical(expected, actual) expected = ds.map(lambda x: x + ds["bar"]) assert_identical(expected, ds + ds["bar"]) actual = ds.copy(deep=True) actual += ds["bar"] assert_identical(expected, actual) expected = Dataset({"bar": ds["bar"] + np.arange(3)}) assert_identical(expected, ds[["bar"]] + np.arange(3)) assert_identical(expected, 
np.arange(3) + ds[["bar"]]) def test_dataset_dataset_math(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds, ds + 0 * ds) assert_identical(ds, ds + {"foo": 0, "bar": 0}) expected = ds.map(lambda x: 2 * x) assert_identical(expected, 2 * ds) assert_identical(expected, ds + ds) assert_identical(expected, ds + ds.data_vars) assert_identical(expected, ds + dict(ds.data_vars)) actual = ds.copy(deep=True) expected_id = id(actual) actual += ds assert_identical(expected, actual) assert expected_id == id(actual) assert_identical(ds == ds, ds.notnull()) subsampled = ds.isel(y=slice(2)) expected = 2 * subsampled assert_identical(expected, subsampled + ds) assert_identical(expected, ds + subsampled) def test_dataset_math_auto_align(self) -> None: ds = self.make_example_math_dataset() subset = ds.isel(y=[1, 3]) expected = 2 * subset actual = ds + subset assert_identical(expected, actual) actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None)) expected = 2 * ds.drop_sel(y=ds.y) assert_equal(actual, expected) actual = ds + ds[["bar"]] expected = (2 * ds[["bar"]]).merge(ds.coords, compat="override") assert_identical(expected, actual) assert_identical(ds + Dataset(), ds.coords.to_dataset()) assert_identical(Dataset() + Dataset(), Dataset()) ds2 = Dataset(coords={"bar": 42}) assert_identical(ds + ds2, ds.coords.merge(ds2)) # maybe unary arithmetic with empty datasets should raise instead? assert_identical(Dataset() + 1, Dataset()) actual = ds.copy(deep=True) other = ds.isel(y=slice(2)) actual += other expected = ds + other.reindex_like(ds) assert_identical(expected, actual) def test_dataset_math_errors(self) -> None: ds = self.make_example_math_dataset() with pytest.raises(TypeError): ds["foo"] += ds with pytest.raises(TypeError): ds["foo"].variable += ds with pytest.raises(ValueError, match=r"must have the same"): ds += ds[["bar"]] # verify we can rollback in-place operations if something goes wrong # nb. inplace datetime64 math actually will work with an integer array # but not floats thanks to numpy's inconsistent handling other = DataArray(np.datetime64("2000-01-01"), coords={"c": 2}) actual = ds.copy(deep=True) with pytest.raises(TypeError): actual += other assert_identical(actual, ds) def test_dataset_transpose(self) -> None: ds = Dataset( { "a": (("x", "y"), np.random.randn(3, 4)), "b": (("y", "x"), np.random.randn(4, 3)), }, coords={ "x": range(3), "y": range(4), "xy": (("x", "y"), np.random.randn(3, 4)), }, ) actual = ds.transpose() expected = Dataset( {"a": (("y", "x"), ds.a.values.T), "b": (("x", "y"), ds.b.values.T)}, coords={ "x": ds.x.values, "y": ds.y.values, "xy": (("y", "x"), ds.xy.values.T), }, ) assert_identical(expected, actual) actual = ds.transpose(...) expected = ds assert_identical(expected, actual) actual = ds.transpose("x", "y") expected = ds.map(lambda x: x.transpose("x", "y", transpose_coords=True)) assert_identical(expected, actual) ds = create_test_data() actual = ds.transpose() for k in ds.variables: assert actual[k].dims[::-1] == ds[k].dims new_order = ("dim2", "dim3", "dim1", "time") actual = ds.transpose(*new_order) for k in ds.variables: expected_dims = tuple(d for d in new_order if d in ds[k].dims) assert actual[k].dims == expected_dims # same as above but with ellipsis new_order = ("dim2", "dim3", "dim1", "time") actual = ds.transpose("dim2", "dim3", ...) 
for k in ds.variables: expected_dims = tuple(d for d in new_order if d in ds[k].dims) assert actual[k].dims == expected_dims # test missing dimension, raise error with pytest.raises(ValueError): ds.transpose(..., "not_a_dim") # test missing dimension, ignore error actual = ds.transpose(..., "not_a_dim", missing_dims="ignore") expected_ell = ds.transpose(...) assert_identical(expected_ell, actual) # test missing dimension, raise warning with pytest.warns(UserWarning): actual = ds.transpose(..., "not_a_dim", missing_dims="warn") assert_identical(expected_ell, actual) assert "T" not in dir(ds) def test_dataset_ellipsis_transpose_different_ordered_vars(self) -> None: # https://github.com/pydata/xarray/issues/1081#issuecomment-544350457 ds = Dataset( dict( a=(("w", "x", "y", "z"), np.ones((2, 3, 4, 5))), b=(("x", "w", "y", "z"), np.zeros((3, 2, 4, 5))), ) ) result = ds.transpose(..., "z", "y") assert list(result["a"].dims) == list("wxzy") assert list(result["b"].dims) == list("xwzy") def test_dataset_retains_period_index_on_transpose(self) -> None: ds = create_test_data() ds["time"] = pd.period_range("2000-01-01", periods=20) transposed = ds.transpose() assert isinstance(transposed.time.to_index(), pd.PeriodIndex) def test_dataset_diff_n1_simple(self) -> None: ds = Dataset({"foo": ("x", [5, 5, 6, 6])}) actual = ds.diff("x") expected = Dataset({"foo": ("x", [0, 1, 0])}) assert_equal(expected, actual) def test_dataset_diff_n1_label(self) -> None: ds = Dataset({"foo": ("x", [5, 5, 6, 6])}, {"x": [0, 1, 2, 3]}) actual = ds.diff("x", label="lower") expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [0, 1, 2]}) assert_equal(expected, actual) actual = ds.diff("x", label="upper") expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [1, 2, 3]}) assert_equal(expected, actual) def test_dataset_diff_n1(self) -> None: ds = create_test_data(seed=1) actual = ds.diff("dim2") expected_dict = {} expected_dict["var1"] = DataArray( np.diff(ds["var1"].values, axis=1), {"dim2": ds["dim2"].values[1:]}, ["dim1", "dim2"], ) expected_dict["var2"] = DataArray( np.diff(ds["var2"].values, axis=1), {"dim2": ds["dim2"].values[1:]}, ["dim1", "dim2"], ) expected_dict["var3"] = ds["var3"] expected = Dataset(expected_dict, coords={"time": ds["time"].values}) expected.coords["numbers"] = ("dim3", ds["numbers"].values) assert_equal(expected, actual) def test_dataset_diff_n2(self) -> None: ds = create_test_data(seed=1) actual = ds.diff("dim2", n=2) expected_dict = {} expected_dict["var1"] = DataArray( np.diff(ds["var1"].values, axis=1, n=2), {"dim2": ds["dim2"].values[2:]}, ["dim1", "dim2"], ) expected_dict["var2"] = DataArray( np.diff(ds["var2"].values, axis=1, n=2), {"dim2": ds["dim2"].values[2:]}, ["dim1", "dim2"], ) expected_dict["var3"] = ds["var3"] expected = Dataset(expected_dict, coords={"time": ds["time"].values}) expected.coords["numbers"] = ("dim3", ds["numbers"].values) assert_equal(expected, actual) def test_dataset_diff_exception_n_neg(self) -> None: ds = create_test_data(seed=1) with pytest.raises(ValueError, match=r"must be non-negative"): ds.diff("dim2", n=-1) def test_dataset_diff_exception_label_str(self) -> None: ds = create_test_data(seed=1) with pytest.raises(ValueError, match=r"'label' argument has to"): ds.diff("dim2", label="raise_me") # type: ignore[arg-type] @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": -10}]) def test_shift(self, fill_value) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) 
actual = ds.shift(x=1, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan elif isinstance(fill_value, dict): fill_value = fill_value.get("foo", np.nan) expected = Dataset({"foo": ("x", [fill_value, 1, 2])}, coords, attrs) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"dimensions"): ds.shift(foo=123) def test_roll_coords(self) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) actual = ds.roll(x=1, roll_coords=True) ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]} expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"dimensions"): ds.roll(foo=123, roll_coords=True) def test_roll_no_coords(self) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) actual = ds.roll(x=1) expected = Dataset({"foo": ("x", [3, 1, 2])}, coords, attrs) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"dimensions"): ds.roll(abc=321) def test_roll_multidim(self) -> None: # regression test for 2445 arr = xr.DataArray( [[1, 2, 3], [4, 5, 6]], coords={"x": range(3), "y": range(2)}, dims=("y", "x"), ) actual = arr.roll(x=1, roll_coords=True) expected = xr.DataArray( [[3, 1, 2], [6, 4, 5]], coords=[("y", [0, 1]), ("x", [2, 0, 1])] ) assert_identical(expected, actual) def test_real_and_imag(self) -> None: attrs = {"foo": "bar"} ds = Dataset({"x": ((), 1 + 2j, attrs)}, attrs=attrs) expected_re = Dataset({"x": ((), 1, attrs)}, attrs=attrs) assert_identical(ds.real, expected_re) expected_im = Dataset({"x": ((), 2, attrs)}, attrs=attrs) assert_identical(ds.imag, expected_im) def test_setattr_raises(self) -> None: ds = Dataset({}, coords={"scalar": 1}, attrs={"foo": "bar"}) with pytest.raises(AttributeError, match=r"cannot set attr"): ds.scalar = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): ds.foo = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): ds.other = 2 def test_filter_by_attrs(self) -> None: precip = dict(standard_name="convective_precipitation_flux") temp0 = dict(standard_name="air_potential_temperature", height="0 m") temp10 = dict(standard_name="air_potential_temperature", height="10 m") ds = Dataset( { "temperature_0": (["t"], [0], temp0), "temperature_10": (["t"], [0], temp10), "precipitation": (["t"], [0], precip), }, coords={"time": (["t"], [0], dict(axis="T", long_name="time_in_seconds"))}, ) # Test return empty Dataset. ds.filter_by_attrs(standard_name="invalid_standard_name") new_ds = ds.filter_by_attrs(standard_name="invalid_standard_name") assert not bool(new_ds.data_vars) # Test return one DataArray. new_ds = ds.filter_by_attrs(standard_name="convective_precipitation_flux") assert new_ds["precipitation"].standard_name == "convective_precipitation_flux" assert_equal(new_ds["precipitation"], ds["precipitation"]) # Test filter coordinates new_ds = ds.filter_by_attrs(long_name="time_in_seconds") assert new_ds["time"].long_name == "time_in_seconds" assert not bool(new_ds.data_vars) # Test return more than one DataArray. new_ds = ds.filter_by_attrs(standard_name="air_potential_temperature") assert len(new_ds.data_vars) == 2 for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" # Test callable. 
new_ds = ds.filter_by_attrs(height=lambda v: v is not None) assert len(new_ds.data_vars) == 2 for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" new_ds = ds.filter_by_attrs(height="10 m") assert len(new_ds.data_vars) == 1 for var in new_ds.data_vars: assert new_ds[var].height == "10 m" # Test return empty Dataset due to conflicting filters new_ds = ds.filter_by_attrs( standard_name="convective_precipitation_flux", height="0 m" ) assert not bool(new_ds.data_vars) # Test return one DataArray with two filter conditions new_ds = ds.filter_by_attrs( standard_name="air_potential_temperature", height="0 m" ) for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" assert new_ds[var].height == "0 m" assert new_ds[var].height != "10 m" # Test return empty Dataset due to conflicting callables new_ds = ds.filter_by_attrs( standard_name=lambda v: False, height=lambda v: True ) assert not bool(new_ds.data_vars) def test_binary_op_propagate_indexes(self) -> None: ds = Dataset( {"d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]})} ) expected = ds.xindexes["x"] actual = (ds * 2).xindexes["x"] assert expected is actual def test_binary_op_join_setting(self) -> None: # arithmetic_join applies to data array coordinates missing_2 = xr.Dataset({"x": [0, 1]}) missing_0 = xr.Dataset({"x": [1, 2]}) with xr.set_options(arithmetic_join="outer"): actual = missing_2 + missing_0 expected = xr.Dataset({"x": [0, 1, 2]}) assert_equal(actual, expected) # arithmetic join also applies to data_vars ds1 = xr.Dataset({"foo": 1, "bar": 2}) ds2 = xr.Dataset({"bar": 2, "baz": 3}) expected = xr.Dataset({"bar": 4}) # default is inner joining actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="outer"): expected = xr.Dataset({"foo": np.nan, "bar": 4, "baz": np.nan}) actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="left"): expected = xr.Dataset({"foo": np.nan, "bar": 4}) actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="right"): expected = xr.Dataset({"bar": 4, "baz": np.nan}) actual = ds1 + ds2 assert_equal(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "expected"], ( pytest.param(False, {}, id="False"), pytest.param( True, {"foo": "a", "bar": "b", "baz": "c"}, id="True" ), # drop_conflicts combines non-conflicting attrs ), ) def test_binary_ops_keep_attrs(self, keep_attrs, expected) -> None: ds1 = xr.Dataset({"a": 1}, attrs={"foo": "a", "bar": "b"}) ds2 = xr.Dataset({"a": 1}, attrs={"foo": "a", "baz": "c"}) with xr.set_options(keep_attrs=keep_attrs): ds_result = ds1 + ds2 assert ds_result.attrs == expected def test_binary_ops_attrs_drop_conflicts(self) -> None: # Test that binary operations combine attrs with drop_conflicts behavior attrs1 = {"units": "meters", "long_name": "distance", "source": "sensor_a"} attrs2 = {"units": "feet", "resolution": "high", "source": "sensor_b"} ds1 = xr.Dataset({"a": 1}, attrs=attrs1) ds2 = xr.Dataset({"a": 2}, attrs=attrs2) # With keep_attrs=True (default), should combine attrs dropping conflicts result = ds1 + ds2 # "units" and "source" conflict, so they're dropped # "long_name" only in ds1, "resolution" only in ds2, so they're kept assert result.attrs == {"long_name": "distance", "resolution": "high"} # Test with identical values for some attrs attrs3 = {"units": "meters", "type": "data", "source": "sensor_c"} ds3 = xr.Dataset({"a": 3}, attrs=attrs3) result2 = ds1 + ds3 # "units" has 
same value, so kept; "source" conflicts, so dropped # "long_name" from ds1, "type" from ds3 assert result2.attrs == { "units": "meters", "long_name": "distance", "type": "data", } # With keep_attrs=False, attrs should be empty with xr.set_options(keep_attrs=False): result3 = ds1 + ds2 assert result3.attrs == {} def test_full_like(self) -> None: # For more thorough tests, see test_variable.py # Note: testing data_vars with mismatched dtypes ds = Dataset( { "d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}), "d2": DataArray([1.1, 2.2, 3.3], dims=["y"]), }, attrs={"foo": "bar"}, ) actual = full_like(ds, 2) expected = ds.copy(deep=True) # https://github.com/python/mypy/issues/3004 expected["d1"].values = [2, 2, 2] # type: ignore[assignment,unused-ignore] expected["d2"].values = [2.0, 2.0, 2.0] # type: ignore[assignment,unused-ignore] assert expected["d1"].dtype == int assert expected["d2"].dtype == float assert_identical(expected, actual) # override dtype actual = full_like(ds, fill_value=True, dtype=bool) expected = ds.copy(deep=True) expected["d1"].values = [True, True, True] # type: ignore[assignment,unused-ignore] expected["d2"].values = [True, True, True] # type: ignore[assignment,unused-ignore] assert expected["d1"].dtype == bool assert expected["d2"].dtype == bool assert_identical(expected, actual) # with multiple fill values actual = full_like(ds, {"d1": 1, "d2": 2.3}) expected = ds.assign(d1=("x", [1, 1, 1]), d2=("y", [2.3, 2.3, 2.3])) assert expected["d1"].dtype == int assert expected["d2"].dtype == float assert_identical(expected, actual) # override multiple dtypes actual = full_like(ds, fill_value={"d1": 1, "d2": 2.3}, dtype={"d1": bool}) expected = ds.assign(d1=("x", [True, True, True]), d2=("y", [2.3, 2.3, 2.3])) assert expected["d1"].dtype == bool assert expected["d2"].dtype == float assert_identical(expected, actual) def test_combine_first(self) -> None: dsx0 = DataArray([0, 0], [("x", ["a", "b"])]).to_dataset(name="dsx0") dsx1 = DataArray([1, 1], [("x", ["b", "c"])]).to_dataset(name="dsx1") actual = dsx0.combine_first(dsx1) expected = Dataset( {"dsx0": ("x", [0, 0, np.nan]), "dsx1": ("x", [np.nan, 1, 1])}, coords={"x": ["a", "b", "c"]}, ) assert_equal(actual, expected) assert_equal(actual, xr.merge([dsx0, dsx1], join="outer")) # works just like xr.merge([self, other]) dsy2 = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2") actual = dsx0.combine_first(dsy2) expected = xr.merge([dsy2, dsx0], join="outer") assert_equal(actual, expected) def test_sortby(self) -> None: ds = Dataset( { "A": DataArray( [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])] ), "B": DataArray([[5, 6], [7, 8], [9, 10]], dims=["x", "y"]), } ) sorted1d = Dataset( { "A": DataArray( [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])] ), "B": DataArray([[9, 10], [7, 8], [5, 6]], dims=["x", "y"]), } ) sorted2d = Dataset( { "A": DataArray( [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])] ), "B": DataArray([[10, 9], [8, 7], [6, 5]], dims=["x", "y"]), } ) expected = sorted1d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) actual = ds.sortby(dax) assert_equal(actual, expected) # test descending order sort actual = ds.sortby(dax, ascending=False) assert_equal(actual, ds) # test alignment (fills in nan for 'c') dax_short = DataArray([98, 97], [("x", ["b", "a"])]) actual = ds.sortby(dax_short) assert_equal(actual, expected) # test 1-D lexsort # dax0 is sorted first to give indices of [1, 2, 0] # and then dax1 would be used to 
move index 2 ahead of 1 dax0 = DataArray([100, 95, 95], [("x", ["c", "b", "a"])]) dax1 = DataArray([0, 1, 0], [("x", ["c", "b", "a"])]) actual = ds.sortby([dax0, dax1]) # lexsort underneath gives [2, 1, 0] assert_equal(actual, expected) expected = sorted2d # test multi-dim sort by 1D dataarray values day = DataArray([90, 80], [("y", [1, 0])]) actual = ds.sortby([day, dax]) assert_equal(actual, expected) # test exception-raising with pytest.raises(KeyError): actual = ds.sortby("z") with pytest.raises(ValueError) as excinfo: actual = ds.sortby(ds["A"]) assert "DataArray is not 1-D" in str(excinfo.value) expected = sorted1d actual = ds.sortby("x") assert_equal(actual, expected) # test pandas.MultiIndex indices = (("b", 1), ("b", 0), ("a", 1), ("a", 0)) midx = pd.MultiIndex.from_tuples(indices, names=["one", "two"]) ds_midx = Dataset( { "A": DataArray( [[1, 2], [3, 4], [5, 6], [7, 8]], [("x", midx), ("y", [1, 0])] ), "B": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=["x", "y"]), } ) actual = ds_midx.sortby("x") midx_reversed = pd.MultiIndex.from_tuples( tuple(reversed(indices)), names=["one", "two"] ) expected = Dataset( { "A": DataArray( [[7, 8], [5, 6], [3, 4], [1, 2]], [("x", midx_reversed), ("y", [1, 0])], ), "B": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=["x", "y"]), } ) assert_equal(actual, expected) # multi-dim sort by coordinate objects expected = sorted2d actual = ds.sortby(["x", "y"]) assert_equal(actual, expected) # test descending order sort actual = ds.sortby(["x", "y"], ascending=False) assert_equal(actual, ds) def test_attribute_access(self) -> None: ds = create_test_data(seed=1) for key in ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]: assert_equal(ds[key], getattr(ds, key)) assert key in dir(ds) for key in ["dim3", "dim1", "numbers"]: assert_equal(ds["var3"][key], getattr(ds.var3, key)) assert key in dir(ds["var3"]) # attrs assert ds["var3"].attrs["foo"] == ds.var3.foo assert "foo" in dir(ds["var3"]) def test_ipython_key_completion(self) -> None: ds = create_test_data(seed=1) actual = ds._ipython_key_completions_() expected = ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"] for item in actual: ds[item] # should not raise assert sorted(actual) == sorted(expected) # for dataarray actual = ds["var3"]._ipython_key_completions_() expected = ["dim3", "dim1", "numbers"] for item in actual: ds["var3"][item] # should not raise assert sorted(actual) == sorted(expected) # MultiIndex ds_midx = ds.stack(dim12=["dim2", "dim3"]) actual = ds_midx._ipython_key_completions_() expected = [ "var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers", "dim12", ] for item in actual: ds_midx[item] # should not raise assert sorted(actual) == sorted(expected) # coords actual = ds.coords._ipython_key_completions_() expected = ["time", "dim1", "dim2", "dim3", "numbers"] for item in actual: ds.coords[item] # should not raise assert sorted(actual) == sorted(expected) actual = ds["var3"].coords._ipython_key_completions_() expected = ["dim1", "dim3", "numbers"] for item in actual: ds["var3"].coords[item] # should not raise assert sorted(actual) == sorted(expected) coords = Coordinates(ds.coords) actual = coords._ipython_key_completions_() expected = ["time", "dim2", "dim3", "numbers"] for item in actual: coords[item] # should not raise assert sorted(actual) == sorted(expected) # data_vars actual = ds.data_vars._ipython_key_completions_() expected = ["var1", "var2", "var3", "dim1"] for item in actual: ds.data_vars[item] # should not raise assert 
sorted(actual) == sorted(expected) def test_polyfit_output(self) -> None: ds = create_test_data(seed=1) out = ds.polyfit("dim2", 2, full=False) assert "var1_polyfit_coefficients" in out out = ds.polyfit("dim1", 2, full=True) assert "var1_polyfit_coefficients" in out assert "dim1_matrix_rank" in out out = ds.polyfit("time", 2) assert len(out.data_vars) == 0 def test_polyfit_weighted(self) -> None: ds = create_test_data(seed=1) ds = ds.broadcast_like(ds) # test more than 2 dimensions (issue #9972) ds_copy = ds.copy(deep=True) expected = ds.polyfit("dim2", 2) actual = ds.polyfit("dim2", 2, w=np.ones(ds.sizes["dim2"])) xr.testing.assert_identical(expected, actual) # Make sure weighted polyfit does not change the original object (issue #5644) xr.testing.assert_identical(ds, ds_copy) def test_polyfit_coord(self) -> None: # Make sure polyfit works when given a non-dimension coordinate. ds = create_test_data(seed=1) out = ds.polyfit("numbers", 2, full=False) assert "var3_polyfit_coefficients" in out assert "dim1" in out.dims assert "dim2" not in out assert "dim3" not in out def test_polyfit_coord_output(self) -> None: da = xr.DataArray( [1, 3, 2], dims=["x"], coords=dict(x=["a", "b", "c"], y=("x", [0, 1, 2])) ) out = da.polyfit("y", deg=1)["polyfit_coefficients"] assert out.sel(degree=0).item() == pytest.approx(1.5) assert out.sel(degree=1).item() == pytest.approx(0.5) def test_polyfit_warnings(self) -> None: ds = create_test_data(seed=1) with warnings.catch_warnings(record=True) as ws: ds.var1.polyfit("dim2", 10, full=False) assert len(ws) == 1 assert ws[0].category == RankWarning ds.var1.polyfit("dim2", 10, full=True) assert len(ws) == 1 def test_polyfit_polyval(self) -> None: da = xr.DataArray( np.arange(1, 10).astype(np.float64), dims=["x"], coords=dict(x=np.arange(9)) ) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) # polyval introduces very small errors (1e-16 here) xr.testing.assert_allclose(da_fitval, da) da = da.assign_coords(x=xr.date_range("2001-01-01", periods=9, freq="YS")) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) xr.testing.assert_allclose(da_fitval, da, rtol=1e-3) @requires_cftime def test_polyfit_polyval_cftime(self) -> None: da = xr.DataArray( np.arange(1, 10).astype(np.float64), dims=["x"], coords=dict( x=xr.date_range("2001-01-01", periods=9, freq="YS", calendar="noleap") ), ) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) np.testing.assert_allclose(da_fitval, da) @staticmethod def _test_data_var_interior( original_data_var, padded_data_var, padded_dim_name, expected_pad_values ): np.testing.assert_equal( np.unique(padded_data_var.isel({padded_dim_name: [0, -1]})), expected_pad_values, ) np.testing.assert_array_equal( padded_data_var.isel({padded_dim_name: slice(1, -1)}), original_data_var ) @pytest.mark.parametrize("padded_dim_name", ["dim1", "dim2", "dim3", "time"]) @pytest.mark.parametrize( ["constant_values"], [ pytest.param(None, id="default"), pytest.param(42, id="scalar"), pytest.param((42, 43), id="tuple"), pytest.param({"dim1": 42, "dim2": 43}, id="per dim scalar"), pytest.param({"dim1": (42, 43), "dim2": (43, 44)}, id="per dim tuple"), pytest.param({"var1": 42, "var2": (42, 43)}, id="per var"), pytest.param({"var1": 42, "dim1": (42, 43)}, id="mixed"), ], ) def test_pad(self, padded_dim_name, constant_values) -> None: ds = create_test_data(seed=1) padded = ds.pad({padded_dim_name: (1, 1)}, constant_values=constant_values) # test 
padded dim values and size for ds_dim_name, ds_dim in ds.sizes.items(): if ds_dim_name == padded_dim_name: np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim + 2) if ds_dim_name in padded.coords: assert padded[ds_dim_name][[0, -1]].isnull().all() else: np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim) # check if coord "numbers" with dimension dim3 is padded correctly if padded_dim_name == "dim3": assert padded["numbers"][[0, -1]].isnull().all() # warning: passes but dtype changes from int to float np.testing.assert_array_equal(padded["numbers"][1:-1], ds["numbers"]) # test if data_vars are padded with correct values for data_var_name, data_var in padded.data_vars.items(): if padded_dim_name in data_var.dims: if utils.is_dict_like(constant_values): if ( expected := constant_values.get(data_var_name, None) ) is not None or ( expected := constant_values.get(padded_dim_name, None) ) is not None: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, expected ) else: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, 0 ) elif constant_values: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, constant_values ) else: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, np.nan ) else: assert_array_equal(data_var, ds[data_var_name]) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None: ds = xr.Dataset( {"a": ("x", [1, 2], attrs), "b": ("y", [1, 2], attrs)}, coords={"c": ("x", [-1, 1], attrs), "d": ("y", [-1, 1], attrs)}, attrs=attrs, ) expected = xr.Dataset( {"a": ("x", [0, 1, 2, 0], expected), "b": ("y", [1, 2], attrs)}, coords={ "c": ("x", [np.nan, -1, 1, np.nan], expected), "d": ("y", [-1, 1], attrs), }, attrs=expected, ) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = ds.pad({"x": (1, 1)}, mode="constant", constant_values=0) xr.testing.assert_identical(actual, expected) actual = ds.pad( {"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs ) xr.testing.assert_identical(actual, expected) def test_astype_attrs(self) -> None: data = create_test_data(seed=123) data.attrs["foo"] = "bar" assert data.attrs == data.astype(float).attrs assert data.var1.attrs == data.astype(float).var1.attrs assert not data.astype(float, keep_attrs=False).attrs assert not data.astype(float, keep_attrs=False).var1.attrs @pytest.mark.parametrize("parser", ["pandas", "python"]) @pytest.mark.parametrize( "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])] ) @pytest.mark.parametrize( "backend", ["numpy", pytest.param("dask", marks=[requires_dask])] ) def test_query(self, backend, engine, parser) -> None: """Test querying a dataset.""" # setup test data np.random.seed(42) a = np.arange(0, 10, 1) b = np.random.randint(0, 100, size=10) c = np.linspace(0, 1, 20) d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype( object ) e = np.arange(0, 10 * 20).reshape(10, 20) f = np.random.normal(0, 1, size=(10, 20, 30)) if backend == "numpy": ds = Dataset( { "a": ("x", a), "b": ("x", b), "c": ("y", c), "d": ("z", d), "e": (("x", "y"), e), "f": (("x", "y", "z"), f), }, coords={ "a2": ("x", a), "b2": ("x", b), "c2": ("y", c), "d2":
("z", d), "e2": (("x", "y"), e), "f2": (("x", "y", "z"), f), }, ) elif backend == "dask": ds = Dataset( { "a": ("x", da.from_array(a, chunks=3)), "b": ("x", da.from_array(b, chunks=3)), "c": ("y", da.from_array(c, chunks=7)), "d": ("z", da.from_array(d, chunks=12)), "e": (("x", "y"), da.from_array(e, chunks=(3, 7))), "f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))), }, coords={ "a2": ("x", a), "b2": ("x", b), "c2": ("y", c), "d2": ("z", d), "e2": (("x", "y"), e), "f2": (("x", "y", "z"), f), }, ) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(x="a2 > 5", engine=engine, parser=parser) expect = ds.isel(x=(a > 5)) assert_identical(expect, actual) # query single dim, single variable, via dict with raise_if_dask_computes(): actual = ds.query(dict(x="a2 > 5"), engine=engine, parser=parser) expect = ds.isel(dict(x=(a > 5))) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(x="b2 > 50", engine=engine, parser=parser) expect = ds.isel(x=(b > 50)) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(y="c2 < .5", engine=engine, parser=parser) expect = ds.isel(y=(c < 0.5)) assert_identical(expect, actual) # query single dim, single string variable if parser == "pandas": # N.B., this query currently only works with the pandas parser # xref https://github.com/pandas-dev/pandas/issues/40436 with raise_if_dask_computes(): actual = ds.query(z='d2 == "bar"', engine=engine, parser=parser) expect = ds.isel(z=(d == "bar")) assert_identical(expect, actual) # query single dim, multiple variables with raise_if_dask_computes(): actual = ds.query(x="(a2 > 5) & (b2 > 50)", engine=engine, parser=parser) expect = ds.isel(x=((a > 5) & (b > 50))) assert_identical(expect, actual) # query single dim, multiple variables with computation with raise_if_dask_computes(): actual = ds.query(x="(a2 * b2) > 250", engine=engine, parser=parser) expect = ds.isel(x=(a * b) > 250) assert_identical(expect, actual) # check pandas query syntax is supported if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( x="(a2 > 5) and (b2 > 50)", engine=engine, parser=parser ) expect = ds.isel(x=((a > 5) & (b > 50))) assert_identical(expect, actual) # query multiple dims via kwargs with raise_if_dask_computes(): actual = ds.query(x="a2 > 5", y="c2 < .5", engine=engine, parser=parser) expect = ds.isel(x=(a > 5), y=(c < 0.5)) assert_identical(expect, actual) # query multiple dims via kwargs if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( x="a2 > 5", y="c2 < .5", z="d2 == 'bar'", engine=engine, parser=parser, ) expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == "bar")) assert_identical(expect, actual) # query multiple dims via dict with raise_if_dask_computes(): actual = ds.query( dict(x="a2 > 5", y="c2 < .5"), engine=engine, parser=parser ) expect = ds.isel(dict(x=(a > 5), y=(c < 0.5))) assert_identical(expect, actual) # query multiple dims via dict if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( dict(x="a2 > 5", y="c2 < .5", z="d2 == 'bar'"), engine=engine, parser=parser, ) expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == "bar"))) assert_identical(expect, actual) # test error handling with pytest.raises(ValueError): ds.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): ds.query(x=(a > 5)) with pytest.raises(IndexError): ds.query(y="a > 5") # wrong length 
dimension with pytest.raises(IndexError): ds.query(x="c < .5") # wrong length dimension with pytest.raises(IndexError): ds.query(x="e > 100") # wrong number of dimensions with pytest.raises(UndefinedVariableError): ds.query(x="spam > 50") # name not present # pytest tests - new tests should go here, rather than in the class. @pytest.mark.parametrize("parser", ["pandas", "python"]) def test_eval(ds, parser) -> None: """Currently much more minimal testing than `query` above, and much of the setup isn't used. But the risks are fairly low; `query` shares much of the code, and the method is currently experimental.""" actual = ds.eval("z1 + 5", parser=parser) expect = ds["z1"] + 5 assert_identical(expect, actual) # check pandas query syntax is supported if parser == "pandas": actual = ds.eval("(z1 > 5) and (z2 > 0)", parser=parser) expect = (ds["z1"] > 5) & (ds["z2"] > 0) assert_identical(expect, actual) @pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2]))) def test_isin(test_elements, backend) -> None: expected = Dataset( data_vars={ "var1": (("dim1",), [0, 1]), "var2": (("dim1",), [1, 1]), "var3": (("dim1",), [0, 1]), } ).astype("bool") if backend == "dask": expected = expected.chunk() result = Dataset( data_vars={ "var1": (("dim1",), [0, 1]), "var2": (("dim1",), [1, 2]), "var3": (("dim1",), [0, 1]), } ).isin(test_elements) assert_equal(result, expected) def test_isin_dataset() -> None: ds = Dataset({"x": [1, 2]}) with pytest.raises(TypeError): ds.isin(ds) @pytest.mark.parametrize( "unaligned_coords", ( {"x": [2, 1, 0]}, {"x": (["x"], np.asarray([2, 1, 0]))}, {"x": (["x"], np.asarray([1, 2, 0]))}, {"x": pd.Index([2, 1, 0])}, {"x": Variable(dims="x", data=[0, 2, 1])}, {"x": IndexVariable(dims="x", data=[0, 1, 2])}, {"y": 42}, {"y": ("x", [2, 1, 0])}, {"y": ("x", np.asarray([2, 1, 0]))}, {"y": (["x"], np.asarray([2, 1, 0]))}, ), ) @pytest.mark.parametrize("coords", ({"x": ("x", [0, 1, 2])}, {"x": [0, 1, 2]})) def test_dataset_constructor_aligns_to_explicit_coords( unaligned_coords, coords ) -> None: a = xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords) expected = xr.Dataset(coords=coords) expected["a"] = a result = xr.Dataset({"a": a}, coords=coords) assert_equal(expected, result) def test_error_message_on_set_supplied() -> None: with pytest.raises(TypeError, match="has invalid type "): xr.Dataset(dict(date=[1, 2, 3], sec={4})) @pytest.mark.parametrize("unaligned_coords", ({"y": ("b", np.asarray([2, 1, 0]))},)) def test_constructor_raises_with_invalid_coords(unaligned_coords) -> None: with pytest.raises(ValueError, match="not a subset of the DataArray dimensions"): xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords) @pytest.mark.parametrize("ds", [3], indirect=True) def test_dir_expected_attrs(ds) -> None: some_expected_attrs = {"pipe", "mean", "isnull", "var1", "dim2", "numbers"} result = dir(ds) assert set(result) >= some_expected_attrs def test_dir_non_string(ds) -> None: # add a numbered key to ensure this doesn't break dir ds[5] = "foo" result = dir(ds) assert 5 not in result # GH2172 sample_data = np.random.uniform(size=[2, 2000, 10000]) x = xr.Dataset({"sample_data": (sample_data.shape, sample_data)}) x2 = x["sample_data"] dir(x2) def test_dir_unicode(ds) -> None: ds["unicode"] = "uni" result = dir(ds) assert "unicode" in result def test_raise_no_warning_for_nan_in_binary_ops() -> None: with assert_no_warnings(): _ = Dataset(data_vars={"x": ("y", [1, 2, np.nan])}) > 0 @pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True) def test_raise_no_warning_assert_close(ds) -> None: assert_allclose(ds, ds) @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize("edge_order", [1, 2]) def test_differentiate(dask, edge_order) -> None: rs = np.random.default_rng(42) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))}, ) if dask and has_dask: da = da.chunk({"x": 4}) ds = xr.Dataset({"var": da}) # along x actual = da.differentiate("x", edge_order) expected_x = xr.DataArray( np.gradient(da, da["x"], axis=0, edge_order=edge_order), dims=da.dims, coords=da.coords, ) assert_equal(expected_x, actual) assert_equal( ds["var"].differentiate("x", edge_order=edge_order), ds.differentiate("x", edge_order=edge_order)["var"], ) # coordinate should not change assert_equal(da["x"], actual["x"]) # along y actual = da.differentiate("y", edge_order) expected_y = xr.DataArray( np.gradient(da, da["y"], axis=1, edge_order=edge_order), dims=da.dims, coords=da.coords, ) assert_equal(expected_y, actual) assert_equal(actual, ds.differentiate("y", edge_order=edge_order)["var"]) assert_equal( ds["var"].differentiate("y", edge_order=edge_order), ds.differentiate("y", edge_order=edge_order)["var"], ) with pytest.raises(ValueError): da.differentiate("x2d") @pytest.mark.parametrize("dask", [True, False]) def test_differentiate_datetime(dask) -> None: rs = np.random.default_rng(42) coord = np.array( [ "2004-07-13", "2006-01-13", "2010-08-13", "2010-09-13", "2010-10-11", "2010-12-13", "2011-02-13", "2012-08-13", ], dtype="datetime64", ) da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))}, ) if dask and has_dask: da = da.chunk({"x": 4}) # along x actual = da.differentiate("x", edge_order=1, datetime_unit="D") expected_x = xr.DataArray( np.gradient( da, da["x"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1 ), dims=da.dims, coords=da.coords, ) assert_equal(expected_x, actual) actual2 = da.differentiate("x", edge_order=1, datetime_unit="h") assert np.allclose(actual, actual2 * 24) # for datetime variable actual = da["x"].differentiate("x", edge_order=1, datetime_unit="D") assert np.allclose(actual, 1.0) # with different date unit da = xr.DataArray(coord.astype("datetime64[ms]"), dims=["x"], coords={"x": coord}) actual = da.differentiate("x", edge_order=1) assert np.allclose(actual, 1.0) @requires_cftime @pytest.mark.parametrize("dask", [True, False]) def test_differentiate_cftime(dask) -> None: rs = np.random.default_rng(42) coord = xr.date_range("2000", periods=8, freq="2ME", use_cftime=True) da = xr.DataArray( rs.random((8, 6)), coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))}, dims=["time", "y"], ) if dask and has_dask: da = da.chunk({"time": 4}) actual = da.differentiate("time", edge_order=1, datetime_unit="D") expected_data = np.gradient( da, da["time"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1 ) expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims) assert_equal(expected, actual) actual2 = da.differentiate("time", edge_order=1, datetime_unit="h") assert_allclose(actual, actual2 * 24) # Test the differentiation of datetimes themselves actual = da["time"].differentiate("time", edge_order=1, datetime_unit="D") assert_allclose(actual, xr.ones_like(da["time"]).astype(float)) @pytest.mark.parametrize("dask", [True, False]) 
def test_integrate(dask) -> None: rs = np.random.default_rng(42) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={ "x": coord, "x2": (("x",), rs.random(8)), "z": 3, "x2d": (("x", "y"), rs.random((8, 6))), }, ) if dask and has_dask: da = da.chunk({"x": 4}) ds = xr.Dataset({"var": da}) # along x actual = da.integrate("x") # coordinate that contains x should be dropped. expected_x = xr.DataArray( trapezoid(da.compute(), da["x"], axis=0), dims=["y"], coords={k: v for k, v in da.coords.items() if "x" not in v.dims}, ) assert_allclose(expected_x, actual.compute()) assert_equal(ds["var"].integrate("x"), ds.integrate("x")["var"]) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) # along y actual = da.integrate("y") expected_y = xr.DataArray( trapezoid(da, da["y"], axis=1), dims=["x"], coords={k: v for k, v in da.coords.items() if "y" not in v.dims}, ) assert_allclose(expected_y, actual.compute()) assert_equal(actual, ds.integrate("y")["var"]) assert_equal(ds["var"].integrate("y"), ds.integrate("y")["var"]) # along x and y actual = da.integrate(("y", "x")) assert actual.ndim == 0 with pytest.raises(ValueError): da.integrate("x2d") @requires_scipy @pytest.mark.parametrize("dask", [True, False]) def test_cumulative_integrate(dask) -> None: rs = np.random.default_rng(43) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={ "x": coord, "x2": (("x",), rs.random(8)), "z": 3, "x2d": (("x", "y"), rs.random((8, 6))), }, ) if dask and has_dask: da = da.chunk({"x": 4}) ds = xr.Dataset({"var": da}) # along x actual = da.cumulative_integrate("x") from scipy.integrate import cumulative_trapezoid expected_x = xr.DataArray( cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0), # type: ignore[call-overload,unused-ignore] dims=["x", "y"], coords=da.coords, ) assert_allclose(expected_x, actual.compute()) assert_equal( ds["var"].cumulative_integrate("x"), ds.cumulative_integrate("x")["var"], ) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) # along y actual = da.cumulative_integrate("y") expected_y = xr.DataArray( cumulative_trapezoid(da, da["y"], axis=1, initial=0.0), # type: ignore[call-overload,unused-ignore] dims=["x", "y"], coords=da.coords, ) assert_allclose(expected_y, actual.compute()) assert_equal(actual, ds.cumulative_integrate("y")["var"]) assert_equal( ds["var"].cumulative_integrate("y"), ds.cumulative_integrate("y")["var"], ) # along x and y actual = da.cumulative_integrate(("y", "x")) assert actual.ndim == 2 with pytest.raises(ValueError): da.cumulative_integrate("x2d") @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize("which_datetime", ["np", "cftime"]) def test_trapezoid_datetime(dask, which_datetime) -> None: rs = np.random.default_rng(42) coord: ArrayLike if which_datetime == "np": coord = np.array( [ "2004-07-13", "2006-01-13", "2010-08-13", "2010-09-13", "2010-10-11", "2010-12-13", "2011-02-13", "2012-08-13", ], dtype="datetime64", ) else: if not has_cftime: pytest.skip("Test requires cftime.") coord = xr.date_range("2000", periods=8, freq="2D", use_cftime=True) da = xr.DataArray( rs.random((8, 6)), coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))}, dims=["time", "y"], ) if dask and has_dask: da = da.chunk({"time": 4}) actual = da.integrate("time", datetime_unit="D") expected_data = 
trapezoid( da.compute().data, duck_array_ops.datetime_to_numeric(da["time"].data, datetime_unit="D"), axis=0, ) expected = xr.DataArray( expected_data, dims=["y"], coords={k: v for k, v in da.coords.items() if "time" not in v.dims}, ) assert_allclose(expected, actual.compute()) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) actual2 = da.integrate("time", datetime_unit="h") assert_allclose(actual, actual2 / 24.0) def test_no_dict() -> None: d = Dataset() with pytest.raises(AttributeError): _ = d.__dict__ def test_subclass_slots() -> None: """Test that Dataset subclasses must explicitly define ``__slots__``. .. note:: As of 0.13.0, this is actually mitigated into a FutureWarning for any class defined outside of the xarray package. """ with pytest.raises(AttributeError) as e: class MyDS(Dataset): pass assert str(e.value) == "MyDS must explicitly define __slots__" def test_weakref() -> None: """Classes with __slots__ are incompatible with the weakref module unless they explicitly state __weakref__ among their slots """ from weakref import ref ds = Dataset() r = ref(ds) assert r() is ds def test_deepcopy_obj_array() -> None: x0 = Dataset(dict(foo=DataArray(np.array([object()])))) x1 = deepcopy(x0) assert x0["foo"].values[0] is not x1["foo"].values[0] def test_deepcopy_recursive() -> None: # GH:issue:7111 # direct recursion ds = xr.Dataset({"a": (["x"], [1, 2])}) ds.attrs["other"] = ds # TODO: cannot use assert_identical on recursive Vars yet... # let's just ensure that deep copy works without RecursionError ds.copy(deep=True) # indirect recursion ds2 = xr.Dataset({"b": (["y"], [3, 4])}) ds.attrs["other"] = ds2 ds2.attrs["other"] = ds # TODO: cannot use assert_identical on recursive Vars yet... # let's just ensure that deep copy works without RecursionError ds.copy(deep=True) ds2.copy(deep=True) def test_clip(ds) -> None: result = ds.clip(min=0.5) assert all((result.min(...) >= 0.5).values()) result = ds.clip(max=0.5) assert all((result.max(...) <= 0.5).values()) result = ds.clip(min=0.25, max=0.75) assert all((result.min(...) >= 0.25).values()) assert all((result.max(...)
<= 0.75).values()) result = ds.clip(min=ds.mean("y"), max=ds.mean("y")) assert result.sizes == ds.sizes class TestDropDuplicates: @pytest.mark.parametrize("keep", ["first", "last", False]) def test_drop_duplicates_1d(self, keep) -> None: ds = xr.Dataset( {"a": ("time", [0, 5, 6, 7]), "b": ("time", [9, 3, 8, 2])}, coords={"time": [0, 0, 1, 2]}, ) if keep == "first": a = [0, 6, 7] b = [9, 8, 2] time = [0, 1, 2] elif keep == "last": a = [5, 6, 7] b = [3, 8, 2] time = [0, 1, 2] else: a = [6, 7] b = [8, 2] time = [1, 2] expected = xr.Dataset( {"a": ("time", a), "b": ("time", b)}, coords={"time": time} ) result = ds.drop_duplicates("time", keep=keep) assert_equal(expected, result) with pytest.raises( ValueError, match=re.escape( "Dimensions ('space',) not found in data dimensions ('time',)" ), ): ds.drop_duplicates("space", keep=keep) class TestNumpyCoercion: def test_from_numpy(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])}) assert_identical(ds.as_numpy(), ds) @requires_dask def test_from_dask(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])}) ds_chunked = ds.chunk(1) assert_identical(ds_chunked.as_numpy(), ds.compute()) @requires_pint def test_from_pint(self) -> None: from pint import Quantity arr = np.array([1, 2, 3]) ds = xr.Dataset( {"a": ("x", Quantity(arr, units="Pa"))}, coords={"lat": ("x", Quantity(arr + 3, units="m"))}, ) expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)}) assert_identical(ds.as_numpy(), expected) @requires_sparse def test_from_sparse(self) -> None: import sparse arr = np.diagflat([1, 2, 3]) sparr = sparse.COO.from_numpy(arr) ds = xr.Dataset( {"a": (["x", "y"], sparr)}, coords={"elev": (("x", "y"), sparr + 3)} ) expected = xr.Dataset( {"a": (["x", "y"], arr)}, coords={"elev": (("x", "y"), arr + 3)} ) assert_identical(ds.as_numpy(), expected) @requires_cupy def test_from_cupy(self) -> None: import cupy as cp arr = np.array([1, 2, 3]) ds = xr.Dataset( {"a": ("x", cp.array(arr))}, coords={"lat": ("x", cp.array(arr + 3))} ) expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)}) assert_identical(ds.as_numpy(), expected) @requires_dask @requires_pint def test_from_pint_wrapping_dask(self) -> None: import dask from pint import Quantity arr = np.array([1, 2, 3]) d = dask.array.from_array(arr) ds = xr.Dataset( {"a": ("x", Quantity(d, units="Pa"))}, coords={"lat": ("x", Quantity(d, units="m") * 2)}, ) result = ds.as_numpy() expected = xr.Dataset({"a": ("x", arr)}, coords={"lat": ("x", arr * 2)}) assert_identical(result, expected) def test_string_keys_typing() -> None: """Tests that string keys to `variables` are permitted by mypy""" da = xr.DataArray(np.arange(10), dims=["x"]) ds = xr.Dataset(dict(x=da)) mapping = {"y": da} ds.assign(variables=mapping) def test_transpose_error() -> None: # Transpose dataset with list as argument # Should raise error ds = xr.Dataset({"foo": (("x", "y"), [[21]]), "bar": (("x", "y"), [[12]])}) with pytest.raises( TypeError, match=re.escape( "transpose requires dim to be passed as multiple arguments. Expected `'y', 'x'`. 
Received `['y', 'x']` instead" ), ): ds.transpose(["y", "x"]) # type: ignore[arg-type] xarray-2025.12.0/xarray/tests/test_dataset_typing.yml000066400000000000000000000202451511464676000226170ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import Dataset ds = Dataset().pipe(lambda data: data) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import Dataset ds = Dataset().pipe(lambda data, arg: arg, "foo") reveal_type(ds) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import Dataset answer = Dataset().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import Dataset # Call to pipe missing argument for lambda parameter `arg` ds = Dataset().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import Dataset # Call to pipe with extra argument for lambda ds = Dataset().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import Dataset def f(ds: Dataset, arg: int) -> Dataset: return ds # Call to pipe missing argument for function parameter `arg` ds = Dataset().pipe(f) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Dataset, int], Dataset]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import Dataset def f(ds: Dataset, arg: int) -> Dataset: return ds # Call to pipe missing keyword for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Dataset, int], Dataset]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe missing argument for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types 
"Callable[[Dataset, int, NamedArg(int, 'kwonly')], Dataset]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe missing keyword for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Dataset, int, NamedArg(int, 'kwonly')], Dataset]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword skip: True # mypy 1.18.1 outputs "defined here" notes without line numbers that pytest-mypy-plugins can't parse # See: https://github.com/python/mypy/issues/19257 (mypy issue about missing line numbers) main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe using wrong keyword: `kw` instead of `kwonly` ds = Dataset().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords" [call-arg] # Note: mypy 1.18.1 also outputs a "defined here" note that pytest-mypy-plugins can't parse - case: test_mypy_pipe_tuple_return_type_dataset main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> Dataset: return ds ds = Dataset().pipe((f, "ds"), 42) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> int: return arg answer = Dataset().pipe((f, "ds"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> Dataset: return ds # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. ds = Dataset().pipe((f, "ds")) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. 
    ds = Dataset().pipe(lambda data, arg: f(arg, data))
  out: |
    main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Dataset]" [call-overload]
    main:17: note: Possible overload variants:
    main:17: note: def [P`9, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T
    main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T

- case: test_mypy_pipe_tuple_extra_arg
  main: |
    from xarray import Dataset

    def f(arg: int, ds: Dataset) -> Dataset:
        return ds

    # Since we cannot provide a precise type annotation when passing a tuple to
    # pipe, there's not enough information for type analysis to indicate that
    # we are providing too many args for `f`, so we get no error here.
    ds = Dataset().pipe((f, "ds"), 42, "foo")

    reveal_type(ds)  # N: Revealed type is "xarray.core.dataset.Dataset"

    # Rather than passing a tuple, passing a lambda that calls `f` with args in
    # the correct order allows for proper type analysis, indicating (perhaps
    # somewhat cryptically) that we passed too many arguments.

    ds = Dataset().pipe(lambda data, arg: f(arg, data), 42, "foo")
  out: |
    main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any, Any], Dataset]", "int", "str" [call-overload]
    main:17: note: Possible overload variants:
    main:17: note: def [P`9, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T
    main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
xarray-2025.12.0/xarray/tests/test_datatree.py000066400000000000000000002677441511464676000212340ustar00rootroot00000000000000
import re
import sys
import typing
from collections.abc import Callable, Mapping
from copy import copy, deepcopy
from textwrap import dedent

import numpy as np
import pytest

import xarray as xr
from xarray import DataArray, Dataset
from xarray.core.coordinates import DataTreeCoordinates
from xarray.core.datatree import DataTree
from xarray.core.treenode import NotFoundInTreeError
from xarray.testing import assert_equal, assert_identical
from xarray.tests import (
    assert_array_equal,
    create_test_data,
    requires_dask,
    source_ndarray,
)

ON_WINDOWS = sys.platform == "win32"


class TestTreeCreation:
    def test_empty(self) -> None:
        dt = DataTree(name="root")
        assert dt.name == "root"
        assert dt.parent is None
        assert dt.children == {}
        assert_identical(dt.to_dataset(), xr.Dataset())

    def test_name(self) -> None:
        dt = DataTree()
        assert dt.name is None

        dt = DataTree(name="foo")
        assert dt.name == "foo"

        dt.name = "bar"
        assert dt.name == "bar"

        dt = DataTree(children={"foo": DataTree()})
        assert dt["/foo"].name == "foo"
        with pytest.raises(
            ValueError, match="cannot set the name of a node which already has a parent"
        ):
            dt["/foo"].name = "bar"

        detached = dt["/foo"].copy()
        assert detached.name == "foo"
        detached.name = "bar"
        assert detached.name == "bar"

    def test_bad_names(self) -> None:
        with pytest.raises(TypeError):
            DataTree(name=5)  # type: ignore[arg-type]

        with pytest.raises(ValueError):
            DataTree(name="folder/data")

    def test_data_arg(self) -> None:
        ds = xr.Dataset({"foo": 42})
        tree: DataTree = DataTree(dataset=ds)
        assert_identical(tree.to_dataset(), ds)

        with pytest.raises(TypeError):
            DataTree(dataset=xr.DataArray(42, name="foo"))  # type: ignore[arg-type]

    def test_child_data_not_copied(self) -> None:
        # regression test for https://github.com/pydata/xarray/issues/9683
        class NoDeepCopy:
            def __deepcopy__(self, memo):
                raise TypeError("class can't be deepcopied")

        da = xr.DataArray(NoDeepCopy())
        ds = xr.Dataset({"var": da})
        dt1 = xr.DataTree(ds)
        dt2 = xr.DataTree(ds, children={"child": dt1})
        dt3 = xr.DataTree.from_dict({"/": ds, "child": ds})
        assert_identical(dt2, dt3)


class TestFamilyTree:
    def test_dont_modify_children_inplace(self) -> None:
        # GH issue 9196
        child = DataTree()
        DataTree(children={"child": child})
        assert child.parent is None

    def test_create_two_children(self) -> None:
        root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])})
        set1_data = xr.Dataset({"a": 0, "b": 1})
        root = DataTree.from_dict(
            {"/": root_data, "/set1": set1_data, "/set1/set2": None}
        )
        assert root["/set1"].name == "set1"
        assert root["/set1/set2"].name == "set2"

    def test_create_full_tree(self, simple_datatree) -> None:
        d = simple_datatree.to_dict()
        d_keys = list(d.keys())

        expected_keys = [
            "/",
            "/set1",
            "/set2",
            "/set3",
            "/set1/set1",
            "/set1/set2",
            "/set2/set1",
        ]

        assert d_keys == expected_keys


class TestNames:
    def test_child_gets_named_on_attach(self) -> None:
        sue = DataTree()
        mary = DataTree(children={"Sue": sue})
        assert mary.children["Sue"].name == "Sue"

    def test_dataset_containing_slashes(self) -> None:
        xda: xr.DataArray = xr.DataArray(
            [[1, 2]],
            coords={"label": ["a"], "R30m/y": [30, 60]},
        )
        xds: xr.Dataset = xr.Dataset({"group/subgroup/my_variable": xda})
        with pytest.raises(
            ValueError,
            match=re.escape(
                "Given variables have names containing the '/' character: "
                "['R30m/y', 'group/subgroup/my_variable']. "
                "Variables stored in DataTree objects cannot have names containing '/' characters, "
                "as this would make path-like access to variables ambiguous."
            ),
        ):
            DataTree(xds)


class TestPaths:
    def test_path_property(self) -> None:
        john = DataTree.from_dict(
            {
                "/Mary/Sue": DataTree(),
            }
        )
        assert john["/Mary/Sue"].path == "/Mary/Sue"
        assert john.path == "/"

    def test_path_roundtrip(self) -> None:
        john = DataTree.from_dict(
            {
                "/Mary/Sue": DataTree(),
            }
        )
        assert john["/Mary/Sue"].name == "Sue"

    def test_same_tree(self) -> None:
        john = DataTree.from_dict(
            {
                "/Mary": DataTree(),
                "/Kate": DataTree(),
            }
        )
        mary = john.children["Mary"]
        kate = john.children["Kate"]
        assert mary.same_tree(kate)

    def test_relative_paths(self) -> None:
        john = DataTree.from_dict(
            {
                "/Mary/Sue": DataTree(),
                "/Annie": DataTree(),
            }
        )
        sue = john.children["Mary"].children["Sue"]
        annie = john.children["Annie"]
        assert sue.relative_to(john) == "Mary/Sue"
        assert john.relative_to(sue) == "../.."
        assert annie.relative_to(sue) == "../../Annie"
        assert sue.relative_to(annie) == "../Mary/Sue"
        assert sue.relative_to(sue) == "."

        evil_kate = DataTree()
        with pytest.raises(
            NotFoundInTreeError, match="nodes do not lie within the same tree"
        ):
            sue.relative_to(evil_kate)


class TestStoreDatasets:
    def test_create_with_data(self) -> None:
        dat = xr.Dataset({"a": 0})
        john = DataTree(name="john", dataset=dat)

        assert_identical(john.to_dataset(), dat)

        with pytest.raises(TypeError):
            DataTree(name="mary", dataset="junk")  # type: ignore[arg-type]

    def test_set_data(self) -> None:
        john = DataTree(name="john")
        dat = xr.Dataset({"a": 0})
        john.dataset = dat  # type: ignore[assignment,unused-ignore]

        assert_identical(john.to_dataset(), dat)

        with pytest.raises(TypeError):
            john.dataset = "junk"  # type: ignore[assignment]

    def test_has_data(self) -> None:
        john = DataTree(name="john", dataset=xr.Dataset({"a": 0}))
        assert john.has_data

        john_no_data = DataTree(name="john", dataset=None)
        assert not john_no_data.has_data

    def test_is_hollow(self) -> None:
        john = DataTree(dataset=xr.Dataset({"a": 0}))
        assert john.is_hollow

        eve = DataTree(children={"john": john})
        assert eve.is_hollow

        eve.dataset = xr.Dataset({"a": 1})  # type: ignore[assignment,unused-ignore]
        assert not eve.is_hollow


class TestToDataset:
    def test_to_dataset_inherited(self) -> None:
        base = xr.Dataset(coords={"a": [1], "b": 2})
        sub = xr.Dataset(coords={"c": [3]})
        tree = DataTree.from_dict({"/": base, "/sub": sub})
        subtree = typing.cast(DataTree, tree["sub"])

        assert_identical(tree.to_dataset(inherit=False), base)
        assert_identical(subtree.to_dataset(inherit=False), sub)

        sub_and_base = xr.Dataset(coords={"a": [1], "c": [3]})  # no "b"
        assert_identical(tree.to_dataset(inherit=True), base)
        assert_identical(subtree.to_dataset(inherit=True), sub_and_base)


class TestVariablesChildrenNameCollisions:
    def test_parent_already_has_variable_with_childs_name(self) -> None:
        with pytest.raises(KeyError, match="already contains a variable named a"):
            DataTree.from_dict({"/": xr.Dataset({"a": [0], "b": 1}), "/a": None})

    def test_parent_already_has_variable_with_childs_name_update(self) -> None:
        dt = DataTree(dataset=xr.Dataset({"a": [0], "b": 1}))
        with pytest.raises(ValueError, match="already contains a variable named a"):
            dt.update({"a": DataTree()})

    def test_assign_when_already_child_with_variables_name(self) -> None:
        dt = DataTree.from_dict(
            {
                "/a": DataTree(),
            }
        )

        with pytest.raises(ValueError, match="node already contains a variable"):
            dt.dataset = xr.Dataset({"a": 0})  # type: ignore[assignment,unused-ignore]

        dt.dataset = xr.Dataset()  # type: ignore[assignment,unused-ignore]

        new_ds = dt.to_dataset().assign(a=xr.DataArray(0))
        with pytest.raises(ValueError, match="node already contains a variable"):
            dt.dataset = new_ds  # type: ignore[assignment,unused-ignore]


class TestGet: ...
class TestGetItem: def test_getitem_node(self) -> None: folder1 = DataTree.from_dict( { "/results/highres": DataTree(), } ) assert folder1["results"].name == "results" assert folder1["results/highres"].name == "highres" def test_getitem_self(self) -> None: dt = DataTree() assert dt["."] is dt def test_getitem_single_data_variable(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) assert_identical(results["temp"], data["temp"]) def test_getitem_single_data_variable_from_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree.from_dict( { "/results/highres": data, } ) assert_identical(folder1["results/highres/temp"], data["temp"]) def test_getitem_nonexistent_node(self) -> None: folder1 = DataTree.from_dict({"/results": DataTree()}, name="folder1") with pytest.raises(KeyError): folder1["results/highres"] def test_getitem_nonexistent_variable(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) with pytest.raises(KeyError): results["pressure"] @pytest.mark.xfail(reason="Should be deprecated in favour of .subset") def test_getitem_multiple_data_variables(self) -> None: data = xr.Dataset({"temp": [0, 50], "p": [5, 8, 7]}) results = DataTree(name="results", dataset=data) assert_identical(results[["temp", "p"]], data[["temp", "p"]]) # type: ignore[index] @pytest.mark.xfail( reason="Indexing needs to return whole tree (GH https://github.com/xarray-contrib/datatree/issues/77)" ) def test_getitem_dict_like_selection_access_to_dataset(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) assert_identical(results[{"temp": 1}], data[{"temp": 1}]) # type: ignore[index] class TestUpdate: def test_update(self) -> None: dt = DataTree() dt.update({"foo": xr.DataArray(0), "a": DataTree()}) expected = DataTree.from_dict({"/": xr.Dataset({"foo": 0}), "a": None}) assert_equal(dt, expected) assert dt.groups == ("/", "/a") def test_update_new_named_dataarray(self) -> None: da = xr.DataArray(name="temp", data=[0, 50]) folder1 = DataTree(name="folder1") folder1.update({"results": da}) expected = da.rename("results") assert_equal(folder1["results"], expected) def test_update_doesnt_alter_child_name(self) -> None: dt = DataTree() dt.update({"foo": xr.DataArray(0), "a": DataTree(name="b")}) assert "a" in dt.children child = dt["a"] assert child.name == "a" def test_update_overwrite(self) -> None: actual = DataTree.from_dict({"a": DataTree(xr.Dataset({"x": 1}))}) actual.update({"a": DataTree(xr.Dataset({"x": 2}))}) expected = DataTree.from_dict({"a": DataTree(xr.Dataset({"x": 2}))}) assert_equal(actual, expected) def test_update_coordinates(self) -> None: expected = DataTree.from_dict({"/": xr.Dataset(coords={"a": 1})}) actual = DataTree.from_dict({"/": xr.Dataset()}) actual.update(xr.Dataset(coords={"a": 1})) assert_equal(actual, expected) def test_update_inherited_coords(self) -> None: expected = DataTree.from_dict( { "/": xr.Dataset(coords={"a": 1}), "/b": xr.Dataset(coords={"c": 1}), } ) actual = DataTree.from_dict( { "/": xr.Dataset(coords={"a": 1}), "/b": xr.Dataset(), } ) actual["/b"].update(xr.Dataset(coords={"c": 1})) assert_identical(actual, expected) # DataTree.identical() currently does not require that non-inherited # coordinates are defined identically, so we need to check this # explicitly actual_node = actual.children["b"].to_dataset(inherit=False) expected_node = expected.children["b"].to_dataset(inherit=False) assert_identical(actual_node, 
expected_node) class TestCopy: def test_copy(self, create_test_datatree) -> None: dt = create_test_datatree() for node in dt.root.subtree: node.attrs["Test"] = [1, 2, 3] for copied in [dt.copy(deep=False), copy(dt)]: assert_identical(dt, copied) for node, copied_node in zip( dt.root.subtree, copied.root.subtree, strict=True ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. # Limiting the test to data variables. for k in node.data_vars: v0 = node.variables[k] v1 = copied_node.variables[k] assert source_ndarray(v0.data) is source_ndarray(v1.data) copied_node["foo"] = xr.DataArray(data=np.arange(5), dims="z") assert "foo" not in node copied_node.attrs["foo"] = "bar" assert "foo" not in node.attrs assert node.attrs["Test"] is copied_node.attrs["Test"] def test_copy_subtree(self) -> None: dt = DataTree.from_dict({"/level1/level2/level3": xr.Dataset()}) actual = dt["/level1/level2"].copy() expected = DataTree.from_dict({"/level3": xr.Dataset()}, name="level2") assert_identical(actual, expected) def test_copy_coord_inheritance(self) -> None: tree = DataTree.from_dict( {"/": xr.Dataset(coords={"x": [0, 1]}), "/c": DataTree()} ) actual = tree.copy() node_ds = actual.children["c"].to_dataset(inherit=False) assert_identical(node_ds, xr.Dataset()) actual = tree.children["c"].copy() expected = DataTree(Dataset(coords={"x": [0, 1]}), name="c") assert_identical(expected, actual) actual = tree.children["c"].copy(inherit=False) expected = DataTree(name="c") assert_identical(expected, actual) def test_deepcopy(self, create_test_datatree) -> None: dt = create_test_datatree() for node in dt.root.subtree: node.attrs["Test"] = [1, 2, 3] for copied in [dt.copy(deep=True), deepcopy(dt)]: assert_identical(dt, copied) for node, copied_node in zip( dt.root.subtree, copied.root.subtree, strict=True ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. # Limiting the test to data variables. for k in node.data_vars: v0 = node.variables[k] v1 = copied_node.variables[k] assert source_ndarray(v0.data) is not source_ndarray(v1.data) copied_node["foo"] = xr.DataArray(data=np.arange(5), dims="z") assert "foo" not in node copied_node.attrs["foo"] = "bar" assert "foo" not in node.attrs assert node.attrs["Test"] is not copied_node.attrs["Test"] @pytest.mark.xfail(reason="data argument not yet implemented") def test_copy_with_data(self, create_test_datatree) -> None: orig = create_test_datatree() # TODO use .data_vars once that property is available data_vars = { k: v for k, v in orig.variables.items() if k not in orig._coord_names } new_data = {k: np.random.randn(*v.shape) for k, v in data_vars.items()} actual = orig.copy(data=new_data) expected = orig.copy() for k, v in new_data.items(): expected[k].data = v assert_identical(expected, actual) # TODO test parents and children? 
class TestSetItem: def test_setitem_new_child_node(self) -> None: john = DataTree(name="john") mary = DataTree(name="mary") john["mary"] = mary grafted_mary = john["mary"] assert grafted_mary.parent is john assert grafted_mary.name == "mary" def test_setitem_unnamed_child_node_becomes_named(self) -> None: john2 = DataTree(name="john2") john2["sonny"] = DataTree() assert john2["sonny"].name == "sonny" def test_setitem_new_grandchild_node(self) -> None: john = DataTree.from_dict({"/Mary/Rose": DataTree()}) new_rose = DataTree(dataset=xr.Dataset({"x": 0})) john["Mary/Rose"] = new_rose grafted_rose = john["Mary/Rose"] assert grafted_rose.parent is john["/Mary"] assert grafted_rose.name == "Rose" def test_grafted_subtree_retains_name(self) -> None: subtree = DataTree(name="original_subtree_name") root = DataTree(name="root") root["new_subtree_name"] = subtree assert subtree.name == "original_subtree_name" def test_setitem_new_empty_node(self) -> None: john = DataTree(name="john") john["mary"] = DataTree() mary = john["mary"] assert isinstance(mary, DataTree) assert_identical(mary.to_dataset(), xr.Dataset()) def test_setitem_overwrite_data_in_node_with_none(self) -> None: john = DataTree.from_dict({"/mary": xr.Dataset()}, name="john") john["mary"] = DataTree() assert_identical(john["mary"].to_dataset(), xr.Dataset()) john.dataset = xr.Dataset() # type: ignore[assignment,unused-ignore] with pytest.raises(ValueError, match="has no name"): john["."] = DataTree() @pytest.mark.xfail(reason="assigning Datasets doesn't yet create new nodes") def test_setitem_dataset_on_this_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results") results["."] = data assert_identical(results.to_dataset(), data) def test_setitem_dataset_as_new_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree(name="folder1") folder1["results"] = data assert_identical(folder1["results"].to_dataset(), data) def test_setitem_dataset_as_new_node_requiring_intermediate_nodes(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree(name="folder1") folder1["results/highres"] = data assert_identical(folder1["results/highres"].to_dataset(), data) def test_setitem_named_dataarray(self) -> None: da = xr.DataArray(name="temp", data=[0, 50]) folder1 = DataTree(name="folder1") folder1["results"] = da expected = da.rename("results") assert_equal(folder1["results"], expected) def test_setitem_unnamed_dataarray(self) -> None: data = xr.DataArray([0, 50]) folder1 = DataTree(name="folder1") folder1["results"] = data assert_equal(folder1["results"], data) def test_setitem_variable(self) -> None: var = xr.Variable(data=[0, 50], dims="x") folder1 = DataTree(name="folder1") folder1["results"] = var assert_equal(folder1["results"], xr.DataArray(var)) def test_setitem_coerce_to_dataarray(self) -> None: folder1 = DataTree(name="folder1") folder1["results"] = 0 assert_equal(folder1["results"], xr.DataArray(0)) def test_setitem_add_new_variable_to_empty_node(self) -> None: results = DataTree(name="results") results["pressure"] = xr.DataArray(data=[2, 3]) assert "pressure" in results.dataset results["temp"] = xr.Variable(data=[10, 11], dims=["x"]) assert "temp" in results.dataset # What if there is a path to traverse first? 
results_with_path = DataTree(name="results") results_with_path["highres/pressure"] = xr.DataArray(data=[2, 3]) assert "pressure" in results_with_path["highres"].dataset results_with_path["highres/temp"] = xr.Variable(data=[10, 11], dims=["x"]) assert "temp" in results_with_path["highres"].dataset def test_setitem_dataarray_replace_existing_node(self) -> None: t = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=t) p = xr.DataArray(data=[2, 3]) results["pressure"] = p expected = t.assign(pressure=p) assert_identical(results.to_dataset(), expected) class TestCoords: def test_properties(self) -> None: # use int64 for repr consistency on windows ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() coords = dt.coords assert isinstance(coords, DataTreeCoordinates) # len assert len(coords) == 4 # iter assert list(coords) == ["x", "y", "a", "b"] assert_identical(coords["x"].variable, dt["x"].variable) assert_identical(coords["y"].variable, dt["y"].variable) assert "x" in coords assert "a" in coords assert 0 not in coords assert "foo" not in coords assert "child" not in coords with pytest.raises(KeyError): coords["foo"] # TODO this currently raises a ValueError instead of a KeyError # with pytest.raises(KeyError): # coords[0] # repr expected = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2 a (x) int64 16B 4 5 b int64 8B -10""" ) actual = repr(coords) assert expected == actual # dims assert coords.sizes == {"x": 2, "y": 3} # dtypes assert coords.dtypes == { "x": np.dtype("int64"), "y": np.dtype("int64"), "a": np.dtype("int64"), "b": np.dtype("int64"), } def test_modify(self) -> None: ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() actual = dt.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = dt.copy(deep=True) actual.coords["z"] = ("z", ["a", "b"]) assert_array_equal(actual["z"], ["a", "b"]) actual = dt.copy(deep=True) with pytest.raises(ValueError, match=r"conflicting dimension sizes"): actual.coords["x"] = ("x", [-1]) assert_identical(actual, dt) # should not be modified # TODO: re-enable after implementing reset_coords() # actual = dt.copy() # del actual.coords["b"] # expected = dt.reset_coords("b", drop=True) # assert_identical(expected, actual) with pytest.raises(KeyError): del dt.coords["not_found"] with pytest.raises(KeyError): del dt.coords["foo"] # TODO: re-enable after implementing assign_coords() # actual = dt.copy(deep=True) # actual.coords.update({"c": 11}) # expected = dt.assign_coords({"c": 11}) # assert_identical(expected, actual) # # regression test for GH3746 # del actual.coords["x"] # assert "x" not in actual.xindexes # test that constructors can also handle the `DataTreeCoordinates` object ds2 = Dataset(coords=dt.coords) assert_identical(ds2.coords, dt.coords) da = DataArray(coords=dt.coords) assert_identical(da.coords, dt.coords) # DataTree constructor doesn't accept coords= but should still be able to handle DatasetCoordinates dt2 = DataTree(dataset=dt.coords) assert_identical(dt2.coords, dt.coords) def 
test_inherited(self) -> None: ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() child = dt["child"] assert set(dt.coords) == {"x", "y", "a", "b"} assert set(child.coords) == {"x", "y"} actual = child.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = child.copy(deep=True) actual.coords.update({"c": 11}) expected = child.copy(deep=True) expected.coords["c"] = 11 # check we have only altered the child node assert_identical(expected.root, actual.root) with pytest.raises(KeyError): # cannot delete inherited coordinate from child node del child["x"] # TODO requires a fix for #9472 # actual = child.copy(deep=True) # actual.coords.update({"c": 11}) # expected = child.assign_coords({"c": 11}) # assert_identical(expected, actual) def test_delitem() -> None: ds = Dataset({"a": 0}, coords={"x": ("x", [1, 2]), "z": "a"}) dt = DataTree(ds, children={"c": DataTree()}) with pytest.raises(KeyError): del dt["foo"] # test delete children del dt["c"] assert dt.children == {} assert set(dt.variables) == {"x", "z", "a"} with pytest.raises(KeyError): del dt["c"] # test delete variables del dt["a"] assert set(dt.coords) == {"x", "z"} with pytest.raises(KeyError): del dt["a"] # test delete coordinates del dt["z"] assert set(dt.coords) == {"x"} with pytest.raises(KeyError): del dt["z"] # test delete indexed coordinates del dt["x"] assert dt.variables == {} assert dt.coords == {} assert dt.indexes == {} with pytest.raises(KeyError): del dt["x"] class TestTreeFromDict: def test_data_in_root(self) -> None: dat = xr.Dataset() dt = DataTree.from_dict({"/": dat}) assert dt.name is None assert dt.parent is None assert dt.children == {} assert_identical(dt.to_dataset(), dat) def test_one_layer(self) -> None: dat1, dat2 = xr.Dataset({"a": 1}), xr.Dataset({"b": 2}) dt = DataTree.from_dict({"run1": dat1, "run2": dat2}) assert_identical(dt.to_dataset(), xr.Dataset()) assert dt.name is None assert_identical(dt["run1"].to_dataset(), dat1) assert dt["run1"].children == {} assert_identical(dt["run2"].to_dataset(), dat2) assert dt["run2"].children == {} def test_two_layers(self) -> None: dat1, dat2 = xr.Dataset({"a": 1}), xr.Dataset({"a": [1, 2]}) dt = DataTree.from_dict({"highres/run": dat1, "lowres/run": dat2}) assert "highres" in dt.children assert "lowres" in dt.children highres_run = dt["highres/run"] assert_identical(highres_run.to_dataset(), dat1) def test_nones(self) -> None: dt = DataTree.from_dict({"d": None, "d/e": None}) assert [node.name for node in dt.subtree] == [None, "d", "e"] assert [node.path for node in dt.subtree] == ["/", "/d", "/d/e"] assert_identical(dt["d/e"].to_dataset(), xr.Dataset()) def test_full(self, simple_datatree) -> None: dt = simple_datatree paths = [node.path for node in dt.subtree] assert paths == [ "/", "/set1", "/set2", "/set3", "/set1/set1", "/set1/set2", "/set2/set1", ] def test_datatree_values(self) -> None: dat1 = DataTree(dataset=xr.Dataset({"a": 1})) expected = DataTree() expected["a"] = dat1 actual = DataTree.from_dict({"a": dat1}) assert_identical(actual, expected) def test_roundtrip_to_dict(self, simple_datatree) -> None: tree = simple_datatree roundtrip = DataTree.from_dict(tree.to_dict()) assert_identical(tree, roundtrip) def test_to_dict(self): tree = DataTree.from_dict({"/a/b/c": None}) roundtrip 
= DataTree.from_dict(tree.to_dict()) assert_identical(tree, roundtrip) roundtrip = DataTree.from_dict(tree.to_dict(relative=True)) assert_identical(tree, roundtrip) roundtrip = DataTree.from_dict(tree.children["a"].to_dict(relative=False)) assert_identical(tree, roundtrip) expected = DataTree.from_dict({"b/c": None}) actual = DataTree.from_dict(tree.children["a"].to_dict(relative=True)) assert_identical(expected, actual) def test_roundtrip_unnamed_root(self, simple_datatree) -> None: # See GH81 dt = simple_datatree dt.name = "root" roundtrip = DataTree.from_dict(dt.to_dict()) assert roundtrip.equals(dt) def test_insertion_order(self) -> None: # regression test for GH issue #9276 reversed = DataTree.from_dict( { "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer": xr.Dataset({"age": 39}), "/": xr.Dataset({"age": 83}), } ) expected = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Bart": xr.Dataset({"age": 10}), } ) assert reversed.equals(expected) # Check that Bart and Lisa's order is still preserved within the group, # despite 'Bart' coming before 'Lisa' when sorted alphabetically assert list(reversed["Homer"].children.keys()) == ["Lisa", "Bart"] def test_array_values_dataarray(self) -> None: expected = DataTree(dataset=Dataset({"a": 1})) actual = DataTree.from_dict({"a": DataArray(1)}) assert_identical(actual, expected) def test_array_values_scalars(self) -> None: expected = DataTree( dataset=Dataset({"a": 1}), children={"b": DataTree(Dataset({"c": 2, "d": 3}))}, ) actual = DataTree.from_dict({"a": 1, "b/c": 2, "b/d": 3}) assert_identical(actual, expected) def test_invalid_values(self) -> None: with pytest.raises( TypeError, match=re.escape( r"failed to construct xarray.Dataset for DataTree node at '/' " r"with data_vars={'a': set()} and coords={}" ), ): DataTree.from_dict({"a": set()}) def test_array_values_nested_key(self) -> None: expected = DataTree( children={"a": DataTree(children={"b": DataTree(Dataset({"c": 1}))})} ) actual = DataTree.from_dict(data={"a/b/c": 1}) assert_identical(actual, expected) def test_nested_array_values(self) -> None: expected = DataTree( children={"a": DataTree(children={"b": DataTree(Dataset({"c": 1}))})} ) actual = DataTree.from_dict({"a": {"b": {"c": 1}}}, nested=True) assert_identical(actual, expected) def test_nested_array_values_without_nested_kwarg(self) -> None: with pytest.raises( TypeError, match=re.escape( r"data contains a dict value at key='a', which is not a valid " r"argument to DataTree.from_dict() with nested=False: " r"{'b': {'c': 1}}" ), ): DataTree.from_dict({"a": {"b": {"c": 1}}}) def test_nested_array_values_duplicates(self) -> None: with pytest.raises( ValueError, match=re.escape("multiple entries found corresponding to node '/a/b'"), ): DataTree.from_dict({"a": {"b": 1}, "a/b": 2}, nested=True) def test_array_values_data_and_coords(self) -> None: expected = DataTree(dataset=Dataset({"a": 1}, coords={"b": 2})) actual = DataTree.from_dict(data={"a": 1}, coords={"b": 2}) assert_identical(actual, expected) def test_data_and_coords_conflicting(self) -> None: with pytest.raises( ValueError, match=re.escape("multiple entries found corresponding to node '/a'"), ): DataTree.from_dict(data={"a": 1}, coords={"a": 2}) def test_array_values_new_name(self) -> None: expected = DataTree(dataset=Dataset({"foo": 1})) data = {"foo": xr.DataArray(1, name="bar")} actual = DataTree.from_dict(data) assert_identical(actual, expected) def 
test_array_values_at_root(self) -> None: with pytest.raises(ValueError, match="cannot set DataArray value at root"): DataTree.from_dict({"/": 1}) def test_array_values_parent_node_also_set(self) -> None: with pytest.raises( ValueError, match=re.escape( r"cannot set DataArray value at '/a' when parent node at '/' is also set" ), ): DataTree.from_dict({"/": Dataset(), "/a": 1}) def test_relative_paths(self) -> None: tree = DataTree.from_dict({".": None, "foo": None, "./bar": None, "x/y": None}) paths = [node.path for node in tree.subtree] assert paths == [ "/", "/foo", "/bar", "/x", "/x/y", ] def test_root_keys(self): ds = Dataset({"x": 1}) expected = DataTree(dataset=ds) actual = DataTree.from_dict({"": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({".": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({"/": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({"./": ds}) assert_identical(actual, expected) def test_multiple_entries(self): with pytest.raises( ValueError, match="multiple entries found corresponding to node '/'" ): DataTree.from_dict({"": None, ".": None}) with pytest.raises( ValueError, match="multiple entries found corresponding to node '/a'" ): DataTree.from_dict({"a": None, "/a": None}) def test_name(self): tree = DataTree.from_dict({"/": None}, name="foo") assert tree.name == "foo" tree = DataTree.from_dict({"/": DataTree()}, name="foo") assert tree.name == "foo" tree = DataTree.from_dict({"/": DataTree(name="bar")}, name="foo") assert tree.name == "foo" class TestDatasetView: def test_view_contents(self) -> None: ds = create_test_data() dt = DataTree(dataset=ds) assert ds.identical( dt.dataset ) # this only works because Dataset.identical doesn't check types assert isinstance(dt.dataset, xr.Dataset) def test_immutability(self) -> None: # See issue https://github.com/xarray-contrib/datatree/issues/38 dt = DataTree.from_dict( { "/": None, "/a": None, }, name="root", ) with pytest.raises( AttributeError, match="Mutation of the DatasetView is not allowed" ): dt.dataset["a"] = xr.DataArray(0) with pytest.raises( AttributeError, match="Mutation of the DatasetView is not allowed" ): dt.dataset.update({"a": 0}) # TODO are there any other ways you can normally modify state (in-place)? # (not attribute-like assignment because that doesn't work on Dataset anyway) def test_methods(self) -> None: ds = create_test_data() dt = DataTree(dataset=ds) assert ds.mean().identical(dt.dataset.mean()) assert isinstance(dt.dataset.mean(), xr.Dataset) def test_arithmetic(self, create_test_datatree) -> None: dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: 10.0 * ds)[ "set1" ].to_dataset() result = 10.0 * dt["set1"].dataset assert result.identical(expected) def test_init_via_type(self) -> None: # from datatree GH issue https://github.com/xarray-contrib/datatree/issues/188 # xarray's .weighted is unusual because it uses type() to create a Dataset/DataArray a = xr.DataArray( np.random.rand(3, 4, 10), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(3, 4))}, ).to_dataset(name="data") dt = DataTree(dataset=a) def weighted_mean(ds): return ds.weighted(ds.area).mean(["x", "y"]) weighted_mean(dt.dataset) def test_map_keep_attrs(self) -> None: # test DatasetView.map(..., keep_attrs=...) 
data = xr.DataArray([1, 2, 3], dims="x", attrs={"da": "attrs"}) ds = xr.Dataset({"data": data}, attrs={"ds": "attrs"}) dt = DataTree(ds) def func_keep(ds): # x.mean() removes the attrs of the data_vars return ds.map(lambda x: x.mean(), keep_attrs=True) result = xr.map_over_datasets(func_keep, dt) expected = dt.mean(keep_attrs=True) xr.testing.assert_identical(result, expected) # DatasetView.map keeps attrs by default def func(ds): # ds.map and x.mean() both keep attrs by default return ds.map(lambda x: x.mean()) result = xr.map_over_datasets(func, dt) expected = dt.mean() xr.testing.assert_identical(result, expected) class TestAccess: def test_attribute_access(self, create_test_datatree) -> None: dt = create_test_datatree() # vars / coords for key in ["a", "set0"]: assert_equal(dt[key], getattr(dt, key)) assert key in dir(dt) # dims assert_equal(dt["a"]["y"], dt.a.y) assert "y" in dir(dt["a"]) # children for key in ["set1", "set2", "set3"]: assert_equal(dt[key], getattr(dt, key)) assert key in dir(dt) # attrs dt.attrs["meta"] = "NASA" assert dt.attrs["meta"] == "NASA" assert "meta" in dir(dt) def test_ipython_key_completions_complex(self, create_test_datatree) -> None: dt = create_test_datatree() key_completions = dt._ipython_key_completions_() node_keys = [node.path[1:] for node in dt.descendants] assert all(node_key in key_completions for node_key in node_keys) var_keys = list(dt.variables.keys()) assert all(var_key in key_completions for var_key in var_keys) def test_ipython_key_completions_subnode(self) -> None: tree = xr.DataTree.from_dict({"/": None, "/a": None, "/a/b/": None}) expected = ["b"] actual = tree["a"]._ipython_key_completions_() assert expected == actual def test_operation_with_attrs_but_no_data(self) -> None: # tests bug from xarray-datatree GH262 xs = xr.Dataset({"testvar": xr.DataArray(np.ones((2, 3)))}) dt = DataTree.from_dict({"node1": xs, "node2": xs}) dt.attrs["test_key"] = 1 # sel works fine without this line dt.sel(dim_0=0) class TestRepr: def test_repr_four_nodes(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset( {"e": (("x",), [1.0, 2.0])}, coords={"x": [2.0, 3.0]}, ), "/b": xr.Dataset({"f": (("y",), [3.0])}), "/b/c": xr.Dataset(), "/b/d": xr.Dataset({"g": 4.0}), } ) result = repr(dt) expected = dedent( """ Group: / โ”‚ Dimensions: (x: 2) โ”‚ Coordinates: โ”‚ * x (x) float64 16B 2.0 3.0 โ”‚ Data variables: โ”‚ e (x) float64 16B 1.0 2.0 โ””โ”€โ”€ Group: /b โ”‚ Dimensions: (y: 1) โ”‚ Dimensions without coordinates: y โ”‚ Data variables: โ”‚ f (y) float64 8B 3.0 โ”œโ”€โ”€ Group: /b/c โ””โ”€โ”€ Group: /b/d Dimensions: () Data variables: g float64 8B 4.0 """ ).strip() assert result == expected result = repr(dt.b) expected = dedent( """ Group: /b โ”‚ Dimensions: (x: 2, y: 1) โ”‚ Inherited coordinates: โ”‚ * x (x) float64 16B 2.0 3.0 โ”‚ Dimensions without coordinates: y โ”‚ Data variables: โ”‚ f (y) float64 8B 3.0 โ”œโ”€โ”€ Group: /b/c โ””โ”€โ”€ Group: /b/d Dimensions: () Data variables: g float64 8B 4.0 """ ).strip() assert result == expected result = repr(dt.b.d) expected = dedent( """ Group: /b/d Dimensions: (x: 2, y: 1) Inherited coordinates: * x (x) float64 16B 2.0 3.0 Dimensions without coordinates: y Data variables: g float64 8B 4.0 """ ).strip() assert result == expected def test_repr_two_children(self) -> None: tree = DataTree.from_dict( { "/": Dataset(coords={"x": [1.0]}), "/first_child": None, "/second_child": Dataset({"foo": ("x", [0.0])}, coords={"z": 1.0}), } ) result = repr(tree) expected = dedent( """ Group: / โ”‚ Dimensions: (x: 1) โ”‚ 
Coordinates: โ”‚ * x (x) float64 8B 1.0 โ”œโ”€โ”€ Group: /first_child โ””โ”€โ”€ Group: /second_child Dimensions: (x: 1) Coordinates: z float64 8B 1.0 Data variables: foo (x) float64 8B 0.0 """ ).strip() assert result == expected result = repr(tree["first_child"]) expected = dedent( """ Group: /first_child Dimensions: (x: 1) Inherited coordinates: * x (x) float64 8B 1.0 """ ).strip() assert result == expected result = repr(tree["second_child"]) expected = dedent( """ Group: /second_child Dimensions: (x: 1) Coordinates: z float64 8B 1.0 Inherited coordinates: * x (x) float64 8B 1.0 Data variables: foo (x) float64 8B 0.0 """ ).strip() assert result == expected def test_repr_truncates_nodes(self) -> None: # construct a datatree with 50 nodes number_of_files = 10 number_of_groups = 5 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = Dataset({"g": f * g}) tree = DataTree.from_dict(tree_dict) with xr.set_options(display_max_children=3): result = repr(tree) expected = dedent( """ Group: / โ”œโ”€โ”€ Group: /file_0 โ”‚ โ”œโ”€โ”€ Group: /file_0/group_0 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ g int64 8B 0 โ”‚ โ”œโ”€โ”€ Group: /file_0/group_1 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ g int64 8B 0 โ”‚ ... โ”‚ โ””โ”€โ”€ Group: /file_0/group_4 โ”‚ Dimensions: () โ”‚ Data variables: โ”‚ g int64 8B 0 โ”œโ”€โ”€ Group: /file_1 โ”‚ โ”œโ”€โ”€ Group: /file_1/group_0 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ g int64 8B 0 โ”‚ โ”œโ”€โ”€ Group: /file_1/group_1 โ”‚ โ”‚ Dimensions: () โ”‚ โ”‚ Data variables: โ”‚ โ”‚ g int64 8B 1 โ”‚ ... โ”‚ โ””โ”€โ”€ Group: /file_1/group_4 โ”‚ Dimensions: () โ”‚ Data variables: โ”‚ g int64 8B 4 ... โ””โ”€โ”€ Group: /file_9 โ”œโ”€โ”€ Group: /file_9/group_0 โ”‚ Dimensions: () โ”‚ Data variables: โ”‚ g int64 8B 0 โ”œโ”€โ”€ Group: /file_9/group_1 โ”‚ Dimensions: () โ”‚ Data variables: โ”‚ g int64 8B 9 ... 
โ””โ”€โ”€ Group: /file_9/group_4 Dimensions: () Data variables: g int64 8B 36 """ ).strip() assert expected == result with xr.set_options(display_max_children=10): result = repr(tree) for key in tree_dict: assert key in result def test_repr_inherited_dims(self) -> None: tree = DataTree.from_dict( { "/": Dataset({"foo": ("x", [1.0])}), "/child": Dataset({"bar": ("y", [2.0])}), } ) result = repr(tree) expected = dedent( """ Group: / โ”‚ Dimensions: (x: 1) โ”‚ Dimensions without coordinates: x โ”‚ Data variables: โ”‚ foo (x) float64 8B 1.0 โ””โ”€โ”€ Group: /child Dimensions: (y: 1) Dimensions without coordinates: y Data variables: bar (y) float64 8B 2.0 """ ).strip() assert result == expected result = repr(tree["child"]) expected = dedent( """ Group: /child Dimensions: (x: 1, y: 1) Dimensions without coordinates: x, y Data variables: bar (y) float64 8B 2.0 """ ).strip() assert result == expected @pytest.mark.skipif( ON_WINDOWS, reason="windows (pre NumPy2) uses int32 instead of int64" ) def test_doc_example(self) -> None: # regression test for https://github.com/pydata/xarray/issues/9499 time = xr.DataArray( data=np.array(["2022-01", "2023-01"], dtype=" Group: / โ”‚ Dimensions: (time: 2) โ”‚ Coordinates: โ”‚ * time (time) Group: /weather โ”‚ Dimensions: (time: 2, station: 6) โ”‚ Coordinates: โ”‚ * station (station) str: return re.escape(dedent(message).strip()) class TestInheritance: def test_inherited_dims(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset({"d": (("x",), [1, 2])}), "/b": xr.Dataset({"e": (("y",), [3])}), "/c": xr.Dataset({"f": (("y",), [3, 4, 5])}), } ) assert dt.sizes == {"x": 2} # nodes should include inherited dimensions assert dt.b.sizes == {"x": 2, "y": 1} assert dt.c.sizes == {"x": 2, "y": 3} # dataset objects created from nodes should not assert dt.b.dataset.sizes == {"y": 1} assert dt.b.to_dataset(inherit=True).sizes == {"y": 1} assert dt.b.to_dataset(inherit=False).sizes == {"y": 1} def test_inherited_coords_index(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset({"d": (("x",), [1, 2])}, coords={"x": [2, 3]}), "/b": xr.Dataset({"e": (("y",), [3])}), } ) assert "x" in dt["/b"].indexes assert "x" in dt["/b"].coords xr.testing.assert_identical(dt["/x"], dt["/b/x"]) def test_inherit_only_index_coords(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1], "y": 2}), "/b": xr.Dataset(coords={"z": 3}), } ) assert dt.coords.keys() == {"x", "y"} xr.testing.assert_equal( dt["/x"], xr.DataArray([1], dims=["x"], coords={"x": [1], "y": 2}) ) xr.testing.assert_equal(dt["/y"], xr.DataArray(2, coords={"y": 2})) assert dt["/b"].coords.keys() == {"x", "z"} xr.testing.assert_equal( dt["/b/x"], xr.DataArray([1], dims=["x"], coords={"x": [1], "z": 3}) ) xr.testing.assert_equal(dt["/b/z"], xr.DataArray(3, coords={"z": 3})) def test_inherited_coords_with_index_are_deduplicated(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/b": xr.Dataset(coords={"x": [1, 2]}), } ) child_dataset = dt.children["b"].to_dataset(inherit=False) expected = xr.Dataset() assert_identical(child_dataset, expected) dt["/c"] = xr.Dataset({"foo": ("x", [4, 5])}, coords={"x": [1, 2]}) child_dataset = dt.children["c"].to_dataset(inherit=False) expected = xr.Dataset({"foo": ("x", [4, 5])}) assert_identical(child_dataset, expected) def test_deduplicated_after_setitem(self) -> None: # regression test for GH #9601 dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/b": None, } ) dt["b/x"] = dt["x"] child_dataset = 
dt.children["b"].to_dataset(inherit=False) expected = xr.Dataset() assert_identical(child_dataset, expected) def test_inconsistent_dims(self) -> None: expected_msg = _exact_match( """ group '/b' is not aligned with its parents: Group: Dimensions: (x: 1) Dimensions without coordinates: x Data variables: c (x) float64 8B 3.0 From parents: Dimensions: (x: 2) Dimensions without coordinates: x """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1.0, 2.0])}), "/b": xr.Dataset({"c": (("x",), [3.0])}), } ) dt = DataTree() dt["/a"] = xr.DataArray([1.0, 2.0], dims=["x"]) with pytest.raises(ValueError, match=expected_msg): dt["/b/c"] = xr.DataArray([3.0], dims=["x"]) b = DataTree(dataset=xr.Dataset({"c": (("x",), [3.0])})) with pytest.raises(ValueError, match=expected_msg): DataTree( dataset=xr.Dataset({"a": (("x",), [1.0, 2.0])}), children={"b": b}, ) def test_inconsistent_child_indexes(self) -> None: expected_msg = _exact_match( """ group '/b' is not aligned with its parents: Group: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 2.0 Data variables: *empty* From parents: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 1.0 """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1.0]}), "/b": xr.Dataset(coords={"x": [2.0]}), } ) dt = DataTree() dt.dataset = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment,unused-ignore] dt["/b"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b"].dataset = xr.Dataset(coords={"x": [2.0]}) b = DataTree(xr.Dataset(coords={"x": [2.0]})) with pytest.raises(ValueError, match=expected_msg): DataTree(dataset=xr.Dataset(coords={"x": [1.0]}), children={"b": b}) def test_inconsistent_grandchild_indexes(self) -> None: expected_msg = _exact_match( """ group '/b/c' is not aligned with its parents: Group: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 2.0 Data variables: *empty* From parents: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 1.0 """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1.0]}), "/b/c": xr.Dataset(coords={"x": [2.0]}), } ) dt = DataTree() dt.dataset = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment,unused-ignore] dt["/b/c"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b/c"].dataset = xr.Dataset(coords={"x": [2.0]}) c = DataTree(xr.Dataset(coords={"x": [2.0]})) b = DataTree(children={"c": c}) with pytest.raises(ValueError, match=expected_msg): DataTree(dataset=xr.Dataset(coords={"x": [1.0]}), children={"b": b}) def test_inconsistent_grandchild_dims(self) -> None: expected_msg = _exact_match( """ group '/b/c' is not aligned with its parents: Group: Dimensions: (x: 1) Dimensions without coordinates: x Data variables: d (x) float64 8B 3.0 From parents: Dimensions: (x: 2) Dimensions without coordinates: x """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1.0, 2.0])}), "/b/c": xr.Dataset({"d": (("x",), [3.0])}), } ) dt = DataTree() dt["/a"] = xr.DataArray([1.0, 2.0], dims=["x"]) with pytest.raises(ValueError, match=expected_msg): dt["/b/c/d"] = xr.DataArray([3.0], dims=["x"]) class TestRestructuring: def test_drop_nodes(self) -> None: sue = DataTree.from_dict({"Mary": None, "Kate": None, "Ashley": None}) # test drop just one node dropped_one = sue.drop_nodes(names="Mary") assert "Mary" not in dropped_one.children # test drop multiple nodes dropped = 
sue.drop_nodes(names=["Mary", "Kate"]) assert not {"Mary", "Kate"}.intersection(set(dropped.children)) assert "Ashley" in dropped.children # test raise with pytest.raises(KeyError, match=r"nodes {'Mary'} not present"): dropped.drop_nodes(names=["Mary", "Ashley"]) # test ignore childless = dropped.drop_nodes(names=["Mary", "Ashley"], errors="ignore") assert childless.children == {} def test_assign(self) -> None: dt = DataTree() expected = DataTree.from_dict({"/": xr.Dataset({"foo": 0}), "/a": None}) # kwargs form result = dt.assign(foo=xr.DataArray(0), a=DataTree()) assert_equal(result, expected) # dict form result = dt.assign({"foo": xr.DataArray(0), "a": DataTree()}) assert_equal(result, expected) def test_filter_like(self) -> None: flower_tree = DataTree.from_dict( {"root": None, "trunk": None, "leaves": None, "flowers": None} ) fruit_tree = DataTree.from_dict( {"root": None, "trunk": None, "leaves": None, "fruit": None} ) barren_tree = DataTree.from_dict({"root": None, "trunk": None}) # test filter_like tree filtered_tree = flower_tree.filter_like(barren_tree) assert filtered_tree.equals(barren_tree) assert "flowers" not in filtered_tree.children # test symmetrical pruning results in isomorphic trees assert flower_tree.filter_like(fruit_tree).isomorphic( fruit_tree.filter_like(flower_tree) ) # test "deep" pruning dt = DataTree.from_dict( {"/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None} ) other = DataTree.from_dict({"/a/A": None, "/b/A": None}) filtered = dt.filter_like(other) assert filtered.equals(other) class TestPipe: def test_noop(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() actual = dt.pipe(lambda tree: tree) assert actual.identical(dt) def test_args(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, x: int, y: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y)) ) actual = dt.pipe(f, 1, 2) assert actual["arr_with_attrs"].attrs == dict(x=1, y=2) def test_kwargs(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, *, x: int, y: int, z: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y, z=z)) ) attrs = {"x": 1, "y": 2, "z": 3} actual = dt.pipe(f, **attrs) assert actual["arr_with_attrs"].attrs == attrs def test_args_kwargs(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, x: int, *, y: int, z: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y, z=z)) ) attrs = {"x": 1, "y": 2, "z": 3} actual = dt.pipe(f, attrs["x"], y=attrs["y"], z=attrs["z"]) assert actual["arr_with_attrs"].attrs == attrs def test_named_self(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(x: int, tree: DataTree, y: int): tree.attrs.update({"x": x, "y": y}) return tree attrs = {"x": 1, "y": 2} actual = dt.pipe((f, "tree"), **attrs) assert actual is dt and actual.attrs == attrs class TestIsomorphicEqualsAndIdentical: def test_isomorphic(self): tree = DataTree.from_dict({"/a": None, "/a/b": None, "/c": None}) diff_data = DataTree.from_dict( {"/a": None, "/a/b": None, "/c": xr.Dataset({"foo": 1})} ) assert tree.isomorphic(diff_data) diff_order = DataTree.from_dict({"/c": None, "/a": None, "/a/b": None}) assert tree.isomorphic(diff_order) diff_nodes = DataTree.from_dict({"/a": None, "/a/b": None, "/d": None}) 
assert not tree.isomorphic(diff_nodes) more_nodes = DataTree.from_dict( {"/a": None, "/a/b": None, "/c": None, "/d": None} ) assert not tree.isomorphic(more_nodes) def test_minimal_variations(self): tree = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}), } ) assert tree.equals(tree) assert tree.identical(tree) child = tree.children["child"] assert child.equals(child) assert child.identical(child) new_child = DataTree(dataset=Dataset({"x": 2}), name="child") assert child.equals(new_child) assert child.identical(new_child) anonymous_child = DataTree(dataset=Dataset({"x": 2})) # TODO: re-enable this after fixing .equals() not to require matching # names on the root node (i.e., after switching to use zip_subtrees) # assert child.equals(anonymous_child) assert not child.identical(anonymous_child) different_variables = DataTree.from_dict( { "/": Dataset(), "/other": Dataset({"x": 2}), } ) assert not tree.equals(different_variables) assert not tree.identical(different_variables) different_root_data = DataTree.from_dict( { "/": Dataset({"x": 4}), "/child": Dataset({"x": 2}), } ) assert not tree.equals(different_root_data) assert not tree.identical(different_root_data) different_child_data = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 3}), } ) assert not tree.equals(different_child_data) assert not tree.identical(different_child_data) different_child_node_attrs = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}, attrs={"foo": "bar"}), } ) assert tree.equals(different_child_node_attrs) assert not tree.identical(different_child_node_attrs) different_child_variable_attrs = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": ((), 2, {"foo": "bar"})}), } ) assert tree.equals(different_child_variable_attrs) assert not tree.identical(different_child_variable_attrs) different_name = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}), }, name="different", ) # TODO: re-enable this after fixing .equals() not to require matching # names on the root node (i.e., after switching to use zip_subtrees) # assert tree.equals(different_name) assert not tree.identical(different_name) def test_differently_inherited_coordinates(self): root = DataTree.from_dict( { "/": Dataset(coords={"x": [1, 2]}), "/child": Dataset(), } ) child = root.children["child"] assert child.equals(child) assert child.identical(child) new_child = DataTree(dataset=Dataset(coords={"x": [1, 2]}), name="child") assert child.equals(new_child) assert not child.identical(new_child) deeper_root = DataTree(children={"root": root}) grandchild = deeper_root.children["root"].children["child"] assert child.equals(grandchild) assert child.identical(grandchild) class TestSubset: def test_match(self) -> None: # TODO is this example going to cause problems with case sensitivity? 
dt = DataTree.from_dict( { "/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None, } ) result = dt.match("*/B") expected = DataTree.from_dict( { "/a/B": None, "/b/B": None, } ) assert_identical(result, expected) result = dt.children["a"].match("B") expected = DataTree.from_dict({"/B": None}, name="a") assert_identical(result, expected) def test_filter(self) -> None: simpsons = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) expected = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), }, name="Abe", ) elders = simpsons.filter(lambda node: node["age"].item() > 18) assert_identical(elders, expected) expected = DataTree.from_dict({"/Bart": xr.Dataset({"age": 10})}, name="Homer") actual = simpsons.children["Homer"].filter( lambda node: node["age"].item() == 10 ) assert_identical(actual, expected) def test_prune_basic(self) -> None: tree = DataTree.from_dict( {"/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset()} ) pruned = tree.prune() assert "a" in pruned.children assert "b" not in pruned.children assert_identical( pruned.children["a"].to_dataset(), tree.children["a"].to_dataset() ) def test_prune_with_zero_size_vars(self) -> None: tree = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset({"empty": ("dim", [])}), "/c": xr.Dataset(), } ) pruned_default = tree.prune() expected_default = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset({"empty": ("dim", [])}), } ) assert_identical(pruned_default, expected_default) pruned_strict = tree.prune(drop_size_zero_vars=True) expected_strict = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), } ) assert_identical(pruned_strict, expected_strict) def test_prune_with_intermediate_nodes(self) -> None: tree = DataTree.from_dict( { "/": xr.Dataset(), "/group1": xr.Dataset(), "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}), "/group1/subB": xr.Dataset(), "/group2": xr.Dataset({"empty": ("dim", [])}), } ) pruned = tree.prune() expected_tree = DataTree.from_dict( { "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}), "/group2": xr.Dataset({"empty": ("dim", [])}), } ) assert_identical(pruned, expected_tree) def test_prune_after_filtering(self) -> None: from pandas import date_range ds1 = xr.Dataset( {"foo": ("time", [1, 2, 3, 4, 5])}, coords={"time": date_range("2023-01-01", periods=5, freq="D")}, ) ds2 = xr.Dataset( {"var": ("time", [1, 2, 3, 4, 5])}, coords={"time": date_range("2023-01-04", periods=5, freq="D")}, ) tree = DataTree.from_dict({"a": ds1, "b": ds2}) filtered = tree.sel(time=slice("2023-01-01", "2023-01-03")) pruned = filtered.prune(drop_size_zero_vars=True) expected_tree = DataTree.from_dict( {"a": ds1.sel(time=slice("2023-01-01", "2023-01-03"))} ) assert_identical(pruned, expected_tree) class TestIndexing: def test_isel_siblings(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2])}), "/second": xr.Dataset({"b": ("x", [1, 2, 3])}), } ) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": 2}), "/second": xr.Dataset({"b": 3}), } ) actual = tree.isel(x=-1) assert_identical(actual, expected) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1])}), "/second": xr.Dataset({"b": ("x", [1])}), } ) actual = tree.isel(x=slice(1)) 
assert_identical(actual, expected) actual = tree.isel(x=[0]) assert_identical(actual, expected) actual = tree.isel(x=slice(None)) assert_identical(actual, tree) def test_isel_inherited(self) -> None: tree = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/child": xr.Dataset({"foo": ("x", [3, 4])}), } ) expected = DataTree.from_dict( { "/": xr.Dataset(coords={"x": 2}), "/child": xr.Dataset({"foo": 4}), } ) actual = tree.isel(x=-1) assert_identical(actual, expected) expected = DataTree.from_dict( { "/child": xr.Dataset({"foo": 4}), } ) actual = tree.isel(x=-1, drop=True) assert_identical(actual, expected) expected = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1]}), "/child": xr.Dataset({"foo": ("x", [3])}), } ) actual = tree.isel(x=[0]) assert_identical(actual, expected) actual = tree.isel(x=slice(None)) # TODO: re-enable after the fix to copy() from #9628 is submitted # actual = tree.children["child"].isel(x=slice(None)) # expected = tree.children["child"].copy() # assert_identical(actual, expected) actual = tree.children["child"].isel(x=0) expected = DataTree( dataset=xr.Dataset({"foo": 3}, coords={"x": 1}), name="child", ) assert_identical(actual, expected) def test_sel(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}), "/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}), } ) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": 2}, coords={"x": 2}), "/second": xr.Dataset({"b": 4}, coords={"x": 2}), } ) actual = tree.sel(x=2) assert_identical(actual, expected) actual = tree.children["first"].sel(x=2) expected = DataTree( dataset=xr.Dataset({"a": 2}, coords={"x": 2}), name="first", ) assert_identical(actual, expected) def test_sel_isel_error_has_node_info(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}), "/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}), } ) with pytest.raises( KeyError, match=re.escape( "Raised whilst mapping function over node(s) with path 'second'" ), ): tree.sel(x=1) with pytest.raises( IndexError, match=re.escape( "Raised whilst mapping function over node(s) with path 'first'" ), ): tree.isel(x=4) class TestAggregations: def test_reduce_method(self) -> None: ds = xr.Dataset({"a": ("x", [False, True, False])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict({"/": ds.any(), "/results": ds.any()}) result = dt.any() assert_equal(result, expected) def test_nan_reduce_method(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict({"/": ds.mean(), "/results": ds.mean()}) result = dt.mean() assert_equal(result, expected) def test_cum_method(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict( { "/": ds.cumsum(), "/results": ds.cumsum(), } ) result = dt.cumsum() assert_equal(result, expected) def test_dim_argument(self) -> None: dt = DataTree.from_dict( { "/a": xr.Dataset({"A": ("x", [1, 2])}), "/b": xr.Dataset({"B": ("y", [1, 2])}), } ) expected = DataTree.from_dict( { "/a": xr.Dataset({"A": 1.5}), "/b": xr.Dataset({"B": 1.5}), } ) actual = dt.mean() assert_equal(expected, actual) actual = dt.mean(dim=...) 
assert_equal(expected, actual) expected = DataTree.from_dict( { "/a": xr.Dataset({"A": 1.5}), "/b": xr.Dataset({"B": ("y", [1.0, 2.0])}), } ) actual = dt.mean("x") assert_equal(expected, actual) with pytest.raises( ValueError, match=re.escape("Dimension(s) 'invalid' do not exist."), ): dt.mean("invalid") def test_subtree(self) -> None: tree = DataTree.from_dict( { "/child": Dataset({"a": ("x", [1, 2])}), } ) expected = DataTree(dataset=Dataset({"a": 1.5}), name="child") actual = tree.children["child"].mean() assert_identical(expected, actual) class TestOps: def test_unary_op(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": (-ds1), "/subnode": (-ds2)}) result = -dt assert_equal(result, expected) def test_unary_op_inherited_coords(self) -> None: tree = DataTree(xr.Dataset(coords={"x": [1, 2, 3]})) tree["/foo"] = DataTree(xr.Dataset({"bar": ("x", [4, 5, 6])})) actual = -tree actual_dataset = actual.children["foo"].to_dataset(inherit=False) assert "x" not in actual_dataset.coords expected = tree.copy() # unary ops are not applied to coordinate variables, only data variables expected["/foo/bar"].data = np.array([-4, -5, -6]) assert_identical(actual, expected) def test_binary_op_on_int(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 * 5, "/subnode": ds2 * 5}) result = dt * 5 assert_equal(result, expected) def test_binary_op_on_dataarray(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_da = xr.DataArray(name="z", data=[0.1, 0.2], dims="z") expected = DataTree.from_dict( { "/": ds1 * other_da, "/subnode": ds2 * other_da, } ) result = dt * other_da assert_equal(result, expected) def test_binary_op_on_dataset(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_ds = xr.Dataset({"z": ("z", [0.1, 0.2])}) expected = DataTree.from_dict( { "/": ds1 * other_ds, "/subnode": ds2 * other_ds, } ) result = dt * other_ds assert_equal(result, expected) def test_binary_op_on_datatree(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 * ds1, "/subnode": ds2 * ds2}) result = dt * dt assert_equal(result, expected) def test_binary_op_order_invariant(self) -> None: tree_ab = DataTree.from_dict({"/a": Dataset({"a": 1}), "/b": Dataset({"b": 2})}) tree_ba = DataTree.from_dict({"/b": Dataset({"b": 2}), "/a": Dataset({"a": 1})}) expected = DataTree.from_dict( {"/a": Dataset({"a": 2}), "/b": Dataset({"b": 4})} ) actual = tree_ab + tree_ba assert_identical(expected, actual) def test_arithmetic_inherited_coords(self) -> None: tree = DataTree(xr.Dataset(coords={"x": [1, 2, 3]})) tree["/foo"] = DataTree(xr.Dataset({"bar": ("x", [4, 5, 6])})) actual = 2 * tree actual_dataset = actual.children["foo"].to_dataset(inherit=False) assert "x" not in actual_dataset.coords expected = tree.copy() expected["/foo/bar"].data = np.array([8, 10, 12]) assert_identical(actual, expected) def test_binary_op_commutativity_with_dataset(self) -> None: # regression test for #9365 ds1 = 
xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_ds = xr.Dataset({"z": ("z", [0.1, 0.2])}) expected = DataTree.from_dict( { "/": ds1 * other_ds, "/subnode": ds2 * other_ds, } ) result = other_ds * dt assert_equal(result, expected) def test_inplace_binary_op(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 + 1, "/subnode": ds2 + 1}) dt += 1 assert_equal(dt, expected) def test_dont_broadcast_single_node_tree(self) -> None: # regression test for https://github.com/pydata/xarray/issues/9365#issuecomment-2291622577 ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) node = dt["/subnode"] with pytest.raises( xr.TreeIsomorphismError, match=re.escape(r"children at root node do not match: ['subnode'] vs []"), ): dt * node class TestUFuncs: @pytest.mark.xfail(reason="__array_ufunc__ not implemented yet") def test_tree(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=np.sin) result_tree = np.sin(dt) assert_equal(result_tree, expected) class Closer: def __init__(self): self.closed = False def close(self): if self.closed: raise RuntimeError("already closed") self.closed = True @pytest.fixture def tree_and_closers(): tree = DataTree.from_dict({"/child/grandchild": None}) closers = { "/": Closer(), "/child": Closer(), "/child/grandchild": Closer(), } for path, closer in closers.items(): tree[path].set_close(closer.close) return tree, closers class TestClose: def test_close(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) tree.close() assert all(closer.closed for closer in closers.values()) tree.close() # should not error def test_context_manager(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) with tree: pass assert all(closer.closed for closer in closers.values()) def test_close_child(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) tree["child"].close() # should only close descendants assert not closers["/"].closed assert closers["/child"].closed assert closers["/child/grandchild"].closed def test_close_datasetview(self, tree_and_closers): tree, _ = tree_and_closers with pytest.raises( AttributeError, match=re.escape( r"cannot close a DatasetView(). 
Close the associated DataTree node instead" ), ): tree.dataset.close() with pytest.raises( AttributeError, match=re.escape(r"cannot modify a DatasetView()") ): tree.dataset.set_close(None) def test_close_dataset(self, tree_and_closers): tree, closers = tree_and_closers ds = tree.to_dataset() # should discard closers ds.close() assert not closers["/"].closed # with tree: # pass @requires_dask class TestDask: def test_chunksizes(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) groups = { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } tree = xr.DataTree.from_dict(groups) expected_chunksizes = {path: node.chunksizes for path, node in groups.items()} assert tree.chunksizes == expected_chunksizes def test_load(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) groups = {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} expected = xr.DataTree.from_dict(groups) tree = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) expected_chunksizes: Mapping[str, Mapping] expected_chunksizes = {node.path: {} for node in tree.subtree} actual = tree.load() assert_identical(actual, expected) assert tree.chunksizes == expected_chunksizes assert actual.chunksizes == expected_chunksizes tree = xr.DataTree.from_dict(groups) actual = tree.load() assert_identical(actual, expected) assert actual.chunksizes == expected_chunksizes def test_compute(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) expected = xr.DataTree.from_dict( {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} ) tree = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) original_chunksizes = tree.chunksizes expected_chunksizes: Mapping[str, Mapping] expected_chunksizes = {node.path: {} for node in tree.subtree} actual = tree.compute() assert_identical(actual, expected) assert actual.chunksizes == expected_chunksizes, "mismatching chunksizes" assert tree.chunksizes == original_chunksizes, "original tree was modified" def test_persist(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) def fn(x): return 2 * x expected = xr.DataTree.from_dict( { "/": fn(ds1).chunk({"x": 5}), "/group1": fn(ds2).chunk({"y": 3}), "/group2": fn(ds3).chunk({"z": 2}), "/group1/subgroup1": fn(ds4).chunk({"x": 5}), } ) # Add trivial second layer to the task graph, persist should reduce to one tree = xr.DataTree.from_dict( { "/": fn(ds1.chunk({"x": 5})), "/group1": fn(ds2.chunk({"y": 3})), "/group2": fn(ds3.chunk({"z": 2})), "/group1/subgroup1": fn(ds4.chunk({"x": 5})), } ) original_chunksizes = tree.chunksizes original_hlg_depths = { node.path: len(node.dataset.__dask_graph__().layers) for node in tree.subtree } actual = tree.persist() actual_hlg_depths = { node.path: 
len(node.dataset.__dask_graph__().layers) for node in actual.subtree } assert_identical(actual, expected) assert actual.chunksizes == original_chunksizes, "chunksizes were modified" assert tree.chunksizes == original_chunksizes, ( "original chunksizes were modified" ) assert all(d == 1 for d in actual_hlg_depths.values()), ( "unexpected dask graph depth" ) assert all(d == 2 for d in original_hlg_depths.values()), ( "original dask graph was modified" ) def test_chunk(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) expected = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) tree = xr.DataTree.from_dict( {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} ) actual = tree.chunk({"x": 5, "y": 3, "z": 2}) assert_identical(actual, expected) assert actual.chunksizes == expected.chunksizes with pytest.raises(TypeError, match="invalid type"): tree.chunk(None) with pytest.raises(TypeError, match="invalid type"): tree.chunk((1, 2)) with pytest.raises(ValueError, match="not found in data dimensions"): tree.chunk({"u": 2}) xarray-2025.12.0/xarray/tests/test_datatree_mapping.py000066400000000000000000000226371511464676000227420ustar00rootroot00000000000000import re import numpy as np import pytest import xarray as xr from xarray.core.datatree_mapping import map_over_datasets from xarray.core.treenode import TreeIsomorphismError from xarray.testing import assert_equal, assert_identical empty = xr.Dataset() class TestMapOverSubTree: def test_no_trees_passed(self): with pytest.raises(TypeError, match="must pass at least one tree object"): map_over_datasets(lambda x: x, "dt") def test_not_isomorphic(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() dt2["set1/set2/extra"] = xr.DataTree(name="extra") with pytest.raises( TreeIsomorphismError, match=re.escape( r"children at node 'set1/set2' do not match: [] vs ['extra']" ), ): map_over_datasets(lambda x, y: None, dt1, dt2) def test_no_trees_returned(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() expected = xr.DataTree.from_dict(dict.fromkeys(dt1.to_dict())) actual = map_over_datasets(lambda x, y: None, dt1, dt2) assert_equal(expected, actual) def test_single_tree_arg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda x: 10.0 * x) result_tree = map_over_datasets(lambda x: 10 * x, dt) assert_equal(result_tree, expected) def test_single_tree_arg_plus_arg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: (10.0 * ds)) result_tree = map_over_datasets(lambda x, y: x * y, dt, 10.0) assert_equal(result_tree, expected) result_tree = map_over_datasets(lambda x, y: x * y, 10.0, dt) assert_equal(result_tree, expected) def test_single_tree_arg_plus_kwarg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: (10.0 * ds)) def multiply_by_kwarg(ds, **kwargs): ds = ds * kwargs.pop("multiplier") return ds result_tree = map_over_datasets( multiply_by_kwarg, dt, kwargs=dict(multiplier=10.0) ) assert_equal(result_tree, expected) def test_multiple_tree_args(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() expected = 
create_test_datatree(modify=lambda ds: 2.0 * ds) result = map_over_datasets(lambda x, y: x + y, dt1, dt2) assert_equal(result, expected) def test_return_multiple_trees(self, create_test_datatree): dt = create_test_datatree() dt_min, dt_max = map_over_datasets(lambda x: (x.min(), x.max()), dt) expected_min = create_test_datatree(modify=lambda ds: ds.min()) assert_equal(dt_min, expected_min) expected_max = create_test_datatree(modify=lambda ds: ds.max()) assert_equal(dt_max, expected_max) def test_return_wrong_type(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( "the result of calling func on the node at position '.' is not a " "Dataset or None or a tuple of such types" ), ): map_over_datasets(lambda x: "string", dt1) # type: ignore[arg-type,return-value] def test_return_tuple_of_wrong_types(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( "the result of calling func on the node at position '.' is not a " "Dataset or None or a tuple of such types" ), ): map_over_datasets(lambda x: (x, "string"), dt1) # type: ignore[arg-type,return-value] def test_return_inconsistent_number_of_results(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( r"Calling func on the nodes at position set1 returns a tuple " "of 0 datasets, whereas calling func on the nodes at position " ". instead returns a tuple of 2 datasets." ), ): # Datasets in simple_datatree have different numbers of dims map_over_datasets(lambda ds: tuple((None,) * len(ds.dims)), dt1) def test_wrong_number_of_arguments_for_func(self, simple_datatree): dt = simple_datatree with pytest.raises( TypeError, match="takes 1 positional argument but 2 were given" ): map_over_datasets(lambda x: 10 * x, dt, dt) def test_map_single_dataset_against_whole_tree(self, create_test_datatree): dt = create_test_datatree() def nodewise_merge(node_ds, fixed_ds): return xr.merge([node_ds, fixed_ds]) other_ds = xr.Dataset({"z": ("z", [0])}) expected = create_test_datatree(modify=lambda ds: xr.merge([ds, other_ds])) result_tree = map_over_datasets(nodewise_merge, dt, other_ds) assert_equal(result_tree, expected) @pytest.mark.xfail def test_trees_with_different_node_names(self): # TODO test this after I've got good tests for renaming nodes raise NotImplementedError def test_tree_method(self, create_test_datatree): dt = create_test_datatree() def multiply(ds, times): return times * ds expected = create_test_datatree(modify=lambda ds: 10.0 * ds) result_tree = dt.map_over_datasets(multiply, 10.0) assert_equal(result_tree, expected) def test_tree_method_with_kwarg(self, create_test_datatree): dt = create_test_datatree() def multiply(ds, **kwargs): return kwargs.pop("times") * ds expected = create_test_datatree(modify=lambda ds: 10.0 * ds) result_tree = dt.map_over_datasets(multiply, kwargs=dict(times=10.0)) assert_equal(result_tree, expected) def test_discard_ancestry(self, create_test_datatree): # Check for datatree GH issue https://github.com/xarray-contrib/datatree/issues/48 dt = create_test_datatree() subtree = dt["set1"] expected = create_test_datatree(modify=lambda ds: 10.0 * ds)["set1"] result_tree = map_over_datasets(lambda x: 10.0 * x, subtree) assert_equal(result_tree, expected) def test_keep_attrs_on_empty_nodes(self, create_test_datatree): # GH278 dt = create_test_datatree() dt["set1/set2"].attrs["foo"] = "bar" def empty_func(ds): return ds result = dt.map_over_datasets(empty_func) assert result["set1/set2"].attrs == 
dt["set1/set2"].attrs def test_error_contains_path_of_offending_node(self, create_test_datatree): dt = create_test_datatree() dt["set1"]["bad_var"] = 0 print(dt) def fail_on_specific_node(ds): if "bad_var" in ds: raise ValueError("Failed because 'bar_var' present in dataset") with pytest.raises( ValueError, match=re.escape( r"Raised whilst mapping function over node(s) with path 'set1'" ), ): dt.map_over_datasets(fail_on_specific_node) def test_inherited_coordinates_with_index(self): root = xr.Dataset(coords={"x": [1, 2]}) child = xr.Dataset({"foo": ("x", [0, 1])}) # no coordinates tree = xr.DataTree.from_dict({"/": root, "/child": child}) actual = tree.map_over_datasets(lambda ds: ds) # identity assert isinstance(actual, xr.DataTree) assert_identical(tree, actual) actual_child = actual.children["child"].to_dataset(inherit=False) assert_identical(actual_child, child) class TestMutableOperations: def test_construct_using_type(self): # from datatree GH issue https://github.com/xarray-contrib/datatree/issues/188 # xarray's .weighted is unusual because it uses type() to create a Dataset/DataArray a = xr.DataArray( np.random.rand(3, 4, 10), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(3, 4))}, ).to_dataset(name="data") b = xr.DataArray( np.random.rand(2, 6, 14), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(2, 6))}, ).to_dataset(name="data") dt = xr.DataTree.from_dict({"a": a, "b": b}) def weighted_mean(ds): if "area" not in ds.coords: return None return ds.weighted(ds.area).mean(["x", "y"]) dt.map_over_datasets(weighted_mean) def test_alter_inplace_forbidden(self): simpsons = xr.DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) def fast_forward(ds: xr.Dataset, years: float) -> xr.Dataset: """Add some years to the age, but by altering the given dataset""" ds["age"] = ds["age"] + years return ds with pytest.raises(AttributeError): simpsons.map_over_datasets(fast_forward, 10) xarray-2025.12.0/xarray/tests/test_datatree_typing.yml000066400000000000000000000205171511464676000227650ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import DataTree dt = DataTree().pipe(lambda data: data) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import DataTree dt = DataTree().pipe(lambda data, arg: arg, "foo") reveal_type(dt) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import DataTree answer = DataTree().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import DataTree # Call to pipe missing argument for lambda parameter `arg` dt = DataTree().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import DataTree # Call to pipe with 
extra argument for lambda dt = DataTree().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import DataTree def f(dt: DataTree, arg: int) -> DataTree: return dt # Call to pipe missing argument for function parameter `arg` dt = DataTree().pipe(f) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[DataTree, int], DataTree]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import DataTree def f(dt: DataTree, arg: int) -> DataTree: return dt # Call to pipe missing keyword for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int], DataTree]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe missing argument for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int, NamedArg(int, 'kwonly')], DataTree]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe missing keyword for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int, NamedArg(int, 'kwonly')], DataTree]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword skip: True # mypy 1.18+ outputs "defined here" notes without line numbers (e.g., "xarray/core/datatree.py: note:...") # pytest-mypy-plugins expects all lines to match "file:line: severity: message" format and can't parse these notes. # This is a mypy behavior, not a bug. The test would need pytest-mypy-plugins to support notes without line numbers. 
main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe using wrong keyword: `kw` instead of `kwonly` dt = DataTree().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataTree" [call-arg] # Note: mypy 1.18.1 also outputs a "defined here" note that pytest-mypy-plugins can't parse - case: test_mypy_pipe_tuple_return_type_datatree main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> DataTree: return dt dt = DataTree().pipe((f, "dt"), 42) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> int: return arg answer = DataTree().pipe((f, "dt"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> DataTree: return dt # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. dt = DataTree().pipe((f, "dt")) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. dt = DataTree().pipe(lambda data, arg: f(arg, data)) out: | main:17: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[Any, Any], DataTree]" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_tuple_extra_arg main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> DataTree: return dt # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are providing too many args for `f`, so we get no error here. dt = DataTree().pipe((f, "dt"), 42, "foo") reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we passed too many arguments. 
dt = DataTree().pipe(lambda data, arg: f(arg, data), 42, "foo") out: | main:17: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[Any, Any], DataTree]", "int", "str" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T xarray-2025.12.0/xarray/tests/test_deprecation_helpers.py000066400000000000000000000110261511464676000234430ustar00rootroot00000000000000import pytest from xarray.util.deprecation_helpers import _deprecate_positional_args def test_deprecate_positional_args_warns_for_function(): @_deprecate_positional_args("v0.1") def f1(a, b, *, c="c", d="d"): return a, b, c, d result = f1(1, 2) assert result == (1, 2, "c", "d") result = f1(1, 2, c=3, d=4) assert result == (1, 2, 3, 4) with pytest.warns(FutureWarning, match=r".*v0.1"): result = f1(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = f1(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = f1(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) @_deprecate_positional_args("v0.1") def f2(a="a", *, b="b", c="c", d="d"): return a, b, c, d with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f2(1, 2) # type: ignore[misc] assert result == (1, 2, "c", "d") @_deprecate_positional_args("v0.1") def f3(a, *, b="b", **kwargs): return a, b, kwargs with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f3(1, 2) # type: ignore[misc] assert result == (1, 2, {}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f3(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) @_deprecate_positional_args("v0.1") def f4(a, /, *, b="b", **kwargs): return a, b, kwargs result = f4(1) assert result == (1, "b", {}) result = f4(1, b=2, f="f") assert result == (1, 2, {"f": "f"}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f4(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) with pytest.raises(TypeError, match=r"Keyword-only param without default"): @_deprecate_positional_args("v0.1") def f5(a, *, b, c=3, **kwargs): pass def test_deprecate_positional_args_warns_for_class(): class A1: @_deprecate_positional_args("v0.1") def method(self, a, b, *, c="c", d="d"): return a, b, c, d result = A1().method(1, 2) assert result == (1, 2, "c", "d") result = A1().method(1, 2, c=3, d=4) assert result == (1, 2, 3, 4) with pytest.warns(FutureWarning, match=r".*v0.1"): result = A1().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = A1().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = A1().method(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) class A2: @_deprecate_positional_args("v0.1") def method(self, a=1, b=1, *, c="c", d="d"): return a, b, c, d with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = A2().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = 
A2().method(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) class A3: @_deprecate_positional_args("v0.1") def method(self, a, *, b="b", **kwargs): return a, b, kwargs with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A3().method(1, 2) # type: ignore[misc] assert result == (1, 2, {}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A3().method(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) class A4: @_deprecate_positional_args("v0.1") def method(self, a, /, *, b="b", **kwargs): return a, b, kwargs result = A4().method(1) assert result == (1, "b", {}) result = A4().method(1, b=2, f="f") assert result == (1, 2, {"f": "f"}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A4().method(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) with pytest.raises(TypeError, match=r"Keyword-only param without default"): class A5: @_deprecate_positional_args("v0.1") def __init__(self, a, *, b, c=3, **kwargs): pass xarray-2025.12.0/xarray/tests/test_distributed.py000066400000000000000000000243651511464676000217600ustar00rootroot00000000000000"""isort:skip_file""" from __future__ import annotations import pickle from typing import TYPE_CHECKING, Any import numpy as np import pytest if TYPE_CHECKING: import dask import dask.array as da import distributed else: dask = pytest.importorskip("dask") da = pytest.importorskip("dask.array") distributed = pytest.importorskip("distributed") import contextlib from dask.distributed import Client, Lock from distributed.client import futures_of from distributed.utils_test import ( cleanup, # noqa: F401 client, # noqa: F401 cluster, cluster_fixture, # noqa: F401 gen_cluster, loop, # noqa: F401 loop_in_thread, # noqa: F401 ) import xarray as xr from xarray.backends.locks import HDF5_LOCK, CombinedLock, SerializableLock from xarray.tests import ( assert_allclose, assert_identical, has_h5netcdf, has_netCDF4, has_scipy, requires_cftime, requires_netCDF4, requires_zarr, ) from xarray.tests.test_backends import ( ON_WINDOWS, create_tmp_file, ) from xarray.tests.test_dataset import create_test_data @pytest.fixture def tmp_netcdf_filename(tmpdir): return str(tmpdir.join("testfile.nc")) ENGINES = [] if has_scipy: ENGINES.append("scipy") if has_netCDF4: ENGINES.append("netcdf4") if has_h5netcdf: ENGINES.append("h5netcdf") NC_FORMATS = { "netcdf4": [ "NETCDF3_CLASSIC", "NETCDF3_64BIT_OFFSET", "NETCDF3_64BIT_DATA", "NETCDF4_CLASSIC", "NETCDF4", ], "scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"], "h5netcdf": ["NETCDF4"], } ENGINES_AND_FORMATS = [ ("netcdf4", "NETCDF3_CLASSIC"), ("netcdf4", "NETCDF4_CLASSIC"), ("netcdf4", "NETCDF4"), ("h5netcdf", "NETCDF4"), ("scipy", "NETCDF3_64BIT"), ] @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) @pytest.mark.parametrize("compute", [True, False]) def test_dask_distributed_netcdf_roundtrip( loop, # noqa: F811 tmp_netcdf_filename, engine, nc_format, compute, ): if engine not in ENGINES: pytest.skip("engine not available") chunks = {"dim1": 4, "dim2": 3, "dim3": 6} with cluster() as (s, [_a, _b]): with Client(s["address"], loop=loop): original = create_test_data().chunk(chunks) if engine == "scipy": with pytest.raises(NotImplementedError): original.to_netcdf( tmp_netcdf_filename, engine=engine, format=nc_format ) return result = original.to_netcdf( tmp_netcdf_filename, engine=engine, format=nc_format, compute=compute ) if not compute: result.compute() with xr.open_dataset( tmp_netcdf_filename, 
chunks=chunks, engine=engine ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() assert_allclose(original, computed) @requires_netCDF4 def test_dask_distributed_write_netcdf_with_dimensionless_variables( loop, # noqa: F811 tmp_netcdf_filename, ): with cluster() as (s, [_a, _b]): with Client(s["address"], loop=loop): original = xr.Dataset({"x": da.zeros(())}) original.to_netcdf(tmp_netcdf_filename) with xr.open_dataset(tmp_netcdf_filename) as actual: assert actual.x.shape == () @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_can_open_files_with_cftime_index(parallel, tmp_path): T = xr.date_range("20010101", "20010501", calendar="360_day", use_cftime=True) Lon = np.arange(100) data = np.random.random((T.size, Lon.size)) da = xr.DataArray(data, coords={"time": T, "Lon": Lon}, name="test") file_path = tmp_path / "test.nc" da.to_netcdf(file_path) with cluster() as (s, [_a, _b]): with Client(s["address"]): with xr.open_mfdataset(file_path, parallel=parallel) as tf: assert_identical(tf["test"], da) @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_multiple_files_parallel_distributed(parallel, tmp_path): lon = np.arange(100) time = xr.date_range("20010101", periods=100, calendar="360_day", use_cftime=True) data = np.random.random((time.size, lon.size)) da = xr.DataArray(data, coords={"time": time, "lon": lon}, name="test") fnames = [] for i in range(0, 100, 10): fname = tmp_path / f"test_{i}.nc" da.isel(time=slice(i, i + 10)).to_netcdf(fname) fnames.append(fname) with cluster() as (s, [_a, _b]): with Client(s["address"]): with xr.open_mfdataset( fnames, parallel=parallel, concat_dim="time", combine="nested" ) as tf: assert_identical(tf["test"], da) # TODO: move this to test_backends.py @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_multiple_files_parallel(parallel, tmp_path): if parallel: pytest.skip( "Flaky in CI. Would be a welcome contribution to make a similar test reliable." 
) lon = np.arange(100) time = xr.date_range("20010101", periods=100, calendar="360_day", use_cftime=True) data = np.random.random((time.size, lon.size)) da = xr.DataArray(data, coords={"time": time, "lon": lon}, name="test") fnames = [] for i in range(0, 100, 10): fname = tmp_path / f"test_{i}.nc" da.isel(time=slice(i, i + 10)).to_netcdf(fname) fnames.append(fname) for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]: with dask.config.set(scheduler=get): with xr.open_mfdataset( fnames, parallel=parallel, concat_dim="time", combine="nested" ) as tf: assert_identical(tf["test"], da) @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) def test_dask_distributed_read_netcdf_integration_test( loop, # noqa: F811 tmp_netcdf_filename, engine, nc_format, ): if engine not in ENGINES: pytest.skip("engine not available") chunks = {"dim1": 4, "dim2": 3, "dim3": 6} with cluster() as (s, [_a, _b]): with Client(s["address"], loop=loop): original = create_test_data() original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format) with xr.open_dataset( tmp_netcdf_filename, chunks=chunks, engine=engine ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() assert_allclose(original, computed) # fixture vendored from dask # heads-up, this is using quite private zarr API # https://github.com/dask/dask/blob/e04734b4d8959ba259801f2e2a490cb4ee8d891f/dask/tests/test_distributed.py#L338-L358 @pytest.fixture def zarr(client): # noqa: F811 zarr_lib = pytest.importorskip("zarr") # Zarr-Python 3 lazily allocates a dedicated thread/IO loop # for to execute async tasks. To avoid having this thread # be picked up as a "leaked thread", we manually trigger it's # creation before using zarr try: _ = zarr_lib.core.sync._get_loop() _ = zarr_lib.core.sync._get_executor() yield zarr_lib except AttributeError: yield zarr_lib finally: # Zarr-Python 3 lazily allocates an IO thread, a thread pool executor, and # an IO loop. Here we clean up these resources to avoid leaking threads # In normal operations, this is done as by an atexit handler when Zarr # is shutting down. with contextlib.suppress(AttributeError): zarr_lib.core.sync.cleanup_resources() @requires_zarr @pytest.mark.parametrize("consolidated", [True, False]) @pytest.mark.parametrize("compute", [True, False]) def test_dask_distributed_zarr_integration_test( client, # noqa: F811 zarr, consolidated: bool, compute: bool, ) -> None: if consolidated: write_kwargs: dict[str, Any] = {"consolidated": True} read_kwargs: dict[str, Any] = {"backend_kwargs": {"consolidated": True}} else: write_kwargs = read_kwargs = {} chunks = {"dim1": 4, "dim2": 3, "dim3": 5} original = create_test_data().chunk(chunks) with create_tmp_file(allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc") as filename: maybe_futures = original.to_zarr( # type: ignore[call-overload] #mypy bug? 
filename, compute=compute, **write_kwargs ) if not compute: maybe_futures.compute() with xr.open_dataset( filename, chunks="auto", engine="zarr", **read_kwargs ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() assert_allclose(original, computed) @gen_cluster(client=True) async def test_async(c, s, a, b) -> None: x = create_test_data() assert not dask.is_dask_collection(x) y = x.chunk({"dim2": 4}) + 10 assert dask.is_dask_collection(y) assert dask.is_dask_collection(y.var1) assert dask.is_dask_collection(y.var2) z = c.persist(y) assert str(z) assert dask.is_dask_collection(z) assert dask.is_dask_collection(z.var1) assert dask.is_dask_collection(z.var2) assert len(y.__dask_graph__()) > len(z.__dask_graph__()) assert not futures_of(y) assert futures_of(z) future = c.compute(z) w = await future assert not dask.is_dask_collection(w) assert_allclose(x + 10, w) assert s.tasks def test_hdf5_lock() -> None: assert isinstance(HDF5_LOCK, SerializableLock) @gen_cluster(client=True) async def test_serializable_locks(c, s, a, b) -> None: def f(x, lock=None): with lock: return x + 1 # note, the creation of Lock needs to be done inside a cluster for lock in [ HDF5_LOCK, Lock(), Lock("filename.nc"), CombinedLock([HDF5_LOCK]), CombinedLock([HDF5_LOCK, Lock("filename.nc")]), ]: futures = c.map(f, list(range(10)), lock=lock) await c.gather(futures) lock2 = pickle.loads(pickle.dumps(lock)) assert type(lock) is type(lock2) xarray-2025.12.0/xarray/tests/test_dtypes.py000066400000000000000000000137331511464676000207430ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pytest from xarray.core import dtypes from xarray.tests import requires_array_api_strict try: import array_api_strict except ImportError: class DummyArrayAPINamespace: bool = None # type: ignore[unused-ignore,var-annotated] int32 = None # type: ignore[unused-ignore,var-annotated] float64 = None # type: ignore[unused-ignore,var-annotated] array_api_strict = DummyArrayAPINamespace @pytest.mark.parametrize( "args, expected", [ ([bool], bool), ([bool, np.bytes_], np.object_), ([np.float32, np.float64], np.float64), ([np.float32, np.bytes_], np.object_), ([np.str_, np.int64], np.object_), ([np.str_, np.str_], np.str_), ([np.bytes_, np.str_], np.object_), ([np.dtype(" None: actual = dtypes.result_type(*args) assert actual == expected @pytest.mark.parametrize( ["values", "expected"], ( ([np.arange(3, dtype="float32"), np.nan], np.float32), ([np.arange(3, dtype="int8"), 1], np.int8), ([np.array(["a", "b"], dtype=str), np.nan], object), ([np.array([b"a", b"b"], dtype=bytes), True], object), ([np.array([b"a", b"b"], dtype=bytes), "c"], object), ([np.array(["a", "b"], dtype=str), "c"], np.dtype(str)), ([np.array(["a", "b"], dtype=str), None], object), ([0, 1], np.dtype("int")), ), ) def test_result_type_scalars(values, expected) -> None: actual = dtypes.result_type(*values) assert np.issubdtype(actual, expected) def test_result_type_dask_array() -> None: # verify it works without evaluating dask arrays da = pytest.importorskip("dask.array") dask = pytest.importorskip("dask") def error(): raise RuntimeError array = da.from_delayed(dask.delayed(error)(), (), np.float64) with pytest.raises(RuntimeError): array.compute() actual = dtypes.result_type(array) assert actual == np.float64 # note that this differs from the behavior for scalar numpy arrays, which # would get promoted to float32 actual = dtypes.result_type(array, np.array([0.5, 1.0], dtype=np.float32)) assert actual == 
np.float64 @pytest.mark.parametrize("obj", [1.0, np.inf, "ab", 1.0 + 1.0j, True]) def test_inf(obj) -> None: assert dtypes.INF > obj assert dtypes.NINF < obj @pytest.mark.parametrize( "kind, expected", [ ("b", (np.float32, "nan")), # dtype('int8') ("B", (np.float32, "nan")), # dtype('uint8') ("c", (np.dtype("O"), "nan")), # dtype('S1') ("D", (np.complex128, "(nan+nanj)")), # dtype('complex128') ("d", (np.float64, "nan")), # dtype('float64') ("e", (np.float16, "nan")), # dtype('float16') ("F", (np.complex64, "(nan+nanj)")), # dtype('complex64') ("f", (np.float32, "nan")), # dtype('float32') ("h", (np.float32, "nan")), # dtype('int16') ("H", (np.float32, "nan")), # dtype('uint16') ("i", (np.float64, "nan")), # dtype('int32') ("I", (np.float64, "nan")), # dtype('uint32') ("l", (np.float64, "nan")), # dtype('int64') ("L", (np.float64, "nan")), # dtype('uint64') ("m", (np.timedelta64, "NaT")), # dtype(' None: # 'g': np.float128 is not tested : not available on all platforms # 'G': np.complex256 is not tested : not available on all platforms actual = dtypes.maybe_promote(np.dtype(kind)) assert actual[0] == expected[0] assert str(actual[1]) == expected[1] def test_nat_types_membership() -> None: assert np.datetime64("NaT").dtype in dtypes.NAT_TYPES assert np.timedelta64("NaT").dtype in dtypes.NAT_TYPES assert np.float64 not in dtypes.NAT_TYPES @pytest.mark.parametrize( ["dtype", "kinds", "xp", "expected"], ( (np.dtype("int32"), "integral", np, True), (np.dtype("float16"), "real floating", np, True), (np.dtype("complex128"), "complex floating", np, True), (np.dtype("U"), "numeric", np, False), pytest.param( array_api_strict.int32, "integral", array_api_strict, True, marks=requires_array_api_strict, id="array_api-int", ), pytest.param( array_api_strict.float64, "real floating", array_api_strict, True, marks=requires_array_api_strict, id="array_api-float", ), pytest.param( array_api_strict.bool, "numeric", array_api_strict, False, marks=requires_array_api_strict, id="array_api-bool", ), ), ) def test_isdtype(dtype, kinds, xp, expected) -> None: actual = dtypes.isdtype(dtype, kinds, xp=xp) assert actual == expected @pytest.mark.parametrize( ["dtype", "kinds", "xp", "error", "pattern"], ( (np.dtype("int32"), "foo", np, (TypeError, ValueError), "kind"), (np.dtype("int32"), np.signedinteger, np, TypeError, "kind"), (np.dtype("float16"), 1, np, TypeError, "kind"), ), ) def test_isdtype_error(dtype, kinds, xp, error, pattern): with pytest.raises(error, match=pattern): dtypes.isdtype(dtype, kinds, xp=xp) xarray-2025.12.0/xarray/tests/test_duck_array_ops.py000066400000000000000000001134021511464676000224320ustar00rootroot00000000000000from __future__ import annotations import copy import datetime as dt import pickle import warnings from typing import Any import numpy as np import pandas as pd import pytest from numpy import array, nan from xarray import DataArray, Dataset, concat, date_range from xarray.coding.times import _NS_PER_TIME_DELTA from xarray.core import dtypes, duck_array_ops from xarray.core.duck_array_ops import ( array_notnull_equiv, concatenate, count, first, gradient, last, least_squares, mean, np_timedelta64_to_float, pd_timedelta_to_float, push, py_timedelta_to_float, stack, timedelta_to_numeric, where, ) from xarray.core.extension_array import PandasExtensionArray from xarray.core.types import NPDatetimeUnitOptions, PDDatetimeUnitOptions from xarray.namedarray.pycompat import array_type from xarray.testing import assert_allclose, assert_equal, assert_identical from xarray.tests import ( 
arm_xfail, assert_array_equal, has_dask, has_scipy, raise_if_dask_computes, requires_bottleneck, requires_cftime, requires_dask, requires_pyarrow, ) dask_array_type = array_type("dask") @pytest.fixture def categorical1(): return pd.Categorical(["cat1", "cat2", "cat2", "cat1", "cat2"]) @pytest.fixture def categorical2(): return pd.Categorical(["cat2", "cat1", "cat2", "cat3", "cat1"]) try: import pyarrow as pa @pytest.fixture def arrow1(): return pd.arrays.ArrowExtensionArray( # type: ignore[attr-defined] pa.array([{"x": 1, "y": True}, {"x": 2, "y": False}]) ) @pytest.fixture def arrow2(): return pd.arrays.ArrowExtensionArray( # type: ignore[attr-defined] pa.array([{"x": 3, "y": False}, {"x": 4, "y": True}]) ) except ImportError: pass @pytest.fixture def int1(): return pd.arrays.IntegerArray( np.array([1, 2, 3, 4, 5]), np.array([True, False, False, True, True]) ) @pytest.fixture def int2(): return pd.arrays.IntegerArray( np.array([6, 7, 8, 9, 10]), np.array([True, True, False, True, False]) ) class TestOps: @pytest.fixture(autouse=True) def setUp(self): self.x = array( [ [ [nan, nan, 2.0, nan], [nan, 5.0, 6.0, nan], [8.0, 9.0, 10.0, nan], ], [ [nan, 13.0, 14.0, 15.0], [nan, 17.0, 18.0, nan], [nan, 21.0, nan, nan], ], ] ) def test_first(self): expected_results = [ array([[nan, 13, 2, 15], [nan, 5, 6, nan], [8, 9, 10, nan]]), array([[8, 5, 2, nan], [nan, 13, 14, 15]]), array([[2, 5, 8], [13, 17, 21]]), ] for axis, expected in zip( [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True ): actual = first(self.x, axis) assert_array_equal(expected, actual) expected = self.x[0] actual = first(self.x, axis=0, skipna=False) assert_array_equal(expected, actual) expected = self.x[..., 0] actual = first(self.x, axis=-1, skipna=False) assert_array_equal(expected, actual) with pytest.raises(IndexError, match=r"out of bounds"): first(self.x, 3) def test_last(self): expected_results = [ array([[nan, 13, 14, 15], [nan, 17, 18, nan], [8, 21, 10, nan]]), array([[8, 9, 10, nan], [nan, 21, 18, 15]]), array([[2, 6, 10], [15, 18, 21]]), ] for axis, expected in zip( [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True ): actual = last(self.x, axis) assert_array_equal(expected, actual) expected = self.x[-1] actual = last(self.x, axis=0, skipna=False) assert_array_equal(expected, actual) expected = self.x[..., -1] actual = last(self.x, axis=-1, skipna=False) assert_array_equal(expected, actual) with pytest.raises(IndexError, match=r"out of bounds"): last(self.x, 3) def test_count(self): assert 12 == count(self.x) expected = array([[1, 2, 3], [3, 2, 1]]) assert_array_equal(expected, count(self.x, axis=-1)) assert 1 == count(np.datetime64("2000-01-01")) def test_where_type_promotion(self): result = where(np.array([True, False]), np.array([1, 2]), np.array(["a", "b"])) assert_array_equal(result, np.array([1, "b"], dtype=object)) result = where([True, False], np.array([1, 2], np.float32), np.nan) assert result.dtype == np.float32 assert_array_equal(result, np.array([1, np.nan], dtype=np.float32)) def test_where_extension_duck_array(self, categorical1, categorical2): where_res = where( np.array([True, False, True, False, False]), PandasExtensionArray(categorical1), PandasExtensionArray(categorical2), ) assert isinstance(where_res, PandasExtensionArray) assert ( where_res == pd.Categorical(["cat1", "cat1", "cat2", "cat3", "cat1"]) ).all() def test_concatenate_extension_duck_array(self, categorical1, categorical2): concate_res = concatenate( [PandasExtensionArray(categorical1), PandasExtensionArray(categorical2)] ) 
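        # The assertions that follow check two things: concatenate() keeps the
        # result wrapped as a PandasExtensionArray rather than densifying to a
        # NumPy object array, and its values match pandas' own
        # Categorical._concat_same_type applied to the two inputs in order.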
assert isinstance(concate_res, PandasExtensionArray) assert ( concate_res == type(categorical1)._concat_same_type((categorical1, categorical2)) ).all() @requires_pyarrow def test_extension_array_pyarrow_concatenate(self, arrow1, arrow2): concatenated = concatenate( (PandasExtensionArray(arrow1), PandasExtensionArray(arrow2)) ) assert concatenated[2].array[0]["x"] == 3 assert concatenated[3].array[0]["y"] @requires_pyarrow def test_extension_array_copy_arrow_type(self): arr = pd.array([pd.NA, 1, 2], dtype="int64[pyarrow]") # Relying on the `__getattr__` of `PandasExtensionArray` to do the deep copy # recursively only fails for `int64[pyarrow]` and similar types so this # test ensures that copying still works there. assert isinstance( copy.deepcopy(PandasExtensionArray(arr), memo=None).array, type(arr) ) def test___getitem__extension_duck_array(self, categorical1): extension_duck_array = PandasExtensionArray(categorical1) assert (extension_duck_array[0:2] == categorical1[0:2]).all() assert isinstance(extension_duck_array[0:2], PandasExtensionArray) assert extension_duck_array[0] == categorical1[0] assert isinstance(extension_duck_array[0], PandasExtensionArray) mask = [True, False, True, False, True] assert (extension_duck_array[mask] == categorical1[mask]).all() def test__setitem__extension_duck_array(self, categorical1): extension_duck_array = PandasExtensionArray(categorical1) extension_duck_array[2] = "cat1" # already existing category assert extension_duck_array[2] == "cat1" with pytest.raises(TypeError, match="Cannot setitem on a Categorical"): extension_duck_array[2] = "cat4" # new category def test_stack_type_promotion(self): result = stack([1, "b"]) assert_array_equal(result, np.array([1, "b"], dtype=object)) def test_concatenate_type_promotion(self): result = concatenate([np.array([1]), np.array(["b"])]) assert_array_equal(result, np.array([1, "b"], dtype=object)) @pytest.mark.filterwarnings("error") def test_all_nan_arrays(self): assert np.isnan(mean([np.nan, np.nan])) @requires_dask class TestDaskOps(TestOps): @pytest.fixture(autouse=True) def setUp(self): import dask.array self.x = dask.array.from_array( [ [ [nan, nan, 2.0, nan], [nan, 5.0, 6.0, nan], [8.0, 9.0, 10.0, nan], ], [ [nan, 13.0, 14.0, 15.0], [nan, 17.0, 18.0, nan], [nan, 21.0, nan, nan], ], ], chunks=(2, 1, 2), ) def test_cumsum_1d(): inputs = np.array([0, 1, 2, 3]) expected = np.array([0, 1, 3, 6]) actual = duck_array_ops.cumsum(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=0) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=-1) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=(0,)) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=()) assert_array_equal(inputs, actual) def test_cumsum_2d(): inputs = np.array([[1, 2], [3, 4]]) expected = np.array([[1, 3], [4, 10]]) actual = duck_array_ops.cumsum(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=(0, 1)) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=()) assert_array_equal(inputs, actual) def test_cumprod_2d(): inputs = np.array([[1, 2], [3, 4]]) expected = np.array([[1, 2], [3, 2 * 3 * 4]]) actual = duck_array_ops.cumprod(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumprod(inputs, axis=(0, 1)) assert_array_equal(expected, actual) actual = duck_array_ops.cumprod(inputs, axis=()) assert_array_equal(inputs, actual) class 
TestArrayNotNullEquiv: @pytest.mark.parametrize( "arr1, arr2", [ (np.array([1, 2, 3]), np.array([1, 2, 3])), (np.array([1, 2, np.nan]), np.array([1, np.nan, 3])), (np.array([np.nan, 2, np.nan]), np.array([1, np.nan, np.nan])), ], ) def test_equal(self, arr1, arr2): assert array_notnull_equiv(arr1, arr2) def test_some_not_equal(self): a = np.array([1, 2, 4]) b = np.array([1, np.nan, 3]) assert not array_notnull_equiv(a, b) def test_wrong_shape(self): a = np.array([[1, np.nan, np.nan, 4]]) b = np.array([[1, 2], [np.nan, 4]]) assert not array_notnull_equiv(a, b) @pytest.mark.parametrize( "val1, val2, val3, null", [ ( np.datetime64("2000"), np.datetime64("2001"), np.datetime64("2002"), np.datetime64("NaT"), ), (1.0, 2.0, 3.0, np.nan), ("foo", "bar", "baz", None), ("foo", "bar", "baz", np.nan), ], ) def test_types(self, val1, val2, val3, null): dtype = object if isinstance(val1, str) else None arr1 = np.array([val1, null, val3, null], dtype=dtype) arr2 = np.array([val1, val2, null, null], dtype=dtype) assert array_notnull_equiv(arr1, arr2) def construct_dataarray(dim_num, dtype, contains_nan, dask): # dimnum <= 3 rng = np.random.default_rng(0) shapes = [16, 8, 4][:dim_num] dims = ("x", "y", "z")[:dim_num] if np.issubdtype(dtype, np.floating): array = rng.random(shapes).astype(dtype) elif np.issubdtype(dtype, np.integer): array = rng.integers(0, 10, size=shapes).astype(dtype) elif np.issubdtype(dtype, np.bool_): array = rng.integers(0, 1, size=shapes).astype(dtype) elif dtype is str: array = rng.choice(["a", "b", "c", "d"], size=shapes) else: raise ValueError if contains_nan: inds = rng.choice(range(array.size), int(array.size * 0.2)) dtype, fill_value = dtypes.maybe_promote(array.dtype) array = array.astype(dtype) array.flat[inds] = fill_value da = DataArray(array, dims=dims, coords={"x": np.arange(16)}, name="da") if dask and has_dask: chunks = dict.fromkeys(dims, 4) da = da.chunk(chunks) return da def from_series_or_scalar(se): if isinstance(se, pd.Series): return DataArray.from_series(se) else: # scalar case return DataArray(se) def series_reduce(da, func, dim, **kwargs): """convert DataArray to pd.Series, apply pd.func, then convert back to a DataArray. 
Multiple dims cannot be specified.""" # pd no longer accepts skipna=None https://github.com/pandas-dev/pandas/issues/44178 if kwargs.get("skipna", True) is None: kwargs["skipna"] = True if dim is None or da.ndim == 1: se = da.to_series() return from_series_or_scalar(getattr(se, func)(**kwargs)) else: dims = list(da.dims) dims.remove(dim) d = dims[0] da1 = [ series_reduce(da.isel(**{d: i}), func, dim, **kwargs) for i in range(len(da[d])) ] if d in da.coords: return concat(da1, dim=da[d]) return concat(da1, dim=d) def assert_dask_array(da, dask): if dask and da.ndim > 0: assert isinstance(da.data, dask_array_type) @arm_xfail @pytest.mark.filterwarnings("ignore:All-NaN .* encountered:RuntimeWarning") @pytest.mark.parametrize("dask", [False, True] if has_dask else [False]) def test_datetime_mean(dask: bool, time_unit: PDDatetimeUnitOptions) -> None: # Note: only testing numpy, as dask is broken upstream dtype = f"M8[{time_unit}]" da = DataArray( np.array(["2010-01-01", "NaT", "2010-01-03", "NaT", "NaT"], dtype=dtype), dims=["time"], ) if dask: # Trigger use case where a chunk is full of NaT da = da.chunk({"time": 3}) expect = DataArray(np.array("2010-01-02", dtype="M8[ns]")) expect_nat = DataArray(np.array("NaT", dtype="M8[ns]")) actual = da.mean() if dask: assert actual.chunks is not None assert_equal(actual, expect) actual = da.mean(skipna=False) if dask: assert actual.chunks is not None assert_equal(actual, expect_nat) # tests for 1d array full of NaT assert_equal(da[[1]].mean(), expect_nat) assert_equal(da[[1]].mean(skipna=False), expect_nat) # tests for a 0d array assert_equal(da[0].mean(), da[0]) assert_equal(da[0].mean(skipna=False), da[0]) assert_equal(da[1].mean(), expect_nat) assert_equal(da[1].mean(skipna=False), expect_nat) @requires_cftime @pytest.mark.parametrize("dask", [False, True]) def test_cftime_datetime_mean(dask): if dask and not has_dask: pytest.skip("requires dask") times = date_range("2000", periods=4, use_cftime=True) da = DataArray(times, dims=["time"]) da_2d = DataArray(times.values.reshape(2, 2)) if dask: da = da.chunk({"time": 2}) da_2d = da_2d.chunk({"dim_0": 2}) expected = da.isel(time=0) # one compute needed to check the array contains cftime datetimes with raise_if_dask_computes(max_computes=1): result = da.isel(time=0).mean() assert_dask_array(result, dask) assert_equal(result, expected) expected = DataArray(times.date_type(2000, 1, 2, 12)) with raise_if_dask_computes(max_computes=1): result = da.mean() assert_dask_array(result, dask) assert_equal(result, expected) with raise_if_dask_computes(max_computes=1): result = da_2d.mean() assert_dask_array(result, dask) assert_equal(result, expected) @pytest.mark.parametrize("dask", [False, True]) def test_mean_over_long_spanning_datetime64(dask) -> None: if dask and not has_dask: pytest.skip("requires dask") array = np.array(["1678-01-01", "NaT", "2260-01-01"], dtype="datetime64[ns]") da = DataArray(array, dims=["time"]) if dask: da = da.chunk({"time": 2}) expected = DataArray(np.array("1969-01-01", dtype="datetime64[ns]")) result = da.mean() assert_equal(result, expected) @requires_cftime @requires_dask def test_mean_over_non_time_dim_of_dataset_with_dask_backed_cftime_data(): # Regression test for part two of GH issue 5897: averaging over a non-time # dimension still fails if the time variable is dask-backed. 
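    # The dataset below pairs a cftime-backed "var1" (on "time") with a plain
    # integer "var2" (on "x"); ``.chunk({})`` wraps both in dask arrays. The
    # mean over "x" only reduces "var2" and should pass the lazy cftime
    # variable through untouched instead of raising.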
ds = Dataset( { "var1": ( ("time",), date_range("2021-10-31", periods=10, freq="D", use_cftime=True), ), "var2": (("x",), list(range(10))), } ) expected = ds.mean("x") result = ds.chunk({}).mean("x") assert_equal(result, expected) @requires_cftime def test_cftime_datetime_mean_long_time_period(): import cftime times = np.array( [ [ cftime.DatetimeNoLeap(400, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0), ], [ cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0), ], [ cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(760, 12, 31, 0, 0, 0, 0), ], ] ) da = DataArray(times, dims=["time", "d2"]) result = da.mean("d2") expected = DataArray( [ cftime.DatetimeNoLeap(460, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(580, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(700, 12, 31, 0, 0, 0, 0), ], dims=["time"], ) assert_equal(result, expected) def test_empty_axis_dtype(): ds = Dataset() ds["pos"] = [1, 2, 3] ds["data"] = ("pos", "time"), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]] ds["var"] = "pos", [2, 3, 4] assert_identical(ds.mean(dim="time")["var"], ds["var"]) assert_identical(ds.max(dim="time")["var"], ds["var"]) assert_identical(ds.min(dim="time")["var"], ds["var"]) assert_identical(ds.sum(dim="time")["var"], ds["var"]) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "min", "max", "mean", "var"]) # TODO test cumsum, cumprod @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("aggdim", [None, "x"]) def test_reduce(dim_num, dtype, dask, func, skipna, aggdim): if aggdim == "y" and dim_num < 2: pytest.skip("dim not in this test") if dtype == np.bool_ and func == "mean": pytest.skip("numpy does not support this") if dask and not has_dask: pytest.skip("requires dask") if dask and skipna is False and dtype == np.bool_: pytest.skip("dask does not compute object-typed array") rtol = 1e-04 if dtype == np.float32 else 1e-05 da = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask) axis = None if aggdim is None else da.get_axis_num(aggdim) # TODO: remove these after resolving # https://github.com/dask/dask/issues/3245 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Mean of empty slice") warnings.filterwarnings("ignore", "All-NaN slice") warnings.filterwarnings("ignore", "invalid value encountered in") if da.dtype.kind == "O" and skipna: # Numpy < 1.13 does not handle object-type array. try: if skipna: expected = getattr(np, f"nan{func}")(da.values, axis=axis) else: expected = getattr(np, func)(da.values, axis=axis) actual = getattr(da, func)(skipna=skipna, dim=aggdim) assert_dask_array(actual, dask) np.testing.assert_allclose( actual.values, np.array(expected), rtol=1.0e-4, equal_nan=True ) except (TypeError, AttributeError, ZeroDivisionError): # TODO currently, numpy does not support some methods such as # nanmean for object dtype pass actual = getattr(da, func)(skipna=skipna, dim=aggdim) # for dask case, make sure the result is the same for numpy backend expected = getattr(da.compute(), func)(skipna=skipna, dim=aggdim) assert_allclose(actual, expected, rtol=rtol) # make sure the compatibility with pandas' results. 
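        # Note: pandas' var/std default to ddof=1 while xarray defaults to
        # ddof=0, so ddof=0 is passed explicitly on the pandas side to match,
        # and ddof=5 is then used on both sides as a non-default check. For
        # example, std of [1, 2, 3] is 1.0 with ddof=1 but ~0.816 with ddof=0.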
if func in ["var", "std"]: expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=0) assert_allclose(actual, expected, rtol=rtol) # also check ddof!=0 case actual = getattr(da, func)(skipna=skipna, dim=aggdim, ddof=5) if dask: assert isinstance(da.data, dask_array_type) expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=5) assert_allclose(actual, expected, rtol=rtol) else: expected = series_reduce(da, func, skipna=skipna, dim=aggdim) assert_allclose(actual, expected, rtol=rtol) # make sure the dtype argument if func not in ["max", "min"]: actual = getattr(da, func)(skipna=skipna, dim=aggdim, dtype=float) assert_dask_array(actual, dask) assert actual.dtype == float # without nan da = construct_dataarray(dim_num, dtype, contains_nan=False, dask=dask) actual = getattr(da, func)(skipna=skipna) if dask: assert isinstance(da.data, dask_array_type) expected = getattr(np, f"nan{func}")(da.values) if actual.dtype == object: assert actual.values == np.array(expected) else: assert np.allclose(actual.values, np.array(expected), rtol=rtol) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_, str]) @pytest.mark.parametrize("contains_nan", [True, False]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["min", "max"]) @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("aggdim", ["x", "y"]) def test_argmin_max(dim_num, dtype, contains_nan, dask, func, skipna, aggdim): # pandas-dev/pandas#16830, we do not check consistency with pandas but # just make sure da[da.argmin()] == da.min() if aggdim == "y" and dim_num < 2: pytest.skip("dim not in this test") if dask and not has_dask: pytest.skip("requires dask") if contains_nan: if not skipna: pytest.skip("numpy's argmin (not nanargmin) does not handle object-dtype") if skipna and np.dtype(dtype).kind in "iufc": pytest.skip("numpy's nanargmin raises ValueError for all nan axis") da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice") actual = da.isel( **{aggdim: getattr(da, "arg" + func)(dim=aggdim, skipna=skipna).compute()} ) expected = getattr(da, func)(dim=aggdim, skipna=skipna) assert_allclose( actual.drop_vars(list(actual.coords)), expected.drop_vars(list(expected.coords)), ) def test_argmin_max_error(): da = construct_dataarray(2, np.bool_, contains_nan=True, dask=False) da[0] = np.nan with pytest.raises(ValueError): da.argmin(dim="y") @pytest.mark.parametrize( ["array", "expected"], [ ( np.array([np.datetime64("2000-01-01"), np.datetime64("NaT")]), np.array([False, True]), ), ( np.array([np.timedelta64(1, "h"), np.timedelta64("NaT")]), np.array([False, True]), ), ( np.array([0.0, np.nan]), np.array([False, True]), ), ( np.array([1j, np.nan]), np.array([False, True]), ), ( np.array(["foo", np.nan], dtype=object), np.array([False, True]), ), ( np.array([1, 2], dtype=int), np.array([False, False]), ), ( np.array([True, False], dtype=bool), np.array([False, False]), ), ], ) def test_isnull(array, expected): actual = duck_array_ops.isnull(array) np.testing.assert_equal(expected, actual) @requires_dask def test_isnull_with_dask(): da = construct_dataarray(2, np.float32, contains_nan=True, dask=True) assert isinstance(da.isnull().data, dask_array_type) assert_equal(da.isnull().load(), da.load().isnull()) @pytest.mark.skipif(not has_dask, reason="This is for dask.") @pytest.mark.parametrize("axis", [0, -1, 1]) 
@pytest.mark.parametrize("edge_order", [1, 2]) def test_dask_gradient(axis, edge_order): import dask.array as da array = np.array(np.random.randn(100, 5, 40)) x = np.exp(np.linspace(0, 1, array.shape[axis])) darray = da.from_array(array, chunks=[(6, 30, 30, 20, 14), 5, 8]) expected = gradient(array, x, axis=axis, edge_order=edge_order) actual = gradient(darray, x, axis=axis, edge_order=edge_order) assert isinstance(actual, da.Array) assert_array_equal(actual, expected) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) @pytest.mark.parametrize("aggdim", [None, "x"]) @pytest.mark.parametrize("contains_nan", [True, False]) @pytest.mark.parametrize("skipna", [True, False, None]) def test_min_count(dim_num, dtype, dask, func, aggdim, contains_nan, skipna): if dask and not has_dask: pytest.skip("requires dask") da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask) min_count = 3 # If using Dask, the function call should be lazy. with raise_if_dask_computes(): actual = getattr(da, func)(dim=aggdim, skipna=skipna, min_count=min_count) expected = series_reduce(da, func, skipna=skipna, dim=aggdim, min_count=min_count) assert_allclose(actual, expected) assert_dask_array(actual, dask) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_min_count_nd(dtype, dask, func): if dask and not has_dask: pytest.skip("requires dask") min_count = 3 dim_num = 3 da = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask) # If using Dask, the function call should be lazy. with raise_if_dask_computes(): actual = getattr(da, func)( dim=["x", "y", "z"], skipna=True, min_count=min_count ) # Supplying all dims is equivalent to supplying `...` or `None` expected = getattr(da, func)(dim=..., skipna=True, min_count=min_count) assert_allclose(actual, expected) assert_dask_array(actual, dask) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) @pytest.mark.parametrize("dim", [None, "a", "b"]) def test_min_count_specific(dask, func, dim): if dask and not has_dask: pytest.skip("requires dask") # Simple array with four non-NaN values. da = DataArray(np.ones((6, 6), dtype=np.float64) * np.nan, dims=("a", "b")) da[0][0] = 2 da[0][3] = 2 da[3][0] = 2 da[3][3] = 2 if dask: da = da.chunk({"a": 3, "b": 3}) # Expected result if we set min_count to the number of non-NaNs in a # row/column/the entire array. if dim: min_count = 2 expected = DataArray( [4.0, np.nan, np.nan] * 2, dims=("a" if dim == "b" else "b",) ) else: min_count = 4 expected = DataArray(8.0 if func == "sum" else 16.0) # Check for that min_count. with raise_if_dask_computes(): actual = getattr(da, func)(dim, skipna=True, min_count=min_count) assert_dask_array(actual, dask) assert_allclose(actual, expected) # With min_count being one higher, should get all NaN. 
min_count += 1 expected *= np.nan with raise_if_dask_computes(): actual = getattr(da, func)(dim, skipna=True, min_count=min_count) assert_dask_array(actual, dask) assert_allclose(actual, expected) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_min_count_dataset(func): da = construct_dataarray(2, dtype=float, contains_nan=True, dask=False) ds = Dataset({"var1": da}, coords={"scalar": 0}) actual = getattr(ds, func)(dim="x", skipna=True, min_count=3)["var1"] expected = getattr(ds["var1"], func)(dim="x", skipna=True, min_count=3) assert_allclose(actual, expected) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_multiple_dims(dtype, dask, skipna, func): if dask and not has_dask: pytest.skip("requires dask") da = construct_dataarray(3, dtype, contains_nan=True, dask=dask) actual = getattr(da, func)(("x", "y"), skipna=skipna) expected = getattr(getattr(da, func)("x", skipna=skipna), func)("y", skipna=skipna) assert_allclose(actual, expected) @pytest.mark.parametrize("dask", [True, False]) def test_datetime_to_numeric_datetime64(dask, time_unit: PDDatetimeUnitOptions): if dask and not has_dask: pytest.skip("requires dask") times = pd.date_range("2000", periods=5, freq="7D").as_unit(time_unit).values if dask: import dask.array times = dask.array.from_array(times, chunks=-1) with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric(times, datetime_unit="h") expected = 24 * np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) offset = times[1] with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="h" ) expected = 24 * np.arange(-7, 28, 7) np.testing.assert_array_equal(result, expected) dtype = np.float32 with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, datetime_unit="h", dtype=dtype ) expected2 = 24 * np.arange(0, 35, 7).astype(dtype) np.testing.assert_array_equal(result, expected2) @requires_cftime @pytest.mark.parametrize("dask", [True, False]) def test_datetime_to_numeric_cftime(dask): if dask and not has_dask: pytest.skip("requires dask") times = date_range( "2000", periods=5, freq="7D", calendar="standard", use_cftime=True ).values if dask: import dask.array times = dask.array.from_array(times, chunks=-1) with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric(times, datetime_unit="h", dtype=int) expected = 24 * np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) offset = times[1] with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="h", dtype=int ) expected = 24 * np.arange(-7, 28, 7) np.testing.assert_array_equal(result, expected) dtype = np.float32 with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, datetime_unit="h", dtype=dtype ) expected2: Any = 24 * np.arange(0, 35, 7).astype(dtype) np.testing.assert_array_equal(result, expected2) with raise_if_dask_computes(): if dask: time = dask.array.asarray(times[1]) else: time = np.asarray(times[1]) result = duck_array_ops.datetime_to_numeric( time, offset=times[0], datetime_unit="h", dtype=int ) expected3 = np.array(24 * 7).astype(int) np.testing.assert_array_equal(result, expected3) @requires_cftime def test_datetime_to_numeric_potential_overflow(time_unit: PDDatetimeUnitOptions): import cftime if time_unit == "ns": 
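        # datetime64[ns] can only represent roughly 1678-2262, so the
        # year-1 offset used below is unrepresentable and the subtraction
        # would overflow.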
pytest.skip("out-of-bounds datetime64 overflow") dtype = f"M8[{time_unit}]" times = pd.date_range("2000", periods=5, freq="7D").values.astype(dtype) cftimes = date_range( "2000", periods=5, freq="7D", calendar="proleptic_gregorian", use_cftime=True ).values offset = np.datetime64("0001-01-01", time_unit) cfoffset = cftime.DatetimeProlepticGregorian(1, 1, 1) result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="D", dtype=int ) cfresult = duck_array_ops.datetime_to_numeric( cftimes, offset=cfoffset, datetime_unit="D", dtype=int ) expected = 730119 + np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) np.testing.assert_array_equal(cfresult, expected) def test_py_timedelta_to_float(): assert py_timedelta_to_float(dt.timedelta(days=1), "ns") == 86400 * 1e9 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ps") == 86400 * 1e18 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ns") == 86400 * 1e15 assert py_timedelta_to_float(dt.timedelta(days=1e6), "us") == 86400 * 1e12 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ms") == 86400 * 1e9 assert py_timedelta_to_float(dt.timedelta(days=1e6), "s") == 86400 * 1e6 assert py_timedelta_to_float(dt.timedelta(days=1e6), "D") == 1e6 @pytest.mark.parametrize("np_dt_unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_np_timedelta64_to_float( np_dt_unit: NPDatetimeUnitOptions, time_unit: PDDatetimeUnitOptions ): # tests any combination of source np.timedelta64 (NPDatetimeUnitOptions) with # np_timedelta_to_float with dedicated target unit (PDDatetimeUnitOptions) td = np.timedelta64(1, np_dt_unit) expected = _NS_PER_TIME_DELTA[np_dt_unit] / _NS_PER_TIME_DELTA[time_unit] out = np_timedelta64_to_float(td, datetime_unit=time_unit) np.testing.assert_allclose(out, expected) assert isinstance(out, float) out = np_timedelta64_to_float(np.atleast_1d(td), datetime_unit=time_unit) np.testing.assert_allclose(out, expected) @pytest.mark.parametrize("np_dt_unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_pd_timedelta_to_float( np_dt_unit: NPDatetimeUnitOptions, time_unit: PDDatetimeUnitOptions ): # tests any combination of source pd.Timedelta (NPDatetimeUnitOptions) with # np_timedelta_to_float with dedicated target unit (PDDatetimeUnitOptions) td = pd.Timedelta(1, np_dt_unit) expected = _NS_PER_TIME_DELTA[np_dt_unit] / _NS_PER_TIME_DELTA[time_unit] out = pd_timedelta_to_float(td, datetime_unit=time_unit) np.testing.assert_allclose(out, expected) assert isinstance(out, float) @pytest.mark.parametrize( "td", [dt.timedelta(days=1), np.timedelta64(1, "D"), pd.Timedelta(1, "D"), "1 day"] ) def test_timedelta_to_numeric(td, time_unit: PDDatetimeUnitOptions): # Scalar input out = timedelta_to_numeric(td, time_unit) expected = _NS_PER_TIME_DELTA["D"] / _NS_PER_TIME_DELTA[time_unit] np.testing.assert_allclose(out, expected) assert isinstance(out, float) @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("skipna", [True, False]) def test_least_squares(use_dask, skipna): if use_dask and (not has_dask or not has_scipy): pytest.skip("requires dask and scipy") lhs = np.array([[1, 2], [1, 2], [3, 2]]) rhs = DataArray(np.array([3, 5, 7]), dims=("y",)) if use_dask: rhs = rhs.chunk({"y": 1}) coeffs, residuals = least_squares(lhs, rhs.data, skipna=skipna) np.testing.assert_allclose(coeffs, [1.5, 1.25]) np.testing.assert_allclose(residuals, [2.0]) @requires_dask @requires_bottleneck @pytest.mark.parametrize("method", ["sequential", "blelloch"]) @pytest.mark.parametrize( "arr", [ [np.nan, 1, 2, 3, 
np.nan, np.nan, np.nan, np.nan, 4, 5, np.nan, 6], [ np.nan, np.nan, np.nan, 2, np.nan, np.nan, np.nan, 9, np.nan, np.nan, np.nan, np.nan, ], ], ) def test_push_dask(method, arr): import bottleneck import dask.array as da arr = np.array(arr) chunks = list(range(1, 11)) + [(1, 2, 3, 2, 2, 1, 1)] for n in [None, 1, 2, 3, 4, 5, 11]: expected = bottleneck.push(arr, axis=0, n=n) for c in chunks: with raise_if_dask_computes(): actual = push(da.from_array(arr, chunks=c), axis=0, n=n, method=method) np.testing.assert_equal(actual, expected) def test_extension_array_equality(categorical1, int1): int_duck_array = PandasExtensionArray(int1) categorical_duck_array = PandasExtensionArray(categorical1) assert (int_duck_array != categorical_duck_array).all() assert (categorical_duck_array == categorical1).all() assert (int_duck_array[0:2] == int1[0:2]).all() def test_extension_array_singleton_equality(categorical1): categorical_duck_array = PandasExtensionArray(categorical1) assert (categorical_duck_array != "cat3").all() def test_extension_array_repr(int1): int_duck_array = PandasExtensionArray(int1) assert repr(int1) in repr(int_duck_array) def test_extension_array_result_type_categorical(categorical1, categorical2): res = np.result_type( PandasExtensionArray(categorical1), PandasExtensionArray(categorical2) ) assert isinstance(res, pd.CategoricalDtype) assert set(res.categories) == set(categorical1.categories) | set( categorical2.categories ) assert not res.ordered assert categorical1.dtype == np.result_type( PandasExtensionArray(categorical1), pd.CategoricalDtype.na_value ) def test_extension_array_attr(): array = pd.Categorical(["cat2", "cat1", "cat2", "cat3", "cat1"]) wrapped = PandasExtensionArray(array) assert_array_equal(array.categories, wrapped.categories) assert array.nbytes == wrapped.nbytes roundtripped = pickle.loads(pickle.dumps(wrapped)) assert isinstance(roundtripped, PandasExtensionArray) assert (roundtripped == wrapped).all() interval_array = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="right") wrapped = PandasExtensionArray(interval_array) assert_array_equal(wrapped.left, interval_array.left, strict=True) assert wrapped.closed == interval_array.closed xarray-2025.12.0/xarray/tests/test_duck_array_wrapping.py000066400000000000000000000430261511464676000234640ustar00rootroot00000000000000import numpy as np import pandas as pd import pytest import xarray as xr # Don't run cupy in CI because it requires a GPU NAMESPACE_ARRAYS = { "cupy": { "attrs": { "array": "ndarray", "constructor": "asarray", }, "xfails": {"quantile": "no nanquantile"}, }, "dask.array": { "attrs": { "array": "Array", "constructor": "from_array", }, "xfails": { "argsort": "no argsort", "conjugate": "conj but no conjugate", "searchsorted": "dask.array.searchsorted but no Array.searchsorted", }, }, "jax.numpy": { "attrs": { "array": "ndarray", "constructor": "asarray", }, "xfails": { "rolling_construct": "no sliding_window_view", "rolling_reduce": "no sliding_window_view", "cumulative_construct": "no sliding_window_view", "cumulative_reduce": "no sliding_window_view", }, }, "pint": { "attrs": { "array": "Quantity", "constructor": "Quantity", }, "xfails": { "all": "returns a bool", "any": "returns a bool", "argmax": "returns an int", "argmin": "returns an int", "argsort": "returns an int", "count": "returns an int", "dot": "no tensordot", "full_like": "should work, see: https://github.com/hgrecco/pint/pull/1669", "idxmax": "returns the coordinate", "idxmin": "returns the coordinate", "isin": "returns a bool", 
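            # Most of the pint xfails in this table are operations that return
            # a plain bool or int (or the coordinate itself) rather than a
            # Quantity, so the type-preservation assertion cannot hold.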
"isnull": "returns a bool", "notnull": "returns a bool", "rolling_reduce": "no dispatch for numbagg/bottleneck", "cumulative_reduce": "no dispatch for numbagg/bottleneck", "searchsorted": "returns an int", "weighted": "no tensordot", }, }, "sparse": { "attrs": { "array": "COO", "constructor": "COO", }, "xfails": { "cov": "dense output", "corr": "no nanstd", "cross": "no cross", "count": "dense output", "dot": "fails on some platforms/versions", "isin": "no isin", "rolling_construct": "no sliding_window_view", "rolling_reduce": "no sliding_window_view", "cumulative_construct": "no sliding_window_view", "cumulative_reduce": "no sliding_window_view", "coarsen_construct": "pad constant_values must be fill_value", "coarsen_reduce": "pad constant_values must be fill_value", "weighted": "fill_value error", "coarsen": "pad constant_values must be fill_value", "quantile": "no non skipping version", "differentiate": "no gradient", "argmax": "no nan skipping version", "argmin": "no nan skipping version", "idxmax": "no nan skipping version", "idxmin": "no nan skipping version", "median": "no nan skipping version", "std": "no nan skipping version", "var": "no nan skipping version", "cumsum": "no cumsum", "cumprod": "no cumprod", "argsort": "no argsort", "conjugate": "no conjugate", "searchsorted": "no searchsorted", "shift": "pad constant_values must be fill_value", "pad": "pad constant_values must be fill_value", }, }, } try: import jax # type: ignore[import-not-found,unused-ignore] # enable double-precision jax.config.update("jax_enable_x64", True) except ImportError: pass class _BaseTest: def setup_for_test(self, request, namespace): self.namespace = namespace self.xp = pytest.importorskip(namespace) self.Array = getattr(self.xp, NAMESPACE_ARRAYS[namespace]["attrs"]["array"]) self.constructor = getattr( self.xp, NAMESPACE_ARRAYS[namespace]["attrs"]["constructor"] ) xarray_method = request.node.name.split("test_")[1].split("[")[0] if xarray_method in NAMESPACE_ARRAYS[namespace]["xfails"]: reason = NAMESPACE_ARRAYS[namespace]["xfails"][xarray_method] pytest.xfail(f"xfail for {self.namespace}: {reason}") def get_test_dataarray(self): data = np.asarray([[1, 2, 3, np.nan, 5]]) x = np.arange(5) data = self.constructor(data) return xr.DataArray( data, dims=["y", "x"], coords={"y": [1], "x": x}, name="foo", ) @pytest.mark.parametrize("namespace", NAMESPACE_ARRAYS) class TestTopLevelMethods(_BaseTest): @pytest.fixture(autouse=True) def setUp(self, request, namespace): self.setup_for_test(request, namespace) self.x1 = self.get_test_dataarray() self.x2 = self.get_test_dataarray().assign_coords(x=np.arange(2, 7)) def test_apply_ufunc(self): func = lambda x: x + 1 result = xr.apply_ufunc(func, self.x1, dask="parallelized") assert isinstance(result.data, self.Array) def test_align(self): result = xr.align(self.x1, self.x2) assert isinstance(result[0].data, self.Array) assert isinstance(result[1].data, self.Array) def test_broadcast(self): result = xr.broadcast(self.x1, self.x2) assert isinstance(result[0].data, self.Array) assert isinstance(result[1].data, self.Array) def test_concat(self): result = xr.concat([self.x1, self.x2], dim="x") assert isinstance(result.data, self.Array) def test_merge(self): result = xr.merge([self.x1, self.x2], compat="override", join="outer") assert isinstance(result.foo.data, self.Array) def test_where(self): x1, x2 = xr.align(self.x1, self.x2, join="inner") result = xr.where(x1 > 2, x1, x2) assert isinstance(result.data, self.Array) def test_full_like(self): result = 
xr.full_like(self.x1, 0) assert isinstance(result.data, self.Array) def test_cov(self): result = xr.cov(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_corr(self): result = xr.corr(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_cross(self): x1, x2 = xr.align(self.x1.squeeze(), self.x2.squeeze(), join="inner") result = xr.cross(x1, x2, dim="x") assert isinstance(result.data, self.Array) def test_dot(self): result = xr.dot(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_map_blocks(self): result = xr.map_blocks(lambda x: x + 1, self.x1) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("namespace", NAMESPACE_ARRAYS) class TestDataArrayMethods(_BaseTest): @pytest.fixture(autouse=True) def setUp(self, request, namespace): self.setup_for_test(request, namespace) self.x = self.get_test_dataarray() def test_loc(self): result = self.x.loc[{"x": slice(1, 3)}] assert isinstance(result.data, self.Array) def test_isel(self): result = self.x.isel(x=slice(1, 3)) assert isinstance(result.data, self.Array) def test_sel(self): result = self.x.sel(x=slice(1, 3)) assert isinstance(result.data, self.Array) def test_squeeze(self): result = self.x.squeeze("y") assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="interp uses numpy and scipy") def test_interp(self): # TODO: some cases could be made to work result = self.x.interp(x=2.5) assert isinstance(result.data, self.Array) def test_isnull(self): result = self.x.isnull() assert isinstance(result.data, self.Array) def test_notnull(self): result = self.x.notnull() assert isinstance(result.data, self.Array) def test_count(self): result = self.x.count() assert isinstance(result.data, self.Array) def test_dropna(self): result = self.x.dropna(dim="x") assert isinstance(result.data, self.Array) def test_fillna(self): result = self.x.fillna(0) assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="ffill uses bottleneck or numbagg") def test_ffill(self): result = self.x.ffill() assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="bfill uses bottleneck or numbagg") def test_bfill(self): result = self.x.bfill() assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="interpolate_na uses numpy and scipy") def test_interpolate_na(self): result = self.x.interpolate_na() assert isinstance(result.data, self.Array) def test_where(self): result = self.x.where(self.x > 2) assert isinstance(result.data, self.Array) def test_isin(self): test_elements = self.constructor(np.asarray([1])) result = self.x.isin(test_elements) assert isinstance(result.data, self.Array) def test_groupby(self): result = self.x.groupby("x").mean() assert isinstance(result.data, self.Array) def test_groupby_bins(self): result = self.x.groupby_bins("x", bins=[0, 2, 4, 6]).mean() assert isinstance(result.data, self.Array) def test_rolling_iter(self): result = self.x.rolling(x=3) elem = next(iter(result))[1] assert isinstance(elem.data, self.Array) def test_rolling_construct(self): result = self.x.rolling(x=3).construct(x="window") assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_rolling_reduce(self, skipna): result = self.x.rolling(x=3).mean(skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="rolling_exp uses numbagg") def test_rolling_exp_reduce(self): result = self.x.rolling_exp(x=3).mean() assert isinstance(result.data, self.Array) def test_cumulative_iter(self): result = 
self.x.cumulative("x") elem = next(iter(result))[1] assert isinstance(elem.data, self.Array) def test_cumulative_construct(self): result = self.x.cumulative("x").construct(x="window") assert isinstance(result.data, self.Array) def test_cumulative_reduce(self): result = self.x.cumulative("x").sum() assert isinstance(result.data, self.Array) def test_weighted(self): result = self.x.weighted(self.x.fillna(0)).mean() assert isinstance(result.data, self.Array) def test_coarsen_construct(self): result = self.x.coarsen(x=2, boundary="pad").construct(x=["a", "b"]) assert isinstance(result.data, self.Array) def test_coarsen_reduce(self): result = self.x.coarsen(x=2, boundary="pad").mean() assert isinstance(result.data, self.Array) def test_resample(self): time_coord = pd.date_range("2000-01-01", periods=5) result = self.x.assign_coords(x=time_coord).resample(x="D").mean() assert isinstance(result.data, self.Array) def test_diff(self): result = self.x.diff("x") assert isinstance(result.data, self.Array) def test_dot(self): result = self.x.dot(self.x) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_quantile(self, skipna): result = self.x.quantile(0.5, skipna=skipna) assert isinstance(result.data, self.Array) def test_differentiate(self): # edge_order is not implemented in jax, and only supports passing None edge_order = None if self.namespace == "jax.numpy" else 1 result = self.x.differentiate("x", edge_order=edge_order) assert isinstance(result.data, self.Array) def test_integrate(self): result = self.x.integrate("x") assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="polyfit uses numpy linalg") def test_polyfit(self): # TODO: this could work, there are just a lot of different linalg calls result = self.x.polyfit("x", 1) assert isinstance(result.polyfit_coefficients.data, self.Array) def test_map_blocks(self): result = self.x.map_blocks(lambda x: x + 1) assert isinstance(result.data, self.Array) def test_all(self): result = self.x.all(dim="x") assert isinstance(result.data, self.Array) def test_any(self): result = self.x.any(dim="x") assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_argmax(self, skipna): result = self.x.argmax(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_argmin(self, skipna): result = self.x.argmin(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_idxmax(self, skipna): result = self.x.idxmax(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_idxmin(self, skipna): result = self.x.idxmin(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_max(self, skipna): result = self.x.max(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_min(self, skipna): result = self.x.min(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_mean(self, skipna): result = self.x.mean(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_median(self, skipna): result = self.x.median(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) 
@pytest.mark.parametrize("skipna", [True, False]) def test_prod(self, skipna): result = self.x.prod(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_sum(self, skipna): result = self.x.sum(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_std(self, skipna): result = self.x.std(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_var(self, skipna): result = self.x.var(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_cumsum(self, skipna): result = self.x.cumsum(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_cumprod(self, skipna): result = self.x.cumprod(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) def test_argsort(self): result = self.x.argsort() assert isinstance(result.data, self.Array) def test_astype(self): result = self.x.astype(int) assert isinstance(result.data, self.Array) def test_clip(self): result = self.x.clip(min=2.0, max=4.0) assert isinstance(result.data, self.Array) def test_conj(self): result = self.x.conj() assert isinstance(result.data, self.Array) def test_conjugate(self): result = self.x.conjugate() assert isinstance(result.data, self.Array) def test_imag(self): result = self.x.imag assert isinstance(result.data, self.Array) def test_searchsorted(self): v = self.constructor(np.asarray([3])) result = self.x.squeeze().searchsorted(v) assert isinstance(result, self.Array) def test_round(self): result = self.x.round() assert isinstance(result.data, self.Array) def test_real(self): result = self.x.real assert isinstance(result.data, self.Array) def test_T(self): result = self.x.T assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="rank uses bottleneck") def test_rank(self): # TODO: scipy has rankdata, as does jax, so this can work result = self.x.rank() assert isinstance(result.data, self.Array) def test_transpose(self): result = self.x.transpose() assert isinstance(result.data, self.Array) def test_stack(self): result = self.x.stack(z=("x", "y")) assert isinstance(result.data, self.Array) def test_unstack(self): result = self.x.stack(z=("x", "y")).unstack("z") assert isinstance(result.data, self.Array) def test_shift(self): result = self.x.shift(x=1) assert isinstance(result.data, self.Array) def test_roll(self): result = self.x.roll(x=1) assert isinstance(result.data, self.Array) def test_pad(self): result = self.x.pad(x=1) assert isinstance(result.data, self.Array) def test_sortby(self): result = self.x.sortby("x") assert isinstance(result.data, self.Array) def test_broadcast_like(self): result = self.x.broadcast_like(self.x) assert isinstance(result.data, self.Array) xarray-2025.12.0/xarray/tests/test_error_messages.py000066400000000000000000000007771511464676000224570ustar00rootroot00000000000000""" This new file is intended to test the quality & friendliness of error messages that are raised by xarray. It's currently separate from the standard tests, which are more focused on the functions working (though we could consider integrating them.). """ import pytest def test_no_var_in_dataset(ds): with pytest.raises( KeyError, match=( r"No variable named 'foo'. 
Variables on the dataset include \['z1', 'z2', 'x', 'time', 'c', 'y'\]" ), ): ds["foo"] xarray-2025.12.0/xarray/tests/test_extensions.py000066400000000000000000000057061511464676000216330ustar00rootroot00000000000000from __future__ import annotations import pickle import pytest import xarray as xr from xarray.core.extensions import register_datatree_accessor from xarray.tests import assert_identical @register_datatree_accessor("example_accessor") @xr.register_dataset_accessor("example_accessor") @xr.register_dataarray_accessor("example_accessor") class ExampleAccessor: """For the pickling tests below.""" def __init__(self, xarray_obj): self.obj = xarray_obj class TestAccessor: def test_register(self) -> None: @register_datatree_accessor("demo") @xr.register_dataset_accessor("demo") @xr.register_dataarray_accessor("demo") class DemoAccessor: """Demo accessor.""" def __init__(self, xarray_obj): self._obj = xarray_obj @property def foo(self): return "bar" dt: xr.DataTree = xr.DataTree() assert dt.demo.foo == "bar" ds = xr.Dataset() assert ds.demo.foo == "bar" da = xr.DataArray(0) assert da.demo.foo == "bar" # accessor is cached assert ds.demo is ds.demo # check descriptor assert ds.demo.__doc__ == "Demo accessor." # TODO: typing doesn't seem to work with accessors assert xr.Dataset.demo.__doc__ == "Demo accessor." # type: ignore[attr-defined] assert isinstance(ds.demo, DemoAccessor) assert xr.Dataset.demo is DemoAccessor # type: ignore[attr-defined] # ensure we can remove it del xr.Dataset.demo # type: ignore[attr-defined] assert not hasattr(xr.Dataset, "demo") with pytest.warns(Warning, match="overriding a preexisting attribute"): @xr.register_dataarray_accessor("demo") class Foo: pass # it didn't get registered again assert not hasattr(xr.Dataset, "demo") def test_pickle_dataset(self) -> None: ds = xr.Dataset() ds_restored = pickle.loads(pickle.dumps(ds)) assert_identical(ds, ds_restored) # state save on the accessor is restored assert ds.example_accessor is ds.example_accessor ds.example_accessor.value = "foo" ds_restored = pickle.loads(pickle.dumps(ds)) assert_identical(ds, ds_restored) assert ds_restored.example_accessor.value == "foo" def test_pickle_dataarray(self) -> None: array = xr.Dataset() assert array.example_accessor is array.example_accessor array_restored = pickle.loads(pickle.dumps(array)) assert_identical(array, array_restored) def test_broken_accessor(self) -> None: # regression test for GH933 @xr.register_dataset_accessor("stupid_accessor") class BrokenAccessor: def __init__(self, xarray_obj): raise AttributeError("broken") with pytest.raises(RuntimeError, match=r"error initializing"): _ = xr.Dataset().stupid_accessor xarray-2025.12.0/xarray/tests/test_formatting.py000066400000000000000000001175501511464676000216070ustar00rootroot00000000000000from __future__ import annotations import sys from textwrap import dedent import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core import formatting from xarray.core.indexes import Index from xarray.tests import requires_cftime, requires_dask, requires_netCDF4 class CustomIndex(Index): names: tuple[str, ...] 
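    # A deliberately minimal Index subclass: it only records the coordinate
    # names it was built with, which is all the repr tests below need.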
def __init__(self, names: tuple[str, ...]): self.names = names def __repr__(self): return f"CustomIndex(coords={self.names})" class TestFormatting: def test_get_indexer_at_least_n_items(self) -> None: cases = [ ((20,), (slice(10),), (slice(-10, None),)), ((3, 20), (0, slice(10)), (-1, slice(-10, None))), ((2, 10), (0, slice(10)), (-1, slice(-10, None))), ((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))), ((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))), ((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))), ( (1, 10, 1), (0, slice(10), slice(None)), (-1, slice(-10, None), slice(None)), ), ( (2, 5, 1), (slice(2), slice(None), slice(None)), (slice(-2, None), slice(None), slice(None)), ), ((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))), ( (2, 3, 3), (slice(2), slice(None), slice(None)), (slice(-2, None), slice(None), slice(None)), ), ] for shape, start_expected, end_expected in cases: actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False) assert start_expected == actual actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True) assert end_expected == actual def test_first_n_items(self) -> None: array = np.arange(100).reshape(10, 5, 2) for n in [3, 10, 13, 100, 200]: actual = formatting.first_n_items(array, n) expected = array.flat[:n] assert (expected == actual).all() with pytest.raises(ValueError, match=r"at least one item"): formatting.first_n_items(array, 0) def test_last_n_items(self) -> None: array = np.arange(100).reshape(10, 5, 2) for n in [3, 10, 13, 100, 200]: actual = formatting.last_n_items(array, n) expected = array.flat[-n:] assert (expected == actual).all() with pytest.raises(ValueError, match=r"at least one item"): formatting.first_n_items(array, 0) def test_last_item(self) -> None: array = np.arange(100) reshape = ((10, 10), (1, 100), (2, 2, 5, 5)) expected = np.array([99]) for r in reshape: result = formatting.last_item(array.reshape(r)) assert result == expected def test_format_item(self) -> None: cases = [ (pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"), (pd.Timestamp("2000-01-01"), "2000-01-01"), (pd.Timestamp("NaT"), "NaT"), (pd.Timedelta("10 days 1 hour"), "10 days 01:00:00"), (pd.Timedelta("-3 days"), "-3 days +00:00:00"), (pd.Timedelta("3 hours"), "0 days 03:00:00"), (pd.Timedelta("NaT"), "NaT"), ("foo", "'foo'"), (b"foo", "b'foo'"), (1, "1"), (1.0, "1.0"), (np.float16(1.1234), "1.123"), (np.float32(1.0111111), "1.011"), (np.float64(22.222222), "22.22"), (np.zeros((1, 1)), "[[0.]]"), (np.zeros(2), "[0. 0.]"), (np.zeros((2, 2)), "[[0. 0.]\n [0. 
0.]]"), ] for item, expected in cases: actual = formatting.format_item(item) assert expected == actual def test_format_items(self) -> None: cases = [ (np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"), ( np.arange(4) * np.timedelta64(3, "h"), "00:00:00 03:00:00 06:00:00 09:00:00", ), ( np.arange(4) * np.timedelta64(500, "ms"), "00:00:00 00:00:00.500000 00:00:01 00:00:01.500000", ), (pd.to_timedelta(["NaT", "0s", "1s", "NaT"]), "NaT 00:00:00 00:00:01 NaT"), # type: ignore[arg-type, unused-ignore] ( pd.to_timedelta(["1 day 1 hour", "1 day", "0 hours"]), # type: ignore[arg-type, unused-ignore] "1 days 01:00:00 1 days 00:00:00 0 days 00:00:00", ), ([1, 2, 3], "1 2 3"), ] for item, expected in cases: actual = " ".join(formatting.format_items(item)) assert expected == actual def test_format_array_flat(self) -> None: actual = formatting.format_array_flat(np.arange(100), 2) expected = "..." assert expected == actual actual = formatting.format_array_flat(np.arange(100), 9) expected = "0 ... 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 10) expected = "0 1 ... 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 13) expected = "0 1 ... 98 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 15) expected = "0 1 2 ... 98 99" assert expected == actual # NB: Probably not ideal; an alternative would be cutting after the # first ellipsis actual = formatting.format_array_flat(np.arange(100.0), 11) expected = "0.0 ... ..." assert expected == actual actual = formatting.format_array_flat(np.arange(100.0), 12) expected = "0.0 ... 99.0" assert expected == actual actual = formatting.format_array_flat(np.arange(3), 5) expected = "0 1 2" assert expected == actual actual = formatting.format_array_flat(np.arange(4.0), 11) expected = "0.0 ... 3.0" assert expected == actual actual = formatting.format_array_flat(np.arange(0), 0) expected = "" assert expected == actual actual = formatting.format_array_flat(np.arange(1), 1) expected = "0" assert expected == actual actual = formatting.format_array_flat(np.arange(2), 3) expected = "0 1" assert expected == actual actual = formatting.format_array_flat(np.arange(4), 7) expected = "0 1 2 3" assert expected == actual actual = formatting.format_array_flat(np.arange(5), 7) expected = "0 ... 4" assert expected == actual long_str = [" ".join(["hello world" for _ in range(100)])] actual = formatting.format_array_flat(np.asarray([long_str]), 21) expected = "'hello world hello..." assert expected == actual def test_pretty_print(self) -> None: assert formatting.pretty_print("abcdefghij", 8) == "abcde..." 
assert formatting.pretty_print("รŸ", 1) == "รŸ" def test_maybe_truncate(self) -> None: assert formatting.maybe_truncate("รŸ", 10) == "รŸ" def test_format_timestamp_invalid_pandas_format(self) -> None: expected = "2021-12-06 17:00:00 00" with pytest.raises(ValueError): formatting.format_timestamp(expected) def test_format_timestamp_out_of_bounds(self) -> None: from datetime import datetime date = datetime(1300, 12, 1) expected = "1300-12-01" result = formatting.format_timestamp(date) assert result == expected date = datetime(2300, 12, 1) expected = "2300-12-01" result = formatting.format_timestamp(date) assert result == expected def test_attribute_repr(self) -> None: short = formatting.summarize_attr("key", "Short string") long = formatting.summarize_attr("key", 100 * "Very long string ") newlines = formatting.summarize_attr("key", "\n\n\n") tabs = formatting.summarize_attr("key", "\t\t\t") assert short == " key: Short string" assert len(long) <= 80 assert long.endswith("...") assert "\n" not in newlines assert "\t" not in tabs def test_index_repr(self) -> None: coord_names = ("x", "y") index = CustomIndex(coord_names) names = ("x",) normal = formatting.summarize_index(names, index, col_width=20) assert names[0] in normal assert len(normal.splitlines()) == len(names) assert "CustomIndex" in normal class IndexWithInlineRepr(CustomIndex): def _repr_inline_(self, max_width: int): return f"CustomIndex[{', '.join(self.names)}]" index = IndexWithInlineRepr(coord_names) inline = formatting.summarize_index(names, index, col_width=20) assert names[0] in inline assert index._repr_inline_(max_width=40) in inline @pytest.mark.parametrize( "names", ( ("x",), ("x", "y"), ("x", "y", "z"), ("x", "y", "z", "a"), ), ) def test_index_repr_grouping(self, names) -> None: index = CustomIndex(names) normal = formatting.summarize_index(names, index, col_width=20) assert all(name in normal for name in names) assert len(normal.splitlines()) == len(names) assert "CustomIndex" in normal hint_chars = [line[2] for line in normal.splitlines()] if len(names) <= 1: assert hint_chars == [" "] else: assert hint_chars[0] == "โ”Œ" and hint_chars[-1] == "โ””" assert len(names) == 2 or hint_chars[1:-1] == ["โ”‚"] * (len(names) - 2) def test_diff_array_repr(self) -> None: da_a = xr.DataArray( np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"), dims=("x", "y"), coords={ "x": np.array(["a", "b"], dtype="U1"), "y": np.array([1, 2, 3], dtype="int64"), }, attrs={"units": "m", "description": "desc"}, ) da_b = xr.DataArray( np.array([1, 2], dtype="int64"), dims="x", coords={ "x": np.array(["a", "c"], dtype="U1"), "label": ("x", np.array([1, 2], dtype="int64")), }, attrs={"units": "kg"}, ) byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Left and right DataArray objects are not identical Differing dimensions: (x: 2, y: 3) != (x: 2) Differing values: L array([[1, 2, 3], [4, 5, 6]], dtype=int64) R array([1, 2], dtype=int64) Differing coordinates: L * x (x) {byteorder}U1 8B 'a' 'b' R * x (x) {byteorder}U1 8B 'a' 'c' Coordinates only on the left object: * y (y) int64 24B 1 2 3 Coordinates only on the right object: label (x) int64 16B 1 2 Differing attributes: L units: m R units: kg Attributes only on the left object: description: desc""" ) actual = formatting.diff_array_repr(da_a, da_b, "identical") try: assert actual == expected except AssertionError: # depending on platform, dtype may not be shown in numpy array repr assert actual == expected.replace(", dtype=int64", "") da_a = xr.DataArray( np.array([[1, 2, 3], 
[4, 5, 6]], dtype="int8"), dims=("x", "y"), coords=xr.Coordinates( { "x": np.array([True, False], dtype="bool"), "y": np.array([1, 2, 3], dtype="int16"), }, indexes={"y": CustomIndex(("y",))}, ), ) da_b = xr.DataArray( np.array([1, 2], dtype="int8"), dims="x", coords=xr.Coordinates( { "x": np.array([True, False], dtype="bool"), "label": ("x", np.array([1, 2], dtype="int16")), }, indexes={"label": CustomIndex(("label",))}, ), ) expected = dedent( """\ Left and right DataArray objects are not equal Differing dimensions: (x: 2, y: 3) != (x: 2) Differing values: L array([[1, 2, 3], [4, 5, 6]], dtype=int8) R array([1, 2], dtype=int8) Coordinates only on the left object: * y (y) int16 6B 1 2 3 Coordinates only on the right object: * label (x) int16 4B 1 2 """.rstrip() ) actual = formatting.diff_array_repr(da_a, da_b, "equals") assert actual == expected va = xr.Variable( "x", np.array([1, 2, 3], dtype="int64"), {"title": "test Variable"} ) vb = xr.Variable(("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")) expected = dedent( """\ Left and right Variable objects are not equal Differing dimensions: (x: 3) != (x: 2, y: 3) Differing values: L array([1, 2, 3], dtype=int64) R array([[1, 2, 3], [4, 5, 6]], dtype=int64)""" ) actual = formatting.diff_array_repr(va, vb, "equals") try: assert actual == expected except AssertionError: assert actual == expected.replace(", dtype=int64", "") @pytest.mark.filterwarnings("error") def test_diff_attrs_repr_with_array(self) -> None: attrs_a = {"attr": np.array([0, 1])} attrs_b = {"attr": 1} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: 1 """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals") assert expected == actual attrs_c = {"attr": np.array([-3, 5])} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: [-3 5] """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals") assert expected == actual # should not raise a warning attrs_c = {"attr": np.array([0, 1, 2])} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: [0 1 2] """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals") assert expected == actual def test__diff_mapping_repr_array_attrs_on_variables(self) -> None: a = { "a": xr.DataArray( dims="x", data=np.array([1], dtype="int16"), attrs={"b": np.array([1, 2], dtype="int8")}, ) } b = { "a": xr.DataArray( dims="x", data=np.array([1], dtype="int16"), attrs={"b": np.array([2, 3], dtype="int8")}, ) } actual = formatting.diff_data_vars_repr(a, b, compat="identical", col_width=8) expected = dedent( """\ Differing data variables: L a (x) int16 2B 1 Differing variable attributes: b: [1 2] R a (x) int16 2B 1 Differing variable attributes: b: [2 3] """.rstrip() ) assert actual == expected def test_diff_dataset_repr(self) -> None: ds_a = xr.Dataset( data_vars={ "var1": (("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")), "var2": ("x", np.array([3, 4], dtype="int64")), }, coords={ "x": ( "x", np.array(["a", "b"], dtype="U1"), {"foo": "bar", "same": "same"}, ), "y": np.array([1, 2, 3], dtype="int64"), }, attrs={"title": "mytitle", "description": "desc"}, ) ds_b = xr.Dataset( data_vars={"var1": ("x", np.array([1, 2], dtype="int64"))}, coords={ "x": ( "x", np.array(["a", "c"], dtype="U1"), {"source": 0, "foo": "baz", "same": "same"}, ), "label": ("x", np.array([1, 2], dtype="int64")), }, attrs={"title": "newtitle"}, ) byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Left and right Dataset objects are not 
identical Differing dimensions: (x: 2, y: 3) != (x: 2) Differing coordinates: L * x (x) {byteorder}U1 8B 'a' 'b' Differing variable attributes: foo: bar R * x (x) {byteorder}U1 8B 'a' 'c' Differing variable attributes: source: 0 foo: baz Coordinates only on the left object: * y (y) int64 24B 1 2 3 Coordinates only on the right object: label (x) int64 16B 1 2 Differing data variables: L var1 (x, y) int64 48B 1 2 3 4 5 6 R var1 (x) int64 16B 1 2 Data variables only on the left object: var2 (x) int64 16B 3 4 Differing attributes: L title: mytitle R title: newtitle Attributes only on the left object: description: desc""" ) actual = formatting.diff_dataset_repr(ds_a, ds_b, "identical") assert actual == expected def test_array_repr(self) -> None: ds = xr.Dataset( coords={ "foo": np.array([1, 2, 3], dtype=np.uint64), "bar": np.array([1, 2, 3], dtype=np.uint64), } ) ds[(1, 2)] = xr.DataArray(np.array([0], dtype=np.uint64), dims="test") ds_12 = ds[(1, 2)] # Test repr function behaves correctly: actual = formatting.array_repr(ds_12) expected = dedent( """\ Size: 8B array([0], dtype=uint64) Dimensions without coordinates: test""" ) assert actual == expected # Test repr, str prints returns correctly as well: assert repr(ds_12) == expected assert str(ds_12) == expected # f-strings (aka format(...)) by default should use the repr: actual = f"{ds_12}" assert actual == expected with xr.set_options(display_expand_data=False): actual = formatting.array_repr(ds[(1, 2)]) expected = dedent( """\ Size: 8B 0 Dimensions without coordinates: test""" ) assert actual == expected def test_array_repr_variable(self) -> None: var = xr.Variable("x", [0, 1]) formatting.array_repr(var) with xr.set_options(display_expand_data=False): formatting.array_repr(var) def test_array_repr_recursive(self) -> None: # GH:issue:7111 # direct recursion var = xr.Variable("x", [0, 1]) var.attrs["x"] = var formatting.array_repr(var) da = xr.DataArray([0, 1], dims=["x"]) da.attrs["x"] = da formatting.array_repr(da) # indirect recursion var.attrs["x"] = da da.attrs["x"] = var formatting.array_repr(var) formatting.array_repr(da) @requires_dask def test_array_scalar_format(self) -> None: # Test numpy scalars: var = xr.DataArray(np.array(0)) assert format(var, "") == repr(var) assert format(var, "d") == "0" assert format(var, ".2f") == "0.00" # Test dask scalars, not supported however: import dask.array as da var = xr.DataArray(da.array(0)) assert format(var, "") == repr(var) with pytest.raises(TypeError) as excinfo: format(var, ".2f") assert "unsupported format string passed to" in str(excinfo.value) # Test numpy arrays raises: var = xr.DataArray([0.1, 0.2]) with pytest.raises(NotImplementedError) as excinfo: # type: ignore[assignment] format(var, ".2f") assert "Using format_spec is only supported" in str(excinfo.value) def test_datatree_print_empty_node(self): dt: xr.DataTree = xr.DataTree(name="root") printout = str(dt) assert printout == "\nGroup: /" def test_datatree_print_empty_node_with_attrs(self): dat = xr.Dataset(attrs={"note": "has attrs"}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) printout = str(dt) assert printout == dedent( """\ Group: / Attributes: note: has attrs""" ) def test_datatree_print_node_with_data(self): dat = xr.Dataset({"a": [0, 2]}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) printout = str(dt) expected = [ "", "Group: /", "Dimensions", "Coordinates", "a", ] for expected_line, printed_line in zip( expected, printout.splitlines(), strict=True ): assert expected_line in printed_line def 
test_datatree_printout_nested_node(self): dat = xr.Dataset({"a": [0, 2]}) root = xr.DataTree.from_dict( { "/results": dat, } ) printout = str(root) assert printout.splitlines()[3].startswith(" ") def test_datatree_repr_of_node_with_data(self): dat = xr.Dataset({"a": [0, 2]}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) assert "Coordinates" in repr(dt) def test_diff_datatree_repr_different_groups(self): dt_1: xr.DataTree = xr.DataTree.from_dict({"a": None}) dt_2: xr.DataTree = xr.DataTree.from_dict({"b": None}) expected = dedent( """\ Left and right DataTree objects are not identical Children at root node do not match: ['a'] vs ['b']""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "identical") assert actual == expected def test_diff_datatree_repr_different_subgroups(self): dt_1: xr.DataTree = xr.DataTree.from_dict({"a": None, "a/b": None, "a/c": None}) dt_2: xr.DataTree = xr.DataTree.from_dict({"a": None, "a/b": None}) expected = dedent( """\ Left and right DataTree objects are not isomorphic Children at node 'a' do not match: ['b', 'c'] vs ['b']""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "isomorphic") assert actual == expected def test_diff_datatree_repr_node_data(self): # casting to int64 explicitly ensures that int64s are created on all architectures ds1 = xr.Dataset({"u": np.int64(0), "v": np.int64(1)}) ds3 = xr.Dataset({"w": np.int64(5)}) dt_1: xr.DataTree = xr.DataTree.from_dict({"a": ds1, "a/b": ds3}) ds2 = xr.Dataset({"u": np.int64(0)}) ds4 = xr.Dataset({"w": np.int64(6)}) dt_2: xr.DataTree = xr.DataTree.from_dict({"a": ds2, "a/b": ds4}, name="foo") expected = dedent( """\ Left and right DataTree objects are not identical Differing names: None != 'foo' Data at node 'a' does not match: Data variables only on the left object: v int64 8B 1 Data at node 'a/b' does not match: Differing data variables: L w int64 8B 5 R w int64 8B 6""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "identical") assert actual == expected def test_diff_datatree_repr_equals(self) -> None: ds1 = xr.Dataset(data_vars={"data": ("y", [5, 2])}) ds2 = xr.Dataset(data_vars={"data": (("x", "y"), [[5, 2]])}) dt1 = xr.DataTree.from_dict({"node": ds1}) dt2 = xr.DataTree.from_dict({"node": ds2}) expected = dedent( """\ Left and right DataTree objects are not equal Data at node 'node' does not match: Differing dimensions: (y: 2) != (x: 1, y: 2) Differing data variables: L data (y) int64 16B 5 2 R data (x, y) int64 16B 5 2""" ) actual = formatting.diff_datatree_repr(dt1, dt2, "equals") assert actual == expected def test_inline_variable_array_repr_custom_repr() -> None: class CustomArray: def __init__(self, value, attr): self.value = value self.attr = attr def _repr_inline_(self, width): formatted = f"({self.attr}) {self.value}" if len(formatted) > width: formatted = f"({self.attr}) ..." 
return formatted def __array_namespace__(self, *args, **kwargs): return NotImplemented @property def shape(self) -> tuple[int, ...]: return self.value.shape @property def dtype(self): return self.value.dtype @property def ndim(self): return self.value.ndim value = CustomArray(np.array([20, 40]), "m") variable = xr.Variable("x", value) max_width = 10 actual = formatting.inline_variable_array_repr(variable, max_width=10) assert actual == value._repr_inline_(max_width) def test_set_numpy_options() -> None: original_options = np.get_printoptions() with formatting.set_numpy_options(threshold=10): assert len(repr(np.arange(500))) < 200 # original options are restored assert np.get_printoptions() == original_options def test_short_array_repr() -> None: cases = [ np.random.randn(500), np.random.randn(20, 20), np.random.randn(5, 10, 15), np.random.randn(5, 10, 15, 3), np.random.randn(100, 5, 1), ] # number of lines: # for default numpy repr: 167, 140, 254, 248, 599 # for short_array_repr: 1, 7, 24, 19, 25 for array in cases: num_lines = formatting.short_array_repr(array).count("\n") + 1 assert num_lines < 30 # threshold option (default: 200) array2 = np.arange(100) assert "..." not in formatting.short_array_repr(array2) with xr.set_options(display_values_threshold=10): assert "..." in formatting.short_array_repr(array2) def test_large_array_repr_length() -> None: da = xr.DataArray(np.random.randn(100, 5, 1)) result = repr(da).splitlines() assert len(result) < 50 @requires_netCDF4 def test_repr_file_collapsed(tmp_path) -> None: arr_to_store = xr.DataArray(np.arange(300, dtype=np.int64), dims="test") arr_to_store.to_netcdf(tmp_path / "test.nc", engine="netcdf4") with ( xr.open_dataarray(tmp_path / "test.nc") as arr, xr.set_options(display_expand_data=False), ): actual = repr(arr) expected = dedent( """\ Size: 2kB [300 values with dtype=int64] Dimensions without coordinates: test""" ) assert actual == expected arr_loaded = arr.compute() actual = arr_loaded.__repr__() expected = dedent( """\ Size: 2kB 0 1 2 3 4 5 6 7 8 9 10 11 12 ... 
288 289 290 291 292 293 294 295 296 297 298 299 Dimensions without coordinates: test""" ) assert actual == expected @pytest.mark.parametrize( "display_max_rows, n_vars, n_attr", [(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)], ) def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None: long_name = "long_name" a = np.char.add(long_name, np.arange(0, n_vars).astype(str)) b = np.char.add("attr_", np.arange(0, n_attr).astype(str)) c = np.char.add("coord", np.arange(0, n_vars).astype(str)) attrs = dict.fromkeys(b, 2) coords = {_c: np.array([0, 1], dtype=np.uint64) for _c in c} data_vars = dict() for v, _c in zip(a, coords.items(), strict=True): data_vars[v] = xr.DataArray( name=v, data=np.array([3, 4], dtype=np.uint64), dims=[_c[0]], coords=dict([_c]), ) ds = xr.Dataset(data_vars) ds.attrs = attrs with xr.set_options(display_max_rows=display_max_rows): # Parse the data_vars print and show only data_vars rows: summary = formatting.dataset_repr(ds).split("\n") summary = [v for v in summary if long_name in v] # The length should be less than or equal to display_max_rows: len_summary = len(summary) data_vars_print_size = min(display_max_rows, len_summary) assert len_summary == data_vars_print_size summary = formatting.data_vars_repr(ds.data_vars).split("\n") summary = [v for v in summary if long_name in v] # The length should be equal to the number of data variables len_summary = len(summary) assert len_summary == n_vars summary = formatting.coords_repr(ds.coords).split("\n") summary = [v for v in summary if "coord" in v] # The length should be equal to the number of data variables len_summary = len(summary) assert len_summary == n_vars with xr.set_options( display_max_rows=display_max_rows, display_expand_coords=False, display_expand_data_vars=False, display_expand_attrs=False, ): actual = formatting.dataset_repr(ds) col_width = formatting._calculate_col_width(ds.variables) dims_start = formatting.pretty_print("Dimensions:", col_width) dims_values = formatting.dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=display_max_rows ) expected_size = "1kB" expected = f"""\ Size: {expected_size} {dims_start}({dims_values}) Coordinates: ({n_vars}) Data variables: ({n_vars}) Attributes: ({n_attr})""" expected = dedent(expected) assert actual == expected def test__mapping_repr_recursive() -> None: # GH:issue:7111 # direct recursion ds = xr.Dataset({"a": ("x", [1, 2, 3])}) ds.attrs["ds"] = ds formatting.dataset_repr(ds) # indirect recursion ds2 = xr.Dataset({"b": ("y", [1, 2, 3])}) ds.attrs["ds"] = ds2 ds2.attrs["ds"] = ds formatting.dataset_repr(ds2) def test__element_formatter(n_elements: int = 100) -> None: expected = """\ Dimensions without coordinates: dim_0: 3, dim_1: 3, dim_2: 3, dim_3: 3, dim_4: 3, dim_5: 3, dim_6: 3, dim_7: 3, dim_8: 3, dim_9: 3, dim_10: 3, dim_11: 3, dim_12: 3, dim_13: 3, dim_14: 3, dim_15: 3, dim_16: 3, dim_17: 3, dim_18: 3, dim_19: 3, dim_20: 3, dim_21: 3, dim_22: 3, dim_23: 3, ... 
dim_76: 3, dim_77: 3, dim_78: 3, dim_79: 3, dim_80: 3, dim_81: 3, dim_82: 3, dim_83: 3, dim_84: 3, dim_85: 3, dim_86: 3, dim_87: 3, dim_88: 3, dim_89: 3, dim_90: 3, dim_91: 3, dim_92: 3, dim_93: 3, dim_94: 3, dim_95: 3, dim_96: 3, dim_97: 3, dim_98: 3, dim_99: 3""" expected = dedent(expected) intro = "Dimensions without coordinates: " elements = [ f"{k}: {v}" for k, v in {f"dim_{k}": 3 for k in np.arange(n_elements)}.items() ] values = xr.core.formatting._element_formatter( elements, col_width=len(intro), max_rows=12 ) actual = intro + values assert expected == actual def test_lazy_array_wont_compute() -> None: from xarray.core.indexing import LazilyIndexedArray class LazilyIndexedArrayNotComputable(LazilyIndexedArray): def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: raise NotImplementedError("Computing this array is not possible.") arr = LazilyIndexedArrayNotComputable(np.array([1, 2])) var = xr.DataArray(arr) # These will crash if var.data are converted to numpy arrays: var.__repr__() var._repr_html_() @pytest.mark.parametrize("as_dataset", (False, True)) def test_format_xindexes_none(as_dataset: bool) -> None: # ensure repr for empty xindexes can be displayed #8367 expected = """\ Indexes: *empty*""" expected = dedent(expected) obj: xr.DataArray | xr.Dataset = xr.DataArray() obj = obj._to_temp_dataset() if as_dataset else obj actual = repr(obj.xindexes) assert actual == expected @pytest.mark.parametrize("as_dataset", (False, True)) def test_format_xindexes(as_dataset: bool) -> None: expected = """\ Indexes: x PandasIndex""" expected = dedent(expected) obj: xr.DataArray | xr.Dataset = xr.DataArray([1], coords={"x": [1]}) obj = obj._to_temp_dataset() if as_dataset else obj actual = repr(obj.xindexes) assert actual == expected @requires_cftime def test_empty_cftimeindex_repr() -> None: index = xr.coding.cftimeindex.CFTimeIndex([]) expected = """\ Indexes: time CFTimeIndex([], dtype='object', length=0, calendar=None, freq=None)""" expected = dedent(expected) da = xr.DataArray([], coords={"time": index}) actual = repr(da.indexes) assert actual == expected def test_display_nbytes() -> None: xds = xr.Dataset( { "foo": np.arange(1200, dtype=np.int16), "bar": np.arange(111, dtype=np.int16), } ) # Note: int16 is used to ensure that dtype is shown in the # numpy array representation for all OSes included Windows actual = repr(xds) expected = """ Size: 3kB Dimensions: (foo: 1200, bar: 111) Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 * bar (bar) int16 222B 0 1 2 3 4 5 6 7 ... 104 105 106 107 108 109 110 Data variables: *empty* """.strip() assert actual == expected actual = repr(xds["foo"]) array_repr = repr(xds.foo.data).replace("\n ", "") expected = f""" Size: 2kB {array_repr} Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 """.strip() assert actual == expected def test_array_repr_dtypes(): # These dtypes are expected to be represented similarly # on Ubuntu, macOS and Windows environments of the CI. 
# Unsigned integer could be used as easy replacements # for tests where the data-type does not matter, # but the repr does, including the size # (size of an int == size of a uint) # Signed integer dtypes ds = xr.DataArray(np.array([0], dtype="int8"), dims="x") actual = repr(ds) expected = """ Size: 1B array([0], dtype=int8) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="int16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0], dtype=int16) Dimensions without coordinates: x """.strip() assert actual == expected # Unsigned integer dtypes ds = xr.DataArray(np.array([0], dtype="uint8"), dims="x") actual = repr(ds) expected = """ Size: 1B array([0], dtype=uint8) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0], dtype=uint16) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint32"), dims="x") actual = repr(ds) expected = """ Size: 4B array([0], dtype=uint32) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint64"), dims="x") actual = repr(ds) expected = """ Size: 8B array([0], dtype=uint64) Dimensions without coordinates: x """.strip() assert actual == expected # Float dtypes ds = xr.DataArray(np.array([0.0]), dims="x") actual = repr(ds) expected = """ Size: 8B array([0.]) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0.], dtype=float16) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float32"), dims="x") actual = repr(ds) expected = """ Size: 4B array([0.], dtype=float32) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float64"), dims="x") actual = repr(ds) expected = """ Size: 8B array([0.]) Dimensions without coordinates: x """.strip() assert actual == expected # Signed integer dtypes array = np.array([0]) ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: {array.dtype.itemsize}B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected array = np.array([0], dtype="int32") ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: 4B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected array = np.array([0], dtype="int64") ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: 8B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected def test_repr_pandas_range_index() -> None: # lazy data repr but values shown in inline repr xidx = xr.indexes.PandasIndex(pd.RangeIndex(10), "x") ds = xr.Dataset(coords=xr.Coordinates.from_xindex(xidx)) actual = repr(ds.x) expected = """ Size: 80B [10 values with dtype=int64] Coordinates: * x (x) int64 80B 0 1 2 3 4 5 6 7 8 9 """.strip() assert actual == expected def test_repr_pandas_multi_index() -> None: # lazy data repr but values shown in inline repr midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["foo", "bar"]) coords = xr.Coordinates.from_pandas_multiindex(midx, "x") ds = xr.Dataset(coords=coords) actual = repr(ds.x) expected = """ Size: 32B [4 values with dtype=object] Coordinates: * x (x) object 32B MultiIndex * foo 
(x) object 32B 'a' 'a' 'b' 'b' * bar (x) int64 32B 1 2 1 2 """.strip() assert actual == expected actual = repr(ds.foo) expected = """ Size: 32B [4 values with dtype=object] Coordinates: * x (x) object 32B MultiIndex * foo (x) object 32B 'a' 'a' 'b' 'b' * bar (x) int64 32B 1 2 1 2 """.strip() assert actual == expected xarray-2025.12.0/xarray/tests/test_formatting_html.py000066400000000000000000000301431511464676000226230ustar00rootroot00000000000000from __future__ import annotations import re from functools import partial import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core import formatting_html as fh from xarray.core.coordinates import Coordinates def drop_fallback_text_repr(html: str) -> str: pattern = ( re.escape("
    ") + "[^<]*" + re.escape("
    ") ) return re.sub(pattern, "", html) XarrayTypes = xr.DataTree | xr.Dataset | xr.DataArray | xr.Variable def xarray_html_only_repr(obj: XarrayTypes) -> str: return drop_fallback_text_repr(obj._repr_html_()) def assert_consistent_text_and_html( obj: XarrayTypes, section_headers: list[str] ) -> None: actual_html = xarray_html_only_repr(obj) actual_text = repr(obj) for section_header in section_headers: assert actual_html.count(section_header) == actual_text.count(section_header), ( section_header ) assert_consistent_text_and_html_dataarray = partial( assert_consistent_text_and_html, section_headers=[ "Coordinates", "Indexes", "Attributes", ], ) assert_consistent_text_and_html_dataset = partial( assert_consistent_text_and_html, section_headers=[ "Dimensions", "Coordinates", "Data variables", "Indexes", "Attributes", ], ) assert_consistent_text_and_html_datatree = partial( assert_consistent_text_and_html, section_headers=[ "Dimensions", "Coordinates", "Inherited coordinates", "Data variables", "Indexes", "Attributes", ], ) @pytest.fixture def dataarray() -> xr.DataArray: return xr.DataArray(np.random.default_rng(0).random((4, 6))) @pytest.fixture def dask_dataarray(dataarray: xr.DataArray) -> xr.DataArray: pytest.importorskip("dask") return dataarray.chunk() @pytest.fixture def multiindex() -> xr.Dataset: midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") return xr.Dataset({}, midx_coords) @pytest.fixture def dataset() -> xr.Dataset: times = pd.date_range("2000-01-01", "2001-12-31", name="time") annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28)) base = 10 + 15 * annual_cycle.reshape(-1, 1) tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3) tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3) return xr.Dataset( { "tmin": (("time", "location"), tmin_values), "tmax": (("time", "location"), tmax_values), }, {"location": ["", "IN", "IL"], "time": times}, attrs={"description": "Test data."}, ) def test_short_data_repr_html(dataarray: xr.DataArray) -> None: data_repr = fh.short_data_repr_html(dataarray) assert data_repr.startswith("
    array")
    
    
    def test_short_data_repr_html_non_str_keys(dataset: xr.Dataset) -> None:
        ds = dataset.assign({2: lambda x: x["tmin"]})
        fh.dataset_repr(ds)
    
    
    def test_short_data_repr_html_dask(dask_dataarray: xr.DataArray) -> None:
        assert hasattr(dask_dataarray.data, "_repr_html_")
        data_repr = fh.short_data_repr_html(dask_dataarray)
        assert data_repr == dask_dataarray.data._repr_html_()
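

# --- Illustrative sketch, not part of the upstream test module ---
# The two tests above cover both code paths of ``fh.short_data_repr_html``:
# data that brings its own ``_repr_html_`` (e.g. dask) is rendered by
# delegation, while plain numpy-backed data falls back to an escaped
# ``<pre>`` block.  A minimal restatement of that behaviour; the helper name
# ``_sketch_short_data_repr_html_paths`` is hypothetical.
def _sketch_short_data_repr_html_paths() -> None:
    da = xr.DataArray(np.arange(4))
    html = fh.short_data_repr_html(da)
    # numpy-backed data has no _repr_html_, so the <pre> fallback is used
    assert html.startswith("<pre>")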
    
    
    def test_format_dims_no_dims() -> None:
        dims: dict = {}
        dims_with_index: list = []
        formatted = fh.format_dims(dims, dims_with_index)
        assert formatted == ""
    
    
    def test_format_dims_unsafe_dim_name() -> None:
        dims = {"": 3, "y": 2}
        dims_with_index: list = []
        formatted = fh.format_dims(dims, dims_with_index)
        assert "<x>" in formatted
    
    
    def test_format_dims_non_index() -> None:
        dims, dims_with_index = {"x": 3, "y": 2}, ["time"]
        formatted = fh.format_dims(dims, dims_with_index)
        assert "class='xr-has-index'" not in formatted
    
    
    def test_format_dims_index() -> None:
        dims, dims_with_index = {"x": 3, "y": 2}, ["x"]
        formatted = fh.format_dims(dims, dims_with_index)
        assert "class='xr-has-index'" in formatted
    
    
    def test_summarize_attrs_with_unsafe_attr_name_and_value() -> None:
        attrs = {"": 3, "y": ""}
        formatted = fh.summarize_attrs(attrs)
        assert "
    <x> :
    " in formatted assert "
    y :
    " in formatted assert "
    3
    " in formatted assert "
    <pd.DataFrame>
    " in formatted def test_repr_of_dataarray() -> None: dataarray = xr.DataArray(np.random.default_rng(0).random((4, 6))) formatted = xarray_html_only_repr(dataarray) assert "dim_0" in formatted # has an expanded data section assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 1 # coords, indexes and attrs don't have an items so they'll be omitted assert "Coordinates" not in formatted assert "Indexes" not in formatted assert "Attributes" not in formatted assert_consistent_text_and_html_dataarray(dataarray) with xr.set_options(display_expand_data=False): formatted = xarray_html_only_repr(dataarray) assert "dim_0" in formatted # has a collapsed data section assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 0 # coords, indexes and attrs don't have an items so they'll be omitted assert "Coordinates" not in formatted assert "Indexes" not in formatted assert "Attributes" not in formatted def test_repr_coords_order_of_datarray() -> None: da1 = xr.DataArray( np.empty((2, 2)), coords={"foo": [0, 1], "bar": [0, 1]}, dims=["foo", "bar"], ) da2 = xr.DataArray( np.empty((2, 2)), coords={"bar": [0, 1], "foo": [0, 1]}, dims=["bar", "foo"], ) ds = xr.Dataset({"da1": da1, "da2": da2}) bar_line = ( "bar
    (bar)" ) foo_line = ( "foo
    (foo)" ) formatted_da1 = fh.array_repr(ds.da1) assert formatted_da1.index(foo_line) < formatted_da1.index(bar_line) formatted_da2 = fh.array_repr(ds.da2) assert formatted_da2.index(bar_line) < formatted_da2.index(foo_line) def test_repr_of_multiindex(multiindex: xr.Dataset) -> None: formatted = fh.dataset_repr(multiindex) assert "(x)" in formatted assert_consistent_text_and_html_dataset(multiindex) def test_repr_of_dataset(dataset: xr.Dataset) -> None: formatted = xarray_html_only_repr(dataset) # coords, attrs, and data_vars are expanded assert ( formatted.count("class='xr-section-summary-in' type='checkbox' checked>") == 3 ) # indexes is omitted assert "Indexes" not in formatted assert "<U4" in formatted or ">U4" in formatted assert "<IA>" in formatted assert_consistent_text_and_html_dataset(dataset) with xr.set_options( display_expand_coords=False, display_expand_data_vars=False, display_expand_attrs=False, display_expand_indexes=True, display_default_indexes=True, ): formatted = xarray_html_only_repr(dataset) # coords, attrs, and data_vars are collapsed, indexes is shown & expanded assert ( formatted.count("class='xr-section-summary-in' type='checkbox' checked>") == 1 ) assert "Indexes" in formatted assert "<U4" in formatted or ">U4" in formatted assert "<IA>" in formatted def test_repr_text_fallback(dataset: xr.Dataset) -> None: formatted = fh.dataset_repr(dataset) # Just test that the "pre" block used for fallback to plain text is present. assert "
    " in formatted
    
    
    def test_repr_coords_order_of_dataset() -> None:
        ds = xr.Dataset()
        ds.coords["as"] = 10
        ds["var"] = xr.DataArray(np.ones((10,)), dims="x", coords={"x": np.arange(10)})
        formatted = fh.dataset_repr(ds)
    
        x_line = "x
    (x)" as_line = "as
    ()" assert formatted.index(x_line) < formatted.index(as_line) def test_variable_repr_html() -> None: v = xr.Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"}) assert hasattr(v, "_repr_html_") with xr.set_options(display_style="html"): html = v._repr_html_().strip() # We don't do a complete string identity since # html output is probably subject to change, is long and... reasons. # Just test that something reasonable was produced. assert html.startswith("") assert "xarray.Variable" in html def test_repr_of_nonstr_dataset(dataset: xr.Dataset) -> None: ds = dataset.copy() ds.attrs[1] = "Test value" ds[2] = ds["tmin"] formatted = fh.dataset_repr(ds) assert "
<dt><span>1 :</span></dt><dd>Test value</dd>
    " in formatted assert "
    2" in formatted def test_repr_of_nonstr_dataarray(dataarray: xr.DataArray) -> None: da = dataarray.rename(dim_0=15) da.attrs[1] = "value" formatted = fh.array_repr(da) assert "
<dt><span>1 :</span></dt><dd>value</dd>
    " in formatted assert "
  • 15: 4
  • " in formatted def test_nonstr_variable_repr_html() -> None: v = xr.Variable(["time", 10], [[1, 2, 3], [4, 5, 6]], {22: "bar"}) assert hasattr(v, "_repr_html_") with xr.set_options(display_style="html"): html = v._repr_html_().strip() assert "
<dt><span>22 :</span></dt><dd>bar</dd>
    " in html assert "
  • 10: 3
  • " in html class TestDataTreeTruncatesNodes: def test_many_nodes(self) -> None: # construct a datatree with 500 nodes number_of_files = 20 number_of_groups = 25 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g}) tree = xr.DataTree.from_dict(tree_dict) with xr.set_options(display_style="html"): result = tree._repr_html_() assert "6/20" in result for i in range(number_of_files): if i < 3 or i >= (number_of_files - 3): assert f"file_{i}
    " in result else: assert f"file_{i}
    " not in result assert "6/25" in result for i in range(number_of_groups): if i < 3 or i >= (number_of_groups - 3): assert f"group_{i}" in result else: assert f"group_{i}" not in result with xr.set_options(display_style="html", display_max_children=3): result = tree._repr_html_() assert "3/20" in result for i in range(number_of_files): if i < 2 or i >= (number_of_files - 1): assert f"file_{i}" in result else: assert f"file_{i}" not in result assert "3/25" in result for i in range(number_of_groups): if i < 2 or i >= (number_of_groups - 1): assert f"group_{i}" in result else: assert f"group_{i}" not in result class TestDataTreeInheritance: def test_inherited_section_present(self) -> None: dt = xr.DataTree.from_dict(data={"a/b/c": None}, coords={"x": [1]}) root_html = dt._repr_html_() assert "Inherited coordinates" not in root_html child_html = xarray_html_only_repr(dt["a"]) assert child_html.count("Inherited coordinates") == 1 def test_repr_consistency(self) -> None: dt = xr.DataTree.from_dict({"/a/b/c": None}) assert_consistent_text_and_html_datatree(dt) assert_consistent_text_and_html_datatree(dt["a"]) assert_consistent_text_and_html_datatree(dt["a/b"]) assert_consistent_text_and_html_datatree(dt["a/b/c"]) def test_no_repeated_style_or_fallback_text(self) -> None: dt = xr.DataTree.from_dict({"/a/b/c": None}) html = dt._repr_html_() assert html.count("