==> netcdf4-python-1.7.4rel/.github/dependabot.yml <==

# See https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
    labels:
      - "Bot"
    groups:
      github-actions:
        patterns:
          - '*'

==> netcdf4-python-1.7.4rel/.github/stubtest-allowlist <==

netCDF4.RealTypeLiteral
netCDF4.ComplexTypeLiteral
netCDF4.NumericTypeLiteral
netCDF4.CharTypeLiteral
netCDF4.TypeLiteral
netCDF4.NumPyRealType
netCDF4.NumPyComplexType
netCDF4.NumPyNumericType
netCDF4.NetCDFUDTClass
netCDF4.AccessMode
netCDF4.CompressionLevel
netCDF4.CompressionType
netCDF4.DatatypeType
netCDF4.DimensionsType
netCDF4.DiskFormat
netCDF4.EndianType
netCDF4.Format
netCDF4.QuantizeMode
netCDF4.CalendarType
netCDF4.DateTimeArray
netCDF4.FiltersDict
netCDF4.SzipInfo
netCDF4.BloscInfo
netCDF4.BoolInt
netCDF4.VarT
netCDF4.RealVarT
netCDF4.ComplexVarT
netCDF4.NumericVarT
netCDF4.Dimension.__reduce_cython__
netCDF4.Dimension.__setstate_cython__
netCDF4.Variable.auto_complex
netCDF4.Variable.__iter__
netCDF4._netCDF4.Dimension.__reduce_cython__
netCDF4._netCDF4.Dimension.__setstate_cython__
netCDF4._netCDF4.NC_DISKLESS
netCDF4._netCDF4.NC_PERSIST
netCDF4._netCDF4.Variable.auto_complex
netCDF4._netCDF4.Variable.__iter__
netCDF4._netCDF4.__reduce_cython__
netCDF4._netCDF4.__setstate_cython__
netCDF4._netCDF4.__test__
netCDF4.utils

==> netcdf4-python-1.7.4rel/.github/workflows/build_latest.yml <==

name: Build and Test Linux with latest netcdf-c
on: [push, pull_request]
jobs:
  build-linux:
    name: Python (${{ matrix.python-version }})
    runs-on: ubuntu-latest
    env:
      PNETCDF_VERSION: 1.14.1
      NETCDF_VERSION: 4.9.3
      NETCDF_DIR: ${{ github.workspace }}/..
      NETCDF_EXTRA_CONFIG: --enable-pnetcdf
      #CC: mpicc.mpich
      CC: mpicc
      #NO_NET: 1
    strategy:
      matrix:
        python-version: ["3.14"]

    steps:
    - uses: actions/checkout@v6
      with:
        submodules: true

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v6
      with:
        python-version: ${{ matrix.python-version }}

    - name: Install Ubuntu Dependencies
      run: |
        sudo apt-get update
        #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev openmpi-bin openmpi-common libopenmpi-dev libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        echo "Download and build PnetCDF version ${PNETCDF_VERSION}"
        wget https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz
        tar -xzf pnetcdf-${PNETCDF_VERSION}.tar.gz
        pushd pnetcdf-${PNETCDF_VERSION}
        ./configure --prefix $NETCDF_DIR --enable-shared --disable-fortran --disable-cxx
        make -j 2
        sudo make install
        popd
        echo "Download and build netCDF version ${NETCDF_VERSION}"
        wget https://downloads.unidata.ucar.edu/netcdf-c/${NETCDF_VERSION}/netcdf-c-${NETCDF_VERSION}.tar.gz
        tar -xzf netcdf-c-${NETCDF_VERSION}.tar.gz
        pushd netcdf-c-${NETCDF_VERSION}
        #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include"
        export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include"
        export LDFLAGS="-L${NETCDF_DIR}/lib"
        #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz"
        export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz"
        which $CC
        ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4 $NETCDF_EXTRA_CONFIG
        make -j 2
        sudo make install
        popd

#    - name: The job has failed
#      if: ${{ failure() }}
#      run: |
#        cd netcdf-c-${NETCDF_VERSION}
#        cat config.log

    - name: Install python dependencies via pip
      run: |
        python -m pip install --upgrade pip
        python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py typing-extensions

    - name: Install netcdf4-python
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c-${NETCDF_VERSION}/plugins/plugindir
        python -m pip install . --no-build-isolation

    - name: Test
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        python checkversion.py
        # serial
        cd test
        python run_all.py
        # parallel (hdf5 for netcdf4, pnetcdf for netcdf3)
        cd ../examples
        #mpirun.mpich -np 4 python mpi_example.py
        mpirun -np 4 --oversubscribe python mpi_example.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 mpi test failed!"
          exit 1
        else
          echo "hdf5 mpi test passed!"
        fi
        #mpirun.mpich -np 4 python mpi_example_compressed.py
        mpirun -np 4 --oversubscribe python mpi_example_compressed.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 compressed mpi test failed!"
          exit 1
        else
          echo "hdf5 compressed mpi test passed!"
        fi
        #mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA
        mpirun -np 4 --oversubscribe python mpi_example.py NETCDF3_64BIT_DATA
        if [ $? -ne 0 ] ; then
          echo "pnetcdf mpi test failed!"
          exit 1
        else
          echo "pnetcdf mpi test passed!"
        fi

#    - name: Tarball
#      run: |
#        export PATH=${NETCDF_DIR}/bin:${PATH}
#        python setup.py --version
#        check-manifest --version
#        check-manifest --verbose
#        pip wheel . -w dist --no-deps
#        twine check dist/*

==> netcdf4-python-1.7.4rel/.github/workflows/build_master.yml <==

name: Build and Test on Linux with netcdf-c github master
on: [push, pull_request]
jobs:
  build-linux:
    name: Python (${{ matrix.python-version }})
    runs-on: ubuntu-latest
    env:
      NETCDF_DIR: ${{ github.workspace }}/..
      #CC: mpicc.mpich
      CC: mpicc
      #NO_NET: 1
    strategy:
      matrix:
        python-version: ["3.14"]

    steps:
    - uses: actions/checkout@v6
      with:
        submodules: true

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v6
      with:
        python-version: ${{ matrix.python-version }}

    - name: Install Ubuntu Dependencies
      run: |
        sudo apt-get update
        #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        echo "Download and build netCDF github master"
        git clone https://github.com/Unidata/netcdf-c
        pushd netcdf-c
        #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include"
        export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include"
        export LDFLAGS="-L${NETCDF_DIR}/lib"
        #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz"
        export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz"
        autoreconf -i
        ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4
        make -j 2
        sudo make install
        popd

#    - name: The job has failed
#      if: ${{ failure() }}
#      run: |
#        cd netcdf-c-${NETCDF_VERSION}
#        cat config.log

    - name: Install python dependencies via pip
      run: |
        python -m pip install --upgrade pip
        python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py mypy types-setuptools typing-extensions

    - name: Install netcdf4-python
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c/plugins/plugindir
        python -m pip install . --no-build-isolation

    - name: Test
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        #export HDF5_PLUGIN_PATH=${NETCDF_DIR}/plugins/plugindir
        python checkversion.py
        # serial
        cd test
        python run_all.py
        # parallel
        cd ../examples
        #mpirun.mpich -np 4 python mpi_example.py
        mpirun -np 4 --oversubscribe python mpi_example.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 mpi test failed!"
          exit 1
        else
          echo "hdf5 mpi test passed!"
        fi
        #mpirun.mpich -np 4 python mpi_example_compressed.py
        mpirun -np 4 --oversubscribe python mpi_example_compressed.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 compressed mpi test failed!"
          exit 1
        else
          echo "hdf5 compressed mpi test passed!"
        fi

    - name: Stubtest
      run: |
        stubtest netCDF4 --allowlist .github/stubtest-allowlist --mypy-config-file=pyproject.toml
        mypy test
        mypy examples

==> netcdf4-python-1.7.4rel/.github/workflows/build_old.yml <==

name: Build and Test Linux with older netcdf-c
on: [push, pull_request]
jobs:
  build-linux:
    name: Python (${{ matrix.python-version }})
    runs-on: ubuntu-latest
    env:
      PNETCDF_VERSION: 1.12.1
      NETCDF_VERSION: 4.7.4
      NETCDF_DIR: ${{ github.workspace }}/..
      NETCDF_EXTRA_CONFIG: --enable-pnetcdf
      #CC: mpicc.mpich
      CC: mpicc
      #NO_NET: 1
    strategy:
      matrix:
        python-version: ["3.14"]

    steps:
    - uses: actions/checkout@v6
      with:
        submodules: true

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v6
      with:
        python-version: ${{ matrix.python-version }}

    - name: Install Ubuntu Dependencies
      run: |
        sudo apt-get update
        #sudo apt-get install mpich libmpich-dev libhdf5-mpich-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        sudo apt-get install openmpi-common libopenmpi-dev openmpi-bin libhdf5-openmpi-dev libcurl4-openssl-dev bzip2 libsnappy-dev libblosc-dev libzstd-dev
        echo "Download and build PnetCDF version ${PNETCDF_VERSION}"
        wget https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz
        tar -xzf pnetcdf-${PNETCDF_VERSION}.tar.gz
        pushd pnetcdf-${PNETCDF_VERSION}
        ./configure --prefix $NETCDF_DIR --enable-shared --disable-fortran --disable-cxx
        make -j 2
        sudo make install
        popd
        echo "Download and build netCDF version ${NETCDF_VERSION}"
        #wget https://downloads.unidata.ucar.edu/netcdf-c/${NETCDF_VERSION}/netcdf-c-${NETCDF_VERSION}.tar.gz
        wget https://www.gfd-dennou.org/arch/netcdf/unidata-mirror/netcdf-c-${NETCDF_VERSION}.tar.gz
        tar -xzf netcdf-c-${NETCDF_VERSION}.tar.gz
        pushd netcdf-c-${NETCDF_VERSION}
        #export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include"
        export CPPFLAGS="-I/usr/include/hdf5/openmpi -I${NETCDF_DIR}/include"
        export LDFLAGS="-L${NETCDF_DIR}/lib"
        #export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz"
        export LIBS="-lhdf5_openmpi_hl -lhdf5_openmpi -lm -lz"
        ./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --enable-dap --enable-parallel4 $NETCDF_EXTRA_CONFIG
        make -j 2
        sudo make install
        popd

#    - name: The job has failed
#      if: ${{ failure() }}
#      run: |
#        cd netcdf-c-${NETCDF_VERSION}
#        cat config.log

    - name: Install python dependencies via pip
      run: |
        python -m pip install --upgrade pip
        python -m pip install numpy cython cftime pytest twine wheel check-manifest mpi4py typing-extensions

    - name: Install netcdf4-python
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c-${NETCDF_VERSION}/plugins/plugindir
        python -m pip install . --no-build-isolation

    - name: Test
      run: |
        export PATH=${NETCDF_DIR}/bin:${PATH}
        python checkversion.py
        # serial
        cd test
        python run_all.py
        # parallel (hdf5 for netcdf4, pnetcdf for netcdf3)
        cd ../examples
        #mpirun.mpich -np 4 python mpi_example.py
        mpirun -np 4 --oversubscribe python mpi_example.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 mpi test failed!"
          exit 1
        else
          echo "hdf5 mpi test passed!"
        fi
        #mpirun.mpich -np 4 python mpi_example_compressed.py
        mpirun -np 4 --oversubscribe python mpi_example_compressed.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 compressed mpi test failed!"
          exit 1
        else
          echo "hdf5 compressed mpi test passed!"
        fi
        #mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA
        mpirun -np 4 --oversubscribe python mpi_example.py NETCDF3_64BIT_DATA
        if [ $? -ne 0 ] ; then
          echo "pnetcdf mpi test failed!"
          exit 1
        else
          echo "pnetcdf mpi test passed!"
        fi

#    - name: Tarball
#      run: |
#        export PATH=${NETCDF_DIR}/bin:${PATH}
#        python setup.py --version
#        check-manifest --version
#        check-manifest --verbose
#        pip wheel . -w dist --no-deps
#        twine check dist/*

==> netcdf4-python-1.7.4rel/.github/workflows/cibuildwheel.yml <==

name: Wheels

on:
  pull_request:
  push:
    tags:
      - "v*"
  release:
    types:
      - published

permissions:
  contents: read

jobs:
  build_sdist:
    name: Build source distribution
    runs-on: ubuntu-22.04

    steps:
    - uses: actions/checkout@v6
      with:
        fetch-depth: 0

    - uses: actions/setup-python@v6
      name: Install Python
      with:
        python-version: 3.x

    - name: Install APT packages
      if: contains(${{ matrix.os }}, 'ubuntu')
      run: |
        sudo apt update
        sudo apt install libhdf5-dev libnetcdf-dev

    - name: Build sdist
      run: >
        pip install build
        && python -m build --sdist . --outdir dist

    - uses: actions/upload-artifact@v6
      with:
        name: pypi-artifacts
        path: ${{ github.workspace }}/dist/*.tar.gz

  build_bdist:
    name: "Build ${{ matrix.os }} (${{ matrix.arch }}) wheels"
    runs-on: ${{ matrix.os }}
    # Prevent hanging when building from emulation like aarch64.
    timeout-minutes: 300
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-22.04
            arch: x86_64
          - os: ubuntu-24.04-arm
            arch: aarch64

    steps:
    - uses: actions/checkout@v6
      with:
        fetch-depth: 0

    # For aarch64 support
    # https://cibuildwheel.pypa.io/en/stable/faq/#emulation
    - uses: docker/setup-qemu-action@v3
      with:
        platforms: all
      if: runner.os == 'Linux' && matrix.arch == 'aarch64'

    - name: Build oldest and newest Python
      shell: bash
      # On PRs we run only oldest and newest Python versions to reduce CI load.
      # Skips pypy and musllinux everywhere.
      # We are building 310, 311 and 314 for now.
      # (3.11 is the oldest version for which we support abi3 wheels)
      # These needs to rotate every new Python release.
      run: |
        set -x
        echo "CIBW_BUILD=cp310-* cp311-* cp314-* cp314t-*" >> $GITHUB_ENV
        set +x
      if: ${{ github.event_name }} == "pull_request"

    - name: "Building ${{ matrix.os }} (${{ matrix.arch }}) wheels"
      uses: pypa/cibuildwheel@v3.3.0
      env:
        CIBW_ARCHS: ${{ matrix.arch }}

    - uses: actions/upload-artifact@v6
      with:
        name: pypi-artifacts-${{ matrix.os }}-${{ matrix.arch }}
        path: ${{ github.workspace }}/wheelhouse/*.whl

  build_wheels_winmac:
    name: Build wheels for ${{matrix.arch}} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: windows-latest
            arch: AMD64
          - os: macos-14
            arch: arm64
          - os: macos-15-intel
            arch: x86_64

    steps:
    - uses: actions/checkout@v6
      with:
        fetch-depth: 0

    - uses: actions/setup-python@v6
      name: Install Python
      with:
        python-version: 3.x

    - name: Setup Micromamba Python ${{ matrix.python-version }}
      uses: mamba-org/setup-micromamba@v2
      with:
        environment-name: build
        init-shell: bash
        create-args: >-
          python=${{ matrix.python-version }} libnetcdf=4.9.3
          --channel conda-forge

    - name: Build wheels for Windows/Mac
      uses: pypa/cibuildwheel@v3.3.0
      env:
        CIBW_ARCHS: ${{ matrix.arch }}

    - uses: actions/upload-artifact@v6
      with:
        name: pypi-artifacts-${{ matrix.os }}-${{ matrix.arch }}
        path: ${{ github.workspace }}/wheelhouse/*.whl

  build_wheels_windows_arm:
    name: Build wheels for ARM64 on Windows
    runs-on: windows-11-arm

    steps:
    - uses: actions/checkout@v6
      with:
        fetch-depth: 0

    - uses: actions/setup-python@v6
      name: Install Python
      with:
        python-version: 3.x

    - name: Install vcpkg dependencies
      shell: pwsh
      run: |
        # Install vcpkg
        git clone https://github.com/Microsoft/vcpkg.git C:\vcpkg
        cd C:\vcpkg
        .\bootstrap-vcpkg.bat
        # Install netcdf and dependencies
        .\vcpkg.exe install hdf5:arm64-windows netcdf-c:arm64-windows zlib:arm64-windows
        # Set environment variables for build
        echo "HDF5_DIR=C:\vcpkg\installed\arm64-windows" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
        echo "NETCDF4_DIR=C:\vcpkg\installed\arm64-windows" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

    - name: Build wheels for Windows ARM64
      uses: pypa/cibuildwheel@v3.3.0
      env:
        CIBW_ARCHS: ARM64
        CIBW_SKIP: "cp310-*"

    - uses: actions/upload-artifact@v6
      with:
        name: pypi-artifacts-windows-11-arm-ARM64
        path: ${{ github.workspace }}/wheelhouse/*.whl

  show-artifacts:
    needs: [build_bdist, build_sdist, build_wheels_winmac, build_wheels_windows_arm]
    name: "Show artifacts"
    runs-on: ubuntu-22.04
    steps:
    - uses: actions/download-artifact@v7
      with:
        pattern: pypi-artifacts*
        path: ${{ github.workspace }}/dist
        merge-multiple: true

    - shell: bash
      run: |
        ls -lh ${{ github.workspace }}/dist

  publish-artifacts-pypi:
    needs: [build_bdist, build_sdist, build_wheels_winmac, build_wheels_windows_arm]
    name: "Publish to PyPI"
    runs-on: ubuntu-22.04
    # upload to PyPI for every tag starting with 'v'
    if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v')
    steps:
    - uses: actions/download-artifact@v7
      with:
        pattern: pypi-artifacts*
        path: ${{ github.workspace }}/dist
        merge-multiple: true

    - uses: pypa/gh-action-pypi-publish@release/v1
      with:
        user: __token__
        password: ${{ secrets.PYPI_PASSWORD }}
        print_hash: true

==> netcdf4-python-1.7.4rel/.github/workflows/miniconda.yml <==

name: Build and Test

on:
  pull_request:
  push:
    branches: [master]

jobs:
  run-serial:
    runs-on: ${{ matrix.os }}
    #env:
    #  NO_NET: 1
    strategy:
      matrix:
        python-version: [ "3.10", "3.11", "3.12", "3.13", "3.14" ]
        os: [windows-latest, ubuntu-latest, macos-latest]
        platform: [x64, x32]
        exclude:
          - os: macos-latest
            platform: x32
      fail-fast: false
    defaults:
      run:
        shell: bash -l {0}

    steps:
    - uses: actions/checkout@v6
      with:
        submodules: true

    - name: Setup Micromamba
      uses: mamba-org/setup-micromamba@v2
      with:
        environment-name: TEST
        init-shell: bash
        create-args: >-
          python=${{ matrix.python-version }} numpy cython pip setuptools
          pytest hdf5 libnetcdf cftime zlib certifi typing-extensions
          --channel conda-forge

    - name: Install netcdf4-python
      run: |
        export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH" # so setup.py finds nc-config
        python -m pip install -v -e . --no-deps --no-build-isolation --force-reinstall

    - name: Tests
      run: |
        if [ "$RUNNER_OS" == "Windows" ]; then
          export HDF5_PLUGIN_PATH="${CONDA_PREFIX}\\Library\\hdf5\\lib\\plugin"
        else
          export HDF5_PLUGIN_PATH="${CONDA_PREFIX}/hdf5/lib/plugin/"
        fi
        pytest -s -rxs -v test

  run-mpi:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [ "3.12" ]
        os: [ubuntu-latest]
        platform: [x64]
    defaults:
      run:
        shell: bash -l {0}

    steps:
    - uses: actions/checkout@v6
      with:
        submodules: true

    - name: Setup Micromamba
      uses: mamba-org/setup-micromamba@v2
      with:
        environment-name: TEST
        init-shell: bash
        create-args: >-
          python=${{ matrix.python-version }} numpy cython pip pytest
          openmpi mpi4py hdf5=*=mpi* libnetcdf=*=mpi* cftime zlib certifi
          typing-extensions --channel conda-forge

    - name: Install netcdf4-python with mpi
      run: |
        export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH" # so setup.py finds nc-config
        nc-config --all
        python -m pip install -v -e . --no-build-isolation --no-deps --force-reinstall

    - name: Tests
      run: |
        cd test && python run_all.py
        cd ../examples
        export PATH="${CONDA_PREFIX}/bin:${CONDA_PREFIX}/Library/bin:$PATH"
        which mpirun
        mpirun --version
        mpirun -np 4 --oversubscribe python mpi_example.py # for openmpi
        #mpirun -np 4 python mpi_example.py
        if [ $? -ne 0 ] ; then
          echo "hdf5 mpi test failed!"
          exit 1
        else
          echo "hdf5 mpi test passed!"
        fi

==> netcdf4-python-1.7.4rel/.gitmodules <==

==> netcdf4-python-1.7.4rel/Changelog <==

version 1.7.4 (tag v1.7.4rel)
=============================
 * Make sure automatic conversion of character arrays <--> string arrays
   works for Unicode strings (issue #1440). (previously only worked
   correctly for encoding="ascii").
 * Add netcdf plugins (blosc, zstd, bzip2) in wheels. Blosc plugin doesn't
   work in Windows wheels. Macos wheels now use conda provided libs. (PR #1450)
 * Add windows/arm (PR #1453) and free-threaded python wheels (issue #1454).
   Windows wheels now use netcdf-c 4.9.3. WARNING: netcdf-c is not
   thread-safe and netcdf4-python does not have internal locking, so expect
   segfaults if you use netcdf4-python on multiple threads with
   free-threaded python. Users must exercise care to only call netcdf from
   a single thread.

version 1.7.3 (tag v1.7.3rel)
=============================
 * Python 3.14 wheels (issue #1432)
 * support os.PathLike arguments for `Dataset.fromcdl` and raise a
   `FileNotFoundError` if the cdl is missing and a `FileExistsError` if the
   nc file already exists (PR #1387)
 * raise more informative error when trying to iterate or perform a
   membership operation on a Dataset (issue #1383)
 * fix type hint for createEnumType (issue #1378)
 * add python 3.13 to windows wheel builds (PR #1377)
 * allow slicing of vlen and string variables with non-unitary strides
   (issue #1408).

version 1.7.2 (tag v1.7.2rel)
=============================
 * add static type hints (PRs #1302, #1349)
 * Expose nc_rc_set, nc_rc_get (via rc_set, rc_get module functions). (PR #1348)
 * Add Variable.get_fill_value and allow `fill_value='default'` to set
   `_FillValue` attribute using default fill values. (issue #1374, PR #1375).
 * Fix NETCDF3 endian error (issue #1373, PR #1355).

version 1.7.1 (tag v1.7.1rel)
=============================
 * include nc_complex source code from v0.2.0 tag (instead of using submodule).
 * add aarch64 wheels.

version 1.7.0 (tag v1.7.0rel)
=============================
 * add support for complex numbers via `auto_complex` keyword to `Dataset`
   (PR #1295) (see the sketch after this section).
 * fix for deprecated Cython `DEF` and `IF` statements using compatibility
   header with shims for unavailable functionality (PR #1277)
 * use `szip` as the library name on Windows (PR #1304)
 * add support for MS-MPI `MPI_Message` detection (PR #1305)
 * fix for issue #1306 - surprising result when indexing vlen str with
   non-contiguous indices.
 * Fix bug in set_collective introduced in PR #1277 (collective mode was
   always set).
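Illustrative sketch of the auto_complex support described above (the file
and variable names here are invented, not part of the release notes):

    import numpy as np
    from netCDF4 import Dataset

    complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j], dtype="c16")
    with Dataset("complex_example.nc", "w", auto_complex=True) as nc:
        nc.createDimension("x", size=len(complex_array))
        # with auto_complex=True, complex dtypes are accepted directly;
        # the on-disk representation is handled by the nc_complex library
        var = nc.createVariable("data", np.complex128, ("x",))
        var[:] = complex_array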
version 1.6.5 (tag v1.6.5rel)
=============================
 * fix for issue #1271 (mask ignored if bool MA assigned to uint8 var)
 * include information on specific object when reporting errors from netcdf-c
 * python 3.12 wheels added, support for python 3.7 removed.

version 1.6.4 (tag v1.6.4rel)
=============================
 * set path to SSL certificates internally, so https DAP URLs work with
   wheels (issue #1246, requires nc_rc_set function available starting with
   netcdf-c 4.9.1, plus bugfix in netcdf-c PR #2690). Added certifi as a
   dependency.
 * Added `isopen` method to `MFDataset` object to check if underlying files
   are open.

version 1.6.3 (tag v1.6.3rel)
=============================
 * Use ``nc_put_vars`` for strided writes for netcdf-c >= 4.6.2 (issue #1222).
 * _Unsigned="false" should be same as not having _Unsigned set (issue
   #1232). _Unsigned now must be set to "true" or "True" for variable to be
   interpreted as unsigned, instead of just having _Unsigned be set (to
   anything).
 * pypi wheels built with netcdf-c 4.9.1.

version 1.6.2 (tag v1.6.2rel)
=============================
 * Added ``netCDF4.__has_set_alignment__`` property to help identify if the
   underlying netcdf library supports setting the HDF5 alignment.
 * Slicing multi-dimensional variables with an all False boolean index
   array now returns an empty numpy array (instead of raising an exception
   - issue #1197). Behavior now consistent with numpy slicing.
 * fix problem with compiling using netcdf-c < 4.9.0 (issue #1209)
 * pypi wheels built with netcdf-c 4.9.0.

version 1.6.1 (tag v1.6.1rel)
=============================
 * add Dataset methods has_<name>_filter (where <name> = zstd, blosc,
   bzip2, szip) to check for availability of extra compression filters.
 * release GIL for all C-lib calls (issue #1180).
 * Add support for nc_set_alignment and nc_get_alignment to control
   alignment of data within HDF5 files.

version 1.6.0 (tag v1.6.0rel)
=============================
 * add support for new quantization functionality in netcdf-c 4.9.0 via
   "significant_digits" and "quantize_mode" kwargs in Dataset.createVariable.
   Default quantize_mode is "BitGroom", but alternate methods "BitRound"
   and "GranularBitRound" also supported.
 * opening a Dataset in append mode (mode = 'a' or 'r+') creates a Dataset
   if one does not already exist (similar to python open builtin). Issue
   #1144. Added a mode='x' option (as in python open) which is the same as
   mode='w' with clobber=False.
 * allow createVariable to accept either Dimension instances or Dimension
   names in "dimensions" tuple kwarg (issue #1145).
 * remove all vestiges of python 2 in _netCDF4.pyx and set cython
   language_level directive to 3 in setup.py.
 * add 'compression' kwarg to createVariable to enable new compression
   functionality in netcdf-c 4.9.0. 'None', 'zlib', 'szip', 'zstd',
   'bzip2', 'blosc_lz', 'blosc_lz4', 'blosc_lz4hc', 'blosc_zlib' and
   'blosc_zstd' are currently supported. 'blosc_shuffle', 'szip_mask' and
   'szip_pixels_per_block' kwargs also added. compression='zlib' is
   equivalent to (the now deprecated) zlib=True (see the sketch after this
   section). If the environment variable NETCDF_PLUGIN_DIR is set to point
   to the directory with the compression plugin lib__nc* files, then the
   compression plugins will be installed within the package and be
   automatically available (the binary wheels have this). Otherwise, the
   environment variable HDF5_PLUGIN_PATH needs to be set at runtime to
   point to plugins in order to use the new compression options.
 * MFDataset did not aggregate 'name' variable attribute (issue #1153).
 * issue warning instead of raising an exception if missing_value or
   _FillValue can't be cast to the variable type when creating a masked
   array (issue #1152).
 * Define MPI_Session for compatibility with current mpi4py (PR #1156).
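A hedged sketch of the new createVariable compression/quantization kwargs
(requires netcdf-c >= 4.9.0 with the plugins available; file and variable
names are invented):

    import numpy as np
    from netCDF4 import Dataset

    with Dataset("compressed_example.nc", "w") as nc:
        nc.createDimension("time", None)
        v = nc.createVariable(
            "temp", "f4", ("time",),
            compression="zlib",        # replaces the deprecated zlib=True
            complevel=4,
            significant_digits=3,      # lossy quantization for better compression
            quantize_mode="BitGroom",  # the default; "BitRound" also possible
        )
        v[:] = np.random.uniform(size=100).astype("f4")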
version 1.5.8 (tag v1.5.8rel)
=============================
 * Fix Enum bug (issue #1128): the enum_dict member of an EnumType read
   from a file contains invalid values when the enum is large enough (more
   than 127 or 255 members).
 * Binary wheels for aarch64 and python 3.10.

version 1.5.7 (tag v1.5.7rel)
=============================
 * don't try to mask vlens with default _FillValue, since vlens don't have
   a default _FillValue. This gets rid of numpy DeprecationWarning (issue
   #1099).
 * update docs to reflect the fact that a variable must be in collective
   mode before writing compressed data to it in parallel. Added a test for
   this (examples/mpi_example_compressed.py). Issue #1108.
 * Fix OverflowError when dimension sizes become greater than 2**32-1
   elements on Windows (Issue #1112).
 * Don't return masked arrays for vlens (only for primitive and enum types
   - issue #1115).

version 1.5.6 (tag v1.5.6rel)
=============================
 * move CI/CD tests from travis/appveyor to Github Actions (PR #1061).
 * move netCDF4 dir under src so module can be imported in source directory
   (PR #1062).
 * change numpy.bool to numpy.bool_ and numpy.float to numpy.float_ (float
   and bool are deprecated in numpy 1.20, issue #1065)
 * clean up docstrings so that they work with latest pdoc.
 * update cython numpy API to remove deprecation warnings.
 * Add "fromcdl" and "tocdl" Dataset methods for import/export of CDL via
   ncdump/ncgen called externally via the subprocess module (issue #1078)
   (see the sketch after this section).
 * remove python 2.7 support.
 * broadcast data (if possible) to conform to variable shape when writing
   to a slice (issue #1083).
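Illustrative use of the CDL import/export helpers; they shell out to
ncgen/ncdump, which must be on the PATH (filenames are invented):

    from netCDF4 import Dataset

    # CDL text -> netCDF file (runs ncgen under the hood)
    nc = Dataset.fromcdl("example.cdl", ncfilename="example.nc")
    # netCDF -> CDL text (runs ncdump under the hood)
    print(nc.tocdl(data=True))
    nc.close()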
version 1.5.5.1 (tag v1.5.5.1rel)
=================================
 * rebuild binary wheels for linux and OSX to link netcdf-c 4.7.4 and hdf5
   1.12.0.

version 1.5.5 (tag v1.5.5rel)
=============================
 * have setup.py always try to use nc-config first to find paths to netcdf
   and hdf5 libraries and headers. Don't use pkg-config to find HDF5 if
   HDF5 env vars are set (or read from setup.cfg).
 * Change MIT license text to standard OSI wording (PR #1046).

version 1.5.4 (tag v1.5.4rel)
=============================
 * fix printing of variable objects for variables that end with the letter
   'u' (issue #983).
 * make sure root group has 'name' attribute (issue #988).
 * add the ability to pack vlen floats to integers using
   scale_factor/add_offset (issue #1003)
 * use len instead of deprecated numpy.alen (issue #1008)
 * check size on valid_range instead of using len (issue #1013).
 * add `set_chunk_cache/get_chunk_cache` module functions to reset the
   default chunk cache sizes before opening a Dataset (issue #1018).
 * replace use of numpy's deprecated tostring() method with tobytes()
   (issue #1023).
 * bump minimal numpy version to 1.9 (first version to have tobytes()).

version 1.5.3 (tag v1.5.3rel)
=============================
 * make sure arrays are masked that are not filled when auto_fill is off
   (issue #972).
 * python 3.8 binary wheels.

version 1.5.2 (tag v1.5.2rel)
=============================
 * fix for scaling bug when _Unsigned attribute is set and byteorder of
   data does not match native byteorder (issue #930).
 * revise documentation for Python 3 (issue #946).
 * establish support for Python 2.7, 3.5, 3.6 and 3.7 (issue #948).
 * use dict built-in instead of OrderedDict for Python 3.7+ (pull request #955).
 * remove underline ANSI in Dataset string representation (pull request #956).
 * remove newlines from string representation (pull request #960).
 * fix for issue #957 (size of scalar var is a float since
   numpy.prod(())=1.0).
 * make sure Variable.setncattr fails to set _FillValue (issue #959).
 * fix detection of parallel HDF5 support with netcdf-c 4.6.1 (issue #964).

version 1.5.1.2 (tag v1.5.1.2rel)
=================================
 * fix another slicing bug introduced by the fix to issue #906 (issue #922).

version 1.5.1.1 (tag v1.5.1.1rel)
=================================
 * fixed __version__ attribute (was set incorrectly in 1.5.1 release).
 * fix for issue #919 (assigning 2d array to 3d variable with singleton
   first dimension with v[:] = a).
 * minimum numpy changed from 1.9.0 to 1.10.0.

version 1.5.1 (tag v1.5.1rel)
=============================
 * fix issue #908 by adding workaround for incorrect value returned by
   nc_inq_var_fill for netcdf-c < 4.5.1.
 * fix bug writing slice to unlimited dimension that is not the first
   (leftmost). Issue #906.
 * make sure data gets converted to type of scale_factor when add_offset=0
   and scale_factor=1 (issue #913).
 * fix for reading empty (NIL) string attributes (issue #915).

version 1.5.0.1 (tag v1.5.0.1rel)
=================================
 * binary wheels for linux and macosx rebuilt against netcdf-c 4.6.3
   (instead of 4.4.1.1).
 * add read-shared mode (mode='rs'). Significantly speeds up reads of
   NETCDF3 files (pull request #902).

version 1.5.0 (tag v1.5.0rel)
=============================
 * added support for parallel IO in the classic netcdf-3 formats through
   the pnetcdf library (pull request #897).

version 1.4.3.2 (tag v1.4.3.2)
==============================
 * include missing membuf.pyx file in release source tarball.

version 1.4.3.1 (tag v1.4.3.1)
==============================
 * fix bug in implementation of NETCDF4_CLASSIC support for parallel IO in
   v1.4.3 release.

version 1.4.3 (tag v1.4.3rel)
=============================
 * make set_always_mask work in MFDataset.
 * fix saving diskless files to disk with netcdf-c >= 4.6.2.
 * write to an in-memory Dataset, memoryview buffer returned by
   Dataset.close() (issue #865, requires netcdf-c >= 4.6.2) (see the sketch
   after this section).
 * fix performance regression when using large sequences of consecutive
   integers for indexing with netcdf-c >= 4.6.2 (issue #870).
 * improved error messages for ncinfo and other utilities (issue #873).
 * fix for int64 attributes not being created for NETCDF3_64BIT_DATA (CDF5)
   files (issue #878).
 * fix for MPI parallel error ("NetCDF: Attempt to use feature that was not
   turned on when netCDF was built") using netcdf-c 4.6.2 (issue #883).
 * Added methods `set_ncstring_attrs()` to Dataset, Group and Variable that
   forces all text attributes to be written as variable length strings
   (netCDF type NC_STRING - issue #882).
 * Allow parallel mode with NETCDF4_CLASSIC files (issue #890).
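A minimal sketch of the in-memory write described above, assuming
netcdf-c >= 4.6.2 and that close() returns a memoryview for diskless
datasets with persist=False (names are invented):

    import numpy as np
    from netCDF4 import Dataset

    nc = Dataset("inmemory_example.nc", "w", diskless=True, persist=False)
    nc.createDimension("x", 3)
    v = nc.createVariable("v", np.int32, ("x",))
    v[:] = np.arange(3, dtype=np.int32)
    buf = nc.close()  # memoryview of the netCDF file contents
    with open("inmemory_example.nc", "wb") as f:
        f.write(buf)  # persist the buffer manually, if desired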
version 1.4.2 (tag v1.4.2rel)
=============================
 * add get_dims Variable method (issue #824)
 * make sure format keyword not ignored when mode is 'ws' (issue #827)
 * fix numpy FutureWarning (non-tuple sequence for multidimensional
   indexing is deprecated), issue #833.
 * add 'master_file' kwarg to MFDataset.__init__ (issue #835).
 * always use nc_get_vars for strided access over OpenDAP (issue #838).
 * raise FutureWarning when trying to set multi-dimensional array attribute
   while still silently flattening the array (issue #841). Will change to
   ValueError in next release (1.4.3).
 * fix parallel writes when both nc4 parallel and pnetcdf parallel options
   enabled in the netcdf-c library (issue #820).
 * fix for writing masked scalar character variable (issue #850).

version 1.4.1 (tag v1.4.1rel)
=============================
 * disable workaround for slow nc_get_vars for __netcdflibversion__ >=
   4.6.2, since a fix was added to speed up nc_get_vars in the C library.
   Issue 680.
 * new Dataset and Variable methods (set_always_mask) to optionally
   re-enable old behaviour (return masked arrays only if selected slice
   contains missing values) (issue #809).

version 1.4.0 (tag v1.4.0rel)
=============================
 * fixed bug in detection of CDF5 library support in setup.py (pull request
   #736, issue #713).
 * fixed reading of variables with zero-length dimensions in
   NETCDF3_CLASSIC files (issue #743).
 * allow integer-like objects in VLEN slices (not just python ints, issue
   #526, pull request #757).
 * treating _FillValue as a valid_min/valid_max was too surprising, despite
   the fact that the netcdf docs 'attribute best practices' suggest that
   clients should do this. Revert this change from issue #576 (issue #761).
 * remove netcdftime, since it is now a separate package. date2num,
   num2date and date2index still importable from netCDF4.
 * fix 'Unreachable code' cython warning (issue #767).
 * Change behavior of string attributes so that nc.stringatt = ['foo','bar']
   produces a vlen string array attribute in NETCDF4, instead of
   concatenating into a single string ('foobar'). In
   NETCDF3/NETCDF4_CLASSIC, an IOError is now raised, instead of writing
   'foobar'. Issue #770.
 * fix loading of enum type names (issue #775).
 * make sure missing_value applies only to scaled short integers if
   auto-scaling is on (issue #777).
 * automatically create views of compound types with character arrays as
   numpy strings (issue #773). Can be disabled using
   'set_auto_chartostring(False)'. Numpy structured array dtypes with 'SN'
   string subtypes can now be used to define netcdf compound types (they
   get converted to ('S1',N) character array types automatically).
 * always return masked array by default, even if there are no masked
   values (too surprising to get ndarray or MaskedArray depending on slice,
   issue #785).
 * treat valid_min/valid_max/_FillValue/missing_value as unsigned integers
   if _Unsigned is set (to mimic behaviour of netcdf-java). Conversion to
   unsigned type now occurs before masking and scale/offset operation.
   Issue #794.

version 1.3.1 (tag v1.3.1rel)
=============================
 * add parallel IO capabilities. netcdf-c and hdf5 must be compiled with
   MPI support, and mpi4py must be installed. To open a file for parallel
   access, use `parallel=True` in `Dataset.__init__` and optionally pass
   the mpi4py Comm instance using the `comm` kwarg and the mpi4py Info
   instance using the `info` kwarg. IO can be toggled between collective
   and independent using `Variable.set_collective`. See
   `examples/mpi_example.py` and the sketch after this section. Issue #717,
   pull request #716. Minimum cython dependency bumped from 0.19 to 0.21.
 * Add optional `MFTime` calendar overload to use across all files, for
   example, `'standard'` or `'gregorian'`. If `None` (the default), check
   that the calendar attribute is present on each variable and values are
   unique across files, raising a `ValueError` otherwise.
 * Allow _FillValue to be set for vlen string variables (issue #730).
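A minimal parallel-IO sketch along the lines of examples/mpi_example.py
(run under mpirun, e.g. "mpirun -np 4 python script.py"; the file and
variable names are invented):

    from mpi4py import MPI
    import numpy as np
    from netCDF4 import Dataset

    rank = MPI.COMM_WORLD.rank  # each MPI process writes its own entry
    nc = Dataset("parallel_example.nc", "w", parallel=True,
                 comm=MPI.COMM_WORLD, info=MPI.Info())
    nc.createDimension("dim", 4)
    v = nc.createVariable("var", np.int64, ("dim",))
    v.set_collective(True)  # toggle collective vs independent IO
    v[rank] = rank
    nc.close()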
version 1.3.0 (tag v1.3.0rel)
=============================
 * always search for HDF5 headers when building, even when nc-config is
   used (since nc-config does not always include the path to the HDF5
   headers). Also use H5get_libversion to obtain HDF5 version info instead
   of H5public.h. Fixes issue #677.
 * encoding kwarg added to Dataset.__init__ and Dataset.filepath (default
   is to use sys.getfilesystemencoding()) so that oddball encodings (such
   as cp1252 on windows) can be handled in Dataset filepaths (issue #686).
 * Calls to nc_get_vars are avoided, since nc_get_vars is very slow (issue
   #680). Strided slices are now converted to multiple calls to
   nc_get_vara. This speeds up strided slice reads by a factor of 10-100
   (especially for NETCDF4/HDF5 files) in most cases. In some cases,
   strided reads using nc_get_vars are faster (e.g. strided reads over many
   dimensions such as var[:,::2,::2,::2]), so a variable method
   use_nc_get_vars was added. var.use_nc_get_vars(True) will tell the
   library to use nc_get_vars instead of multiple calls to nc_get_vara,
   which was the default behaviour previous to this change.
 * fix utc offset time zone conversion in netcdftime - it was being done
   exactly backwards (issue #685 - thanks to @pgamez and @mdecker).
 * Fix error message for illegal ellipsis slicing, add test (issue #701).
 * Improve timezone format parsing in netcdftime
   (https://github.com/Unidata/netcdftime/issues/17).
 * make sure numpy datatypes used to define CompoundTypes have
   isalignedstruct flag set to True (issue #705); otherwise segfaults can
   occur. The fix required raising the minimum numpy requirement from 1.7.0
   to 1.9.0.
 * ignore missing_value, _FillValue, valid_range, valid_min and valid_max
   when creating masked arrays if attribute cannot be safely cast to
   variable data type (and issue a warning). When setting these attributes
   don't cast to variable dtype unless it can be done safely and issue a
   warning. Issue #707.

version 1.2.9 (tag v1.2.9rel)
=============================
 * Fix for auto scaling and masking when _Unsigned attribute set (create
   view as unsigned type after scaling and masking). Issue #671.
 * Always mask values outside valid_min, valid_max (not just when
   missing_value attribute present). Issue #672.
 * Fix setup.py so pip install doesn't fail if cython not installed.
   setuptools >= 18.0 now required for installation (Issue #666).

version 1.2.8 (tag v1.2.8rel)
=============================
 * recognize _Unsigned attribute used by netcdf-java to designate unsigned
   integer data stored with a signed integer type in netcdf-3 (issue #656).
 * add Dataset init memory parameter to allow loading a file from memory
   (pull request #652, issues #406 and #295).
 * fix for negative times in num2date (issue #659).
 * fix for failing tests in numpy 1.13 due to changes in numpy.ma (issue #662).
 * Check for _Encoding attribute for NC_STRING variables, otherwise use
   'utf-8'. 'utf-8' is used everywhere else, 'default_encoding' global
   module variable is no longer used. getncattr method now takes optional
   kwarg 'encoding' (default 'utf-8') so encoding of attributes can be
   specified if desired. If _Encoding is specified for an NC_CHAR ('S1')
   variable, the chartostring utility function is used to convert the array
   of characters to an array of strings with one less dimension (the last
   dimension is interpreted as the length of each string) when reading the
   data. When writing the data, stringtochar is used to convert a numpy
   array of fixed length strings to an array of characters with one more
   dimension. chartostring and stringtochar now also have an 'encoding'
   kwarg (see the sketch after this section). Automatic conversion to/from
   character to string arrays can be turned off via a new
   set_auto_chartostring Dataset and Variable method (default is True).
   Addresses issue #654.
 * Cython >= 0.19 now required, _netCDF4.c and _netcdftime.c removed from
   repository.
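A small round-trip sketch of the stringtochar/chartostring utilities
mentioned above (the array contents are made up):

    import numpy as np
    from netCDF4 import chartostring, stringtochar

    strings = np.array(["foo", "bar"], dtype="S3")
    chars = stringtochar(strings)   # dtype 'S1', shape (2, 3): one extra dimension
    back = chartostring(chars)      # strings again (unicode with default utf-8)
    assert back.tolist() == ["foo", "bar"]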
version 1.2.7 (tag v1.2.7rel)
=============================
 * fix for issue #624 (error in conversion to masked array when variable
   slice returns a scalar). This is a regression introduced in 1.2.5
   associated with support for vector missing_values. Test (tst_masked5.py)
   added for vector missing_values.
 * fix for python 3.6 compatibility (error retrieving character _FillValue
   attribute, issue #626). Test with python 3.6 using travis CI.

version 1.2.6 (tag v1.2.6rel)
=============================
 * fix some test failures on big endian PPC64 that were due to errors in
   byte-swapping logic. Also fixed bug in enum code exposed on PPC64 (issue
   #608).
 * remove support for python 2.6 (it probably still will work for a while
   though).
 * Sometimes checking that data being assigned to a variable has an 'ndim'
   attribute is not sufficient; instead check to see that the object
   supports the buffer interface (issue #613).
 * make get_variables_by_attributes work in MFDataset (issue #610). The
   hack is also applied for set_auto_maskandscale, set_auto_scale,
   set_auto_mask, so these don't have to be duplicated in MFDataset (pull
   request #571).

version 1.2.5 (tag v1.2.5rel)
=============================
 * Add MFDataset.set_auto_maskandscale (plus set_auto_scale,
   set_auto_mask). Fixes issue #570 (see the sketch after this section).
 * Use valid_min/valid_max/valid_range attributes when defining mask (issue
   #576). Values outside the valid range are considered to be missing when
   defining the mask.
 * Fix for issue #584 (add support for dates before -4712-1-1 in 360_day
   and 365_day calendars to netcdftime.utime).
 * Fix for issue #593: add support for datetime.timedelta operations
   (adding and subtracting timedelta, subtracting two datetime instances to
   compute time duration between them), implement datetime.replace() and
   datetime.__str__(). datetime.__repr__() includes the full state of an
   instance. Add datetime.calendar. datetime comparison operators have full
   accuracy now.
 * Fix for issue #585 by increasing the size of the buffer used to store
   the filepath.
 * Fix for issue #592: Add support for string array attributes. (When
   reading, a vlen string array attribute is returned as a list of strings.
   To write, use var.setncattr_string("name", ["two", "strings"]).)
 * Fix for issue #596 - julian day calculations wrong for negative years,
   causing incorrect num2date(date2num(date)) roundtrip for dates with
   year < 0.
 * Make sure negative years work in utime.num2date (issue #596).
 * raise NotImplementedError when trying to pickle Dataset, Variable,
   CompoundType, VLType, EnumType and MFDataset (issue #602).
 * Fix for issue #527: initialize vldata[i].p in Variable._get(...).
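Hedged illustration of the auto mask/scale toggles referenced above; the
filename and variable name are placeholders:

    from netCDF4 import Dataset

    nc = Dataset("example.nc")
    nc.set_auto_maskandscale(False)        # raw, unmasked, unscaled values everywhere
    nc.variables["temp"].set_auto_scale(True)  # re-enable scaling for one variable
    data = nc.variables["temp"][:]
    nc.close()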
version 1.2.4 (tag v1.2.4rel)
=============================
 * Fix for issue #554. It is now ensured that data is in native endian byte
   order before passing to netcdf-c library. Data read from variable with
   non-native byte order is also byte-swapped, so that dtype remains
   consistent with netcdf variable. Behavior now consistent with h5py.
 * raise warning for HDF5 1.10.x (issue #549), since backwards incompatible
   files may be created.
 * raise AttributeError instead of RuntimeError when attribute operation
   fails. raise IOError instead of RuntimeError when nc_create or nc_open
   fails (issue #546).
 * Use NamedTemporaryFile instead of deprecated mktemp in tests (pull
   request #543).
 * add AppVeyor automated windows tests (pull request #540).

version 1.2.3.1 (tag v1.2.3.1rel)
=================================
 * fix bug in setup.py (pull request #539, introduced in issue #518).

version 1.2.3 (tag v1.2.3rel)
=============================
 * try to avoid writing NC_STRING attributes if possible, by trying to
   convert unicode strings to ascii and write as NC_CHAR (issue #529). This
   preserves compatibility with clients (like Matlab) that can't deal with
   NC_STRING attributes. A 'setncattr_string' method was added for Dataset
   and Variable so that users can force attributes to be written as
   NC_STRING if necessary (see the sketch after this section).
 * fix failing tests with numpy 1.11 (issues #521 and #522).
 * fix indentation bug in nc4tonc3 utility (issue #519).
 * add the capability in setup.py to use pkg-config instead of nc-config
   (pull request #518).
 * make sure slices which return scalar masked arrays are consistent with
   numpy.ma (issue #515).
 * add test/tst_cdf5.py and test/tst_filepath.py (to test new
   NETCDF3_64BIT_DATA format and filepath Dataset method).
 * expose netcdftime.__version__ (issue #504).
 * fix potential memory leak in Dataset.filepath in attempt to fix
   mysterious segfaults on CentOS6 (issue #506). Segfaults can apparently
   still occur on systems like CentOS6 with old versions of glibc.
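A short sketch of setncattr_string; the attribute names and values below
are invented:

    from netCDF4 import Dataset

    nc = Dataset("attrs_example.nc", "w")
    nc.setncattr_string("history", ["two", "strings"])  # forced to NC_STRING
    nc.title = "plain ascii title"  # eligible to be written as NC_CHAR
    nc.close()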
version 1.2.2 (tag v1.2.2rel)
=============================
 * fix failing tests on python 2.6 (issue #497). Change minimum required
   python from 2.5 to 2.6.
 * Potential memory leaks fixed by freeing string pointers internally
   allocated in netcdf-c using nc_free_string. Also use nc_free_vlens to
   free space allocated for vlens inside netcdf-c (issue #495).
 * invoke str on filename argument to Dataset constructor, so pathlib
   instances can be used (issue #489).
 * don't use hardwired NC_MAX_DIMS or NC_MAX_VARS internally to allocate
   space for dimension or variable ids. Instead, find out the number of
   dims and vars and use malloc. NC_MAX_NAME is still used to allocate
   space for attribute and variable names, since there is no obvious way to
   determine the length of these names.
 * if trying to write a unicode attribute, check to see if it exists first
   and is NC_CHAR, and if so, delete it and recreate it. Workaround for C
   lib bug discovered in issue #485.
 * support for NETCDF3_64BIT_DATA format introduced in netcdf-c 4.4.0.
   Similar to NETCDF3_64BIT (now NETCDF3_64BIT_OFFSET), but includes 64 bit
   dimensions and sizes, plus unsigned and 64 bit integer data types.
 * make sure chunksize does not exceed dimension size (for non-unlimited
   dimensions) on variable creation (issue #480).
 * add 'size' attribute to Dimension (same as len(d), where d is a
   Dimension instance, issue #477).
 * fix bug in nc3tonc4 with --unpackshort=1 (issue #474).
 * dates do not have to be contiguous, i.e. can be before and after the
   missing dates in Gregorian calendar (pull request #476).

version 1.2.1 (tag v1.2.1rel)
=============================
 * add the capability to slice variables with unsorted integer sequences,
   or integer sequences with duplicates (issue #467). This was done by
   converting boolean array slices to integer array slices internally,
   instead of the other way around.
 * raise TypeError if masked array assigned to a VLEN str variable slice
   (issue #464).
 * Ellipsis now can be used with scalar VLEN str variables (issue #458).
   Slicing of scalar VLEN (non-str) variables now works.
 * Allow non-positive reference years in non-real-world calendars (issue
   #442).

version 1.2.0 (tag v1.2.0rel)
=============================
 * Fixes to setup.py for building on windows (issue #460).
 * warnings now issued if file being read contains unsupported variables or
   data types (they were previously being silently skipped).
 * added 'get_variables_by_attributes' method (issue #454) (see the sketch
   after this section).
 * check for 'units' attribute in date2index (issue #453).
 * added support for enum types (issue #452).
 * added 'isopen' Dataset method (issue #450).
 * raise ValueError if year 0 or negative year used in time units string.
   The year 0 does not exist in the Julian and Gregorian calendars (issue
   #442).
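Illustration of get_variables_by_attributes; the attribute values shown
are hypothetical:

    from netCDF4 import Dataset

    nc = Dataset("example.nc")
    # exact attribute matching:
    temps = nc.get_variables_by_attributes(units="degC")
    # a callable allows fuzzier matching:
    axes = nc.get_variables_by_attributes(axis=lambda v: v in ("X", "Y", "Z", "T"))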
version 1.1.9 (tag v1.1.9rel)
=============================
 * fix for issue #391 (data is already byte-swapped to native endian format
   by the HDF5 library).
 * fix for issue #415 (copy.deepcopy does not work on netcdftime datetime
   object).
 * fix for issue #420 - len(v) where v is a scalar variable returned
   unexpected IndexError, now returns "TypeError: len() on unsized object"
   (same as numpy does for len() on a scalar array).
 * translate docstrings from epydoc markup to markdown, so pdoc can be used
   (epydoc is dead).
 * add small offset in conversion to Julian date for numerical stability
   (more accurate round trip calculations). This offset is removed in back
   conversion only from microseconds. Pull request #433.
 * add detection of unsigned integers to handling of automatic packing
   (set_auto_scale and set_auto_maskandscale) when writing. Pull request
   #435.
 * use USE_SETUPCFG env var to over-ride use of setup.cfg. If USE_SETUPCFG
   evaluates to false, setup.cfg will not be used and all configuration
   variables can be set from environment variables. Useful when using 'pip
   install' and nc-config is broken (issue #438).
 * fix for integer overflow in date2index (issue #444).

version 1.1.8 (tag v1.1.8rel)
=============================
 * v[...] now returns a numpy scalar array (not just a scalar) when v is a
   scalar netcdf variable (issue #413).
 * unix-like paths can now be used in createVariable and createGroup.
   v = nc.createVariable('/path/to/var1', float, ('xdim','ydim')) will
   create a Variable named 'var1', while also creating the Groups 'path'
   and 'path/to' if they do not already exist. Similarly,
   g = nc.createGroup('/path/to') acts like 'mkdir -p' in unix, creating
   the Groups 'path' and '/path/to', if they don't already exist. Users who
   relied on nc.createGroup(groupname) failing when the group already
   exists will have to modify their code, since nc.createGroup will now
   return the existing group instance. Dataset.__getitem__ also added.
   nc['/path/to'] returns a Group instance, and nc['/path/to/var1'] returns
   a Variable instance (see the sketch after this section).
 * change minimum required numpy to 1.7.0, fix so all tests pass with
   1.7.0. Added travis tests for minimum required cython, numpy (issue
   #404).
 * enable abbreviations to time units specification, as allowed in CF
   (issue #402). Now, in addition to 'seconds', the abbreviations 'secs',
   'sec' and 's' are also allowed (and similarly for minutes, days and
   hours).
 * install utility scripts in utils directory with setuptools entry points
   (pull request #392 from @mindw). Code for utilities moved to
   netCDF4_utils.py - makes utilities more windows-friendly.
 * make sure booleans are treated correctly in setup.cfg. Add use_cython
   (default True) to setup.cfg. If set to False, then cython will not be
   used to compile netCDF4.pyx (existing netCDF4.c will be used instead).
 * use "from Cython.Build import cythonize" instead of "from
   Cython.Distutils import build_ext" in setup.py (issue #393) to conform
   to new cython build mechanism (CEP 201, described at
   https://github.com/cython/cython/wiki/enhancements-distutils_preprocessing).
 * unicode attributes now written as strings, not bytes (using
   nc_put_att_string instead of nc_put_att_text, issue #388).
 * add __orthogonal_indexing__ attribute to Variable, Dataset and Group
   (issue #385) to denote that Variable objects do not follow numpy
   indexing semantics for integer and boolean array indices.
 * make sure application of scale_factor and add_offset works correctly
   when scale_factor not given (issue #381).
 * add man pages for nc3tonc4, nc4tonc3, ncinfo in man directory. Not
   installed by setup.py (contributed by Ross Gammon, issue #383).
 * replace tabs with spaces by running reindent.py on all *.py and *.pyx
   files (issue #378).
 * refactor netCDF4_utils and netCDF4 module into netCDF4 package.
   Refactoring effectively removes netCDF4 utils private attributes from
   netCDF4 namespace, so has the potential to break code using private
   attributes (issue #409).
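A consolidated sketch of the path-based access described above (the file
and group names are invented; dimensions defined in the root group are
visible to variables created in subgroups):

    from netCDF4 import Dataset

    nc = Dataset("groups_example.nc", "w")
    nc.createDimension("xdim", 2)
    v = nc.createVariable("/path/to/var1", "f8", ("xdim",))  # creates groups as needed
    grp = nc["/path/to"]        # Group instance
    var = nc["/path/to/var1"]   # the Variable created above
    nc.close()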
version 1.1.7 (tag v1.1.7rel)
=============================
 * check to make sure cython >= 0.19 is available before trying to use it
   (otherwise compilation will fail). Issue 367.
 * add ipython notebooks from Unidata workshop in examples directory.
 * fix ellipsis variable slicing regression (issue 371).
 * release the Global Interpreter Lock (GIL) when calling the C library for
   read operations. Speeds up multi-threaded reads (issue 369). Caution -
   the HDF5 library may need to be compiled with the threadsafe option to
   ensure that global data structures are not corrupted by simultaneous
   manipulation by different threads.
 * Make sure USE_NCCONFIG environment variable takes precedence over value
   of use_ncconfig in setup.cfg. With this change, 'pip install netCDF4'
   with USE_NCCONFIG=1 will use environment variables to find paths to
   libraries and include files, instead of relying on nc-config (issue
   #341).

version 1.1.6 (tag v1.1.6rel)
=============================
 * fix for issue 353 (num2date can no longer handle units like 'hours since
   2000-01-01 0').
 * fix for issue 354 (num2date no longer supports multi-dimensional arrays).
 * fix for spurious UserWarning about endian-ness mismatch (issue 364).
 * make calendar name keyword for num2date/date2num case insensitive (issue
   362).
 * make sure units parser returns time-zone naive datetime instance that
   includes UTC offset (issue 357). UTC offset was applied incorrectly in
   netcdftime.date2num and num2date. No longer need to depend on
   python-dateutil.

version 1.1.5 (tag v1.1.5rel)
=============================
 * add dependency on python-dateutil in setup.py and install docs.
 * use python datetime in num2date and date2num whenever possible. Remove
   duplicate num2date and date2num functions from netcdftime. Addresses
   issue #344. Add microsecond capability to netcdftime.datetime. Roundtrip
   accuracy of num2date/date2num now down to less than a millisecond (see
   the sketch after this section).
 * use nc-config by default to find dependencies. setup.py modified to
   handle failure to find nc-config more gracefully (issue #340). If you
   wish to use env vars to point to the libs, you must first move the
   setup.cfg file out of the way (rename it to setup.cfg.save), or set
   USE_NCCONFIG to 0.
 * if endian-ness of variable is specified, adjust datatype to reflect this
   when opening a file (issue 346).
 * fix for issue #349 (seconds outside the range 0-59 in netcdftime.num2date).
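A small num2date/date2num round-trip example (these functions are
importable from netCDF4; the units string and dates are invented):

    from datetime import datetime
    from netCDF4 import date2num, num2date

    units = "hours since 2000-01-01 00:00:00"
    t = date2num(datetime(2000, 1, 2, 12), units, calendar="standard")  # 36.0
    d = num2date(t, units, calendar="standard")  # back to a datetime-like object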
* it is now possible to create and set variable length string variables with numpy string datatypes (pull request 224). * add .travis.yml (for travis-ci testing on github), silence warnings from test output (issue 225). * modify __unicode__ for Variable and Dimension to return a more useful error message when Dataset object has been garbage collected. * use weak references to group instances when creating Dimension and Variable objects. This prevents cyclic references messing up garbage collection (issue 218, pull request 219). * accessing values from a 0-dimensional Variable now returns a 0-dimensional numpy array, not a 1-dimensional array (issue 220). To write code compatible with both the old and new (fixed) behavior, wrap values accessed from a 0-dimensional Variable with numpy.asscalar. * add an __array__ method to Variable to make numpy ufuncs faster (issue 216). * change download_url in setup.py to point to pypi instead of googlecode. * fix for date2index error when time variable has only one entry (issue 215). * silence warnings ("Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). Each pointer declaration should be on its own line") with Cython 0.2. * reduced memory usage for Variable.__getitem__ under Python 2. version 1.0.8 (tag v1.0.8rel) ============================= * change file_format Dataset attribute to data_model (keeping file_format for backward compatibility). Add disk_format attribute (underlying disk format, one of NETCDF3, HDF4, HDF5, DAP2, DAP4, PNETCDF or UNDEFINED). Uses nc_inq_format_extended, added in version 4.3.1 of library. If using earlier version of lib, disk_format will be set to UNDEFINED. * default _FillValue now ignored for byte data types (int8 and uint8) as per http://www.unidata.ucar.edu/software/netcdf/docs/netcdf-c/Fill-Values.html#Fill-Values "If you need a fill value for a byte variable, it is recommended that you explicitly define an appropriate _FillValue attribute, as generic utilities such as ncdump will not assume a default fill value for byte variables". ncinfo now returns fill mode information (issue 209). * check to see if filling was disabled before masking data equal to default fill value (issue 209). * add variable type information to Dataset.__repr__ (output of ncinfo). version 1.0.7 (tag v1.0.7rel) ============================= * add the ability to specify the locations of hdf4, jpeg and curl libs, in case netCDF4 was built statically with HDF4 and/or OpenDAP support (issue 207). * add 'ncinfo' utility (like 'ncdump -h' but less verbose). * more information displayed when Dataset or Group instance is printed. * fix for issue 194 (versions after 1.0.5 fail for netcdf 4.1.1, due to call to nc_inq_path, which was added in netcdf 4.1.2). Fixed by adding compile time API check similar to what was done for nc_rename_grp. If filepath Dataset method is called an exception will be raised at runtime if the module was built with netcdf < 4.1.2, or cython was not installed at build time. * fix for issues 202 and 206 (exception raised by numpy.isnan for character data types). * if dateutils not installed and time unit accuracy < 1 second requested, have netcdftime raise an ImportError. version 1.0.6 (svn revision 1312) ================================ * issue warning if endian-ness of dtype argument does not match endian kwarg in createVariable. * make sure netcdf type NC_CHAR always returned in numpy array dtype 'S1' (sometimes arrays of type 'U1' were being returned).
Fixes intermittently failing test tst_compoundatt.py on python 3.3. * fix for issue 201 (if data associated with numpy array not the same endian-ness as dtype, data was written incorrectly). Now bytes are swapped if necessary. Variable.endian() now returns 'native' instead of None for NETCDF3 formatted files. createVariable now enforces endian='native' for NETCDF3 files. Added tst_endian.py test case. * fix for issue 200 (library version detection failed on cygwin). * fix for issue 199 (nc4tonc3 utility not copying global attributes). * fix for issue 198 (setup.py chokes when no arguments given). * fix for issue 197 (slicing of netCDF variables using lists of integers). * create 'path' attribute for group instance using posixpath, instead of os.path (to ensure the unix path is used on all platforms). Issue 196. * fix for issue 196 (test failures on win32 due to files being deleted before they are closed). version 1.0.5 (svn revision 1278) ================================ * change setup.py to compile the Cython sources directly, if cython is available. This allows for "ifdef" like capability to modify source at compile time to account for changes in netcdf API (e.g. the forthcoming addition of the nc_rename_grp in version 4.3.1). * added a "renameGroup" method, which raises an exception if the netcdf lib version linked does not support it. Requires netcdf >= 4.3.1. * support for more than one missing value (missing_value attribute is a vector) when converting to masked array. * add 'renameAttribute' method to Dataset, Group and Variable. * fix so that var[:] = x works if x is a scalar, and var is a netcdf variable with an unlimited dimension that has shape () - i.e. no data has been written to it yet. Before this change, var[:] = x did not write any data. Now the scalar x will be written as the first entry in var along the unlimited dimension. * remove dos line feeds from nc3tonc4 (issue 181). * add datatype property for Variable that returns numpy dtype for primitive datatypes (same as dtype attribute) but returns CompoundType or VLType instance for compound or vlen variables (issue 178). * fix logic for deciding where to look for nc-config in setup.py (issue 177). * issue a warning and don't try to apply scale_factor or add_offset if these attributes are not convertible to floats (issue 176). * add filepath method to Dataset instance to return file path (or opendap URL) used to create Dataset (issue 172). * fix for issue 170 (opening a remote DAP dataset fails after creating a NETCDF4 formatted file). * fix for issue 169 (error in chartostring function on 64-bit windows). * add support for missing_value or _FillValue == NaN (issue 168). * added a Dimension.group() method (issue 165). version 1.0.4 (svn revision 1229) ================================= * fixed alignment bug that could cause memory corruption when reading compound type variables. All users of compound types should upgrade. version 1.0.3 (svn revision 1219) ================================= * don't try to write empty data array to netcdf file (fixed failing test with netcdf 4.3.0rc2). * date2num, num2date and date2index now can handle units of microseconds and milliseconds (for proleptic_gregorian calendar, or gregorian and standard calendars as long as the time origin is after 1582-10-15). Issue 159. * Added a _grp attribute to Dimension (issue 165). * don't bundle ordereddict (issue 164). * support reading of vlen string attributes (issue 156). * add --vars option to nc3tonc4 (issue 154). 
* Don't try to set fletcher32 checksum on scalar variables (it causes HDF5 to crash). Fixes issue 150. * Add --istart/--istop options to nc3tonc4 (issue 148, courtesy of Rich Signell). * fix for proleptic_gregorian in netcdftime.py (courtesy of Matthias Cuntz). version 1.0.2 (svn revision 1196) ================================= * disable version check for HDF5, which is broken by hdf5 1.8.10. * make sure all files have a calendar attribute in MFTime (issue 144). * more robust fix to issue 90 (array shape modified by assignment to a netCDF variable with one more dimension), including test case. version 1.0.1 (svn revision 1190) ================================= * fix error that occurred when retrieving data from a variable that has a missing_value attribute specified as a string (issue 142). * automatically close netcdf files when there are no references left to Dataset object (using __dealloc__ method). Fixes issue 137. * fix for slicing of scalar vlen string variables (issue 140). * fix to allow writing of unicode data to a NC_CHAR variable. * allow for writing of large variables (> 2**32 elements). Fixes issue 130. version 1.0fix1 =============== * fix python 3 incompatibility in setup.py (issue 125). version 1.0 (svn revision 1164) =============================== * add 'aggdim' keyword to MFDataset, so the name of the dimension to aggregate over can be specified (instead of using the unlimited dimension). aggdim=None by default, which results in the previous behavior. aggdim must be the leftmost dimension of all the variables to be aggregated. * raise IndexError when indexing a netcdf variable out of range so iterating over a variable in a for loop behaves as expected (as described in http://effbot.org/zone/python-for-statement.htm). Fixes issue 121. * added MacPorts portfile (so it can be installed via MacPorts on macosx using a "local Portfile repository"). Installs from svn HEAD using 'port install netcdf4-python'. * added experimental 'diskless' file capability (only added to the C lib after the 4.2 release). Controlled by kwarg 'diskless' to netCDF4.Dataset (default False). diskless=True when creating a file results in a file that exists only in memory, closing the file makes the data disappear, unless the persist=True keyword is given, in which case it is persisted to a disk file on close. diskless=True when opening a file creates an in-memory copy of the file for faster access. * add the ability to specify the location of the required libs (and whether to use nc-config) with setup.cfg, instead of using environment variables. * fix ISO8601 date parser so it recognizes time zone offsets in time unit strings (contributed by David Hassel, issue 114, r1117). * add setncatts Dataset, Group and Variable method to add a bunch of attributes (given in a python dictionary) at once. Speeds things up for NETCDF3 and NETCDF4_CLASSIC files a lot, since nc_redef/nc_enddef does not need to be called for each attribute (issue 85, r1113). Adding 1000 attributes is about 35 times faster using setncatts to add them all at once. Makes no difference for NETCDF4 formatted files, since nc_redef/nc_enddef is not called. * only round after applying scale_factor and add_offset if variable type is integer (issue 111, r1109). * Fixed bug with all False Boolean index (r1107). * added support for after, before and nearest selection methods to date2index fast "first guess" indexing (r1106). * Remove white space in time units string (netcdftime._parse_date).
An extra space in the time units of one CMIP3 model caused an error (r1105). * based on results with examples/bench_compress2.py, change default complevel for zlib compression from 6 to 4. If complevel=0, turn compression off entirely (set zlib=False) (r1102). version 0.9.9 (svn revision 1099) ================================ * changed default unicode encoding from "latin-1" to "utf-8", since this is the python 3 default, and the only encoding that appears to work for dimension and variable names. * added test case for unicode attributes, variable and dimension names. * fixes for unicode variable, dimension and group names. * fix for unicode attributes in python3 (ncdump did not interpret them as text strings). Issue 107. * add --format option to nc4tonc3 utility (can be either NETCDF3_CLASSIC or NETCDF3_64BIT). Fixes issue 104. version 0.9.8 (svn revision 1080) ================================ * use numpy.ma.isMA to check for masked array (instead of checking for presence of 'mask' attribute). * fixes for AIX with ibm xlc compiler. * make sure unicode attributes don't get converted to ascii strings (issue 98). version 0.9.7 (svn revision 1073) ================================ * Added __str__ methods to Dataset, Variable, Dimension, CompoundType, VLType and MFDataset, so useful human-readable information is provided when these objects are printed in an interactive session. * don't try to apply scale_factor and offset if scale_factor=1 and add_offset=0 (to avoid making copies of large arrays). * changed netCDF4._default_fillvals to netCDF4.default_fillvals (to make part of public API). Added to docs (issue 94). version 0.9.6 (svn revision 1043) ================================= * changed default unicode encoding from "ascii" to "latin-1" (iso-8859-1). * add "unicode_error" module variable to control what happens when characters cannot be decoded by the encoding specified by the "default_encoding" module variable (which is "ascii" by default). unicode_error = "replace" by default, which means bad characters are replaced by "?". Previously an error was raised; the old behavior can be obtained by setting unicode_error = 'strict'. Fixes issue 92. * add __enter__ and __exit__ methods so you can do "with Dataset(url) as f:" (issue 89). * don't add extra singleton dimensions to rhs numpy arrays when assigning to a netcdf variable. Fixes issue 90. * coerce missing_value attribute to same type as variable (for primitive types). Fixes issue 91. version 0.9.5 (svn revision 1031) ================================ * fix for compound variables on python 3.2. * fix slicing of masked MFDataset variables (issue 83). * round to nearest integer after packing with scale_factor and add_offset (instead of truncation) (issue 84). * if add_offset missing, but scale_factor present, assume add_offset zero. if scale_factor missing, but add_offset present, assume scale_factor one. (this is consistent with unidata recommendations - issue 86). * only try to convert strings to bytes for python 3 so Dataset can be subclassed (issue 87). version 0.9.4 (svn revision 1018) ================================ * tested with python 2.7.1/3.1.3 using netcdf 4.1.2 and hdf5 1.8.6. * Added a 'default_encoding' module variable that controls how unicode strings are encoded into bytes. Default is 'ascii'. * now works on Python 3. * netCDF3 module removed. If you still need it, get it from netCDF4 0.9.3. * regenerated C source with Cython 0.14.1. * Added an MFTime class.
Provide a unified interface to MFDataset time variable using different time units. * Fixed bug in netcdftime (issue 75) that occurs when time specified is within one second of the end of the month. * on unix-like systems, the environment variable USE_NCCONFIG can be set to tell setup.py to use the nc-config script installed by netcdf to figure out where all the libs and headers are (without having to specify NETCDF_DIR, HDF5_DIR, etc). Only works with netcdf 4.1.2. version 0.9.3 (svn revision 930) ================================ * fix chunk sizes bug (chunk sizes pointer should be size_t, not int). Fixes issue 66. Added test in tst_compression.py. * fixed writing of data with missing values with scale/offset packing. Added test (tst_masked2.py). * fix iso8601 regex in netcdftime date parser so it can parse 'hours since 1-1-1 ...' (year had to be 4 digits previously). version 0.9.2 (svn revision 907) ================================ * fix netcdftime bug with '360_day' calendar. Fixes issue 59. * make sure scalar slice of 1d variable returns array scalar (not array of shape (1,)). Fixes issue 57. * updated date parser in netcdftime. Can now handle units like "seconds since 1970-01-01T00:00:00Z". * added support in setup.py for specifying the locations of the HDF5/netcdf-4 headers and libs separately with environment variables (HDF5_INCDIR, HDF5_LIBDIR). Patch contributed by Patrice Dumas. * add masked array support to num2date (dates for missing times set to None). * add chunk_cache keyword to createVariable. HDF5 default is 1mb, which can cause problems when creating thousands of variables. In such cases, chunk_cache can be reduced, or set to zero. * add set_var_chunk_cache and get_var_chunk_cache Variable methods. * raise AttributeError when trying to set _FillValue attribute (it can only be reliably set on variable creation, using the fill_value keyword to createVariable). version 0.9.1 (svn revision 879) ================================ * raise ImportError if netcdf-4 < 4.1.1 or hdf5 <= 1.8.4. * add __netcdf4libversion__ and __hdf5libversion__ module variables. * make sure data is not truncated to integers before scale_factor and add_offset is applied (issue 46). * fix bug in date2num with noleap calendar in netcdftime (issue 45). * fix bug in 360day calendar in netcdftime (issue 44). * python 2.4 compatibility restored (by modifying OrderedDict). Fixes issue 37. * make sure int64 attributes cast to int32 when format=NETCDF4_CLASSIC. This was causing tst_multifile.py to fail on 64-bit platforms. * fix tutorial.py to cast 64 bit integers to 32 bit when writing to 32-bit integer vlen (was causing tutorial.py to fail on 64-bit platforms). * remove nose dependency from tst_netcdftime.py. version 0.9 (svn revision 846) ============================== * fixed bug (issue 30) with date2index occurring with dates outside the support. * make sure that auto masking works with MFDataset. * fix bug (issue 34) when slicing MFDataset variables with dimensions of length 1. * used ordered dictionaries for variables, dimensions, groups etc to preserve creation order (makes it easier to copy files, fixes issue 28). * change auto_maskandscale default to True. This means data will automatically be converted to and from masked arrays. Data scaled as short integers using the scale_factor and add_offset attributes will also be automatically converted to/from float arrays.
* add setncattr, getncattr, delncattr methods (for setting/getting/deleting netcdf attributes with names that clash with the reserved python attributes). version 0.8.2 (svn revision 769) ================================ * compound type tests re-enabled. Compound and vlen types now fully supported in netcdf-4.1-beta2. * make sure data retrieved from a netCDF variable is not coerced to a python scalar (it should remain a numpy scalar array). * fix docs to point out that an unlimited dimension can be created by setting size to *either* None or 0 in createDimension. * fix another slicing corner case. * remove auto pickling/unpickling into vlen strings (too cute, sometimes produced surprising results). version 0.8.1 (svn revision 744) ================================ * added 'cmptypes' and 'vltypes' Group/Dataset attributes, which contain dictionaries that map the names of compound and vlen types to CompoundType and VLType instances. * Experimental variable-length (vlen) data type support added. * changes to accommodate compound types in netcdf-4.1-beta snapshots. Compound types now work correctly for snapshots >= 20090603. * Added __len__ method and 'size' property to Variable class. * In date2index, replaced the brute force method by the bisection method and added a 'select' keyword to find the index of the date before, after or nearest the given date. * Fixed bug occurring when indexing with a numpy array of length 1. * Fixed bug that occurred when -1 was used as a variable index. * enabled 'shared access' mode for NETCDF3 formatted files (mode='ws', 'r+s' or 'as'). Writes in shared mode are unbuffered, which can improve performance for non-sequential access. * fixed bug in renameVariable that caused failure when new name is longer than old name, and file format is NETCDF3_64BIT or NETCDF3_CLASSIC. version 0.8 (svn revision 685) ============================== * added 'stringtoarr' utility function for converting python strings to numpy character arrays of a specified size. * initial support for compound data types (which are mapped to structured numpy arrays). Compound data types are created with the createCompoundType Dataset or Group method. Both attributes and variables can be compound types. * make sure 64-bit integer attributes converted to 32 bits when writing to a NETCDF3 formatted file. * added nc4tonc3 utility for converting NETCDF4_CLASSIC files to NETCDF3_64BIT files (useful for sharing data with colleagues who don't have netcdf-4 capable clients). version 0.7.7 (svn revision 626) ================================ * David Huard reworked fancy indexing - it is now much more efficient and less of a memory hog. Now works differently than numpy fancy indexing - 1d arrays of boolean or integer indices work independently on each dimension. This enables things like: >>> tempdat = temp[[0,1,3],lats>0,lons>0] (retrieves 1st, 2nd and 4th levels, all Northern Hem. and Eastern Hem. grid points - note that this would raise an IndexError in numpy) * added opendap test (tst_dap.py). * bugfix for nc3tonc4 utility. * fix MFDataset.Variable.__getattr__ to raise AttributeError instead of KeyError when attribute not found. * netcdftime version number upped to 0.7. version 0.7.6 (svn revision 574) ================================ * added date2index function, courtesy of David Huard, which finds the indices in a netCDF time variable corresponding to a sequence of datetime instances.
* make _get_att/_set_att raise AttributeError instead of RuntimeError, so that getattr(object, 'nonexistantattribute', None) works. (thanks David Huard) * v[:] = data now works along unlim dim, i.e. you can do this: file = Dataset('test.nc', "w") file.createDimension("time", None) # unlimited dimension var = file.createVariable("var", 'd', ("time",)) # you used to have to do this #var[0:10] = numpy.arange(10) # but now you can simply do this var[:] = numpy.arange(10) version 0.7.5 (svn revision 549) ================================ * return a scalar array, not a python scalar, when a slice returns a single number. This is more consistent with numpy behavior, and fixes a bug in MFDataset slicing. * added 'exclude' parameter to MFDataset.__init__ * added set_auto_maskandscale method to MFDataset variables. version 0.7.4 (svn revision 540) ================================ * ensure all arithmetic is done with float64 in netcdftime (Rob Hetland). * fixes for netcdf-4.0-beta2 ('chunking' keyword to createVariable replaced by 'contiguous'). Now works with netcdf-4.0-beta2 and hdf5-1.8.0 final, but is incompatible with netcdf-4.0-beta1. version 0.7.3.1 (svn revision 507) ================================== * netCDF3 docs were missing from 0.7.3. * make sure quantization function preserves fill_value of masked arrays. version 0.7.3 (svn revision 501) ================================ * MFnetCDF4 module merged into netCDF4 and netCDF3 (now called MFDataset). * added netCDF3 module for those who can't install the netCDF 4 lib. * added set_auto_maskandscale Variable method to enable automatic packing and unpacking of short integers (using scale_factor and add_offset attributes) and automatic conversion to/from masked arrays (using missing_value or _FillValue attribute) on a per-variable basis. var.set_auto_maskandscale(True) turns automatic conversion on (it is off by default). * automatically pack/unpack short integer variables if scale_factor and add_offset variable attributes are set. * added support for masked arrays. If you try to write a masked array to a variable with the missing_value or _FillValue attributes set, the masked array is filled with that value before being written to the file. If you read data from a variable with the missing_value or _FillValue attribute set, a masked array is returned with the appropriate values masked. * added date2num and num2date functions. * added capability to use 'fancy indexing' with variable objects (i.e. using sequences of integers or booleans in slices). WARNING: if a sequence of integers or booleans is used to slice a netCDF4 variable, all of the data in that dimension is read into a numpy array, and then the sequence is used to slice the numpy array, returning just the requested elements to the user. This can potentially gobble a lot of memory and degrade performance (especially if 'fancy indexing' is done on the left-most dimension). * added convenience functions stringtochar and chartostring for converting character arrays to arrays of fixed-length strings and vice-versa. Example usage in examples/test_stringarr.py. 20070826 - version 0.7.1 (svn revision 400) =========================================== * added 'endian()' and 'chunking()' Variable methods (to inquire about endian and chunking variable settings). * 'ndim' attribute was not public (so it couldn't be accessed from python). Fixed. * added 'endian' kwarg to createVariable (to set the endian-ness used in the HDF5 file). 
* can now manually set HDF5 chunksizes for each dimension at variable creation, using 'chunksizes' kwarg to createVariable. * added "getlibversion()" function to get info about version of netcdf-4 library used to build module. * if a variable has an unsupported datatype (such as 'compound', or 'vlen'), then instead of raising an exception, just skip it. Print a useful error message when an attribute with an unsupported datatype is accessed. * if variable dimension is specified as 'dimname' or ('dimname') in createVariable, it is automatically converted to a tuple ('dimname',). Better error messages when specified dimension can't be found. * createVariable accepts numpy dtype object as datatype. dtype variable attribute is now a numpy dtype object. 20070723 - version 0.7 (svn revision 361) ========================================= * renamed MFnetCDF4_classic --> MFnetCDF4. * eliminated netCDF4_classic module (all file formats handled by netCDF4 module now). * removed all user-defined data type stuff (it was hacky and made the code too complex - wait till there is a real use case to refactor and put back in). * added 'ndim' variable attribute (number of variable dimensions). 20070424 - version 0.6.3 (svn revision 302) =========================================== * passes all tests with netcdf-4.0-beta1/hdf5-1.8.0-beta1. * if slice index is not a slice object, assume it's an integer (and try to convert to one if it is not). This allows numpy scalar arrays to work as slice indices. * (netCDF4_classic only) try to make sure file is not left in 'define mode' when exception is raised. * if slicing a variable results in an array with shape (1,), just return a scalar (except for compound types). * added instructions for using the netCDF4_classic module to serve data over http with the DAP using pydap (http://pydap.org). * added --quiet and --chunk options to nc3tonc4. * Turned off zlib compression by default so as not to violate the 'principle of least surprise'. Shuffle filter still activated by default when zlib compression turned on. * Fixed bug in fletcher32 checksum activation call. Renamed compression() variable method to filters(), and included the fletcher32 checksum flag in the output. * added utility for converting GRIB1 files to compressed NETCDF4_CLASSIC files (requires PyNIO). * added 'compression()' variable method that returns a dict with compression filter parameter settings for that variable. (rev 237) * reimplemented 'shape' and 'dimensions' variable attributes as properties. * fixed bug when 'chunking' keyword in createVariable was set to 'sub' (caused Bus Error on MacOS X). * Setting 'shuffle=0' keyword in createVariable was turning off zlib compression filter instead of shuffle filter. Fixed. 20070213 - version 0.6.2 ======================== * updated for compatibility with netcdf-4.0-alpha18 and hdf5 1.8.0alpha5 (shared dimensions actually work now). * netCDF4.createVariable can now use old single character Numeric typecodes for datatype specification. * Improvements to MFDataset (now called MFnetCDF4_classic) by Rob Hetland. 20061121 - version 0.6.1 ======================== * bugfixes for negative strides. * bugfix for empty string attributes. * support for shared dimensions (variables can use dimensions defined only in a parent group). This doesn't actually work yet, because of a bug in netcdf-4.0-alpha17. * now requires Pyrex (C source files generated on the fly when setup.py is run).
20061003 - version 0.6 ====================== * if fill_value keyword to createVariable is set to the Boolean False (not an integer that evaluates to False), no pre-filling is done for that variable. * updated to be compatible with netcdf-4.0-alpha17. Can now install pure-python netcdftime separately with setup-netcdftime.py. netcdftime will try to use numpy, but fall back to Numeric if numpy not installed. * generated source files with a version of pyrex (from http://codespeak.net/svn/lxml/pyrex/) that produces extensions compatible with python 2.5. * added new module for multi-file access of NETCDF3 and NETCDF4_CLASSIC files (MFDataset). Based on CDFMF from pycdf. * implement negative strides in variable slicing (feature missing from Scientific.IO.NetCDF). Now variables support full python extended slicing syntax. 20060925 - version 0.5.1 ======================== * on 64-bit systems integer attributes in netCDF4_classic failed, since there is no 64-bit integer data type. Fixed by downcasting to 32-bit integer. 20060920 - version 0.5 ====================== * Compound type support! (members must be fixed data primitive types - no user-defined types or NC_STRING variables allowed). Attributes are still restricted to primitive data types (no vlen or compound type attributes). * Assigning single values to a slice now does the Right Thing, i.e. >>> data[:] = 1 fills all the elements with 1 (instead of raising an IndexError). * Tested with numpy 1.0b5, netcdf-4.0-alpha16, HDF5 1.7.52 alpha. * Added renameDimension and renameVariable methods to Dataset and Group classes. * netCDF attributes can be deleted using python del (i.e. 'del dset.foo'). * Moved examples from test and test_classic to examples and examples_classic directories. * Added proper unit tests (in test and test_classic directories). * NULL characters are removed from text attributes. * Variable _FillValue can be set using new keyword argument 'fill_value' to createVariable Dataset and Group method. * docstrings now formatted with epydoc (http://epydoc.sf.net). * improved Scientific.IO.NetCDF compatibility for netCDF4_classic (typecode method, ability to use old Numeric typecodes). * zlib=False or complevel=0 disables shuffle filter in createVariable. * subversion repository hosted on Google projects (http://code.google.com/p/netcdf4-python/). * examples_classic/bench2.py is a performance comparison with Scientific.IO.NetCDF (the numpy version provided by pynetcdf). * __dict__ attribute of Dataset, Group or Variable provides a python dictionary with all netCDF attribute name/value pairs (just like Scientific.IO.NetCDF). 20060710 - version 0.4.5 ======================== * fixed to work with recent svn versions of numpy. * Now requires at least numpy 0.9.8. * Raise an AttributeError if user tries to rebind a private attribute (like 'variables', 'dimensions' or 'dtype'). 20060629 - version 0.4.4 ======================== * fixed to work with netcdf-4.0-alpha14. * automatically cast _FillValue attribute to variable type, to avoid a surprising error message. 20060320 - version 0.4.3 ======================== updated netcdftime module yet again; added 'all_leap'/'366_day' and '360_day' calendars. netCDFTime class renamed to utime; fwd and inv methods renamed to date2num and num2date. These methods can now handle numpy arrays as well as scalars. A 'real' python datetime instance is returned if calendar is gregorian, otherwise a 'datetime-like' instance is returned (python datetime can't handle funky dates in 'all_leap' and '360_day' calendars).
20060316 - version 0.4.2 ======================== udunits module replaced by pure python version, renamed 'netcdftime'. No longer requires the udunits library. Includes 4 calendars ('julian','standard'/'gregorian','proleptic_gregorian','noleap'/'365_day'). Calendar names and their interpretations follow the CF metadata convention. 20060310 - version 0.4.1 ======================== udunits module included for doing time conversions. 20060306 - version 0.4 ====================== netCDF4_classic module can now write NETCDF3_CLASSIC, NETCDF3_64BIT as well as NETCDF4_CLASSIC files. The file format is given as an optional keyword to the Dataset constructor ('NETCDF4_CLASSIC' is the default). Preliminary work on compound types done - but awaiting the next alpha of the netCDF 4 library to complete (bugs in alpha12 prevent it from working properly if the compound type has fields which are arrays). 20060217 - version 0.3.1 ======================== refactored user-defined data type support - user-defined data types are now described by an instance of the class UserType. usertype and usertype_name keyword args eliminated from createVariable. 20060214 - version 0.3 ====================== support for variable length strings (typecode = 'S') and variable-length, or 'ragged' arrays (vlen user-defined datatype). Arrays of python objects can be saved as pickled strings with datatype = 'S'. 20050128 - version 0.2.5 ======================== added support for scalar variables (and assignValue, getValue Variable methods for Scientific.IO.NetCDF compatibility). 20051123 - version 0.2.4 ======================== numpy 0.9.4 compatibility. Changed data type codes from ('d', 'f', 'i', 'h', ...) to ('f8', 'f4', 'i4', 'i2', ...). 20050110 - version 0.2.3 ======================== added ellipsis slicing capability. 20050106 - version 0.2.2 ======================== changed scipy_core to numpy. 20051228 - version 0.2.1 ======================== bugfixes, added 'nc3tonc4' utility to convert netCDF version 3 files to NETCDF4_CLASSIC files (with compression). The converted files can be read by netCDF 3 clients that have been re-linked to the netCDF 4 library. 'chunking' keyword added to createVariable in netCDF4 module. 20051224 - version 0.2 ====================== Added netCDF4_classic module - which creates files in NETCDF4_CLASSIC format. These files are compatible with netCDF 3 clients which have been linked against the netCDF 4 lib. This module does not use any new features of the netCDF 4 API except zlib compression. Unlike any other netCDF 3 python client, it can transparently compress data with zlib compression and the HDF5 shuffle filter. 20051222 - version 0.1 ====================== First release. Supports groups, multiple unlimited dimensions, zlib compression (plus shuffle filter and fletcher32 checksum) and all new primitive data types. No support for user-defined data types yet.
netcdf4-python-1.7.4rel/LICENSE000066400000000000000000000020401512661643000161010ustar00rootroot00000000000000Copyright 2008 Jeffrey Whitaker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. netcdf4-python-1.7.4rel/MANIFEST.in000066400000000000000000000013571512661643000166420ustar00rootroot00000000000000include docs/index.html recursive-include man * recursive-include external * include MANIFEST.in include README.htmldocs include Changelog include setup.cfg include examples/*py include examples/README.md exclude examples/data include test/*py include test/*nc include src/netCDF4/__init__.py include src/netCDF4/_netCDF4.pyx exclude src/netCDF4/_netCDF4.c include src/netCDF4/utils.py include src/netCDF4/plugins/empty.txt include src/netCDF4/py.typed include src/netCDF4/*.pyi include include/netCDF4.pxi include include/mpi-compat.h include include/membuf.pyx include include/netcdf-compat.h include include/no_parallel_support_imports.pxi.in include include/parallel_support_imports.pxi.in include *.md include *.py include *.release include *.sh netcdf4-python-1.7.4rel/README.htmldocs000066400000000000000000000006031512661643000175730ustar00rootroot00000000000000To update web docs at http://unidata.github.io/netcdf4-python: First install pdoc (https://github.com/pdoc3/pdoc). Then in netcdf4-python github clone directory (after building and installing github master), generate docs by running create_docs.sh. Docs are put in docs/index.html. Github pages (https://unidata.github.io/netcdf4-python/) points to docs/index.html in master branch. netcdf4-python-1.7.4rel/README.md000066400000000000000000000450261512661643000163660ustar00rootroot00000000000000# [netcdf4-python](http://unidata.github.io/netcdf4-python) [Python](http://python.org)/[numpy](http://numpy.org) interface to the netCDF [C library](https://github.com/Unidata/netcdf-c). [![CodeQL](https://github.com/Unidata/netcdf4-python/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/Unidata/netcdf4-python/actions/workflows/github-code-scanning/codeql) [![PyPI package](https://img.shields.io/pypi/v/netCDF4.svg)](http://python.org/pypi/netCDF4) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/netCDF4/badges/version.svg)](https://anaconda.org/conda-forge/netCDF4) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2592291.svg)](https://doi.org/10.5281/zenodo.2592290) ## News For details on the latest updates, see the [Changelog](https://github.com/Unidata/netcdf4-python/blob/master/Changelog).
1/5/2026: Version [1.7.4](https://pypi.python.org/pypi/netCDF4/1.7.4) released. Compression plugins now included in wheels, windows/arm64 and free-threaded python wheels provided. Automatic conversion of character arrays <--> string arrays works for Unicode (not just ascii) strings. WARNING: netcdf-c is not thread-safe and netcdf4-python does not have internal locking, so expect segfaults if you use netcdf4-python on multiple threads with free-threaded python. Users must exercise care to only call netcdf from a single thread. 10/13/2025: Version [1.7.3](https://pypi.python.org/pypi/netCDF4/1.7.3) released. Minor updates/bugfixes and python 3.14 wheels, see Changelog for details. 10/22/2024: Version [1.7.2](https://pypi.python.org/pypi/netCDF4/1.7.2) released. Minor updates/bugfixes and python 3.13 wheels, see Changelog for details. 06/17/2024: Version [1.7.1](https://pypi.python.org/pypi/netCDF4/1.7.1) released. Fixes for wheels, no code changes. 06/13/2024: Version [1.7.0](https://pypi.python.org/pypi/netCDF4/1.7.0) released. Add support for complex numbers via `auto_complex` keyword to `Dataset` ([PR #1295](https://github.com/Unidata/netcdf4-python/pull/1295)). 10/20/2023: Version [1.6.5](https://pypi.python.org/pypi/netCDF4/1.6.5) released. Fix for issue #1271 (mask ignored if bool MA assigned to uint8 var), support for python 3.12 (removal of python 3.7 support), more informative error messages. 6/4/2023: Version [1.6.4](https://pypi.python.org/pypi/netCDF4/1.6.4) released. Now requires [certifi](https://github.com/certifi/python-certifi) to locate SSL certificates - this allows OpenDAP https URLs to work with linux wheels (issue [#1246](https://github.com/Unidata/netcdf4-python/issues/1246)). 3/3/2023: Version [1.6.3](https://pypi.python.org/pypi/netCDF4/1.6.3) released. 11/15/2022: Version [1.6.2](https://pypi.python.org/pypi/netCDF4/1.6.2) released. Fix for compilation with netcdf-c < 4.9.0 (issue [#1209](https://github.com/Unidata/netcdf4-python/issues/1209)). Slicing multi-dimensional variables with an all False boolean index array now returns an empty numpy array (instead of raising an exception - issue [#1197](https://github.com/Unidata/netcdf4-python/issues/1197)). 09/18/2022: Version [1.6.1](https://pypi.python.org/pypi/netCDF4/1.6.1) released. GIL now released for all C lib calls, `set_alignment` and `get_alignment` module functions added to modify/retrieve HDF5 data alignment properties. Added `Dataset` methods to query availability of optional compression filters. 06/24/2022: Version [1.6.0](https://pypi.python.org/pypi/netCDF4/1.6.0) released. Support for quantization (bit-grooming and bit-rounding) functionality in netcdf-c 4.9.0 which can dramatically improve compression. Dataset.createVariable now accepts dimension instances (instead of just dimension names). 'compression' kwarg added to Dataset.createVariable to support szip as well as new compression algorithms available in netcdf-c 4.9.0 through compression plugins (such as zstd, bzip2 and blosc). Working arm64 wheels for Apple M1 Silicon now available on pypi. 10/31/2021: Version [1.5.8](https://pypi.python.org/pypi/netCDF4/1.5.8) released. Fix Enum bug, add binary wheels for aarch64 and python 3.10. 6/22/2021: Version [1.5.7](https://pypi.python.org/pypi/netCDF4/1.5.7) released. Fixed OverflowError on Windows when reading data with dimension sizes greater than 2**32-1. Masked arrays no longer returned for vlens. 2/15/2021: Version [1.5.6](https://pypi.python.org/pypi/netCDF4/1.5.6) released.
Added `Dataset.fromcdl` and `Dataset.tocdl`, which require `ncdump` and `ncgen` utilities to be in `$PATH`. Removed python 2.7 support. 12/20/2020: Version [1.5.5.1](https://pypi.python.org/pypi/netCDF4/1.5.5.1) released. Updated binary wheels for OSX and linux that link latest netcdf-c and hdf5 libs. 12/01/2020: Version [1.5.5](https://pypi.python.org/pypi/netCDF4/1.5.5) released. Update license wording to be consistent with MIT license. 07/23/2020: Version [1.5.4](https://pypi.python.org/pypi/netCDF4/1.5.4) released. Now requires numpy >= 1.9. 10/27/2019: Version [1.5.3](https://pypi.python.org/pypi/netCDF4/1.5.3) released. Fix for [issue #972](https://github.com/Unidata/netcdf4-python/issues/972), plus binary wheels for python 3.8. 09/03/2019: Version [1.5.2](https://pypi.python.org/pypi/netCDF4/1.5.2) released. Bugfixes, no new features. 05/06/2019: Version [1.5.1.2](https://pypi.python.org/pypi/netCDF4/1.5.1.2) released. Fixes another slicing regression ([issue #922](https://github.com/Unidata/netcdf4-python/issues/922)) introduced in the 1.5.1 release. 05/02/2019: Version [1.5.1.1](https://pypi.python.org/pypi/netCDF4/1.5.1.1) released. Fixes incorrect `__version__` module variable in 1.5.1 release, plus a slicing bug ([issue #919](https://github.com/Unidata/netcdf4-python/issues/919)). 04/30/2019: Version [1.5.1](https://pypi.python.org/pypi/netCDF4/1.5.1) released. Bugfixes, no new features. 04/02/2019: Version [1.5.0.1](https://pypi.python.org/pypi/netCDF4/1.5.0.1) released. Binary wheels for macOS and linux rebuilt with netcdf-c 4.6.3 (instead of 4.4.1.1). Added read-shared capability for faster reads of NETCDF3 files (mode='rs'). 03/24/2019: Version [1.5.0](https://pypi.python.org/pypi/netCDF4/1.5.0) released. Parallel IO support for classic file formats added using the pnetcdf library (contribution from Lars Pastewka, [pull request #897](https://github.com/Unidata/netcdf4-python/pull/897)). 03/08/2019: Version [1.4.3.2](https://pypi.python.org/pypi/netCDF4/1.4.3.2) released. Include missing membuf.pyx file in source tarball. No need to update if you installed 1.4.3.1 from a binary wheel. 03/07/2019: Version [1.4.3.1](https://pypi.python.org/pypi/netCDF4/1.4.3.1) released. Fixes bug in implementation of NETCDF4_CLASSIC parallel IO support in 1.4.3. 03/05/2019: Version [1.4.3](https://pypi.python.org/pypi/netCDF4/1.4.3) released. Issues with netcdf-c 4.6.2 fixed (including broken parallel IO). `set_ncstring_attrs()` method added, memoryview buffer now returned when an in-memory Dataset is closed. 10/26/2018: Version [1.4.2](https://pypi.python.org/pypi/netCDF4/1.4.2) released. Minor bugfixes, added `Variable.get_dims()` method and `master_file` kwarg for `MFDataset.__init__`. 08/10/2018: Version [1.4.1](https://pypi.python.org/pypi/netCDF4/1.4.1) released. The old slicing behavior (numpy array returned unless missing values are present, otherwise masked array returned) is re-enabled via `set_always_mask(False)`. 05/11/2018: Version [1.4.0](https://pypi.python.org/pypi/netCDF4/1.4.0) released. The netcdftime package is no longer included; it is now a separate [package](https://pypi.python.org/pypi/cftime) dependency. In addition to several bug fixes, there are a few important changes to the default behaviour to note: * Slicing a netCDF variable will now always return a masked array by default, even if there are no masked values. The result depended on the slice before, which was too surprising.
If auto-masking is turned off (with `set_auto_mask(False)`) a numpy array will always be returned. * `_FillValue` is no longer treated as a valid_min/valid_max. This was too surprising, despite the fact that the netCDF docs [attribute best practices](https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html) suggest that clients should do this if `valid_min`, `valid_max` and `valid_range` are not set. * Changed behavior of string attributes so that `nc.stringatt = ['foo','bar']` produces a vlen string array attribute in NETCDF4, instead of concatenating into a single string (`foobar`). In NETCDF3/NETCDF4_CLASSIC, an IOError is now raised, instead of writing `foobar`. * Retrieved compound-type variable data now returned with character array elements converted to numpy strings ([issue #773](https://github.com/Unidata/netcdf4-python/issues/773)). Works for assignment also. Can be disabled using `set_auto_chartostring(False)`. Numpy structured array dtypes with `'SN'` string subtypes can now be used to define netcdf compound types in `createCompoundType` (they get converted to `('S1',N)` character array types automatically). * `valid_min`, `valid_max`, `_FillValue` and `missing_value` are now treated as unsigned integers if `_Unsigned` variable attribute is set (to mimic behaviour of netcdf-java). Conversion to unsigned type now occurs before masking and scale/offset operation ([issue #794](https://github.com/Unidata/netcdf4-python/issues/794)). 11/01/2017: Version [1.3.1](https://pypi.python.org/pypi/netCDF4/1.3.1) released. Parallel IO support with MPI! Requires that netcdf-c and hdf5 be built with MPI support, and [mpi4py](http://mpi4py.readthedocs.io/en/stable). To open a file for parallel access in a program running in an MPI environment using mpi4py, just use `parallel=True` when creating the `Dataset` instance. See [`examples/mpi_example.py`](https://github.com/Unidata/netcdf4-python/blob/master/examples/mpi_example.py) for a demonstration. For more info, see the tutorial [section](http://unidata.github.io/netcdf4-python/#section13). 9/25/2017: Version [1.3.0](https://pypi.python.org/pypi/netCDF4/1.3.0) released. Bug fixes for `netcdftime` and optimizations for reading strided slices. `encoding` kwarg added to `Dataset.__init__` and `Dataset.filepath` to deal with oddball encodings in filename paths (`sys.getfilesystemencoding()` is used by default to determine encoding). Make sure numpy datatypes used to define CompoundTypes have `isalignedstruct` flag set to avoid segfaults - which required bumping the minimum required numpy from 1.7.0 to 1.9.0. In cases where `missing_value/valid_min/valid_max/_FillValue` cannot be safely cast to the variable's dtype, they are no longer used to automatically mask the data and a warning message is issued. 6/10/2017: Version [1.2.9](https://pypi.python.org/pypi/netCDF4/1.2.9) released. Fixes for auto-scaling and masking when `_Unsigned` and/or `valid_min`, `valid_max` attributes present. setup.py updated so that `pip install` works if cython not installed. Now requires [setuptools](https://pypi.python.org/pypi/setuptools) version 18.0 or greater. 6/1/2017: Version [1.2.8](https://pypi.python.org/pypi/netCDF4/1.2.8) released. From Changelog: * recognize `_Unsigned` attribute used by [netcdf-java](http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/) to designate unsigned integer data stored with a signed integer type in netcdf-3 [issue #656](https://github.com/Unidata/netcdf4-python/issues/656).
* add Dataset init memory parameter to allow loading a file from memory [pull request #652](https://github.com/Unidata/netcdf4-python/pull/652), [issue #406](https://github.com/Unidata/netcdf4-python/issues/406) and [issue #295](https://github.com/Unidata/netcdf4-python/issues/295). * fix for negative times in num2date [issue #659](https://github.com/Unidata/netcdf4-python/pull/659). * fix for failing tests in numpy 1.13 due to changes in `numpy.ma` [issue #662](https://github.com/Unidata/netcdf4-python/issues/662). * Checking for `_Encoding` attribute for `NC_STRING` variables, otherwise use 'utf-8'. 'utf-8' is used everywhere else, 'default_encoding' global module variable is no longer used. getncattr method now takes optional kwarg 'encoding' (default 'utf-8') so encoding of attributes can be specified if desired. If `_Encoding` is specified for an `NC_CHAR` (`'S1'`) variable, the chartostring utility function is used to convert the array of characters to an array of strings with one less dimension (the last dimension is interpreted as the length of each string) when reading the data. When writing the data, stringtochar is used to convert a numpy array of fixed length strings to an array of characters with one more dimension. chartostring and stringtochar now also have an 'encoding' kwarg. Automatic conversion to/from character to string arrays can be turned off via a new `set_auto_chartostring` Dataset and Variable method (default is `True`). Addresses [issue #654](https://github.com/Unidata/netcdf4-python/issues/654). * [Cython](http://cython.org) >= 0.19 now required, `_netCDF4.c` and `_netcdftime.c` removed from repository. 1/8/2017: Version [1.2.7](https://pypi.python.org/pypi/netCDF4/1.2.7) released. Python 3.6 compatibility, and fix for vector missing_values. 12/10/2016: Version [1.2.6](https://pypi.python.org/pypi/netCDF4/1.2.6) released. Bug fixes for Enum data type, and _FillValue/missing_value usage when data is stored in non-native endian format. Add get_variables_by_attributes to MFDataset. Support for python 2.6 removed. 12/1/2016: Version [1.2.5](https://pypi.python.org/pypi/netCDF4/1.2.5) released. See the [Changelog](https://github.com/Unidata/netcdf4-python/blob/master/Changelog) for changes. 4/15/2016: Version [1.2.4](https://pypi.python.org/pypi/netCDF4/1.2.4) released. Bugs in handling of variables with specified non-native "endian-ness" (byte-order) fixed ([issue #554](https://github.com/Unidata/netcdf4-python/issues/554)). Build instructions updated and warning issued to deal with potential backwards incompatibility introduced when using HDF5 1.10.x (see [Unidata/netcdf-c/issue#250](https://github.com/Unidata/netcdf-c/issues/250)). 3/10/2016: Version [1.2.3](https://pypi.python.org/pypi/netCDF4/1.2.3) released. Various bug fixes. All text attributes in ``NETCDF4`` formatted files are now written as type ``NC_CHAR``, unless they contain unicode characters that cannot be encoded in ascii, in which case they are written as ``NC_STRING``. Previously, all unicode strings were written as ``NC_STRING``. This change preserves compatibility with clients, like Matlab, that can't deal with ``NC_STRING`` attributes. A ``setncattr_string`` method was added to force attributes to be written as ``NC_STRING``. 1/1/2016: Version [1.2.2](https://pypi.python.org/pypi/netCDF4/1.2.2) released. Mostly bugfixes, but with two new features. * support for the new ``NETCDF3_64BIT_DATA`` format introduced in netcdf-c 4.4.0.
Similar to ``NETCDF3_64BIT`` (now ``NETCDF3_64BIT_OFFSET``), but includes 64 bit dimension sizes (> 2 billion), plus unsigned and 64 bit integer data types. Uses the classic (netcdf-3) data model, and does not use HDF5 as the underlying storage format. * Dimension objects now have a ``size`` attribute, which is the current length of the dimension (same as invoking ``len`` on the Dimension instance). The minimum required python version has now been increased from 2.5 to 2.6. 10/15/2015: Version [1.2.1](https://pypi.python.org/pypi/netCDF4/1.2.1) released. Adds the ability to slice Variables with unsorted integer sequences, and integer sequences with duplicates. 9/23/2015: Version [1.2.0](https://pypi.python.org/pypi/netCDF4/1.2.0) released. New features: * [get_variables_by_attributes](http://unidata.github.io/netcdf4-python/#netCDF4.Dataset.get_variables_by_attributes) ``Dataset`` and ``Group`` method for retrieving variables that have matching attributes. * Support for [Enum](http://unidata.github.io/netcdf4-python/#section12) data types. * [isopen](http://unidata.github.io/netcdf4-python/#netCDF4.Dataset.isopen) `Dataset` method. 7/28/2015: Version [1.1.9](https://pypi.python.org/pypi/netCDF4/1.1.9) bugfix release. 5/14/2015: Version [1.1.8](https://pypi.python.org/pypi/netCDF4/1.1.8) released. Unix-like paths can now be used in `createVariable` and `createGroup`. ```python v = nc.createVariable('/path/to/var1', float, ('xdim', 'ydim')) ``` will create a variable named 'var1', while also creating the groups 'path' and 'path/to' if they do not already exist. Similarly, ```python g = nc.createGroup('/path/to') ``` now acts like `mkdir -p` in unix, creating the groups 'path' and 'path/to', if they don't already exist. Users who relied on `nc.createGroup(groupname)` failing when the group already exists will have to modify their code, since `nc.createGroup` will now return the existing group instance. `Dataset.__getitem__` was also added. `nc['/path/to']` now returns a group instance, and `nc['/path/to/var1']` now returns a variable instance. 3/19/2015: Version [1.1.7](https://pypi.python.org/pypi/netCDF4/1.1.7) released. Global Interpreter Lock (GIL) now released when extension module calls C library for read operations. This speeds up concurrent reads when using threads. Users who wish to use netcdf4-python inside threads should read http://www.hdfgroup.org/hdf5-quest.html#gconc regarding thread-safety in the HDF5 C library. Fixes to `setup.py` now ensure that `pip install netCDF4` with `export USE_NCCONFIG=0` will use environment variables to find paths to libraries and include files, instead of relying exclusively on the nc-config utility. ## Installation The easiest way to install is through pip: ```shell pip install netCDF4 ``` or, if you are a user of the Conda package manager, ```shell conda install -c conda-forge netCDF4 ``` ## Development installation * Clone GitHub repository (`git clone https://github.com/Unidata/netcdf4-python.git`) * Make sure [numpy](http://www.numpy.org/) and [Cython](http://cython.org/) are installed and you have [Python](https://www.python.org) 3.8 or newer. * Make sure [HDF5](http://www.h5py.org/) and netcdf-4 are installed, and the `nc-config` utility is in your Unix PATH. * Run `python setup.py build`, then `pip install -e .`. * To run all the tests, execute `cd test && python run_all.py`. ## Documentation See the online [docs](http://unidata.github.io/netcdf4-python) for more details.
## Usage ###### Sample [iPython](http://ipython.org/) notebooks available in the examples directory on [reading](http://nbviewer.ipython.org/github/Unidata/netcdf4-python/blob/master/examples/reading_netCDF.ipynb) and [writing](http://nbviewer.ipython.org/github/Unidata/netcdf4-python/blob/master/examples/writing_netCDF.ipynb) netCDF data with Python. netcdf4-python-1.7.4rel/README.release000066400000000000000000000024701512661643000174020ustar00rootroot00000000000000* create a release branch ('vX.Y.Zrel'). In the release branch... * make sure version number in PKG-INFO, setup.py and netCDF4/_netCDF4.pyx are up to date (in _netCDF4.pyx, change 'Version' in first line of docstring at top of file, and __version__ variable). * update Changelog and README.md as needed. * commit and push all of the above changes. * install the module (python setup.py install), then run 'sh create_docs.sh' to update html docs. Commit and push the update to docs/netCDF4/index.html. * create a pull request for the release branch. * After release branch has been merged, tag a release git tag -a vX.Y.Zrel -m "version X.Y.Z release" git push origin --tags * push an empty commit to the netcdf4-python-wheels repo to trigger new builds. (e.g. git commit --allow-empty -m "Trigger build") You will likely want to edit the .travis.yml file at https://github.com/MacPython/netcdf4-python-wheels to specify the BUILD_COMMIT before triggering a build. * update the pypi entry, upload the wheels from wheels.scipy.org. Lastly, create a source tarball using 'python setup.py sdist' and upload to pypi. * update web docs by copying docs/netCDF4/index.html somewhere, switch to the gh-pages branch, copy the index.html file back, commit and push the updated index.html file (see README.gh-pages). netcdf4-python-1.7.4rel/checkversion.py000066400000000000000000000004071512661643000201360ustar00rootroot00000000000000import netCDF4, numpy print('netcdf4-python version: %s'%netCDF4.__version__) print('HDF5 lib version: %s'%netCDF4.__hdf5libversion__) print('netcdf lib version: %s'%netCDF4.__netcdf4libversion__) print('numpy version %s' % numpy.__version__) netcdf4-python-1.7.4rel/create_docs.sh000066400000000000000000000002721512661643000177100ustar00rootroot00000000000000# use pdoc (https://pdoc3.github.io/pdoc/) to generate API docs pdoc3 --html --config show_source_code=False --force -o 'docs' netCDF4 /bin/cp -f docs/netCDF4/index.html docs/index.html netcdf4-python-1.7.4rel/docs/000077500000000000000000000000001512661643000160305ustar00rootroot00000000000000netcdf4-python-1.7.4rel/docs/index.html000066400000000000000000006537131512661643000200440ustar00rootroot00000000000000 netCDF4 API documentation

Package netCDF4

Version 1.7.4

Introduction

netcdf4-python is a Python interface to the netCDF C library.

netCDF version 4 has many features not found in earlier versions of the library and is implemented on top of HDF5. This module can read and write files in both the new netCDF 4 and the old netCDF 3 format, and can create files that are readable by HDF5 clients. The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module.

Most new features of netCDF 4 are implemented, such as multiple unlimited dimensions, groups and data compression. All the new numeric data types (such as 64 bit and unsigned integer types) are implemented. Compound (struct), variable length (vlen) and enumerated (enum) data types are supported, but not the opaque data type. Mixtures of compound, vlen and enum data types (such as compound types containing enums, or vlens containing compound types) are not supported.

Quick Install

  • The easiest way to get going is to install via pip install netCDF4 (or, if you use the conda package manager, conda install -c conda-forge netCDF4).

Developer Install

  • Clone the github repository. Make sure you either clone recursively, or run git submodule update --init to ensure all the submodules are also checked out.
  • Make sure the dependencies are satisfied (Python 3.8 or later, numpy, Cython, cftime, setuptools, the HDF5 C library, and the netCDF C library). For MPI parallel IO support, an MPI-enabled version of the netcdf library is required, as is mpi4py. Parallel IO further depends on the existence of MPI-enabled HDF5 or the PnetCDF library.
  • By default, the utility nc-config (installed with netcdf-c) will be run to determine where all the dependencies live.
  • If nc-config is not in your default PATH, you can set the NETCDF4_DIR environment variable and setup.py will look in $NETCDF4_DIR/bin. You can also use the file setup.cfg to set the path to nc-config, or enter the paths to the libraries and include files manually. Just edit the setup.cfg file in a text editor and follow the instructions in the comments. To disable the use of nc-config, set the env var USE_NCCONFIG to 0. To disable the use of setup.cfg, set USE_SETUPCFG to 0. As a last resort, the library and include paths can be set via environment variables. If you go this route, set USE_NCCONFIG and USE_SETUPCFG to 0, and specify NETCDF4_LIBDIR, NETCDF4_INCDIR, HDF5_LIBDIR and HDF5_INCDIR. If the dependencies are not found in any of the paths specified by environment variables, then standard locations (such as /usr and /usr/local) are searched.
  • if the env var NETCDF_PLUGIN_DIR is set to point to the location of the netcdf-c compression plugins built by netcdf >= 4.9.0, they will be installed inside the package. In this case HDF5_PLUGIN_PATH will be set to the package installation path on import, so the extra compression algorithms available in netcdf-c >= 4.9.0 will automatically be available. Otherwise, the user will have to set HDF5_PLUGIN_PATH explicitly to have access to the extra compression plugins.
  • run pip install -v . (as root if necessary)
  • run the tests in the 'test' directory by running python run_all.py.

Tutorial

All of the code in this tutorial is available in examples/tutorial.py, except the parallel IO example, which is in examples/mpi_example.py. Unit tests are in the test directory.

Creating/Opening/Closing a netCDF file

To create a netCDF file from python, you simply call the Dataset constructor. This is also the method used to open an existing netCDF file. If the file is open for write access (mode='w', 'r+' or 'a'), you may write any type of data including new dimensions, groups, variables and attributes. netCDF files come in five flavors (NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET, NETCDF3_64BIT_DATA, NETCDF4_CLASSIC, and NETCDF4). NETCDF3_CLASSIC was the original netcdf binary format, and was limited to file sizes less than 2 Gb. NETCDF3_64BIT_OFFSET was introduced in version 3.6.0 of the library, and extended the original binary format to allow for file sizes greater than 2 Gb. NETCDF3_64BIT_DATA is a new format that requires version 4.4.0 of the C library - it extends the NETCDF3_64BIT_OFFSET binary format to allow for unsigned/64 bit integer data types and 64-bit dimension sizes. NETCDF3_64BIT is an alias for NETCDF3_64BIT_OFFSET. NETCDF4_CLASSIC files use the version 4 disk format (HDF5), but omit features not found in the version 3 API. They can be read by netCDF 3 clients only if they have been relinked against the netCDF 4 library. They can also be read by HDF5 clients. NETCDF4 files use the version 4 disk format (HDF5) and use the new features of the version 4 API. The netCDF4 module can read and write files in any of these formats. When creating a new file, the format may be specified using the format keyword in the Dataset constructor. The default format is NETCDF4. To see how a given file is formatted, you can examine the data_model attribute. Closing the netCDF file is accomplished via the Dataset.close() method of the Dataset instance.

Here's an example:

>>> from netCDF4 import Dataset
>>> rootgrp = Dataset("test.nc", "w", format="NETCDF4")
>>> print(rootgrp.data_model)
NETCDF4
>>> rootgrp.close()

Remote OPeNDAP-hosted datasets can be accessed for reading over http if a URL is provided to the Dataset constructor instead of a filename. However, this requires that the netCDF library be built with OPeNDAP support, via the --enable-dap configure option (added in version 4.0.1).
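
For example, a sketch using a hypothetical OPeNDAP endpoint (the URL below is illustrative only; substitute a real one):

>>> url = "http://test.opendap.org/opendap/data/nc/coads_climatology.nc"  # hypothetical URL
>>> remote = Dataset(url)  # opened read-only over http
>>> remote.close()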

Groups in a netCDF file

netCDF version 4 added support for organizing data in hierarchical groups, which are analogous to directories in a filesystem. Groups serve as containers for variables, dimensions and attributes, as well as other groups. A Dataset creates a special group, called the 'root group', which is similar to the root directory in a unix filesystem. To create Group instances, use the Dataset.createGroup() method of a Dataset or Group instance. Dataset.createGroup() takes a single argument, a python string containing the name of the new group. The new Group instances contained within the root group can be accessed by name using the groups dictionary attribute of the Dataset instance. Only NETCDF4 formatted files support Groups; if you try to create a Group in a netCDF 3 file, you will get an error message.

>>> rootgrp = Dataset("test.nc", "a")
>>> fcstgrp = rootgrp.createGroup("forecasts")
>>> analgrp = rootgrp.createGroup("analyses")
>>> print(rootgrp.groups)
{'forecasts': <class 'netCDF4._netCDF4.Group'>
group /forecasts:
    dimensions(sizes):
    variables(dimensions):
    groups: , 'analyses': <class 'netCDF4._netCDF4.Group'>
group /analyses:
    dimensions(sizes):
    variables(dimensions):
    groups: }
>>>

Groups can exist within groups in a Dataset, just as directories exist within directories in a unix filesystem. Each Group instance has a groups attribute dictionary containing all of the group instances contained within that group. Each Group instance also has a path attribute that contains a simulated unix directory path to that group. To simplify the creation of nested groups, you can use a unix-like path as an argument to Dataset.createGroup().

>>> fcstgrp1 = rootgrp.createGroup("/forecasts/model1")
>>> fcstgrp2 = rootgrp.createGroup("/forecasts/model2")

If any of the intermediate elements of the path do not exist, they are created, just as with the unix command 'mkdir -p'. If you try to create a group that already exists, no error will be raised, and the existing group will be returned.

Here's an example that shows how to navigate all the groups in a Dataset. The function walktree is a Python generator that is used to walk the directory tree. Note that printing the Dataset or Group object yields summary information about its contents.

>>> def walktree(top):
...     yield top.groups.values()
...     for value in top.groups.values():
...         yield from walktree(value)
>>> print(rootgrp)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes):
    variables(dimensions):
    groups: forecasts, analyses
>>> for children in walktree(rootgrp):
...     for child in children:
...         print(child)
<class 'netCDF4._netCDF4.Group'>
group /forecasts:
    dimensions(sizes):
    variables(dimensions):
    groups: model1, model2
<class 'netCDF4._netCDF4.Group'>
group /analyses:
    dimensions(sizes):
    variables(dimensions):
    groups:
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model1:
    dimensions(sizes):
    variables(dimensions):
    groups:
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model2:
    dimensions(sizes):
    variables(dimensions):
    groups:

Dimensions in a netCDF file

netCDF defines the sizes of all variables in terms of dimensions, so before any variables can be created the dimensions they use must be created first. A special case, not often used in practice, is that of a scalar variable, which has no dimensions. A dimension is created using the Dataset.createDimension() method of a Dataset or Group instance. A Python string is used to set the name of the dimension, and an integer value is used to set the size. To create an unlimited dimension (a dimension that can be appended to), the size value is set to None or 0. In this example, both the time and level dimensions are unlimited. Having more than one unlimited dimension is a new netCDF 4 feature, in netCDF 3 files there may be only one, and it must be the first (leftmost) dimension of the variable.

>>> level = rootgrp.createDimension("level", None)
>>> time = rootgrp.createDimension("time", None)
>>> lat = rootgrp.createDimension("lat", 73)
>>> lon = rootgrp.createDimension("lon", 144)

All of the Dimension instances are stored in a python dictionary.

>>> print(rootgrp.dimensions)
{'level': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0, 'time': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0, 'lat': <class 'netCDF4._netCDF4.Dimension'>: name = 'lat', size = 73, 'lon': <class 'netCDF4._netCDF4.Dimension'>: name = 'lon', size = 144}

Using the python len function with a Dimension instance returns the current size of that dimension. The Dimension.isunlimited() method of a Dimension instance can be used to determine if the dimension is unlimited, or appendable.

>>> print(len(lon))
144
>>> print(lon.isunlimited())
False
>>> print(time.isunlimited())
True

Printing the Dimension object provides useful summary info, including the name and length of the dimension, and whether it is unlimited.

>>> for dimobj in rootgrp.dimensions.values():
...     print(dimobj)
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0
<class 'netCDF4._netCDF4.Dimension'>: name = 'lat', size = 73
<class 'netCDF4._netCDF4.Dimension'>: name = 'lon', size = 144

Dimension names can be changed using the Dataset.renameDimension() method of a Dataset or Group instance.
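
For example, a minimal sketch (Dataset.renameDimension() takes the old name followed by the new name):

>>> rootgrp.renameDimension("lon", "longitude")  # rename 'lon' to 'longitude'
>>> rootgrp.renameDimension("longitude", "lon")  # and rename it back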

Variables in a netCDF file

netCDF variables behave much like python multidimensional array objects supplied by the numpy module. However, unlike numpy arrays, netCDF4 variables can be appended to along one or more 'unlimited' dimensions. To create a netCDF variable, use the Dataset.createVariable() method of a Dataset or Group instance. The Dataset.createVariable() method has two mandatory arguments, the variable name (a Python string), and the variable datatype. The variable's dimensions are given by a tuple containing the dimension names (defined previously with Dataset.createDimension()). To create a scalar variable, simply leave out the dimensions keyword. The variable primitive datatypes correspond to the dtype attribute of a numpy array. You can specify the datatype as a numpy dtype object, or anything that can be converted to a numpy dtype object. Valid datatype specifiers include:

Specifier Datatype Old typecodes
'f4' 32-bit floating point 'f'
'f8' 64-bit floating point 'd'
'i4' 32-bit signed integer 'i' 'l'
'i2' 16-bit signed integer 'h' 's'
'i8' 64-bit signed integer
'i1' 8-bit signed integer 'b' 'B'
'u1' 8-bit unsigned integer
'u2' 16-bit unsigned integer
'u4' 32-bit unsigned integer
'u8' 64-bit unsigned integer
'S1' single-character string 'c'

The unsigned integer types and the 64-bit integer type can only be used if the file format is NETCDF4.

The dimensions themselves are usually also defined as variables, called coordinate variables. The Dataset.createVariable() method returns an instance of the Variable class whose methods can be used later to access and set variable data and attributes.

>>> times = rootgrp.createVariable("time","f8",("time",))
>>> levels = rootgrp.createVariable("level","i4",("level",))
>>> latitudes = rootgrp.createVariable("lat","f4",("lat",))
>>> longitudes = rootgrp.createVariable("lon","f4",("lon",))
>>> # two dimensions unlimited
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))
>>> temp.units = "K"

To get summary info on a Variable instance in an interactive session, just print it.

>>> print(temp)
<class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
    units: K
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used

You can use a path to create a Variable inside a hierarchy of groups.

>>> ftemp = rootgrp.createVariable("/forecasts/model1/temp","f4",("time","level","lat","lon",))

If the intermediate groups do not yet exist, they will be created.

You can also query a Dataset or Group instance directly to obtain Group or Variable instances using paths.

>>> print(rootgrp["/forecasts/model1"])  # a Group instance
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model1:
    dimensions(sizes):
    variables(dimensions): float32 temp(time,level,lat,lon)
    groups:
>>> print(rootgrp["/forecasts/model1/temp"])  # a Variable instance
<class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
path = /forecasts/model1
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used

All of the variables in the Dataset or Group are stored in a Python dictionary, in the same way as the dimensions:

>>> print(rootgrp.variables)
{'time': <class 'netCDF4._netCDF4.Variable'>
float64 time(time)
unlimited dimensions: time
current shape = (0,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'level': <class 'netCDF4._netCDF4.Variable'>
int32 level(level)
unlimited dimensions: level
current shape = (0,)
filling on, default _FillValue of -2147483647 used, 'lat': <class 'netCDF4._netCDF4.Variable'>
float32 lat(lat)
unlimited dimensions:
current shape = (73,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'lon': <class 'netCDF4._netCDF4.Variable'>
float32 lon(lon)
unlimited dimensions:
current shape = (144,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'temp': <class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
    units: K
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used}

Variable names can be changed using the Dataset.renameVariable() method of a Dataset instance.
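
For example, a minimal sketch (old name first, then the new name):

>>> rootgrp.renameVariable("temp", "temperature")  # rename 'temp' to 'temperature'
>>> rootgrp.renameVariable("temperature", "temp")  # and restore the original name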

Variables can be sliced similar to numpy arrays, but there are some differences. See Writing data to and retrieving data from a netCDF variable for more details.

Attributes in a netCDF file

There are two types of attributes in a netCDF file, global and variable. Global attributes provide information about a group, or the entire dataset, as a whole. Variable attributes provide information about one of the variables in a group. Global attributes are set by assigning values to Dataset or Group instance variables. Variable attributes are set by assigning values to Variable instance variables. Attributes can be strings, numbers or sequences. Returning to our example,

>>> import time
>>> rootgrp.description = "bogus example script"
>>> rootgrp.history = "Created " + time.ctime(time.time())
>>> rootgrp.source = "netCDF4 python module tutorial"
>>> latitudes.units = "degrees north"
>>> longitudes.units = "degrees east"
>>> levels.units = "hPa"
>>> temp.units = "K"
>>> times.units = "hours since 0001-01-01 00:00:00.0"
>>> times.calendar = "gregorian"

The Dataset.ncattrs() method of a Dataset, Group or Variable instance can be used to retrieve the names of all the netCDF attributes. This method is provided as a convenience, since using the built-in dir Python function will return a bunch of private methods and attributes that cannot (or should not) be modified by the user.

>>> for name in rootgrp.ncattrs():
...     print("Global attr {} = {}".format(name, getattr(rootgrp, name)))
Global attr description = bogus example script
Global attr history = Created Mon Jul  8 14:19:41 2019
Global attr source = netCDF4 python module tutorial

The __dict__ attribute of a Dataset, Group or Variable instance provides all the netCDF attribute name/value pairs in a python dictionary:

>>> print(rootgrp.__dict__)
{'description': 'bogus example script', 'history': 'Created Mon Jul  8 14:19:41 2019', 'source': 'netCDF4 python module tutorial'}

Attributes can be deleted from a netCDF Dataset, Group or Variable using the python del statement (i.e. del grp.foo removes the attribute foo from the group grp).
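
For example:

>>> rootgrp.foo = "bar"  # create a throwaway global attribute
>>> "foo" in rootgrp.ncattrs()
True
>>> del rootgrp.foo  # and delete it again
>>> "foo" in rootgrp.ncattrs()
False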

Writing data to and retrieving data from a netCDF variable

Now that you have a netCDF Variable instance, how do you put data into it? You can just treat it like an array and assign data to a slice.

>>> import numpy as np
>>> lats =  np.arange(-90,91,2.5)
>>> lons =  np.arange(-180,180,2.5)
>>> latitudes[:] = lats
>>> longitudes[:] = lons
>>> print("latitudes =\n{}".format(latitudes[:]))
latitudes =
[-90.  -87.5 -85.  -82.5 -80.  -77.5 -75.  -72.5 -70.  -67.5 -65.  -62.5
 -60.  -57.5 -55.  -52.5 -50.  -47.5 -45.  -42.5 -40.  -37.5 -35.  -32.5
 -30.  -27.5 -25.  -22.5 -20.  -17.5 -15.  -12.5 -10.   -7.5  -5.   -2.5
   0.    2.5   5.    7.5  10.   12.5  15.   17.5  20.   22.5  25.   27.5
  30.   32.5  35.   37.5  40.   42.5  45.   47.5  50.   52.5  55.   57.5
  60.   62.5  65.   67.5  70.   72.5  75.   77.5  80.   82.5  85.   87.5
  90. ]

Unlike NumPy's array objects, netCDF Variable objects with unlimited dimensions will grow along those dimensions if you assign data outside the currently defined range of indices.

>>> # append along two unlimited dimensions by assigning to slice.
>>> nlats = len(rootgrp.dimensions["lat"])
>>> nlons = len(rootgrp.dimensions["lon"])
>>> print("temp shape before adding data = {}".format(temp.shape))
temp shape before adding data = (0, 0, 73, 144)
>>>
>>> from numpy.random import uniform
>>> temp[0:5, 0:10, :, :] = uniform(size=(5, 10, nlats, nlons))
>>> print("temp shape after adding data = {}".format(temp.shape))
temp shape after adding data = (5, 10, 73, 144)
>>>
>>> # levels have grown, but no values yet assigned.
>>> print("levels shape after adding pressure data = {}".format(levels.shape))
levels shape after adding pressure data = (10,)

Note that the size of the levels variable grows when data is appended along the level dimension of the variable temp, even though no data has yet been assigned to levels.

>>> # now, assign data to levels dimension variable.
>>> levels[:] =  [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]

However, there are some differences between NumPy and netCDF variable slicing rules. Slices behave as usual, being specified as a start:stop:step triplet. Using a scalar integer index i takes the ith element and reduces the rank of the output array by one. Boolean array and integer sequence indexing behaves differently for netCDF variables than for numpy arrays. Only 1-d boolean arrays and integer sequences are allowed, and these indices work independently along each dimension (similar to the way vector subscripts work in Fortran). This means that

>>> temp[0, 0, [0,1,2,3], [0,1,2,3]].shape
(4, 4)

returns an array of shape (4,4) when slicing a netCDF variable, but for a numpy array it returns an array of shape (4,). Similarly, a netCDF variable of shape (2,3,4,5) indexed with [0, array([True, False, True]), array([False, True, True, True]), :] would return a (2, 3, 5) array. In NumPy, this would raise an error since it would be equivalent to [0, [0,1], [1,2,3], :]. When slicing with integer sequences, the indices need not be sorted and may contain duplicates (both of these are new features in version 1.2.1). While this behaviour may cause some confusion for those used to NumPy's 'fancy indexing' rules, it provides a very powerful way to extract data from multidimensional netCDF variables by using logical operations on the dimension arrays to create slices.

For example,

>>> tempdat = temp[::2, [1,3,6], lats>0, lons>0]

will extract time indices 0,2 and 4, pressure levels 850, 500 and 200 hPa, all Northern Hemisphere latitudes and Eastern Hemisphere longitudes, resulting in a numpy array of shape (3, 3, 36, 71).

>>> print("shape of fancy temp slice = {}".format(tempdat.shape))
shape of fancy temp slice = (3, 3, 36, 71)

Special note for scalar variables: To extract data from a scalar variable v with no associated dimensions, use numpy.asarray(v) or v[…]. The result will be a numpy scalar array.
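
A minimal sketch (the variable name scalarv is made up for illustration):

>>> sv = rootgrp.createVariable("scalarv", "i4")  # no dimensions given -> scalar
>>> sv.assignValue(42)  # write the scalar value
>>> print(sv[...])  # read it back as a numpy scalar array
42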

By default, netcdf4-python returns numpy masked arrays with values equal to the missing_value or _FillValue variable attributes masked for primitive and enum data types. The Dataset.set_auto_mask() Dataset and Variable methods can be used to disable this feature so that numpy arrays are always returned, with the missing values included. Prior to version 1.4.0 the default behavior was to only return masked arrays when the requested slice contained missing values. This behavior can be recovered using the Dataset.set_always_mask() method. If a masked array is written to a netCDF variable, the masked elements are filled with the value specified by the missing_value attribute. If the variable has no missing_value, the _FillValue is used instead.
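
As a sketch of toggling this behavior (nc and v stand in for any open Dataset and one of its Variables):

>>> v = nc.variables["v"]  # hypothetical variable containing fill values
>>> masked = v[:]  # masked array (the default behavior)
>>> v.set_auto_mask(False)  # disable masking for this variable only
>>> raw = v[:]  # plain numpy array, fill values included
>>> nc.set_auto_mask(False)  # or disable it for every variable in nc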

Dealing with time coordinates

Time coordinate values pose a special challenge to netCDF users. Most metadata standards (such as CF) specify that time should be measured relative to a fixed date using a certain calendar, with units specified like hours since YY-MM-DD hh:mm:ss. These units can be awkward to deal with, without a utility to convert the values to and from calendar dates. The functions num2date and date2num are provided by cftime to do just that. Here's an example of how they can be used:

>>> # fill in times.
>>> from datetime import datetime, timedelta
>>> from cftime import num2date, date2num
>>> dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
>>> times[:] = date2num(dates,units=times.units,calendar=times.calendar)
>>> print("time values (in units {}):\n{}".format(times.units, times[:]))
time values (in units hours since 0001-01-01 00:00:00.0):
[17533104. 17533116. 17533128. 17533140. 17533152.]
>>> dates = num2date(times[:],units=times.units,calendar=times.calendar)
>>> print("dates corresponding to time values:\n{}".format(dates))
 [cftime.DatetimeGregorian(2001, 3, 1, 0, 0, 0, 0, has_year_zero=False)
  cftime.DatetimeGregorian(2001, 3, 1, 12, 0, 0, 0, has_year_zero=False)
  cftime.DatetimeGregorian(2001, 3, 2, 0, 0, 0, 0, has_year_zero=False)
  cftime.DatetimeGregorian(2001, 3, 2, 12, 0, 0, 0, has_year_zero=False)
  cftime.DatetimeGregorian(2001, 3, 3, 0, 0, 0, 0, has_year_zero=False)]

num2date() converts numeric values of time in the specified units and calendar to datetime objects, and date2num() does the reverse. All the calendars currently defined in the CF metadata convention are supported. A function called date2index() is also provided which returns the indices of a netCDF time variable corresponding to a sequence of datetime instances.
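
For example, continuing with the times variable written above, date2index() should locate the record for noon on March 1 (a sketch; the expected index follows from the 12-hourly dates used earlier):

>>> from netCDF4 import date2index
>>> from datetime import datetime
>>> print(date2index(datetime(2001, 3, 1, 12), times, select="exact"))
1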

Reading data from a multi-file netCDF dataset

If you want to read data from a variable that spans multiple netCDF files, you can use the MFDataset class to read the data as if it were contained in a single file. Instead of using a single filename to create a Dataset instance, create a MFDataset instance with either a list of filenames, or a string with a wildcard (which is then converted to a sorted list of files using the python glob module). Variables in the list of files that share the same unlimited dimension are aggregated together, and can be sliced across multiple files. To illustrate this, let's first create a bunch of netCDF files with the same variable (with the same unlimited dimension). The files must be in NETCDF3_64BIT_OFFSET, NETCDF3_64BIT_DATA, NETCDF3_CLASSIC or NETCDF4_CLASSIC format (NETCDF4 formatted multi-file datasets are not supported).

>>> for nf in range(10):
...     with Dataset("mftest%s.nc" % nf, "w", format="NETCDF4_CLASSIC") as f:
...         _ = f.createDimension("x",None)
...         x = f.createVariable("x","i",("x",))
...         x[0:10] = np.arange(nf*10,10*(nf+1))

Now read all the files back in at once with MFDataset:

>>> from netCDF4 import MFDataset
>>> f = MFDataset("mftest*nc")
>>> print(f.variables["x"][:])
[ 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 96 97 98 99]

Note that MFDataset can only be used to read, not write, multi-file datasets.

Efficient compression of netCDF variables

Data stored in netCDF Variable objects can be compressed and decompressed on the fly. The compression algorithm used is determined by the compression keyword argument to the Dataset.createVariable() method. zlib compression is always available, szip is available if the linked HDF5 library supports it, and zstd, bzip2, blosc_lz, blosc_lz4, blosc_lz4hc, blosc_zlib and blosc_zstd are available via optional external plugins. The complevel keyword regulates the speed and efficiency of the compression for zlib, bzip2 and zstd (1 being fastest, but lowest compression ratio, 9 being slowest but best compression ratio). The default value of complevel is 4. Setting shuffle=False will turn off the HDF5 shuffle filter, which de-interlaces a block of data before zlib compression by reordering the bytes. The shuffle filter can significantly improve compression ratios, and is on by default if compression=zlib. Setting the fletcher32 keyword argument to Dataset.createVariable() to True (it's False by default) enables the Fletcher32 checksum algorithm for error detection. It's also possible to set the HDF5 chunking parameters and endian-ness of the binary data stored in the HDF5 file with the chunksizes and endian keyword arguments to Dataset.createVariable(). These keyword arguments are only relevant for NETCDF4 and NETCDF4_CLASSIC files (where the underlying file format is HDF5) and are silently ignored if the file format is NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET or NETCDF3_64BIT_DATA. If the HDF5 library is built with szip support, compression=szip can also be used (in conjunction with the szip_coding and szip_pixels_per_block keyword arguments).
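
For example, a variable with explicit HDF5 chunk shapes and big-endian on-disk byte order could be created like this (a sketch; the name temp_chunked and the chunk shape are made up for illustration, and chunksizes must have one entry per dimension):

>>> tempc = rootgrp.createVariable("temp_chunked", "f4", ("time","level","lat","lon",),
...     compression="zlib", complevel=4, shuffle=True, fletcher32=True,
...     chunksizes=(1, 1, 73, 144), endian="big")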

If your data only has a certain number of digits of precision (say for example, it is temperature data that was measured with a precision of 0.1 degrees), you can dramatically improve compression by quantizing (or truncating) the data. There are two methods supplied for doing this. You can use the least_significant_digit keyword argument to Dataset.createVariable() to specify the power of ten of the smallest decimal place in the data that is a reliable value. For example if the data has a precision of 0.1, then setting least_significant_digit=1 will cause the data to be quantized using numpy.around(scale*data)/scale, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). This is done at the python level and is not a part of the underlying C library. Starting with netcdf-c version 4.9.0, a quantization capability is provided in the library. This can be used via the significant_digits Dataset.createVariable() kwarg (new in version 1.6.0). The interpretation of significant_digits is different than least_significant_digit in that it specifies the absolute number of significant digits independent of the magnitude of the variable (the floating point exponent). Either of these approaches makes the compression 'lossy' instead of 'lossless', that is, some precision in the data is sacrificed for the sake of disk space.

In our example, try replacing the line

>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))

with

>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib')

and then

>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',least_significant_digit=3)

or with netcdf-c >= 4.9.0

>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',significant_digits=4)

and see how much smaller the resulting files are.

Beyond homogeneous arrays of a fixed type - compound data types

Compound data types map directly to numpy structured (a.k.a 'record') arrays. Structured arrays are akin to C structs, or derived types in Fortran. They allow for the construction of table-like structures composed of combinations of other data types, including other compound types. Compound types might be useful for representing multiple parameter values at each point on a grid, or at each time and space location for scattered (point) data. You can then access all the information for a point by reading one variable, instead of reading different parameters from different variables. Compound data types are created from the corresponding numpy data type using the Dataset.createCompoundType() method of a Dataset or Group instance. Since there is no native complex data type in netcdf (but see Support for complex numbers), compound types are handy for storing numpy complex arrays. Here's an example:

>>> f = Dataset("complex.nc","w")
>>> size = 3 # length of 1-d complex array
>>> # create sample complex data.
>>> datac = np.exp(1j*(1.+np.linspace(0, np.pi, size)))
>>> # create complex128 compound data type.
>>> complex128 = np.dtype([("real",np.float64),("imag",np.float64)])
>>> complex128_t = f.createCompoundType(complex128,"complex128")
>>> # create a variable with this data type, write some data to it.
>>> x_dim = f.createDimension("x_dim",None)
>>> v = f.createVariable("cmplx_var",complex128_t,"x_dim")
>>> data = np.empty(size,complex128) # numpy structured array
>>> data["real"] = datac.real; data["imag"] = datac.imag
>>> v[:] = data # write numpy structured array to netcdf compound var
>>> # close and reopen the file, check the contents.
>>> f.close(); f = Dataset("complex.nc")
>>> v = f.variables["cmplx_var"]
>>> datain = v[:] # read in all the data into a numpy structured array
>>> # create an empty numpy complex array
>>> datac2 = np.empty(datain.shape,np.complex128)
>>> # .. fill it with contents of structured array.
>>> datac2.real = datain["real"]; datac2.imag = datain["imag"]
>>> print('{}: {}'.format(datac.dtype, datac)) # original data
complex128: [ 0.54030231+0.84147098j -0.84147098+0.54030231j -0.54030231-0.84147098j]
>>>
>>> print('{}: {}'.format(datac2.dtype, datac2)) # data from file
complex128: [ 0.54030231+0.84147098j -0.84147098+0.54030231j -0.54030231-0.84147098j]

Compound types can be nested, but you must create the 'inner' ones first. Not all possible numpy structured arrays can be represented as compound variables - an error message will be raised if you try to create one that is not supported. All of the compound types defined for a Dataset or Group are stored in a Python dictionary, just like variables and dimensions. As always, printing objects gives useful summary information in an interactive session:

>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x_dim(3)
    variables(dimensions): {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True} cmplx_var(x_dim)
    groups:
>>> print(f.variables["cmplx_var"])
<class 'netCDF4._netCDF4.Variable'>
compound cmplx_var(x_dim)
compound data type: {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}
unlimited dimensions: x_dim
current shape = (3,)
>>> print(f.cmptypes)
{'complex128': <class 'netCDF4._netCDF4.CompoundType'>: name = 'complex128', numpy dtype = {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}}
>>> print(f.cmptypes["complex128"])
<class 'netCDF4._netCDF4.CompoundType'>: name = 'complex128', numpy dtype = {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}

Variable-length (vlen) data types

NetCDF 4 has support for variable-length or "ragged" arrays. These are arrays of variable length sequences having the same type. To create a variable-length data type, use the Dataset.createVLType() method of a Dataset or Group instance.

>>> f = Dataset("tst_vlen.nc","w")
>>> vlen_t = f.createVLType(np.int32, "phony_vlen")

The numpy datatype of the variable-length sequences and the name of the new datatype must be specified. Any of the primitive datatypes can be used (signed and unsigned integers, 32 and 64 bit floats, and characters), but compound data types cannot. A new variable can then be created using this datatype.

>>> x = f.createDimension("x",3)
>>> y = f.createDimension("y",4)
>>> vlvar = f.createVariable("phony_vlen_var", vlen_t, ("y","x"))

Since there is no native vlen datatype in numpy, vlen arrays are represented in python as object arrays (arrays of dtype object). These are arrays whose elements are Python object pointers, and can contain any type of python object. For this application, they must contain 1-D numpy arrays all of the same type but of varying length. In this case, they contain 1-D numpy int32 arrays of random length between 1 and 10.

>>> import random
>>> random.seed(54321)
>>> data = np.empty(len(y)*len(x),object)
>>> for n in range(len(y)*len(x)):
...     data[n] = np.arange(random.randint(1,10),dtype="int32")+1
>>> data = np.reshape(data,(len(y),len(x)))
>>> vlvar[:] = data
>>> print("vlen variable =\n{}".format(vlvar[:]))
vlen variable =
[[array([1, 2, 3, 4, 5, 6, 7, 8], dtype=int32) array([1, 2], dtype=int32)
  array([1, 2, 3, 4], dtype=int32)]
 [array([1, 2, 3], dtype=int32)
  array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
  array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)]
 [array([1, 2, 3, 4, 5, 6, 7], dtype=int32) array([1, 2, 3], dtype=int32)
  array([1, 2, 3, 4, 5, 6], dtype=int32)]
 [array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
  array([1, 2, 3, 4, 5], dtype=int32) array([1, 2], dtype=int32)]]
>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(3), y(4)
    variables(dimensions): int32 phony_vlen_var(y,x)
    groups:
>>> print(f.variables["phony_vlen_var"])
<class 'netCDF4._netCDF4.Variable'>
vlen phony_vlen_var(y, x)
vlen data type: int32
unlimited dimensions:
current shape = (4, 3)
>>> print(f.vltypes["phony_vlen"])
<class 'netCDF4._netCDF4.VLType'>: name = 'phony_vlen', numpy dtype = int32

Numpy object arrays containing python strings can also be written as vlen variables. For vlen strings, you don't need to create a vlen data type. Instead, simply use the python str builtin (or a numpy string datatype with fixed length greater than 1) when calling the Dataset.createVariable() method.

>>> z = f.createDimension("z",10)
>>> strvar = f.createVariable("strvar", str, "z")

In this example, an object array is filled with random python strings with random lengths between 2 and 12 characters, and the data in the object array is assigned to the vlen string variable.

>>> chars = "1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
>>> data = np.empty(10,"O")
>>> for n in range(10):
...     stringlen = random.randint(2,12)
...     data[n] = "".join([random.choice(chars) for i in range(stringlen)])
>>> strvar[:] = data
>>> print("variable-length string variable:\n{}".format(strvar[:]))
variable-length string variable:
['Lh' '25F8wBbMI' '53rmM' 'vvjnb3t63ao' 'qjRBQk6w' 'aJh' 'QF'
 'jtIJbJACaQk4' '3Z5' 'bftIIq']
>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(3), y(4), z(10)
    variables(dimensions): int32 phony_vlen_var(y,x), <class 'str'> strvar(z)
    groups:
>>> print(f.variables["strvar"])
<class 'netCDF4._netCDF4.Variable'>
vlen strvar(z)
vlen data type: <class 'str'>
unlimited dimensions:
current shape = (10,)

It is also possible to set contents of vlen string variables with numpy arrays of any string or unicode data type. Note, however, that accessing the contents of such variables will always return numpy arrays with dtype object.

Enum data type

netCDF4 has an enumerated data type, which is an integer datatype that is restricted to certain named values. Since Enums don't map directly to a numpy data type, they are read and written as integer arrays.

Here's an example of using an Enum type to hold cloud type data. The base integer data type and a python dictionary describing the allowed values and their names are used to define an Enum data type using Dataset.createEnumType().

>>> nc = Dataset('clouds.nc','w')
>>> # python dict with allowed values and their names.
>>> enum_dict = {'Altocumulus': 7, 'Missing': 255,
... 'Stratus': 2, 'Clear': 0,
... 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5,
... 'Cumulonimbus': 1, 'Stratocumulus': 3}
>>> # create the Enum type called 'cloud_t'.
>>> cloud_type = nc.createEnumType(np.uint8,'cloud_t',enum_dict)
>>> print(cloud_type)
<class 'netCDF4._netCDF4.EnumType'>: name = 'cloud_t', numpy dtype = uint8, fields/values ={'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0, 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 'Stratocumulus': 3}

A new variable can be created in the usual way using this data type. Integer data is written to the variable that represents the named cloud types in enum_dict. A ValueError will be raised if an attempt is made to write an integer value not associated with one of the specified names.

>>> time = nc.createDimension('time',None)
>>> # create a 1d variable of type 'cloud_type'.
>>> # The fill_value is set to the 'Missing' named value.
>>> cloud_var = nc.createVariable('primary_cloud',cloud_type,'time',
...                               fill_value=enum_dict['Missing'])
>>> # write some data to the variable.
>>> cloud_var[:] = [enum_dict[k] for k in ['Clear', 'Stratus', 'Cumulus',
...                                        'Missing', 'Cumulonimbus']]
>>> nc.close()
>>> # reopen the file, read the data.
>>> nc = Dataset('clouds.nc')
>>> cloud_var = nc.variables['primary_cloud']
>>> print(cloud_var)
<class 'netCDF4._netCDF4.Variable'>
enum primary_cloud(time)
    _FillValue: 255
enum data type: uint8
unlimited dimensions: time
current shape = (5,)
>>> print(cloud_var.datatype.enum_dict)
{'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0, 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 'Stratocumulus': 3}
>>> print(cloud_var[:])
[0 2 4 -- 1]
>>> nc.close()

Parallel IO

If MPI parallel enabled versions of netcdf and hdf5 or pnetcdf are detected, and mpi4py is installed, netcdf4-python will be built with parallel IO capabilities enabled. Parallel IO of NETCDF4 or NETCDF4_CLASSIC formatted files is only available if the MPI parallel HDF5 library is available. Parallel IO of classic netcdf-3 file formats is only available if the PnetCDF library is available. To use parallel IO, your program must be running in an MPI environment using mpi4py.

>>> from mpi4py import MPI
>>> import numpy as np
>>> from netCDF4 import Dataset
>>> rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)

To run an MPI-based parallel program like this, you must use mpiexec to launch several parallel instances of Python (for example, using mpiexec -np 4 python mpi_example.py). The parallel features of netcdf4-python are mostly transparent - when a new dataset is created or an existing dataset is opened, use the parallel keyword to enable parallel access.

>>> nc = Dataset('parallel_test.nc','w',parallel=True)

The optional comm keyword may be used to specify a particular MPI communicator (MPI_COMM_WORLD is used by default). Each process (or rank) can now write to the file independently. In this example, the process rank is written to a different variable index on each task.

>>> d = nc.createDimension('dim',4)
>>> v = nc.createVariable('var', np.int64, 'dim')
>>> v[rank] = rank
>>> nc.close()

% ncdump parallel_test.nc
netcdf parallel_test {
dimensions:
    dim = 4 ;
variables:
    int64 var(dim) ;
data:

    var = 0, 1, 2, 3 ;
}

There are two types of parallel IO, independent (the default) and collective. Independent IO means that each process can do IO independently. It should not depend on or be affected by other processes. Collective IO is a way of doing IO defined in the MPI-IO standard; unlike independent IO, all processes must participate in doing IO. To toggle back and forth between the two types of IO, use the Variable.set_collective() Variable method. All metadata operations (such as creation of groups, types, variables, dimensions, or attributes) are collective. There are a couple of important limitations of parallel IO:

  • parallel IO for NETCDF4 or NETCDF4_CLASSIC formatted files is only available if the netcdf library was compiled with MPI enabled HDF5.
  • parallel IO for all classic netcdf-3 file formats is only available if the netcdf library was compiled with PnetCDF.
  • If a variable has an unlimited dimension, appending data must be done in collective mode. If the write is done in independent mode, the operation will fail with a generic "HDF Error".
  • You can write compressed data in parallel only with netcdf-c >= 4.7.4 and hdf5 >= 1.10.3 (although you can read in parallel with earlier versions). To write compressed data in parallel, the variable must be in 'collective IO mode'. This is done automatically on variable creation if compression is turned on, but if you are appending to a variable in an existing file, you must call Variable.set_collective(True) before attempting to write to it (see the sketch after this list).
  • You cannot use variable-length (VLEN) data types.
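
For example, a minimal sketch toggling collective mode on the variable v from the parallel example above (before the file is closed):

>>> v.set_collective(True)  # switch to collective IO; all ranks must participate
>>> v[rank] = rank  # this write is now done collectively
>>> v.set_collective(False)  # switch back to independent IO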

Important warning regarding threads: The underlying netcdf-c library is not thread-safe, so netcdf4-python cannot perform parallel IO in a multi-threaded environment. Users should expect segfaults if a netcdf file is opened on multiple threads - care should be taken to restrict netcdf4-python usage to a single thread, even when using free-threaded python.

Dealing with strings

The most flexible way to store arrays of strings is with the Variable-length (vlen) string data type. However, this requires the use of the NETCDF4 data model, and the vlen type does not map very well to numpy arrays (you have to use numpy arrays of dtype=object, which are arrays of arbitrary python objects). numpy does have a fixed-width string array data type, but unfortunately the netCDF data model does not. Instead, fixed-width byte strings are typically stored as arrays of 8-bit characters. To perform the conversion to and from character arrays to fixed-width numpy string arrays, the following convention is followed by the python interface. If the _Encoding special attribute is set for a character array (dtype S1) variable, the chartostring() utility function is used to convert the array of characters to an array of strings with one less dimension (the last dimension is interpreted as the length of each string) when reading the data. The character set is specified by the _Encoding attribute. If _Encoding is 'none' or 'bytes', then the character array is converted to a numpy fixed-width byte string array (dtype S#), otherwise a numpy unicode (dtype U#) array is created. When writing the data, stringtochar() is used to convert the numpy string array to an array of characters with one more dimension. For example,

>>> from netCDF4 import stringtochar
>>> nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC')
>>> _ = nc.createDimension('nchars',3)
>>> _ = nc.createDimension('nstrings',None)
>>> v = nc.createVariable('strings','S1',('nstrings','nchars'))
>>> datain = np.array(['foo','bar'],dtype='S3')
>>> v[:] = stringtochar(datain) # manual conversion to char array
>>> print(v[:]) # data returned as char array
[[b'f' b'o' b'o']
 [b'b' b'a' b'r']]
>>> v._Encoding = 'ascii' # this enables automatic conversion
>>> v[:] = datain # conversion to char array done internally
>>> print(v[:])  # data returned in numpy string array
['foo' 'bar']
>>> nc.close()

Even if the _Encoding attribute is set, the automatic conversion of char arrays to/from string arrays can be disabled with Variable.set_auto_chartostring().

A similar situation is often encountered with numpy structured arrays with subdtypes containing fixed-width byte strings (dtype=S#). Since there is no native fixed-length string netCDF datatype, these numpy structured arrays are mapped onto netCDF compound types with character array elements. In this case the string <-> char array conversion is handled automatically (without the need to set the _Encoding attribute) using numpy views. The structured array dtype (including the string elements) can even be used to define the compound data type - the string dtype will be converted to character array dtype under the hood when creating the netcdf compound type. Here's an example:

>>> nc = Dataset('compoundstring_example.nc','w')
>>> dtype = np.dtype([('observation', 'f4'),
...                      ('station_name','S10')])
>>> station_data_t = nc.createCompoundType(dtype,'station_data')
>>> _ = nc.createDimension('station',None)
>>> statdat = nc.createVariable('station_obs', station_data_t, ('station',))
>>> data = np.empty(2,dtype)
>>> data['observation'][:] = (123.,3.14)
>>> data['station_name'][:] = ('Boulder','New York')
>>> print(statdat.dtype) # strings actually stored as character arrays
{'names':['observation','station_name'], 'formats':['<f4',('S1', (10,))], 'offsets':[0,4], 'itemsize':16, 'aligned':True}
>>> statdat[:] = data # strings converted to character arrays internally
>>> print(statdat[:])  # character arrays converted back to strings
[(123.  , b'Boulder') (  3.14, b'New York')]
>>> print(statdat[:].dtype)
{'names':['observation','station_name'], 'formats':['<f4','S10'], 'offsets':[0,4], 'itemsize':16, 'aligned':True}
>>> statdat.set_auto_chartostring(False) # turn off auto-conversion
>>> statdat[:] = data.view(dtype=[('observation', 'f4'),('station_name','S1',10)])
>>> print(statdat[:])  # now structured array with char array subtype is returned
[(123.  , [b'B', b'o', b'u', b'l', b'd', b'e', b'r', b'', b'', b''])
 (  3.14, [b'N', b'e', b'w', b' ', b'Y', b'o', b'r', b'k', b'', b''])]
>>> nc.close()

Note that there is currently no support for mapping numpy structured arrays with unicode elements (dtype U#) onto netCDF compound types, nor is there support for netCDF compound types with vlen string components.

In-memory (diskless) Datasets

You can create netCDF Datasets whose content is held in memory instead of in a disk file. There are two ways to do this. If you don't need access to the memory buffer containing the Dataset from within python, the best way is to use the diskless=True keyword argument when creating the Dataset. If you want to save the Dataset to disk when you close it, also set persist=True. If you want to create a new read-only Dataset from an existing python memory buffer, use the memory keyword argument to pass the memory buffer when creating the Dataset. If you want to create a new in-memory Dataset, and then access the memory buffer directly from Python, use the memory keyword argument to specify the estimated size of the Dataset in bytes when creating the Dataset with mode='w'. Then, the Dataset.close() method will return a python memoryview object representing the Dataset. Below are examples illustrating both approaches.

>>> # create a diskless (in-memory) Dataset,
>>> # and persist the file to disk when it is closed.
>>> nc = Dataset('diskless_example.nc','w',diskless=True,persist=True)
>>> d = nc.createDimension('x',None)
>>> v = nc.createVariable('v',np.int32,'x')
>>> v[0:5] = np.arange(5)
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close() # file saved to disk
>>> # create an in-memory dataset from an existing python
>>> # python memory buffer.
>>> # read the newly created netcdf file into a python
>>> # bytes object.
>>> with open('diskless_example.nc', 'rb') as f:
...     nc_bytes = f.read()
>>> # create a netCDF in-memory dataset from the bytes object.
>>> nc = Dataset('inmemory.nc', memory=nc_bytes)
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close()
>>> # create an in-memory Dataset and retrieve memory buffer
>>> # estimated size is 1028 bytes - this is actually only
>>> # used if format is NETCDF3
>>> # (ignored for NETCDF4/HDF5 files).
>>> nc = Dataset('inmemory.nc', mode='w',memory=1028)
>>> d = nc.createDimension('x',None)
>>> v = nc.createVariable('v',np.int32,'x')
>>> v[0:5] = np.arange(5)
>>> nc_buf = nc.close() # close returns memoryview
>>> print(type(nc_buf))
<class 'memoryview'>
>>> # save nc_buf to disk, read it back in and check.
>>> with open('inmemory.nc', 'wb') as f:
...     f.write(nc_buf)
>>> nc = Dataset('inmemory.nc')
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close()

Support for complex numbers

Although there is no native support for complex numbers in netCDF, there are some common conventions for storing them. Two of the most common are to either use a compound datatype for the real and imaginary components, or a separate dimension. netCDF4 supports reading several of these conventions, as well as writing using one of two conventions (depending on file format). This support for complex numbers is enabled by setting auto_complex=True when opening a Dataset:

>>> complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j])
>>> with netCDF4.Dataset("complex.nc", "w", auto_complex=True) as nc:
...     nc.createDimension("x", size=len(complex_array))
...     var = nc.createVariable("data", "c16", ("x",))
...     var[:] = complex_array
...     print(var)
<class 'netCDF4._netCDF4.Variable'>
compound data(x)
compound data type: complex128
unlimited dimensions:
current shape = (5,)

When reading files using auto_complex=True, netCDF4 will interpret variables stored using the following conventions as complex numbers:

  • compound datatypes with two float or double members whose names begin with r and i (case insensitive)
  • a dimension of length 2 named complex or ri

When writing files using auto_complex=True, netCDF4 will use:

  • a compound datatype named _PFNC_DOUBLE_COMPLEX_TYPE (or *FLOAT* as appropriate) with members r and i for netCDF4 formats;
  • or a dimension of length 2 named _pfnc_complex for netCDF3 or classic formats.

Support for complex numbers is handled via the nc-complex library. See there for further details.

contact: Jeffrey Whitaker whitaker.jeffrey@gmail.com

copyright: 2008 by Jeffrey Whitaker.

license: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Functions

def chartostring(b, encoding='utf-8')

chartostring(b,encoding='utf-8')

convert a character array to a string array with one less dimension.

b: Input character array (numpy datatype 'S1' or 'U1'). Will be converted to an array of strings, where each string has a fixed length of b.shape[-1] characters.

optional kwarg encoding can be used to specify character encoding (default utf-8). If encoding is 'none' or 'bytes', a numpy.string_ byte array is returned.

returns a numpy string array with datatype 'UN' (or 'SN') and shape b.shape[:-1] where N=b.shape[-1].
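
A minimal usage sketch:

>>> from netCDF4 import chartostring
>>> import numpy as np
>>> c = np.array([[b"f", b"o", b"o"], [b"b", b"a", b"r"]], dtype="S1")
>>> print(chartostring(c))  # shape (2, 3) char array -> shape (2,) string array
['foo' 'bar']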

def date2index(dates, nctime, calendar=None, select='exact', has_year_zero=None)

date2index(dates, nctime, calendar=None, select='exact', has_year_zero=None)

Return indices of a netCDF time variable corresponding to the given dates.

dates: A datetime object or a sequence of datetime objects. The datetime objects should not include a time-zone offset.

nctime: A netCDF time variable object. The nctime object must have a units attribute.

calendar: describes the calendar to be used in the time calculations. All the values currently defined in the CF metadata convention <http://cfconventions.org/cf-conventions/cf-conventions#calendar>__ are supported. Valid calendars are 'standard', 'gregorian', 'proleptic_gregorian', 'noleap', '365_day', '360_day', 'julian', 'all_leap', '366_day'. Default is None which means the calendar associated with the first input datetime instance will be used.

select: 'exact', 'before', 'after', 'nearest' The index selection method. exact will return the indices perfectly matching the dates given. before and after will return the indices corresponding to the dates just before or just after the given dates if an exact match cannot be found. nearest will return the indices that correspond to the closest dates.

has_year_zero: if set to True, astronomical year numbering is used and the year zero exists. If set to False for real-world calendars, then historical year numbering is used and the year 1 is preceded by year -1 and no year zero exists. The defaults are set to conform with CF version 1.9 conventions (False for 'julian', 'gregorian'/'standard', True for 'proleptic_gregorian' (ISO 8601) and True for the idealized calendars 'noleap'/'365_day', '360_day', '366_day'/'all_leap'). The defaults can only be over-ridden for the real-world calendars; for the idealized calendars the year zero always exists and the has_year_zero kwarg is ignored. This kwarg is not needed to define calendar systems allowed by CF (the calendar-specific defaults do this).

returns an index (indices) of the netCDF time variable corresponding to the given datetime object(s).

def date2num(dates, units, calendar=None, has_year_zero=None, longdouble=False)

date2num(dates, units, calendar=None, has_year_zero=None, longdouble=False)

Return numeric time values given datetime objects. The units of the numeric time values are described by the units argument and the calendar keyword. The datetime objects must be in UTC with no time-zone offset. If there is a time-zone offset in units, it will be applied to the returned numeric values.

dates: A datetime object or a sequence of datetime objects. The datetime objects should not include a time-zone offset. They can be either native python datetime instances (which use the proleptic gregorian calendar) or cftime.datetime instances.

units: a string of the form '<time units> since <reference time>' describing the time units. <time units> can be days, hours, minutes, seconds, milliseconds or microseconds. <reference time> is the time origin. 'months since' is allowed only for the 360_day calendar and 'common_years since' is allowed only for the 365_day calendar.

calendar: describes the calendar to be used in the time calculations. All the values currently defined in the CF metadata convention (http://cfconventions.org/cf-conventions/cf-conventions#calendar) are supported. Valid calendars are 'standard', 'gregorian', 'proleptic_gregorian', 'noleap', '365_day', '360_day', 'julian', 'all_leap' and '366_day'. Default is None, which means the calendar associated with the first input datetime instance will be used.

has_year_zero: If set to True, astronomical year numbering is used and the year zero exists. If set to False for real-world calendars, then historical year numbering is used and the year 1 is preceded by year -1 and no year zero exists. The defaults are set to conform with CF version 1.9 conventions (False for 'julian' and 'gregorian'/'standard', True for 'proleptic_gregorian' (ISO 8601) and True for the idealized calendars 'noleap'/'365_day', '360_day' and '366_day'/'all_leap'). Note that CF v1.9 does not specifically mention whether year zero is allowed in the proleptic_gregorian calendar, but ISO-8601 has a year zero so we have adopted this as the default. The defaults can only be overridden for the real-world calendars; for the idealized calendars the year zero always exists and the has_year_zero kwarg is ignored. This kwarg is not needed to define calendar systems allowed by CF (the calendar-specific defaults do this).

longdouble: If set to True, output is in the long double float type (numpy.float128) instead of float (numpy.float64), allowing microsecond accuracy when converting a time value to a date and back again. Otherwise this is only possible if the discretization of the time variable is an integer multiple of the units.

returns a numeric time value, or an array of numeric time values with approximately 1 microsecond accuracy.
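
For example, a minimal sketch using native python datetime instances:

>>> from datetime import datetime
>>> from netCDF4 import date2num
>>> dates = [datetime(2000, 1, 1), datetime(2000, 1, 2)]
>>> print(date2num(dates, units="days since 2000-01-01", calendar="standard"))
[0. 1.]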

def get_alignment()

get_alignment()

return current netCDF alignment within HDF5 files in a tuple (threshold,alignment). See netcdf C library documentation for nc_get_alignment for details. Values can be reset with set_alignment().

This function was added in netcdf 4.9.0.
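
For example, a minimal sketch (requires netcdf-c >= 4.9.0; the values shown are illustrative):

>>> from netCDF4 import get_alignment, set_alignment
>>> threshold, alignment = get_alignment()  # current library settings
>>> set_alignment(4096, 4096)  # align objects of 4 KiB or more on 4 KiB boundaries
>>> print(get_alignment())
(4096, 4096)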

def get_chunk_cache()

get_chunk_cache()

return current netCDF chunk cache information in a tuple (size,nelems,preemption). See netcdf C library documentation for nc_get_chunk_cache for details. Values can be reset with set_chunk_cache().
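
For example, a minimal sketch that doubles the cache size while keeping the other settings:

>>> from netCDF4 import get_chunk_cache, set_chunk_cache
>>> size, nelems, preemption = get_chunk_cache()
>>> set_chunk_cache(2 * size, nelems, preemption)
>>> print(get_chunk_cache()[0] == 2 * size)
True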

def getlibversion()

getlibversion()

returns a string describing the version of the netcdf library used to build the module, and when it was built.

def num2date(times,
units,
calendar='standard',
only_use_cftime_datetimes=True,
only_use_python_datetimes=False,
has_year_zero=None)

num2date(times, units, calendar='standard', only_use_cftime_datetimes=True, only_use_python_datetimes=False, has_year_zero=None)

Return datetime objects given numeric time values. The units of the numeric time values are described by the units argument and the calendar keyword. The returned datetime objects represent UTC with no time-zone offset, even if the specified units contain a time-zone offset.

times: numeric time values.

units: a string of the form '<time units> since <reference time>' describing the time units. <time units> can be days, hours, minutes, seconds, milliseconds or microseconds. <reference time> is the time origin. 'months since' is allowed only for the 360_day calendar and 'common_years since' is allowed only for the 365_day calendar.

calendar: describes the calendar used in the time calculations. All the values currently defined in the CF metadata convention (http://cfconventions.org/cf-conventions/cf-conventions#calendar) are supported. Valid calendars are 'standard', 'gregorian', 'proleptic_gregorian', 'noleap', '365_day', '360_day', 'julian', 'all_leap' and '366_day'. Default is 'standard', which is a mixed Julian/Gregorian calendar.

only_use_cftime_datetimes: if False, python datetime.datetime objects are returned from num2date where possible; if True dates which subclass cftime.datetime are returned for all calendars. Default True.

only_use_python_datetimes: always return python datetime.datetime objects and raise an error if this is not possible. Ignored unless only_use_cftime_datetimes=False. Default False.

has_year_zero: if set to True, astronomical year numbering is used and the year zero exists. If set to False for real-world calendars, then historical year numbering is used and the year 1 is preceded by year -1 and no year zero exists. The defaults are set to conform with CF version 1.9 conventions (False for 'julian' and 'gregorian'/'standard', True for 'proleptic_gregorian' (ISO 8601) and True for the idealized calendars 'noleap'/'365_day', '360_day' and '366_day'/'all_leap'). The defaults can only be overridden for the real-world calendars; for the idealized calendars the year zero always exists and the has_year_zero kwarg is ignored. This kwarg is not needed to define calendar systems allowed by CF (the calendar-specific defaults do this).

returns a datetime instance, or an array of datetime instances with microsecond accuracy, if possible.

Note: If only_use_cftime_datetimes=False and only_use_python_datetimes=False, the datetime instances returned are 'real' python datetime objects if calendar='proleptic_gregorian', or calendar='standard' or 'gregorian' and the date is after the breakpoint between the Julian and Gregorian calendars (1582-10-15). Otherwise, they are cftime.datetime objects which support some but not all the methods of native python datetime objects. The datetime instances do not contain a time-zone offset, even if the specified units contain one.
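
For example, a minimal sketch returning python datetime objects:

>>> from netCDF4 import num2date
>>> dates = num2date([0, 12, 24], units="hours since 2000-01-01",
...                  calendar="standard", only_use_cftime_datetimes=False)
>>> print(dates[1])
2000-01-01 12:00:00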

def rc_get(key)

rc_get(key)

Returns the internal netcdf-c rc table value corresponding to key. See https://docs.unidata.ucar.edu/netcdf-c/current/md_auth.html for more information on rc files and values.

def rc_set(key, value)

rc_set(key, value)

Sets the internal netcdf-c rc table value corresponding to key. See https://docs.unidata.ucar.edu/netcdf-c/current/md_auth.html for more information on rc files and values.

def set_alignment(threshold, alignment)

set_alignment(threshold,alignment)

Change the HDF5 file alignment. See netcdf C library documentation for nc_set_alignment for details.

This function was added in netcdf 4.9.0.

def set_chunk_cache(size=None, nelems=None, preemption=None)

set_chunk_cache(size=None,nelems=None,preemption=None)

change netCDF4 chunk cache settings. See netcdf C library documentation for nc_set_chunk_cache for details.

def stringtoarr(string, NUMCHARS, dtype='S')

stringtoarr(string, NUMCHARS, dtype='S')

convert a string to a character array of length NUMCHARS.

string: Input python string.

NUMCHARS: number of characters used to represent the string (if len(string) < NUMCHARS, it will be padded on the right with blanks).

dtype: type of numpy array to return. Default is 'S', which means an array of dtype 'S1' will be returned. If dtype='U', a unicode array (dtype = 'U1') will be returned.

returns a rank 1 numpy character array of length NUMCHARS with datatype 'S1' (default) or 'U1' (if dtype='U')
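
For example:

>>> from netCDF4 import stringtoarr
>>> arr = stringtoarr("foo", 8)  # right-padded with blanks to 8 characters
>>> print(arr.shape, arr.dtype)
(8,) |S1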

def stringtochar(a, encoding='utf-8', n_strlen=None)

stringtochar(a,encoding='utf-8',n_strlen=None)

convert a string array to a character array with one extra dimension

a: Input numpy string array with numpy datatype 'SN' or 'UN', where N is the number of characters in each string. Will be converted to an array of characters (datatype 'S1' or 'U1') of shape a.shape + (N,).

optional kwarg encoding can be used to specify character encoding (default utf-8). If encoding is 'none' or 'bytes', the input array is treated as raw byte strings (numpy.string_).

optional kwarg n_strlen is the number of characters in each string. Default is None, which means n_strlen will be set to a.itemsize (the number of bytes used to represent each string in the input array).

returns a numpy character array with datatype 'S1' or 'U1' and shape a.shape + (N,), where N is the length of each string in a.
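
For example, a minimal sketch showing the shape relationship and the n_strlen padding described above:

>>> import numpy as np
>>> from netCDF4 import stringtochar
>>> a = np.array(["ab", "cd", "ef"], dtype="S2")
>>> print(stringtochar(a).shape)
(3, 2)
>>> print(stringtochar(a, n_strlen=4).shape)
(3, 4)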

Classes

class CompoundType (...)

A CompoundType instance is used to describe a compound data type, and can be passed to the Dataset.createVariable() method of a Dataset or Group instance. Compound data types map to numpy structured arrays. See CompoundType for more details.

The instance variables dtype and name should not be modified by the user.

__init__(group, datatype, datatype_name)

CompoundType constructor.

group: Group instance to associate with the compound datatype.

datatype: A numpy dtype object describing a structured (a.k.a. record) array. Can be composed of homogeneous numeric or character data types, or other structured array data types.

datatype_name: a Python string containing a description of the compound data type.

Note 1: When creating nested compound data types, the inner compound data types must already be associated with CompoundType instances (so create CompoundType instances for the innermost structures first).

Note 2: CompoundType instances should be created using the Dataset.createCompoundType() method of a Dataset or Group instance, not using this class directly.

Instance variables

var dtype
var dtype_view
var name

class Dataset (...)

A netCDF Dataset is a collection of dimensions, groups, variables and attributes. Together they describe the meaning of data and relations among data fields stored in a netCDF file. See Dataset for more details.

A list of attribute names corresponding to global netCDF attributes defined for the Dataset can be obtained with the Dataset.ncattrs() method. These attributes can be created by assigning to an attribute of the Dataset instance. A dictionary containing all the netCDF attribute name/value pairs is provided by the __dict__ attribute of a Dataset instance.

The following class variables are read-only and should not be modified by the user.

dimensions: The dimensions dictionary maps the names of dimensions defined for the Group or Dataset to instances of the Dimension class.

variables: The variables dictionary maps the names of variables defined for this Dataset or Group to instances of the Variable class.

groups: The groups dictionary maps the names of groups created for this Dataset or Group to instances of the Group class (the Dataset class is simply a special case of the Group class which describes the root group in the netCDF4 file).

cmptypes: The cmptypes dictionary maps the names of compound types defined for the Group or Dataset to instances of the CompoundType class.

vltypes: The vltypes dictionary maps the names of variable-length types defined for the Group or Dataset to instances of the VLType class.

enumtypes: The enumtypes dictionary maps the names of Enum types defined for the Group or Dataset to instances of the EnumType class.

data_model: data_model describes the netCDF data model version, one of NETCDF3_CLASSIC, NETCDF4, NETCDF4_CLASSIC, NETCDF3_64BIT_OFFSET or NETCDF3_64BIT_DATA.

file_format: same as data_model, retained for backwards compatibility.

disk_format: disk_format describes the underlying file format, one of NETCDF3, HDF5, HDF4, PNETCDF, DAP2, DAP4 or UNDEFINED. Only available if using netcdf C library version >= 4.3.1, otherwise will always return UNDEFINED.

parent: parent is a reference to the parent Group instance. None for the root group or Dataset instance.

path: path shows the location of the Group in the Dataset in a unix directory format (the names of groups in the hierarchy separated by forward slashes). A Dataset instance is the root group, so its path is simply '/'.

keepweakref: If True, child Dimension and Variable objects only keep weak references to the parent Dataset or Group.

_ncstring_attrs__: If True, all text attributes will be written as variable-length strings.

__init__(self, filename, mode="r", clobber=True, diskless=False, persist=False, keepweakref=False, memory=None, encoding=None, parallel=False, comm=None, info=None, format='NETCDF4', auto_complex=False)

Dataset constructor.

filename: Name of netCDF file to hold dataset. Can also be a python 3 pathlib instance or the URL of an OpenDAP dataset. When memory is set this is just used to set the filepath().

mode: access mode. r means read-only; no data can be modified. w means write; a new file is created, an existing file with the same name is deleted. x means write, but fail if an existing file with the same name already exists. a and r+ mean append; an existing file is opened for reading and writing, and if the file does not already exist, one is created. Appending s to modes r, w, r+ or a will enable unbuffered shared access to NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET or NETCDF3_64BIT_DATA formatted files. Unbuffered access may be useful even if you don't need shared access, since it may be faster for programs that don't access data sequentially. This option is ignored for NETCDF4 and NETCDF4_CLASSIC formatted files.

clobber: if True (default), opening a file with mode='w' will clobber an existing file with the same name. if False, an exception will be raised if a file with the same name already exists. mode=x is identical to mode=w with clobber=False.

format: underlying file format (one of 'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_CLASSIC', 'NETCDF3_64BIT_OFFSET' or 'NETCDF3_64BIT_DATA'). Only relevant if mode = 'w' (if mode = 'r','a' or 'r+' the file format is automatically detected). Default 'NETCDF4', which means the data is stored in an HDF5 file, using netCDF 4 API features. Setting format='NETCDF4_CLASSIC' will create an HDF5 file, using only netCDF 3 compatible API features. netCDF 3 clients must be recompiled and linked against the netCDF 4 library to read files in NETCDF4_CLASSIC format. 'NETCDF3_CLASSIC' is the classic netCDF 3 file format that does not handle 2+ GB files. 'NETCDF3_64BIT_OFFSET' is the 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. 'NETCDF3_64BIT_DATA' is the 64-bit data version of the netCDF 3 file format, which supports 64-bit dimension sizes plus unsigned and 64 bit integer data types, but is only compatible with clients linked against netCDF version 4.4.0 or later.

diskless: If True, create diskless (in-core) file. This is a feature added to the C library after the netcdf-4.2 release. If you need to access the memory buffer directly, use the in-memory feature instead (see memory kwarg).

persist: if diskless=True, persist file to disk when closed (default False).

keepweakref: if True, child Dimension and Variable instances will keep weak references to the parent Dataset or Group object. Default is False, which means strong references will be kept. Having Dimension and Variable instances keep a strong reference to the parent Dataset instance, which in turn keeps a reference to child Dimension and Variable instances, creates circular references. Circular references complicate garbage collection, which may mean increased memory usage for programs that create many Dataset instances with lots of Variables. It also will result in the Dataset object never being deleted, which means it may keep open files alive as well. Setting keepweakref=True allows Dataset instances to be garbage collected as soon as they go out of scope, potentially reducing memory usage and open file handles. However, in many cases this is not desirable, since the associated Variable instances may still be needed, but are rendered unusable when the parent Dataset instance is garbage collected.

memory: if not None, create or open an in-memory Dataset. If mode = r, the memory kwarg must contain a memory buffer object (an object that supports the python buffer interface). The Dataset will then be created with contents taken from this block of memory. If mode = w, the memory kwarg should contain the anticipated size of the Dataset in bytes (used only for NETCDF3 files). A memory buffer containing a copy of the Dataset is returned by the Dataset.close() method. Requires netcdf-c version 4.4.1 for mode='r' and netcdf-c 4.6.2 for mode='w'. To persist the file to disk, the raw bytes from the returned buffer can be written into a binary file. The Dataset can also be re-opened using this memory buffer.

encoding: encoding used to encode filename string into bytes. Default is None (sys.getfilesystemencoding() is used).

parallel: open for parallel access using MPI (requires mpi4py and parallel-enabled netcdf-c and hdf5 libraries). Default is False. If True, comm and info kwargs may also be specified.

comm: MPI_Comm object for parallel access. Default None, which means MPI_COMM_WORLD will be used. Ignored if parallel=False.

info: MPI_Info object for parallel access. Default None, which means MPI_INFO_NULL will be used. Ignored if parallel=False.

auto_complex: if True, complex number types are automatically converted to and from their netCDF representation (default False).
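
Example usage (a minimal sketch; the file name example.nc is illustrative):

>>> from netCDF4 import Dataset
>>> with Dataset("example.nc", "w", format="NETCDF4") as nc:
...     nc.title = "demo"  # global attribute
>>> nc = Dataset("example.nc")  # mode="r" is the default
>>> print(nc.data_model, nc.title)
NETCDF4 demo
>>> nc.close()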

Subclasses

  • netCDF4._netCDF4.Group
  • netCDF4._netCDF4.MFDataset

Static methods

def fromcdl(cdlfilename, ncfilename=None, mode='a', format='NETCDF4')

fromcdl(cdlfilename, ncfilename=None, mode='a',format='NETCDF4')

call ncgen via subprocess to create Dataset from CDL text representation. Requires ncgen to be installed and in $PATH.

cdlfilename: CDL file.

ncfilename: netCDF file to create. If not given, the CDL filename with the suffix replaced by .nc is used.

mode: Access mode to open Dataset (Default 'a').

format: underlying file format to use (one of 'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_CLASSIC', 'NETCDF3_64BIT_OFFSET' or 'NETCDF3_64BIT_DATA'). Default 'NETCDF4'.

Dataset instance for ncfilename is returned.

Instance variables

var auto_complex
var cmptypes
var data_model
var dimensions
var disk_format
var enumtypes
var file_format
var groups
var keepweakref
var name

string name of Group instance

var parent
var path
var variables
var vltypes

Methods

def close(self)

close(self)

Close the Dataset.

def createCompoundType(self, datatype, datatype_name)

createCompoundType(self, datatype, datatype_name)

Creates a new compound data type named datatype_name from the numpy dtype object datatype.

Note: If the new compound data type contains other compound data types (i.e. it is a 'nested' compound type, where not all of the elements are homogeneous numeric data types), then the 'inner' compound types must be created first.

The return value is the CompoundType class instance describing the new datatype.
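
For example, a minimal sketch (the type and variable names are illustrative):

>>> import numpy as np
>>> from netCDF4 import Dataset
>>> nc = Dataset("compound.nc", "w")
>>> dt = np.dtype([("lat", "f4"), ("lon", "f4")])
>>> point_t = nc.createCompoundType(dt, "point_t")
>>> d = nc.createDimension("n", 2)
>>> v = nc.createVariable("pts", point_t, ("n",))
>>> v[:] = np.array([(0.0, 0.0), (45.0, -90.0)], dtype=dt)
>>> nc.close()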

def createDimension(self, dimname, size=None)

createDimension(self, dimname, size=None)

Creates a new dimension with the given dimname and size.

size must be a positive integer or None, which stands for "unlimited" (default is None). Specifying a size of 0 also results in an unlimited dimension. The return value is the Dimension class instance describing the new dimension. To determine the current maximum size of the dimension, use the len function on the Dimension instance. To determine if a dimension is 'unlimited', use the Dimension.isunlimited() method of the Dimension instance.
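
For example:

>>> from netCDF4 import Dataset
>>> nc = Dataset("dims.nc", "w")
>>> time = nc.createDimension("time", None)  # unlimited
>>> lat = nc.createDimension("lat", 73)
>>> print(len(lat), lat.isunlimited(), time.isunlimited())
73 False True
>>> nc.close()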

def createEnumType(self, datatype, datatype_name, enum_dict)

createEnumType(self, datatype, datatype_name, enum_dict)

Creates a new Enum data type named datatype_name from a numpy integer dtype object datatype, and a python dictionary defining the enum fields and values.

The return value is the EnumType class instance describing the new datatype.
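
For example, a minimal sketch (the enum field names are illustrative):

>>> import numpy as np
>>> from netCDF4 import Dataset
>>> nc = Dataset("enum.nc", "w")
>>> cloud_t = nc.createEnumType(np.uint8, "cloud_t",
...                             {"clear": 0, "cumulus": 1, "stratus": 2})
>>> d = nc.createDimension("n", 3)
>>> v = nc.createVariable("cloud", cloud_t, ("n",))
>>> v[:] = np.array([0, 1, 2], dtype=np.uint8)
>>> nc.close()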

def createGroup(self, groupname)

createGroup(self, groupname)

Creates a new Group with the given groupname.

If groupname is specified as a path, using forward slashes as in unix to separate components, then intermediate groups will be created as necessary (analogous to mkdir -p in unix). For example, createGroup('/GroupA/GroupB/GroupC') will create GroupA, GroupA/GroupB, and GroupA/GroupB/GroupC, if they don't already exist. If the specified path describes a group that already exists, no error is raised.

The return value is a Group class instance.
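
For example:

>>> from netCDF4 import Dataset
>>> nc = Dataset("groups.nc", "w")
>>> g = nc.createGroup("/GroupA/GroupB")  # intermediate groups created as needed
>>> print(g.path)
/GroupA/GroupB
>>> nc.close()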

def createVLType(self, datatype, datatype_name)

createVLType(self, datatype, datatype_name)

Creates a new VLEN data type named datatype_name from a numpy dtype object datatype.

The return value is the VLType class instance describing the new datatype.
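
For example, a minimal sketch of a ragged integer array:

>>> import numpy as np
>>> from netCDF4 import Dataset
>>> nc = Dataset("vlen.nc", "w")
>>> vl_t = nc.createVLType(np.int32, "ragged_int")
>>> d = nc.createDimension("n", 2)
>>> v = nc.createVariable("ragged", vl_t, ("n",))
>>> v[0] = np.array([1, 2, 3], dtype=np.int32)
>>> v[1] = np.array([4], dtype=np.int32)
>>> nc.close()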

def createVariable(self,
varname,
datatype,
dimensions=(),
compression=None,
zlib=False,
complevel=4,
shuffle=True,
szip_coding='nn',
szip_pixels_per_block=8,
blosc_shuffle=1,
fletcher32=False,
contiguous=False,
chunksizes=None,
endian='native',
least_significant_digit=None,
significant_digits=None,
quantize_mode='BitGroom',
fill_value=None,
chunk_cache=None)

createVariable(self, varname, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None, szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1, endian='native', least_significant_digit=None, significant_digits=None, quantize_mode='BitGroom', fill_value=None, chunk_cache=None)

Creates a new variable with the given varname, datatype, and dimensions. If dimensions are not given, the variable is assumed to be a scalar.

If varname is specified as a path, using forward slashes as in unix to separate components, then intermediate groups will be created as necessary. For example, createVariable('/GroupA/GroupB/VarC', float, ('x','y')) will create groups GroupA and GroupA/GroupB, plus the variable GroupA/GroupB/VarC, if the preceding groups don't already exist.

The datatype can be a numpy datatype object, or a string that describes a numpy dtype object (like the dtype.str attribute of a numpy array). Supported specifiers include: 'S1' or 'c' (NC_CHAR), 'i1' or 'b' or 'B' (NC_BYTE), 'u1' (NC_UBYTE), 'i2' or 'h' or 's' (NC_SHORT), 'u2' (NC_USHORT), 'i4' or 'i' or 'l' (NC_INT), 'u4' (NC_UINT), 'i8' (NC_INT64), 'u8' (NC_UINT64), 'f4' or 'f' (NC_FLOAT), 'f8' or 'd' (NC_DOUBLE). datatype can also be a CompoundType instance (for a structured, or compound array), a VLType instance (for a variable-length array), or the python str builtin (for a variable-length string array). Numpy string and unicode datatypes with length greater than one are aliases for str.

Data from netCDF variables is presented to python as numpy arrays with the corresponding data type.

dimensions must be a tuple containing Dimension instances and/or dimension names (strings) that have been defined previously using Dataset.createDimension(). The default value is an empty tuple, which means the variable is a scalar.

If the optional keyword argument compression is set, the data will be compressed in the netCDF file using the specified compression algorithm. Currently zlib, szip, zstd, bzip2, blosc_lz, blosc_lz4, blosc_lz4hc, blosc_zlib and blosc_zstd are supported. Default is None (no compression). All of the compressors except zlib and szip use the HDF5 plugin architecture.

If the optional keyword zlib is True, the data will be compressed in the netCDF file using zlib compression (default False). The use of this option is deprecated in favor of compression='zlib'.

The optional keyword complevel is an integer between 0 and 9 describing the level of compression desired (default 4). Ignored if compression=None. A value of zero disables compression.

If the optional keyword shuffle is True (default), the HDF5 shuffle filter will be applied before compressing the data with zlib. This significantly improves compression. Ignored if zlib=False.

The optional kwarg blosc_shuffle is ignored unless the blosc compressor is used. blosc_shuffle can be 0 (no shuffle), 1 (byte-wise shuffle) or 2 (bit-wise shuffle). Default is 1.

The optional kwargs szip_coding and szip_pixels_per_block are ignored unless the szip compressor is used. szip_coding can be ec (entropy coding) or nn (nearest neighbor coding). Default is nn. szip_pixels_per_block can be 4, 8, 16 or 32 (default 8).

If the optional keyword fletcher32 is True, the Fletcher32 HDF5 checksum algorithm is activated to detect errors. Default False.

If the optional keyword contiguous is True, the variable data is stored contiguously on disk. Default False. Setting to True for a variable with an unlimited dimension will trigger an error. Fixed size variables (with no unlimited dimension) with no compression filters are contiguous by default.

The optional keyword chunksizes can be used to manually specify the HDF5 chunksizes for each dimension of the variable. A detailed discussion of HDF chunking and I/O performance is available in the HDF5 documentation, and the default chunking scheme in the netcdf-c library is described in the netCDF documentation. Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file. chunksizes cannot be set if contiguous=True.

The optional keyword endian can be used to control whether the data is stored in little or big endian format on disk. Possible values are little, big or native (default). The library will automatically handle endian conversions when the data is read, but if the data is always going to be read on a computer with the opposite format as the one used to create the file, there may be some performance advantage to be gained by setting the endian-ness.

The optional keyword fill_value can be used to override the default netCDF _FillValue (the value that the variable gets filled with before any data is written to it, defaults given in the dict netCDF4.default_fillvals). If fill_value is set to False, then the variable is not pre-filled.

If the optional keyword parameters least_significant_digit or significant_digits are specified, variable data will be truncated (quantized). In conjunction with compression='zlib' this produces 'lossy', but significantly more efficient compression. For example, if least_significant_digit=1, data will be quantized using numpy.around(scale*data)/scale, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). From the PSL metadata conventions: "least_significant_digit – power of ten of the smallest decimal place in unpacked data that is a reliable value." Default is None, or no quantization, or 'lossless' compression. If significant_digits=3 then the data will be quantized so that three significant digits are retained, independent of the floating point exponent. The keyword argument quantize_mode controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. This significant_digits kwarg is only available with netcdf-c >= 4.9.0, and only works with NETCDF4 or NETCDF4_CLASSIC formatted files.

When creating variables in a NETCDF4 or NETCDF4_CLASSIC formatted file, HDF5 creates something called a 'chunk cache' for each variable. The default size of the chunk cache may be large enough to completely fill available memory when creating thousands of variables. The optional keyword chunk_cache allows you to reduce (or increase) the size of the default chunk cache when creating a variable. The setting only persists as long as the Dataset is open - you can use the set_var_chunk_cache method to change it the next time the Dataset is opened. Warning - messing with this parameter can seriously degrade performance.

The return value is the Variable class instance describing the new variable.

A list of names corresponding to netCDF variable attributes can be obtained with the Variable method Variable.ncattrs(). A dictionary containing all the netCDF attribute name/value pairs is provided by the __dict__ attribute of a Variable instance.

Variable instances behave much like array objects. Data can be assigned to or retrieved from a variable with indexing and slicing operations on the Variable instance. A Variable instance has six standard attributes: dimensions, dtype, shape, ndim, name and least_significant_digit. Application programs should never modify these attributes. The dimensions attribute is a tuple containing the names of the dimensions associated with this variable. The dtype attribute is a string describing the variable's data type (i4, f8, S1, etc). The shape attribute is a tuple describing the current sizes of all the variable's dimensions. The name attribute is a string containing the name of the Variable instance. The least_significant_digit attribute describes the power of ten of the smallest decimal place in the data that contains a reliable value; data is truncated to this decimal place when it is assigned to the Variable instance. The ndim attribute is the number of variable dimensions.
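
For example, a minimal sketch of a compressed, quantized variable (compression='zlib' requires netCDF4 >= 1.6; the names are illustrative):

>>> import numpy as np
>>> from netCDF4 import Dataset
>>> nc = Dataset("var.nc", "w")
>>> d1 = nc.createDimension("time", None)
>>> d2 = nc.createDimension("lat", 4)
>>> temp = nc.createVariable("temp", "f4", ("time", "lat"),
...                          compression="zlib", least_significant_digit=1)
>>> temp.units = "K"
>>> temp[0, :] = [280.0, 281.1, 282.2, 283.3]
>>> print(temp.shape)
(1, 4)
>>> nc.close()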

def delncattr(self, name)

delncattr(self,name)

delete a netCDF dataset or group attribute. Use if you need to delete a netCDF attribute with the same name as one of the reserved python attributes.

def filepath(self, encoding=None)

filepath(self,encoding=None)

Get the file system path (or the opendap URL) which was used to open/create the Dataset. Requires netcdf >= 4.1.2. The path is decoded into a string using sys.getfilesystemencoding() by default; this can be changed using the encoding kwarg.

def get_variables_by_attributes(self, **kwargs)

get_variables_by_attributes(self, **kwargs)

Returns a list of variables that match specific conditions.

Can pass in key=value parameters and variables are returned that contain all of the matches. For example,

>>> # Get variables with x-axis attribute.
>>> vs = nc.get_variables_by_attributes(axis='X')
>>> # Get variables with matching "standard_name" attribute
>>> vs = nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')

Can pass in key=callable parameter and variables are returned if the callable returns True. The callable should accept a single parameter, the attribute value. None is given as the attribute value when the attribute does not exist on the variable. For example,

>>> # Get Axis variables
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
>>> # Get variables that don't have an "axis" attribute
>>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None)
>>> # Get variables that have a "grid_mapping" attribute
>>> vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)

def getncattr(self, name, encoding='utf-8')

getncattr(self,name)

retrieve a netCDF dataset or group attribute. Use if you need to get a netCDF attribute with the same name as one of the reserved python attributes.

optional kwarg encoding can be used to specify the character encoding of a string attribute (default is utf-8).

def has_blosc_filter(self)

has_blosc_filter(self)

returns True if the blosc compression filter is available.

def has_bzip2_filter(self)

has_bzip2_filter(self)

returns True if the bzip2 compression filter is available.

def has_szip_filter(self)

has_szip_filter(self)

returns True if the szip compression filter is available.

def has_zstd_filter(self)

has_zstd_filter(self)

returns True if the zstd compression filter is available.

def isopen(self)

isopen(self)

Is the Dataset open or closed?

def ncattrs(self)

ncattrs(self)

return netCDF global attribute names for this Dataset or Group in a list.

def renameAttribute(self, oldname, newname)

renameAttribute(self, oldname, newname)

rename a Dataset or Group attribute named oldname to newname.

def renameDimension(self, oldname, newname)

renameDimension(self, oldname, newname)

rename a Dimension named oldname to newname.

def renameGroup(self, oldname, newname)

renameGroup(self, oldname, newname)

rename a Group named oldname to newname (requires netcdf >= 4.3.1).

def renameVariable(self, oldname, newname)

renameVariable(self, oldname, newname)

rename a Variable named oldname to newname

def set_always_mask(self, value)

set_always_mask(self, True_or_False)

Call Variable.set_always_mask() for all variables contained in this Dataset or Group, as well as for all variables in all its subgroups.

True_or_False: Boolean determining if automatic conversion of masked arrays with no missing values to regular numpy arrays shall be applied for all variables. Default True. Set to False to restore the default behaviour in versions prior to 1.4.1 (numpy array returned unless missing values are present, otherwise masked array returned).

Note: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour.

def set_auto_chartostring(self, value)

set_auto_chartostring(self, True_or_False)

Call Variable.set_auto_chartostring() for all variables contained in this Dataset or Group, as well as for all variables in all its subgroups.

True_or_False: Boolean determining if automatic conversion of all character arrays <-> string arrays should be performed for character variables (variables of type NC_CHAR or S1) with the _Encoding attribute set.

Note: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour.

def set_auto_mask(self, value)

set_auto_mask(self, True_or_False)

Call Variable.set_auto_mask() for all variables contained in this Dataset or Group, as well as for all variables in all its subgroups. Only affects Variables with primitive or enum types (not compound or vlen Variables).

True_or_False: Boolean determining if automatic conversion to masked arrays shall be applied for all variables.

Note: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour.

def set_auto_maskandscale(self, value)

set_auto_maskandscale(self, True_or_False)

Call Variable.set_auto_maskandscale() for all variables contained in this Dataset or Group, as well as for all variables in all its subgroups.

True_or_False: Boolean determining if automatic conversion to masked arrays and variable scaling shall be applied for all variables.

Note: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour.

def set_auto_scale(self, value)

set_auto_scale(self, True_or_False)

Call Variable.set_auto_scale() for all variables contained in this Dataset or Group, as well as for all variables in all its subgroups.

True_or_False: Boolean determining if automatic variable scaling shall be applied for all variables.

Note: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour.

def set_fill_off(self)

set_fill_off(self)

Sets the fill mode for a Dataset open for writing to off.

This will prevent the data from being pre-filled with fill values, which may result in some performance improvements. However, you must then make sure the data is actually written before being read.

def set_fill_on(self)

set_fill_on(self)

Sets the fill mode for a Dataset open for writing to on.

This causes data to be pre-filled with fill values. The fill values can be controlled by the variable's _FillValue attribute, but it is usually sufficient to use the netCDF default _FillValue (defined separately for each variable type). The default behavior of the netCDF library corresponds to set_fill_on. Data which are equal to the _FillValue indicate that the variable was created, but never written to.

def set_ncstring_attrs(self, value)

set_ncstring_attrs(self, True_or_False)

Call Variable.set_ncstring_attrs() for all variables contained in this Dataset or Group, as well as for all its subgroups and their variables.

True_or_False: Boolean determining if all string attributes are created as variable-length NC_STRINGs (if True), or if ascii text attributes are stored as NC_CHARs (if False; default).

Note: Calling this function only affects newly created attributes of existing (sub-) groups and their variables.

def setncattr(self, name, value)

setncattr(self,name,value)

set a netCDF dataset or group attribute using name,value pair. Use if you need to set a netCDF attribute with the same name as one of the reserved python attributes.

def setncattr_string(self, name, value)

setncattr_string(self,name,value)

set a netCDF dataset or group string attribute using name,value pair. Use if you need to ensure that a netCDF attribute is created with type NC_STRING if the file format is NETCDF4.

def setncatts(self, attdict)

setncatts(self,attdict)

set a bunch of netCDF dataset or group attributes at once using a python dictionary. This may be faster when setting a lot of attributes for a NETCDF3 formatted file, since nc_redef/nc_enddef is not called in between setting each attribute.
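
For example:

>>> from netCDF4 import Dataset
>>> nc = Dataset("attrs.nc", "w")
>>> nc.setncatts({"title": "demo", "history": "created for illustration"})
>>> print(sorted(nc.ncattrs()))
['history', 'title']
>>> nc.close()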

def sync(self)

sync(self)

Writes all buffered data in the Dataset to the disk file.

def tocdl(self, coordvars=False, data=False, outfile=None)

tocdl(self, coordvars=False, data=False, outfile=None)

call ncdump via subprocess to create CDL text representation of Dataset. Requires ncdump to be installed and in $PATH.

coordvars: include coordinate variable data (via ncdump -c). Default False

data: if True, write out variable data (Default False).

outfile: If not None, file to output ncdump to. Default is to return a string.

class Dimension (...)

A netCDF Dimension is used to describe the coordinates of a Variable. See Dimension for more details.

The current maximum size of a Dimension instance can be obtained by calling the python len function on the Dimension instance. The Dimension.isunlimited() method of a Dimension instance can be used to determine if the dimension is unlimited.

Read-only class variables:

name: String name, used when creating a Variable with Dataset.createVariable().

size: Current Dimension size (same as len(d), where d is a Dimension instance).

__init__(self, group, name, size=None)

Dimension constructor.

group: Group instance to associate with dimension.

name: Name of the dimension.

size: Size of the dimension. None or 0 means unlimited. (Default None).

Note: Dimension instances should be created using the Dataset.createDimension() method of a Group or Dataset instance, not using Dimension directly.

Instance variables

var name

string name of Dimension instance

var size

current size of Dimension (calls len on Dimension instance)

Methods

def group(self)

group(self)

return the group that this Dimension is a member of.

def isunlimited(self)

isunlimited(self)

returns True if the Dimension instance is unlimited, False otherwise.

class EnumType (...)

An EnumType instance is used to describe an Enum data type, and can be passed to the Dataset.createVariable() method of a Dataset or Group instance. See EnumType for more details.

The instance variables dtype, name and enum_dict should not be modified by the user.

__init__(group, datatype, datatype_name, enum_dict)

EnumType constructor.

group: Group instance to associate with the Enum datatype.

datatype: A numpy integer dtype object describing the base type for the Enum.

datatype_name: a Python string containing a description of the Enum data type.

enum_dict: a Python dictionary containing the Enum field/value pairs.

Note: EnumType instances should be created using the Dataset.createEnumType() method of a Dataset or Group instance, not using this class directly.

Instance variables

var dtype
var enum_dict
var name

class Group (...)

Groups define a hierarchical namespace within a netCDF file. They are analogous to directories in a unix filesystem. Each Group behaves like a Dataset within a Dataset, and can contain its own variables, dimensions and attributes (and other Groups). See Group for more details.

Group inherits from Dataset, so all the Dataset class methods and variables are available to a Group instance (except the close method).

Additional read-only class variables:

name: String describing the group name.

__init__(self, parent, name) Group constructor.

parent: Group instance for the parent group. If being created in the root group, use a Dataset instance.

name: Name of the group.

Note: Group instances should be created using the Dataset.createGroup() method of a Dataset instance, or another Group instance, not using this class directly.

Ancestors

  • netCDF4._netCDF4.Dataset

Methods

def close(self)

close(self)

overrides Dataset close method which does not apply to Group instances, raises OSError.

class MFDataset (files, check=False, aggdim=None, exclude=[], master_file=None)

Class for reading multi-file netCDF Datasets, making variables spanning multiple files appear as if they were in one file. Datasets must be in NETCDF4_CLASSIC, NETCDF3_CLASSIC, NETCDF3_64BIT_OFFSET or NETCDF3_64BIT_DATA format (NETCDF4 Datasets won't work).

Adapted from pycdf by Andre Gosselin.

Example usage (See MFDataset for more details):

>>> import numpy as np
>>> # create a series of netCDF files with a variable sharing
>>> # the same unlimited dimension.
>>> for nf in range(10):
...     with Dataset("mftest%s.nc" % nf, "w", format='NETCDF4_CLASSIC') as f:
...         f.createDimension("x",None)
...         x = f.createVariable("x","i",("x",))
...         x[0:10] = np.arange(nf*10,10*(nf+1))
>>> # now read all those files in at once, in one Dataset.
>>> f = MFDataset("mftest*nc")
>>> print(f.variables["x"][:])
[ 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 96 97 98 99]

__init__(self, files, check=False, aggdim=None, exclude=[], master_file=None)

Open a Dataset spanning multiple files, making it look as if it was a single file. Variables in the list of files that share the same dimension (specified with the keyword aggdim) are aggregated. If aggdim is not specified, the unlimited dimension is aggregated. Currently, aggdim must be the leftmost (slowest varying) dimension of each of the variables to be aggregated.

files: either a sequence of netCDF files or a string with a wildcard (converted to a sorted list of files using glob). If the master_file kwarg is not specified, the first file in the list will become the "master" file, defining all the variables with an aggregation dimension which may span subsequent files. Attribute access returns attributes only from the "master" file. The files are always opened in read-only mode.

check: True if you want to do consistency checking to ensure the correct variables structure for all of the netcdf files. Checking makes the initialization of the MFDataset instance much slower. Default is False.

aggdim: The name of the dimension to aggregate over (must be the leftmost dimension of each of the variables to be aggregated). If None (default), aggregate over the unlimited dimension.

exclude: A list of variable names to exclude from aggregation. Default is an empty list.

master_file: file to use as "master file", defining all the variables with an aggregation dimension and all global attributes.

Ancestors

  • netCDF4._netCDF4.Dataset

Methods

def close(self)

close(self)

close all the open files.

def isopen(self)

isopen(self)

True if all files are open, False otherwise.

def ncattrs(self)

ncattrs(self)

return the netcdf attribute names from the master file.

class MFTime (time, units=None, calendar=None)

Class providing an interface to a MFDataset time Variable by imposing a unique common time unit and/or calendar to all files.

Example usage (See MFTime for more details):

>>> import numpy as np
>>> f1 = Dataset("mftest_1.nc","w", format="NETCDF4_CLASSIC")
>>> f2 = Dataset("mftest_2.nc","w", format="NETCDF4_CLASSIC")
>>> f1.createDimension("time",None)
>>> f2.createDimension("time",None)
>>> t1 = f1.createVariable("time","i",("time",))
>>> t2 = f2.createVariable("time","i",("time",))
>>> t1.units = "days since 2000-01-01"
>>> t2.units = "days since 2000-02-01"
>>> t1.calendar = "standard"
>>> t2.calendar = "standard"
>>> t1[:] = np.arange(31)
>>> t2[:] = np.arange(30)
>>> f1.close()
>>> f2.close()
>>> # Read the two files in at once, in one Dataset.
>>> f = MFDataset("mftest_*nc")
>>> t = f.variables["time"]
>>> print(t.units)
days since 2000-01-01
>>> print(t[32])  # The value written in the file, inconsistent with the MF time units.
1
>>> T = MFTime(t)
>>> print(T[32])
32

__init__(self, time, units=None, calendar=None)

Create a time Variable with units consistent across a multifile dataset.

time: Time variable from a MFDataset.

units: Time units, for example, 'days since 1979-01-01'. If None, use the units from the master variable.

calendar: Calendar overload to use across all files, for example, 'standard' or 'gregorian'. If None, check that the calendar attribute is present on each variable and that its values are unique across files, raising a ValueError otherwise.

Ancestors

  • netCDF4._netCDF4._Variable

class VLType (...)

A VLType instance is used to describe a variable length (VLEN) data type, and can be passed to the Dataset.createVariable() method of a Dataset or Group instance. See VLType for more details.

The instance variables dtype and name should not be modified by the user.

__init__(group, datatype, datatype_name)

VLType constructor.

group: Group instance to associate with the VLEN datatype.

datatype: A numpy dtype object describing the component type for the variable length array.

datatype_name: a Python string containing a description of the VLEN data type.

Note: VLType instances should be created using the Dataset.createVLType() method of a Dataset or Group instance, not using this class directly.

Instance variables

var dtype
var name

class Variable (...)

A netCDF Variable is used to read and write netCDF data. They are analogous to numpy array objects. See Variable for more details.

A list of attribute names corresponding to netCDF attributes defined for the variable can be obtained with the Variable.ncattrs() method. These attributes can be created by assigning to an attribute of the Variable instance. A dictionary containing all the netCDF attribute name/value pairs is provided by the __dict__ attribute of a Variable instance.

The following class variables are read-only:

dimensions: A tuple containing the names of the dimensions associated with this variable.

dtype: A numpy dtype object describing the variable's data type.

ndim: The number of variable dimensions.

shape: A tuple with the current shape (length of all dimensions).

scale: If True, scale_factor and add_offset are applied, and signed integer data is automatically converted to unsigned integer data if the _Unsigned attribute is set to "true" or "True". Default is True, can be reset using Variable.set_auto_scale() and Variable.set_auto_maskandscale() methods.

mask: If True, data is automatically converted to/from masked arrays when missing values or fill values are present. Default is True, can be reset using Variable.set_auto_mask() and Variable.set_auto_maskandscale() methods. Only relevant for Variables with primitive or enum types (ignored for compound and vlen Variables).

chartostring: If True, data is automatically converted to/from character arrays to string arrays when the _Encoding variable attribute is set. Default is True, can be reset using the Variable.set_auto_chartostring() method.

least_significant_digit: Describes the power of ten of the smallest decimal place in the data that contains a reliable value. Data is truncated to this decimal place when it is assigned to the Variable instance. If None, the data is not truncated.

significant_digits: New in version 1.6.0. Describes the number of significant digits in the data that contains a reliable value. Data is truncated to retain this number of significant digits when it is assigned to the Variable instance. If None, the data is not truncated. Only available with netcdf-c >= 4.9.0, and only works with NETCDF4 or NETCDF4_CLASSIC formatted files. The number of significant digits used in the quantization of variable data can be obtained using the Variable.significant_digits method. Default None - no quantization done.

quantize_mode: New in version 1.6.0. Controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. Ignored if significant_digits not specified. If 'BitRound' is used, then significant_digits is interpreted as binary (not decimal) digits.

__orthogonal_indexing__: Always True. Indicates to client code that the object supports 'orthogonal indexing', which means that slices that are 1d arrays or lists slice along each dimension independently. This behavior is similar to Fortran or Matlab, but different than numpy.

datatype: numpy data type (for primitive data types) or VLType/CompoundType instance (for compound or vlen data types).

name: String name.

size: The number of stored elements.

__init__(self, group, name, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1, fletcher32=False, contiguous=False, chunksizes=None, endian='native', least_significant_digit=None,fill_value=None,chunk_cache=None)

Variable constructor.

group: Group or Dataset instance to associate with variable.

name: Name of the variable.

datatype: Variable data type. Can be specified by providing a numpy dtype object, or a string that describes a numpy dtype object. Supported values, corresponding to the str attribute of numpy dtype objects, include 'f4' (32-bit floating point), 'f8' (64-bit floating point), 'i4' (32-bit signed integer), 'i2' (16-bit signed integer), 'i8' (64-bit signed integer), 'i1' (8-bit signed integer), 'u1' (8-bit unsigned integer), 'u2' (16-bit unsigned integer), 'u4' (32-bit unsigned integer), 'u8' (64-bit unsigned integer), or 'S1' (single-character string). For compatibility with Scientific.IO.NetCDF, the old Numeric single character typecodes can also be used ('f' instead of 'f4', 'd' instead of 'f8', 'h' or 's' instead of 'i2', 'b' or 'B' instead of 'i1', 'c' instead of 'S1', and 'i' or 'l' instead of 'i4'). datatype can also be a CompoundType instance (for a structured, or compound array), a VLType instance (for a variable-length array), or the python str builtin (for a variable-length string array). Numpy string and unicode datatypes with length greater than one are aliases for str.

dimensions: a tuple containing the variable's Dimension instances (defined previously with createDimension). Default is an empty tuple which means the variable is a scalar (and therefore has no dimensions).

compression: compression algorithm to use. Currently zlib, szip, zstd, bzip2, blosc_lz, blosc_lz4, blosc_lz4hc, blosc_zlib and blosc_zstd are supported. Default is None (no compression). All of the compressors except zlib and szip use the HDF5 plugin architecture.

zlib: if True, data assigned to the Variable instance is compressed on disk. Default False. Deprecated - use compression='zlib' instead.

complevel: the level of compression to use (1 is the fastest, but poorest compression, 9 is the slowest but best compression). Default 4. Ignored if compression is None or 'szip'. A value of 0 disables compression.

shuffle: if True, the HDF5 shuffle filter is applied to improve zlib compression. Default True. Ignored unless compression = 'zlib'.

blosc_shuffle: shuffle filter inside blosc compressor (only relevant if compression kwarg set to one of the blosc compressors). Can be 0 (no blosc shuffle), 1 (bytewise shuffle) or 2 (bitwise shuffle). Default is 1. Ignored if blosc compressor not used.

szip_coding: szip coding method. Can be ec (entropy coding) or nn (nearest neighbor coding). Default is nn. Ignored if szip compressor not used.

szip_pixels_per_block: Can be 4,8,16 or 32 (Default 8). Ignored if szip compressor not used.

fletcher32: if True (default False), the Fletcher32 checksum algorithm is used for error detection.

contiguous: if True (default False), the variable data is stored contiguously on disk. Setting to True for a variable with an unlimited dimension will trigger an error. Fixed size variables (with no unlimited dimension) with no compression filters are contiguous by default.

chunksizes: Can be used to specify the HDF5 chunksizes for each dimension of the variable. A detailed discussion of HDF chunking and I/O performance is available in the HDF5 documentation, and the default chunking scheme in the netcdf-c library is described in the netCDF documentation. Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file. chunksizes cannot be set if contiguous=True.

endian: Can be used to control whether the data is stored in little or big endian format on disk. Possible values are little, big or native (default). The library will automatically handle endian conversions when the data is read, but if the data is always going to be read on a computer with the opposite format as the one used to create the file, there may be some performance advantage to be gained by setting the endian-ness. For netCDF 3 files (that don't use HDF5), only endian='native' is allowed.

The compression, zlib, complevel, shuffle, fletcher32, contiguous and chunksizes keywords are silently ignored for netCDF 3 files that do not use HDF5.

least_significant_digit: If this or significant_digits are specified, variable data will be truncated (quantized). In conjunction with compression='zlib' this produces 'lossy', but significantly more efficient compression. For example, if least_significant_digit=1, data will be quantized using numpy.around(scale*data)/scale, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). Default is None, or no quantization.

significant_digits: New in version 1.6.0. As described for least_significant_digit except the number of significant digits retained is prescribed independent of the floating point exponent. Default None - no quantization done.

quantize_mode: New in version 1.6.0. Controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. Ignored if significant_digits not specified. If 'BitRound' is used, then significant_digits is interpreted as binary (not decimal) digits.

fill_value: If specified, the default netCDF fill value (the value that the variable gets filled with before any data is written to it) is replaced with this value, and the _FillValue attribute is set. If fill_value is set to False, then the variable is not pre-filled. The default netCDF fill values can be found in the dictionary netCDF4.default_fillvals. If not set, the default fill value will be used but no _FillValue attribute will be created (this is the default behavior of the netcdf-c library). If you want to use the default fill value, but have the _FillValue attribute set, use fill_value='default' (note - this only works for primitive data types). Variable.get_fill_value() can be used to retrieve the fill value, even if the _FillValue attribute is not set.

chunk_cache: If specified, sets the chunk cache size for this variable. Persists as long as Dataset is open. Use set_var_chunk_cache to change it when Dataset is re-opened.

Note: Variable instances should be created using the Dataset.createVariable() method of a Dataset or Group instance, not using this class directly.

Instance variables

var always_mask
var auto_complex
var chartostring
var datatype

numpy data type (for primitive data types) or VLType/CompoundType/EnumType instance (for compound, vlen or enum data types)

var dimensions

get the variable's dimension names

var dtype
var mask
var name

string name of Variable instance

var ndim
var scale
var shape

find current sizes of all variable dimensions

var size

Return the number of stored elements.

Methods

def assignValue(self, val)

assignValue(self, val)

assign a value to a scalar variable. Provided for compatibility with Scientific.IO.NetCDF, can also be done by assigning to an Ellipsis slice ([…]).

def chunking(self)

chunking(self)

return variable chunking information. If the dataset is defined to be contiguous (and hence there is no chunking) the word 'contiguous' is returned. Otherwise, a sequence with the chunksize for each dimension is returned.

def delncattr(self, name)

delncattr(self,name)

delete a netCDF variable attribute. Use if you need to delete a netCDF attribute with the same name as one of the reserved python attributes.

def endian(self)

endian(self)

return endian-ness (little,big,native) of variable (as stored in HDF5 file).

def filters(self)

filters(self)

return dictionary containing HDF5 filter parameters.

def getValue(self)

getValue(self)

get the value of a scalar variable. Provided for compatibility with Scientific.IO.NetCDF, can also be done by slicing with an Ellipsis ([…]).

def get_dims(self)

get_dims(self)

return a tuple of Dimension instances associated with this Variable.

def get_fill_value(self)

get_fill_value(self)

return the fill value associated with this Variable (returns None if data is not pre-filled). This works even if the default fill value was used and no _FillValue attribute exists.
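
For example (a sketch assuming 'masked.nc' contains a variable 'v' created with fill_value=-999.0)::

    from netCDF4 import Dataset

    nc = Dataset('masked.nc', 'r')    # hypothetical existing file
    print(nc['v'].get_fill_value())   # -> -999.0
    nc.close()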

def get_var_chunk_cache(self)

get_var_chunk_cache(self)

return variable chunk cache information in a tuple (size,nelems,preemption). See netcdf C library documentation for nc_get_var_chunk_cache for details.

def getncattr(self, name, encoding='utf-8')

getncattr(self,name,encoding='utf-8')

retrieve a netCDF variable attribute. Use if you need to get a netCDF attribute with the same name as one of the reserved python attributes.

The optional kwarg encoding can be used to specify the character encoding of a string attribute (default is utf-8).
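
These accessors matter when an attribute name collides with a reserved python attribute such as name or shape (a minimal sketch; names are illustrative)::

    from netCDF4 import Dataset

    nc = Dataset('attrs.nc', 'w')     # hypothetical file
    nc.createDimension('x', 1)
    v = nc.createVariable('v', 'f4', ('x',))
    v.setncattr('shape', 'lat/lon grid')  # plain v.shape = ... would collide
    print(v.getncattr('shape'))           # -> 'lat/lon grid'
    print(v.ncattrs())                    # -> ['shape']
    v.delncattr('shape')
    nc.close()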

def group(self)

group(self)

return the group that this Variable is a member of.

def ncattrs(self)

ncattrs(self)

return netCDF attribute names for this Variable in a list.

def quantization(self)

quantization(self)

return the number of significant digits and the algorithm used in quantization. Returns None if quantization is not active.

def renameAttribute(self, oldname, newname)

renameAttribute(self, oldname, newname)

rename a Variable attribute named oldname to newname.

def set_always_mask(self, always_mask)

set_always_mask(self,always_mask)

turn on or off conversion of data without missing values to regular numpy arrays.

always_mask is a Boolean determining if automatic conversion of masked arrays with no missing values to regular numpy arrays shall be applied. Default is True. Set to False to restore the default behaviour in versions prior to 1.4.1 (numpy array returned unless missing values are present, otherwise masked array returned).

def set_auto_chartostring(self, chartostring)

set_auto_chartostring(self,chartostring)

turn on or off automatic conversion of character variable data to and from numpy fixed length string arrays when the _Encoding variable attribute is set.

If chartostring is set to True, when data is read from a character variable (dtype = S1) that has an _Encoding attribute, it is converted to a numpy fixed length unicode string array (dtype = UN, where N is the length of the rightmost dimension of the variable). The value of _Encoding is the unicode encoding that is used to decode the bytes into strings.

When numpy string data is written to a variable it is converted back to individual bytes, with the number of bytes in each string equalling the rightmost dimension of the variable.

The default value of chartostring is True (automatic conversions are performed).
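
The conversion looks like this for a small character variable (a sketch; the names and the encoding are illustrative)::

    import numpy as np
    from netCDF4 import Dataset

    nc = Dataset('strings.nc', 'w')   # hypothetical file
    nc.createDimension('n', 2)
    nc.createDimension('nchar', 5)
    v = nc.createVariable('names', 'S1', ('n', 'nchar'))
    v._Encoding = 'ascii'             # enables the automatic conversion
    v[:] = np.array(['hello', 'world'], dtype='S5')
    print(v[:])                       # fixed-length string array, not raw bytes
    nc.close()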

def set_auto_mask(self, mask)

set_auto_mask(self,mask)

turn on or off automatic conversion of variable data to and from masked arrays.

If mask is set to True, when data is read from a variable it is converted to a masked array if any of the values are exactly equal to either the netCDF _FillValue or the value specified by the missing_value variable attribute. The fill_value of the masked array is set to the missing_value attribute (if it exists), otherwise the netCDF _FillValue attribute (which has a default value for each data type). If the variable has no missing_value attribute, the _FillValue is used instead. If the variable has valid_min/valid_max and missing_value attributes, data outside the specified range will be masked. When data is written to a variable, the masked array is converted back to a regular numpy array by replacing all the masked values with the missing_value attribute of the variable (if it exists). If the variable has no missing_value attribute, the _FillValue is used instead.

The default value of mask is True (automatic conversions are performed).
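
The effect is easiest to see on a variable with unwritten (pre-filled) elements (a sketch; names are illustrative)::

    from netCDF4 import Dataset

    nc = Dataset('masked.nc', 'w')    # hypothetical file
    nc.createDimension('x', 3)
    v = nc.createVariable('v', 'f4', ('x',), fill_value=-999.0)
    v[0] = 1.0                        # v[1] and v[2] keep the fill value
    print(v[:])                       # masked array: [1.0 -- --]
    v.set_auto_mask(False)
    print(v[:])                       # plain array: [1. -999. -999.]
    nc.close()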

def set_auto_maskandscale(self, maskandscale)

set_auto_maskandscale(self,maskandscale)

turn on or off automatic conversion of variable data to and from masked arrays, automatic packing/unpacking of variable data using scale_factor and add_offset attributes and automatic conversion of signed integer data to unsigned integer data if the _Unsigned attribute exists and is set to "true" (or "True").

If maskandscale is set to True, when data is read from a variable it is converted to a masked array if any of the values are exactly equal to either the netCDF _FillValue or the value specified by the missing_value variable attribute. The fill_value of the masked array is set to the missing_value attribute (if it exists), otherwise the netCDF _FillValue attribute (which has a default value for each data type). If the variable has no missing_value attribute, the _FillValue is used instead. If the variable has valid_min/valid_max and missing_value attributes, data outside the specified range will be masked. When data is written to a variable, the masked array is converted back to a regular numpy array by replacing all the masked values with the missing_value attribute of the variable (if it exists). If the variable has no missing_value attribute, the _FillValue is used instead.

If maskandscale is set to True, and the variable has a scale_factor or an add_offset attribute, then data read from that variable is unpacked using::

data = self.scale_factor*data + self.add_offset

When data is written to a variable it is packed using::

data = (data - self.add_offset)/self.scale_factor

If scale_factor is present but add_offset is missing, add_offset is assumed to be zero; if add_offset is present but scale_factor is missing, scale_factor is assumed to be one. For more information on how scale_factor and add_offset can be used to provide simple compression, see the PSL metadata conventions.

In addition, if maskandscale is set to True, and if the variable has an attribute _Unsigned set to "true", and the variable has a signed integer data type, a view to the data is returned with the corresponding unsigned integer data type. This convention is used by the netcdf-java library to save unsigned integer data in NETCDF3 or NETCDF4_CLASSIC files (since the NETCDF3 data model does not have unsigned integer data types).

The default value of maskandscale is True (automatic conversions are performed).
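
Packed data therefore round-trips transparently (a sketch; the scale/offset values are illustrative)::

    import numpy as np
    from netCDF4 import Dataset

    nc = Dataset('packed.nc', 'w')    # hypothetical file
    nc.createDimension('x', 3)
    t = nc.createVariable('t', 'i2', ('x',))
    t.scale_factor = 0.01
    t.add_offset = 273.15
    t[:] = np.array([273.15, 274.0, 275.5])  # packed to int16 on write
    print(t[:])                               # unpacked back to float on read
    nc.close()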

def set_auto_scale(self, scale)

set_auto_scale(self,scale)

turn on or off automatic packing/unpacking of variable data using scale_factor and add_offset attributes. Also turns on and off automatic conversion of signed integer data to unsigned integer data if the variable has an _Unsigned attribute set to "true" or "True".

If scale is set to True, and the variable has a scale_factor or an add_offset attribute, then data read from that variable is unpacked using::

data = self.scale_factor*data + self.add_offset

When data is written to a variable it is packed using::

data = (data - self.add_offset)/self.scale_factor

If scale_factor is present but add_offset is missing, add_offset is assumed to be zero; if add_offset is present but scale_factor is missing, scale_factor is assumed to be one. For more information on how scale_factor and add_offset can be used to provide simple compression, see the PSL metadata conventions.

In addition, if scale is set to True, and if the variable has an attribute _Unsigned set to "true", and the variable has a signed integer data type, a view to the data is returned with the corresponding unsigned integer datatype. This convention is used by the netcdf-java library to save unsigned integer data in NETCDF3 or NETCDF4_CLASSIC files (since the NETCDF3 data model does not have unsigned integer data types).

The default value of scale is True (automatic conversions are performed).

def set_collective(self, value)

set_collective(self,True_or_False)

turn on or off collective parallel IO access. Ignored if file is not open for parallel access.
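
In a parallel program this is toggled per variable after the file is opened with parallel=True (a sketch assuming an MPI-enabled build, mpi4py, and an existing file containing a variable 'var')::

    from mpi4py import MPI
    from netCDF4 import Dataset

    rank = MPI.COMM_WORLD.rank
    nc = Dataset('parallel.nc', 'a', parallel=True)  # hypothetical file
    v = nc['var']
    v.set_collective(True)   # all ranks must now take part in each write
    v[rank] = rank
    nc.close()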

def set_ncstring_attrs(self, ncstring_attrs)

set_ncstring_attrs(self,ncstring_attrs)

turn on or off creating NC_STRING string attributes.

If ncstring_attrs is set to True then text attributes will be variable-length NC_STRINGs.

The default value of ncstring_attrs is False (writing ascii text attributes as NC_CHAR).

def set_var_chunk_cache(self, size=None, nelems=None, preemption=None)

set_var_chunk_cache(self,size=None,nelems=None,preemption=None)

change variable chunk cache settings. See netcdf C library documentation for nc_set_var_chunk_cache for details.
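
The current settings can be inspected with get_var_chunk_cache and adjusted in place (a sketch assuming an existing chunked variable, e.g. 'b' from the earlier chunking sketch)::

    from netCDF4 import Dataset

    nc = Dataset('chunks.nc', 'r')    # hypothetical existing file
    v = nc['b']
    size, nelems, preemption = v.get_var_chunk_cache()
    v.set_var_chunk_cache(size=2 * size)  # e.g. double the cache size
    nc.close()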

def setncattr(self, name, value)

setncattr(self,name,value)

set a netCDF variable attribute using name,value pair. Use if you need to set a netCDF attribute with the same name as one of the reserved python attributes.

def setncattr_string(self, name, value)

setncattr_string(self,name,value)

set a netCDF variable string attribute using name,value pair. Use if you need to ensure that a netCDF attribute is created with type NC_STRING if the file format is NETCDF4. Use if you need to set an attribute to an array of variable-length strings.

def setncatts(self, attdict)

setncatts(self,attdict)

set a bunch of netCDF variable attributes at once using a python dictionary. This may be faster when setting a lot of attributes for a NETCDF3 formatted file, since nc_redef/nc_enddef is not called in between setting each attribute.
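
For example (a sketch; the attribute names and values are illustrative)::

    from netCDF4 import Dataset

    nc = Dataset('attrs.nc', 'a')     # hypothetical existing file
    v = nc['v']
    v.setncatts({'units': 'K', 'long_name': 'temperature', 'valid_min': 0.0})
    nc.close()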

def use_nc_get_vars(self, use_nc_get_vars)

use_nc_get_vars(self,_use_get_vars)

enable the use of the netcdf library routine nc_get_vars to retrieve strided variable slices. nc_get_vars may not be used by default (depending on the version of the netcdf-c library being used), since it may be slower than multiple calls to the unstrided read routine nc_get_vara.

netcdf4-python-1.7.4rel/examples/000077500000000000000000000000001512661643000167165ustar00rootroot00000000000000netcdf4-python-1.7.4rel/examples/README.md000066400000000000000000000016551512661643000202040ustar00rootroot00000000000000
* `tutorial.py`: code from introduction section of documentation.
* `json_att.py`: shows how to use json to serialize python objects, save them as netcdf attributes, and then convert them back to python objects.
* `subset.py`: shows how to use 'orthogonal indexing' to select geographic regions.
* `reading_netcdf.ipynb`: ipython notebook from Unidata python workshop.
* `writing_netcdf.ipynb`: ipython notebook from Unidata python workshop.
* `threaded_read.py`: test script for concurrent threaded reads.
* `bench.py`: benchmarks for reading/writing using different formats.
* `bench_compress*.py`: benchmarks for reading/writing with compression.
* `bench_diskless.py`: benchmarks for 'diskless' IO.
* `test_stringarr.py`: test utilities for converting arrays of fixed-length strings to arrays of characters (with an extra dimension), and vice-versa. Useful since netcdf does not have a datatype for fixed-length string arrays.
netcdf4-python-1.7.4rel/examples/bench.py000066400000000000000000000033151512661643000203510ustar00rootroot00000000000000
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from typing import TYPE_CHECKING, Any
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys

if TYPE_CHECKING:
    from netCDF4 import Format as NCFormat
else:
    NCFormat = Any

# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))

def write_netcdf(filename,zlib=False,least_significant_digit=None,format: NCFormat='NETCDF4'):
    file = netCDF4.Dataset(filename,'w',format=format)
    file.createDimension('n1', n1dim)
    file.createDimension('n2', n2dim)
    file.createDimension('n3', n3dim)
    file.createDimension('n4', n4dim)
    foo = file.createVariable('data', 'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit)
    foo[:] = array
    file.close()

def read_netcdf(filename):
    file = netCDF4.Dataset(filename)
    data = file.variables['data'][:]
    file.close()

for format in ['NETCDF3_CLASSIC','NETCDF3_64BIT','NETCDF4_CLASSIC','NETCDF4']:
    sys.stdout.write('testing file format %s ...\n' % format)
    # writing, no compression.
    t = Timer("write_netcdf('test1.nc',format='%s')" % format,"from __main__ import write_netcdf")
    sys.stdout.write('writing took %s seconds\n' %\
            repr(sum(t.repeat(ntrials,1))/ntrials))
    # test reading.
    t = Timer("read_netcdf('test1.nc')","from __main__ import read_netcdf")
    sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))
netcdf4-python-1.7.4rel/examples/bench_compress.py000066400000000000000000000040221512661643000222600ustar00rootroot00000000000000
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from typing import TYPE_CHECKING, Any
from numpy.random.mtrand import uniform
import netCDF4
import netCDF4.utils
from timeit import Timer
import os, sys

if TYPE_CHECKING:
    from netCDF4 import CompressionLevel
else:
    CompressionLevel = Any

# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n' % ntrials)
array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)

def write_netcdf(filename,zlib=False,shuffle=False,complevel: CompressionLevel = 6):
    file = netCDF4.Dataset(filename,'w',format='NETCDF4')
    file.createDimension('n1', n1dim)
    file.createDimension('n2', n2dim)
    file.createDimension('n3', n3dim)
    file.createDimension('n4', n4dim)
    foo = file.createVariable('data',\
          'f8',('n1','n2','n3','n4'),zlib=zlib,shuffle=shuffle,complevel=complevel)
    foo[:] = array
    file.close()

def read_netcdf(filename):
    file = netCDF4.Dataset(filename)
    data = file.variables['data'][:]
    file.close()

for compress_kwargs in ["zlib=False,shuffle=False","zlib=True,shuffle=False",
                        "zlib=True,shuffle=True","zlib=True,shuffle=True,complevel=2"]:
    sys.stdout.write('testing compression %s...\n' % repr(compress_kwargs))
    # writing.
    t = Timer("write_netcdf('test.nc',%s)" % compress_kwargs,"from __main__ import write_netcdf")
    sys.stdout.write('writing took %s seconds\n' %\
            repr(sum(t.repeat(ntrials,1))/ntrials))
    # test reading.
    t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
    sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))
    # print out size of resulting files.
    sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
netcdf4-python-1.7.4rel/examples/bench_compress2.py000066400000000000000000000050541512661643000223500ustar00rootroot00000000000000
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys

# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))

def write_netcdf(filename,complevel,lsd):
    file = netCDF4.Dataset(filename,'w',format='NETCDF4')
    file.createDimension('n1', n1dim)
    file.createDimension('n2', n2dim)
    file.createDimension('n3', n3dim)
    file.createDimension('n4', n4dim)
    foo = file.createVariable('data',\
          'f8',('n1','n2','n3','n4'),\
          zlib=True,shuffle=True,complevel=complevel,\
          least_significant_digit=lsd)
    foo[:] = array
    file.close()

def read_netcdf(filename):
    file = netCDF4.Dataset(filename)
    data = file.variables['data'][:]
    file.close()

lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
    sys.stdout.write('testing compression with complevel %s...\n' % complevel)
    # writing.
    t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
    sys.stdout.write('writing took %s seconds\n' %\
            repr(sum(t.repeat(ntrials,1))/ntrials))
    # test reading.
    t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
    sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))
    # print out size of resulting files.
    sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))

complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(1,6):
    sys.stdout.write('testing compression with least_significant_digit %s...\n' % lsd)
    # writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") sys.stdout.write('writing took %s seconds\n' %\ repr(sum(t.repeat(ntrials,1))/ntrials)) # test reading. t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials)) # print out size of resulting files. sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) netcdf4-python-1.7.4rel/examples/bench_compress3.py000066400000000000000000000053441512661643000223530ustar00rootroot00000000000000# benchmark reads and writes, with and without compression. # tests all four supported file formats. from numpy.random.mtrand import uniform import netCDF4 from timeit import Timer import os, sys # use real data. URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc" nc = netCDF4.Dataset(URL) # use real 500 hPa geopotential height data. n1dim = 100 n3dim = 73 n4dim = 144 ntrials = 10 sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim)) sys.stdout.write('(average of %s trials)\n\n' % ntrials) print(nc) print(nc.variables['hgt']) array = nc.variables['hgt'][0:n1dim,5,:,:] print(array.min(), array.max(), array.shape, array.dtype) def write_netcdf(filename,complevel,lsd): file = netCDF4.Dataset(filename,'w',format='NETCDF4') file.createDimension('n1', None) file.createDimension('n3', n3dim) file.createDimension('n4', n4dim) foo = file.createVariable('data',\ 'f4',('n1','n3','n4'),\ zlib=True,shuffle=True,complevel=complevel,\ least_significant_digit=lsd) foo[:] = array file.close() def read_netcdf(filename): file = netCDF4.Dataset(filename) data = file.variables['data'][:] file.close() lsd = None sys.stdout.write('using least_significant_digit %s\n\n' % lsd) for complevel in range(0,10,2): sys.stdout.write('testing compression with complevel %s...\n' % complevel) # writing. t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") sys.stdout.write('writing took %s seconds\n' %\ repr(sum(t.repeat(ntrials,1))/ntrials)) # test reading. t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials)) # print out size of resulting files. sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) complevel = 4 complevel = 4 sys.stdout.write('\nusing complevel %s\n\n' % complevel) for lsd in range(0,6): sys.stdout.write('testing compression with least_significant_digit %s..\n'\ % lsd) # writing. t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf") sys.stdout.write('writing took %s seconds\n' %\ repr(sum(t.repeat(ntrials,1))/ntrials)) # test reading. t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf") sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials)) # print out size of resulting files. sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) netcdf4-python-1.7.4rel/examples/bench_compress4.py000066400000000000000000000040661512661643000223540ustar00rootroot00000000000000# benchmark reads and writes, with and without compression. # tests all four supported file formats. from typing import Literal from numpy.random.mtrand import uniform import netCDF4 from timeit import Timer import os, sys # use real data. 
URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc" nc = netCDF4.Dataset(URL) # use real 500 hPa geopotential height data. n1dim = 100 n3dim = 73 n4dim = 144 ntrials = 10 sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim)) sys.stdout.write('(average of %s trials)\n\n' % ntrials) array = nc.variables['hgt'][0:n1dim,5,:,:] def write_netcdf( filename, nsd, quantize_mode: Literal["BitGroom", "BitRound", "GranularBitRound"] = "BitGroom" ): file = netCDF4.Dataset(filename,'w',format='NETCDF4') file.createDimension('n1', None) file.createDimension('n3', n3dim) file.createDimension('n4', n4dim) foo = file.createVariable('data',\ 'f4',('n1','n3','n4'),\ zlib=True,shuffle=True,\ quantize_mode=quantize_mode,\ significant_digits=nsd) foo[:] = array file.close() def read_netcdf(filename): file = netCDF4.Dataset(filename) data = file.variables['data'][:] file.close() for sigdigits in range(1,5,1): sys.stdout.write('testing compression with significant_digits=%s...\n' %\ sigdigits) write_netcdf('test.nc',sigdigits) read_netcdf('test.nc') # print out size of resulting files with standard quantization. sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) sys.stdout.write("testing compression with significant_digits=%s and 'GranularBitRound'...\n" %\ sigdigits) write_netcdf('test.nc',sigdigits,quantize_mode='GranularBitRound') read_netcdf('test.nc') # print out size of resulting files with alternate quantization. sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size)) netcdf4-python-1.7.4rel/examples/bench_diskless.py000066400000000000000000000056541512661643000222620ustar00rootroot00000000000000# benchmark reads and writes, with and without compression. # tests all four supported file formats. from typing import TYPE_CHECKING, Any, Literal from numpy.random.mtrand import uniform import netCDF4 from timeit import Timer import os, sys if TYPE_CHECKING: from netCDF4 import Format as NCFormat else: NCFormat = Any # create an n1dim by n2dim by n3dim random array. n1dim = 30 n2dim = 15 n3dim = 73 n4dim = 144 ntrials = 10 sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) array = uniform(size=(n1dim,n2dim,n3dim,n4dim)) def write_netcdf(filename, zlib=False, least_significant_digit=None, format: NCFormat='NETCDF4',closeit=False): file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True) file.createDimension('n1', n1dim) file.createDimension('n2', n2dim) file.createDimension('n3', n3dim) file.createDimension('n4', n4dim) foo = file.createVariable('data',\ 'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=None) foo.testme="hi I am an attribute" foo.testme1="hi I am an attribute" foo.testme2="hi I am an attribute" foo.testme3="hi I am an attribute" foo.testme4="hi I am an attribute" foo.testme5="hi I am an attribute" foo[:] = array if closeit: file.close() return file def read_netcdf(ncfile): data = ncfile.variables['data'][:] for format in ['NETCDF4','NETCDF3_CLASSIC','NETCDF3_64BIT']: sys.stdout.write('testing file format %s ...\n' % format) # writing, no compression. t = Timer("write_netcdf('test1.nc',closeit=True,format='%s')" % format,"from __main__ import write_netcdf") sys.stdout.write('writing took %s seconds\n' %\ repr(sum(t.repeat(ntrials,1))/ntrials)) # test reading. 
    ncfile = write_netcdf('test1.nc',format=format)  # type: ignore
    t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
    sys.stdout.write('reading took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))

# test diskless=True in nc_open
format: Literal["NETCDF3_CLASSIC"] = 'NETCDF3_CLASSIC'  # mypy should know this but it needs help...
trials=50
sys.stdout.write('test caching of file in memory on open for %s\n' % format)
sys.stdout.write('testing file format %s ...\n' % format)
write_netcdf('test1.nc',format=format,closeit=True)
ncfile = netCDF4.Dataset('test1.nc',diskless=False)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (from disk) took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
ncfile = netCDF4.Dataset('test1.nc',diskless=True)
# setting diskless=True should cache the file in memory,
# resulting in faster reads.
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (cached in memory) took %s seconds\n' % repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
netcdf4-python-1.7.4rel/examples/complex_numbers.py000066400000000000000000000033071512661643000224750ustar00rootroot00000000000000
import netCDF4
import numpy as np

complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j], dtype="c16")
np_dt = np.dtype([("r", np.float64), ("i", np.float64)])
complex_struct_array = np.array(
    [(r, i) for r, i in zip(complex_array.real, complex_array.imag)],
    dtype=np_dt,
)

print("\n**********")
print("Reading a file that uses a dimension for complex numbers")
filename = "complex_numbers_as_dimension.nc"

with netCDF4.Dataset(filename, "w") as f:
    f.createDimension("x", size=len(complex_array))
    f.createDimension("complex", size=2)
    c_ri = f.createVariable("data_dim", np.float64, ("x", "complex"))
    as_dim_array = np.vstack((complex_array.real, complex_array.imag)).T
    c_ri[:] = as_dim_array

with netCDF4.Dataset(filename, "r", auto_complex=True) as f:
    print(f["data_dim"])

print("\n**********")
print("Reading a file that uses a compound datatype for complex numbers")
filename = "complex_numbers_as_datatype.nc"

with netCDF4.Dataset(filename, "w") as f:
    f.createDimension("x", size=len(complex_array))
    nc_dt = f.createCompoundType(np_dt, "nc_complex")
    c_struct = f.createVariable("data_struct", nc_dt, ("x",))
    c_struct[:] = complex_struct_array

with netCDF4.Dataset(filename, "r", auto_complex=True) as f:
    print(f["data_struct"])

print("\n**********")
print("Writing complex numbers to a file")
filename = "writing_complex_numbers.nc"

with netCDF4.Dataset(filename, "w", auto_complex=True) as f:
    f.createDimension("x", size=len(complex_array))
    c_var = f.createVariable("data", np.complex128, ("x",))
    c_var[:] = complex_array
    print(c_var)

with netCDF4.Dataset(filename, "r", auto_complex=True) as f:
    print(f["data"])
netcdf4-python-1.7.4rel/examples/json_att.py000066400000000000000000000012601512661643000211100ustar00rootroot00000000000000
from netCDF4 import Dataset
import json
# example showing how python objects (lists, dicts, None, True)
# can be serialized as strings, saved as netcdf attributes,
# and then converted back to python objects using json.
ds = Dataset('json.nc', 'w')
ds.pythonatt1 = json.dumps(['foo', {'bar': ['baz', None, 1.0, 2]}])
ds.pythonatt2 = "true" # converted to bool
ds.pythonatt3 = "null" # converted to None
print(ds)
ds.close()
ds = Dataset('json.nc')

def convert_json(s):
    try:
        a = json.loads(s)
        return a
    except:
        return s

x = convert_json(ds.pythonatt1)
print(type(x))
print(x)
print(convert_json(ds.pythonatt2))
print(convert_json(ds.pythonatt3))
ds.close()
netcdf4-python-1.7.4rel/examples/mpi_example.py000066400000000000000000000027711512661643000215770ustar00rootroot00000000000000
# to run: mpirun -np 4 python mpi_example.py
import sys
from mpi4py import MPI
import numpy as np
from netCDF4 import Dataset

nc_format = 'NETCDF4_CLASSIC' if len(sys.argv) < 2 else sys.argv[1]
rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
if rank == 0:
    print('Creating file with format {}'.format(nc_format))
nc = Dataset(
    "parallel_test.nc",
    "w",
    parallel=True,
    comm=MPI.COMM_WORLD,
    info=MPI.Info(),
    format=nc_format,  # type: ignore # we'll assume it's OK
)
# below should work also - MPI_COMM_WORLD and MPI_INFO_NULL will be used.
#nc = Dataset('parallel_test.nc', 'w', parallel=True)
d = nc.createDimension('dim',4)
v = nc.createVariable('var', np.int32, 'dim')
v[rank] = rank
# switch to collective mode, rewrite the data.
v.set_collective(True)
v[rank] = rank
nc.close()
# reopen the file read-only, check the data
nc = Dataset('parallel_test.nc', parallel=True, comm=MPI.COMM_WORLD, info=MPI.Info())
assert rank==nc['var'][rank]
nc.close()
# reopen the file in append mode, modify the data on the last rank.
nc = Dataset('parallel_test.nc', 'a',parallel=True, comm=MPI.COMM_WORLD, info=MPI.Info())
v = nc['var']  # re-fetch the variable from the reopened dataset
if rank == 3: v[rank] = 2*rank
nc.close()
# reopen the file read-only again, check the data.
# leave out the comm and info kwargs to check that the defaults
# (MPI_COMM_WORLD and MPI_INFO_NULL) work.
nc = Dataset('parallel_test.nc', parallel=True)
if rank == 3:
    assert 2*rank==nc['var'][rank]
else:
    assert rank==nc['var'][rank]
nc.close()
netcdf4-python-1.7.4rel/examples/mpi_example_compressed.py000066400000000000000000000012621512661643000240150ustar00rootroot00000000000000
# to run: mpirun -np 4 python mpi_example_compressed.py
import sys
from mpi4py import MPI
import numpy as np
from netCDF4 import Dataset

rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
nc = Dataset('parallel_test_compressed.nc', 'w', parallel=True)
d = nc.createDimension('dim',4)
v = nc.createVariable('var', np.int32, 'dim', zlib=True)
v[:] = np.arange(4)
nc.close()
# read compressed files in parallel, check the data, try to rewrite some data
nc = Dataset('parallel_test_compressed.nc', 'a', parallel=True)
v = nc['var']
assert rank==v[rank]
v.set_collective(True)  # issue #1108 (var must be in collective mode or write will fail)
v[rank]=2*rank
nc.close()
netcdf4-python-1.7.4rel/examples/reading_netCDF.ipynb000066400000000000000000002061561512661643000225670ustar00rootroot00000000000000
{ "cells": [ { "cell_type": "markdown", "metadata": { "internals": { "slide_helper": "subslide_end", "slide_type": "subslide" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "slide" } }, "source": [ "# Reading netCDF data\n", "- requires [numpy](http://numpy.scipy.org) and netCDF/HDF5 C libraries.\n", "- Github site: https://github.com/Unidata/netcdf4-python\n", "- Online docs: http://unidata.github.io/netcdf4-python/\n", "- Based on Konrad Hinsen's old [Scientific.IO.NetCDF](http://dirac.cnrs-orleans.fr/plone/software/scientificpython/) API, with lots of added netcdf version 4 features.\n", "- Developed by Jeff Whitaker at NOAA, with many contributions from users."
] }, { "cell_type": "code", "execution_count": 2, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 4, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "root group (NETCDF4_CLASSIC data model, file format HDF5):\n", " Conventions: CF-1.0\n", " title: HYCOM ATLb2.00\n", " institution: National Centers for Environmental Prediction\n", " source: HYCOM archive file\n", " experiment: 90.9\n", " history: archv2ncdf3z\n", " dimensions(sizes): MT(1), Y(850), X(712), Depth(10)\n", " variables(dimensions): float64 MT(MT), float64 Date(MT), float32 Depth(Depth), int32 Y(Y), int32 X(X), float32 Latitude(Y, X), float32 Longitude(Y, X), float32 u(MT, Depth, Y, X), float32 v(MT, Depth, Y, X), float32 temperature(MT, Depth, Y, X), float32 salinity(MT, Depth, Y, X)\n", " groups: \n" ] } ], "source": [ "f = netCDF4.Dataset('data/rtofs_glo_3dz_f006_6hrly_reg3.nc')\n", "print(f) " ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 4, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Access a netCDF variable\n", "- variable objects stored by name in **`variables`** dict.\n", "- print the variable yields summary info (including all the attributes).\n", "- no actual data read yet (just have a reference to the variable object with metadata)." ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 6, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "dict_keys(['MT', 'Date', 'Depth', 'Y', 'X', 'Latitude', 'Longitude', 'u', 'v', 'temperature', 'salinity'])\n", "\n", "float32 temperature(MT, Depth, Y, X)\n", " coordinates: Longitude Latitude Date\n", " standard_name: sea_water_potential_temperature\n", " units: degC\n", " _FillValue: 1.2676506e+30\n", " valid_range: [-5.078603 11.1498995]\n", " long_name: temp [90.9H]\n", "unlimited dimensions: MT\n", "current shape = (1, 10, 850, 712)\n", "filling on\n" ] } ], "source": [ "print(f.variables.keys()) # get all variable names\n", "temp = f.variables['temperature'] # temperature variable\n", "print(temp) " ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 6, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## List the Dimensions\n", "\n", "- All variables in a netCDF file have an associated shape, specified by a list of dimensions.\n", "- Let's list all the dimensions in this netCDF file.\n", "- Note that the **`MT`** dimension is special (*`unlimited`*), which means it can be appended to." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 8 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('MT', (unlimited): name = 'MT', size = 1)\n", "('Y', : name = 'Y', size = 850)\n", "('X', : name = 'X', size = 712)\n", "('Depth', : name = 'Depth', size = 10)\n" ] } ], "source": [ "for d in f.dimensions.items():\n", " print(d)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 9 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "Each variable has a **`dimensions`** and a **`shape`** attribute." ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 10 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "data": { "text/plain": [ "('MT', 'Depth', 'Y', 'X')" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "temp.dimensions" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 11, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "data": { "text/plain": [ "(1, 10, 850, 712)" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "temp.shape" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 11, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "### Each dimension typically has a variable associated with it (called a *coordinate* variable).\n", "- *Coordinate variables* are 1D variables that have the same name as dimensions.\n", "- Coordinate variables and *auxiliary coordinate variables* (named by the *coordinates* attribute) locate values in time and space." ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 13, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "float64 MT(MT)\n", " long_name: time\n", " units: days since 1900-12-31 00:00:00\n", " calendar: standard\n", " axis: T\n", "unlimited dimensions: MT\n", "current shape = (1,)\n", "filling on, default _FillValue of 9.969209968386869e+36 used\n", "\n", "int32 X(X)\n", " point_spacing: even\n", " axis: X\n", "unlimited dimensions: \n", "current shape = (712,)\n", "filling on, default _FillValue of -2147483647 used\n" ] } ], "source": [ "mt = f.variables['MT']\n", "depth = f.variables['Depth']\n", "x,y = f.variables['X'], f.variables['Y']\n", "print(mt)\n", "print(x) " ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 13, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Accessing data from a netCDF variable object\n", "\n", "- netCDF variables objects behave much like numpy arrays.\n", "- slicing a netCDF variable object returns a numpy array with the data.\n", "- Boolean array and integer sequence indexing behaves differently for netCDF variables than for numpy arrays. Only 1-d boolean arrays and integer sequences are allowed, and these indices work independently along each dimension (similar to the way vector subscripts work in fortran)." 
] }, { "cell_type": "code", "execution_count": 8, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 15 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[41023.25]\n" ] } ], "source": [ "time = mt[:] # Reads the netCDF variable MT, array of one element\n", "print(time) " ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 16 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[ 0. 100. 200. 400. 700. 1000. 2000. 3000. 4000. 5000.]\n" ] } ], "source": [ "dpth = depth[:] # examine depth array\n", "print(dpth) " ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 17, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape of temp variable: (1, 10, 850, 712)\n", "shape of temp slice: (6, 425, 356)\n" ] } ], "source": [ "xx,yy = x[:],y[:]\n", "print('shape of temp variable: %s' % repr(temp.shape))\n", "tempslice = temp[0, dpth > 400, yy > yy.max()/2, xx > xx.max()/2]\n", "print('shape of temp slice: %s' % repr(tempslice.shape))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 17, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## What is the sea surface temperature and salinity at 50N, 140W?\n", "### Finding the latitude and longitude indices of 50N, 140W\n", "\n", "- The `X` and `Y` dimensions don't look like longitudes and latitudes\n", "- Use the auxiliary coordinate variables named in the `coordinates` variable attribute, `Latitude` and `Longitude`" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 19 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "float32 Latitude(Y, X)\n", " standard_name: latitude\n", " units: degrees_north\n", "unlimited dimensions: \n", "current shape = (850, 712)\n", "filling on, default _FillValue of 9.969209968386869e+36 used\n" ] } ], "source": [ "lat, lon = f.variables['Latitude'], f.variables['Longitude']\n", "print(lat)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 20, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "source": [ "Aha! So we need to find array indices `iy` and `ix` such that `Latitude[iy, ix]` is close to 50.0 and `Longitude[iy, ix]` is close to -140.0 ..." 
] }, { "cell_type": "code", "execution_count": 12, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 20, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "outputs": [], "source": [ "# extract lat/lon values (in degrees) to numpy arrays\n", "latvals = lat[:]; lonvals = lon[:] \n", "# a function to find the index of the point closest pt\n", "# (in squared distance) to give lat/lon value.\n", "def getclosest_ij(lats,lons,latpt,lonpt):\n", " # find squared distance of every point on grid\n", " dist_sq = (lats-latpt)**2 + (lons-lonpt)**2 \n", " # 1D index of minimum dist_sq element\n", " minindex_flattened = dist_sq.argmin() \n", " # Get 2D index for latvals and lonvals arrays from 1D index\n", " return np.unravel_index(minindex_flattened, lats.shape)\n", "iy_min, ix_min = getclosest_ij(latvals, lonvals, 50., -140)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 22 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "### Now we have all the information we need to find our answer.\n" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 23 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "```\n", "|----------+--------|\n", "| Variable | Index |\n", "|----------+--------|\n", "| MT | 0 |\n", "| Depth | 0 |\n", "| Y | iy_min |\n", "| X | ix_min |\n", "|----------+--------|\n", "```" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 24 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "### What is the sea surface temperature and salinity at the specified point?" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 25, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 6.4631 degC\n", "32.6572 psu\n" ] } ], "source": [ "sal = f.variables['salinity']\n", "# Read values out of the netCDF file for temperature and salinity\n", "print('%7.4f %s' % (temp[0,0,iy_min,ix_min], temp.units))\n", "print('%7.4f %s' % (sal[0,0,iy_min,ix_min], sal.units))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 25, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Remote data access via openDAP\n", "\n", "- Remote data can be accessed seamlessly with the netcdf4-python API\n", "- Access happens via the DAP protocol and DAP servers, such as TDS.\n", "- many formats supported, like GRIB, are supported \"under the hood\"." ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 27 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "The following example showcases some nice netCDF features:\n", "\n", "1. We are seamlessly accessing **remote** data, from a TDS server.\n", "2. We are seamlessly accessing **GRIB2** data, as if it were netCDF data.\n", "3. We are generating **metadata** on-the-fly." 
] }, { "cell_type": "code", "execution_count": 14, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 28, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/GFS_Global_0p5deg_20230525_1200.grib2/GC\n" ] } ], "source": [ "import datetime\n", "date = datetime.datetime.now()\n", "# build URL for latest synoptic analysis time\n", "URL = 'https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/GFS_Global_0p5deg_%04i%02i%02i_%02i%02i.grib2/GC' %\\\n", "(date.year,date.month,date.day,6*(date.hour//6),0)\n", "# keep moving back 6 hours until a valid URL found\n", "validURL = False; ncount = 0\n", "while (not validURL and ncount < 10):\n", " print(URL)\n", " try:\n", " gfs = netCDF4.Dataset(URL)\n", " validURL = True\n", " except RuntimeError:\n", " date -= datetime.timedelta(hours=6)\n", " ncount += 1 " ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 28, "slide_helper": "subslide_end", "slide_type": "subslide" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "slide" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "float32 Temperature_surface(time1, lat, lon)\n", " long_name: Temperature @ Ground or water surface\n", " units: K\n", " abbreviation: TMP\n", " missing_value: nan\n", " grid_mapping: LatLon_Projection\n", " coordinates: reftime time1 lat lon \n", " Grib_Variable_Id: VAR_0-0-0_L1\n", " Grib2_Parameter: [0 0 0]\n", " Grib2_Parameter_Discipline: Meteorological products\n", " Grib2_Parameter_Category: Temperature\n", " Grib2_Parameter_Name: Temperature\n", " Grib2_Level_Type: 1\n", " Grib2_Level_Desc: Ground or water surface\n", " Grib2_Generating_Process_Type: Forecast\n", " Grib2_Statistical_Process_Type: UnknownStatType--1\n", "unlimited dimensions: \n", "current shape = (129, 361, 720)\n", "filling off\n", "\n", "float64 time1(time1)\n", " units: Hour since 2023-05-25T12:00:00Z\n", " standard_name: time\n", " long_name: GRIB forecast or observation time\n", " calendar: proleptic_gregorian\n", " _CoordinateAxisType: Time\n", "unlimited dimensions: \n", "current shape = (129,)\n", "filling off\n", "\n", "float32 lat(lat)\n", " units: degrees_north\n", " _CoordinateAxisType: Lat\n", "unlimited dimensions: \n", "current shape = (361,)\n", "filling off\n", "\n", "float32 lon(lon)\n", " units: degrees_east\n", " _CoordinateAxisType: Lon\n", "unlimited dimensions: \n", "current shape = (720,)\n", "filling off\n" ] } ], "source": [ "# Look at metadata for a specific variable\n", "# gfs.variables.keys() will show all available variables.\n", "sfctmp = gfs.variables['Temperature_surface']\n", "# get info about sfctmp\n", "print(sfctmp)\n", "# print coord vars associated with this variable\n", "for dname in sfctmp.dimensions: \n", " print(gfs.variables[dname])" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 28, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "##Missing values\n", "- when `data == var.missing_value` somewhere, a masked array is returned.\n", "- illustrate with soil moisture data (only defined over land)\n", "- white areas on plot are masked values over water." 
] }, { "cell_type": "code", "execution_count": 16, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 31 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape=(361, 720), type=, missing_value=nan\n" ] }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAigAAAGdCAYAAAA44ojeAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABmwElEQVR4nO29f3RV1Z33/45AIkLIN0jJDwlMpJhWA5kOqECdGhWQtOIoXaOdzlCtTpe2CERgyaDrabGrA+KMRKgtz7SPBcE6+MwSHJ0CEsYGx4fHpxo1As7KWMXyQ9KsakjAoYnA+f4R9mXfk/Njn3P2+XHveb/Wugtyz7nnnLvv/vHen/35fHaBYRgGCCGEEEISxAVxPwAhhBBCiBkKFEIIIYQkDgoUQgghhCQOChRCCCGEJA4KFEIIIYQkDgoUQgghhCQOChRCCCGEJA4KFEIIIYQkjsFxP4Afzp49i48++gjFxcUoKCiI+3EIIYQQooBhGDhx4gQqKytxwQXONpKcFCgfffQRqqqq4n4MQgghhPjg8OHDGDNmjOM5OSlQiouLAfR/wREjRsT8NIQQQghRoaenB1VVVZlx3ImcFChiWWfEiBEUKIQQQkiOoeKeQSdZQgghhCQOChRCCCGEJA4KFEIIIYQkDk8CZf369Zg0aVLG92PatGnYsWNH5vidd96JgoKCrNfUqVOzrtHb24sFCxZg1KhRGDZsGG6++WYcOXJEz7chhBBCSF7gSaCMGTMGjzzyCN544w288cYbuP766/EXf/EXOHDgQOac2bNn49ixY5nX9u3bs67R2NiIbdu2YcuWLXj11Vdx8uRJ3HTTTThz5oyeb0QIIYSQnKfAMAwjyAVGjhyJf/iHf8Ddd9+NO++8E8ePH8fzzz9veW53dzc+97nPYfPmzbj99tsBnM9psn37dtx4441K9+zp6UFJSQm6u7sZxUMIIYTkCF7Gb98+KGfOnMGWLVvw6aefYtq0aZn3W1paMHr0aFx22WX4zne+g87Ozsyx1tZWfPbZZ5g1a1bmvcrKStTW1mLv3r229+rt7UVPT0/WixBCCCH5i2eBsm/fPgwfPhxFRUW49957sW3bNlx++eUAgIaGBvzyl7/Eyy+/jMceewyvv/46rr/+evT29gIAOjo6UFhYiNLS0qxrlpWVoaOjw/aeq1atQklJSebFLLKEEEJIfuM5UVtNTQ3efvttHD9+HM899xzuuOMO7NmzB5dffnlm2QYAamtrMWXKFIwbNw6/+tWvMHfuXNtrGobhmLRl+fLlWLx4ceZvkYmOEEIIIfmJZ4FSWFiIz3/+8wCAKVOm4PXXX8fatWvxT//0TwPOraiowLhx4/Dee+8BAMrLy9HX14eurq4sK0pnZyemT59ue8+ioiIUFRV5fVRCCCGE5CiBU90bhpFZwjHz8ccf4/Dhw6ioqAAATJ48GUOGDEFzczNuu+02AMCxY8ewf/9+PProo0EfhRBCCMlb2g5lrxzUjT1s+b44Jt4X5+UangTKgw8+iIaGBlRVVeHEiRPYsmULWlpasHPnTpw8eRIrVqzA17/+dVRUVODDDz/Egw8+iFGjRuHWW28FAJSUlODuu+/GkiVLcPHFF2PkyJFYunQpJk6ciBkzZoTyBQkhhKQLeWC2Grw3d03H1j1XYe61v8G80r2xDuBWzwc4iw+v181VoeJJoPz+97/HvHnzcOzYMZSUlGDSpEnYuXMnZs6ciVOnTmHfvn3YtGkTjh8/joqKClx33XV49tlns3YtbGpqwuDBg3Hbbbfh1KlTuOGGG7Bx40YMGjRI+5cjhBCSX3gZsJ3OHV7dPeC8zV3TMa/0fETprc834oOFS3w8pT11i5rO33ft/bbnqXzPtkNVOSc6vBA4D0ocMA8KIYTkH36tBV7Y3NXv7yiEyLy2b58/Vrch61wrgTLpxe8POE9c9x/rnrW856QXv4+TB0tQ/H524OymJWu8fwETdgLFbtknbryM3xQoKSRXzX2EkPwmLIFy6/ONAJBZ0rHCzWfDz7OZxVBS8CJq3D7jFQoUkhrsnMYIIclDbq/z2r6Ngt2lA5Y5xLLFpeseAwBsu+VxpWubl2e8IvcdspUk6HWTiB//FgoURShQ0o1bo6JIISSZmNuusDDsOlRjKVaAfp+NE+PP2vqCBLG6OFkS5GezWtLJZ8LsQylQArC0rT/ZnHktUaURcGCMBtkMa7ceLOBvQkj8THrx+5g1tt3REiEEwdY9V2Hs9tNo2bkMAFA/ezW6agqzzjVmdHkSDZu7pmPXoZrM3+/M+aHj+VH4wsSJm1UoKQIlcB6UXEWugKJhNP9iKmbe1b9eaF6LjPMHJdnYlbXVb5TvXu6E5ALvzPmh66Av2m7z+1PRVVOI+tmrMyKltL0vS6TMGttueQ1Z5Mh8sHAJUKf2rPkuToDzZW3uM5PWV6bagmKuiN96bDFm3vWapRBJ2g9HshEmWTm3AdD/uwmrmGBe6V7Ma/u26yyKEOKMHDILOIfNCoRvyfDqbksryK3PN6L4/QuyRElpex8AoKumECfGn3X1SxH9dd2iJsdnEs/i5Dyb7wiREtUYxyUeDwiR8q3Hzu/1YzYfmh2KKFZyC7O1TBYvhJDzeBUc8vknxp/F8OruTDit+Kzdsrl5eVYswxTszt5M1pjRlRWiqyJQAOv2bZ6sAMmLsIkaMbHTne/FDi7xeEB4i4tUcifGn8Xcse1ZpsIPFvYfE+9967H+RqkyWyDxI3dUdWNjfBBCEkzboSrMvGs6mn8xVf0zpj5wadvt2Howe3nFTgD0WzDPWzHrxqJ/GWZO9nlL227HLtQA7/cLF3MuESvswoTnlVqcnHKiFCdeSb0FRSBmAuYlHnNFv3TdY5kGQoGSe5hniAB/R5IuRBsw13u5bxPoahterM/mpfd5bd/GyYMl+GDhEkx68fso2F1qm+BM1SoqnHaB85YdN4f7fMUu3DssuMQTgEvXPYbh1d30T8gzrISJDEUKyVeEn0VY4kMFVYHilKUV6A/5PXmwZMASj9fl2jQ4wrohltRmjW1H8y+mJlKgpH6Jx0xSTV1xkhbfGycRQwFDcpFJL34fwMAU66r12cnJ1MoSI/oKMSsXx4WzurzEKp8rJoTCqmHOwCqECXDOuVZTUrY0CxVRfv9Y9yywNuaHsYEWFIKlbbcPaOyiAfd3cO55A+LAzSqimySIFCsnxrZDVaFsakZyH2E9MdcNVTHutiQqCxhhfQaQ
5ehq126sBEpcqKYjyEdBI5xkZcfmMOESD3Elij0XdBK1GLEiiQJFrMWLdfqoOhmS2wRpT071y+66cuSNLKYvXfdY5n2rnBxJTpCZL2Il6r6DSzzEklzKuhqFIBG5FQTmbJVJx5jRlfn/O3N+iEkvfh9tC5Nn6SLJQwxE5mWa+tmrAfhvC21r789qu+Lv4dXdmYFQpvj9C7IiJndVn08rny8CIOlsrtuAb+1enMiJDS0oKcK8AdauQzWxm1bt0CVQ5EYnOl9V5FTb4v+EJB25nnupt+JzSanrk178fmL7J0E+RP5s7pqeWCdZCpQUIA/2Ytad9IbvV6CYrSJ+ETPIJM4qZOxCRkn6cBLgZotI2PVF+LV967HsmfnSttuz0tCLjLFJEUV+yDVLj5icCmEVtf8al3iILRnntTnO54WN7iWcoMJE7iDrZ6/OXM+L1cWqkzV/Ppc7YpK7yO2jq6YQdYuaPAsD1bpcP3s1Pplfg12HajBSuq9IlDbvlvP7wDS/n50UTmR6lZPFmZefZNb+7Imsv+NwuFX1lYmLeW3fzmzUKMqn7hdNuPVgIwC1xHdxQQtKHhN2UjK3fS7cPhsUHdYS814fQXDqsM3n+TXDE2KH1yVMGbc6KLdXs7CRl4bMPixy/yAiAk8eLMmkxB+7/XTW/Ze23d4vbH4yDF01hZl72X23T+Z/OmCX5M1d0wek1Q+bukVNtsnjkoJYypGJw/JKCwqxRFdllDsrs0OcFWGsbetaytF1HcDfABGkTFTuR/GTDvyKEyEkRDsWgkClXasgW2qE9XZsex+AYXhn5xJg4flz62evxqGvXnVuRt/nKk6A7KVq4Q9itRt92LStvR+XrlPbIygu5pXuxa4Z/X6HVlmDkwgtKHlKmNYTO+uH7MFv9ua3Ord+9mrf0QI6hUWchC1QdN6PJBNVoapaX+Q26dTOnJY0zddQsRoGsQABwJTVb8a6E32Sl3msMPsHRQWdZImjiAjrulbhhVHh1JGqdrgqWC0JWW0J7/VediZz+W+d5IJQoQOwd6zqilXd0oFTHbKymrr5sAR9tkNfHYzh1d2YNbY9k8Jd7I58YvzZ0PN8yEktkyhW5rV9GwCyyoUCJQQoUNTxun266nWs8CNKwhQyZsHgdI7bear3M19DVaSEKUZU7itIilOvriUGv5j9KnJB0KkQlUBRuaeKwPGL3A7FBrDm9PthIXzzkiBS5OR3IlpHtC1jRlcs0ZwUKBFgNle6zVzyAZUN95KQ8TVpJH05yosFKN/qtEBuv1blYWUhjFO4hHVvv8LA7Vm8ihQdQurQVwdj7rW/yfwtb4wn0B3iXLeoCSfGnwWAzO7LceVJkTcDBJDlOCyeM47tMegkqxErM7O58UQ96yX6sbJ+6ELVguN0vqrIMS9nqSxvebXy+EH4GyVpuSZLTJ+LGJHLS/6/WXjXLWoCzoXrqqDje5uj5tz+drqOmaAiWrdgsrLsOfnR2B2TfVK27rkKu1CDAul4V02htme3Ktf+DM+IRaSI7y7+lS06mzK6JNn7d9GC4oC5woUZiho35jBCYOCz5qIFJUzhYUbMnLx6x6um3A+Smt/ps17Ej7AcWCHqi9VAKTtEOz1LVAJGZVIRZr1x+56iDOV/BWZLl5tzuvleOgWKCAUGgvmUeFnuUbXWdNUUYuZdr2XeFxviAef3BtKdRVVYJuZe+5uMxSIJSz1m4tzWhEs8ARGKOgyBYkUSRIvVd7UaOFR3QXU7N5cQDnZ2f5vPBbyJFK9+MF59a6ywEyjygGPGbpaqsiRkN1g5fYewxIrTd1BBpfy9XkfeERhAxqFThOm6OV6bfzeVcpVDi92Q+yg7gaPiz6RiAVEROn6WlOQyFMs/YW/3UbeoKeMDkwSSsN8aBYpP7KJQovIhUG10KoJGxUdGHFMVEiodcxKtKG44CY4wPmdGt6OuCmFHNql+TuW7hx0e74RqdFiQ64lyCLK0Z4fKM5qtYk4hwF6chr0IDjNukUde+0ArumoKQ4nukeuZvItzXCRBlMjQB0UDUYsTQH2TL7sOwq5BOjXU+tmrAWmAybUdfb1gJyi8iAwra4rXa5jRWeZOFhAZnfXayY/GToCUtvcpWyLiDDfu0tQ2nMpbHFP1GQp6b6t7WPUnDWMWYseRdVnvib5E/Dt0/9EB55hx8x/Ridd7FL9/ATYtWYO2Q2syUT46HWejTIaWNCGiAwqUhOI0S9Gd10DuwFRmtHbnJMkB0gq3ZRm387xeT+U6TgOf2bSvOkCqipQwcFo6sjtPlaBCJUhyQC9Lb0HQ/btZXc/8ntyHDN1/NOuYlUgxn9cwpj8d7KnaSwI9qxVOky4VEeHUV5rb1Oa6DUBdfzjupese0xbhIof6hkU+ihOASzwA7E2/ujsLr7MIJ9No2DMSL+ZmeQ0915Z3BLqWa4Jey2q262ZpCGt5wCtOOWCshEsQi4TZgdSPaPFSV1X8QPIFs0gBzosPq2Pmc1SdvIPi1cph119aZaC99flG30s/VvUqrH16clGYcInHA1GJE0BvOHJY4sRLojHz7ETnLDPqpSadTq06hI5duKvduVYiJeoB1SlM14yu31e0X1XriixmnCKSrMglYRIk8Z+VGHESLYIgTtt2WE3EgggTeTIl+qzmX0xFM6YOcGadeddraDtU5XvzQZ2TnrSSeoFiJikOsUDy86tYmUx1oStSQhdBHVnlZSSnTsvv8oyKGND1+yThN/Fi/TCfG8bmlUklSOivitXEijB92YJaTUrb+yzLwLzrsjnBG+rU7xlW1M7mrukA+vOa6LKcmIW9VTi7II72QoHikzBTkztdU7d/gdnHIcmzRN0dn5cZTtDlCBE+Ku4L2AuVsBPGBfmNw9gOIEyshIy4f9InADrQ4aAqW0rsrCqnai8JdUnc7+Do57sX7C4F7uoXJs2/mIp3PC7z/GPds9pyn8hCpG6slkvaJh+1ssDKv2kcmZNTLVD85Dkx/0BReaebCSomkjAL9kpSntnKuuM08NYtakIxzieISiN+LGJWyzVh7tKdr+jsn8zLOkKkDN1/1NFJNqhlMKzBUbRb89LfpBe/j5MHS/CBz7r1rccWZ/7v16ISln+JlbXEql3291XxTlpT6SRrtSU44E+gBGn8ZiuMF6uMToEiDx5hVkavM/ioBYkuZ1OnSBbVpR4/qEQPBfkuUeIkOryKlDBSu+tCtc35sX6ZLRxhYhYoTu3ISyi11ff2u8mgSoRZWPlQ/DjJhu0Aa7dMaszoGpCa/9bnGzF2+2ktItHL+J06Dx6rCiznZAh6LT+fl4WJ6jXD6GCjSAqlA/F7yS/xvt25Xq8t/9/r9zI/l/x+8fsXZF6CE+PPRmZZ8fJd4hAnbWvvdx0kxDmijFXOl891yv2RL5j9RobuP5p5hcGp2kuyrm2u/177FvHbyKJGvHRFCprr96Yla7C07XYt1wayv7PwH/FC2CnyvYqxONpL6iwoTruWuqE7/4h83biWiYDkzCbtcAuvNc+G3Cwhfi05usvJqwDQkRTOCqsEYTrFidflnaiXbHS0va6aQhgzugAAJw+
WYHh1N0b+ZFjg6+pACAchIsK2pgTBnPjNLjpQ1FEvdcXtWiLr67y2b2tLf1+3qMnSIqH02QhCiK3GQ4F5TyFdMMzYArGnhUzcwiTMa6qSdHFiRh48nUJa7cJu/VjLwiojp7wyqugIZdSRRM2KpEViyQRpc5/M/xSzxrZj16EajPzJsAHfb3h1NwC9S6ZBriULkihEikpIsozTzFzeisNpR3kV7Lb1EL9f8fsX4FuPLe7f7XiO58vnJFYbwpa268ukG5S8t6DYhU2pNnidPifEHyrJymScQoJzNeGW0/f2KlLsfI50ROiY8XvNMKwouiwlwulRpEZ3IpfqmE7MIsWvQAkDK0vKzLtewz/WPRvKtgqXrnvM0348tz7fqC2LbRKhBeUcTmuVfsRJ2HhxHksTZnHilJrer5Usl/G6N5Cd5cnPEpKc9MrpPk7IM7awJgA6rysSexVYHDMLvimr38TWPVcBAMZuP63l/iqTK1UriVMiNF2hyeJ55GMqe/h4xbzZqx3y9xN1VETZhLHZ6QcLl2DSi93Kyzz5LE684kmgrF+/HuvXr8eHH34IALjiiivw/e9/Hw0NDQAAwzDw8MMP42c/+xm6urpw9dVX4yc/+QmuuOKKzDV6e3uxdOlS/PM//zNOnTqFG264AT/96U8xZswYbV/K734bMlGIE9E5JNEEnmSYnfE8ZutJlGVjlfTKa102DyRJMS2bEd/LmNGVsZrI1riZd72WOXfrnqtQ/P4FA4T0J/M/xcmDJYGFioro9rKEYxYpYYjEqP1eVCK9suva/bbn6eCdOT/EpetKYt/ZONfw1JuNGTMGjzzyCN544w288cYbuP766/EXf/EXOHDgAADg0UcfxZo1a/DEE0/g9ddfR3l5OWbOnIkTJ05krtHY2Iht27Zhy5YtePXVV3Hy5EncdNNNOHPmjN5vlhCS2uHmInbRMfJxL9fJF0QUUJBIICF0ZIEjomXMUTPimFy3RYSFKirROklj5l2vYXPdhowzrMCY0dW/5HPuNby6G8aMLhS/f0HGegIAs8a2Y3h1N6asfhOfzP80UROTlp3LQovwsbqXbusJgKwILSvqFjXFulfYBwuXDIjm8RPdkyY8WVDmzMn2HPr7v/97rF+/Hq+99houv/xyPP7443jooYcwd+5cAMBTTz2FsrIyPPPMM7jnnnvQ3d2NJ598Eps3b8aMGTMAAE8//TSqqqqwe/du3HjjjZq+VvCty3UlBpKvY/aYFn/nql9EHPh1dM1H7ASJ6lKNk6BxEg/ysaAWk6hwi5TzE0oumDW2HUC/74Bg7rW/QTOmYnh1d+Y4gIyZf17pXqCuP6FXWHVZjt6xwtwvDYX60lAuE6cjaPMvpmLr+KswvLobJw+WAADm3XI+iVsubv4XJr59UM6cOYN/+Zd/waeffopp06bh4MGD6OjowKxZszLnFBUV4dprr8XevXtxzz33oLW1FZ999lnWOZWVlaitrcXevXttBUpvby96e3szf/f09Lg+nwgbK37/At8dgB+R4ifZmmrnSB+VftJeDlY+OHJdPzF+sK+lHnENOxEhtwWnTTbl8M8wnA7jYtehGuw6VIN35vwQ9T9ZfS6a503sOlSDrXuuyvIdEN971th2xyyixowuoF1vOLJVDhQzp2ovSbXDf1ISENYtasLmrumYV7r3nHNsLI+VWDwLlH379mHatGn44x//iOHDh2Pbtm24/PLLsXdvfyMsKyvLOr+srAy/+93vAAAdHR0oLCxEaWnpgHM6Ojps77lq1So8/PDDys9oF06mA/NMzE7AyOfZJYez+r8dbvskpIUgOTrsyiyKstRxDyfnYFmIi/+7lZOViFHZDVjGyvonXyNJwsRPviHZQVu2hExZ/WZGeDT/YiqKTZ+z+97m5Fub6zbgWzXn06IHnVSpYmVdMSdbC4MorBYqdd9tKQiIpu4Kp9yt46+ib50FngVKTU0N3n77bRw/fhzPPfcc7rjjDuzZsydzvKAg27fdMIwB75lxO2f58uVYvPh8I+7p6UFVlXuWvbAaulNelLCSrvkJkU4TfpfIrHKqqIbLes1QG4T+zzsnrDPfyy4022ovDr/5VHLNxypI2gA5YZUIM35nzg/Rtlb9/sKEL4eeyo63w6u7YVQDXSgNnPfESWzYLePk8vKO/Fv6zcFTP3s1cO4z8v48YZIkEZ80PAuUwsJCfP7znwcATJkyBa+//jrWrl2LZcv6G35HRwcqKioy53d2dmasKuXl5ejr60NXV1eWFaWzsxPTp9s7CxUVFaGoqMjTc9o1bi/739h1vk6dWhRmU4qTflStULJ4UVlWc8oP4neGq2OPH6fPB+nkVD5rFW2WTx2rU53oshiw6sYexjsBdpftXw5akhEq8/Dt/gy06Leq3HqwEUE2arPbhycqR1g74tgRV2D+ja2eJUmOy0TDXjyGYaC3txfV1dUoLy9Hc3Nz5lhfXx/27NmTER+TJ0/GkCFDss45duwY9u/f7yhQdGHVMOz2v8m1mWGSCLuR+3WSla0lTp93Sl4mrAzy3iBW93E67nReENO0V3IxkkY3LTuXZV7AwDIJayM5gfBbEctHWZaUGV049FV/boJmUSJep2ovUbKwhImXPceC4tZe5GV4K7eANPvp6ED81n7L0VMm2QcffBANDQ2oqqrCiRMnsGXLFjzyyCPYuXMnZs6cidWrV2PVqlXYsGEDJkyYgJUrV6KlpQXt7e0oLu5fpf3ud7+Lf/u3f8PGjRsxcuRILF26FB9//DFaW1sxaNAgpefwkonOTrG7FZiqQGEFTjZu+/hEjRxh47Ss4mYRko+HKaad9urIV3Fjl0MjTN+ES9c9BiC7Xmxasgabu6Zj656rXHOn6I4CjCqaR3fdlXeqN5eFn2VccS4nrN6x89UMLZPs73//e8ybNw/Hjh1DSUkJJk2alBEnAPDAAw/g1KlT+N73vpdJ1LZr166MOAGApqYmDB48GLfddlsmUdvGjRuVxYlXrPxFxIzJj7gwLw3lK/kebujWgXnFq+DxK07Mx8OqhyoJ2KJao48TIUzCzp/xwcIlUtqB/vdEdMeu6hp8Mh+Omw/qXvbN1bYvB0h4XVaNa9KSBuT+5IX//V3lz+X9XjwCKzVnJ1D8+J7oIm4HWN3CxC0XQ9g4dTg6BYr5mm5758iYU/Wr1IGwBbJKhuN8FCd26dKjEmPypqYtO5dh0ovfB3Au2uex84EC5v2UVPuNpIfo66jXbmJSxV/N6vx8n5Tqxs51wsv4zbgmCTGYxrFs48UPISzE9xfr1Srnq5xnDl9UydOgC9XOKEiiLqfj5gyvcsZWIUzMgkUMOGYfFvF3nOIkiYOaDmQriZVvTlRiTM7QWz97NQp2l6Jgdylufb7RMrGeyqzfqi5FFcbslSj8U/z0rxQn8UCBcg6Rfll2mIuKJJgV3ToUKzHi5mwnCx7zNayiDOKOMDBj54yrmmq/tL1vQPp4EQHkZRYnBpfS9r7YrRZJqKthEnf5mhF1RfigmNPsy+fIqDhoB0Fla4Mg7VmHSPHjTG/3GfoaBsfPuJpqgeI3S6
wu7Bq5jtmNqnXDCuHpbxeq6HR9L/d0sqroREVQ2DnNmWed5s942R/I6vNmAeP0HaISzip1MmkDeVCSFtFk9VsXv38BCnar50cx58OJg6AixaswqJ+92rI9qrRVu/PlaxP/+Pk9fae6z1Vk86lXa0muVNCgfiR2fiPmrJNWyNYQL5YRq8/pxC6ZmRVyxIaOzt3LNczPKKeNj5N8t5wkFbm/8psY0JwHSAduz6IzK61VJJlVm7Drn3UKtLQESfhFXh5uswlE+epcdYfz1AkUgZeEbVEh+x54aVQ6stdadShBHFzF9VQ/K+dosPuc1fOEYYES2STDmnn6ym4JdoppQ/69dWeo9lu3k7qpadT9NtvkQOycYoP8NqkTKHaFFYcwMUd8+G38QZ/dbjMxFb8Uq8/puLcVVu97mRWqlGvb2vs9z1a9oBpVZHaaNpuaxXtRdpBJsOSkCR2DoFx3gkYI+rW+6LSKRiGMkjBZzSXk3DPAwN8oSHnmvUDJhUrmlLnUD7qXSKw6GPGeldVFft+rT4rXZ7fL5CrjZc1eZ31R+S29dLhWe42IpUrO6PKfoLNRnYO7ynLpjiPr0DCmf3veXMqr5KWM2e7O41S//NbbvHWSjTKdsk7iMJ3K6a/tsIvgUV2KUX0OL8iK3ckBTkUohOEA7Ybdb+1XYOnCKsSW1pPocauTKpE04jxV7CYUVm3N6jk4UKcPp9886Dic0wLlq3ObLL983MIkaeGyqgihIosPO/Ei7+9hh2o5WN3DbY8bwD680gqnzjysHaidcMtoSQgwsPM3/+22xOk11FZladfpfuZ2tOPIOuV7eyHuPp6oEVSw5t0STxIqbpimTLt15LBNqFYRPUGEmJsTrdk/xy0k2A271PJexYmdJUkFv1EYZuLONkziRdTZ0vY+fDL/U8wa+ybeWPZnmeNuFji3+uNUp71YY0S7Gqr8CZIPWG0v45ecFyg6CyNuVJxk7Y7Fsb4bRKR4FVQqg7JT5ymLE/laYdUZswlcl6BguC+RmTW2HfNK96K5ZioA+3w+dpZGXfXSTfCEYVVOUn9vtyltWrD77uYJYFdNIYoP/FH5ujm9xCOTpMoq8NooOSvOxutyjVVyJvGvbOrWWc4qS1067+dmstfVDoTfCX1PkokYELbuuQrz2r6NE+PPYuZdr3nesdev4KVQpr+NCub+yGtfmNMWlO1b+zvPJIoTwJ9VI8hg5pT51e/13K4TVpp6tw5Q1fck30UfO8l0058CfxgKai7AruoaFJiOW4n2OMRFrkTwqOLmG0Sykcunf7PAFUqfy2mBAiRXnISJypKBrmyy5oicMH1d/OQziTJ8MYnOz2k3LZN+Stv7gPZhANTSuOsUKfk0CfDSluoWNYWa0DEfCNo35fQSj5eUuQIvDdNqF9AkoOqH4nc/Hj977KiEKuvA/FuIe1rtcxMlVt89ih2qKU7SS5BZvNVyp0pf4XVPm1wiH7c9SRr1s1cz1b0uzJEkYaHTYc1qycVryvmg9/ey944KuqJfvOIlTFpGNf1+GNYfCpZ0ITshmpP2CcwWV7v+zK0uCstLnBOBMBBtRuy/JVDxv7Jytmcb1EdqBIqO7cVldDs+6hZBqkLB76Z+cRFGh2iXKVcHfsM5vcJOMb3IqcStRIpTqnvVCDPdWWidrqeSGl11408V5A3uvJAP4ixsUh9mrEKQyhzGZnRW+O04BG7ZW512KFYdjFVm/F5T22djbYnwamFS9dFxOmYuF6+hkuy8SFyEvfygI2ze7hqqGyTqnNTJ11GxmnB5JzryTqA4iRGnSm2eZcTtd+K1A1AZQN024YvCCVR+BierxanaSwb8DjqWwrxsjKgrf0OQXaGdMOcAohWFOGH2OzFvJCgf04WX61rVXydrig6RZKZuUZPrJpzmY+ZnY1s8T9A8ZTntJGuF3R4R4m877BwazeutYc2MW3YuC1yp41x+sXLIlVPlq4gg+VxxPT9l7rasYvVcbnhJ6e90/zBFIGd26cZL/+HmvB00OMDp8+Y2Lfd9UdVhnRNQihE1/JRT3llQgGxLyYnxZzG8uhsFu0t9XUf+Nwx0ZsIduv9o1t4XYidRp2Ufcaxl57LM+eIzdtE8Xv01xHeUr6+KuF+YDsuyhcOL46oXQRiFMyw7SuIX1T4ujL7QygIhY1evnawzureDcNu1mG0vHPJSoAQhjAZoJULczIRB7iFQXaIQn9Wdktpu2UQ+riJ25PN0/z7mZ3BbBrP6jHhPPjdMSwnFCbHDi9OrisXYL7rFgeqeWVaZclWTOgIDo3Ks+mm2Nf+07FyGa2Y8rHx+3i3xCERFK37/Al/WEyv8zN6jqMxuyyZOx+xMq34HWKvdSxvGLLS0nnixxKiklPeDm1OxH+xElo5nZ+dIoiYJuaBkwSDagNvyr9ccRCqRRSRa8tKC4qUxta29Pyv+XXVTOh2hbeZK7yfayK7hNIxZCITgk+K2VOG0jPPR16sz/9fh6Opm1fB6TbPlwy6XTNyh1oT4wSmHidlXzyrnSdQ4+aXYHbNy+g0irpjfJF7ywoLiN1unCCkzb4qmuhmdG2Gb4q1m5CIXgsogag7p05V11ureKs/jdeA3WyWC7Kxs9XkVR9oo09871R06yBJzojEZFWdYK8ESt0gB/LUxp81Brf5uW3u/p8gdEg05L1CCpBJ3atDimn7Vt1tlt0pTrZLtMSy8OLA6iRL5X/N7dimy5XI2+4Oo+Ie4iRQvETh2qPimBLmOGyodpBCnJJ3I1gM/iSVloWIe3OMSKQ1jFlpGADrVdS/Rm/L3snLO1RFdSfyT8wJFNQxVWEnM57qJlChRbQhO/gx+GpPX6Bor0aAjbbvoWD76erWrUHGybMj+KnbWERXM17G7RlCRExSrKIgk1WsSDV53AHc6HvYeUm6IqEJzu3cT4C07lyklWwOQyXci+kyzRZnET077oBz/fCEGFRVmiZSWncuyOmerymoXsirONXfuXvefCGL2b1t7f1bqZSu/FF0DnWjsQz18xikjrRXC78Sq85PXuVW8763wkmROxz5DceC1s4zboZHEg46omzC39NCFk0+KOC6HRVh9BycRQ3HiD7ffxQ8FhmEYga4QAz09PSgpKcEV96zEoKILAZxXwyqIgnRKcWw1Aw3quOoFq2e0eg67e6ma+oMM2iqb5HlFFiul7X3K2XGj9AVRjT4yP5uqX5AXxzyr37mrplB5FknyBx3Le1bOpap5RnRufhl24jaKEL14+Z1On/4j/s+/r0B3dzdGjBjheG5OL/H8f7/ts1X+ZuoWNTkm27HaydKpkw97lipMj3adg8CuYqg0QB3ixK0cvAyUskVFVQyGEXocFKvlJy9Oy17Xvs2maYoTooqXjK/mYzLyUqqu9ug06NE/JFmI38HsDxg0PUROL/HIeHECs8Mq9FcOQ9bh0S72ehCoNDDz8oj5GbwmD9I101FZpzYLP7eZmXgvaaJDxmveGa8RVV6wWj8nJErMSQp1oDojp1N48lCN7FQhLwSKyvKO+Rzxf/MA6jTYB8myaPYrkTMWelmm8XJfp4ynKgLALSOqX
dSR2/u5IEIIyTXC2vDPDS+Dj45JnmpWWafPk/gY+u4x5XNzXqB4qWx2FhJxTGDe0dJPgxrgw+KQhE2c65YcyakDshM6ujOiysfkjsmcOtvs+Co/c64IEy8p68NOb2+F2/4ghKigK729G7qWxf2IE7aR+DDvcXbq8gpAUaPktEDZvlXvWrudALDzbHeyfAgvcq+N0qmDsDoW12Av+6B4XafOFZz217EKrzZvvKg6s/TbeYq6xf1BCOBPXERpbZH9BaO8L9tGdNhN1LxuxCrIaYGiE/OSj1OltoqScFP1fn1k3D4XdRSLlXOsnYBTXUZSPTcu3BqVnCbfai8iJ4J0nsI/imHFBAi+9OEVv0JDhzhJQoZbcp6GMQsd01X49XvM6SieqDAneLNqGFYDjcqasNvgojr4eB0Y/XCq9hJLr/8T48/6vqZOr385gZtbinovOGWnFfeRxYnYamDo/qOO0QZBZ3b1s1cP2H2VECAcwZ+ETQNlkvQsxJ4gdTE1AkU1JM1OTFhtu62Kaqpl3egULUKcWDH32t/AmNGV+Vt0ZFEmNvObit5tt2eVc1t2LrMsa/N7YQ0a4hkIiZKoLBhOYdBusF0kA79jQWoEig7MeQFkkeImWNxESlRbfQexMNg9565DNdhct0HXI3pG/h5W5RzEmmL3WafrWYlhK5+UoFYPziCJGVHv3NqECmZBLbf/KOueXToCt2egOMl9cjqTrEomOjfkjK1uzq/y+QKz34FdBljV2YbftVW7Z5b32bEaVIPsIyN/R2NGFwp2lw44p/K5g0rXd7qfSjZZc+SQl0RvVtdzWjM1Z4YVf7tZrHSmgpbrGROzETOZbSzO1eOgmV/jCmF2wu2ZKFCixWoSZlXHvGSSpZPsOVTEiZkgS0a6zgf6G2rdoiZfA5VqeKz5HHOlsxIn8nlBhJCKsPK766rdZodedk6WzxWC0EqsWKXv9hOBI+fUIcQr5joc1x5TQff9icrqTPwTtG55WuJZtWoVrrzyShQXF2P06NG45ZZb0N7ennXOnXfeiYKCgqzX1KlTs87p7e3FggULMGrUKAwbNgw333wzjhw5EuiLWFG3qCkTlSO257bbUlsX5twpXgcSP6mBhenV6vvtOLIutA7IqoM4Mf5sltOs+P5OSyVWS05uDnlmC4tdman+tkHKSNzb667QXhH1meKEeGHo/qOOSyLmtmOVotzL9hOqyJOKJFlmiD92HFmHlp3LbMcu0ad7SQ/iaYln9uzZ+MY3voErr7wSp0+fxkMPPYR9+/bh3XffxbBhwwD0C5Tf//732LDhvE9CYWEhRo4cmfn7u9/9Ll588UVs3LgRF198MZYsWYJPPvkEra2tGDRokOtzyEs8N9+2fsBxvxvs6cS84ZuVMDKf52alUMFpicguJbUXMSQsLmKXYjPGjC7MGtuOXYdqcPJgCYrf79fAXjugQ1/tN+6N3X46856dSdfLZnxBUSkrr0s9KvXRavNKAZd4iBX1s1dntQ3RN7htbgn474tUl4LcElLqECy0osSH04anXlw0PC3x7Ny5M+vvDRs2YPTo0WhtbcVXvvKVzPtFRUUoLy+3vEZ3dzeefPJJbN68GTNmzAAAPP3006iqqsLu3btx4403Kj/PV+c2YfDgC718BXTVFEaS2Mp8fbv76Y7sUBEn4m+VTserg+jJgyWYV7cX80r3YvPY6diKqzIiBbBPiW9G/oxAWGZKs412sZmo40LFQZCQgcuR6vl8ZLy0L1VhoZI1O4hIoTiJFzEpD/o7BIri6e7uBoAs6wgAtLS0YPTo0bjsssvwne98B52dnZljra2t+OyzzzBr1qzMe5WVlaitrcXevXuDPE7eEVS8qC4X+Q3RtaL4/Qswr+3bmNf2bew6VINttzwOY0bXAIdat9wpVmbf4vcvsBQuKugc0J3KRWdot1jSMVtPVEUeSTdWdVFlo0uv7d5vfhTznl0686wwL1C8OJX/17/wgPJ1fDvJGoaBxYsX45prrkFtbW3m/YaGBvzlX/4lxo0bh4MHD+J//I//geuvvx6tra0oKipCR0cHCgsLUVqa7VRZVlaGjo4Oy3v19vait7c383dPT4/jszmlqk9ah+7mpOp3+cKLuNFtxZEdZjePnd6/1HPu79L2PhwaX+Lrul5/OzmyR+fvbldeKuLEbXnHaSnHDJd2iBe87MKt6jxvzijtt53p7pdpQUkGVkLldBR78dx3331455138Oqrr2a9f/vtt2f+X1tbiylTpmDcuHH41a9+hblz59pezzAMFBQUWB5btWoVHn74Yb+POqDy50Ll9ZMC3qvQCCJMrDoUq9nP1j39Szz9FpP+Dmzs9tMDZktOmwsGIcqU2A1jFrqKFKe6ZxYnIpW9+T1CvOLmc+Y1mkdX9E8Y7TMX+neihi97+YIFC/DCCy/g17/+NcaMGeN4bkVFBcaNG4f33nsPAFBeXo6+vj50dXVlndfZ2YmysjLLayxfvhzd3d2Z1+HDh7OOW4V+6kyhHhZeoj68OrImBbEkI/61c2AWHZXV0k4Qs2+UiaVO1V7i27RsZTkxh45TnBCv7DiyLhNdIVDtH9wSHIrooCBQnOQfOqMZPQkUwzBw3333YevWrXj55ZdRXW0dySHz8ccf4/Dhw6ioqAAATJ48GUOGDEFzc3PmnGPHjmH//v2YPn265TWKioowYsSIrJfASpBYhczJ/w87HFQXSRdYZtw6G1l8mMWIk5AQxw59dXAmukdGJRW27o5QpwiUxYkQIWZhQnFCgqI6eKv6rYk2oDNM2OyLojqx0J0ugninYczCzD5kuvAkUObPn4+nn34azzzzDIqLi9HR0YGOjg6cOnUKAHDy5EksXboU//f//l98+OGHaGlpwZw5czBq1CjceuutAICSkhLcfffdWLJkCf793/8db731Fv7mb/4GEydOzET1qDL0XcWFLAwc7JMiUuw85u3MsCrryDqEjViD9pOTJQhukT1+nWRlglhT5I7ZnL9Ft6CkKCG6cRrErSL93M4R6LZQ+k28SPILTz4o69f35xypr6/Pen/Dhg248847MWjQIOzbtw+bNm3C8ePHUVFRgeuuuw7PPvssiouLM+c3NTVh8ODBuO2223Dq1CnccMMN2Lhxo1IOFJ2YRYqT/4CVoAkSsWEnkIJmddU1WKp+3vq8/mcQndaJ8WdthYWXtWyz17/V+14Jms1SIH+HoOF1FCYkbOycYGWn/DgsuDq3+SDRIcYz3ZmJPQkUt5xuQ4cOxUsvveR6nQsvvBA//vGP8eMf/9jL7UPHzsmxfvZqwKLBqjhFRkEU6aqtoons9rExYxYnsmOcLkc7QK1zMzvlmZ1z49hzxMoZlpCkYO5f7BI+xgHFSbJQ2SLk9Fn1vpW7GQekYczCzMvpHKe/vWLlvGblKGz3uaBp3eXrOT2TnbNrXGZbeV3bzSTtlJdBd84GQqLEKR25wOzPZ7fUG2YqfJIbuI1nQSxx3CzQhFzYwjrSsnOZkqjwIlL8YrfTrlksqDi5+a04bjv9ysesLBTmpRo/+U3M6MpgqXo/u2N+s7xyWYfE
xY4j63z1T247nIctWGg9STY6lghpQXFANNqkONTKmC0Zbg6tOkyxKmnvVe4TpBMzD/4z73otk6lWRRhYCSSddNUUUmyQnEPXUrXs3BqWlZGWy+TiJ7DCCQoUF5IiTtyie+QOJiznNpWMt1ZY5Tfxukzidp55F2Wnz4fdwdGfhCSdsH3nvC7nyn2BCBm2Cx3mUlJ64BJPHiA6GytTrSxsdDq0uUUA2OF3CcQ8MxM0/2IqCgDgnDiJMnMsIfmA7kmYWxuXRYfKzt5WO7+TdEALSo4Qd9K2oI618vNbWVN0orrUQ1MxSTNmK0oUEYlWu7y37FymlIzQ/FkKlvyHAiVHURUMbt73Ou9tFyFktzwlRMqJ8WfxyfxPlUWDlYVEVxI3QtKEX1Hi1v9YRfXocGqlY2xycIte1QF79DzAKvLIiiAOTEEtOLJIMVtTzFj5p8jvqUTx6ErARkhaCDOvE4VFfhGVbyYFSgyIDbyC4CQYwjbV6hIrgrHbT2PkT4ZZnqtiVfESYhwFjOIh5DwUJ/lFlIEjFCgxolNIeEnbrxMdyd8EOlPWh3EPQvIdFSuKl+VlQoJAgRIDYSlQs9OYlRNcFMLFatMxqx2mrTowL9YQ8fLq7BqmSKH1hOQaVn2Crr4iDJEiX5OOstESddoNhhnnKKqZYKNc7rHar8NttmUX/uwlf4L5M2bBYvUsfsOdCclH5BQFunxRwtqnZ8eRdaifvZrtN2LiyAlGC0rM6BQQcZtU/UYLyRaVofuPahEn8jXcNjgkhES3LKwTWizzGwqUHMBqJmI30DqFfon3o1rq8eub4lVEmHcnthI4Uey6ys6SpIEkiPyWncvoT5YCuMQTE7IZ1e9mXV7vZ3Vfr593Q3fn5ZYyOymdlCxO6hY1UayQvED0EXL79yL2w4zgYXRQdMS15QstKDlA0DT1OiqXH4uLX7Fi/q5exImXdekwtojnPjwkl8nFZR4SLnHuR0cLSkJws6J4HeydruXXCc7qM26VNw5zsNN+PHYOuUEc7mgtIfmE3BdZWXlVHeAJCQotKDESV+4ScW8/ylh8Lsw0x15ETZBQZR1QnJB8xC70mJAooUBJOV5FRlSd1ND9Rx2tGuKYPItT3cnYnDbf7w7ISfF/ISQO4nKWrZ+9OvMi4RLn8g5AgRI7bhVAV5ZWnViJFN3PKK4XZMOxoMng7BA7r9o9C60qJB9QsaKI9hWlWJHbHYVKfkMflIRiHvDtdgROAmEJKFlIqAgTL8LDKqmbmy+KivCgOCFpI6gTvx9EfyDEifiXkT36iNt6AlCgJAKz02qUjT2J6azF93frbOyWZuyy25o/C1iLIDkSh4KDkGxkh1mR0dpOpNTPXu1JNMjXVfmclVChSMkfuMSTQyTJemK1z48uhu4/GmsnI8KPKU4IcUfHhEp2uvdzPdFfdNUUom5RE8P9NZAEp2gKlISx48g6WyGSRF+UMLLSun1P0RkJ64eO8pIFUcvOZZyFEaKImx9KVD4i5jZLkZL7UKAkECeRovMeSVDIVqh8d7N1w/wZLz47VmKEjncEYD1wQ86JEnQCpXsCxii73Ic+KCRR6NxFVayL2zm+WnVgYpfU0sBPQfIF+jWEi980+lY4LcuaLSpcwk0+tKAklKRaN8LA7+xLDBp2n1e5ptXAw5kXEXC5zxqr/klYU6wmBE4RIVbtN4oyZ4hy8qFASQhWDdhqeUKHGTQp4sfcMel4LqcwYbPw6KopDKWD4to3SRMikkdgJ/DN2afjFAh21lOSLAoMwzDifgiv9PT0oKSkBDMq7sHgC/zvoZJEzIO0VaNx8qswdxZu19eJaty8VfbXIDMmUUZuOx+b0XFvK4RAoQmZpIWGMQtdJ09OEy5zmHJYFhSnnca5lGeN7nwop8/2Yfexf0J3dzdGjBjheC4tKAkhioyxSbGciDT2QTboM1/PCbf76GyAnIWRNCL6FtV2bdXfiXYcpkhwmjRQnAwk7mRtFCgJQTROc4UwD75u1pNcQH5Op5TxKsiCQOQvEYiysssQK849VXuJNmGhS3QRkmu07Fw2oA3KOPVPSdzSg8QPBUrCMDdSs9XDrhHbvR9WrhKvmDsgp44sCLLYUe3wdFpzCEkzwqlYnkiZ2xaFCFGFAiVHcBpEncRJlOw4si4jRFRmRLpMqlap8c1lpSKGgjq30jmWkH52HFlnmVAx7okSyS2YByWBmPfmccLOKTaujsBuTw75GXV2VHYiR+4UvczYdDnK0UGWEOv22bJzmeOSql1gAMVN+qAFJYG4DahmC0UY4bpBcNusL8rncytLs6UlyFIPRQkhash756gSt8Nm2ghS3rqW8ShQEkr97NWeKoiYYcQtTsQyT9yIHVbdCMsXhhDijmrbS0KfQtTRtVULBUqCsNtPxuxbYTXriFuYOOHFL0UHLTuX+b5PULFCKwohanhdSqVISR7ybxJG/04fFPQP7kk1H5r9UeQBlHH7aoiEbE7otKJQpBCihtyHMYdQslAZF736+HmFFhR4c0qNg4YxC7OWIrg/iBrCgTjqJRxG8xDiHXO/xtD/3EPX0o6AAuUcSbCgiJ13rRqmaLwUJt6xKjOrhsR8KITEjxyeLNqj7oGP6CPM3ybvBQrXLaPHSURF1dGYnWTlqIGumsIB9UKnMLFa4qH5mhB1zDlUdPTj5s0KiT/MPoXyfkpW5wXBk0BZtWoVrrzyShQXF2P06NG45ZZb0N7ennWOYRhYsWIFKisrMXToUNTX1+PAgQNZ5/T29mLBggUYNWoUhg0bhptvvhlHjhzx/PCnLq+wP+axcOIUMk7P6vW5kjQQJsEaIS/duaXi1o281JOEsiAkF9ExqamfvZqTVR/I/afTOCV8UeTUFzrw5CS7Z88ezJ8/H1deeSVOnz6Nhx56CLNmzcK7776LYcOGAQAeffRRrFmzBhs3bsRll12GH/3oR5g5cyba29tRXFwMAGhsbMSLL76ILVu24OKLL8aSJUtw0003obW1FYMGDfL0BawSlakUTlIqq+qzWvnJmMVIV00hSrU+nX9adi5LpC+GSBJldu6S/Xt0k8RyICTpiLYYxOqRaXvnJggtO9cN6De5bO6M1yAStz2XTp/+I3BM7VqeBMrOnTuz/t6wYQNGjx6N1tZWfOUrX4FhGHj88cfx0EMPYe7cuQCAp556CmVlZXjmmWdwzz33oLu7G08++SQ2b96MGTNmAACefvppVFVVYffu3bjxxhu9PNIArApHiBj5X5k41zdVvaCFSBH/tyJp+TzMzxNlOX/09WpUPnfQ8ljYnucCeamHkT2E+MNvW7WbGFhF9Ylz5XYqhAwFTPZvYFV+KslF/RAozLi7uxsAMHLkSADAwYMH0dHRgVmzZmXOKSoqwrXXXou9e/finnvuQWtrKz777LOscyorK1FbW4u9e/daCpTe3l709vZm/u7p6ck67tViYh6gZCuMXer4sDA/u5PISIrVRxWxrCF/p6iipUrb+2zvJd6XZ1JhdUJ1i5ooTgjRgJdlUlWrZVdNYda5dYuazjvnclk2g9v2BE4EGbd8O8kahoHFixfjmmu
uQW1tLQCgo6MDAFBWVpZ1bllZWeZYR0cHCgsLUVpaanuOmVWrVqGkpCTzqqqq8vvYSoQtBJySlpkjSRhZQghJMy07l2X6QB0+dqqWZnFe3aImLtMi+3ewGpPCGKt8C5T77rsP77zzDv75n/95wLGCgoKsvw3DGPCeGadzli9fju7u7szr8OHDfh87Cz++KzqQf0jxf/k9oeBzXZjIDqlxbPal0pmFHbotW0/Y0RHiD9GOrCweXtpU3aKmAf2tjOivzP+SePC1xLNgwQK88MILeOWVVzBmzJjM++Xl5QD6rSQVFecjbDo7OzNWlfLycvT19aGrqyvLitLZ2Ynp06db3q+oqAhFRUV+HtUSK0/jqMRJ2PdJ0nqpbBYU4qRhzELLFP5h4bQ7cZRlRWFCiD7EUkyp9HdYiAkjl2v7haLo080CLwwx58mCYhgG7rvvPmzduhUvv/wyqqurs45XV1ejvLwczc3Nmff6+vqwZ8+ejPiYPHkyhgwZknXOsWPHsH//fluBopO4w4mB8z+knYXEznxGcgsxu6M4IUQfcv/ZVVOIlp3LtAkHt36WbbmfqMYjTxaU+fPn45lnnsG//uu/ori4OOMzUlJSgqFDh6KgoACNjY1YuXIlJkyYgAkTJmDlypW46KKL8M1vfjNz7t13340lS5bg4osvxsiRI7F06VJMnDgxE9WTBHQ7y3oVRrIaVa0MSbKeCMzPlGuOvoSQZGBeLo0SOastLSnWqOx55hVPFpT169eju7sb9fX1qKioyLyeffbZzDkPPPAAGhsb8b3vfQ9TpkzB0aNHsWvXrkwOFABoamrCLbfcgttuuw1f/vKXcdFFF+HFF1/0nAMlCDoVYBi7ONJiEowkiLW2tfdnXoQQfcTdptJsSREJL63QPW4VGIZhaL1iBPT09KCkpARfvmEFBg++0PPnrcJfrVCxoJjDl1XOk5/BDVVFmoQBWYUoQnut7hd3+XDWRUj46BIOssXE6m8gfpEUN6Ks5bJRGa9On/4j/s+/r0B3dzdGjBjheG7e7cXj5KFtjpQxvyeQc6KY8WopsdunwI0oU7LnO2bPf134iSAghCQflQkk2/N5nMYrc6Tq8c+rW1lyXqDIu/yanaXMobxWWBWsWYSYo37s8phYRQc5iR078lmYCEtGVBYN+T46OxRzqCMhJH+xGz/S3Pbl5euw0mIEyiQbN9u3upvYghSabPVQFRhumyWJLLZmEeLFRJbrhLXfjYrJNa6llrSbgwnJV6xESlrbu9UKRRByWqDY0bb2fsv1MS+EtV+L3X5AQYjbvyIpyB1FWB1EmmdMhBA1rPb2yVfE+GPuG63GpfrZq/HC/56Pkn96UOnaOb/E40ZXTSHa1t6fWcpR9e3wKyBUxJCTX4pXMUVx4h2/IoPihBBCrBFirG3t/dqSY+atQDGHeOoeyM3p6uV/5XPcECLFLu/J0P1HBwgZq/fSjnmm4iQmdM9q0jBLIoQQN3T3hXkrUKwQmx3pdOaxEide72EWKU5Ou7IwofUkGzuRIixoAj8bjtkJHooTQggJh1QJFJ3ocGa1WkZKg5NslAhhYd6I0c81vB4jhERHEttiEp8pl0i1QAlqSZHFhFsMuB9ka4l5WSfqnYFzBTuLhvA90ilOgsLOixBC7EmVQDGb9nVaK/yKEKc0+XbOtBQnzphFipxNNikWKooTQvSR5PaU5GdLOqkSKGHkGlG5lpf7CTFi5whLceIdKyuWkx+K112I2QERQpxgH+GPVAkUXclj/NxTxm3Zxy5Ch+IkGHWLmkLZcdN8DxVxQ+daQvTAwT9/SZVASQp+9tmhOPGGkwAQ2yJY4SfCx+5e7DgJIcQ/Ob2bscpuiDLy4KN7ucdprx+BGBTrZ6+2vT+tJ3rxEx4s/z4q0BpCSDzk2iSAfYW38Tu1FpQodgu2u77X/CUUJ7mB192Nna6Rax0vIX6pn73al+UyF2G79kZqBEocDUD2NZFFid2zMDusfuKYsfgVGebz2ZmRtOAn6WSutg9OQtRJjUARDSCMbaH9Xs/OwiKHHtN6Eg5OnYNdOLLVe04CiOZcQtxJc0ZsihRnUiNQzIQhVFQQ1hMx2MlhxWbC2E2ZnMfNtOyWiE+3OHH7jDzzYsdGSH7AtmxPqgRKXEq9q6bQdjAUIsScsM0p0oR4w27gVxGocmi6F0Hrt9ORN7h0ux6FCiH5AduyNakSKMDAmXAcVhQSPU4O0VbC0UocOl1DZ+ciX6tuUZOr/xQ7N5JG8nEJVUxk5VeaSZ1AsRp4dO7Jo3pcZIo1L+1wWSd67CxV4v3S9r4BnaHdbx5Gp6laPylSCMk/0ixSUidQAPvsrlFgl8Jevj+XdvRj5fha2t7nWtbyUpt8DfPnVJdlvOBH7NCaQtJCmup5WkVKKgWK3WAVBJFXxZxfRf4/w4jjRcdmgS07l4VuWpb9UPzeK02dN0knSdn40y+5/vxRkEqBAui1UlhZRbwmgmNljYYodjQWAkOXkPF7LTeRQhFDSDyopjGQSaNPSmoFCpBdIeIKOzbD5Z3wiUKkhIEuwcNlIJIPxGGR1tFvmC3sVpNZ83eT/06TUBkc9wPETRw7HJP4CUMIRhFV0Lb2fk/iQpwrns38NyG5TFw71Id1PzvRldbgidQLlCDIlelU7SUYuv+o54pkPr9+9mpaUSJGDNpmB9h8/C0oTEi+sOPIusgtCZzIRgsFigbSqm7zja6aQpS292V1evL/802sEJLrDN1/FB99vTruxwiMleWEKShS7oMiDzhuTq0qOUvkv4VPi3wPp/NJfAirQi76pcicGH8WJ8afjfsxCImMHUfWZdptVO1X930Y3WkPLSgKyBXIahlHfk/8X1RiOxMkxUmyMC99JNUJrX72apTC2dRs9jchJA3kgh+KmzOsGbtxIi1LTakXKHZ+I04Vx6/ipVLOHayWc3T4pDSMWRhoh2r5/mYBUvy+vUG0blET/U9IXtKyc1lmQpEkkSI78Oa6dTYuUr3EY0dQITF0/1HLgUxsCEjrSW5i/k29hPuJc3X+9laCQ37PfJwWFUL0YhcmLAhLnKRF8KTegqILs6hpGLMQQ8/9n4IkP7ESLHaWF7vPBMXNKuI1LJmQXMWr9cRsAfGzbBOXdSQtDvu0oJxDCIwwlmFUr8kloNzHyaKShE6FYoXkI37alhAX4uUmTsyWEnG++XPyeWmxdIRF6gWK8AcQVo44rR3i3g1jFsb2DMQ/Tp1knOKEvickDTiJAqclGD/o8nVxGm/sjiVhohMVXOKBvaNsXM+RhGch/rDqPJLQoVCkkHxHdpY14yQogogNc9r6sJ10k9CXREnqLSgyVrlOdF/b7vpc3iGEED34XVpxssKoiA/V+7q5FJgnqUnZKy5qaEE5R1wCQSWDIEkXYonPSzhyw5iFOFV7SepmWIToRPYrkS0iZuHhVYiYEVujEGdoQYkJO2sKKy0R0BeJEP+E4aCqq392uo6V9QRI5zItLSgxoVNBi3VXzp5zFzsx4jVEOR83OCREFbMfShC/EKsQYi/+gZxsBsezBeWVV17BnDlzUFlZiYKCAj
z//PNZx++8804UFBRkvaZOnZp1Tm9vLxYsWIBRo0Zh2LBhuPnmm3HkyJFAX8QvccxSw6q4nHHnJk6/Gzs5QrxhFuhuydTM51r9P05K2/tSaT0BfAiUTz/9FHV1dXjiiSdsz5k9ezaOHTuWeW3fvj3reGNjI7Zt24YtW7bg1VdfxcmTJ3HTTTfhzJkz3r9BAOIc0N0GHtVnk2cLjP7JT+wyE1udRwixx0106HJE9bKEY/VZ8fm0OscKPC/xNDQ0oKGhwfGcoqIilJeXWx7r7u7Gk08+ic2bN2PGjBkAgKeffhpVVVXYvXs3brzxRq+PREhO4ccJVgU63hHijuqyj52DrBmrXe51tcWkWHHiIhQn2ZaWFowePRqXXXYZvvOd76CzszNzrLW1FZ999hlmzZqVea+yshK1tbXYu3ev5fV6e3vR09OT9SIkFwnTaic6xIYxC7ncR1KNmzhQyfSquiwkkIVJ0M1maQ3vR7tAaWhowC9/+Uu8/PLLeOyxx/D666/j+uuvR29vLwCgo6MDhYWFKC0tzfpcWVkZOjo6LK+5atUqlJSUZF5VVVW6HztWgmSx5Yw5dzCLBgoJQsJhx5F1Sn2j3V46Xi0Xou9mf6wX7QLl9ttvx9e+9jXU1tZizpw52LFjB/7rv/4Lv/rVrxw/ZxgGCgoKLI8tX74c3d3dmdfhw4d1P3bsmMWJm/mfg1u64G9NiDfc+lCnPXPi9vug0Okn9DwoFRUVGDduHN577z0AQHl5Ofr6+tDV1ZV1XmdnJ8rKyiyvUVRUhBEjRmS9gpKkDt8ty6yMkzBheGly0VHf7K6RpLpMSJJo2bks1AzhqnDJxh+hC5SPP/4Yhw8fRkVFBQBg8uTJGDJkCJqbmzPnHDt2DPv378f06dPDfpycxmogYsVPPjoFBMUIId5QdUb3akWRhY8OvxOBeT+2NE88PUfxnDx5Er/97W8zfx88eBBvv/02Ro4ciZEjR2LFihX4+te/joqKCnz44Yd48MEHMWrUKNx6660AgJKSEtx9991YsmQJLr74YowcORJLly7FxIkTM1E9aUZ3ZAeJj7DEhEoU0KnaS5i0jZBziLYitoSIA5XIHk44s/EsUN544w1cd911mb8XL14MALjjjjuwfv167Nu3D5s2bcLx48dRUVGB6667Ds8++yyKi4szn2lqasLgwYNx22234dSpU7jhhhuwceNGDBo0SMNXyl0oTogX8s2aUreoCUA6U3qTZCCsKHbOs05YOcp6CTemOBmIZ4FSX18PwzBsj7/00kuu17jwwgvx4x//GD/+8Y+93j7vEBXYjzhhhSZ2DN1/lPWDEBM7jqxTsqJYiRMhNOxEh9t7cfvB5CKp3SwwCdYK0UhUn8W8NilDUz7JddrW3k/rCQmdoH0/hUZ0pFagJAHVFOby+YCzUCHJIAnLL0P3H03EcxCS6+gUJV7677RPPFMtUOK2osR9fxIOSRMFzJlDSDay4NCV8yTIpJETTmtSLVByDQqaZEMhQEhu0VVTiBPjz2oRKap+X2ZLOMWJPRQoESJXSD+VkoNfMskVYZILz0hIlKhG6jj11162KlHt++POZJsUPEfx5BvCqzsshMd3mCo57euUcZBrgz1naYRkI/rlsdtP+76G333U3MaE0vY+9uugBSV0rCqiH4crMSDSg5x4RQ6LrJ+9OuanISS/UbWkEHdSb0GJArf4eZIb5JrVRMA6R4heRF9uZwnh7sZ6oAUF0TmfMmkPiRPRoeaq0CIkTvws5/j1OeTyTj+0oMRIw5iFnsSRVSVnRSaEEH949Q+Uz/WzTON2P/bn2VCg5AiqKZoJcUKYpr2KY0JIMJgF3Dtc4omRoAMEQ9GiI1+WRbi0SNKOXVuOo21QnDhDC0oI6IjasUM2EXrdbZMQGdFR05JC0krUzqxil2QKEzVoQTlH0E7ayRlKdq7yukGg1T1I9HAQJyS/iaJv5aTSG7SgaELF2Wro/qNaBjpd1yHpwyrUnf4oJO1YWb3DEiy0nqhDgRIhQSs8KzYJgmzJk3PzEEKCI3wCnawk7MO9wSUejTitYzJJG4kTsxChMCHEHraPZECBohFWapJLsL4SQpIMBUrI0GpCkoDIIksI6UdVoKu2m9L2PjrBaoYChRBF6EhKSLqgqI8XChSJoAOQVWWWVToHOJJUuMsxSROq1hMug8YLBYoJigjiRC7WDz+bnBGSFtysJGw38UGBEhEMLyNJhvWTpJ2w/bTYxrxDgUJISuB6OiHWFhHmBUomFCgW7DiyTmtaeSpnkiQoVEhaEftPiaRqUbUFjgH+oEDRBPfJIUlFnh2K/1OkEBI+FCbBoECxQVQspqcnYSJvMkmRS0g0qOYr6aopzLz8wjHAP9yLxyMcQMiOI+tQP3t1ICuEUz2Sj+m0dNhdix0oSSNe21ZXTSETsUUMBYoCwjTuVZyw489vvAoJ835MdvXDLScJ93UixB9icgEMbEcq/bvKhoAC9v/BoUBxwa9qZuUkVqjkURF1p372atudh8OyshCSzwhxotqvl7b3WS7v0JoSDRQoinBphwhkC4eqOAiS4M2t7gWxqFBIEzIQFYu52Zoif4btSg90knVAVEAvDlItO5excuY5XsWAX3HipR55FdCnai9hPSXkHELk+0nWJsYHTmL1QwuKJtjZpxOz9UJ3KvyWncsyFhv5/yrP4nQeIWlDXt4x49Ru7JZ5ZLjkEw4UKA60rb0fAFC3qAkARQjpR3a0k98LE1H3/IoUIUror0KId6xEStva+zNjg2Do/qM5uV9XUqFAUUAIFUIEslAI2yJhFsZuszUnS0oS1shFp852RXIJ0eaEUKlb1MQ6HDIUKIT4JK5BXsWUbBf9Q0iaMVtBnMS8k6OsyrIPCQ6dZAnJMcJ0no0C89IpIUnEqu2YAydYh8OFAoWQHERFpNiJE/qhkLThlvzQCqt2QkfYaKFAISRPsdtDJAlOfLSikCiR20Fpe19W7hI7nJZ3SDRQoBBCCMlb7ERwEEuiECl0kg0XzwLllVdewZw5c1BZWYmCggI8//zzWccNw8CKFStQWVmJoUOHor6+HgcOHMg6p7e3FwsWLMCoUaMwbNgw3HzzzThy5EigL0JI2vDiiyISCCYxVL5+9mrUz15NawoJBSEivCbedBMwtKSEj2eB8umnn6Kurg5PPPGE5fFHH30Ua9aswRNPPIHXX38d5eXlmDlzJk6cOJE5p7GxEdu2bcOWLVvw6quv4uTJk7jppptw5swZ/9+EkBRiJTySKkSskDt5dvgkDIIIXyeRkittLJcpMAzD8P3hggJs27YNt9xyC4B+60llZSUaGxuxbFn/j9fb24uysjKsXr0a99xzD7q7u/G5z30Omzdvxu233w4A+Oijj1BVVYXt27fjxhtvdL1vT08PSkpK0N3djREjRvh9fELynvrZqxPbkVo5Lib1WUluYxYppe19npZ4kpA/KF/wMn5r9UE5ePAgOjo6MGvWrMx7RUVFuPbaa7F3714AQGtrKz777LOscyorK1FbW5s5hxCihyR3qGZTe5KfleQPfnenZ/2MHq0Cp
aOjAwBQVlaW9X5ZWVnmWEdHBwoLC1FaWmp7jpne3l709PRkvQghuY3sYMjOn4SF2XridXO/JES9pZVQongKCgqy/jYMY8B7ZpzOWbVqFUpKSjKvqqoqbc9KCCGEkOShVaCUl5cDwABLSGdnZ8aqUl5ejr6+PnR1ddmeY2b58uXo7u7OvA4fPqzzsQnJO+oWNWX5ePhJVBUGImJH/J+QqKEzdu6gVaBUV1ejvLwczc3Nmff6+vqwZ88eTJ8+HQAwefJkDBkyJOucY8eOYf/+/ZlzzBQVFWHEiBFZL0KIM101hVmCIG7MgkneoZmQOLBb5tlxZF3mReLD82aBJ0+exG9/+9vM3wcPHsTbb7+NkSNHYuzYsWhsbMTKlSsxYcIETJgwAStXrsRFF12Eb37zmwCAkpIS3H333ViyZAkuvvhijBw5EkuXLsXEiRMxY8YMfd+MEOK683FU2IkkihMSNm5Zi502DCTx4lmgvPHGG7juuusyfy9evBgAcMcdd2Djxo144IEHcOrUKXzve99DV1cXrr76auzatQvFxcWZzzQ1NWHw4MG47bbbcOrUKdxwww3YuHEjBg0apOErEULa1t6f6MRnFCaEEDcC5UGJC+ZBIcQdIVBK2/tiFQR1i5oGWHEoUEiUyGLdzqIoW1G4tBMeseVBIYQkB2HajluczLzrNRz66nljLcUJIUQFChRC8pg4NzMTs9Zdh2pQ/D67GpIb0HqSHDz7oBBCiBvy8hLahwGId5mJEJJ7cFpDCNFKljghJEa8OorTepIsaEEhhGgjyZFDJJ241UnhHEtxkjxoQSGEhA6Xd0jUCGFizOjCpiVrLM+hOEk2tKAQQrQhnHLlxGwUJyRpMDFbbsA8KIQQQvKSukVN2LRkDea1fRsFu0sBDPSNohUlWpgHhRBCCPFAw5iFcT8CMUGBQgghJC9pW3s/NndNx8mDJa7n2m0cSOKDPiiEEELyCjlyZ9OSvdiKqzJ/mzfQpDBJLrSgEEIIyUtOjD8LABhe3e16Lp25kwcFCiGEkNRglUCQ4iSZUKAQQgjJK5z2oOqqKURXTWGET0P8QoFCCCEk71DZKJOWk2RDgUIIISQvEbtozxrbnvW+vMxDkZJcKFAIIYTkNfNK9wLoFybcxDJ3YJgxIYSQvKRt7f1Y2vYamn8xFcDAEGMvyNs3ALS8RAEFCiGEkLykPx/KVO3XpTiJBgoUQggheUlpe19WxE6Q5R2KkuihDwohhJC8xCwqzOHF5mUbkiwoUAghhKQKWbhQpCQXChRCCCF5i5wPpW3t/fhk/qe4dN1j+GT+p0zalnAKDMMw4n4Ir/T09KCkpATd3d0YMWJE3I9DCCEkR2g7VIXNXdMBIBPdU9reRx+TiPAyftNJlhBCSGq49fnGzP+Lz/1LK0oyoUAhhBCSGj5YuORc+HE/KinxSTxQoBBCCEkVFCW5AZ1kCSGEEJI4KFAIIYQQkjgoUAghhBCSOChQCCGEEJI4KFAIIYQQkjgoUAghhBCSOChQCCGEEJI4KFAIIYQQkjgoUAghhBCSOChQCCGEEJI4KFAIIYQQkjgoUAghhBCSOChQCCGEEJI4KFAIIYQQkji0C5QVK1agoKAg61VeXp45bhgGVqxYgcrKSgwdOhT19fU4cOCA7scghBBCQqHtUFXmRcJjcBgXveKKK7B79+7M34MGDcr8/9FHH8WaNWuwceNGXHbZZfjRj36EmTNnor29HcXFxWE8DiGEEBIIipHoCUWgDB48OMtqIjAMA48//jgeeughzJ07FwDw1FNPoaysDM888wzuueeeMB6HEEII8YQQJHVjDw8QJ3VjD8fxSKkjFB+U9957D5WVlaiursY3vvENfPDBBwCAgwcPoqOjA7NmzcqcW1RUhGuvvRZ79+61vV5vby96enqyXoQQQkjYUJzEh3aBcvXVV2PTpk146aWX8POf/xwdHR2YPn06Pv74Y3R0dAAAysrKsj5TVlaWOWbFqlWrUFJSknlVVdHURrJxWhOW3+PaMSHEDfYPyUD7Ek9DQ0Pm/xMnTsS0adMwfvx4PPXUU5g6dSoAoKCgIOszhmEMeE9m+fLlWLx4cebvnp4eihQF5EaWJtXvJlLk91TKRfU83cgmZkIISRuh+KDIDBs2DBMnTsR7772HW265BQDQ0dGBioqKzDmdnZ0DrCoyRUVFKCoqCvtR84Ygg3GU2A3AXoVVWLMds+VF9Xn83sfu2kn87QjJR9z6ErbDaAldoPT29uI///M/8ed//ueorq5GeXk5mpub8aUvfQkA0NfXhz179mD16tVhP0oqcGpgSZ2RB3nmoOLEbn3Z7rqq69F+OjqVcnC6JyGE5BPaBcrSpUsxZ84cjB07Fp2dnfjRj36Enp4e3HHHHSgoKEBjYyNWrlyJCRMmYMKECVi5ciUuuugifPOb39T9KKkhDeulUX1Hr/fx+1xBvo/Xz1LQEDKQNPSbuY52gXLkyBH81V/9Ff7whz/gc5/7HKZOnYrXXnsN48aNAwA88MADOHXqFL73ve+hq6sLV199NXbt2sUcKB7QbTWQMYfUhTW4sXMghBDiRIFhGEbcD+GVnp4elJSUoLu7GyNGjIj7cSIhiQO6H/GSxO+RJmhNIWQgXvoltqFgeBm/Q/dBIf5J+mDuJQqGJAOV34IdMCEkCVCgRIguB8skYfWsHOByG0YNkXzEq0M8iR8KFISTKVCl0ufr4M4Gn/vka90k6UQ1Ks8NtoHg7Dv8ReVzc9oH5dX9lRhe3J8M12vYZhDywfJBiF/YSZNcQWefzHofDPFbnDxxFtfUfpQuH5QoxQGFCEkzzMlC0giXPtXRNUbmjUAhhESPW8g6IXHgtBMx0UMU5RrKbsaEEMJNGUncsP6FQ1TlSgsKIYQQ36gOVlFa1Gg50U8c5UmBQgghxBdeBi07H46gEWN2PlHy/3UNrmnzQ4lb5FGgEEIIiQTVAc8sOvx8jngjiWVHgUII0Yo8oKRpthk1dgOKzjJ3+x2jSHaWpIEzX+tzkspYhgKFEKIN0YHna0eeFJwGFL+DjdNvlralDSvy9fsnVZwAFCiEEE3kawceF1EPHHa+IOL9tOe/yQeRlmQxYgUFCiEkELneacdBrgwUulLE5wv5IFJyCQoUQgjRSFoH77SQi5akXK2TFCiEEF/kSuccBbk6AJBgcFPNcKFAIYQQD1CMECeSEMGWL3WUAoUQQs7BGTHRhbkuhV2P8kWUyFCgEEJ8YZ4pBu0gkyoE8rHjJ9Gj23clDfWSAoUQEgidacStCFO4pKGTJ8mD9U4NChRCSKJhZ05IOrkg7gcghBBCCDFDgUIIIYSQxEGBQgghhJDEQYFCCCGEkMRBgUIIIYSQxEGBQgghhJDEQYFCCCGEkMRBgUIIIYSQxEGBQgghhJDEQYFCCCGEkMRBgUIIIYSQxEGBQgghhJDEQYFCCCGEkMRBgUIIIYSQxEGBQgghhJDEQYFCCCGEkMRBgUIIIYSQxEGBQgghhJDEQYFCCCGEkMQRq0D56U9/iurqalx44YWYPHky/uM//iPOxyGEEEJIQohNoDz7
7LNobGzEQw89hLfeegt//ud/joaGBhw6dCiuRyKEEEJIQohNoKxZswZ33303/vZv/xZf/OIX8fjjj6Oqqgrr16+P65EIIYQQkhAGx3HTvr4+tLa24u/+7u+y3p81axb27t074Pze3l709vZm/u7u7gYAfHrybLgPSgghhBBtiHHbMAzXc2MRKH/4wx9w5swZlJWVZb1fVlaGjo6OAeevWrUKDz/88ID3b5w68FxCCCGEJJsTJ06gpKTE8ZxYBIqgoKAg62/DMAa8BwDLly/H4sWLM38fP34c48aNw6FDh1y/YFro6elBVVUVDh8+jBEjRsT9OImAZTIQlslAWCYDYZkMhGUyED9lYhgGTpw4gcrKStdzYxEoo0aNwqBBgwZYSzo7OwdYVQCgqKgIRUVFA94vKSlhRTExYsQIlokJlslAWCYDYZkMhGUyEJbJQLyWiaphIRYn2cLCQkyePBnNzc1Z7zc3N2P69OlxPBIhhBBCEkRsSzyLFy/GvHnzMGXKFEybNg0/+9nPcOjQIdx7771xPRIhhBBCEkJsAuX222/Hxx9/jB/+8Ic4duwYamtrsX37dowbN871s0VFRfjBD35gueyTVlgmA2GZDIRlMhCWyUBYJgNhmQwk7DIpMFRifQghhBBCIoR78RBCCCEkcVCgEEIIISRxUKAQQgghJHFQoBBCCCEkceSkQPnpT3+K6upqXHjhhZg8eTL+4z/+I+5HCo1XXnkFc+bMQWVlJQoKCvD8889nHTcMAytWrEBlZSWGDh2K+vp6HDhwIOuc3t5eLFiwAKNGjcKwYcNw880348iRIxF+C32sWrUKV155JYqLizF69GjccsstaG9vzzonbWWyfv16TJo0KZMsadq0adixY0fmeNrKw4pVq1ahoKAAjY2NmffSVi4rVqxAQUFB1qu8vDxzPG3lITh69Cj+5m/+BhdffDEuuugi/Omf/ilaW1szx9NWLn/yJ38yoJ4UFBRg/vz5ACIuDyPH2LJlizFkyBDj5z//ufHuu+8aixYtMoYNG2b87ne/i/vRQmH79u3GQw89ZDz33HMGAGPbtm1Zxx955BGjuLjYeO6554x9+/YZt99+u1FRUWH09PRkzrn33nuNSy65xGhubjbefPNN47rrrjPq6uqM06dPR/xtgnPjjTcaGzZsMPbv32+8/fbbxte+9jVj7NixxsmTJzPnpK1MXnjhBeNXv/qV0d7ebrS3txsPPvigMWTIEGP//v2GYaSvPMz85je/Mf7kT/7EmDRpkrFo0aLM+2krlx/84AfGFVdcYRw7dizz6uzszBxPW3kYhmF88sknxrhx44w777zT+H//7/8ZBw8eNHbv3m389re/zZyTtnLp7OzMqiPNzc0GAOPXv/61YRjRlkfOCZSrrrrKuPfee7Pe+8IXvmD83d/9XUxPFB1mgXL27FmjvLzceOSRRzLv/fGPfzRKSkqM//k//6dhGIZx/PhxY8iQIcaWLVsy5xw9etS44IILjJ07d0b27GHR2dlpADD27NljGAbLRFBaWmr8r//1v1JfHidOnDAmTJhgNDc3G9dee21GoKSxXH7wgx8YdXV1lsfSWB6GYRjLli0zrrnmGtvjaS0XmUWLFhnjx483zp49G3l55NQST19fH1pbWzFr1qys92fNmoW9e/fG9FTxcfDgQXR0dGSVR1FREa699tpMebS2tuKzzz7LOqeyshK1tbV5UWbd3d0AgJEjRwJgmZw5cwZbtmzBp59+imnTpqW+PObPn4+vfe1rmDFjRtb7aS2X9957D5WVlaiursY3vvENfPDBBwDSWx4vvPACpkyZgr/8y7/E6NGj8aUvfQk///nPM8fTWi6Cvr4+PP3007jrrrtQUFAQeXnklED5wx/+gDNnzgzYULCsrGzAxoNpQHxnp/Lo6OhAYWEhSktLbc/JVQzDwOLFi3HNNdegtrYWQHrLZN++fRg+fDiKiopw7733Ytu2bbj88stTWx4AsGXLFrz55ptYtWrVgGNpLJerr74amzZtwksvvYSf//zn6OjowPTp0/Hxxx+nsjwA4IMPPsD69esxYcIEvPTSS7j33nuxcOFCbNq0CUA664nM888/j+PHj+POO+8EEH15xJbqPggFBQVZfxuGMeC9NOGnPPKhzO677z688847ePXVVwccS1uZ1NTU4O2338bx48fx3HPP4Y477sCePXsyx9NWHocPH8aiRYuwa9cuXHjhhbbnpalcGhoaMv+fOHEipk2bhvHjx+Opp57C1KlTAaSrPADg7NmzmDJlClauXAkA+NKXvoQDBw5g/fr1+Na3vpU5L23lInjyySfR0NCAysrKrPejKo+csqCMGjUKgwYNGqDCOjs7Byi6NCA88J3Ko7y8HH19fejq6rI9JxdZsGABXnjhBfz617/GmDFjMu+ntUwKCwvx+c9/HlOmTMGqVatQV1eHtWvXprY8Wltb0dnZicmTJ2Pw4MEYPHgw9uzZg3Xr1mHw4MGZ75W2cpEZNmwYJk6ciPfeey+19aSiogKXX3551ntf/OIXcejQIQDp7U8A4He/+x12796Nv/3bv828F3V55JRAKSwsxOTJk9Hc3Jz1fnNzM6ZPnx7TU8VHdXU1ysvLs8qjr68Pe/bsyZTH5MmTMWTIkKxzjh07hv379+dkmRmGgfvuuw9bt27Fyy+/jOrq6qzjaSwTKwzDQG9vb2rL44YbbsC+ffvw9ttvZ15TpkzBX//1X+Ptt9/GpZdemspykent7cV//ud/oqKiIrX15Mtf/vKANAX/9V//ldm0Nq3lAgAbNmzA6NGj8bWvfS3zXuTl4cerN05EmPGTTz5pvPvuu0ZjY6MxbNgw48MPP4z70ULhxIkTxltvvWW89dZbBgBjzZo1xltvvZUJq37kkUeMkpISY+vWrca+ffuMv/qrv7IM+RozZoyxe/du48033zSuv/76nA2B++53v2uUlJQYLS0tWaFw//3f/505J21lsnz5cuOVV14xDh48aLzzzjvGgw8+aFxwwQXGrl27DMNIX3nYIUfxGEb6ymXJkiVGS0uL8cEHHxivvfaacdNNNxnFxcWZvjNt5WEY/SHogwcPNv7+7//eeO+994xf/vKXxkUXXWQ8/fTTmXPSWC5nzpwxxo4dayxbtmzAsSjLI+cEimEYxk9+8hNj3LhxRmFhofFnf/ZnmRDTfOTXv/61AWDA64477jAMoz8M7gc/+IFRXl5uFBUVGV/5yleMffv2ZV3j1KlTxn333WeMHDnSGDp0qHHTTTcZhw4diuHbBMeqLAAYGzZsyJyTtjK56667Mu3hc5/7nHHDDTdkxIlhpK887DALlLSVi8hXMWTIEKOystKYO3euceDAgczxtJWH4MUXXzRqa2uNoqIi4wtf+ILxs5/9LOt4GsvlpZdeMgAY7e3tA45FWR4FhmEYnm0/hBBCCCEhklM+KIQQQghJBxQohBBCCEkcFCiEEEIISRwUKIQQQghJHBQohBBCCEkcFCiEEEIISRwUKIQQQghJHBQohBBCCEkcFCiEEEIISRwUKIQQQghJHBQohBBCCEk
cFCiEEEIISRz/PxbMLoEQjFtlAAAAAElFTkSuQmCC", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "soilmvar = gfs.variables['Volumetric_Soil_Moisture_Content_depth_below_surface_layer']\n", "# flip the data in latitude so North Hemisphere is up on the plot\n", "soilm = soilmvar[0,0,::-1,:] \n", "print('shape=%s, type=%s, missing_value=%s' % \\\n", " (soilm.shape, type(soilm), soilmvar.missing_value))\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", "cs = plt.contourf(soilm)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 32, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "source": [ "##Packed integer data\n", "There is a similar feature for variables with `scale_factor` and `add_offset` attributes.\n", "\n", "- short integer data will automatically be returned as float data, with the scale and offset applied. " ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 32, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Dealing with dates and times\n", "- time variables usually measure relative to a fixed date using a certain calendar, with units specified like ***`hours since YY:MM:DD hh-mm-ss`***.\n", "- **`num2date`** and **`date2num`** convenience functions provided to convert between these numeric time coordinates and handy python datetime instances. \n", "- **`date2index`** finds the time index corresponding to a datetime instance." ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 34 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "name of time dimension = time1\n", "units = Hour since 2023-05-25T12:00:00Z, values = [ 0. 3. 6. 9. 12. 15. 18. 21. 24. 27. 30. 33. 36. 39.\n", " 42. 45. 48. 51. 54. 57. 60. 63. 66. 69. 72. 75. 78. 81.\n", " 84. 87. 90. 93. 96. 99. 102. 105. 108. 111. 114. 117. 120. 123.\n", " 126. 129. 132. 135. 138. 141. 144. 147. 150. 153. 156. 159. 162. 165.\n", " 168. 171. 174. 177. 180. 183. 186. 189. 192. 195. 198. 201. 204. 207.\n", " 210. 213. 216. 219. 222. 225. 228. 231. 234. 237. 240. 243. 246. 249.\n", " 252. 255. 258. 261. 264. 267. 270. 273. 276. 279. 282. 285. 288. 291.\n", " 294. 297. 300. 303. 306. 309. 312. 315. 318. 321. 324. 327. 330. 333.\n", " 336. 339. 342. 345. 348. 351. 354. 357. 360. 363. 366. 369. 372. 375.\n", " 378. 381. 384.]\n" ] } ], "source": [ "from netCDF4 import num2date, date2num, date2index\n", "timedim = sfctmp.dimensions[0] # time dim name\n", "print('name of time dimension = %s' % timedim)\n", "times = gfs.variables[timedim] # time coord var\n", "print('units = %s, values = %s' % (times.units, times[:]))" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 35, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['2023-05-25 12:00:00', '2023-05-25 15:00:00', '2023-05-25 18:00:00', '2023-05-25 21:00:00', '2023-05-26 00:00:00', '2023-05-26 03:00:00', '2023-05-26 06:00:00', '2023-05-26 09:00:00', '2023-05-26 12:00:00', '2023-05-26 15:00:00']\n" ] } ], "source": [ "dates = num2date(times[:], times.units)\n", "print([date.strftime('%Y-%m-%d %H:%M:%S') for date in dates[:10]]) # print only first ten..." 
] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 35, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "###Get index associated with a specified date, extract forecast data for that date." ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 37 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2023-05-28 15:57:27.760935\n", "index = 25, date = 2023-05-28 15:00:00\n" ] } ], "source": [ "from datetime import datetime, timedelta\n", "date = datetime.now() + timedelta(days=3)\n", "print(date)\n", "ntime = date2index(date,times,select='nearest')\n", "print('index = %s, date = %s' % (ntime, dates[ntime]))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 38 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "###Get temp forecast for Boulder (near 40N, -105W)\n", "- use function **`getcloses_ij`** we created before..." ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 39, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Boulder forecast valid at 2023-05-28 15:00:00 UTC = 297.6 K\n" ] } ], "source": [ "lats, lons = gfs.variables['lat'][:], gfs.variables['lon'][:]\n", "# lats, lons are 1-d. Make them 2-d using numpy.meshgrid.\n", "lons, lats = np.meshgrid(lons,lats)\n", "j, i = getclosest_ij(lats,lons,40,-105)\n", "fcst_temp = sfctmp[ntime,j,i]\n", "print('Boulder forecast valid at %s UTC = %5.1f %s' % \\\n", " (dates[ntime],fcst_temp,sfctmp.units))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 39, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "##Simple multi-file aggregation\n", "\n", "What if you have a bunch of netcdf files, each with data for a different year, and you want to access all the data as if it were in one file?" 
] }, { "cell_type": "code", "execution_count": 21, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 41 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "-rw-rw-r-- 1 8985332 May 17 15:27 data/prmsl.2000.nc\r\n", "-rw-rw-r-- 1 8968789 May 17 15:27 data/prmsl.2001.nc\r\n", "-rw-rw-r-- 1 8972796 May 17 15:27 data/prmsl.2002.nc\r\n", "-rw-rw-r-- 1 8974435 May 17 15:27 data/prmsl.2003.nc\r\n", "-rw-rw-r-- 1 8997438 May 17 15:27 data/prmsl.2004.nc\r\n", "-rw-rw-r-- 1 8976678 May 17 15:27 data/prmsl.2005.nc\r\n", "-rw-rw-r-- 1 8969714 May 17 15:27 data/prmsl.2006.nc\r\n", "-rw-rw-r-- 1 8974360 May 17 15:27 data/prmsl.2007.nc\r\n", "-rw-rw-r-- 1 8994260 May 17 15:27 data/prmsl.2008.nc\r\n", "-rw-rw-r-- 1 8974678 May 17 15:27 data/prmsl.2009.nc\r\n", "-rw-rw-r-- 1 8970732 May 17 15:27 data/prmsl.2010.nc\r\n", "-rw-rw-r-- 1 8976285 May 17 15:27 data/prmsl.2011.nc\r\n" ] } ], "source": [ "!ls -ldgG data/prmsl*nc" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 42 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "**`MFDataset`** uses file globbing to patch together all the files into one big Dataset.\n", "You can also pass it a list of specific files.\n", "\n", "Limitations:\n", "\n", "- It can only aggregate the data along the leftmost dimension of each variable.\n", "- only works with `NETCDF3`, or `NETCDF4_CLASSIC` formatted files.\n", "- kind of slow." ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 43, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "starting date = 2000-01-01 00:00:00\n", "ending date = 2011-12-31 00:00:00\n", "times shape = 4383\n", "prmsl dimensions = ('time', 'lat', 'lon'), prmsl shape = (4383, 91, 180)\n" ] } ], "source": [ "mf = netCDF4.MFDataset('data/prmsl*nc')\n", "times = mf.variables['time']\n", "dates = num2date(times[:],times.units)\n", "print('starting date = %s' % dates[0])\n", "print('ending date = %s'% dates[-1])\n", "prmsl = mf.variables['prmsl']\n", "print('times shape = %s' % times.shape)\n", "print('prmsl dimensions = %s, prmsl shape = %s' %\\\n", " (prmsl.dimensions, prmsl.shape))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 43, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Closing your netCDF file\n", "\n", "It's good to close netCDF files, but not actually necessary when Dataset is open for read access only.\n" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 45 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [], "source": [ "f.close()\n", "gfs.close()" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 45, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "-" } }, "source": [ "##That's it!\n", "\n", "Now you're ready to start exploring your data interactively.\n", "\n", "To be continued with **Writing netCDF data** ...." 
] } ], "metadata": { "celltoolbar": "Raw Cell Format", "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 1 } netcdf4-python-1.7.4rel/examples/subset.py000066400000000000000000000012071512661643000205750ustar00rootroot00000000000000# use 'orthogonal indexing' feature to subselect data over CONUS. import netCDF4 import numpy as np import matplotlib.pyplot as plt # use real data from CFS reanalysis. # note: we're reading GRIB2 data! URL="http://nomads.ncdc.noaa.gov/thredds/dodsC/modeldata/cmd_flxf/2010/201007/20100701/flxf00.gdas.2010070100.grb2" nc = netCDF4.Dataset(URL) lats = nc.variables['lat'][:]; lons = nc.variables['lon'][:] latselect = np.logical_and(lats>25,lats<50) lonselect = np.logical_and(lons>230,lons<305) data = nc.variables['Soil_moisture_content'][0,0,latselect,lonselect] plt.contourf(data[::-1]) # flip latitudes so they go south -> north plt.show() netcdf4-python-1.7.4rel/examples/test_stringarr.py000066400000000000000000000035551512661643000223520ustar00rootroot00000000000000from netCDF4 import Dataset, stringtochar, chartostring import random, numpy from typing import Final # test utilities for converting arrays of fixed-length strings # to arrays of characters (with an extra dimension), and vice-versa. # netCDF does not have a fixed-length string data-type (only characters # and variable length strings). The convenience function chartostring # converts an array of characters to an array of fixed-length strings. # The array of fixed length strings has one less dimension, and the # length of the strings is equal to the rightmost dimension of the # array of characters. The convenience function stringtochar goes # the other way, converting an array of fixed-length strings to an # array of characters with an extra dimension (the number of characters # per string) appended on the right. FILE_NAME = 'tst_stringarr.nc' FILE_FORMAT: Final = 'NETCDF4_CLASSIC' chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT) n2 = 10; nchar = 12; nrecs = 4 nc.createDimension('n1',None) nc.createDimension('n2',n2) nc.createDimension('nchar',nchar) v = nc.createVariable('strings','S1',('n1','n2','nchar')) for nrec in range(nrecs): data = numpy.empty((n2,),'S'+repr(nchar)) # fill data with random nchar character strings for n in range(n2): data[n] = ''.join([random.choice(chars) for i in range(nchar)]) print(nrec,data) # convert data to array of characters with an extra dimension # (the number of characters per string) added to the right. datac = stringtochar(data) v[nrec] = datac nc.close() nc = Dataset(FILE_NAME) v = nc.variables['strings'] print(v.shape, v.dtype) for nrec in range(nrecs): # read character array back, convert to an array of strings # of length equal to the rightmost dimension. print(nrec, chartostring(v[nrec])) nc.close() netcdf4-python-1.7.4rel/examples/threaded_read.py000066400000000000000000000032471512661643000220510ustar00rootroot00000000000000from netCDF4 import Dataset from numpy.testing import assert_array_equal, assert_array_almost_equal import numpy as np import threading import queue import time # demonstrate reading of different files from different threads. 
# Releasing the Global Interpreter Lock (GIL) when calling the # netcdf C library for read operations speeds up the reads # when threads are used (issue 369). # Test script contributed by Ryan May of Unidata. # Make some files nfiles = 4 fnames = []; datal = [] for i in range(nfiles): fname = 'test%d.nc' % i fnames.append(fname) nc = Dataset(fname, 'w') data = np.random.randn(500, 500, 500) datal.append(data) nc.createDimension('x', 500) nc.createDimension('y', 500) nc.createDimension('z', 500) var = nc.createVariable('grid', 'f', ('x', 'y', 'z')) var[:] = data nc.close() # Queue them up items: queue.Queue = queue.Queue() for data,fname in zip(datal,fnames): items.put(fname) # Function for threads to use def get_data(serial=None): if serial is None: # if not called from a thread fname = items.get() else: fname = fnames[serial] nc = Dataset(fname, 'r') data2 = nc.variables['grid'][:] # make sure the data is correct #assert_array_almost_equal(data2,datal[int(fname[4])]) nc.close() if serial is None: items.task_done() # Time it (no threading). start = time.time() for i in range(nfiles): get_data(serial=i) end = time.time() print('no threads, time = ',end - start) # with threading. start = time.time() for i in range(nfiles): threading.Thread(target=get_data).start() items.join() end = time.time() print('with threading, time = ',end - start) netcdf4-python-1.7.4rel/examples/tutorial.py000066400000000000000000000333131512661643000211360ustar00rootroot00000000000000from typing import Literal from netCDF4 import Dataset # code from tutorial. # create a file (Dataset object, also the root group). rootgrp = Dataset('test.nc', 'w', format='NETCDF4') print(rootgrp.file_format) rootgrp.close() # create some groups. rootgrp = Dataset('test.nc', 'a') fcstgrp = rootgrp.createGroup('forecasts') analgrp = rootgrp.createGroup('analyses') fcstgrp1 = rootgrp.createGroup('/forecasts/model1') fcstgrp2 = rootgrp.createGroup('/forecasts/model2') # walk the group tree using a Python generator. def walktree(top): yield top.groups.values() for value in top.groups.values(): yield from walktree(value) print(rootgrp) for children in walktree(rootgrp): for child in children: print(child) # dimensions. level_dim = rootgrp.createDimension('level', None) time_dim = rootgrp.createDimension('time', None) lat_dim = rootgrp.createDimension('lat', 73) lon_dim = rootgrp.createDimension('lon', 144) print(rootgrp.dimensions) print(len(lon_dim)) print(lon_dim.isunlimited()) print(time_dim.isunlimited()) for dimobj in rootgrp.dimensions.values(): print(dimobj) print(time_dim) # variables. times = rootgrp.createVariable('time','f8',('time',)) levels = rootgrp.createVariable('level','i4',('level',)) latitudes = rootgrp.createVariable('lat','f4',('lat',)) longitudes = rootgrp.createVariable('lon','f4',('lon',)) # 2 unlimited dimensions. #temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',)) # this makes the compression 'lossy' (preserving a precision of 1/1000) # try it and see how much smaller the file gets. temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',),least_significant_digit=3) print(temp) # create variable in a group using a path. temp = rootgrp.createVariable('/forecasts/model1/temp','f4',('time','level','lat','lon',)) print(rootgrp['/forecasts/model1']) # print the Group instance print(rootgrp['/forecasts/model1/temp']) # print the Variable instance # attributes. 
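# Assigning to an instance attribute of a Dataset, Group or Variable creates a
# netCDF attribute in the file. An equivalent explicit form (shown commented
# out as an illustrative aside) is the setncattr method:
# rootgrp.setncattr('description', 'bogus example script')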
import time rootgrp.description = 'bogus example script' rootgrp.history = 'Created ' + time.ctime(time.time()) rootgrp.source = 'netCDF4 python module tutorial' latitudes.units = 'degrees north' longitudes.units = 'degrees east' levels.units = 'hPa' temp.units = 'K' times.units = 'hours since 0001-01-01 00:00:00.0' calendar: Literal['gregorian'] = 'gregorian' times.calendar = calendar for name in rootgrp.ncattrs(): print('Global attr', name, '=', getattr(rootgrp,name)) print(rootgrp) print(rootgrp.__dict__) print(rootgrp.variables) import numpy as np # no unlimited dimension, just assign to slice. lats = np.arange(-90,91,2.5) lons = np.arange(-180,180,2.5) latitudes[:] = lats longitudes[:] = lons print('latitudes =\n',latitudes[:]) print('longitudes =\n',longitudes[:]) # append along two unlimited dimensions by assigning to slice. nlats = len(rootgrp.dimensions['lat']) nlons = len(rootgrp.dimensions['lon']) print('temp shape before adding data = ',temp.shape) from numpy.random.mtrand import uniform # random number generator. temp[0:5,0:10,:,:] = uniform(size=(5,10,nlats,nlons)) print('temp shape after adding data = ',temp.shape) # levels have grown, but no values yet assigned. print('levels shape after adding pressure data = ',levels.shape) # assign values to levels dimension variable. levels[:] = [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.] # fancy slicing tempdat = temp[::2, [1,3,6], lats>0, lons>0] print('shape of fancy temp slice = ',tempdat.shape) print(temp[0, 0, [0,1,2,3], [0,1,2,3]].shape) # fill in times. from datetime import datetime, timedelta from netCDF4 import num2date, date2num, date2index dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])] times[:] = date2num(dates,units=times.units,calendar=times.calendar) print("time values (in units {}):\n{}".format(times.units, times[:])) dates_array = num2date(times[:],units=times.units,calendar=times.calendar) print("dates corresponding to time values:\n{}".format(dates_array)) rootgrp.close() # create a series of netCDF files with a variable sharing # the same unlimited dimension. for nfile in range(10): nc = Dataset('mftest'+repr(nfile)+'.nc','w',format='NETCDF4_CLASSIC') nc.createDimension('x',None) x_var = nc.createVariable('x','i',('x',)) x_var[0:10] = np.arange(nfile*10,10*(nfile+1)) nc.close() # now read all those files in at once, in one Dataset. from netCDF4 import MFDataset nc = MFDataset('mftest*nc') print(nc.variables['x'][:]) # example showing how to save numpy complex arrays using compound types. nc = Dataset('complex.nc','w') size = 3 # length of 1-d complex array # create sample complex data. datac = np.exp(1j*(1.+np.linspace(0, np.pi, size))) print(datac.dtype) # create complex128 compound data type. complex128 = np.dtype([('real',np.float64),('imag',np.float64)]) complex128_t = nc.createCompoundType(complex128,'complex128') # create a variable with this data type, write some data to it. nc.createDimension('x_dim',None) var_complex = nc.createVariable('cmplx_var',complex128_t,'x_dim') data = np.empty(size,complex128) # numpy structured array data['real'] = datac.real; data['imag'] = datac.imag var_complex[:] = data # close and reopen the file, check the contents. 
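# (Slicing the compound variable returns a numpy structured array with
# 'real' and 'imag' fields, which is reassembled into a complex array below.)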
nc.close()
nc = Dataset('complex.nc')
print(nc)
print(nc.variables['cmplx_var'])
print(nc.cmptypes)
print(nc.cmptypes['complex128'])
var_complex = nc.variables['cmplx_var']
print(var_complex.shape)
datain = var_complex[:] # read in all the data into a numpy structured array
# create an empty numpy complex array
datac2 = np.empty(datain.shape,np.complex128)
# .. fill it with contents of structured array.
datac2.real = datain['real']
datac2.imag = datain['imag']
print(datac.dtype,datac)
print(datac2.dtype,datac2)

# more complex compound type example.
nc = Dataset('compound_example.nc','w') # create a new dataset.
# create an unlimited dimension called 'station'
nc.createDimension('station',None)
# define a compound data type (can contain arrays, or nested compound types).
winddtype = np.dtype([('speed','f4'),('direction','i4')])
statdtype = np.dtype([('latitude', 'f4'), ('longitude', 'f4'),
                      ('surface_wind',winddtype),
                      ('temp_sounding','f4',10),('press_sounding','i4',10),
                      ('location_name','S12')])
# use these data type definitions to create compound data types,
# via the createCompoundType Dataset method.
# create a compound type for vector wind which will be nested inside
# the station data type. This must be done first!
wind_data_t = nc.createCompoundType(winddtype,'wind_data')
# now that wind_data_t is defined, create the station data type.
station_data_t = nc.createCompoundType(statdtype,'station_data')
# create nested compound data types to hold the units variable attribute.
winddtype_units = np.dtype([('speed','S12'),('direction','S12')])
statdtype_units = np.dtype([('latitude', 'S12'), ('longitude', 'S12'),
                            ('surface_wind',winddtype_units),
                            ('temp_sounding','S12'),
                            ('location_name','S12'),
                            ('press_sounding','S12')])
# create the wind_data_units type first, since it will be nested inside
# the station_data_units data type.
wind_data_units_t = nc.createCompoundType(winddtype_units,'wind_data_units')
station_data_units_t =\
nc.createCompoundType(statdtype_units,'station_data_units')
# create a variable of type 'station_data_t'
statdat = nc.createVariable('station_obs', station_data_t, ('station',))
# create a numpy structured array, assign data to it.
data = np.empty(1,statdtype)
data['latitude'] = 40.
data['longitude'] = -105.
data['surface_wind']['speed'] = 12.5
data['surface_wind']['direction'] = 270
data['temp_sounding'] = (280.3,272.,270.,269.,266.,258.,254.1,250.,245.5,240.)
data['press_sounding'] = range(800,300,-50)
data['location_name'] = 'Boulder, CO'
# assign structured array to variable slice.
statdat[0] = data
# or just assign a tuple of values to variable slice
# (will automatically be converted to a structured array).
statdat[1] = np.array((40.78,-73.99,(-12.5,90),
    (290.2,282.5,279.,277.9,276.,266.,264.1,260.,255.5,243.),
    range(900,400,-50),'New York, NY'),data.dtype)
print(nc.cmptypes)
windunits = np.empty(1,winddtype_units)
stationobs_units = np.empty(1,statdtype_units)
windunits['speed'] = 'm/s'
windunits['direction'] = 'degrees'
stationobs_units['latitude'] = 'degrees N'
stationobs_units['longitude'] = 'degrees W'
stationobs_units['surface_wind'] = windunits
stationobs_units['location_name'] = 'None'
stationobs_units['temp_sounding'] = 'Kelvin'
stationobs_units['press_sounding'] = 'hPa'
print(stationobs_units.dtype)
statdat.units = stationobs_units
# close and reopen the file.
nc.close()
nc = Dataset('compound_example.nc')
print(nc)
statdat = nc.variables['station_obs']
print(statdat)
# print out data in variable.
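# (The slice comes back as a numpy structured array whose dtype mirrors the
# 'station_data' compound type defined above.)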
print('data in a variable of compound type:')
print(statdat[:])
nc.close()

nc = Dataset('tst_vlen.nc','w')
vlen_t = nc.createVLType(np.int32, 'phony_vlen')
x = nc.createDimension('x',3)
y = nc.createDimension('y',4)
vlvar = nc.createVariable('phony_vlen_var', vlen_t, ('y','x'))
import random
data = np.empty(len(y)*len(x),object)
for n in range(len(y)*len(x)):
    data[n] = np.arange(random.randint(1,10),dtype='int32')+1
data = np.reshape(data,(len(y),len(x)))
vlvar[:] = data
print(vlvar)
print('vlen variable =\n',vlvar[:])
print(nc)
print(nc.variables['phony_vlen_var'])
print(nc.vltypes['phony_vlen'])

z = nc.createDimension('z', 10)
strvar = nc.createVariable('strvar',str,'z')
chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = np.empty(10,object)
for n in range(10):
    stringlen = random.randint(2,12)
    data[n] = ''.join([random.choice(chars) for i in range(stringlen)])
strvar[:] = data
print('variable-length string variable:\n',strvar[:])
print(nc)
print(nc.variables['strvar'])
nc.close()

# Enum type example.
nc = Dataset('clouds.nc','w')
# python dict describing the allowed values and their names.
enum_dict = {'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0,
'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1,
'Stratocumulus': 3}
# create the Enum type called 'cloud_t'.
cloud_type = nc.createEnumType(np.uint8,'cloud_t',enum_dict)
print(cloud_type)
time_dim = nc.createDimension('time',None)
# create a 1d variable of type 'cloud_type' called 'primary_cloud'.
# The fill_value is set to the 'Missing' named value.
cloud_var = nc.createVariable('primary_cloud',cloud_type,'time',\
fill_value=enum_dict['Missing'])
# write some data to the variable.
cloud_var[:] = [enum_dict['Clear'],enum_dict['Stratus'],enum_dict['Cumulus'],\
enum_dict['Missing'],enum_dict['Cumulonimbus']]
# close file, reopen it.
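# (After reopening, the named values can be recovered from the variable's
# datatype.enum_dict mapping, as printed below.)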
nc.close() nc = Dataset('clouds.nc') cloud_var = nc.variables['primary_cloud'] print(cloud_var) print(cloud_var.datatype.enum_dict) print(cloud_var[:]) nc.close() # dealing with strings from netCDF4 import stringtochar nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC') nc.createDimension('nchars',3) nc.createDimension('nstrings',None) var = nc.createVariable('strings','S1',('nstrings','nchars')) datain = np.array(['foo','bar'],dtype='S3') var[:] = stringtochar(datain) # manual conversion to char array print(var[:]) # data returned as char array var._Encoding = 'ascii' # this enables automatic conversion var[:] = datain # conversion to char array done internally print(var[:]) # data returned in numpy string array nc.close() # strings in compound types nc = Dataset('compoundstring_example.nc','w') dtype = np.dtype([('observation', 'f4'), ('station_name','S12')]) station_data_t = nc.createCompoundType(dtype,'station_data') nc.createDimension('station',None) statdat = nc.createVariable('station_obs', station_data_t, ('station',)) data = np.empty(2,station_data_t.dtype_view) data['observation'][:] = (123.,3.14) data['station_name'][:] = ('Boulder','New York') print(statdat.dtype) # strings actually stored as character arrays statdat[:] = data # strings converted to character arrays internally print(statdat[:]) # character arrays converted back to strings print(statdat[:].dtype) statdat.set_auto_chartostring(False) # turn off auto-conversion statdat[:] = data.view(station_data_t.dtype) print(statdat[:]) # now structured array with char array subtype is returned nc.close() # create a diskless (in-memory) Dataset, and persist the file # to disk when it is closed. nc = Dataset('diskless_example.nc','w',diskless=True,persist=True) d = nc.createDimension('x',None) v = nc.createVariable('v',np.int32,'x') v[0:5] = np.arange(5) print(nc) print(nc['v'][:]) nc.close() # file saved to disk # create an in-memory dataset from an existing python memory # buffer. # read the newly created netcdf file into a python bytes object. f = open('diskless_example.nc', 'rb') nc_bytes = f.read(); f.close() # create a netCDF in-memory dataset from the bytes object. nc = Dataset('inmemory.nc', memory=nc_bytes) print(nc) print(nc['v'][:]) nc.close() # create an in-memory Dataset and retrieve memory buffer # estimated size is 1028 bytes - this is actually only # used if format is NETCDF3 (ignored for NETCDF4/HDF5 files). nc = Dataset('inmemory.nc', mode='w',memory=1028) d = nc.createDimension('x',None) v = nc.createVariable('v',np.int32,'x') v[0:5] = np.arange(5) nc_buf = nc.close() # close returns memoryview print(type(nc_buf)) # save nc_buf to disk, read it back in and check. 
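# (nc_buf is a memoryview; file.write accepts it directly, or it can be
# wrapped with bytes(nc_buf) if an immutable copy is needed.)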
f2 = open('inmemory.nc', 'wb') f2.write(nc_buf); f2.close() nc = Dataset('inmemory.nc') print(nc) print(nc['v'][:]) nc.close() # Write complex numbers to file complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j]) with Dataset("complex.nc", "w", auto_complex=True) as nc: nc.createDimension("x", size=len(complex_array)) var = nc.createVariable("data", "c16", ("x",)) var[:] = complex_array print(var) netcdf4-python-1.7.4rel/examples/writing_netCDF.ipynb000066400000000000000000001074141512661643000226360ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": { "internals": { "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "# Writing netCDF data\n", "\n", "**Important Note**: when running this notebook interactively in a browser, you probably will not be able to execute individual cells out of order without getting an error. Instead, choose \"Run All\" from the Cell menu after you modify a cell." ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "collapsed": false, "internals": { "frag_number": 1, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [], "source": [ "import netCDF4 # Note: python is case-sensitive!\n", "import numpy as np" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 1, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Opening a file, creating a new Dataset\n", "\n", "Let's create a new, empty netCDF file named 'data/new.nc', opened for writing.\n", "\n", "Be careful, opening a file with 'w' will clobber any existing data (unless `clobber=False` is used, in which case an exception is raised if the file already exists).\n", "\n", "- `mode='r'` is the default.\n", "- `mode='a'` opens an existing file and allows for appending (does not clobber existing data)\n", "- `format` can be one of `NETCDF3_CLASSIC`, `NETCDF3_64BIT`, `NETCDF4_CLASSIC` or `NETCDF4` (default). `NETCDF4_CLASSIC` uses HDF5 for the underlying storage layer (as does `NETCDF4`) but enforces the classic netCDF 3 data model so data can be read with older clients. " ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 3, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "root group (NETCDF4_CLASSIC data model, file format HDF5):\n", " dimensions(sizes): \n", " variables(dimensions): \n", " groups: \n", "\n" ] } ], "source": [ "try: ncfile.close() # just to be safe, make sure dataset is not already open.\n", "except: pass\n", "ncfile = netCDF4.Dataset('data/new.nc',mode='w',format='NETCDF4_CLASSIC') \n", "print(ncfile)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 3, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Creating dimensions\n", "\n", "The **ncfile** object we created is a container for _dimensions_, _variables_, and _attributes_. First, let's create some dimensions using the [`createDimension`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createDimension) method. \n", "\n", "- Every dimension has a name and a length. 
\n", "- The name is a string that is used to specify the dimension to be used when creating a variable, and as a key to access the dimension object in the `ncfile.dimensions` dictionary.\n", "\n", "Setting the dimension length to `0` or `None` makes it unlimited, so it can grow. \n", "\n", "- For `NETCDF4` files, any variable's dimension can be unlimited. \n", "- For `NETCDF4_CLASSIC` and `NETCDF3*` files, only one per variable can be unlimited, and it must be the leftmost (fastest varying) dimension." ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 5, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('lat', : name = 'lat', size = 73\n", ")\n", "('lon', : name = 'lon', size = 144\n", ")\n", "('time', (unlimited): name = 'time', size = 0\n", ")\n" ] } ], "source": [ "lat_dim = ncfile.createDimension('lat', 73) # latitude axis\n", "lon_dim = ncfile.createDimension('lon', 144) # longitude axis\n", "time_dim = ncfile.createDimension('time', None) # unlimited axis (can be appended to).\n", "for dim in ncfile.dimensions.items():\n", " print(dim)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 5, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Creating attributes\n", "\n", "netCDF attributes can be created just like you would for any python object. \n", "\n", "- Best to adhere to established conventions (like the [CF](http://cfconventions.org/) conventions)\n", "- We won't try to adhere to any specific convention here though." ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 7 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "My model data\n" ] } ], "source": [ "ncfile.title='My model data'\n", "print(ncfile.title)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 8, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "source": [ "Try adding some more attributes..." ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 8, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Creating variables\n", "\n", "Now let's add some variables and store some data in them. \n", "\n", "- A variable has a name, a type, a shape, and some data values. \n", "- The shape of a variable is specified by a tuple of dimension names. \n", "- A variable should also have some named attributes, such as 'units', that describe the data.\n", "\n", "The [`createVariable`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createVariable) method takes 3 mandatory args.\n", "\n", "- the 1st argument is the variable name (a string). This is used as the key to access the variable object from the `variables` dictionary.\n", "- the 2nd argument is the datatype (most numpy datatypes supported). \n", "- the third argument is a tuple containing the dimension names (the dimensions must be created first). 
Unless this is a `NETCDF4` file, any unlimited dimension must be the leftmost one.\n", "- there are lots of optional arguments (many of which are only relevant when `format='NETCDF4'`) to control compression, chunking, fill_value, etc.\n" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 10, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "float64 temp(time, lat, lon)\n", " units: K\n", " standard_name: air_temperature\n", "unlimited dimensions: time\n", "current shape = (0, 73, 144)\n", "filling on, default _FillValue of 9.96920996839e+36 used\n", "\n" ] } ], "source": [ "# Define two variables with the same names as dimensions,\n", "# a conventional way to define \"coordinate variables\".\n", "lat = ncfile.createVariable('lat', np.float32, ('lat',))\n", "lat.units = 'degrees_north'\n", "lat.long_name = 'latitude'\n", "lon = ncfile.createVariable('lon', np.float32, ('lon',))\n", "lon.units = 'degrees_east'\n", "lon.long_name = 'longitude'\n", "time = ncfile.createVariable('time', np.float64, ('time',))\n", "time.units = 'hours since 1800-01-01'\n", "time.long_name = 'time'\n", "# Define a 3D variable to hold the data\n", "temp = ncfile.createVariable('temp',np.float64,('time','lat','lon')) # note: unlimited dimension is leftmost\n", "temp.units = 'K' # degrees Kelvin\n", "temp.standard_name = 'air_temperature' # this is a CF standard name\n", "print(temp)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 10, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Pre-defined variable attributes (read only)\n", "\n", "The netCDF4 module provides some useful pre-defined Python attributes for netCDF variables, such as dimensions, shape, dtype, ndim. \n", "\n", "Note: since no data has been written yet, the length of the 'time' dimension is 0." ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 12, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "-- Some pre-defined attributes for variable temp:\n", "('temp.dimensions:', (u'time', u'lat', u'lon'))\n", "('temp.shape:', (0, 73, 144))\n", "('temp.dtype:', dtype('float64'))\n", "('temp.ndim:', 3)\n" ] } ], "source": [ "print(\"-- Some pre-defined attributes for variable temp:\")\n", "print(\"temp.dimensions:\", temp.dimensions)\n", "print(\"temp.shape:\", temp.shape)\n", "print(\"temp.dtype:\", temp.dtype)\n", "print(\"temp.ndim:\", temp.ndim)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 12, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Writing data\n", "\n", "To write data to a netCDF variable object, just treat it like a numpy array and assign values to a slice." 
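,
"\n",
"\n",
"For instance, a minimal sketch (the values here are illustrative, and it assumes the `temp` variable defined above): assigning a scalar writes a single grid point, while locations that are never written stay at the default `_FillValue` and read back as masked.\n",
"\n",
"```python\n",
"temp[0, 0, 0] = 280.0  # write one value at the first time/lat/lon point\n",
"```"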
] }, { "cell_type": "code", "execution_count": 31, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 14 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('-- Wrote data, temp.shape is now ', (3, 73, 144))\n", "('-- Min/Max values:', 280.00283562143028, 329.99987991477548)\n" ] } ], "source": [ "nlats = len(lat_dim); nlons = len(lon_dim); ntimes = 3\n", "# Write latitudes, longitudes.\n", "# Note: the \":\" is necessary in these \"write\" statements\n", "lat[:] = -90. + (180./nlats)*np.arange(nlats) # south pole to north pole\n", "lon[:] = (180./nlats)*np.arange(nlons) # Greenwich meridian eastward\n", "# create a 3D array of random numbers\n", "data_arr = np.random.uniform(low=280,high=330,size=(ntimes,nlats,nlons))\n", "# Write the data. This writes the whole 3D netCDF variable all at once.\n", "temp[:,:,:] = data_arr # Appends data along unlimited dimension\n", "print(\"-- Wrote data, temp.shape is now \", temp.shape)\n", "# read data back from variable (by slicing it), print min and max\n", "print(\"-- Min/Max values:\", temp[:,:,:].min(), temp[:,:,:].max())" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 15, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "source": [ "- You can just treat a netCDF Variable object like a numpy array and assign values to it.\n", "- Variables automatically grow along unlimited dimensions (unlike numpy arrays)\n", "- The above writes the whole 3D variable all at once, but you can write it a slice at a time instead.\n", "\n", "Let's add another time slice....\n" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 15, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('-- Wrote more data, temp.shape is now ', (4, 73, 144))\n" ] } ], "source": [ "# create a 2D array of random numbers\n", "data_slice = np.random.uniform(low=280,high=330,size=(nlats,nlons))\n", "temp[3,:,:] = data_slice # Appends the 4th time slice\n", "print(\"-- Wrote more data, temp.shape is now \", temp.shape)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 17 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "Note that we have not yet written any data to the time variable. It automatically grew as we appended data along the time dimension to the variable `temp`, but the data is missing." 
] }, { "cell_type": "code", "execution_count": 33, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 18, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "float64 time(time)\n", " units: hours since 1800-01-01\n", " long_name: time\n", "unlimited dimensions: time\n", "current shape = (4,)\n", "filling on, default _FillValue of 9.96920996839e+36 used\n", "\n", "(, masked_array(data = [-- -- -- --],\n", " mask = [ True True True True],\n", " fill_value = 9.96920996839e+36)\n", ")\n" ] } ], "source": [ "print(time)\n", "times_arr = time[:]\n", "print(type(times_arr),times_arr) # dashes indicate masked values (where data has not yet been written)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 18, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "Let's add write some data into the time variable. \n", "\n", "- Given a set of datetime instances, use date2num to convert to numeric time values and then write that data to the variable." ] }, { "cell_type": "code", "execution_count": 34, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 20, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[datetime.datetime(2014, 10, 1, 0, 0), datetime.datetime(2014, 10, 2, 0, 0), datetime.datetime(2014, 10, 3, 0, 0), datetime.datetime(2014, 10, 4, 0, 0)]\n", "(array([ 1882440., 1882464., 1882488., 1882512.]), u'hours since 1800-01-01')\n", "[datetime.datetime(2014, 10, 1, 0, 0) datetime.datetime(2014, 10, 2, 0, 0)\n", " datetime.datetime(2014, 10, 3, 0, 0) datetime.datetime(2014, 10, 4, 0, 0)]\n" ] } ], "source": [ "from datetime import datetime\n", "from netCDF4 import date2num,num2date\n", "# 1st 4 days of October.\n", "dates = [datetime(2014,10,1,0),datetime(2014,10,2,0),datetime(2014,10,3,0),datetime(2014,10,4,0)]\n", "print(dates)\n", "times = date2num(dates, time.units)\n", "print(times, time.units) # numeric values\n", "time[:] = times\n", "# read time data back, convert to datetime instances, check values.\n", "print(num2date(time[:],time.units))" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 20, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Closing a netCDF file\n", "\n", "It's **important** to close a netCDF file you opened for writing:\n", "\n", "- flushes buffers to make sure all data gets written\n", "- releases memory resources used by open netCDF files" ] }, { "cell_type": "code", "execution_count": 35, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 22, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "root group (NETCDF4_CLASSIC data model, file format HDF5):\n", " title: My model data\n", " dimensions(sizes): lat(73), lon(144), time(4)\n", " variables(dimensions): float32 \u001b[4mlat\u001b[0m(lat), float32 \u001b[4mlon\u001b[0m(lon), float64 \u001b[4mtime\u001b[0m(time), float64 \u001b[4mtemp\u001b[0m(time,lat,lon)\n", " groups: \n", "\n", "Dataset is closed!\n" ] } ], "source": [ "# 
first print the Dataset object to see what we've got\n", "print(ncfile)\n", "# close the Dataset.\n", "ncfile.close(); print('Dataset is closed!')" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 22, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "# Advanced features\n", "\n", "So far we've only exercised features associated with the old netCDF version 3 data model. netCDF version 4 adds a lot of new functionality that comes with the more flexible HDF5 storage layer. \n", "\n", "Let's create a new file with `format='NETCDF4'` so we can try out some of these features." ] }, { "cell_type": "code", "execution_count": 36, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 25, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "root group (NETCDF4 data model, file format HDF5):\n", " dimensions(sizes): \n", " variables(dimensions): \n", " groups: \n", "\n" ] } ], "source": [ "ncfile = netCDF4.Dataset('data/new2.nc','w',format='NETCDF4')\n", "print(ncfile)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 25, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "## Creating Groups\n", "\n", "netCDF version 4 added support for organizing data in hierarchical groups.\n", "\n", "- analogous to directories in a filesystem. \n", "- Groups serve as containers for variables, dimensions and attributes, as well as other groups. \n", "- A `netCDF4.Dataset` creates a special group, called the 'root group', which is similar to the root directory in a unix filesystem. \n", "\n", "- groups are created using the [`createGroup`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createGroup) method.\n", "- takes a single argument (a string, which is the name of the Group instance). This string is used as a key to access the group instances in the `groups` dictionary.\n", "\n", "Here we create two groups to hold data for two different model runs." ] }, { "cell_type": "code", "execution_count": 37, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 27, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('model_run1', \n", "group /model_run1:\n", " dimensions(sizes): \n", " variables(dimensions): \n", " groups: \n", ")\n", "('model_run2', \n", "group /model_run2:\n", " dimensions(sizes): \n", " variables(dimensions): \n", " groups: \n", ")\n" ] } ], "source": [ "grp1 = ncfile.createGroup('model_run1')\n", "grp2 = ncfile.createGroup('model_run2')\n", "for grp in ncfile.groups.items():\n", " print(grp)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 27, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "Create some dimensions in the root group." 
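,
"\n",
"\n",
"Dimensions can also be created inside a group, in which case they are only visible to that group and its subgroups. A minimal sketch (illustrative only; the `member` dimension is hypothetical and not used elsewhere in this notebook):\n",
"\n",
"```python\n",
"member_dim = grp1.createDimension('member', 10)  # visible from grp1 downwards\n",
"```"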
] }, { "cell_type": "code", "execution_count": 38, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 29 }, "slideshow": { "slide_type": "fragment" } }, "outputs": [], "source": [ "lat_dim = ncfile.createDimension('lat', 73) # latitude axis\n", "lon_dim = ncfile.createDimension('lon', 144) # longitude axis\n", "time_dim = ncfile.createDimension('time', None) # unlimited axis (can be appended to)." ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 30 }, "slideshow": { "slide_type": "fragment" } }, "source": [ "Now create a variable in grp1 and grp2. The library will search recursively upwards in the group tree to find the dimensions (which in this case are defined one level up).\n", "\n", "- These variables are create with **zlib compression**, another nifty feature of netCDF 4. \n", "- The data are automatically compressed when data is written to the file, and uncompressed when the data is read. \n", "- This can really save disk space, especially when used in conjunction with the [**least_significant_digit**](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createVariable) keyword argument, which causes the data to be quantized (truncated) before compression. This makes the compression lossy, but more efficient." ] }, { "cell_type": "code", "execution_count": 39, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 31, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "('model_run1', \n", "group /model_run1:\n", " dimensions(sizes): \n", " variables(dimensions): float64 \u001b[4mtemp\u001b[0m(time,lat,lon)\n", " groups: \n", ")\n", "('model_run2', \n", "group /model_run2:\n", " dimensions(sizes): \n", " variables(dimensions): float64 \u001b[4mtemp\u001b[0m(time,lat,lon)\n", " groups: \n", ")\n" ] } ], "source": [ "temp1 = grp1.createVariable('temp',np.float64,('time','lat','lon'),zlib=True)\n", "temp2 = grp2.createVariable('temp',np.float64,('time','lat','lon'),zlib=True)\n", "for grp in ncfile.groups.items(): # shows that each group now contains 1 variable\n", " print(grp)" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 31, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "##Creating a variable with a compound data type\n", "\n", "- Compound data types map directly to numpy structured (a.k.a 'record' arrays). \n", "- Structured arrays are akin to C structs, or derived types in Fortran. \n", "- They allow for the construction of table-like structures composed of combinations of other data types, including other compound types. \n", "- Might be useful for representing multiple parameter values at each point on a grid, or at each time and space location for scattered (point) data. \n", "\n", "Here we create a variable with a compound data type to represent complex data (there is no native complex data type in netCDF). \n", "\n", "- The compound data type is created with the [`createCompoundType`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createCompoundType) method." 
] }, { "cell_type": "code", "execution_count": 40, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 33, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "compound cmplx_var(time, lat, lon)\n", "compound data type: [('real', '\n", "vlen phony_vlen_var(time, lat, lon)\n", "vlen data type: int64\n", "path = /model_run2\n", "unlimited dimensions: time\n", "current shape = (1, 73, 144)\n", "\n", "('data =\\n', array([[[array([0, 4, 0, 9, 2, 2, 2, 4, 2]), array([7, 5, 4, 4, 9, 8, 0]),\n", " array([3, 6, 6, 8, 2, 7]), ..., array([5, 0, 0, 8, 8, 1, 5, 3]),\n", " array([4, 2, 7]), array([0])],\n", " [array([5, 6, 6, 6, 1, 0, 7]), array([7]),\n", " array([7, 5, 8, 9, 6, 9, 3]), ..., array([0, 6, 5, 4]),\n", " array([7, 1, 9, 7, 7, 2]), array([1, 4, 0])],\n", " [array([4, 3, 1]), array([6, 3, 9, 7, 8]), array([8]), ...,\n", " array([6, 5, 8, 0]), array([0]), array([0, 9, 6, 2, 4])],\n", " ..., \n", " [array([8, 4, 4]), array([4, 1, 6]), array([1, 4, 2, 3, 9]), ...,\n", " array([9, 1]), array([7, 2, 5, 1, 5, 8, 2]),\n", " array([2, 9, 9, 1, 4, 6, 3, 5, 2])],\n", " [array([4, 7, 9, 8, 2, 3, 6, 6]),\n", " array([1, 4, 1, 6, 1, 1, 2, 3, 9]),\n", " array([9, 5, 6, 2, 4, 3, 8, 2, 9]), ..., array([9, 5, 7]),\n", " array([3, 9]), array([4, 2, 6, 9])],\n", " [array([8, 9, 9, 2, 2, 8, 8, 5]), array([3]),\n", " array([8, 8, 0, 2, 9, 2, 3, 0, 9]), ..., array([7]),\n", " array([5, 1, 0, 6, 8, 6]), array([8, 6, 3, 6, 9, 8, 4, 2, 5])]]], dtype=object))\n" ] } ], "source": [ "vlen_data = np.empty((nlats,nlons),object)\n", "for i in range(nlons):\n", " for j in range(nlats):\n", " size = np.random.randint(1,10,size=1) # random length of sequence\n", " vlen_data[j,i] = np.random.randint(0,10,size=size)# generate random sequence\n", "vlvar[0] = vlen_data # append along unlimited dimension (time)\n", "print(vlvar)\n", "print('data =\\n',vlvar[:])" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 39, "slide_type": "subslide" }, "slideshow": { "slide_type": "slide" } }, "source": [ "Close the Dataset and examine the contents with ncdump." 
] }, { "cell_type": "code", "execution_count": 44, "metadata": { "collapsed": false, "internals": { "frag_helper": "fragment_end", "frag_number": 41, "slide_helper": "subslide_end" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "fragment" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "netcdf new2 {\r\n", "types:\r\n", " compound complex128 {\r\n", " double real ;\r\n", " double imag ;\r\n", " }; // complex128\r\n", " int64(*) phony_vlen ;\r\n", "dimensions:\r\n", "\tlat = 73 ;\r\n", "\tlon = 144 ;\r\n", "\ttime = UNLIMITED ; // (1 currently)\r\n", "\r\n", "group: model_run1 {\r\n", " variables:\r\n", " \tdouble temp(time, lat, lon) ;\r\n", " \tcomplex128 cmplx_var(time, lat, lon) ;\r\n", " } // group model_run1\r\n", "\r\n", "group: model_run2 {\r\n", " variables:\r\n", " \tdouble temp(time, lat, lon) ;\r\n", " \tphony_vlen phony_vlen_var(time, lat, lon) ;\r\n", " } // group model_run2\r\n", "}\r\n" ] } ], "source": [ "ncfile.close()\n", "!ncdump -h data/new2.nc" ] }, { "cell_type": "markdown", "metadata": { "internals": { "frag_helper": "fragment_end", "frag_number": 41, "slide_helper": "subslide_end", "slide_type": "subslide" }, "slide_helper": "slide_end", "slideshow": { "slide_type": "slide" } }, "source": [ "##Other interesting and useful projects using netcdf4-python\n", "\n", "- [xarray](https://xarray.pydata.org/en/stable/): N-dimensional variant of the core [pandas](https://pandas.pydata.org) data structure that can operate on netcdf variables.\n", "- [Iris](https://scitools.org.uk/iris/docs/latest/): a data model to create a data abstraction layer which isolates analysis and visualisation code from data format specifics. Uses netcdf4-python to access netcdf data (can also handle GRIB).\n", "- [Dask](https://dask.org/): Virtual large arrays (from netcdf variables) with lazy evaluation.\n", "- [cf-python](https://cfpython.bitbucket.io/): Implements the [CF](http://cfconventions.org) data model for the reading, writing and processing of data and metadata. " ] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.9" } }, "nbformat": 4, "nbformat_minor": 0 } netcdf4-python-1.7.4rel/external/000077500000000000000000000000001512661643000167225ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/README000066400000000000000000000001771512661643000176070ustar00rootroot00000000000000* 20240616: remove submodule, include v0.2.0 tag source files (https://github.com/PlasmaFAIR/nc-complex/releases/tag/v0.2.0). 
netcdf4-python-1.7.4rel/external/nc_complex/000077500000000000000000000000001512661643000210515ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/nc_complex/include/000077500000000000000000000000001512661643000224745ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/nc_complex/include/generated_fallbacks/000077500000000000000000000000001512661643000264345ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/nc_complex/include/generated_fallbacks/nc_complex_version.h000066400000000000000000000002751512661643000325050ustar00rootroot00000000000000#define NC_COMPLEX_GIT_SHA1 "37310ed00f3910974bdefefcdfa4787588651f59" #define NC_COMPLEX_GIT_VERSION "v0.2.0" #define NC_COMPLEX_GIT_STATE "clean" #define NC_COMPLEX_GIT_DATE "2023-12-08" netcdf4-python-1.7.4rel/external/nc_complex/include/nc_complex/000077500000000000000000000000001512661643000246235ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/nc_complex/include/nc_complex/nc_complex.h000066400000000000000000000212161512661643000271250ustar00rootroot00000000000000/// nc-complex: A lightweight, drop-in extension for complex number support in /// netCDF /// /// Copyright (C) 2023 Peter Hill /// /// SPDX-License-Identifier: MIT #ifndef PLASMA_FAIR_NC_COMPLEX #define PLASMA_FAIR_NC_COMPLEX // This header is required when building as a DLL on Windows and is // automatically generated by CMake. If you're not using CMake (and // not on Windows) for some reason, then define `NC_COMPLEX_NO_EXPORT` // to skip this. #ifndef NC_COMPLEX_NO_EXPORT #include "nc_complex/nc_complex_export.h" #else #define NC_COMPLEX_EXPORT #endif #include <complex.h> #include <netcdf.h> #include <stdbool.h> #include <stddef.h> #ifdef __cplusplus #include <complex> #endif //@{ /// Portable typedefs for complex numbers /// /// These become aliases for `std::complex` with C++. #ifdef _MSC_VER typedef _Dcomplex double_complex; typedef _Fcomplex float_complex; #else #if defined(__cplusplus) && defined(__clang__) using double_complex = std::complex<double>; using float_complex = std::complex<float>; #else typedef double _Complex double_complex; typedef float _Complex float_complex; #endif #endif //@} #ifdef __cplusplus /// @name Helper functions ///@{ /// Helper functions for converting between (pointers to) C++ and C complex types NC_COMPLEX_EXPORT inline double_complex* cpp_to_c_complex(std::complex<double>* data) { return reinterpret_cast<double_complex*>(data); } NC_COMPLEX_EXPORT inline std::complex<double>* c_to_cpp_complex(double_complex* data) { return reinterpret_cast<std::complex<double>*>(data); } NC_COMPLEX_EXPORT inline float_complex* cpp_to_c_complex(std::complex<float>* data) { return reinterpret_cast<float_complex*>(data); } NC_COMPLEX_EXPORT inline std::complex<float>* c_to_cpp_complex(float_complex* data) { return reinterpret_cast<std::complex<float>*>(data); } ///@} extern "C" { #endif /// @name Complex datatype defines /// Datatype for complex numbers, for use with \rstref{pfnc_def_var} /// /// @note /// These *only* work when defining a variable with \rstref{pfnc_def_var}. To /// check the type of an existing variable use \rstref{pfnc_var_is_complex}, and /// to check if it is specifically using a compound datatype or a dimension use /// \rstref{pfnc_var_is_complex_type} or \rstref{pfnc_var_has_complex_dimension} /// respectively. 
/// @endnote ///@{ /// Uses complex compound datatype with netCDF4 format, and complex dimension otherwise #define PFNC_FLOAT_COMPLEX (NC_FIRSTUSERTYPEID - 4) /// Always use a complex dimension, regardless of file format #define PFNC_FLOAT_COMPLEX_DIM (NC_FIRSTUSERTYPEID - 3) /// Uses complex compound datatype with netCDF4 format, and complex dimension otherwise #define PFNC_DOUBLE_COMPLEX (NC_FIRSTUSERTYPEID - 2) /// Always use a complex dimension, regardless of file format #define PFNC_DOUBLE_COMPLEX_DIM (NC_FIRSTUSERTYPEID - 1) ///@} /// Return true if variable is complex NC_COMPLEX_EXPORT bool pfnc_var_is_complex(int ncid, int varid); /// Return true if variable is complex and uses a compound datatype NC_COMPLEX_EXPORT bool pfnc_var_is_complex_type(int ncid, int varid); /// Return true if variable is complex and has a complex dimension /// (assumed to be the last dimension) NC_COMPLEX_EXPORT bool pfnc_var_has_complex_dimension(int ncid, int varid); /// Return true if dimension is complex NC_COMPLEX_EXPORT bool pfnc_is_complex_dim(int ncid, int dim_id); /// Get the ID for the complex datatype with `double` elements, creating it if it doesn't already exist NC_COMPLEX_EXPORT int pfnc_get_double_complex_typeid(int ncid, nc_type* nc_typeid); /// Get the ID for the complex datatype with `float` elements, creating it if it doesn't already exist NC_COMPLEX_EXPORT int pfnc_get_float_complex_typeid(int ncid, nc_type* nc_typeid); /// Get complex dimension, creating one if it doesn't already exist NC_COMPLEX_EXPORT int pfnc_get_complex_dim(int ncid, int* nc_dim); /// Get the base numerical type of a complex type /// /// Returns the type of the components for a compound type, or the /// type of an element for a dimension type. NC_COMPLEX_EXPORT int pfnc_complex_base_type( int ncid, nc_type nc_typeid, nc_type* base_type_id ); /// Get the base numerical type of a complex variable NC_COMPLEX_EXPORT int pfnc_inq_var_complex_base_type( int ncid, int varid, nc_type* nc_typeid ); /// Return some information about the `nc-complex` library NC_COMPLEX_EXPORT const char* pfnc_inq_libvers(void); /// @name Wrappers /// Wrappers for the equivalent `nc_*` functions that correctly handle /// the start/count/stride arrays for complex dimensions. /// /// When the variable is stored using a complex dimension, the file /// representation has one more dimension than the user-visible /// in-memory representation. For example, a 1D array: /// /// ```c /// double_complex data[5]; /// ``` /// /// would be represented in the file with two dimensions (when not /// using a compound datatype!), and so if we use the standard netCDF /// API we would need to use `{5, 2}` for the `countp` arguments, for /// example, while using nc-complex, we only need `{5}`. 
/// /// NOTE: The `pfnc_put/get*` functions do *not* currently handle /// conversion between `float/double` base types ///@{ /// Extension to `nc_def_var` that also accepts /// \rstref{PFNC_FLOAT_COMPLEX}, \rstref{PFNC_FLOAT_COMPLEX_DIM}, /// \rstref{PFNC_DOUBLE_COMPLEX}, and \rstref{PFNC_DOUBLE_COMPLEX_DIM} NC_COMPLEX_EXPORT int pfnc_def_var( int ncid, const char* name, nc_type xtype, int ndims, const int* dimidsp, int* varidp ); NC_COMPLEX_EXPORT int pfnc_put_vara_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const double_complex* op ); NC_COMPLEX_EXPORT int pfnc_get_vara_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, double_complex* ip ); NC_COMPLEX_EXPORT int pfnc_put_vars_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const double_complex* op ); NC_COMPLEX_EXPORT int pfnc_get_vars_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, double_complex* ip ); NC_COMPLEX_EXPORT int pfnc_put_var1_double_complex( int ncid, int varid, const size_t* indexp, const double_complex* data ); NC_COMPLEX_EXPORT int pfnc_get_var1_double_complex( int ncid, int varid, const size_t* indexp, double_complex* data ); NC_COMPLEX_EXPORT int pfnc_put_vara_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const float_complex* op ); NC_COMPLEX_EXPORT int pfnc_get_vara_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, float_complex* ip ); NC_COMPLEX_EXPORT int pfnc_put_vars_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const float_complex* op ); NC_COMPLEX_EXPORT int pfnc_get_vars_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, float_complex* ip ); NC_COMPLEX_EXPORT int pfnc_put_var1_float_complex( int ncid, int varid, const size_t* indexp, const float_complex* data ); NC_COMPLEX_EXPORT int pfnc_get_var1_float_complex( int ncid, int varid, const size_t* indexp, float_complex* data ); NC_COMPLEX_EXPORT int pfnc_inq_var( int ncid, int varid, char* name, nc_type* xtypep, int* ndimsp, int* dimidsp, int* nattsp ); // NOLINTBEGIN(modernize-use-nullptr) NC_COMPLEX_EXPORT inline int pfnc_inq_varndims(int ncid, int varid, int* ndimsp) { return pfnc_inq_var(ncid, varid, NULL, NULL, ndimsp, NULL, NULL); } NC_COMPLEX_EXPORT inline int pfnc_inq_vardimid(int ncid, int varid, int* dimidsp) { return pfnc_inq_var(ncid, varid, NULL, NULL, NULL, dimidsp, NULL); } // NOLINTEND(modernize-use-nullptr) NC_COMPLEX_EXPORT int pfnc_def_var_chunking( int ncid, int varid, int storage, const size_t* chunksizesp ); NC_COMPLEX_EXPORT int pfnc_inq_var_chunking( int ncid, int varid, int* storagep, size_t* chunksizesp ); NC_COMPLEX_EXPORT int pfnc_get_vara( int ncid, int varid, const size_t* startp, const size_t* countp, void* ip ); NC_COMPLEX_EXPORT int pfnc_get_vars( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, void* ip ); NC_COMPLEX_EXPORT int pfnc_put_vara( int ncid, int varid, const size_t* startp, const size_t* countp, const void* op ); NC_COMPLEX_EXPORT int pfnc_put_vars( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const void* op ); ///@} #ifdef __cplusplus } #endif #endif 
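/// @name Usage example
/// A short sketch (illustrative only; error handling omitted) showing how the
/// wrappers above fit together: define and write a 1D complex variable. The
/// user-visible shape is `{5}`; any complex dimension in the file is handled
/// internally by the `pfnc_*` functions.
///
/// ```c
/// int ncid, dimid, varid;
/// size_t start[1] = {0};
/// size_t count[1] = {5};
/// double_complex data[5] = {0};
///
/// nc_create("example.nc", NC_NETCDF4 | NC_CLOBBER, &ncid);
/// nc_def_dim(ncid, "x", 5, &dimid);
/// pfnc_def_var(ncid, "z", PFNC_DOUBLE_COMPLEX, 1, &dimid, &varid);
/// pfnc_put_vara_double_complex(ncid, varid, start, count, data);
/// nc_close(ncid);
/// ```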
netcdf4-python-1.7.4rel/external/nc_complex/src/000077500000000000000000000000001512661643000216405ustar00rootroot00000000000000netcdf4-python-1.7.4rel/external/nc_complex/src/nc_complex.c000066400000000000000000000627671512661643000241510ustar00rootroot00000000000000#include "nc_complex/nc_complex.h" #include <ctype.h> #include <netcdf.h> #include <stdbool.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include "nc_complex_version.h" // to enable compilation with older versions of netcdf-c #ifndef NC_FORMATX_NCZARR #define NC_FORMATX_NCZARR (10) #endif // NOLINTBEGIN(bugprone-assignment-in-if-condition) #define CHECK(func) \ do { \ int res; \ if ((res = (func))) { \ return res; \ } \ } while (0) // NOLINTEND(bugprone-assignment-in-if-condition) // Vector of ones for get/put_var1 functions static const size_t coord_one[NC_MAX_VAR_DIMS] = {1}; static const char* double_complex_struct_name = "_PFNC_DOUBLE_COMPLEX_TYPE"; static const char* float_complex_struct_name = "_PFNC_FLOAT_COMPLEX_TYPE"; #define COMPLEX_DIM_NAME "_pfnc_complex" static const char* complex_dim_name = COMPLEX_DIM_NAME; static const char* known_dim_names[] = {COMPLEX_DIM_NAME, "complex", "ri"}; static const size_t num_known_dim_names = sizeof(known_dim_names) / sizeof(known_dim_names[0]); static const char pfnc_libvers[] = NC_COMPLEX_GIT_VERSION; const char* pfnc_inq_libvers(void) { return pfnc_libvers; } bool pfnc_var_is_complex(int ncid, int varid) { return pfnc_var_is_complex_type(ncid, varid) || pfnc_var_has_complex_dimension(ncid, varid); } int pfnc_complex_base_type(int ncid, nc_type nc_typeid, nc_type* base_type_id) { if (nc_typeid < NC_MAX_ATOMIC_TYPE) { *base_type_id = nc_typeid; return NC_NOERR; } // TODO: This should probably handle vlens too return nc_inq_compound_field( ncid, nc_typeid, 0, NULL, NULL, base_type_id, NULL, NULL ); } int pfnc_inq_var_complex_base_type(int ncid, int varid, nc_type* nc_typeid) { nc_type var_type_id; CHECK(nc_inq_vartype(ncid, varid, &var_type_id)); return pfnc_complex_base_type(ncid, var_type_id, nc_typeid); } /// Return true if a compound type is compatible with a known convention bool compound_type_is_compatible(int ncid, nc_type nc_typeid) { // Does the name match a known convention? char name[NC_MAX_NAME + 1]; nc_inq_compound_name(ncid, nc_typeid, name); if (strcmp(name, double_complex_struct_name) == 0) { return true; } // Does it have exactly two fields? size_t num_fields; nc_inq_compound_nfields(ncid, nc_typeid, &num_fields); if (num_fields != 2) { return false; } // As far as I can tell, all conventions put the real part first and // the imaginary part second. I'm pretty sure all native language // types are also this way round. That means we don't have to worry // about trying both combinations! char real_name[NC_MAX_NAME + 1]; size_t real_offset; nc_type real_field_type; int real_rank; nc_inq_compound_field( ncid, nc_typeid, 0, real_name, &real_offset, &real_field_type, &real_rank, NULL ); // If it's not a floating type, we're not interested if (!(real_field_type == NC_FLOAT || real_field_type == NC_DOUBLE)) { return false; } // Also needs to be scalar if (real_rank != 0) { return false; } // Now check names. 
For now, just check it starts with "r", in any case if (tolower(real_name[0]) != 'r') { return false; } char imag_name[NC_MAX_NAME + 1]; size_t imag_offset; nc_type imag_field_type; int imag_rank; nc_inq_compound_field( ncid, nc_typeid, 1, imag_name, &imag_offset, &imag_field_type, &imag_rank, NULL ); // Both component types better match if (imag_field_type != real_field_type) { return false; } if (imag_rank != 0) { return false; } if (tolower(imag_name[0]) != 'i') { return false; } return true; } /// Return true if file already has a complex type with the given base type bool file_has_complex_struct(int ncid, nc_type* typeidp, nc_type base_type) { // Simplest case, check for our type name int err = nc_inq_typeid(ncid, double_complex_struct_name, typeidp); if (err == NC_NOERR) { return true; } int ntypes; err = nc_inq_typeids(ncid, &ntypes, NULL); if (err != NC_NOERR) { return false; } bool result = false; nc_type* typeids = malloc((size_t)ntypes * sizeof(nc_type)); err = nc_inq_typeids(ncid, NULL, typeids); if (err != NC_NOERR) { goto cleanup; } for (size_t i = 0; i < (size_t)ntypes; i++) { if (compound_type_is_compatible(ncid, typeids[i])) { nc_type base_type_id; err = pfnc_complex_base_type(ncid, typeids[i], &base_type_id); if (err != NC_NOERR) { goto cleanup; } if (base_type_id == base_type) { *typeidp = typeids[i]; result = true; goto cleanup; } } } cleanup: free(typeids); return result; } /// Return true if a given dimension matches a known convention bool pfnc_is_complex_dim(int ncid, int dim_id) { size_t length; nc_inq_dimlen(ncid, dim_id, &length); // Definitely can only be exactly two. Note that we can't catch // unlimited dimensions that only have two records so far. if (length != 2) { return false; } // Not sure if this is the best way, but here we are. char name[NC_MAX_NAME + 1]; nc_inq_dimname(ncid, dim_id, name); const size_t name_length = strlen(name); // Check against known names of complex dimensions for (size_t i = 0; i < num_known_dim_names; i++) { if (strncmp(name, known_dim_names[i], name_length) == 0) { return true; } } return false; } /// Return true if a variable uses the dimension-convention bool pfnc_var_has_complex_dimension(int ncid, int nc_varid) { int num_dims; nc_inq_varndims(ncid, nc_varid, &num_dims); int* dim_ids = (int*)malloc((size_t)num_dims * sizeof(int)); nc_inq_vardimid(ncid, nc_varid, dim_ids); // Now we check if any of the dimensions match one of our known // conventions. Do we need to check all of them, or just the // first/last? 
for (int i = 0; i < num_dims; i++) { if (pfnc_is_complex_dim(ncid, dim_ids[i])) { free(dim_ids); return true; } } free(dim_ids); return false; } /// Return true if a netCDF datatype is a compound type bool is_compound_type(int ncid, int type_id) { // There appears to be no API for detecting whether a type ID is a // primitive type, so we have to check ourselves if (type_id <= NC_MAX_ATOMIC_TYPE) { return false; } int class_type; nc_inq_user_type(ncid, type_id, NULL, NULL, NULL, NULL, &class_type); return class_type == NC_COMPOUND; } /// Copy an array meant for a complex-dimensioned variable size_t* copy_complex_dim_size_t_array( const size_t* old_array, int numdims, size_t complex_dim_value ) { size_t* new_buffer = NULL; if (old_array != NULL) { new_buffer = (size_t*)malloc(sizeof(size_t) * (size_t)numdims); size_t last_dim = (size_t)(numdims - 1); for (size_t i = 0; i < last_dim; i++) { new_buffer[i] = old_array[i]; } new_buffer[last_dim] = complex_dim_value; } return new_buffer; } ptrdiff_t* copy_complex_dim_ptrdiff_t_array( const ptrdiff_t* old_array, int numdims, ptrdiff_t complex_dim_value ) { ptrdiff_t* new_buffer = NULL; if (old_array != NULL) { new_buffer = (ptrdiff_t*)malloc(sizeof(ptrdiff_t) * (size_t)numdims); size_t last_dim = (size_t)(numdims - 1); for (size_t i = 0; i < last_dim; i++) { new_buffer[i] = old_array[i]; } new_buffer[last_dim] = complex_dim_value; } return new_buffer; } bool pfnc_var_is_complex_type(int ncid, int varid) { nc_type var_type_id; if (nc_inq_vartype(ncid, varid, &var_type_id)) { return false; } if (is_compound_type(ncid, var_type_id)) { return compound_type_is_compatible(ncid, var_type_id); } return false; } size_t complex_type_size(nc_type base_type) { switch (base_type) { case NC_FLOAT: return sizeof(float_complex); case NC_DOUBLE: return sizeof(double_complex); default: return 0; } } size_t base_type_size(nc_type base_type) { switch (base_type) { case NC_FLOAT: return sizeof(float); case NC_DOUBLE: return sizeof(double); default: return 0; } } int get_or_make_complex_struct( int ncid, nc_type* nc_typeid, nc_type base_type, const char* struct_name ) { // TODO: Error if not netCDF4 if (file_has_complex_struct(ncid, nc_typeid, base_type)) { return NC_NOERR; } const size_t complex_size = complex_type_size(base_type); if (complex_size == 0) { return NC_EBADTYPE; } const size_t base_size = base_type_size(base_type); if (base_size == 0) { return NC_EBADTYPE; } CHECK(nc_def_compound(ncid, complex_size, struct_name, nc_typeid)); CHECK(nc_insert_compound(ncid, *nc_typeid, "r", 0, base_type)); CHECK(nc_insert_compound(ncid, *nc_typeid, "i", base_size, base_type)); return NC_NOERR; } int pfnc_get_double_complex_typeid(int ncid, nc_type* nc_typeid) { return get_or_make_complex_struct( ncid, nc_typeid, NC_DOUBLE, double_complex_struct_name ); } int pfnc_get_float_complex_typeid(int ncid, nc_type* nc_typeid) { return get_or_make_complex_struct( ncid, nc_typeid, NC_FLOAT, float_complex_struct_name ); } int pfnc_get_complex_dim(int ncid, int* nc_dim) { int num_dims; CHECK(nc_inq_ndims(ncid, &num_dims)); int* dim_ids = (int*)malloc((size_t)num_dims * sizeof(int)); int ierr = nc_inq_dimids(ncid, NULL, dim_ids, true); if (ierr != NC_NOERR) { goto cleanup; } // Now we check if any of the dimensions match one of our known // conventions. Do we need to check all of them, or just the // first/last? 
for (int i = 0; i < num_dims; i++) { if (pfnc_is_complex_dim(ncid, dim_ids[i])) { *nc_dim = dim_ids[i]; goto cleanup; } } ierr = nc_def_dim(ncid, complex_dim_name, 2, nc_dim); cleanup: free(dim_ids); return ierr; } int pfnc_put_vara_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const double_complex* op ) { return pfnc_put_vars_double_complex(ncid, varid, startp, countp, NULL, op); } int pfnc_get_vara_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, double_complex* ip ) { return pfnc_get_vars_double_complex(ncid, varid, startp, countp, NULL, ip); } int pfnc_put_vars_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const double_complex* op ) { if (!pfnc_var_is_complex(ncid, varid)) { return NC_EBADTYPE; } // TODO: handle converting different float sizes // Check if we can get away without fudging count/start sizes if (((startp == NULL) && (countp == NULL) && (stridep == NULL)) || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_put_vars(ncid, varid, startp, countp, stridep, op); } // The real variable has a complex dimension, but we're pretending // it doesn't, so now we need start/count arrays of the real size int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy start/count buffers, appending an extra element for the // complex dimension. This dimension starts at 0 and has 2 elements size_t* start_buffer = copy_complex_dim_size_t_array(startp, numdims, 0); size_t* count_buffer = copy_complex_dim_size_t_array(countp, numdims, 2); ptrdiff_t* stride_buffer = copy_complex_dim_ptrdiff_t_array(stridep, numdims, 1); const int ierr = nc_put_vars(ncid, varid, start_buffer, count_buffer, stride_buffer, op); if (start_buffer != NULL) { free(start_buffer); } if (count_buffer != NULL) { free(count_buffer); } if (stride_buffer != NULL) { free(stride_buffer); } return ierr; } int pfnc_get_vars_double_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, double_complex* ip ) { if (!pfnc_var_is_complex(ncid, varid)) { return NC_EBADTYPE; } // TODO: handle converting different float sizes // Check if we can get away without fudging count/start sizes if (((startp == NULL) && (countp == NULL) && (stridep == NULL)) || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_get_vars(ncid, varid, startp, countp, stridep, ip); } // The real variable has a complex dimension, but we're pretending // it doesn't, so now we need start/count arrays of the real size int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy start/count buffers, appending an extra element for the // complex dimension. 
This dimension starts at 0 and has 2 elements size_t* start_buffer = copy_complex_dim_size_t_array(startp, numdims, 0); size_t* count_buffer = copy_complex_dim_size_t_array(countp, numdims, 2); ptrdiff_t* stride_buffer = copy_complex_dim_ptrdiff_t_array(stridep, numdims, 1); const int ierr = nc_get_vars(ncid, varid, start_buffer, count_buffer, stride_buffer, ip); if (start_buffer != NULL) { free(start_buffer); } if (count_buffer != NULL) { free(count_buffer); } if (stride_buffer != NULL) { free(stride_buffer); } return ierr; } int pfnc_put_var1_double_complex( int ncid, int varid, const size_t* indexp, const double_complex* data ) { return pfnc_put_vara_double_complex(ncid, varid, indexp, coord_one, data); } int pfnc_get_var1_double_complex( int ncid, int varid, const size_t* indexp, double_complex* data ) { return pfnc_get_vara_double_complex(ncid, varid, indexp, coord_one, data); } int pfnc_put_vara_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const float_complex* op ) { return pfnc_put_vars_float_complex(ncid, varid, startp, countp, NULL, op); } int pfnc_get_vara_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, float_complex* ip ) { return pfnc_get_vars_float_complex(ncid, varid, startp, countp, NULL, ip); } int pfnc_put_vars_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const float_complex* op ) { if (!pfnc_var_is_complex(ncid, varid)) { return NC_EBADTYPE; } // TODO: handle converting different float sizes // Check if we can get away without fudging count/start sizes if (((startp == NULL) && (countp == NULL) && (stridep == NULL)) || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_put_vars(ncid, varid, startp, countp, stridep, op); } // The real variable has a complex dimension, but we're pretending // it doesn't, so now we need start/count arrays of the real size int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy start/count buffers, appending an extra element for the // complex dimension. This dimension starts at 0 and has 2 elements size_t* start_buffer = copy_complex_dim_size_t_array(startp, numdims, 0); size_t* count_buffer = copy_complex_dim_size_t_array(countp, numdims, 2); ptrdiff_t* stride_buffer = copy_complex_dim_ptrdiff_t_array(stridep, numdims, 1); const int ierr = nc_put_vars(ncid, varid, start_buffer, count_buffer, stride_buffer, op); if (start_buffer != NULL) { free(start_buffer); } if (count_buffer != NULL) { free(count_buffer); } if (stride_buffer != NULL) { free(stride_buffer); } return ierr; } int pfnc_get_vars_float_complex( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, float_complex* ip ) { if (!pfnc_var_is_complex(ncid, varid)) { return NC_EBADTYPE; } // TODO: handle converting different float sizes // Check if we can get away without fudging count/start sizes if (((startp == NULL) && (countp == NULL) && (stridep == NULL)) || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_get_vars(ncid, varid, startp, countp, stridep, ip); } // The real variable has a complex dimension, but we're pretending // it doesn't, so now we need start/count arrays of the real size int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy start/count buffers, appending an extra element for the // complex dimension. 
This dimension starts at 0 and has 2 elements size_t* start_buffer = copy_complex_dim_size_t_array(startp, numdims, 0); size_t* count_buffer = copy_complex_dim_size_t_array(countp, numdims, 2); ptrdiff_t* stride_buffer = copy_complex_dim_ptrdiff_t_array(stridep, numdims, 1); const int ierr = nc_get_vars(ncid, varid, start_buffer, count_buffer, stride_buffer, ip); if (start_buffer != NULL) { free(start_buffer); } if (count_buffer != NULL) { free(count_buffer); } if (stride_buffer != NULL) { free(stride_buffer); } return ierr; } int pfnc_put_var1_float_complex( int ncid, int varid, const size_t* indexp, const float_complex* data ) { return pfnc_put_vara_float_complex(ncid, varid, indexp, coord_one, data); } int pfnc_get_var1_float_complex( int ncid, int varid, const size_t* indexp, float_complex* data ) { return pfnc_get_vara_float_complex(ncid, varid, indexp, coord_one, data); } int pfnc_def_var( int ncid, const char* name, nc_type xtype, int ndims, const int* dimidsp, int* varidp ) { // If it's not a complex number, we don't need to do anything if (!(xtype == PFNC_DOUBLE_COMPLEX || xtype == PFNC_DOUBLE_COMPLEX_DIM || xtype == PFNC_FLOAT_COMPLEX || xtype == PFNC_FLOAT_COMPLEX_DIM)) { return nc_def_var(ncid, name, xtype, ndims, dimidsp, varidp); } const bool base_is_double = (xtype == PFNC_DOUBLE_COMPLEX || xtype == PFNC_DOUBLE_COMPLEX_DIM); // Check the format used by this file. If it's some variation on the // classic model, then we have to use a complex dimension. Also, // NcZarr, for some reason doesn't support compound types (yet?). // I _think_ DAP supports compound types int format = 0; int mode = 0; CHECK(nc_inq_format_extended(ncid, &format, &mode)); if ((format == NC_FORMAT_CLASSIC || format == NC_FORMAT_NETCDF4_CLASSIC) || (mode == NC_FORMATX_NCZARR)) { xtype = base_is_double ? PFNC_DOUBLE_COMPLEX_DIM : PFNC_FLOAT_COMPLEX_DIM; } if (xtype == PFNC_DOUBLE_COMPLEX_DIM || xtype == PFNC_FLOAT_COMPLEX_DIM) { // Using a complex dimension. We need to get the complex dimension // used in this file and append it to the list of dimensions // passed in by the user int complex_dim = 0; CHECK(pfnc_get_complex_dim(ncid, &complex_dim)); int new_dims = ndims + 1; int* dim_ids_buffer = (int*)malloc((size_t)new_dims * sizeof(int)); for (size_t i = 0; i < (size_t)ndims; i++) { dim_ids_buffer[i] = dimidsp[i]; } dim_ids_buffer[ndims] = complex_dim; const nc_type base_type = base_is_double ? NC_DOUBLE : NC_FLOAT; const int ierr = nc_def_var(ncid, name, base_type, new_dims, dim_ids_buffer, varidp); free(dim_ids_buffer); return ierr; } // Using a complex type. We need to get the complex type used in // this file and pass that as `xtype` nc_type complex_type = 0; if (base_is_double) { CHECK(pfnc_get_double_complex_typeid(ncid, &complex_type)); } else { CHECK(pfnc_get_float_complex_typeid(ncid, &complex_type)); } return nc_def_var(ncid, name, complex_type, ndims, dimidsp, varidp); } int pfnc_inq_var( int ncid, int varid, char* name, nc_type* xtypep, int* ndimsp, int* dimidsp, int* nattsp ) { if (!pfnc_var_has_complex_dimension(ncid, varid)) { return nc_inq_var(ncid, varid, name, xtypep, ndimsp, dimidsp, nattsp); } // Tricky bit: if variable has complex dimension, and user used // pfnc_inq_varndims, then dimidsp is one smaller than netCDF thinks // it should be. So we'll have to allocate our own array of the // correct size and copy out of that. 
// This buffer will point to either the user's array, or our own one int* buffer = dimidsp; int numdims = 0; if (dimidsp != NULL) { CHECK(nc_inq_varndims(ncid, varid, &numdims)); buffer = (int*)malloc(sizeof(int) * (size_t)numdims); } int ierr = nc_inq_var(ncid, varid, name, xtypep, &numdims, buffer, nattsp); if (ierr != NC_NOERR) { goto cleanup; } if (dimidsp != NULL) { if (numdims <= 0) { // This should never happen goto cleanup; } const size_t other_dims = (size_t)(numdims - 1); for (size_t i = 0; i < other_dims; i++) { dimidsp[i] = buffer[i]; } } if (ndimsp != NULL) { *ndimsp = numdims - 1; } cleanup: free(buffer); return ierr; } int pfnc_def_var_chunking(int ncid, int varid, int storage, const size_t* chunksizesp) { if (chunksizesp == NULL || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_def_var_chunking(ncid, varid, storage, chunksizesp); } // The real variable has a complex dimension, but we're pretending // it doesn't, so now we need start/count arrays of the real size int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy chunksize buffer, appending an extra element for the // complex dimension size_t* chunk_buffer = copy_complex_dim_size_t_array(chunksizesp, numdims, 2); const int ierr = nc_def_var_chunking(ncid, varid, storage, chunk_buffer); free(chunk_buffer); return ierr; } int pfnc_inq_var_chunking(int ncid, int varid, int* storagep, size_t* chunksizesp) { if (chunksizesp == NULL || !pfnc_var_has_complex_dimension(ncid, varid)) { return nc_inq_var_chunking(ncid, varid, storagep, chunksizesp); } int numdims = 0; CHECK(nc_inq_varndims(ncid, varid, &numdims)); // Copy chunksize buffer, appending an extra element for the // complex dimension size_t* chunk_buffer = copy_complex_dim_size_t_array(chunksizesp, numdims, 2); const int ierr = nc_inq_var_chunking(ncid, varid, storagep, chunk_buffer); if (ierr != NC_NOERR) { goto cleanup; } const size_t other_dims = (size_t)(numdims - 1); for (size_t i = 0; i < other_dims; i++) { chunksizesp[i] = chunk_buffer[i]; } cleanup: free(chunk_buffer); return ierr; } int pfnc_get_vara( int ncid, int varid, const size_t* startp, const size_t* countp, void* ip ) { if (pfnc_var_is_complex(ncid, varid)) { nc_type base_type; CHECK(pfnc_inq_var_complex_base_type(ncid, varid, &base_type)); switch (base_type) { case NC_DOUBLE: return pfnc_get_vara_double_complex(ncid, varid, startp, countp, ip); case NC_FLOAT: return pfnc_get_vara_float_complex(ncid, varid, startp, countp, ip); default: return NC_EBADTYPE; } } return nc_get_vara(ncid, varid, startp, countp, ip); } int pfnc_put_vara( int ncid, int varid, const size_t* startp, const size_t* countp, const void* op ) { if (pfnc_var_is_complex(ncid, varid)) { nc_type base_type; CHECK(pfnc_inq_var_complex_base_type(ncid, varid, &base_type)); switch (base_type) { case NC_DOUBLE: return pfnc_put_vara_double_complex(ncid, varid, startp, countp, op); case NC_FLOAT: return pfnc_put_vara_float_complex(ncid, varid, startp, countp, op); default: return NC_EBADTYPE; } } return nc_put_vara(ncid, varid, startp, countp, op); } int pfnc_put_vars( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, const void* op ) { if (pfnc_var_is_complex(ncid, varid)) { nc_type base_type; CHECK(pfnc_inq_var_complex_base_type(ncid, varid, &base_type)); switch (base_type) { case NC_DOUBLE: return pfnc_put_vars_double_complex( ncid, varid, startp, countp, stridep, op ); case NC_FLOAT: return pfnc_put_vars_float_complex( ncid, varid, startp, countp, stridep, op ); default: 
return NC_EBADTYPE; } } return nc_put_vars(ncid, varid, startp, countp, stridep, op); } int pfnc_get_vars( int ncid, int varid, const size_t* startp, const size_t* countp, const ptrdiff_t* stridep, void* ip ) { if (pfnc_var_is_complex(ncid, varid)) { nc_type base_type; CHECK(pfnc_inq_var_complex_base_type(ncid, varid, &base_type)); switch (base_type) { case NC_DOUBLE: return pfnc_get_vars_double_complex( ncid, varid, startp, countp, stridep, ip ); case NC_FLOAT: return pfnc_get_vars_float_complex( ncid, varid, startp, countp, stridep, ip ); default: return NC_EBADTYPE; } } return nc_get_vars(ncid, varid, startp, countp, stridep, ip); } netcdf4-python-1.7.4rel/include/000077500000000000000000000000001512661643000165235ustar00rootroot00000000000000netcdf4-python-1.7.4rel/include/membuf.pyx000066400000000000000000000016621512661643000205450ustar00rootroot00000000000000# Creates a memoryview from a malloced C pointer, # which will be freed when the python object is garbage collected. # Code found here is derived from # http://stackoverflow.com/a/28166272/428751 from cpython.buffer cimport PyBuffer_FillInfo from libc.stdlib cimport free # create a python memoryview object from a raw pointer. cdef memview_fromptr(void *memory, size_t size): cdef _MemBuf buf = _MemBuf() buf.memory = memory # malloced void pointer buf.size = size # size of pointer in bytes return memoryview(buf) # private extension type that implements buffer protocol. cdef class _MemBuf: cdef void *memory cdef size_t size def __getbuffer__(self, Py_buffer *buf, int flags): PyBuffer_FillInfo(buf, self, self.memory, self.size, 1, flags) def __releasebuffer__(self, Py_buffer *buf): # why doesn't this do anything?? pass def __dealloc__(self): free(self.memory) netcdf4-python-1.7.4rel/include/mpi-compat.h000066400000000000000000000010741512661643000207440ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI_COMPAT_H #define MPI_COMPAT_H #include "netcdf-compat.h" #if HAS_PARALLEL_SUPPORT #include #ifdef MSMPI_VER #define PyMPI_HAVE_MPI_Message 1 #endif #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #if (MPI_VERSION < 4) && !defined(PyMPI_HAVE_MPI_Session) typedef void *PyMPI_MPI_Session; #define MPI_Session PyMPI_MPI_Session #endif #endif /* HAS_PARALLEL_SUPPORT */ #endif/*MPI_COMPAT_H*/ netcdf4-python-1.7.4rel/include/netCDF4.pxi000066400000000000000000000523121512661643000204370ustar00rootroot00000000000000# size_t, ptrdiff_t are defined in stdlib.h cdef extern from "stdlib.h": ctypedef long size_t ctypedef long ptrdiff_t # hdf5 version info. cdef extern from "H5public.h": ctypedef int herr_t int H5get_libversion( unsigned int *majnum, unsigned int *minnum, unsigned int *relnum ) nogil cdef extern from *: ctypedef char* const_char_ptr "const char*" # netcdf functions. cdef extern from "netcdf.h": ctypedef int nclong ctypedef int nc_type ctypedef struct nc_vlen_t: size_t len # Length of VL data (in base type units) void *p # Pointer to VL data float NC_FILL_FLOAT long NC_FILL_INT double NC_FILL_DOUBLE char NC_FILL_CHAR long long NC_FILL_INT64 unsigned long NC_FILL_UINT unsigned long long NC_FILL_UINT64 cdef enum: NC_NAT # NAT = 'Not A Type' (c.f. NaN) NC_BYTE # signed 1 byte integer NC_CHAR # ISO/ASCII character NC_SHORT # signed 2 byte integer NC_INT # signed 4 byte integer NC_LONG # deprecated, but required for backward compatibility. 
NC_FLOAT # single precision floating point number NC_DOUBLE # double precision floating point number NC_UBYTE # unsigned 1 byte int NC_USHORT # unsigned 2-byte int NC_UINT # unsigned 4-byte int NC_INT64 # signed 8-byte int NC_UINT64 # unsigned 8-byte int NC_STRING # string NC_VLEN # used internally for vlen types NC_OPAQUE # used internally for opaque types NC_COMPOUND # used internally for compound types NC_ENUM # used internally for enum types. # Use these 'mode' flags for nc_open. NC_NOWRITE # default is read only NC_WRITE # read & write # Use these 'mode' flags for nc_create. NC_CLOBBER NC_NOCLOBBER # Don't destroy existing file on create NC_64BIT_OFFSET # Use large (64-bit) file offsets NC_64BIT_DATA # Use cdf-5 format NC_NETCDF4 # Use netCDF-4/HDF5 format NC_CLASSIC_MODEL # Enforce strict netcdf-3 rules. # Use these 'mode' flags for both nc_create and nc_open. NC_SHARE # Share updates, limit caching # The following flag currently is ignored, but use in # nc_open() or nc_create() may someday support use of advisory # locking to prevent multiple writers from clobbering a file NC_LOCK # Use locking if available # Default fill values, used unless _FillValue attribute is set. # These values are stuffed into newly allocated space as appropriate. # The hope is that one might use these to notice that a particular datum # has not been set. NC_FILL_BYTE #NC_FILL_CHAR NC_FILL_SHORT #NC_FILL_INT #NC_FILL_FLOAT #NC_FILL_DOUBLE NC_FILL_UBYTE NC_FILL_USHORT #NC_FILL_UINT #NC_FILL_INT64 #NC_FILL_UINT64 # These represent the max and min values that can be stored in a # netCDF file for their associated types. Recall that a C compiler # may define int to be any length it wants, but a NC_INT is *always* # a 4 byte signed int. On a platform with 64 bit ints, there will # be many ints which are outside the range supported by NC_INT. But # since NC_INT is an external format, it has to mean the same thing # everywhere. NC_MAX_BYTE NC_MIN_BYTE NC_MAX_CHAR NC_MAX_SHORT NC_MIN_SHORT NC_MAX_INT NC_MIN_INT NC_MAX_FLOAT NC_MIN_FLOAT NC_MAX_DOUBLE NC_MIN_DOUBLE NC_MAX_UBYTE NC_MAX_USHORT NC_MAX_UINT NC_MAX_INT64 NC_MIN_INT64 NC_MAX_UINT64 X_INT64_MAX X_INT64_MIN X_UINT64_MAX # The above values are defaults. # If you wish a variable to use a different value than the above # defaults, create an attribute with the same type as the variable # and the following reserved name. The value you give the attribute # will be used as the fill value for that variable. _FillValue NC_FILL NC_NOFILL # Starting with version 3.6, there are different format netCDF # files. 4.0 introduces the third one. These defines are only for # the nc_set_default_format function. NC_FORMAT_CLASSIC NC_FORMAT_64BIT NC_FORMAT_64BIT_OFFSET NC_FORMAT_64BIT_DATA NC_FORMAT_NETCDF4 NC_FORMAT_NETCDF4_CLASSIC NC_FORMAT_NC3 NC_FORMAT_NC_HDF4 NC_FORMAT_NC_HDF5 NC_FORMAT_DAP2 NC_FORMAT_DAP4 NC_FORMAT_PNETCDF NC_FORMAT_UNDEFINED NC_SIZEHINT_DEFAULT NC_ALIGN_CHUNK # 'size' argument to ncdimdef for an unlimited dimension NC_UNLIMITED # attribute id to put/get a global attribute NC_GLOBAL # These maximums are enforced by the interface, to facilitate writing # applications and utilities. However, nothing is statically allocated to # these sizes internally. NC_MAX_DIMS NC_MAX_ATTRS NC_MAX_VARS NC_MAX_NAME NC_MAX_VAR_DIMS # Algorithms for netcdf-4 chunking. NC_CHUNK_SEQ NC_CHUNK_SUB NC_CHUNK_SIZES NC_CHUNKED NC_CONTIGUOUS # The netcdf version 3 functions all return integer error status. 
# These are the possible values, in addition to certain # values from the system errno.h. NC_ISSYSERR NC_NOERR NC2_ERR NC_EBADID NC_ENFILE NC_EEXIST NC_EINVAL NC_EPERM NC_ENOTINDEFINE NC_EINDEFINE NC_EINVALCOORDS NC_EMAXDIMS NC_ENAMEINUSE NC_ENOTATT NC_EMAXATTS NC_EBADTYPE NC_EBADDIM NC_EUNLIMPOS NC_EMAXVARS NC_ENOTVAR NC_EGLOBAL NC_ENOTNC NC_ESTS NC_EMAXNAME NC_EUNLIMIT NC_ENORECVARS NC_ECHAR NC_EEDGE NC_ESTRIDE NC_EBADNAME # N.B. following must match value in ncx.h NC_ERANGE # Math result not representable NC_ENOMEM # Memory allocation (malloc) failure NC_EVARSIZE # One or more variable sizes violate format constraints NC_EDIMSIZE # Invalid dimension size NC_ETRUNC # NetCDFFile likely truncated or possibly corrupted # The following was added in support of netcdf-4. Make all netcdf-4 # error codes < -100 so that errors can be added to netcdf-3 if # needed. NC4_FIRST_ERROR NC_EHDFERR NC_ECANTREAD NC_ECANTWRITE NC_ECANTCREATE NC_EFILEMETA NC_EDIMMETA NC_EATTMETA NC_EVARMETA NC_ENOCOMPOUND NC_EATTEXISTS NC_ENOTNC4 NC_ESTRICTNC3 NC_ENOTNC3 NC_ENOPAR NC_EPARINIT NC_EBADGRPID NC_EBADTYPID NC_ETYPDEFINED NC_EBADFIELD NC_EBADCLASS NC4_LAST_ERROR NC_ENDIAN_NATIVE NC_ENDIAN_LITTLE NC_ENDIAN_BIG const_char_ptr *nc_inq_libvers() nogil const_char_ptr *nc_strerror(int ncerr) int nc_create(char *path, int cmode, int *ncidp) nogil int nc_open(char *path, int mode, int *ncidp) nogil int nc_inq_path(int ncid, size_t *pathlen, char *path) nogil int nc_inq_format_extended(int ncid, int *formatp, int* modep) nogil int nc_inq_ncid(int ncid, char *name, int *grp_ncid) nogil int nc_inq_grps(int ncid, int *numgrps, int *ncids) nogil int nc_inq_grpname(int ncid, char *name) nogil int nc_inq_grp_parent(int ncid, int *parent_ncid) nogil int nc_inq_varids(int ncid, int *nvars, int *varids) nogil int nc_inq_dimids(int ncid, int *ndims, int *dimids, int include_parents) nogil int nc_def_grp(int parent_ncid, char *name, int *new_ncid) nogil int nc_def_compound(int ncid, size_t size, char *name, nc_type *typeidp) nogil int nc_insert_compound(int ncid, nc_type xtype, char *name, size_t offset, nc_type field_typeid) nogil int nc_insert_array_compound(int ncid, nc_type xtype, char *name, size_t offset, nc_type field_typeid, int ndims, int *dim_sizes) nogil int nc_inq_type(int ncid, nc_type xtype, char *name, size_t *size) nogil int nc_inq_compound(int ncid, nc_type xtype, char *name, size_t *size, size_t *nfieldsp) nogil int nc_inq_compound_name(int ncid, nc_type xtype, char *name) nogil int nc_inq_compound_size(int ncid, nc_type xtype, size_t *size) nogil int nc_inq_compound_nfields(int ncid, nc_type xtype, size_t *nfieldsp) nogil int nc_inq_compound_field(int ncid, nc_type xtype, int fieldid, char *name, size_t *offsetp, nc_type *field_typeidp, int *ndimsp, int *dim_sizesp) nogil int nc_inq_compound_fieldname(int ncid, nc_type xtype, int fieldid, char *name) nogil int nc_inq_compound_fieldindex(int ncid, nc_type xtype, char *name, int *fieldidp) nogil int nc_inq_compound_fieldoffset(int ncid, nc_type xtype, int fieldid, size_t *offsetp) nogil int nc_inq_compound_fieldtype(int ncid, nc_type xtype, int fieldid, nc_type *field_typeidp) nogil int nc_inq_compound_fieldndims(int ncid, nc_type xtype, int fieldid, int *ndimsp) nogil int nc_inq_compound_fielddim_sizes(int ncid, nc_type xtype, int fieldid, int *dim_sizes) nogil int nc_def_vlen(int ncid, char *name, nc_type base_typeid, nc_type *xtypep) nogil int nc_inq_vlen(int ncid, nc_type xtype, char *name, size_t *datum_sizep, nc_type *base_nc_typep) nogil int 
nc_inq_user_type(int ncid, nc_type xtype, char *name, size_t *size, nc_type *base_nc_typep, size_t *nfieldsp, int *classp) nogil int nc_inq_typeids(int ncid, int *ntypes, int *typeids) nogil int nc_put_att(int ncid, int varid, char *name, nc_type xtype, size_t len, void *op) nogil int nc_get_att(int ncid, int varid, char *name, void *ip) nogil int nc_get_att_string(int ncid, int varid, char *name, char **ip) nogil int nc_put_att_string(int ncid, int varid, char *name, size_t len, const char **op) nogil int nc_def_opaque(int ncid, size_t size, char *name, nc_type *xtypep) nogil int nc_inq_opaque(int ncid, nc_type xtype, char *name, size_t *sizep) nogil int nc_put_att_opaque(int ncid, int varid, char *name, size_t len, void *op) nogil int nc_get_att_opaque(int ncid, int varid, char *name, void *ip) nogil int nc_put_cmp_att_opaque(int ncid, nc_type xtype, int fieldid, char *name, size_t len, void *op) nogil int nc_get_cmp_att_opaque(int ncid, nc_type xtype, int fieldid, char *name, void *ip) nogil int nc_put_var1(int ncid, int varid, size_t *indexp, void *op) nogil int nc_get_var1(int ncid, int varid, size_t *indexp, void *ip) nogil int nc_put_vara(int ncid, int varid, size_t *startp, size_t *countp, void *op) nogil int nc_get_vara(int ncid, int varid, size_t *startp, size_t *countp, void *ip) nogil int nc_put_vars(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, void *op) nogil int nc_get_vars(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, void *ip) nogil int nc_put_varm(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, ptrdiff_t *imapp, void *op) nogil int nc_get_varm(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, ptrdiff_t *imapp, void *ip) nogil int nc_put_var(int ncid, int varid, void *op) nogil int nc_get_var(int ncid, int varid, void *ip) nogil int nc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, int deflate_level) nogil int nc_def_var_fletcher32(int ncid, int varid, int fletcher32) nogil int nc_inq_var_fletcher32(int ncid, int varid, int *fletcher32p) nogil int nc_def_var_chunking(int ncid, int varid, int contiguous, size_t *chunksizesp) nogil int nc_def_var_fill(int ncid, int varid, int no_fill, void *fill_value) nogil int nc_def_var_endian(int ncid, int varid, int endian) nogil int nc_inq_var_chunking(int ncid, int varid, int *contiguousp, size_t *chunksizesp) nogil int nc_inq_var_deflate(int ncid, int varid, int *shufflep, int *deflatep, int *deflate_levelp) nogil int nc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_value) nogil int nc_inq_var_endian(int ncid, int varid, int *endianp) nogil int nc_set_fill(int ncid, int fillmode, int *old_modep) nogil int nc_set_default_format(int format, int *old_formatp) nogil int nc_redef(int ncid) nogil int nc_enddef(int ncid) nogil int nc_sync(int ncid) nogil int nc_abort(int ncid) nogil int nc_close(int ncid) nogil int nc_inq(int ncid, int *ndimsp, int *nvarsp, int *nattsp, int *unlimdimidp) nogil int nc_inq_ndims(int ncid, int *ndimsp) nogil int nc_inq_nvars(int ncid, int *nvarsp) nogil int nc_inq_natts(int ncid, int *nattsp) nogil int nc_inq_unlimdim(int ncid, int *unlimdimidp) nogil int nc_inq_unlimdims(int ncid, int *nunlimdimsp, int *unlimdimidsp) nogil int nc_inq_format(int ncid, int *formatp) nogil int nc_def_dim(int ncid, char *name, size_t len, int *idp) nogil int nc_inq_dimid(int ncid, char *name, int *idp) nogil int nc_inq_dim(int ncid, int dimid, char *name, size_t *lenp) nogil int nc_inq_dimname(int 
ncid, int dimid, char *name) nogil int nc_inq_dimlen(int ncid, int dimid, size_t *lenp) nogil int nc_rename_dim(int ncid, int dimid, char *name) nogil int nc_inq_att(int ncid, int varid, char *name, nc_type *xtypep, size_t *lenp) nogil int nc_inq_attid(int ncid, int varid, char *name, int *idp) nogil int nc_inq_atttype(int ncid, int varid, char *name, nc_type *xtypep) nogil int nc_inq_attlen(int ncid, int varid, char *name, size_t *lenp) nogil int nc_inq_attname(int ncid, int varid, int attnum, char *name) nogil int nc_copy_att(int ncid_in, int varid_in, char *name, int ncid_out, int varid_out) int nc_rename_att(int ncid, int varid, char *name, char *newname) nogil int nc_del_att(int ncid, int varid, char *name) nogil int nc_put_att_text(int ncid, int varid, char *name, size_t len, char *op) nogil int nc_get_att_text(int ncid, int varid, char *name, char *ip) nogil int nc_def_var(int ncid, char *name, nc_type xtype, int ndims, int *dimidsp, int *varidp) nogil int nc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, int *dimidsp, int *nattsp) nogil int nc_inq_varid(int ncid, char *name, int *varidp) nogil int nc_inq_varname(int ncid, int varid, char *name) nogil int nc_inq_vartype(int ncid, int varid, nc_type *xtypep) nogil int nc_inq_varndims(int ncid, int varid, int *ndimsp) nogil int nc_inq_vardimid(int ncid, int varid, int *dimidsp) nogil int nc_inq_varnatts(int ncid, int varid, int *nattsp) nogil int nc_rename_var(int ncid, int varid, char *name) nogil int nc_free_vlen(nc_vlen_t *vl) nogil int nc_free_vlens(size_t len, nc_vlen_t *vl) nogil int nc_free_string(size_t len, char **data) nogil int nc_get_chunk_cache(size_t *sizep, size_t *nelemsp, float *preemptionp) nogil int nc_set_chunk_cache(size_t size, size_t nelems, float preemption) nogil int nc_set_var_chunk_cache(int ncid, int varid, size_t size, size_t nelems, float preemption) nogil int nc_get_var_chunk_cache(int ncid, int varid, size_t *sizep, size_t *nelemsp, float *preemptionp) nogil int nc_def_enum(int ncid, nc_type base_typeid, char *name, nc_type *typeidp) nogil int nc_insert_enum(int ncid, nc_type xtype, char *name, void *value) nogil int nc_inq_enum(int ncid, nc_type xtype, char *name, nc_type *base_nc_typep,\ size_t *base_sizep, size_t *num_membersp) nogil int nc_inq_enum_member(int ncid, nc_type xtype, int idx, char *name, void *value) nogil int nc_inq_enum_ident(int ncid, nc_type xtype, long long value, char *identifier) nogil cdef extern from "mpi-compat.h": pass # taken from numpy.pxi in numpy 1.0rc2. 
cdef extern from "numpy/arrayobject.h": ctypedef int npy_intp ctypedef extern class numpy.ndarray [object PyArrayObject]: pass npy_intp PyArray_SIZE(ndarray arr) nogil npy_intp PyArray_ISCONTIGUOUS(ndarray arr) nogil npy_intp PyArray_ISALIGNED(ndarray arr) nogil void* PyArray_DATA(ndarray) nogil char* PyArray_BYTES(ndarray) nogil npy_intp* PyArray_STRIDES(ndarray) nogil void import_array() include "parallel_support_imports.pxi" # Compatibility shims cdef extern from "netcdf-compat.h": int nc_rename_grp(int grpid, char *name) nogil int nc_set_alignment(int threshold, int alignment) int nc_get_alignment(int *threshold, int *alignment) int nc_rc_set(char* key, char* value) nogil const_char_ptr *nc_rc_get(char* key) int nc_open_mem(const char *path, int mode, size_t size, void* memory, int *ncidp) nogil int nc_create_mem(const char *path, int mode, size_t initialize, int *ncidp) nogil ctypedef struct NC_memio: size_t size void* memory int flags int nc_close_memio(int ncid, NC_memio* info) nogil # Quantize shims int nc_def_var_quantize(int ncid, int varid, int quantize_mode, int nsd) nogil int nc_inq_var_quantize(int ncid, int varid, int *quantize_modep, int *nsdp) nogil # Filter shims int nc_inq_filter_avail(int ncid, unsigned filterid) nogil int nc_def_var_szip(int ncid, int varid, int options_mask, int pixels_per_bloc) nogil int nc_inq_var_szip(int ncid, int varid, int *options_maskp, int *pixels_per_blockp) nogil int nc_def_var_zstandard(int ncid, int varid, int level) nogil int nc_inq_var_zstandard(int ncid, int varid, int* hasfilterp, int *levelp) nogil int nc_def_var_bzip2(int ncid, int varid, int level) nogil int nc_inq_var_bzip2(int ncid, int varid, int* hasfilterp, int *levelp) nogil int nc_def_var_blosc(int ncid, int varid, unsigned subcompressor, unsigned level, unsigned blocksize, unsigned addshuffle) nogil int nc_inq_var_blosc(int ncid, int varid, int* hasfilterp, unsigned* subcompressorp, unsigned* levelp, unsigned* blocksizep, unsigned* addshufflep) nogil # Parallel shims int nc_create_par(char *path, int cmode, MPI_Comm comm, MPI_Info info, int *ncidp) nogil int nc_open_par(char *path, int mode, MPI_Comm comm, MPI_Info info, int *ncidp) nogil int nc_var_par_access(int ncid, int varid, int par_access) nogil cdef enum: HAS_RENAME_GRP HAS_NC_INQ_PATH HAS_NC_INQ_FORMAT_EXTENDED HAS_NC_OPEN_MEM HAS_NC_CREATE_MEM HAS_CDF5_FORMAT HAS_PARALLEL_SUPPORT HAS_PARALLEL4_SUPPORT HAS_PNETCDF_SUPPORT HAS_SZIP_SUPPORT HAS_QUANTIZATION_SUPPORT HAS_ZSTANDARD_SUPPORT HAS_BZIP2_SUPPORT HAS_BLOSC_SUPPORT HAS_SET_ALIGNMENT HAS_NCFILTER HAS_NCRCSET NC_NOQUANTIZE NC_QUANTIZE_BITGROOM NC_QUANTIZE_GRANULARBR NC_QUANTIZE_BITROUND H5Z_FILTER_SZIP H5Z_FILTER_ZSTD H5Z_FILTER_BZIP2 H5Z_FILTER_BLOSC NC_COLLECTIVE NC_INDEPENDENT NC_MPIIO NC_MPIPOSIX NC_PNETCDF # Declarations for handling complex numbers cdef extern from "nc_complex/nc_complex.h": bint pfnc_var_is_complex(int ncid, int varid) nogil bint pfnc_var_is_complex_type(int ncid, int varid) nogil int pfnc_get_complex_dim(int ncid, int* nc_dim) nogil int pfnc_inq_var_complex_base_type(int ncid, int varid, int* nc_typeid) nogil int pfnc_inq_varndims (int ncid, int varid, int *ndimsp) nogil int pfnc_inq_vardimid (int ncid, int varid, int *dimidsp) nogil int pfnc_def_var(int ncid, char *name, nc_type xtype, int ndims, int *dimidsp, int *varidp) nogil int pfnc_get_vars(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, void *ip) nogil int pfnc_put_vars(int ncid, int varid, size_t *startp, size_t *countp, ptrdiff_t *stridep, void *op) 
nogil cdef enum: PFNC_DOUBLE_COMPLEX PFNC_DOUBLE_COMPLEX_DIM PFNC_FLOAT_COMPLEX PFNC_FLOAT_COMPLEX_DIM netcdf4-python-1.7.4rel/include/netcdf-compat.h000066400000000000000000000136101512661643000214210ustar00rootroot00000000000000#ifndef NETCDF_COMPAT_H #define NETCDF_COMPAT_H #include #include #define NC_VERSION_EQ(MAJOR, MINOR, PATCH) \ ((NC_VERSION_MAJOR == (MAJOR)) && \ (NC_VERSION_MINOR == (MINOR)) && \ (NC_VERSION_PATCH == (PATCH))) #define NC_VERSION_GT(MAJOR, MINOR, PATCH) \ (NC_VERSION_MAJOR > (MAJOR) || \ (NC_VERSION_MAJOR == (MAJOR) && \ (NC_VERSION_MINOR > (MINOR) || \ (NC_VERSION_MINOR == (MINOR) && \ (NC_VERSION_PATCH > (PATCH)))))) #define NC_VERSION_GE(MAJOR, MINOR, PATCH) \ (NC_VERSION_GT(MAJOR, MINOR, PATCH) || \ NC_VERSION_EQ(MAJOR, MINOR, PATCH)) #if NC_VERSION_GE(4, 3, 0) #define HAS_RENAME_GRP 1 #else #define HAS_RENAME_GRP 0 static inline int nc_rename_grp(int grpid, const char* name) { return NC_EINVAL; } #endif #if NC_VERSION_GE(4, 1, 2) #define HAS_NC_INQ_PATH 1 #else #define HAS_NC_INQ_PATH 0 static inline int nc_inq_path(int ncid, size_t *pathlen, char *path) { *pathlen = 0; *path = "\0"; return NC_EINVAL; } #endif #if NC_VERSION_GE(4, 3, 1) #define HAS_NC_INQ_FORMAT_EXTENDED 1 #else #define HAS_NC_INQ_FORMAT_EXTENDED 0 static inline int nc_inq_format_extended(int ncid, int *formatp, int* modep) { *formatp = 0; *modep = 0; return NC_EINVAL; } #endif #if NC_VERSION_GE(4, 9, 0) #define HAS_SET_ALIGNMENT 1 #else #define HAS_SET_ALIGNMENT 0 static inline int nc_set_alignment(int threshold, int alignment) { return NC_EINVAL; } static inline int nc_get_alignment(int* thresholdp, int* alignmentp) { *thresholdp = 0; *alignmentp = 0; return NC_EINVAL; } #endif #if NC_VERSION_GE(4, 9, 0) #define HAS_NCRCSET 1 #else #define HAS_NCRCSET 0 static inline int nc_rc_set(const char* key, const char* value) { return NC_EINVAL; } static inline const char *nc_rc_get(const char* key) { return NULL; } #endif #if NC_VERSION_GE(4, 4, 0) #include #define HAS_NC_OPEN_MEM 1 #else #define HAS_NC_OPEN_MEM 0 static inline int nc_open_mem(const char *path, int mode, size_t size, void* memory, int *ncidp) { return NC_EINVAL; } #endif #if NC_VERSION_GE(4, 6, 2) #define HAS_NC_CREATE_MEM 1 #else #define HAS_NC_CREATE_MEM 0 static inline int nc_create_mem(const char *path, int mode, size_t initialize, int *ncidp) { return NC_EINVAL; } typedef struct NC_memio { size_t size; void* memory; int flags; } NC_memio; static inline int nc_close_memio(int ncid, NC_memio* info) { return NC_EINVAL; } #endif #if defined(NC_HAS_CDF5) && NC_HAS_CDF5 #define HAS_CDF5_FORMAT 1 #else # ifndef NC_HAS_CDF5 # define NC_64BIT_DATA 0x0020 # define NC_CDF5 NC_64BIT_DATA # define NC_FORMAT_64BIT_OFFSET (2) # define NC_FORMAT_64BIT_DATA (5) # endif #define HAS_CDF5_FORMAT 0 #endif #if defined(NC_HAS_PARALLEL) && NC_HAS_PARALLEL #include #define HAS_PARALLEL_SUPPORT 1 #else #define HAS_PARALLEL_SUPPORT 0 typedef int MPI_Comm; typedef int MPI_Info; static inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info, int *ncidp) { return NC_EINVAL; } static inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info, int *ncidp) { return NC_EINVAL; } static inline int nc_var_par_access(int ncid, int varid, int par_access) { return NC_EINVAL; } # ifndef NC_INDEPENDENT # define NC_INDEPENDENT 0 # define NC_COLLECTIVE 1 # endif # ifndef NC_MPIIO # define NC_MPIIO 0x2000 # define NC_MPIPOSIX NC_MPIIO # define NC_PNETCDF (NC_MPIIO) # endif #endif #if defined(NC_HAS_PARALLEL4) && 
NC_HAS_PARALLEL4 #define HAS_PARALLEL4_SUPPORT 1 #else #define HAS_PARALLEL4_SUPPORT 0 #endif #if defined(NC_HAS_PNETCDF) && NC_HAS_PNETCDF #define HAS_PNETCDF_SUPPORT 1 #else #define HAS_PNETCDF_SUPPORT 0 #endif #if NC_VERSION_GE(4, 7, 0) #include #endif #if NC_VERSION_GE(4, 9, 0) #define HAS_NCFILTER 1 #else #define HAS_NCFILTER 0 static inline int nc_inq_filter_avail(int ncid, unsigned filterid) { return -136; } #endif #if defined(NC_HAS_SZIP) && NC_HAS_SZIP #define HAS_SZIP_SUPPORT 1 #else #define HAS_SZIP_SUPPORT 0 # ifndef NC_HAS_SZIP static inline int nc_def_var_szip(int ncid, int varid, int options_mask, int pixels_per_bloc) { return NC_EINVAL; } # endif # ifndef H5Z_FILTER_SZIP # define H5Z_FILTER_SZIP 4 # endif #endif #if defined(NC_HAS_QUANTIZE) && NC_HAS_QUANTIZE #define HAS_QUANTIZATION_SUPPORT 1 #else #define HAS_QUANTIZATION_SUPPORT 0 # ifndef NC_HAS_QUANTIZE static inline int nc_def_var_quantize(int ncid, int varid, int quantize_mode, int nsd) { return NC_EINVAL; } static inline int nc_inq_var_quantize(int ncid, int varid, int *quantize_modep, int *nsdp) { return NC_EINVAL; } # define NC_NOQUANTIZE 0 # define NC_QUANTIZE_BITGROOM 1 # define NC_QUANTIZE_GRANULARBR 2 # define NC_QUANTIZE_BITROUND 3 # endif #endif #if defined(NC_HAS_ZSTD) && NC_HAS_ZSTD #define HAS_ZSTANDARD_SUPPORT 1 #else # ifndef NC_HAS_ZSTD static inline int nc_def_var_zstandard(int ncid, int varid, int level) { return NC_EINVAL; } static inline int nc_inq_var_zstandard(int ncid, int varid, int* hasfilterp, int *levelp) { return NC_EINVAL; } # define H5Z_FILTER_ZSTD 32015 # endif #define HAS_ZSTANDARD_SUPPORT 0 #endif #if defined(NC_HAS_BZ2) && NC_HAS_BZ2 #define HAS_BZIP2_SUPPORT 1 #else # ifndef NC_HAS_BZ2 static inline int nc_def_var_bzip2(int ncid, int varid, int level) { return NC_EINVAL; } static inline int nc_inq_var_bzip2(int ncid, int varid, int* hasfilterp, int *levelp) { return NC_EINVAL; } # define H5Z_FILTER_BZIP2 307 # endif #define HAS_BZIP2_SUPPORT 0 #endif #if defined(NC_HAS_BLOSC) && NC_HAS_BLOSC #define HAS_BLOSC_SUPPORT 1 #else # ifndef NC_HAS_BLOSC static inline int nc_def_var_blosc(int ncid, int varid, unsigned subcompressor, unsigned level, unsigned blocksize, unsigned addshuffle) { return NC_EINVAL; } static inline int nc_inq_var_blosc(int ncid, int varid, int* hasfilterp, unsigned* subcompressorp, unsigned* levelp, unsigned* blocksizep, unsigned* addshufflep) { return NC_EINVAL; } # define H5Z_FILTER_BLOSC 32001 # endif #define HAS_BLOSC_SUPPORT 0 #endif #endif /* NETCDF_COMPAT_H */ netcdf4-python-1.7.4rel/include/no_parallel_support_imports.pxi.in000066400000000000000000000003401512661643000255100ustar00rootroot00000000000000# Stubs for when parallel support is not enabled ctypedef int MPI_Comm ctypedef int MPI_Info ctypedef int Comm ctypedef int Info cdef MPI_Comm MPI_COMM_WORLD cdef MPI_Info MPI_INFO_NULL MPI_COMM_WORLD = 0 MPI_INFO_NULL = 0 netcdf4-python-1.7.4rel/include/parallel_support_imports.pxi.in000066400000000000000000000005041512661643000250160ustar00rootroot00000000000000# Imports and typedefs required at compile time for enabling parallel support cimport mpi4py.MPI as MPI from mpi4py.libmpi cimport ( MPI_Comm, MPI_Info, MPI_Comm_dup, MPI_Info_dup, MPI_Comm_free, MPI_Info_free, MPI_INFO_NULL, MPI_COMM_WORLD, ) ctypedef MPI.Comm Comm ctypedef MPI.Info Info 
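# A minimal usage sketch of the parallel API these imports enable (it assumes
# mpi4py is installed and MPI-enabled builds of HDF5/netcdf-c; the file name
# "parallel_test.nc" is only a placeholder), run under e.g. `mpirun -np 4`:
#
#     from mpi4py import MPI
#     from netCDF4 import Dataset
#
#     nc = Dataset("parallel_test.nc", "w", parallel=True,
#                  comm=MPI.COMM_WORLD, info=MPI.Info())
#     nc.createDimension("dim", 4)
#     v = nc.createVariable("var", "i4", ("dim",))
#     v[MPI.COMM_WORLD.rank] = MPI.COMM_WORLD.rank
#     nc.close()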
netcdf4-python-1.7.4rel/man/000077500000000000000000000000001512661643000156535ustar00rootroot00000000000000netcdf4-python-1.7.4rel/man/nc3tonc4.1000066400000000000000000000061641512661643000173770ustar00rootroot00000000000000.\" (C) Copyright 2015, Ross Gammon , .\" .TH NC3TONC4 1 "22 Mar 2015" .\" .SH NAME nc3tonc4 \- a program to convert netCDF 3 files to netCDF 4 format files .SH SYNOPSIS .B nc3tonc4 .RB [ \-h ] .RB [ \-o ] .RB [ \-\-vars=\fIvar1,var2,..\fR ] .RB [ \-\-zlib=\fI(0|1)\fR ] .RB [ \-\-complevel=\fI(1\-9)\fR ] .RB [ \-\-shuffle=\fI(0|1)\fR ] .RB [ \-\-fletcher32=\fI(0|1)\fR ] .RB [ \-\-unpackshort=\fI(0|1)\fR ] .RB [ \-\-quantize=\fIvar1=n1,var2=n2,..\fR ] .I netcdf3filename .I netcdf4filename .br .SH DESCRIPTION This manual page documents briefly the .B nc3tonc4 command. .PP \fBnc3tonc4\fP is a program that converts a netCDF 3 file into netCDF 4 format, optionally unpacking variables packed as short integers (with scale_factor and add_offset) to floats, and adding zlib compression (with the HDF5 shuffle filter and fletcher32 checksum). Data may also be quantized (truncated) to a specified precision to improve compression. .SH OPTIONS These programs follow the usual GNU command line syntax, with long options starting with two dashes (`-'). A summary of options is included below. .TP .B \-h Shows a summary of the available options. .TP .B \-o Overwrite destination file (default is to raise an error if output file already exists). .TP .B \-\-vars A comma separated list of variable names to copy (default is to copy all variables). .TP .B \-\-classic=(0|1) Use NETCDF4_CLASSIC format instead of NETCDF4 (default = 1). .TP .B \-\-zlib=(0|1) Activate (or disable) zlib compression (the default is to activate). .TP .B \-\-complevel=(1-9) Set the zlib compression level (6 is default). .TP .B \-\-shuffle=(0|1) Activate (or disable) the shuffle filter (it is active by default). .TP .B \-\-fletcher32=(0|1) Activate (or disable) the fletcher32 checksum (it is not active by default). .TP .B \-\-unpackshort=(0|1) Unpack short integer variables to float variables using scale_factor and add_offset netCDF variable attributes (it is active by default). .TP .B \-\-quantize=(comma separated list of "variable name=integer" pairs) Truncate the data in the specified variables to a given decimal precision. For example, 'speed=2, height=-2, temp=0' will cause the variable 'speed' to be truncated to a precision of 0.01, 'height' to a precision of 100 and 'temp' to 1. This can significantly improve compression. The default is not to quantize any of the variables. .TP .B \-\-quiet=(0|1) If set to 1, don't print any diagnostic information. .TP .B \-\-chunk=(integer) The number of records along unlimited dimension to write at once. The default is 10. It is ignored if there is no unlimited dimension. If chunk=0, it means write all the data at once. .TP .B \-\-istart=(integer) The number of the record to start at along unlimited dimension. The default is 0. This option is ignored if there is no unlimited dimension. .TP .B \-\-istop=(integer) The number of the record to stop at along unlimited dimension. The default is \-1. This option is ignored if there is no unlimited dimension. .SH SEE ALSO .BR ncinfo (1), .BR nc4tonc3 (1). .br .SH AUTHOR This manual page was written by Ross Gammon based on the options displayed by nc3tonc4 \-h.
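.SH EXAMPLES A typical invocation might look as follows (the file names are placeholders): convert a netCDF 3 file to compressed NETCDF4_CLASSIC format, quantizing the variable 'temp' to a precision of 0.1. .PP .nf nc3tonc4 \-o \-\-complevel=6 \-\-quantize=temp=1 in3.nc out4.nc .fi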
netcdf4-python-1.7.4rel/man/nc4tonc3.1000066400000000000000000000025361512661643000173760ustar00rootroot00000000000000.\" (C) Copyright 2015, Ross Gammon , .\" .TH NC4TONC3 1 "22 Mar 2015" .\" .SH NAME nc4tonc3 \- a program to convert a classic netCDF 4 file to netCDF 3 format .SH SYNOPSIS .B nc4tonc3 .RB [ \-h ] .RB [ \-o ] .RB [ \-\-chunk ] .I netcdf4filename .I netcdf3filename .br .SH DESCRIPTION This manual page documents briefly the .B nc4tonc3 command. .PP \fBnc4tonc3\fP is a program that converts a netCDF 4 file (in NETCDF4_CLASSIC format) to netCDF 3 format. .SH OPTIONS These programs follow the usual GNU command line syntax, with long options starting with two dashes (`-'). A summary of options is included below. .TP .B \-h Shows a summary of the available options. .TP .B \-o Overwrite destination file (default is to raise an error if output file already exists). .TP .B \-\-quiet=(0|1) If set to 1, don't print any diagnostic information. .TP .B \-\-format Choose the netcdf3 format to use. NETCDF3_64BIT is used by default, or it can be set to NETCDF3_CLASSIC. .TP .B \-\-chunk=(integer) The number of records along unlimited dimension to write at once. The default is 10. It is ignored if there is no unlimited dimension. If chunk=0, this means write all the data at once. .SH SEE ALSO .BR ncinfo (1), .BR nc3tonc4 (1). .br .SH AUTHOR This manual page was written by Ross Gammon based on the options displayed by nc4tonc3 \-h. netcdf4-python-1.7.4rel/man/ncinfo.1000066400000000000000000000024401512661643000172110ustar00rootroot00000000000000.\" (C) Copyright 2015, Ross Gammon , .\" .TH NCINFO 1 "22 Mar 2015" .\" .SH NAME ncinfo \- a program to print summary information about a netCDF file .SH SYNOPSIS .B ncinfo .RB [ \-h ] .RB [ \-g|\-\-group=\fIgrp\fR ] .RB [ \-v|\-\-variable=\fIvar\fR ] .RB [ \-d|\-\-dimension=\fIdim\fR ] .I filename .br .SH DESCRIPTION This manual page documents briefly the .B ncinfo command. .PP \fBncinfo\fP is a program that prints summary information about a netCDF file. .SH OPTIONS These programs follow the usual GNU command line syntax, with long options starting with two dashes (`-'). A summary of options is included below. .TP .B \-h Shows a summary of the available options. .TP .B \-g grp, \-\-group=grp Prints information for this group. The default group is the root group. Nested groups are specified using posix paths e.g. group1/group2/group3. .TP .B \-v var, \-\-variable=var Prints information for this variable. .TP .B \-d dim, \-\-dimension=dim Prints information for this dimension. .TP The filename of the netCDF file must be supplied as the last argument. .SH SEE ALSO .BR nc3tonc4 (1), .BR nc4tonc3 (1). .br .SH AUTHOR This manual page was written by Ross Gammon based on the options displayed by ncinfo \-h.
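.SH EXAMPLES Illustrative invocations (the file, group and variable names are placeholders): print a summary of the whole file, of one variable, and of a nested group. .PP .nf ncinfo data.nc ncinfo \-v temperature data.nc ncinfo \-g group1/group2 data.nc .fi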
netcdf4-python-1.7.4rel/pyproject.toml000066400000000000000000000126611512661643000200220ustar00rootroot00000000000000[build-system] requires = [ "Cython>=0.29", "numpy>=2.0.0", "setuptools>=77.0.1", "setuptools_scm[toml]>=3.4", ] build-backend = "setuptools.build_meta" [project] name = "netCDF4" description = "Provides an object-oriented python interface to the netCDF version 4 library" authors = [ {name = "Jeff Whitaker", email = "whitaker.jeffrey@gmail.com"}, ] requires-python = ">=3.10" keywords = [ "numpy", "netcdf", "data", "science", "network", "oceanography", "meteorology", "climate", ] license = "MIT" license-files = ["LICENSE"] classifiers = [ "Development Status :: 3 - Alpha", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Intended Audience :: Science/Research", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Archiving :: Compression", "Operating System :: OS Independent", ] dependencies = [ "cftime", "certifi", "numpy>=2.3.0; platform_system == 'Windows' and platform_machine == 'ARM64'", "numpy>=1.21.2; platform_system != 'Windows' or platform_machine != 'ARM64'", ] dynamic = ["version"] [project.optional-dependencies] tests = [ "Cython", "packaging", "pytest", "typing-extensions>=4.15.0", ] parallel = [ "mpi4py", ] [project.readme] text = """\ netCDF version 4 has many features not found in earlier versions of the library, such as hierarchical groups, zlib compression, multiple unlimited dimensions, and new data types. It is implemented on top of HDF5. This module implements most of the new features, and can read and write netCDF files compatible with older versions of the library. The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module. 
""" content-type = "text/x-rst" [project.scripts] nc3tonc4 = "netCDF4.utils:nc3tonc4" nc4tonc3 = "netCDF4.utils:nc4tonc3" ncinfo = "netCDF4.utils:ncinfo" [project.urls] Documentation = "https://unidata.github.io/netcdf4-python/" Repository = "https://github.com/Unidata/netcdf4-python" [tool.setuptools.packages.find] where = ["src"] [tool.setuptools.package-data] "netCDF4.plugins" = ["*__nc*"] [tool.setuptools_scm] [tool.pytest.ini_options] pythonpath = ["test"] filterwarnings = [ "error", "ignore::UserWarning", "ignore::RuntimeWarning", ] [tool.mypy] files = ["src/netCDF4"] exclude = "utils.py" check_untyped_defs = true allow_redefinition = true # next 2 lines workarounds for mypy dealing with type_guards.py mypy_path = "test" explicit_package_bases = true [[tool.mypy.overrides]] ignore_missing_imports = true module = [ "cftime.*", "cython.*", "filter_availability", "matplotlib.*" ] [tool.cibuildwheel] build-verbosity = 1 build-frontend = "build" skip = [ "*-musllinux*", ] test-extras = "tests" test-sources = [ "test", "pyproject.toml" ] test-command = [ '''python -c "import netCDF4; print(f'netCDF4 v{netCDF4.__version__}')"''', "pytest -s -rxs -v test", ] manylinux-x86_64-image = "ghcr.io/ocefpaf/manylinux_2_28_x86_64-netcdf" manylinux-aarch64-image = "ghcr.io/ocefpaf/manylinux_2_28_aarch64-netcdf" environment = {NETCDF4_LIMITED_API="1"} [tool.cibuildwheel.macos] # https://cibuildwheel.pypa.io/en/stable/faq/#macos-passing-dyld_library_path-to-delocate repair-wheel-command = """\ DYLD_FALLBACK_LIBRARY_PATH=/Users/runner/micromamba/envs/build/lib \ delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} \ """ [tool.cibuildwheel.windows] before-build = "python -m pip install delvewheel" repair-wheel-command = [ "delvewheel show --include blosc.dll;zstd.dll;lz4.dll {wheel}", "delvewheel repair --include blosc.dll;zstd.dll;lz4.dll -w {dest_dir} {wheel}", ] [[tool.cibuildwheel.overrides]] select = "*linux*" environment = {NETCDF_PLUGIN_DIR="/usr/local/hdf5/lib/plugin/"} [[tool.cibuildwheel.overrides]] select = "*-macosx_x86_64" inherit.environment = "append" environment = {MACOSX_DEPLOYMENT_TARGET="13.0",HDF5_DIR="/Users/runner/micromamba/envs/build",netCDF4_DIR="/Users/runner/micromamba/envs/build",PATH="${PATH}:/Users/runner/micromamba/envs/build/bin",NETCDF_PLUGIN_DIR="/Users/runner/micromamba/envs/build/hdf5/lib/plugin"} [[tool.cibuildwheel.overrides]] select = "*-macosx_arm64" inherit.environment = "append" environment = {MACOSX_DEPLOYMENT_TARGET="14.0",HDF5_DIR="/Users/runner/micromambe/envs/build",netCDF4_DIR="/Users/runner/micromambe/envs/build",PATH="${PATH}:/Users/runner/micromamba/envs/build/bin",NETCDF_PLUGIN_DIR="/Users/runner/micromamba/envs/build/hdf5/lib/plugin"} [[tool.cibuildwheel.overrides]] select = "*-win_*" inherit.environment = "append" environment = {HDF5_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library',netCDF4_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library',PATH='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library\\bin;${PATH}',NETCDF_PLUGIN_DIR='C:\\\\Users\\runneradmin\\micromamba\\envs\\build\\Library\\hdf5\\lib\\plugin'} [[tool.cibuildwheel.overrides]] select = "*-win_arm64" inherit.environment = "append" environment = { HDF5_DIR = 'C:\\\\vcpkg\\\\installed\\\\arm64-windows', netCDF4_DIR = 'C:\\\\vcpkg\\\\installed\\\\arm64-windows', PATH = 'C:\\\\vcpkg\\\\installed\\\\arm64-windows\\\\bin;${PATH}', NO_CDL = '1' } repair-wheel-command = [ "delvewheel show {wheel}", "delvewheel repair -w {dest_dir} {wheel}", ] 
netcdf4-python-1.7.4rel/setup.cfg000066400000000000000000000045321512661643000167250ustar00rootroot00000000000000# Rename this file to setup.cfg to set build options. # Follow instructions below for editing. [options] # if true, the nc-config script (installed with netcdf 4.1.2 and higher) # will be used to determine the locations of required libraries. # Usually, nothing else is needed. use_ncconfig=True # path to nc-config script (use if not found in unix PATH). #ncconfig=/usr/local/bin/nc-config [directories] # # If nc-config doesn't do the trick, you can specify the locations # of the libraries and headers manually below # # uncomment and set to netCDF install location. # Include files should be located in netCDF4_dir/include and # the library should be located in netCDF4_dir/lib. # If the libraries and include files are installed in separate locations, # use netCDF4_libdir and netCDF4_incdir to specify the locations # separately. #netCDF4_dir = /usr/local # uncomment and set to HDF5 install location. # Include files should be located in HDF5_dir/include and # the library should be located in HDF5_dir/lib. # If the libraries and include files are installed in separate locations, # use HDF5_libdir and HDF5_incdir to specify the locations # separately. #HDF5_dir = /usr/local # if HDF5 was built with szip support as a static lib, # uncomment and set to szip lib install location. # If the libraries and include files are installed in separate locations, # use szip_libdir and szip_incdir. #szip_dir = /usr/local # if netcdf lib was built statically with HDF4 support, # uncomment and set to hdf4 lib (libmfhdf and libdf) install location. # If the libraries and include files are installed in separate locations, # use hdf4_libdir and hdf4_incdir. #hdf4_dir = /usr/local # if netcdf lib was built statically with HDF4 support, # uncomment and set to jpeg lib install location (hdf4 needs jpeg). # If the libraries and include files are installed in separate locations, # use jpeg_libdir and jpeg_incdir. #jpeg_dir = /usr/local # if netcdf lib was built statically with OpenDAP support, # uncomment and set to curl lib install location. # If the libraries and include files are installed in separate locations, # use curl_libdir and curl_incdir.
#curl_dir = /usr/local # location of mpi.h (needed for parallel support) #mpi_incdir=/opt/local/include/mpich-mp [check-manifest] ignore = .gitignore README.gh-pages README.release examples/data/*nc examples/*ipynb netcdf4-python-1.7.4rel/setup.py000066400000000000000000000472331512661643000166230ustar00rootroot00000000000000import os, sys, subprocess, glob import os.path as osp import pathlib import shutil import configparser import sysconfig from setuptools import setup, Extension from setuptools.dist import Distribution from typing import List USE_PY_LIMITED_API = ( # require opt-in (builds are specialized by default) os.getenv('NETCDF4_LIMITED_API', '0') == '1' # Cython + numpy + limited API de facto requires Python >=3.11 and sys.version_info >= (3, 11) # as of Python 3.14t, free-threaded builds don't support the limited API and not sysconfig.get_config_var("Py_GIL_DISABLED") ) ABI3_TARGET_VERSION = "".join(str(_) for _ in sys.version_info[:2]) ABI3_TARGET_HEX = hex(sys.hexversion & 0xFFFF00F0) if USE_PY_LIMITED_API: SETUP_OPTIONS = {"bdist_wheel": {"py_limited_api": f"cp{ABI3_TARGET_VERSION}"}} else: SETUP_OPTIONS = {} open_kwargs = {'encoding': 'utf-8'} def check_hdf5version(hdf5_includedir): try: f = open(os.path.join(hdf5_includedir, 'H5public.h'), **open_kwargs) except OSError: return None hdf5_version = None for line in f: if line.startswith('#define H5_VERS_INFO'): hdf5_version = line.split('"')[1] return hdf5_version def get_hdf5_version(direc): # check to see if hdf5 headers in direc, return version number or None hdf5_version = None print(f"checking {direc}...") hdf5_version = check_hdf5version(direc) if hdf5_version is None: print(f'hdf5 headers not found in {direc}') return None else: print(f'{hdf5_version} headers found in {direc}') return hdf5_version def check_ifnetcdf4(netcdf4_includedir): try: f = open(os.path.join(netcdf4_includedir, 'netcdf.h'), **open_kwargs) except OSError: return False isnetcdf4 = False for line in f: if line.startswith('nc_inq_compound'): isnetcdf4 = True return isnetcdf4 def check_has_parallel_support(inc_dirs: list) -> bool: has_parallel_support = False for d in inc_dirs: ncmetapath = os.path.join(d,'netcdf_meta.h') if not os.path.exists(ncmetapath): continue with open(ncmetapath) as f: for line in f: if line.startswith('#define NC_HAS_PARALLEL'): try: has_parallel_support = bool(int(line.split()[2])) except ValueError: pass return has_parallel_support def getnetcdfvers(libdirs): """ Get the version string for the first netcdf lib found in libdirs. (major.minor.release). If nothing found, return None. 
""" import os, re, sys, ctypes if sys.platform.startswith('win'): regexp = re.compile('^netcdf.dll$') elif sys.platform.startswith('cygwin'): bindirs = [] for d in libdirs: bindirs.append(os.path.dirname(d) + '/bin') regexp = re.compile(r'^cygnetcdf-\d.dll') elif sys.platform.startswith('darwin'): regexp = re.compile(r'^libnetcdf.dylib') else: regexp = re.compile(r'^libnetcdf.so') if sys.platform.startswith('cygwin'): dirs = bindirs else: dirs = libdirs for d in dirs: try: candidates = [x for x in os.listdir(d) if regexp.match(x)] if len(candidates) != 0: candidates.sort( key=lambda x: len(x)) # Prefer libfoo.so to libfoo.so.X.Y.Z path = os.path.abspath(os.path.join(d, candidates[0])) lib = ctypes.cdll.LoadLibrary(path) inq_libvers = lib.nc_inq_libvers inq_libvers.restype = ctypes.c_char_p vers = lib.nc_inq_libvers() return vers.split()[0] except Exception: pass # We skip invalid entries, because that's what the C compiler does return None def extract_version(CYTHON_FNAME): version = None with open(CYTHON_FNAME) as fi: for line in fi: if (line.startswith('__version__')): _, version = line.split('=') version = version.strip()[1:-1] # Remove quotation characters. break return version HDF5_dir = os.environ.get('HDF5_DIR') HDF5_incdir = os.environ.get('HDF5_INCDIR') HDF5_libdir = os.environ.get('HDF5_LIBDIR') netCDF4_dir = os.environ.get('NETCDF4_DIR') netCDF4_incdir = os.environ.get('NETCDF4_INCDIR') netCDF4_libdir = os.environ.get('NETCDF4_LIBDIR') szip_dir = os.environ.get('SZIP_DIR') szip_libdir = os.environ.get('SZIP_LIBDIR') szip_incdir = os.environ.get('SZIP_INCDIR') hdf4_dir = os.environ.get('HDF4_DIR') hdf4_libdir = os.environ.get('HDF4_LIBDIR') hdf4_incdir = os.environ.get('HDF4_INCDIR') jpeg_dir = os.environ.get('JPEG_DIR') jpeg_libdir = os.environ.get('JPEG_LIBDIR') jpeg_incdir = os.environ.get('JPEG_INCDIR') curl_dir = os.environ.get('CURL_DIR') curl_libdir = os.environ.get('CURL_LIBDIR') curl_incdir = os.environ.get('CURL_INCDIR') mpi_incdir = os.environ.get('MPI_INCDIR') USE_NCCONFIG = bool(int(os.environ.get('USE_NCCONFIG', 0))) # override use of setup.cfg with env var. USE_SETUPCFG = bool(int(os.environ.get('USE_SETUPCFG', 1))) setup_cfg = 'setup.cfg' # contents of setup.cfg will override env vars, unless # USE_SETUPCFG evaluates to False. 
ncconfig = None use_ncconfig = None if USE_SETUPCFG and os.path.exists(setup_cfg): print('reading from setup.cfg...') config = configparser.ConfigParser() config.read(setup_cfg) HDF5_dir = config.get("directories", "HDF5_dir", fallback=HDF5_dir) HDF5_libdir = config.get("directories", "HDF5_libdir", fallback=HDF5_libdir) HDF5_incdir = config.get("directories", "HDF5_incdir", fallback=HDF5_incdir) netCDF4_dir = config.get("directories", "netCDF4_dir", fallback=netCDF4_dir) netCDF4_libdir = config.get("directories", "netCDF4_libdir", fallback=netCDF4_libdir) netCDF4_incdir = config.get("directories", "netCDF4_incdir", fallback=netCDF4_incdir) szip_dir = config.get("directories", "szip_dir", fallback=szip_dir) szip_libdir = config.get("directories", "szip_libdir", fallback=szip_libdir) szip_incdir = config.get("directories", "szip_incdir", fallback=szip_incdir) hdf4_dir = config.get("directories", "hdf4_dir", fallback=hdf4_dir) hdf4_libdir = config.get("directories", "hdf4_libdir", fallback=hdf4_libdir) hdf4_incdir = config.get("directories", "hdf4_incdir", fallback=hdf4_incdir) jpeg_dir = config.get("directories", "jpeg_dir", fallback=jpeg_dir) jpeg_libdir = config.get("directories", "jpeg_libdir", fallback=jpeg_libdir) jpeg_incdir = config.get("directories", "jpeg_incdir", fallback=jpeg_incdir) curl_dir = config.get("directories", "curl_dir", fallback=curl_dir) curl_libdir = config.get("directories", "curl_libdir", fallback=curl_libdir) curl_incdir = config.get("directories", "curl_incdir", fallback=curl_incdir) mpi_incdir = config.get("directories","mpi_incdir", fallback=mpi_incdir) use_ncconfig = config.getboolean("options", "use_ncconfig", fallback=use_ncconfig) ncconfig = config.get("options", "ncconfig", fallback=ncconfig) try: if ncconfig is None: if netCDF4_dir is not None: ncconfig = os.path.join(netCDF4_dir, 'bin/nc-config') else: # otherwise, just hope it's in the users PATH. ncconfig = 'nc-config' HAS_NCCONFIG = subprocess.call([ncconfig, '--libs']) == 0 except OSError: HAS_NCCONFIG = False # make sure USE_NCCONFIG from environment takes # precedence over use_ncconfig from setup.cfg (issue #341). if use_ncconfig and not USE_NCCONFIG: USE_NCCONFIG = use_ncconfig elif not USE_NCCONFIG: # if nc-config exists, and USE_NCCONFIG not set, try to use it. 
USE_NCCONFIG = HAS_NCCONFIG try: HAS_PKG_CONFIG = subprocess.call(['pkg-config', '--libs', 'hdf5']) == 0 except OSError: HAS_PKG_CONFIG = False def config_flags(command: List[str], flag: str) -> list: """Pull out specific flags from a config command (pkg-config or nc-config)""" flags = subprocess.run(command, capture_output=True, text=True) return [arg[2:] for arg in flags.stdout.split() if arg.startswith(flag)] def _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs): global HDF5_incdir, HDF5_dir, HDF5_libdir nohdf5dirs = HDF5_incdir is None and HDF5_libdir is None and HDF5_dir is None if HAS_PKG_CONFIG and nohdf5dirs: # if HDF5 dirs not specified, and pkg-config available, use it inc_dirs.extend(config_flags(["pkg-config", "--cflags", "hdf5"], "-I")) libs.extend(config_flags(["pkg-config", "--libs", "hdf5"], "-l")) lib_dirs.extend(config_flags(["pkg-config", "--libs", "hdf5"], "-L")) else: if HDF5_incdir is None and HDF5_dir is None: print(" HDF5_DIR environment variable not set, checking some standard locations ..") for direc in dirstosearch: hdf5_version = get_hdf5_version(os.path.join(direc, 'include')) if hdf5_version is None: continue else: HDF5_dir = direc HDF5_incdir = os.path.join(direc, 'include') print(f'{hdf5_version} found in {HDF5_dir}') break if HDF5_dir is None: raise ValueError('did not find HDF5 headers') else: if HDF5_incdir is None: HDF5_incdir = os.path.join(HDF5_dir, 'include') hdf5_version = get_hdf5_version(HDF5_incdir) if hdf5_version is None: raise ValueError(f'did not find HDF5 headers in {HDF5_incdir}') print(f'{hdf5_version} found in {HDF5_dir}') if HDF5_libdir is None and HDF5_dir is not None: HDF5_libdir = os.path.join(HDF5_dir, 'lib') if HDF5_libdir is not None: lib_dirs.append(HDF5_libdir) if HDF5_incdir is not None: inc_dirs.append(HDF5_incdir) libs.extend(['hdf5_hl', 'hdf5']) dirstosearch = [] if os.environ.get("CONDA_PREFIX"): dirstosearch.append(os.environ["CONDA_PREFIX"]) # linux,macosx dirstosearch.append(os.path.join(os.environ["CONDA_PREFIX"],'Library')) # windows dirstosearch += [os.path.expanduser('~'), '/usr/local', '/sw', '/opt', '/opt/local', '/opt/homebrew', '/usr'] # try nc-config first if USE_NCCONFIG and HAS_NCCONFIG and ncconfig is not None: print(f'using {ncconfig}...') libs = config_flags([ncconfig, "--libs"], "-l") lib_dirs = config_flags([ncconfig, "--libs"], "-L") inc_dirs = config_flags([ncconfig, '--cflags'], "-I") # check to see if hdf5 found in directories returned by nc-config hdf5_version = None for direc in inc_dirs: hdf5_version = get_hdf5_version(direc) if hdf5_version is not None: if sys.platform == "cygwin": _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs) break # if hdf5 not found, search other standard locations (including those specified in env vars). if hdf5_version is None: print('nc-config did not provide path to HDF5 headers, search standard locations...') _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs) # If nc-config doesn't work, fall back on brute force method. else: lib_dirs = [] inc_dirs = [] libs = [] # _populate_hdf5_info will use HDF5_dir, HDF5_libdir and HDF5_incdir if they are set. # otherwise pkg-config will be tried, and if that fails, dirstosearch will be searched.
_populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs) if netCDF4_incdir is None and netCDF4_dir is None: print("NETCDF4_DIR environment variable not set, checking standard locations..") for direc in dirstosearch: print(f'checking {direc}...') isnetcdf4 = check_ifnetcdf4(os.path.join(direc, 'include')) if not isnetcdf4: continue else: netCDF4_dir = direc netCDF4_incdir = os.path.join(direc, 'include') print(f'netCDF4 found in {netCDF4_dir}') break if netCDF4_dir is None: raise ValueError('did not find netCDF version 4 headers') else: if netCDF4_incdir is None: netCDF4_incdir = os.path.join(netCDF4_dir, 'include') isnetcdf4 = check_ifnetcdf4(netCDF4_incdir) if not isnetcdf4: raise ValueError( 'did not find netCDF version 4 headers %s' % netCDF4_incdir) if netCDF4_libdir is None and netCDF4_dir is not None: netCDF4_libdir = os.path.join(netCDF4_dir, 'lib') if sys.platform == 'win32': libs.extend(['netcdf', 'zlib']) else: libs.extend(['netcdf', 'z']) if netCDF4_libdir is not None: lib_dirs.append(netCDF4_libdir) if netCDF4_incdir is not None: inc_dirs.append(netCDF4_incdir) # add szip to link if desired. if szip_libdir is None and szip_dir is not None: szip_libdir = os.path.join(szip_dir, 'lib') if szip_incdir is None and szip_dir is not None: szip_incdir = os.path.join(szip_dir, 'include') if szip_incdir is not None and szip_libdir is not None: if sys.platform == 'win32': libs.append('szip') else: libs.append('sz') lib_dirs.append(szip_libdir) inc_dirs.append(szip_incdir) # add hdf4 to link if desired. if hdf4_libdir is None and hdf4_dir is not None: hdf4_libdir = os.path.join(hdf4_dir, 'lib') if hdf4_incdir is None and hdf4_dir is not None: hdf4_incdir = os.path.join(hdf4_dir, 'include') if hdf4_incdir is not None and hdf4_libdir is not None: libs.append('mfhdf') libs.append('df') lib_dirs.append(hdf4_libdir) inc_dirs.append(hdf4_incdir) # add jpeg to link if desired. if jpeg_libdir is None and jpeg_dir is not None: jpeg_libdir = os.path.join(jpeg_dir, 'lib') if jpeg_incdir is None and jpeg_dir is not None: jpeg_incdir = os.path.join(jpeg_dir, 'include') if jpeg_incdir is not None and jpeg_libdir is not None: libs.append('jpeg') lib_dirs.append(jpeg_libdir) inc_dirs.append(jpeg_incdir) # add curl to link if desired. if curl_libdir is None and curl_dir is not None: curl_libdir = os.path.join(curl_dir, 'lib') if curl_incdir is None and curl_dir is not None: curl_incdir = os.path.join(curl_dir, 'include') if curl_incdir is not None and curl_libdir is not None: libs.append('curl') lib_dirs.append(curl_libdir) inc_dirs.append(curl_incdir) if sys.platform == 'win32' or sys.platform == 'cygwin': runtime_lib_dirs = [] else: runtime_lib_dirs = lib_dirs # Do not require numpy for just querying the package # Taken from the h5py setup file. if any('--' + opt in sys.argv for opt in Distribution.display_option_names + ['help-commands', 'help']) or sys.argv[1] == 'egg_info': pass else: # append numpy include dir. import numpy inc_dirs.append(numpy.get_include()) # get netcdf library version. 
netcdf_lib_version = getnetcdfvers(lib_dirs) if netcdf_lib_version is None: print('unable to detect netcdf library version') else: netcdf_lib_version = str(netcdf_lib_version) print(f'using netcdf library version {netcdf_lib_version}') DEFINE_MACROS = [("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")] netcdf4_src_root = osp.join(osp.join('src','netCDF4'), '_netCDF4') netcdf4_src_c = netcdf4_src_root + '.c' netcdf4_src_pyx = netcdf4_src_root + '.pyx' if 'sdist' not in sys.argv[1:] and 'clean' not in sys.argv[1:] and '--version' not in sys.argv[1:]: print('using Cython to compile netCDF4.pyx...') # remove _netCDF4.c file if it exists, so cython will recompile _netCDF4.pyx. # run for build *and* install (issue #263). Otherwise 'pip install' will # not regenerate _netCDF4.c, even if the C lib supports the new features. if len(sys.argv) >= 2: if os.path.exists(netcdf4_src_c): os.remove(netcdf4_src_c) # for netcdf 4.4.x CDF5 format is always enabled. if netcdf_lib_version is not None and\ (netcdf_lib_version > "4.4" and netcdf_lib_version < "4.5"): has_cdf5_format = True has_parallel_support = check_has_parallel_support(inc_dirs) has_has_not = "has" if has_parallel_support else "does not have" print(f"netcdf lib {has_has_not} parallel functions") if has_parallel_support: try: import mpi4py except ImportError: msg = "Parallel support requires mpi4py but it is not installed." raise ImportError(msg) inc_dirs.append(mpi4py.get_include()) # mpi_incdir should not be needed if using nc-config # (should be included in nc-config --cflags) if mpi_incdir is not None: inc_dirs.append(mpi_incdir) # Name of file containing imports required for parallel support parallel_support_imports = "parallel_support_imports.pxi.in" else: parallel_support_imports = "no_parallel_support_imports.pxi.in" # Copy the specific version of the file containing parallel # support imports shutil.copyfile( osp.join("include", parallel_support_imports), osp.join("include", "parallel_support_imports.pxi") ) nc_complex_dir = pathlib.Path("./external/nc_complex") source_files = [ netcdf4_src_pyx, str(nc_complex_dir / "src/nc_complex.c"), ] include_dirs = inc_dirs + [ "include", str(nc_complex_dir / "include"), str(nc_complex_dir / "include/generated_fallbacks"), ] DEFINE_MACROS += [("NC_COMPLEX_NO_EXPORT", "1")] if USE_PY_LIMITED_API: DEFINE_MACROS.append(("Py_LIMITED_API", ABI3_TARGET_HEX)) ext_modules = [Extension("netCDF4._netCDF4", source_files, define_macros=DEFINE_MACROS, libraries=libs, library_dirs=lib_dirs, include_dirs=include_dirs, runtime_library_dirs=runtime_lib_dirs, py_limited_api=USE_PY_LIMITED_API)] # set language_level directive to 3 for e in ext_modules: e.cython_directives = {'language_level': "3"} # else: ext_modules = None # if NETCDF_PLUGIN_DIR set, install netcdf-c compression plugins inside package # (should point to location of lib__nc* files built by netcdf-c) copied_plugins=False if os.environ.get("NETCDF_PLUGIN_DIR"): plugin_dir = os.environ.get("NETCDF_PLUGIN_DIR") plugins = glob.glob(os.path.join(plugin_dir, "*__nc*")) if not plugins: print('no plugin files in %s, not installing...' % plugin_dir) if not os.path.exists(plugin_dir): print('directory %s does not exist!' 
% plugin_dir) data_files = [] else: data_files = plugins print(f'installing netcdf compression plugins from {plugin_dir} ...') sofiles = [os.path.basename(sofilepath) for sofilepath in data_files] print(repr(sofiles)) if 'sdist' not in sys.argv[1:] and 'clean' not in sys.argv[1:] and '--version' not in sys.argv[1:]: for f in data_files: shutil.copy(f, osp.join(os.getcwd(),osp.join(osp.join('src','netCDF4'),'plugins'))) copied_plugins=True else: print('NETCDF_PLUGIN_DIR not set, no netcdf compression plugins installed') data_files = [] # See pyproject.toml for project metadata setup( name="netCDF4", # need by GitHub dependency graph version=extract_version(netcdf4_src_pyx), ext_modules=ext_modules, options=SETUP_OPTIONS, ) # remove plugin files copied from outside source tree if copied_plugins: for f in sofiles: filepath = osp.join(osp.join(osp.join('src','netCDF4'),'plugins'),f) if os.path.exists(filepath): os.remove(filepath) netcdf4-python-1.7.4rel/src/000077500000000000000000000000001512661643000156675ustar00rootroot00000000000000netcdf4-python-1.7.4rel/src/netCDF4/000077500000000000000000000000001512661643000170565ustar00rootroot00000000000000netcdf4-python-1.7.4rel/src/netCDF4/__init__.py000066400000000000000000000031511512661643000211670ustar00rootroot00000000000000# init for netCDF4. package # if HDF5_PLUGIN_PATH not set, point to package path if plugins live there import os pluginpath = os.path.join(__path__[0],'plugins') if 'HDF5_PLUGIN_PATH' not in os.environ and\ (os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.so')) or\ os.path.exists(os.path.join(pluginpath,'__nczhdf5filters.dll')) or\ os.path.exists(os.path.join(pluginpath,'lib__nczhdf5filters.dylib'))): os.environ['HDF5_PLUGIN_PATH']=pluginpath # Docstring comes from extension module _netCDF4. from ._netCDF4 import * # Need explicit imports for names beginning with underscores from ._netCDF4 import __doc__ from ._netCDF4 import (__version__, __netcdf4libversion__, __hdf5libversion__, __has_rename_grp__, __has_nc_inq_path__, __has_nc_inq_format_extended__, __has_nc_open_mem__, __has_nc_create_mem__, __has_cdf5_format__, __has_parallel4_support__, __has_pnetcdf_support__, __has_quantization_support__, __has_zstandard_support__, __has_bzip2_support__, __has_blosc_support__, __has_szip_support__, __has_set_alignment__, __has_parallel_support__, __has_ncfilter__, __has_nc_rc_set__) __all__ = [ 'Dataset', 'Variable', 'Dimension', 'Group', 'MFDataset', 'MFTime', 'CompoundType', 'VLType', 'date2num', 'num2date', 'date2index', 'stringtochar', 'chartostring', 'stringtoarr', 'getlibversion', 'EnumType', 'get_chunk_cache', 'set_chunk_cache', 'set_alignment', 'get_alignment', 'rc_get', 'rc_set', ] __pdoc__ = {'utils': False} netcdf4-python-1.7.4rel/src/netCDF4/__init__.pyi000066400000000000000000000674771512661643000213650ustar00rootroot00000000000000"""__init__.pyi - Type stubs for the netCDF4 Python package""" # Notes: # # - The stubs in this file are manually-generated and must be updated if and when the API is changed. 
# - The following **ruff** commands may be used to format this file according to # https://typing.readthedocs.io/en/latest/source/stubs.html # # ruff format --line-length 130 src/netCDF4/__init__.pyi # format code # ruff check --line-length 130 --select I --fix src/netCDF4/__init__.pyi # sort imports # # - The Variable class is a generic and may thus be statically typed, but this has limited utility for the following reasons: # - The return type of `Variable.__getitem__()` (and `Variable.getValue()`) depends on a number of factors (e.g. variable # shape, key shape, whether masking is enabled) that cannot be easily determined statically. # - Similarly, the types and shapes of data that `Variable.__setitem__()` may accept varies widely depending on many factors # and is intractable to determine statically. # - Some facility for automatically typing a Variable on creation has been provided, however it is not exhaustive as a variable # may created with a string literal indicating its type and it would require an excessive number of overloads to enumerate # each of these cases. # - It is not possible to statically type a Variable of any user-defined type (CompoundType, EnumType, VLType) as these types # are created dynamically. # Thus it is most often best for the user to implement TypeGuards and/or perform other mixed static/runtime type-checking to # ensure the type and shape of data retrieved from this library. # - The return type of some functions or properties (such as `Dataset.__getitem__()`) may one of a number of types. Where it is # not possible to narrow the type with overloads, the authors of these stubs have generally chosen to use `Any` as the return # type rather than a union of the possible types. # - `MFDataset.dimensions` returns `dict[str, Dimension]` and `MFDataset.variables` returns `dict[str, Variable]` even though the # dict value types may actually be `_Dimension` and `_Variable`, respectively. The original authors of this stubfile have # elected to do this for simplicity's sake, but it may make sense to change this in the future, or just return `dict[str, Any]`. 
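# A short sketch of the user-side narrowing recommended in the notes above
# (illustrative only; `is_float64_var` is a hypothetical helper, not part of
# this package):
#
#     import numpy as np
#     from typing import TypeGuard
#     import netCDF4
#
#     def is_float64_var(v: netCDF4.Variable) -> TypeGuard[netCDF4.Variable[np.float64]]:
#         return v.dtype == np.float64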
import datetime as dt import os import sys from typing import ( TYPE_CHECKING, Any, Callable, final, Final, Generic, Iterable, Iterator, Literal, Mapping, NoReturn, Sequence, TypedDict, TypeVar, Union, overload, ) import cftime import numpy as np import numpy.typing as npt from typing_extensions import Buffer, Self, TypeAlias, disjoint_base __all__ = [ "Dataset", "Variable", "Dimension", "Group", "MFDataset", "MFTime", "CompoundType", "VLType", "date2num", "num2date", "rc_get", "rc_set", "date2index", "stringtochar", "chartostring", "stringtoarr", "getlibversion", "EnumType", "get_chunk_cache", "set_chunk_cache", "set_alignment", "get_alignment", ] __pdoc__ = {"utils": False} # string type specifiers # fmt: off RealTypeLiteral: TypeAlias = Literal[ "i1", "b", "B", "int8", # NC_BYTE "u1", "uint8", # NC_UBYTE "i2", "h", "s", "int16", # NC_SHORT "u2", "uint16", # NC_USHORT "i4", "i", "l", "int32", # NC_INT "u4", "uint32", # NC_UINT "i8", "int64", "int", # NC_INT64 "u8", "uint64", # NC_UINT64 "f4", "f", "float32", # NC_FLOAT "f8", "d", "float64", "float" # NC_DOUBLE ] # fmt: on ComplexTypeLiteral: TypeAlias = Literal["c8", "c16", "complex64", "complex128"] NumericTypeLiteral: TypeAlias = RealTypeLiteral | ComplexTypeLiteral CharTypeLiteral: TypeAlias = Literal["S1", "c"] # NC_CHAR TypeLiteral: TypeAlias = NumericTypeLiteral | CharTypeLiteral # Numpy types NumPyRealType: TypeAlias = ( np.int8 | np.uint8 | np.int16 | np.uint16 | np.int32 | np.uint32 | np.int64 | np.uint64 | np.float16 | np.float32 | np.float64 ) NumPyComplexType: TypeAlias = np.complex64 | np.complex128 NumPyNumericType: TypeAlias = NumPyRealType | NumPyComplexType # Classes that can create instances of NetCDF user-defined types NetCDFUDTClass: TypeAlias = CompoundType | VLType | EnumType # Possible argument types for the datatype argument used in Variable creation. At this time, it is not possible to allow unknown # strings arguments in the datatype field but exclude and string literals that are not one of `TypeLiteral`, so really # `TypeLiteral` is made irrelevant, except for anyone who looks at this file. 
DatatypeType: TypeAlias = ( TypeLiteral | str | np.dtype[NumPyNumericType | np.str_] | type[int | float | NumPyNumericType | str | np.str_] | NetCDFUDTClass ) VarT = TypeVar("VarT") RealVarT = TypeVar("RealVarT", bound=NumPyRealType) ComplexVarT = TypeVar("ComplexVarT", bound=NumPyComplexType) NumericVarT = TypeVar("NumericVarT", bound=NumPyNumericType) DimensionsType: TypeAlias = Union[str, bytes, Dimension, Iterable[Union[str, bytes, Dimension]]] CompressionType: TypeAlias = Literal[ "zlib", "szip", "zstd", "bzip2", "blosc_lz", "blosc_lz4", "blosc_lz4hc", "blosc_zlib", "blosc_zstd" ] CompressionLevel: TypeAlias = Literal[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] AccessMode: TypeAlias = Literal["r", "w", "r+", "a", "x", "rs", "ws", "r+s", "as"] Format: TypeAlias = Literal["NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_CLASSIC", "NETCDF3_64BIT_OFFSET", "NETCDF3_64BIT_DATA"] DiskFormat: TypeAlias = Literal["NETCDF3", "HDF5", "HDF4", "PNETCDF", "DAP2", "DAP4", "UNDEFINED"] QuantizeMode: TypeAlias = Literal["BitGroom", "BitRound", "GranularBitRound"] EndianType: TypeAlias = Literal["native", "little", "big"] CalendarType: TypeAlias = Literal[ "standard", "gregorian", "proleptic_gregorian", "noleap", "365_day", "360_day", "julian", "all_leap", "366_day" ] BoolInt: TypeAlias = Literal[0, 1] DateTimeArray: TypeAlias = npt.NDArray[np.object_] """numpy array of datetime.datetime or cftime.datetime""" class BloscInfo(TypedDict): compressor: Literal["blosc_lz", "blosc_lz4", "blosc_lz4hc", "blosc_zlib", "blosc_zstd"] shuffle: Literal[0, 1, 2] class SzipInfo(TypedDict): coding: Literal["nn", "ec"] pixels_per_block: Literal[4, 8, 16, 32] class FiltersDict(TypedDict): """Dict returned from netCDF4.Variable.filters()""" zlib: bool szip: Literal[False] | SzipInfo zstd: bool bzip2: bool blosc: Literal[False] | BloscInfo shuffle: bool complevel: int fletcher32: bool __version__: str __netcdf4libversion__: str __hdf5libversion__: str __has_rename_grp__: BoolInt __has_nc_inq_path__: BoolInt __has_nc_inq_format_extended__: BoolInt __has_nc_open_mem__: BoolInt __has_nc_create_mem__: BoolInt __has_cdf5_format__: BoolInt __has_parallel4_support__: BoolInt __has_pnetcdf_support__: BoolInt __has_parallel_support__: BoolInt __has_quantization_support__: BoolInt __has_zstandard_support__: BoolInt __has_bzip2_support__: BoolInt __has_blosc_support__: BoolInt __has_szip_support__: BoolInt __has_set_alignment__: BoolInt __has_ncfilter__: BoolInt __has_nc_rc_set__: BoolInt is_native_little: bool is_native_big: bool default_encoding: Final = "utf-8" unicode_error: Final = "replace" default_fillvals: dict[str, int | float | str] # date2index, date2num, and num2date are actually provided by cftime and if stubs for # cftime are completed these should be removed. def date2index( dates: dt.datetime | cftime.datetime | Sequence[dt.datetime | cftime.datetime] | DateTimeArray, nctime: Variable, calendar: CalendarType | str | None = None, select: Literal["exact", "before", "after", "nearest"] = "exact", has_year_zero: bool | None = None, ) -> int | npt.NDArray[np.int_]: ... def date2num( dates: dt.datetime | cftime.datetime | Sequence[dt.datetime | cftime.datetime] | DateTimeArray, units: str, calendar: CalendarType | str | None = None, has_year_zero: bool | None = None, longdouble: bool = False, ) -> np.number | npt.NDArray[np.number]: ... 
def num2date( times: Sequence[int | float | np.number] | npt.NDArray[np.number], units: str, calendar: CalendarType | str = "standard", only_use_cftime_datetimes: bool = True, only_use_python_datetimes: bool = False, has_year_zero: bool | None = None, ) -> dt.datetime | DateTimeArray: ... class NetCDF4MissingFeatureException(Exception): def __init__(self, feature: str, version: str): ... def dtype_is_complex(dtype: str) -> bool: ... @disjoint_base class Dataset: def __init__( self, filename: str | os.PathLike, mode: AccessMode = "r", clobber: bool = True, format: Format = "NETCDF4", diskless: bool = False, persist: bool = False, keepweakref: bool = False, memory: Buffer | int | None = None, encoding: str | None = None, parallel: bool = False, comm: Any = None, info: Any = None, auto_complex: bool = False, **kwargs: Any, ): ... @property def name(self) -> str: ... @property def groups(self) -> dict[str, Group]: ... @property def dimensions(self) -> dict[str, Dimension]: ... @property def variables(self) -> dict[str, Variable[Any]]: ... @property def cmptypes(self) -> dict[str, CompoundType]: ... @property def vltypes(self) -> dict[str, VLType]: ... @property def enumtypes(self) -> dict[str, EnumType]: ... @property def data_model(self) -> Format: ... @property def file_format(self) -> Format: ... @property def disk_format(self) -> DiskFormat: ... @property def parent(self) -> Dataset | None: ... @property def path(self) -> str: ... @property def keepweakref(self) -> bool: ... @property def auto_complex(self) -> bool: ... @property def _ncstring_attrs__(self) -> bool: ... @property def __orthogonal_indexing__(self) -> bool: ... def filepath(self, encoding: str | None = None) -> str: ... def isopen(self) -> bool: ... def close(self) -> memoryview: ... # only if writing and memory != None, but otherwise people ignore the return None anyway def sync(self) -> None: ... def set_fill_on(self) -> None: ... def set_fill_off(self) -> None: ... def createDimension(self, dimname: str, size: int | None = None) -> Dimension: ... def renameDimension(self, oldname: str, newname: str) -> None: ... @overload def createVariable( self, varname: str, datatype: np.dtype[NumericVarT] | type[NumericVarT], dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, ) -> Variable[NumericVarT]: ... 
@overload def createVariable( self, varname: str, datatype: np.dtype[np.str_] | type[str | np.str_], dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, ) -> Variable[str]: ... @overload def createVariable( self, varname: str, datatype: DatatypeType, dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, ) -> Variable: ... def renameVariable(self, oldname: str, newname: str) -> None: ... def createGroup(self, groupname: str) -> Group: ... def renameGroup(self, oldname: str, newname: str) -> None: ... def renameAttribute(self, oldname: str, newname: str) -> None: ... def createCompoundType( self, datatype: npt.DTypeLike | Sequence[tuple[str, npt.DTypeLike]], datatype_name: str ) -> CompoundType: ... def createVLType(self, datatype: npt.DTypeLike, datatype_name: str) -> VLType: ... def createEnumType( self, datatype: np.dtype[np.integer] | type[np.integer] | type[int] | str, datatype_name: str, enum_dict: Mapping[str, int | np.integer], ) -> EnumType: ... def ncattrs(self) -> list[str]: ... def setncattr_string(self, name: str, value: Any) -> None: ... def setncattr(self, name: str, value: Any) -> None: ... def setncatts(self, attdict: Mapping[str, Any]) -> None: ... def getncattr(self, name: str, encoding: str = "utf-8") -> Any: ... def delncattr(self, name: str) -> None: ... def set_auto_chartostring(self, value: bool) -> None: ... def set_auto_maskandscale(self, value: bool) -> None: ... def set_auto_mask(self, value: bool) -> None: ... def set_auto_scale(self, value: bool) -> None: ... def set_always_mask(self, value: bool) -> None: ... def set_ncstring_attrs(self, value: bool) -> None: ... def get_variables_by_attributes(self, **kwargs: Callable[[Any], bool] | Any) -> list[Variable]: ... @staticmethod def fromcdl( cdlfilename: str | os.PathLike, ncfilename: str | os.PathLike | None = None, mode: AccessMode = "a", format: Format = "NETCDF4" ) -> Dataset: ... @overload def tocdl(self, coordvars: bool = False, data: bool = False, outfile: None = None) -> str: ... @overload def tocdl(self, coordvars: bool = False, data: bool = False, *, outfile: str | os.PathLike) -> None: ... def has_blosc_filter(self) -> bool: ... def has_zstd_filter(self) -> bool: ... def has_bzip2_filter(self) -> bool: ... def has_szip_filter(self) -> bool: ... def __getitem__(self, elem: str) -> Any: ... 
# should be Group | Variable, but this causes too many problems # __iter__ and __contains__ always error because iteration and membership ops are not allowed def __iter__(self) -> NoReturn: ... def __contains__(self, key) -> NoReturn: ... def __setattr__(self, name: str, value: Any) -> None: ... def __getattr__(self, name: str) -> Any: ... def __delattr__(self, name: str): ... def __reduce__(self) -> NoReturn: ... def __enter__(self) -> Self: ... def __exit__(self, atype, value, traceback) -> None: ... class Group(Dataset): def __init__(self, parent: Dataset, name: str, **kwargs: Any) -> None: ... def close(self) -> NoReturn: ... @final class Dimension: def __init__(self, grp: Dataset, name: str, size: int | None = None, **kwargs: Any) -> None: ... @property def name(self) -> str: ... @property def size(self) -> int: ... def group(self) -> Dataset: ... def isunlimited(self) -> bool: ... def __len__(self) -> int: ... class _VarDatatypeProperty: # A faux descriptor definition of the property to allow overloads @overload def __get__(self, instance: Variable[RealVarT], owner: Any) -> np.dtype[RealVarT]: ... @overload def __get__(self, instance: Variable[ComplexVarT], owner: Any) -> CompoundType: ... @overload def __get__(self, instance: Variable[str], owner: Any) -> VLType: ... @overload def __get__( self, instance: Variable, owner: Any ) -> Any: ... # actual return type np.dtype | CompoundType | VLType | EnumType class _VarDtypeProperty: # A faux descriptor definition of the property to allow overloads @overload def __get__(self, instance: Variable[NumericVarT], owner: Any) -> np.dtype[NumericVarT]: ... @overload def __get__(self, instance: Variable[str], owner: Any) -> type[str]: ... @overload def __get__(self, instance: Variable, owner: Any) -> Any: ... # actual return type np.dtype | Type[str] @final class Variable(Generic[VarT]): # Overloads of __new__ are provided for some cases where the Variable's type may be statically inferred from the datatype arg @overload def __new__( cls, grp: Dataset, name: str, datatype: np.dtype[NumericVarT] | type[NumericVarT], dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, **kwargs: Any, ) -> Variable[NumericVarT]: ... 
@overload def __new__( cls, grp: Dataset, name: str, datatype: np.dtype[np.str_] | type[str | np.str_], dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, **kwargs: Any, ) -> Variable[str]: ... @overload def __new__( cls, grp: Dataset, name: str, datatype: DatatypeType, dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, **kwargs: Any, ) -> Variable: ... def __init__( self, grp: Dataset, name: str, datatype: DatatypeType, dimensions: DimensionsType = (), compression: CompressionType | None = None, zlib: bool = False, complevel: CompressionLevel | None = 4, shuffle: bool = True, szip_coding: Literal["nn", "ec"] = "nn", szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, blosc_shuffle: Literal[0, 1, 2] = 1, fletcher32: bool = False, contiguous: bool = False, chunksizes: Sequence[int] | None = None, endian: EndianType = "native", least_significant_digit: int | None = None, significant_digits: int | None = None, quantize_mode: QuantizeMode = "BitGroom", fill_value: int | float | np.generic | str | bytes | Literal[False] | np.ndarray | None = None, chunk_cache: int | None = None, **kwargs: Any, ) -> None: ... datatype: _VarDatatypeProperty dtype: _VarDtypeProperty @property def name(self) -> str: ... @property def shape(self) -> tuple[int, ...]: ... @property def size(self) -> int: ... @property def dimensions(self) -> tuple[str, ...]: ... @property def ndim(self) -> int: ... @property def scale(self) -> bool: ... @property def mask(self) -> bool: ... @property def chartostring(self) -> bool: ... @property def always_mask(self) -> bool: ... @property def __orthogonal_indexing__(self) -> bool: ... def group(self) -> Dataset: ... def ncattrs(self) -> list[str]: ... def setncattr(self, name: str, value: Any) -> None: ... def setncattr_string(self, name: str, value: Any) -> None: ... def setncatts(self, attdict: Mapping[str, Any]) -> None: ... def getncattr(self, name: str, encoding="utf-8"): ... def delncattr(self, name: str) -> None: ... def filters(self) -> FiltersDict: ... def quantization(self) -> tuple[int, QuantizeMode] | None: ... def endian(self) -> EndianType: ... def chunking(self) -> Literal["contiguous"] | list[int]: ... def get_var_chunk_cache(self) -> tuple[int, int, float]: ... 
def set_var_chunk_cache(
        self, size: int | None = None, nelems: int | None = None, preemption: float | None = None
    ) -> None: ...
    def renameAttribute(self, oldname: str, newname: str) -> None: ...
    def assignValue(self, val: Any) -> None: ...
    def getValue(self) -> Any: ...
    def get_fill_value(self) -> Any: ...
    def set_auto_chartostring(self, chartostring: bool) -> None: ...
    def use_nc_get_vars(self, use_nc_get_vars: bool) -> None: ...
    def set_auto_maskandscale(self, maskandscale: bool) -> None: ...
    def set_auto_scale(self, scale: bool) -> None: ...
    def set_auto_mask(self, mask: bool) -> None: ...
    def set_always_mask(self, always_mask: bool) -> None: ...
    def set_ncstring_attrs(self, ncstring_attrs: bool) -> None: ...
    def set_collective(self, value: bool) -> None: ...
    def get_dims(self) -> tuple[Dimension, ...]: ...
    def __delattr__(self, name: str) -> None: ...
    def __setattr__(self, name: str, value: Any) -> None: ...
    def __getattr__(self, name: str) -> Any: ...
    def __getitem__(self, elem: Any) -> Any: ...
    def __setitem__(self, elem: Any, data: npt.ArrayLike) -> None: ...
    def __array__(self) -> np.ndarray: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[Any]: ...  # faux method so mypy believes Variable is iterable

@final
class CompoundType:
    dtype: np.dtype
    dtype_view: np.dtype
    name: str
    def __init__(
        self, grp: Dataset, dt: npt.DTypeLike | Sequence[tuple[str, npt.DTypeLike]], dtype_name: str, **kwargs: Any
    ) -> None: ...
    def __reduce__(self) -> NoReturn: ...

@final
class VLType:
    dtype: np.dtype
    name: str | None
    def __init__(self, grp: Dataset, dt: npt.DTypeLike, dtype_name: str, **kwargs: Any) -> None: ...
    def __reduce__(self) -> NoReturn: ...

@final
class EnumType:
    dtype: np.dtype[np.integer]
    name: str
    enum_dict: Mapping[str, int]
    def __init__(
        self,
        grp: Dataset,
        dt: np.dtype[np.integer] | type[np.integer] | type[int] | str,
        dtype_name: str,
        enum_dict: Mapping[str, int | np.integer],
        **kwargs: Any,
    ) -> None: ...
    def __reduce__(self) -> NoReturn: ...

class MFDataset(Dataset):
    def __init__(
        self,
        files: str | Sequence[str | os.PathLike],
        check: bool = False,
        aggdim: str | None = None,
        exclude: Sequence[str] = [],
        master_file: str | os.PathLike | None = None,
    ) -> None: ...
    @property
    def dimensions(self) -> dict[str, Dimension]: ...  # this should be: dict[str, Dimension | _Dimension]
    @property
    def variables(self) -> dict[str, Variable[Any]]: ...  # this should be: dict[str, Variable[Any] | _Variable]

class _Dimension:
    dimlens: list[int]
    dimtotlen: int
    def __init__(self, dimname: str, dim: Dimension, dimlens: list[int], dimtotlen: int) -> None: ...
    def __len__(self) -> int: ...
    def isunlimited(self) -> Literal[True]: ...

class _Variable:
    dimensions: tuple[str, ...]
    dtype: np.dtype | type[str]
    def __init__(self, dset: Dataset, varname: str, var: Variable[Any], recdimname: str) -> None: ...
    # shape, ndim, and name actually come from __getattr__
    @property
    def shape(self) -> tuple[int, ...]: ...
    @property
    def ndim(self) -> int: ...
    @property
    def name(self) -> str: ...
    def typecode(self) -> np.dtype | type[str]: ...
    def ncattrs(self) -> list[str]: ...
    def _shape(self) -> tuple[int, ...]: ...
    def set_auto_chartostring(self, val: bool) -> None: ...
    def set_auto_maskandscale(self, val: bool) -> None: ...
    def set_auto_mask(self, val: bool) -> None: ...
    def set_auto_scale(self, val: bool) -> None: ...
    def set_always_mask(self, val: bool) -> None: ...
    def __getattr__(self, name: str) -> Any: ...
    def __getitem__(self, elem: Any) -> Any: ...
    def __len__(self) -> int: ...
class MFTime(_Variable):
    calendar: CalendarType | None
    units: str | None
    def __init__(self, time: Variable, units: str | None = None, calendar: CalendarType | str | None = None): ...
    def __getitem__(self, elem: Any) -> np.ndarray: ...

@overload
def stringtoarr(
    string: str,
    NUMCHARS: int,
    dtype: Literal["S"] | np.dtype[np.bytes_] = "S",
) -> npt.NDArray[np.bytes_]: ...
@overload
def stringtoarr(
    string: str,
    NUMCHARS: int,
    dtype: Literal["U"] | np.dtype[np.str_],
) -> npt.NDArray[np.str_]: ...
@overload
def stringtochar(
    a: npt.NDArray[np.character],
    encoding: Literal["none", "None", "bytes"],
    n_strlen: int | None = None,
) -> npt.NDArray[np.bytes_]: ...
@overload
def stringtochar(
    a: npt.NDArray[np.character],
    encoding: str = ...,
) -> npt.NDArray[np.str_] | npt.NDArray[np.bytes_]: ...
@overload
def chartostring(
    b: npt.NDArray[np.character],
    encoding: Literal["none", "None", "bytes"] = ...,
) -> npt.NDArray[np.bytes_]: ...
@overload
def chartostring(
    b: npt.NDArray[np.character],
    encoding: str = ...,
) -> npt.NDArray[np.str_] | npt.NDArray[np.bytes_]: ...
def getlibversion() -> str: ...
def rc_get(key: str) -> str | None: ...
def rc_set(key: str, value: str) -> None: ...
def set_alignment(threshold: int, alignment: int): ...
def get_alignment() -> tuple[int, int]: ...
netcdf4-python-1.7.4rel/src/netCDF4/_netCDF4.pyi000066400000000000000000000022451512661643000211320ustar00rootroot00000000000000# The definitions are intentionally done in the __init__.
# This file only exists in case someone imports from netCDF4._netCDF4
from . import (
    CompoundType,
    Dataset,
    Dimension,
    EnumType,
    Group,
    MFDataset,
    MFTime,
    NetCDF4MissingFeatureException,
    Variable,
    VLType,
    __has_blosc_support__,
    __has_bzip2_support__,
    __has_cdf5_format__,
    __has_nc_create_mem__,
    __has_nc_inq_format_extended__,
    __has_nc_inq_path__,
    __has_nc_open_mem__,
    __has_nc_rc_set__,
    __has_ncfilter__,
    __has_parallel4_support__,
    __has_parallel_support__,
    __has_pnetcdf_support__,
    __has_quantization_support__,
    __has_rename_grp__,
    __has_set_alignment__,
    __has_szip_support__,
    __has_zstandard_support__,
    __hdf5libversion__,
    __netcdf4libversion__,
    __version__,
    chartostring,
    date2index,
    date2num,
    default_encoding,
    default_fillvals,
    dtype_is_complex,
    get_alignment,
    get_chunk_cache,
    getlibversion,
    is_native_big,
    is_native_little,
    num2date,
    rc_get,
    rc_set,
    set_alignment,
    set_chunk_cache,
    stringtoarr,
    stringtochar,
    unicode_error,
)
netcdf4-python-1.7.4rel/src/netCDF4/_netCDF4.pyx000066400000000000000000011475271512661643000211660ustar00rootroot00000000000000"""Version 1.7.4
-------------

# Introduction

netcdf4-python is a Python interface to the netCDF C library.

[netCDF](http://www.unidata.ucar.edu/software/netcdf/) version 4 has many features not found in earlier versions of the library and is implemented on top of [HDF5](http://www.hdfgroup.org/HDF5). This module can read and write files in both the new netCDF 4 and the old netCDF 3 format, and can create files that are readable by HDF5 clients. The API is modelled after [Scientific.IO.NetCDF](http://dirac.cnrs-orleans.fr/ScientificPython/), and should be familiar to users of that module.

Most new features of netCDF 4 are implemented, such as multiple unlimited dimensions, groups and data compression. All the new numeric data types (such as 64 bit and unsigned integer types) are implemented.
Compound (struct), variable length (vlen) and enumerated (enum) data types are supported, but not the opaque data type. Mixtures of compound, vlen and enum data types (such as compound types containing enums, or vlens containing compound types) are not supported.

## Quick Install

- The easiest way to get going is to install via `pip install netCDF4` (or, if you use the [conda](http://conda.io) package manager, `conda install -c conda-forge netCDF4`).

## Developer Install

- Clone the [github repository](http://github.com/Unidata/netcdf4-python). Make sure you either clone recursively, or run `git submodule update --init` to ensure all the submodules are also checked out.
- Make sure the dependencies are satisfied (Python 3.8 or later, [numpy](http://numpy.scipy.org), [Cython](http://cython.org), [cftime](https://github.com/Unidata/cftime), [setuptools](https://pypi.python.org/pypi/setuptools), the [HDF5 C library](https://www.hdfgroup.org/solutions/hdf5/), and the [netCDF C library](https://www.unidata.ucar.edu/software/netcdf/)). For MPI parallel IO support, an MPI-enabled version of the netcdf library is required, as is [mpi4py](http://mpi4py.scipy.org). Parallel IO further depends on the existence of MPI-enabled HDF5 or the [PnetCDF](https://parallel-netcdf.github.io/) library.
- By default, the utility `nc-config` (installed with netcdf-c) will be run to determine where all the dependencies live.
- If `nc-config` is not in your default `PATH`, you can set the `NETCDF4_DIR` environment variable and `setup.py` will look in `$NETCDF4_DIR/bin`. You can also use the file `setup.cfg` to set the path to `nc-config`, or enter the paths to the libraries and include files manually. Just edit the `setup.cfg` file in a text editor and follow the instructions in the comments. To disable the use of `nc-config`, set the env var `USE_NCCONFIG` to 0. To disable the use of `setup.cfg`, set `USE_SETUPCFG` to 0. As a last resort, the library and include paths can be set via environment variables. If you go this route, set `USE_NCCONFIG` and `USE_SETUPCFG` to 0, and specify `NETCDF4_LIBDIR`, `NETCDF4_INCDIR`, `HDF5_LIBDIR` and `HDF5_INCDIR`. If the dependencies are not found in any of the paths specified by environment variables, then standard locations (such as `/usr` and `/usr/local`) are searched.
- If the env var `NETCDF_PLUGIN_DIR` is set to point to the location of the netcdf-c compression plugins built by netcdf >= 4.9.0, they will be installed inside the package. In this case `HDF5_PLUGIN_PATH` will be set to the package installation path on import, so the extra compression algorithms available in netcdf-c >= 4.9.0 will automatically be available. Otherwise, the user will have to set `HDF5_PLUGIN_PATH` explicitly to have access to the extra compression plugins.
- Run `pip install -v .` (as root if necessary).
- Run the tests in the 'test' directory by running `python run_all.py`. A quick sanity check of the finished install is sketched below.
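A quick way to verify the result is to import the module and inspect its version and feature attributes; a minimal check (the values shown by these attributes depend on how the libraries were built):

```python
>>> import netCDF4
>>> netCDF4.__version__                # version of netcdf4-python itself
>>> netCDF4.getlibversion()            # version string of the netCDF C library in use
>>> netCDF4.__has_parallel_support__   # 1 if built with MPI parallel IO support
>>> netCDF4.__has_zstandard_support__  # 1 if the zstd compression feature is available
```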
# Tutorial

- [Creating/Opening/Closing a netCDF file](#creatingopeningclosing-a-netcdf-file)
- [Groups in a netCDF file](#groups-in-a-netcdf-file)
- [Dimensions in a netCDF file](#dimensions-in-a-netcdf-file)
- [Variables in a netCDF file](#variables-in-a-netcdf-file)
- [Attributes in a netCDF file](#attributes-in-a-netcdf-file)
- [Dealing with time coordinates](#dealing-with-time-coordinates)
- [Writing data to and retrieving data from a netCDF variable](#writing-data-to-and-retrieving-data-from-a-netcdf-variable)
- [Reading data from a multi-file netCDF dataset](#reading-data-from-a-multi-file-netcdf-dataset)
- [Efficient compression of netCDF variables](#efficient-compression-of-netcdf-variables)
- [Beyond homogeneous arrays of a fixed type - compound data types](#beyond-homogeneous-arrays-of-a-fixed-type-compound-data-types)
- [Variable-length (vlen) data types](#variable-length-vlen-data-types)
- [Enum data type](#enum-data-type)
- [Parallel IO](#parallel-io)
- [Dealing with strings](#dealing-with-strings)
- [In-memory (diskless) Datasets](#in-memory-diskless-datasets)

All of the code in this tutorial is available in `examples/tutorial.py`, except the parallel IO example, which is in `examples/mpi_example.py`. Unit tests are in the `test` directory.

## Creating/Opening/Closing a netCDF file

To create a netCDF file from python, you simply call the `Dataset` constructor. This is also the method used to open an existing netCDF file. If the file is open for write access (`mode='w', 'r+'` or `'a'`), you may write any type of data including new dimensions, groups, variables and attributes. netCDF files come in five flavors (`NETCDF3_CLASSIC`, `NETCDF3_64BIT_OFFSET`, `NETCDF3_64BIT_DATA`, `NETCDF4_CLASSIC`, and `NETCDF4`). `NETCDF3_CLASSIC` was the original netcdf binary format, and was limited to file sizes less than 2 Gb. `NETCDF3_64BIT_OFFSET` was introduced in version 3.6.0 of the library, and extended the original binary format to allow for file sizes greater than 2 Gb. `NETCDF3_64BIT_DATA` is a new format that requires version 4.4.0 of the C library - it extends the `NETCDF3_64BIT_OFFSET` binary format to allow for unsigned/64 bit integer data types and 64-bit dimension sizes. `NETCDF3_64BIT` is an alias for `NETCDF3_64BIT_OFFSET`. `NETCDF4_CLASSIC` files use the version 4 disk format (HDF5), but omit features not found in the version 3 API. They can be read by netCDF 3 clients only if they have been relinked against the netCDF 4 library. They can also be read by HDF5 clients. `NETCDF4` files use the version 4 disk format (HDF5) and use the new features of the version 4 API. The netCDF4 module can read and write files in any of these formats.

When creating a new file, the format may be specified using the `format` keyword in the `Dataset` constructor. The default format is `NETCDF4`. To see how a given file is formatted, you can examine the `data_model` attribute. Closing the netCDF file is accomplished via the `Dataset.close` method of the `Dataset` instance.

Here's an example:

```python
>>> from netCDF4 import Dataset
>>> rootgrp = Dataset("test.nc", "w", format="NETCDF4")
>>> print(rootgrp.data_model)
NETCDF4
>>> rootgrp.close()
```

Remote [OPeNDAP](http://opendap.org)-hosted datasets can be accessed for reading over http if a URL is provided to the `Dataset` constructor instead of a filename. However, this requires that the netCDF library be built with OPeNDAP support, via the `--enable-dap` configure option (added in version 4.0.1).
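A `Dataset` can also be used as a context manager, which guarantees the file is closed even if an error occurs while it is open. Here's a minimal sketch reusing the file created above:

```python
>>> with Dataset("test.nc", "r") as rootgrp:
...     print(rootgrp.data_model)
NETCDF4
```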
## Groups in a netCDF file

netCDF version 4 added support for organizing data in hierarchical groups, which are analogous to directories in a filesystem. Groups serve as containers for variables, dimensions and attributes, as well as other groups. A `Dataset` creates a special group, called the 'root group', which is similar to the root directory in a unix filesystem. To create `Group` instances, use the `Dataset.createGroup` method of a `Dataset` or `Group` instance. `Dataset.createGroup` takes a single argument, a python string containing the name of the new group. The new `Group` instances contained within the root group can be accessed by name using the `groups` dictionary attribute of the `Dataset` instance. Only `NETCDF4` formatted files support Groups; if you try to create a Group in a netCDF 3 file you will get an error message.

```python
>>> rootgrp = Dataset("test.nc", "a")
>>> fcstgrp = rootgrp.createGroup("forecasts")
>>> analgrp = rootgrp.createGroup("analyses")
>>> print(rootgrp.groups)
{'forecasts': <class 'netCDF4._netCDF4.Group'>
group /forecasts:
    dimensions(sizes):
    variables(dimensions):
    groups: , 'analyses': <class 'netCDF4._netCDF4.Group'>
group /analyses:
    dimensions(sizes):
    variables(dimensions):
    groups: }
>>>
```

Groups can exist within groups in a `Dataset`, just as directories exist within directories in a unix filesystem. Each `Group` instance has a `groups` attribute dictionary containing all of the group instances contained within that group. Each `Group` instance also has a `path` attribute that contains a simulated unix directory path to that group. To simplify the creation of nested groups, you can use a unix-like path as an argument to `Dataset.createGroup`.

```python
>>> fcstgrp1 = rootgrp.createGroup("/forecasts/model1")
>>> fcstgrp2 = rootgrp.createGroup("/forecasts/model2")
```

If any of the intermediate elements of the path do not exist, they are created, just as with the unix command `'mkdir -p'`. If you try to create a group that already exists, no error will be raised, and the existing group will be returned.

Here's an example that shows how to navigate all the groups in a `Dataset`. The function `walktree` is a Python generator that is used to walk the directory tree. Note that printing the `Dataset` or `Group` object yields summary information about its contents.

```python
>>> def walktree(top):
...     yield top.groups.values()
...     for value in top.groups.values():
...         yield from walktree(value)
>>> print(rootgrp)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes):
    variables(dimensions):
    groups: forecasts, analyses
>>> for children in walktree(rootgrp):
...     for child in children:
...         print(child)
<class 'netCDF4._netCDF4.Group'>
group /forecasts:
    dimensions(sizes):
    variables(dimensions):
    groups: model1, model2
<class 'netCDF4._netCDF4.Group'>
group /analyses:
    dimensions(sizes):
    variables(dimensions):
    groups:
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model1:
    dimensions(sizes):
    variables(dimensions):
    groups:
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model2:
    dimensions(sizes):
    variables(dimensions):
    groups:
```

## Dimensions in a netCDF file

netCDF defines the sizes of all variables in terms of dimensions, so before any variables can be created the dimensions they use must be created first. A special case, not often used in practice, is that of a scalar variable, which has no dimensions. A dimension is created using the `Dataset.createDimension` method of a `Dataset` or `Group` instance. A Python string is used to set the name of the dimension, and an integer value is used to set the size. To create an unlimited dimension (a dimension that can be appended to), the size value is set to `None` or 0.
In this example, both the `time` and `level` dimensions are unlimited. Having more than one unlimited dimension is a new netCDF 4 feature; in netCDF 3 files there may be only one, and it must be the first (leftmost) dimension of the variable.

```python
>>> level = rootgrp.createDimension("level", None)
>>> time = rootgrp.createDimension("time", None)
>>> lat = rootgrp.createDimension("lat", 73)
>>> lon = rootgrp.createDimension("lon", 144)
```

All of the `Dimension` instances are stored in a python dictionary.

```python
>>> print(rootgrp.dimensions)
{'level': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0, 'time': <class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0, 'lat': <class 'netCDF4._netCDF4.Dimension'>: name = 'lat', size = 73, 'lon': <class 'netCDF4._netCDF4.Dimension'>: name = 'lon', size = 144}
```

Using the python `len` function with a `Dimension` instance returns the current size of that dimension. The `Dimension.isunlimited` method of a `Dimension` instance can be used to determine if the dimension is unlimited, or appendable.

```python
>>> print(len(lon))
144
>>> print(lon.isunlimited())
False
>>> print(time.isunlimited())
True
```

Printing the `Dimension` object provides useful summary info, including the name and length of the dimension, and whether it is unlimited.

```python
>>> for dimobj in rootgrp.dimensions.values():
...     print(dimobj)
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'level', size = 0
<class 'netCDF4._netCDF4.Dimension'> (unlimited): name = 'time', size = 0
<class 'netCDF4._netCDF4.Dimension'>: name = 'lat', size = 73
<class 'netCDF4._netCDF4.Dimension'>: name = 'lon', size = 144
```

`Dimension` names can be changed using the `Dataset.renameDimension` method of a `Dataset` or `Group` instance.

## Variables in a netCDF file

netCDF variables behave much like python multidimensional array objects supplied by the [numpy module](http://numpy.scipy.org). However, unlike numpy arrays, netCDF4 variables can be appended to along one or more 'unlimited' dimensions. To create a netCDF variable, use the `Dataset.createVariable` method of a `Dataset` or `Group` instance. The `Dataset.createVariable` method has two mandatory arguments, the variable name (a Python string), and the variable datatype. The variable's dimensions are given by a tuple containing the dimension names (defined previously with `Dataset.createDimension`). To create a scalar variable, simply leave out the dimensions keyword. The variable primitive datatypes correspond to the dtype attribute of a numpy array. You can specify the datatype as a numpy dtype object, or anything that can be converted to a numpy dtype object. Valid datatype specifiers include:

| Specifier | Datatype                | Old typecodes |
|-----------|-------------------------|---------------|
| `'f4'`    | 32-bit floating point   | `'f'`         |
| `'f8'`    | 64-bit floating point   | `'d'`         |
| `'i4'`    | 32-bit signed integer   | `'i'` `'l'`   |
| `'i2'`    | 16-bit signed integer   | `'h'` `'s'`   |
| `'i8'`    | 64-bit signed integer   |               |
| `'i1'`    | 8-bit signed integer    | `'b'` `'B'`   |
| `'u1'`    | 8-bit unsigned integer  |               |
| `'u2'`    | 16-bit unsigned integer |               |
| `'u4'`    | 32-bit unsigned integer |               |
| `'u8'`    | 64-bit unsigned integer |               |
| `'S1'`    | single-character string | `'c'`         |

The unsigned integer types and the 64-bit integer type can only be used if the file format is `NETCDF4`. The dimensions themselves are usually also defined as variables, called coordinate variables. The `Dataset.createVariable` method returns an instance of the `Variable` class whose methods can be used later to access and set variable data and attributes.
```python
>>> times = rootgrp.createVariable("time","f8",("time",))
>>> levels = rootgrp.createVariable("level","i4",("level",))
>>> latitudes = rootgrp.createVariable("lat","f4",("lat",))
>>> longitudes = rootgrp.createVariable("lon","f4",("lon",))
>>> # two dimensions unlimited
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))
>>> temp.units = "K"
```

To get summary info on a `Variable` instance in an interactive session, just print it.

```python
>>> print(temp)
<class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
    units: K
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used
```

You can use a path to create a Variable inside a hierarchy of groups.

```python
>>> ftemp = rootgrp.createVariable("/forecasts/model1/temp","f4",("time","level","lat","lon",))
```

If the intermediate groups do not yet exist, they will be created.

You can also query a `Dataset` or `Group` instance directly to obtain `Group` or `Variable` instances using paths.

```python
>>> print(rootgrp["/forecasts/model1"])  # a Group instance
<class 'netCDF4._netCDF4.Group'>
group /forecasts/model1:
    dimensions(sizes):
    variables(dimensions): float32 temp(time,level,lat,lon)
    groups:
>>> print(rootgrp["/forecasts/model1/temp"])  # a Variable instance
<class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
path = /forecasts/model1
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used
```

All of the variables in the `Dataset` or `Group` are stored in a Python dictionary, in the same way as the dimensions:

```python
>>> print(rootgrp.variables)
{'time': <class 'netCDF4._netCDF4.Variable'>
float64 time(time)
unlimited dimensions: time
current shape = (0,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'level': <class 'netCDF4._netCDF4.Variable'>
int32 level(level)
unlimited dimensions: level
current shape = (0,)
filling on, default _FillValue of -2147483647 used, 'lat': <class 'netCDF4._netCDF4.Variable'>
float32 lat(lat)
unlimited dimensions:
current shape = (73,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'lon': <class 'netCDF4._netCDF4.Variable'>
float32 lon(lon)
unlimited dimensions:
current shape = (144,)
filling on, default _FillValue of 9.969209968386869e+36 used, 'temp': <class 'netCDF4._netCDF4.Variable'>
float32 temp(time, level, lat, lon)
    units: K
unlimited dimensions: time, level
current shape = (0, 0, 73, 144)
filling on, default _FillValue of 9.969209968386869e+36 used}
```

`Variable` names can be changed using the `Dataset.renameVariable` method of a `Dataset` instance.

Variables can be sliced similar to numpy arrays, but there are some differences. See [Writing data to and retrieving data from a netCDF variable](#writing-data-to-and-retrieving-data-from-a-netcdf-variable) for more details.

## Attributes in a netCDF file

There are two types of attributes in a netCDF file, global and variable. Global attributes provide information about a group, or the entire dataset, as a whole. `Variable` attributes provide information about one of the variables in a group. Global attributes are set by assigning values to `Dataset` or `Group` instance variables. `Variable` attributes are set by assigning values to `Variable` instance variables. Attributes can be strings, numbers or sequences.
Returning to our example,

```python
>>> import time
>>> rootgrp.description = "bogus example script"
>>> rootgrp.history = "Created " + time.ctime(time.time())
>>> rootgrp.source = "netCDF4 python module tutorial"
>>> latitudes.units = "degrees north"
>>> longitudes.units = "degrees east"
>>> levels.units = "hPa"
>>> temp.units = "K"
>>> times.units = "hours since 0001-01-01 00:00:00.0"
>>> times.calendar = "gregorian"
```

The `Dataset.ncattrs` method of a `Dataset`, `Group` or `Variable` instance can be used to retrieve the names of all the netCDF attributes. This method is provided as a convenience, since using the built-in `dir` Python function will return a bunch of private methods and attributes that cannot (or should not) be modified by the user.

```python
>>> for name in rootgrp.ncattrs():
...     print("Global attr {} = {}".format(name, getattr(rootgrp, name)))
Global attr description = bogus example script
Global attr history = Created Mon Jul  8 14:19:41 2019
Global attr source = netCDF4 python module tutorial
```

The `__dict__` attribute of a `Dataset`, `Group` or `Variable` instance provides all the netCDF attribute name/value pairs in a python dictionary:

```python
>>> print(rootgrp.__dict__)
{'description': 'bogus example script', 'history': 'Created Mon Jul  8 14:19:41 2019', 'source': 'netCDF4 python module tutorial'}
```

Attributes can be deleted from a netCDF `Dataset`, `Group` or `Variable` using the python `del` statement (i.e. `del grp.foo` removes the attribute `foo` from the group `grp`).

## Writing data to and retrieving data from a netCDF variable

Now that you have a netCDF `Variable` instance, how do you put data into it? You can just treat it like an array and assign data to a slice.

```python
>>> import numpy as np
>>> lats = np.arange(-90,91,2.5)
>>> lons = np.arange(-180,180,2.5)
>>> latitudes[:] = lats
>>> longitudes[:] = lons
>>> print("latitudes =\\n{}".format(latitudes[:]))
latitudes =
[-90.  -87.5 -85.  -82.5 -80.  -77.5 -75.  -72.5 -70.  -67.5 -65.  -62.5
 -60.  -57.5 -55.  -52.5 -50.  -47.5 -45.  -42.5 -40.  -37.5 -35.  -32.5
 -30.  -27.5 -25.  -22.5 -20.  -17.5 -15.  -12.5 -10.   -7.5  -5.   -2.5
   0.    2.5   5.    7.5  10.   12.5  15.   17.5  20.   22.5  25.   27.5
  30.   32.5  35.   37.5  40.   42.5  45.   47.5  50.   52.5  55.   57.5
  60.   62.5  65.   67.5  70.   72.5  75.   77.5  80.   82.5  85.   87.5
  90. ]
```

Unlike NumPy's array objects, netCDF `Variable` objects with unlimited dimensions will grow along those dimensions if you assign data outside the currently defined range of indices.

```python
>>> # append along two unlimited dimensions by assigning to slice.
>>> nlats = len(rootgrp.dimensions["lat"])
>>> nlons = len(rootgrp.dimensions["lon"])
>>> print("temp shape before adding data = {}".format(temp.shape))
temp shape before adding data = (0, 0, 73, 144)
>>>
>>> from numpy.random import uniform
>>> temp[0:5, 0:10, :, :] = uniform(size=(5, 10, nlats, nlons))
>>> print("temp shape after adding data = {}".format(temp.shape))
temp shape after adding data = (5, 10, 73, 144)
>>>
>>> # levels have grown, but no values yet assigned.
>>> print("levels shape after adding pressure data = {}".format(levels.shape))
levels shape after adding pressure data = (10,)
```

Note that the size of the levels variable grows when data is appended along the `level` dimension of the variable `temp`, even though no data has yet been assigned to levels.

```python
>>> # now, assign data to levels dimension variable.
>>> levels[:] = [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]
```

However, there are some differences between NumPy and netCDF variable slicing rules. Slices behave as usual, being specified as a `start:stop:step` triplet. Using a scalar integer index `i` takes the ith element and reduces the rank of the output array by one. Boolean array and integer sequence indexing behaves differently for netCDF variables than for numpy arrays. Only 1-d boolean arrays and integer sequences are allowed, and these indices work independently along each dimension (similar to the way vector subscripts work in fortran). This means that

```python
>>> temp[0, 0, [0,1,2,3], [0,1,2,3]].shape
(4, 4)
```

returns an array of shape (4,4) when slicing a netCDF variable, but for a numpy array it returns an array of shape (4,). Similarly, a netCDF variable of shape `(2,3,4,5)` indexed with `[0, array([True, False, True]), array([False, True, True, True]), :]` would return a `(2, 3, 5)` array. In NumPy, this would raise an error since it would be equivalent to `[0, [0,1], [1,2,3], :]`. When slicing with integer sequences, the indices ***need not be sorted*** and ***may contain duplicates*** (both of these are new features in version 1.2.1). While this behaviour may cause some confusion for those used to NumPy's 'fancy indexing' rules, it provides a very powerful way to extract data from multidimensional netCDF variables by using logical operations on the dimension arrays to create slices.

For example,

```python
>>> tempdat = temp[::2, [1,3,6], lats>0, lons>0]
```

will extract time indices 0, 2 and 4, pressure levels 850, 500 and 200 hPa, all Northern Hemisphere latitudes and Eastern Hemisphere longitudes, resulting in a numpy array of shape (3, 3, 36, 71).

```python
>>> print("shape of fancy temp slice = {}".format(tempdat.shape))
shape of fancy temp slice = (3, 3, 36, 71)
```

***Special note for scalar variables***: To extract data from a scalar variable `v` with no associated dimensions, use `numpy.asarray(v)` or `v[...]`. The result will be a numpy scalar array.

By default, netcdf4-python returns numpy masked arrays, with values equal to the `missing_value` or `_FillValue` variable attributes masked, for primitive and enum data types. The `Dataset.set_auto_mask` method (available on both `Dataset` and `Variable` instances) can be used to disable this feature so that numpy arrays are always returned, with the missing values included. Prior to version 1.4.0 the default behavior was to only return masked arrays when the requested slice contained missing values. This behavior can be recovered using the `Dataset.set_always_mask` method. If a masked array is written to a netCDF variable, the masked elements are filled with the value specified by the `missing_value` attribute. If the variable has no `missing_value`, the `_FillValue` is used instead.

## Dealing with time coordinates

Time coordinate values pose a special challenge to netCDF users. Most metadata standards (such as CF) specify that time should be measured relative to a fixed date using a certain calendar, with units specified like `hours since YY-MM-DD hh:mm:ss`. These units can be awkward to deal with, without a utility to convert the values to and from calendar dates. The functions [num2date](https://unidata.github.io/cftime/api.html) and [date2num](https://unidata.github.io/cftime/api.html) are provided by [cftime](https://unidata.github.io/cftime) to do just that. Here's an example of how they can be used:

```python
>>> # fill in times.
>>> from datetime import datetime, timedelta
>>> from cftime import num2date, date2num
>>> dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
>>> times[:] = date2num(dates,units=times.units,calendar=times.calendar)
>>> print("time values (in units {}):\\n{}".format(times.units, times[:]))
time values (in units hours since 0001-01-01 00:00:00.0):
[17533104. 17533116. 17533128. 17533140. 17533152.]
>>> dates = num2date(times[:],units=times.units,calendar=times.calendar)
>>> print("dates corresponding to time values:\\n{}".format(dates))
[cftime.DatetimeGregorian(2001, 3, 1, 0, 0, 0, 0, has_year_zero=False)
 cftime.DatetimeGregorian(2001, 3, 1, 12, 0, 0, 0, has_year_zero=False)
 cftime.DatetimeGregorian(2001, 3, 2, 0, 0, 0, 0, has_year_zero=False)
 cftime.DatetimeGregorian(2001, 3, 2, 12, 0, 0, 0, has_year_zero=False)
 cftime.DatetimeGregorian(2001, 3, 3, 0, 0, 0, 0, has_year_zero=False)]
```

`num2date` converts numeric values of time in the specified `units` and `calendar` to datetime objects, and `date2num` does the reverse. All the calendars currently defined in the [CF metadata convention](http://cfconventions.org) are supported. A function called `date2index` is also provided which returns the indices of a netCDF time variable corresponding to a sequence of datetime instances.

## Reading data from a multi-file netCDF dataset

If you want to read data from a variable that spans multiple netCDF files, you can use the `MFDataset` class to read the data as if it were contained in a single file. Instead of using a single filename to create a `Dataset` instance, create a `MFDataset` instance with either a list of filenames, or a string with a wildcard (which is then converted to a sorted list of files using the python glob module). Variables in the list of files that share the same unlimited dimension are aggregated together, and can be sliced across multiple files. To illustrate this, let's first create a bunch of netCDF files with the same variable (with the same unlimited dimension). The files must be in `NETCDF3_64BIT_OFFSET`, `NETCDF3_64BIT_DATA`, `NETCDF3_CLASSIC` or `NETCDF4_CLASSIC` format (`NETCDF4` formatted multi-file datasets are not supported).

```python
>>> for nf in range(10):
...     with Dataset("mftest%s.nc" % nf, "w", format="NETCDF4_CLASSIC") as f:
...         _ = f.createDimension("x",None)
...         x = f.createVariable("x","i",("x",))
...         x[0:10] = np.arange(nf*10,10*(nf+1))
```

Now read all the files back in at once with `MFDataset`

```python
>>> from netCDF4 import MFDataset
>>> f = MFDataset("mftest*nc")
>>> print(f.variables["x"][:])
[ 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 96 97 98 99]
```

Note that `MFDataset` can only be used to read, not write, multi-file datasets.

## Efficient compression of netCDF variables

Data stored in netCDF `Variable` objects can be compressed and decompressed on the fly. The compression algorithm used is determined by the `compression` keyword argument to the `Dataset.createVariable` method. `zlib` compression is always available, `szip` is available if the linked HDF5 library supports it, and `zstd`, `bzip2`, `blosc_lz`, `blosc_lz4`, `blosc_lz4hc`, `blosc_zlib` and `blosc_zstd` are available via optional external plugins.
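Because the optional algorithms depend on how the HDF5 library was built and on external plugins being present, it can be worth checking at runtime whether a given filter is actually usable before creating variables with it. Here's a minimal sketch using the filter-query methods of `Dataset` (the filename is just a placeholder; the return values depend on your build):

```python
>>> nc = Dataset("filter_check.nc", "w")
>>> nc.has_zstd_filter()    # True only if the zstd plugin is available
>>> nc.has_blosc_filter()   # likewise for the blosc compressors
>>> nc.has_bzip2_filter()
>>> nc.has_szip_filter()    # requires szip support in the linked HDF5
>>> nc.close()
```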
The `complevel` keyword regulates the speed and efficiency of the compression for `zlib`, `bzip2` and `zstd` (1 being fastest, but lowest compression ratio, 9 being slowest but best compression ratio). The default value of `complevel` is 4. Setting `shuffle=False` will turn off the HDF5 shuffle filter, which de-interlaces a block of data before `zlib` compression by reordering the bytes. The shuffle filter can significantly improve compression ratios, and is on by default if `compression=zlib`. Setting the `fletcher32` keyword argument to `Dataset.createVariable` to `True` (it's `False` by default) enables the Fletcher32 checksum algorithm for error detection. It's also possible to set the HDF5 chunking parameters and endian-ness of the binary data stored in the HDF5 file with the `chunksizes` and `endian` keyword arguments to `Dataset.createVariable`. These keyword arguments are only relevant for `NETCDF4` and `NETCDF4_CLASSIC` files (where the underlying file format is HDF5) and are silently ignored if the file format is `NETCDF3_CLASSIC`, `NETCDF3_64BIT_OFFSET` or `NETCDF3_64BIT_DATA`. If the HDF5 library is built with szip support, `compression='szip'` can also be used (in conjunction with the `szip_coding` and `szip_pixels_per_block` keyword arguments).

If your data only has a certain number of digits of precision (say for example, it is temperature data that was measured with a precision of 0.1 degrees), you can dramatically improve compression by quantizing (or truncating) the data. There are two methods supplied for doing this. You can use the `least_significant_digit` keyword argument to `Dataset.createVariable` to specify the power of ten of the smallest decimal place in the data that is a reliable value. For example if the data has a precision of 0.1, then setting `least_significant_digit=1` will cause the data to be quantized using `numpy.around(scale*data)/scale`, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). This is done at the python level and is not a part of the underlying C library. Starting with netcdf-c version 4.9.0, a quantization capability is provided in the library. This can be used via the `significant_digits` `Dataset.createVariable` kwarg (new in version 1.6.0). The interpretation of `significant_digits` is different from `least_significant_digit` in that it specifies the absolute number of significant digits independent of the magnitude of the variable (the floating point exponent). Either of these approaches makes the compression 'lossy' instead of 'lossless', that is some precision in the data is sacrificed for the sake of disk space.

In our example, try replacing the line

```python
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",))
```

with

```python
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib')
```

and then

```python
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',least_significant_digit=3)
```

or with netcdf-c >= 4.9.0

```python
>>> temp = rootgrp.createVariable("temp","f4",("time","level","lat","lon",),compression='zlib',significant_digits=4)
```

and see how much smaller the resulting files are.

## Beyond homogeneous arrays of a fixed type - compound data types

Compound data types map directly to numpy structured (a.k.a. 'record') arrays. Structured arrays are akin to C structs, or derived types in Fortran.
They allow for the construction of table-like structures composed of combinations of other data types, including other compound types. Compound types might be useful for representing multiple parameter values at each point on a grid, or at each time and space location for scattered (point) data. You can then access all the information for a point by reading one variable, instead of reading different parameters from different variables. Compound data types are created from the corresponding numpy data type using the `Dataset.createCompoundType` method of a `Dataset` or `Group` instance. Since there is no native complex data type in netcdf (but see [Support for complex numbers](#support-for-complex-numbers)), compound types are handy for storing numpy complex arrays. Here's an example:

```python
>>> f = Dataset("complex.nc","w")
>>> size = 3 # length of 1-d complex array
>>> # create sample complex data.
>>> datac = np.exp(1j*(1.+np.linspace(0, np.pi, size)))
>>> # create complex128 compound data type.
>>> complex128 = np.dtype([("real",np.float64),("imag",np.float64)])
>>> complex128_t = f.createCompoundType(complex128,"complex128")
>>> # create a variable with this data type, write some data to it.
>>> x_dim = f.createDimension("x_dim",None)
>>> v = f.createVariable("cmplx_var",complex128_t,"x_dim")
>>> data = np.empty(size,complex128) # numpy structured array
>>> data["real"] = datac.real; data["imag"] = datac.imag
>>> v[:] = data # write numpy structured array to netcdf compound var
>>> # close and reopen the file, check the contents.
>>> f.close(); f = Dataset("complex.nc")
>>> v = f.variables["cmplx_var"]
>>> datain = v[:] # read in all the data into a numpy structured array
>>> # create an empty numpy complex array
>>> datac2 = np.empty(datain.shape,np.complex128)
>>> # .. fill it with contents of structured array.
>>> datac2.real = datain["real"]; datac2.imag = datain["imag"]
>>> print('{}: {}'.format(datac.dtype, datac)) # original data
complex128: [ 0.54030231+0.84147098j -0.84147098+0.54030231j -0.54030231-0.84147098j]
>>>
>>> print('{}: {}'.format(datac2.dtype, datac2)) # data from file
complex128: [ 0.54030231+0.84147098j -0.84147098+0.54030231j -0.54030231-0.84147098j]
```

Compound types can be nested, but you must create the 'inner' ones first. Not all possible numpy structured arrays can be represented as compound variables; an error message will be raised if you try to create one that is not supported. All of the compound types defined for a `Dataset` or `Group` are stored in a Python dictionary, just like variables and dimensions. As always, printing objects gives useful summary information in an interactive session:

```python
>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x_dim(3)
    variables(dimensions): {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True} cmplx_var(x_dim)
    groups:
>>> print(f.variables["cmplx_var"])
<class 'netCDF4._netCDF4.Variable'>
compound cmplx_var(x_dim)
compound data type: {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}
unlimited dimensions: x_dim
current shape = (3,)
>>> print(f.cmptypes)
{'complex128': <class 'netCDF4._netCDF4.CompoundType'>: name = 'complex128', numpy dtype = {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}}
>>> print(f.cmptypes["complex128"])
<class 'netCDF4._netCDF4.CompoundType'>: name = 'complex128', numpy dtype = {'names':['real','imag'], 'formats':['<f8','<f8'], 'offsets':[0,8], 'itemsize':16, 'aligned':True}
```

## Variable-length (vlen) data types

NetCDF 4 has support for variable-length or "ragged" arrays. These are arrays of variable length sequences having the same type. To create a variable-length data type, use the `Dataset.createVLType` method of a `Dataset` or `Group` instance.

```python
>>> f = Dataset("tst_vlen.nc","w")
>>> vlen_t = f.createVLType(np.int32, "phony_vlen")
```

The numpy datatype of the variable-length sequences and the name of the new datatype must be specified. Any of the primitive datatypes can be used (signed and unsigned integers, 32 and 64 bit floats, and characters), but compound data types cannot.
## Variable-length (vlen) data type

netCDF version 4 has support for variable-length or "ragged" arrays. These are arrays of variable length sequences having the same type. To create a variable-length data type, use the `Dataset.createVLType` method of a `Dataset` or `Group` instance.

```python
>>> f = Dataset("tst_vlen.nc","w")
>>> vlen_t = f.createVLType(np.int32, "phony_vlen")
```

The numpy datatype of the variable-length sequences and the name of the new datatype must be specified. Any of the primitive datatypes can be used (signed and unsigned integers, 32 and 64 bit floats, and characters), but compound data types cannot. A new variable can then be created using this datatype.

```python
>>> x = f.createDimension("x",3)
>>> y = f.createDimension("y",4)
>>> vlvar = f.createVariable("phony_vlen_var", vlen_t, ("y","x"))
```

Since there is no native vlen datatype in numpy, vlen arrays are represented in python as object arrays (arrays of dtype `object`). These are arrays whose elements are Python object pointers, and can contain any type of python object. For this application, they must contain 1-D numpy arrays all of the same type but of varying length. In this case, they contain 1-D numpy `int32` arrays of random length between 1 and 10.

```python
>>> import random
>>> random.seed(54321)
>>> data = np.empty(len(y)*len(x),object)
>>> for n in range(len(y)*len(x)):
...     data[n] = np.arange(random.randint(1,10),dtype="int32")+1
>>> data = np.reshape(data,(len(y),len(x)))
>>> vlvar[:] = data
>>> print("vlen variable =\\n{}".format(vlvar[:]))
vlen variable =
[[array([1, 2, 3, 4, 5, 6, 7, 8], dtype=int32) array([1, 2], dtype=int32)
  array([1, 2, 3, 4], dtype=int32)]
 [array([1, 2, 3], dtype=int32)
  array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
  array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)]
 [array([1, 2, 3, 4, 5, 6, 7], dtype=int32) array([1, 2, 3], dtype=int32)
  array([1, 2, 3, 4, 5, 6], dtype=int32)]
 [array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)
  array([1, 2, 3, 4, 5], dtype=int32) array([1, 2], dtype=int32)]]
>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(3), y(4)
    variables(dimensions): int32 phony_vlen_var(y,x)
    groups:
>>> print(f.variables["phony_vlen_var"])
<class 'netCDF4._netCDF4.Variable'>
vlen phony_vlen_var(y, x)
vlen data type: int32
unlimited dimensions:
current shape = (4, 3)
>>> print(f.vltypes["phony_vlen"])
<class 'netCDF4._netCDF4.VLType'>: name = 'phony_vlen', numpy dtype = int32
```

Numpy object arrays containing python strings can also be written as vlen variables. For vlen strings, you don't need to create a vlen data type. Instead, simply use the python `str` builtin (or a numpy string datatype with fixed length greater than 1) when calling the `Dataset.createVariable` method.

```python
>>> z = f.createDimension("z",10)
>>> strvar = f.createVariable("strvar", str, "z")
```

In this example, an object array is filled with random python strings with random lengths between 2 and 12 characters, and the data in the object array is assigned to the vlen string variable.

```python
>>> chars = "1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
>>> data = np.empty(10,"O")
>>> for n in range(10):
...     stringlen = random.randint(2,12)
...     data[n] = "".join([random.choice(chars) for i in range(stringlen)])
>>> strvar[:] = data
>>> print("variable-length string variable:\\n{}".format(strvar[:]))
variable-length string variable:
['Lh' '25F8wBbMI' '53rmM' 'vvjnb3t63ao' 'qjRBQk6w' 'aJh' 'QF' 'jtIJbJACaQk4'
 '3Z5' 'bftIIq']
>>> print(f)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(3), y(4), z(10)
    variables(dimensions): int32 phony_vlen_var(y,x), <class 'str'> strvar(z)
    groups:
>>> print(f.variables["strvar"])
<class 'netCDF4._netCDF4.Variable'>
vlen strvar(z)
vlen data type: <class 'str'>
unlimited dimensions:
current shape = (10,)
```

It is also possible to set contents of vlen string variables with numpy arrays of any string or unicode data type. Note, however, that accessing the contents of such variables will always return numpy arrays with dtype `object`.
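For instance, here is a small sketch (assuming the file from the example above is still open) that writes a fixed-width unicode array into the vlen string variable and reads it back:

```python
>>> # fixed-width unicode array in ...
>>> strvar[:3] = np.array(["ab", "cde", "fghi"], dtype="U4")
>>> # ... but object dtype out.
>>> print(strvar[:3].dtype)
object
```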
## Enum data type

netCDF4 has an enumerated data type, which is an integer datatype that is restricted to certain named values. Since Enums don't map directly to a numpy data type, they are read and written as integer arrays.

Here's an example of using an Enum type to hold cloud type data. The base integer data type and a python dictionary describing the allowed values and their names are used to define an Enum data type using `Dataset.createEnumType`.

```python
>>> nc = Dataset('clouds.nc','w')
>>> # python dict with allowed values and their names.
>>> enum_dict = {'Altocumulus': 7, 'Missing': 255,
... 'Stratus': 2, 'Clear': 0,
... 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5,
... 'Cumulonimbus': 1, 'Stratocumulus': 3}
>>> # create the Enum type called 'cloud_t'.
>>> cloud_type = nc.createEnumType(np.uint8,'cloud_t',enum_dict)
>>> print(cloud_type)
<class 'netCDF4._netCDF4.EnumType'>: name = 'cloud_t', numpy dtype = uint8, fields/values ={'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0, 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 'Stratocumulus': 3}
```

A new variable can be created in the usual way using this data type. Integer data is written to the variable that represents the named cloud types in enum_dict. A `ValueError` will be raised if an attempt is made to write an integer value not associated with one of the specified names.

```python
>>> time = nc.createDimension('time',None)
>>> # create a 1d variable of type 'cloud_type'.
>>> # The fill_value is set to the 'Missing' named value.
>>> cloud_var = nc.createVariable('primary_cloud',cloud_type,'time',
...                               fill_value=enum_dict['Missing'])
>>> # write some data to the variable.
>>> cloud_var[:] = [enum_dict[k] for k in ['Clear', 'Stratus', 'Cumulus',
...                                        'Missing', 'Cumulonimbus']]
>>> nc.close()
>>> # reopen the file, read the data.
>>> nc = Dataset('clouds.nc')
>>> cloud_var = nc.variables['primary_cloud']
>>> print(cloud_var)
<class 'netCDF4._netCDF4.Variable'>
enum primary_cloud(time)
    _FillValue: 255
enum data type: uint8
unlimited dimensions: time
current shape = (5,)
>>> print(cloud_var.datatype.enum_dict)
{'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0, 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 'Stratocumulus': 3}
>>> print(cloud_var[:])
[0 2 4 -- 1]
>>> nc.close()
```
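To recover the names from the integers read back, you can invert the type's `enum_dict`. A small sketch, reopening the file from the example above (the masked 'Missing' entry is dropped here with `compressed()`):

```python
>>> nc = Dataset('clouds.nc')
>>> cloud_var = nc.variables['primary_cloud']
>>> # invert the name -> value mapping to look up names by value.
>>> names = {v: k for k, v in cloud_var.datatype.enum_dict.items()}
>>> [names[v] for v in cloud_var[:].compressed()]
['Clear', 'Stratus', 'Cumulus', 'Cumulonimbus']
>>> nc.close()
```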
## Parallel IO

If MPI parallel enabled versions of netcdf and hdf5 or pnetcdf are detected, and [mpi4py](https://mpi4py.scipy.org) is installed, netcdf4-python will be built with parallel IO capabilities enabled. Parallel IO of NETCDF4 or NETCDF4_CLASSIC formatted files is only available if the MPI parallel HDF5 library is available. Parallel IO of classic netcdf-3 file formats is only available if the [PnetCDF](https://parallel-netcdf.github.io/) library is available. To use parallel IO, your program must be running in an MPI environment using [mpi4py](https://mpi4py.scipy.org).

```python
>>> from mpi4py import MPI
>>> import numpy as np
>>> from netCDF4 import Dataset
>>> rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
```

To run an MPI-based parallel program like this, you must use `mpiexec` to launch several parallel instances of Python (for example, using `mpiexec -np 4 python mpi_example.py`). The parallel features of netcdf4-python are mostly transparent - when a new dataset is created or an existing dataset is opened, use the `parallel` keyword to enable parallel access.

```python
>>> nc = Dataset('parallel_test.nc','w',parallel=True)
```

The optional `comm` keyword may be used to specify a particular MPI communicator (`MPI_COMM_WORLD` is used by default). Each process (or rank) can now write to the file independently. In this example the process rank is written to a different variable index on each task.

```python
>>> d = nc.createDimension('dim',4)
>>> v = nc.createVariable('var', np.int64, 'dim')
>>> v[rank] = rank
>>> nc.close()

% ncdump parallel_test.nc
netcdf parallel_test {
dimensions:
    dim = 4 ;
variables:
    int64 var(dim) ;
data:

 var = 0, 1, 2, 3 ;
}
```

There are two types of parallel IO, independent (the default) and collective. Independent IO means that each process can do IO independently. It should not depend on or be affected by other processes. Collective IO is a way of doing IO defined in the MPI-IO standard; unlike independent IO, all processes must participate in doing IO. To toggle back and forth between the two types of IO, use the `Variable.set_collective` method (see the sketch at the end of this section). All metadata operations (such as creation of groups, types, variables, dimensions, or attributes) are collective.

There are a couple of important limitations of parallel IO:

- parallel IO for NETCDF4 or NETCDF4_CLASSIC formatted files is only available if the netcdf library was compiled with MPI enabled HDF5.
- parallel IO for all classic netcdf-3 file formats is only available if the netcdf library was compiled with [PnetCDF](https://parallel-netcdf.github.io).
- If a variable has an unlimited dimension, appending data must be done in collective mode. If the write is done in independent mode, the operation will fail with a generic "HDF Error".
- You can write compressed data in parallel only with netcdf-c >= 4.7.4 and hdf5 >= 1.10.3 (although you can read in parallel with earlier versions). To write compressed data in parallel, the variable must be in 'collective IO mode'. This is done automatically on variable creation if compression is turned on, but if you are appending to a variable in an existing file, you must use `Variable.set_collective(True)` before attempting to write to it.
- You cannot use variable-length (VLEN) data types.

***Important warning regarding threads:*** The underlying netcdf-c library is not thread-safe, so netcdf4-python cannot perform parallel IO in a multi-threaded environment. Users should expect segfaults if a netcdf file is opened on multiple threads - care should be taken to restrict netcdf4-python usage to a single thread, even when using free-threaded python.
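Here is a minimal sketch of toggling collective IO, assuming the 4-rank MPI setup and the `parallel_test.nc` file from the example above:

```python
>>> # reopen the parallel dataset for appending and switch IO modes.
>>> nc = Dataset('parallel_test.nc', 'a', parallel=True)
>>> v = nc.variables['var']
>>> v.set_collective(True)   # all ranks must now participate in each write
>>> v[rank] = rank + 100
>>> v.set_collective(False)  # back to independent IO
>>> nc.close()
```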
## Dealing with strings

The most flexible way to store arrays of strings is with the [Variable-length (vlen) string data type](#variable-length-vlen-data-type). However, this requires the use of the NETCDF4 data model, and the vlen type does not map very well to numpy arrays (you have to use numpy arrays of dtype=`object`, which are arrays of arbitrary python objects). numpy does have a fixed-width string array data type, but unfortunately the netCDF data model does not. Instead fixed-width byte strings are typically stored as [arrays of 8-bit characters](https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html#bp_Strings-and-Variables-of-type-char). To perform the conversion to and from character arrays to fixed-width numpy string arrays, the following convention is followed by the python interface. If the `_Encoding` special attribute is set for a character array (dtype `S1`) variable, the `chartostring` utility function is used to convert the array of characters to an array of strings with one less dimension (the last dimension is interpreted as the length of each string) when reading the data. The character set is specified by the `_Encoding` attribute. If `_Encoding` is 'none' or 'bytes', then the character array is converted to a numpy fixed-width byte string array (dtype `S#`), otherwise a numpy unicode (dtype `U#`) array is created. When writing the data, `stringtochar` is used to convert the numpy string array to an array of characters with one more dimension. For example,

```python
>>> from netCDF4 import stringtochar
>>> nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC')
>>> _ = nc.createDimension('nchars',3)
>>> _ = nc.createDimension('nstrings',None)
>>> v = nc.createVariable('strings','S1',('nstrings','nchars'))
>>> datain = np.array(['foo','bar'],dtype='S3')
>>> v[:] = stringtochar(datain) # manual conversion to char array
>>> print(v[:]) # data returned as char array
[[b'f' b'o' b'o']
 [b'b' b'a' b'r']]
>>> v._Encoding = 'ascii' # this enables automatic conversion
>>> v[:] = datain # conversion to char array done internally
>>> print(v[:]) # data returned in numpy string array
['foo' 'bar']
>>> nc.close()
```

Even if the `_Encoding` attribute is set, the automatic conversion of char arrays to/from string arrays can be disabled with `Variable.set_auto_chartostring`.

A similar situation is often encountered with numpy structured arrays with subdtypes containing fixed-width byte strings (dtype=`S#`). Since there is no native fixed-length string netCDF datatype, these numpy structured arrays are mapped onto netCDF compound types with character array elements. In this case the string <-> char array conversion is handled automatically (without the need to set the `_Encoding` attribute) using [numpy views](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.view.html). The structured array dtype (including the string elements) can even be used to define the compound data type - the string dtype will be converted to character array dtype under the hood when creating the netcdf compound type. Here's an example:

```python
>>> nc = Dataset('compoundstring_example.nc','w')
>>> dtype = np.dtype([('observation', 'f4'),
...                   ('station_name','S10')])
>>> station_data_t = nc.createCompoundType(dtype,'station_data')
>>> _ = nc.createDimension('station',None)
>>> statdat = nc.createVariable('station_obs', station_data_t, ('station',))
>>> data = np.empty(2,dtype)
>>> data['observation'][:] = (123.,3.14)
>>> data['station_name'][:] = ('Boulder','New York')
>>> print(statdat.dtype) # strings actually stored as character arrays
{'names':['observation','station_name'], 'formats':['<f4',('S1', (10,))], 'offsets':[0,4], 'itemsize':44, 'aligned':True}
>>> statdat[:] = data # strings converted to character arrays internally
>>> print(statdat[:]) # character arrays converted back to strings
[(123.  , b'Boulder') (  3.14, b'New York')]
>>> print(statdat[:].dtype)
{'names':['observation','station_name'], 'formats':['<f4','S10'], 'offsets':[0,4], 'itemsize':44, 'aligned':True}
>>> statdat.set_auto_chartostring(False) # turn off auto-conversion
>>> statdat[:] = data.view(dtype=[('observation', 'f4'),('station_name','S1',10)])
>>> print(statdat[:]) # now structured array with char array subtype is returned
[(123.  , [b'B', b'o', b'u', b'l', b'd', b'e', b'r', b'', b'', b''])
 (  3.14, [b'N', b'e', b'w', b' ', b'Y', b'o', b'r', b'k', b'', b''])]
>>> nc.close()
```

Note that there is currently no support for mapping numpy structured arrays with unicode elements (dtype `U#`) onto netCDF compound types, nor is there support for netCDF compound types with vlen string components.
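The two conversion utilities can also be exercised directly, without a file; here is a quick sketch of the round trip:

```python
>>> from netCDF4 import stringtochar, chartostring
>>> a = np.array(['foo', 'bar'], dtype='S3')
>>> c = stringtochar(a)  # adds a trailing character dimension of size 3
>>> print(c.shape, c.dtype)
(2, 3) |S1
>>> print(chartostring(c))  # back to fixed-width strings (unicode by default)
['foo' 'bar']
```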
## In-memory (diskless) Datasets

You can create netCDF Datasets whose content is held in memory instead of in a disk file. There are two ways to do this. If you don't need access to the memory buffer containing the Dataset from within python, the best way is to use the `diskless=True` keyword argument when creating the Dataset. If you want to save the Dataset to disk when you close it, also set `persist=True`. If you want to create a new read-only Dataset from an existing python memory buffer, use the `memory` keyword argument to pass the memory buffer when creating the Dataset. If you want to create a new in-memory Dataset, and then access the memory buffer directly from Python, use the `memory` keyword argument to specify the estimated size of the Dataset in bytes when creating the Dataset with `mode='w'`. Then, the `Dataset.close` method will return a python memoryview object representing the Dataset. Below are examples illustrating both approaches.

```python
>>> # create a diskless (in-memory) Dataset,
>>> # and persist the file to disk when it is closed.
>>> nc = Dataset('diskless_example.nc','w',diskless=True,persist=True)
>>> d = nc.createDimension('x',None)
>>> v = nc.createVariable('v',np.int32,'x')
>>> v[0:5] = np.arange(5)
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close() # file saved to disk
>>> # create an in-memory dataset from an existing python
>>> # memory buffer.
>>> # read the newly created netcdf file into a python
>>> # bytes object.
>>> with open('diskless_example.nc', 'rb') as f:
...     nc_bytes = f.read()
>>> # create a netCDF in-memory dataset from the bytes object.
>>> nc = Dataset('inmemory.nc', memory=nc_bytes)
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close()
>>> # create an in-memory Dataset and retrieve memory buffer
>>> # estimated size is 1028 bytes - this is actually only
>>> # used if format is NETCDF3
>>> # (ignored for NETCDF4/HDF5 files).
>>> nc = Dataset('inmemory.nc', mode='w',memory=1028)
>>> d = nc.createDimension('x',None)
>>> v = nc.createVariable('v',np.int32,'x')
>>> v[0:5] = np.arange(5)
>>> nc_buf = nc.close() # close returns memoryview
>>> print(type(nc_buf))
<class 'memoryview'>
>>> # save nc_buf to disk, read it back in and check.
>>> with open('inmemory.nc', 'wb') as f:
...     f.write(nc_buf)
>>> nc = Dataset('inmemory.nc')
>>> print(nc)
<class 'netCDF4._netCDF4.Dataset'>
root group (NETCDF4 data model, file format HDF5):
    dimensions(sizes): x(5)
    variables(dimensions): int32 v(x)
    groups:
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close()
```
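The buffer returned by `Dataset.close` can also be passed straight back in via the `memory` kwarg, without ever touching disk. A short sketch, continuing from the example above where `nc_buf` holds the memoryview:

```python
>>> # reopen a read-only Dataset directly from the memoryview.
>>> nc = Dataset('inmemory.nc', memory=nc_buf)
>>> print(nc['v'][:])
[0 1 2 3 4]
>>> nc.close()
```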
## Support for complex numbers

Although there is no native support for complex numbers in netCDF, there are some common conventions for storing them. Two of the most common are to either use a compound datatype for the real and imaginary components, or a separate dimension. `netCDF4` supports reading several of these conventions, as well as writing using one of two conventions (depending on file format). This support for complex numbers is enabled by setting `auto_complex=True` when opening a `Dataset`:

```python
>>> complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j])
>>> with netCDF4.Dataset("complex.nc", "w", auto_complex=True) as nc:
...     nc.createDimension("x", size=len(complex_array))
...     var = nc.createVariable("data", "c16", ("x",))
...     var[:] = complex_array
...     print(var)
<class 'netCDF4._netCDF4.Variable'>
compound data(x)
compound data type: complex128
unlimited dimensions:
current shape = (5,)
```

When reading files using `auto_complex=True`, `netCDF4` will interpret variables stored using the following conventions as complex numbers:

- compound datatypes with two `float` or `double` members whose names begin with `r` and `i` (case insensitive)
- a dimension of length 2 named `complex` or `ri`

When writing files using `auto_complex=True`, `netCDF4` will use:

- a compound datatype named `_PFNC_DOUBLE_COMPLEX_TYPE` (or `*FLOAT*` as appropriate) with members `r` and `i` for netCDF4 formats;
- or a dimension of length 2 named `_pfnc_complex` for netCDF3 or classic formats.

Support for complex numbers is handled via the [`nc-complex`](https://github.com/PlasmaFAIR/nc-complex) library. See there for further details.

**contact**: Jeffrey Whitaker <jeffrey.s.whitaker@noaa.gov>

**copyright**: 2008 by Jeffrey Whitaker.

**license**: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""

# Make changes to this file, not the c-wrappers that Cython generates.
from cpython.mem cimport PyMem_Malloc, PyMem_Free
from cpython.buffer cimport PyObject_GetBuffer, PyBuffer_Release, PyBUF_SIMPLE, PyBUF_ANY_CONTIGUOUS
from cpython.bytes cimport PyBytes_FromStringAndSize

# pure python utilities
from .utils import (_StartCountStride, _quantize, _find_dim, _walk_grps,
                    _out_array_shape, _sortbylist, _tostr, _safecast, _is_int)
import sys
import functools
from typing import Union

__version__ = "1.7.4"

# Initialize numpy
import posixpath
from cftime import date2num, num2date, date2index
import numpy
cimport numpy
import weakref
import warnings
import subprocess
import pathlib
import os
from glob import glob
from numpy import ma
from libc.string cimport memcpy, memset
from libc.stdlib cimport malloc, free
numpy.import_array()
include "membuf.pyx"
include "netCDF4.pxi"

__has_rename_grp__ = HAS_RENAME_GRP
__has_nc_inq_path__ = HAS_NC_INQ_PATH
__has_nc_inq_format_extended__ = HAS_NC_INQ_FORMAT_EXTENDED
__has_cdf5_format__ = HAS_CDF5_FORMAT
__has_nc_open_mem__ = HAS_NC_OPEN_MEM
__has_nc_create_mem__ = HAS_NC_CREATE_MEM
__has_parallel4_support__ = HAS_PARALLEL4_SUPPORT
__has_pnetcdf_support__ = HAS_PNETCDF_SUPPORT
__has_parallel_support__ = HAS_PARALLEL_SUPPORT
__has_quantization_support__ = HAS_QUANTIZATION_SUPPORT
__has_zstandard_support__ = HAS_ZSTANDARD_SUPPORT
__has_bzip2_support__ = HAS_BZIP2_SUPPORT
__has_blosc_support__ = HAS_BLOSC_SUPPORT
__has_szip_support__ = HAS_SZIP_SUPPORT
__has_set_alignment__ = HAS_SET_ALIGNMENT
__has_ncfilter__ = HAS_NCFILTER
__has_nc_rc_set__ = HAS_NCRCSET

# set path to SSL certificates (issue #1246)
# available starting in version 4.9.1
if __has_nc_rc_set__:
    import certifi
    if nc_rc_set("HTTP.SSL.CAINFO", _strencode(certifi.where())) != 0:
        raise RuntimeError('error setting path to SSL certificates')

def rc_get(key):
    """
**```rc_get(key)```**

Returns the internal netcdf-c rc table value corresponding to key.
See the netcdf-c documentation for more information on rc files and values.
    """
    cdef int ierr
    cdef char *keyc
    cdef char *valc
    if __has_nc_rc_set__:
        bytestr = _strencode(_tostr(key))
        keyc = bytestr
        valc = nc_rc_get(keyc)
        if valc is NULL:
            return None
        else:
            return valc.decode('utf-8')
    else:
        raise RuntimeError(
            "This function requires netcdf-c 4.9.0+ to be used at compile time"
        )

def rc_set(key, value):
    """
**```rc_set(key, value)```**

Sets the internal netcdf-c rc table value corresponding to key.
See the netcdf-c documentation for more information on rc files and values.
    """
    cdef int ierr
    cdef char *keyc
    cdef char *valuec
    if __has_nc_rc_set__:
        key_bytestr = _strencode(_tostr(key))
        keyc = key_bytestr
        val_bytestr = _strencode(_tostr(value))
        valuec = val_bytestr
        with nogil:
            ierr = nc_rc_set(keyc,valuec)
        _ensure_nc_success(ierr)
    else:
        raise RuntimeError(
            "This function requires netcdf-c 4.9.0+ to be used at compile time"
        )

# check for required version of netcdf-4 and hdf5.
def _gethdf5libversion():
    cdef unsigned int majorvers, minorvers, releasevers
    cdef herr_t ierr
    with nogil:
        ierr = H5get_libversion( &majorvers, &minorvers, &releasevers)
    if ierr != 0:
        raise RuntimeError('error getting HDF5 library version info')
    return '%d.%d.%d' % (majorvers,minorvers,releasevers)

def getlibversion():
    """
**```getlibversion()```**

returns a string describing the version of the netcdf library
used to build the module, and when it was built.
    """
    return (<char *>nc_inq_libvers()).decode('ascii')

def get_chunk_cache():
    """
**```get_chunk_cache()```**

return current netCDF chunk cache information in a tuple (size,nelems,preemption).
See netcdf C library documentation for `nc_get_chunk_cache` for details.
Values can be reset with `set_chunk_cache`.""" cdef int ierr cdef size_t sizep, nelemsp cdef float preemptionp with nogil: ierr = nc_get_chunk_cache(&sizep, &nelemsp, &preemptionp) _ensure_nc_success(ierr) size = sizep; nelems = nelemsp; preemption = preemptionp return (size,nelems,preemption) def set_chunk_cache(size=None,nelems=None,preemption=None): """ **```set_chunk_cache(size=None,nelems=None,preemption=None)```** change netCDF4 chunk cache settings. See netcdf C library documentation for `nc_set_chunk_cache` for details.""" cdef int ierr cdef size_t sizep, nelemsp cdef float preemptionp # reset chunk cache size, leave other parameters unchanged. size_orig, nelems_orig, preemption_orig = get_chunk_cache() if size is not None: sizep = size else: sizep = size_orig if nelems is not None: nelemsp = nelems else: nelemsp = nelems_orig if preemption is not None: preemptionp = preemption else: preemptionp = preemption_orig with nogil: ierr = nc_set_chunk_cache(sizep,nelemsp, preemptionp) _ensure_nc_success(ierr) def get_alignment(): """**```get_alignment()```** return current netCDF alignment within HDF5 files in a tuple (threshold,alignment). See netcdf C library documentation for `nc_get_alignment` for details. Values can be reset with `set_alignment`. This function was added in netcdf 4.9.0.""" if not __has_set_alignment__: raise RuntimeError( "This function requires netcdf-c 4.9.0+ to be used at compile time" ) cdef int ierr cdef int thresholdp, alignmentp ierr = nc_get_alignment(&thresholdp, &alignmentp) _ensure_nc_success(ierr) threshold = thresholdp alignment = alignmentp return (threshold, alignment) def set_alignment(threshold, alignment): """**```set_alignment(threshold,alignment)```** Change the HDF5 file alignment. See netcdf C library documentation for `nc_set_alignment` for details. This function was added in netcdf 4.9.0.""" if not __has_set_alignment__: raise RuntimeError( "This function requires netcdf-c 4.9.0+ to be used at compile time" ) cdef int ierr cdef int thresholdp, alignmentp thresholdp = threshold alignmentp = alignment ierr = nc_set_alignment(thresholdp, alignmentp) _ensure_nc_success(ierr) __netcdf4libversion__ = getlibversion().split()[0] __hdf5libversion__ = _gethdf5libversion() _needsworkaround_issue485 = __netcdf4libversion__ < "4.4.0" or \ (__netcdf4libversion__.startswith("4.4.0") and \ "-development" in __netcdf4libversion__) class NetCDF4MissingFeatureException(Exception): """Custom exception when trying to use features missing from the linked netCDF library""" def __init__(self, feature: str, version: str): super().__init__( f"{feature} requires netCDF lib >= {version} (using {__netcdf4libversion__}). " f"To enable, rebuild netcdf4-python using netCDF {version} or higher " f"(and possibly enable {feature})" ) # numpy data type <--> netCDF 4 data type mapping. _nptonctype = {'S1' : NC_CHAR, 'i1' : NC_BYTE, 'u1' : NC_UBYTE, 'i2' : NC_SHORT, 'u2' : NC_USHORT, 'i4' : NC_INT, 'u4' : NC_UINT, 'i8' : NC_INT64, 'u8' : NC_UINT64, 'f4' : NC_FLOAT, 'f8' : NC_DOUBLE} # just integer types. 
_intnptonctype = {'i1' : NC_BYTE,
                  'u1' : NC_UBYTE,
                  'i2' : NC_SHORT,
                  'u2' : NC_USHORT,
                  'i4' : NC_INT,
                  'u4' : NC_UINT,
                  'i8' : NC_INT64,
                  'u8' : NC_UINT64}

_complex_types = {
    "c16": PFNC_DOUBLE_COMPLEX,
    "c8": PFNC_FLOAT_COMPLEX,
}

# create dictionary mapping string identifiers to netcdf format codes
_format_dict = {'NETCDF3_CLASSIC' : NC_FORMAT_CLASSIC,
                'NETCDF4_CLASSIC' : NC_FORMAT_NETCDF4_CLASSIC,
                'NETCDF4' : NC_FORMAT_NETCDF4}
# create dictionary mapping string identifiers to netcdf create format codes
_cmode_dict = {'NETCDF3_CLASSIC' : NC_CLASSIC_MODEL,
               'NETCDF4_CLASSIC' : NC_CLASSIC_MODEL | NC_NETCDF4,
               'NETCDF4' : NC_NETCDF4}
# dicts for blosc, szip compressors.
_blosc_dict={'blosc_lz':0,'blosc_lz4':1,'blosc_lz4hc':2,'blosc_snappy':3,'blosc_zlib':4,'blosc_zstd':5}
_blosc_dict_inv = {v: k for k, v in _blosc_dict.items()}
_szip_dict = {'ec': 4, 'nn': 32}
_szip_dict_inv = {v: k for k, v in _szip_dict.items()}
if __has_cdf5_format__:
    # NETCDF3_64BIT deprecated, saved for compatibility.
    # use NETCDF3_64BIT_OFFSET instead.
    _format_dict['NETCDF3_64BIT_OFFSET'] = NC_FORMAT_64BIT_OFFSET
    _format_dict['NETCDF3_64BIT_DATA'] = NC_FORMAT_64BIT_DATA
    _cmode_dict['NETCDF3_64BIT_OFFSET'] = NC_64BIT_OFFSET
    _cmode_dict['NETCDF3_64BIT_DATA'] = NC_64BIT_DATA
else:
    _format_dict['NETCDF3_64BIT'] = NC_FORMAT_64BIT
    _cmode_dict['NETCDF3_64BIT'] = NC_64BIT_OFFSET
# invert dictionary mapping
_reverse_format_dict = dict((v, k) for k, v in _format_dict.items())
# add duplicate entry (NETCDF3_64BIT == NETCDF3_64BIT_OFFSET)
if __has_cdf5_format__:
    _format_dict['NETCDF3_64BIT'] = NC_FORMAT_64BIT_OFFSET
    _cmode_dict['NETCDF3_64BIT'] = NC_64BIT_OFFSET
else:
    _format_dict['NETCDF3_64BIT_OFFSET'] = NC_FORMAT_64BIT
    _cmode_dict['NETCDF3_64BIT_OFFSET'] = NC_64BIT_OFFSET

_parallel_formats = []
if __has_parallel4_support__:
    _parallel_formats += ['NETCDF4', 'NETCDF4_CLASSIC']
if __has_pnetcdf_support__:
    _parallel_formats += [
        'NETCDF3_CLASSIC',
        'NETCDF3_64BIT_OFFSET',
        'NETCDF3_64BIT_DATA',
        'NETCDF3_64BIT'
    ]

# Default fill_value to numpy datatype mapping. Last two for complex
# numbers only apply to complex dimensions
default_fillvals = {#'S1':NC_FILL_CHAR,
                    'S1':'\0',
                    'i1':NC_FILL_BYTE,
                    'u1':NC_FILL_UBYTE,
                    'i2':NC_FILL_SHORT,
                    'u2':NC_FILL_USHORT,
                    'i4':NC_FILL_INT,
                    'u4':NC_FILL_UINT,
                    'i8':NC_FILL_INT64,
                    'u8':NC_FILL_UINT64,
                    'f4':NC_FILL_FLOAT,
                    'f8':NC_FILL_DOUBLE,
                    'c8':NC_FILL_FLOAT,
                    'c16':NC_FILL_DOUBLE,
}

# logical for native endian type.
is_native_little = numpy.dtype('<f4').byteorder == c'='

# hard code these here, instead of importing from netcdf.h
# so it will compile with versions <= 4.2.
NC_DISKLESS = 0x0008
# introduced in 4.6.2
if __netcdf4libversion__[0:5] >= "4.6.2":
    NC_PERSIST = 0x4000
else:
    # prior to 4.6.2 this flag doesn't work, so make the same as NC_DISKLESS
    NC_PERSIST = NC_DISKLESS

# next two lines do nothing, preserved for backwards compatibility.
default_encoding = 'utf-8'
unicode_error = 'replace'

_nctonptype = {}
for _key,_value in _nptonctype.items():
    _nctonptype[_value] = _key
_supportedtypes = _nptonctype.keys()
# make sure NC_CHAR points to S1
_nctonptype[NC_CHAR]='S1'

# Mapping from numpy dtype endian format to what we expect
_dtype_endian_lookup = {
    "=": "native",
    ">": "big",
    "<": "little",
    "|": None,
    None: None,
}

# internal C functions.
cdef _get_att_names(int grpid, int varid):
    # Private function to get all the attribute names in a group
    cdef int ierr, numatts, n
    cdef char namstring[NC_MAX_NAME+1]
    if varid == NC_GLOBAL:
        with nogil:
            ierr = nc_inq_natts(grpid, &numatts)
    else:
        with nogil:
            ierr = nc_inq_varnatts(grpid, varid, &numatts)
    _ensure_nc_success(ierr, err_cls=AttributeError)
    attslist = []
    for n in range(numatts):
        with nogil:
            ierr = nc_inq_attname(grpid, varid, n, namstring)
        _ensure_nc_success(ierr, err_cls=AttributeError)
        # attribute names are assumed to be utf-8
        attslist.append(namstring.decode('utf-8'))
    return attslist

cdef _get_att(grp, int varid, name, encoding='utf-8'):
    # Private function to get an attribute value given its name
    cdef int ierr, n, _grpid
    cdef size_t att_len
    cdef char *attname
    cdef nc_type att_type
    cdef ndarray value_arr
    # attribute names are assumed to be utf-8
    bytestr = _strencode(name,encoding='utf-8')
    attname = bytestr
    _grpid = grp._grpid
    with nogil:
        ierr = nc_inq_att(_grpid, varid, attname, &att_type, &att_len)
    _ensure_nc_success(ierr, err_cls=AttributeError)
    # attribute is a character or string ...
    if att_type == NC_CHAR:
        value_arr = numpy.empty(att_len,'S1')
        with nogil:
            ierr = nc_get_att_text(_grpid, varid, attname,
                                   PyArray_BYTES(value_arr))
        _ensure_nc_success(ierr, err_cls=AttributeError)
        if name == '_FillValue':
            # make sure _FillValue for character arrays is a byte on python 3
            # (issue 271).
            pstring = value_arr.tobytes()
        else:
            pstring =\
            value_arr.tobytes().decode(encoding,errors='replace').replace('\x00','')
        return pstring
    elif att_type == NC_STRING:
        values = <char**>PyMem_Malloc(sizeof(char*) * att_len)
        if not values:
            raise MemoryError()
        try:
            with nogil:
                ierr = nc_get_att_string(_grpid, varid, attname, values)
            _ensure_nc_success(ierr, err_cls=AttributeError)
            try:
                result = [values[j].decode(encoding,errors='replace').replace('\x00','')
                          if values[j] else "" for j in range(att_len)]
            finally:
                with nogil:
                    ierr = nc_free_string(att_len, values) # free memory in netcdf C lib
        finally:
            PyMem_Free(values)
        if len(result) == 1:
            return result[0]
        else:
            return result
    else:
        # a regular numeric or compound type.
if att_type == NC_LONG: att_type = NC_INT try: type_att = _nctonptype[att_type] # see if it is a primitive type value_arr = numpy.empty(att_len,type_att) except KeyError: # check if it's a compound try: type_att = _read_compound(grp, att_type) value_arr = numpy.empty(att_len,type_att.dtype_view) except: # check if it's an enum try: type_att = _read_enum(grp, att_type) value_arr = numpy.empty(att_len,type_att.dtype) except: raise KeyError('attribute %s has unsupported datatype' % attname) with nogil: ierr = nc_get_att(_grpid, varid, attname, PyArray_BYTES(value_arr)) _ensure_nc_success(ierr, err_cls=AttributeError) if value_arr.shape == (): # return a scalar for a scalar array return value_arr.item() elif att_len == 1: # return a scalar for a single element array return value_arr[0] else: return value_arr def _set_default_format(object format='NETCDF4'): # Private function to set the netCDF file format cdef int ierr, formatid if format not in _format_dict: raise ValueError("unrecognized format requested") formatid = _format_dict[format] with nogil: ierr = nc_set_default_format(formatid, NULL) _ensure_nc_success(ierr) cdef _get_format(int grpid): # Private function to get the netCDF file format cdef int ierr, formatp with nogil: ierr = nc_inq_format(grpid, &formatp) _ensure_nc_success(ierr) if formatp not in _reverse_format_dict: raise ValueError('format not supported by python interface') return _reverse_format_dict[formatp] cdef _get_full_format(int grpid): """Private function to get the underlying disk format""" if not __has_nc_inq_format_extended__: return "UNDEFINED" cdef int ierr, formatp, modep with nogil: ierr = nc_inq_format_extended(grpid, &formatp, &modep) _ensure_nc_success(ierr) if formatp == NC_FORMAT_NC3: return 'NETCDF3' if formatp == NC_FORMAT_NC_HDF5: return 'HDF5' if formatp == NC_FORMAT_NC_HDF4: return 'HDF4' if formatp == NC_FORMAT_PNETCDF: return 'PNETCDF' if formatp == NC_FORMAT_DAP2: return 'DAP2' if formatp == NC_FORMAT_DAP4: return 'DAP4' if formatp == NC_FORMAT_UNDEFINED: return 'UNDEFINED' cdef issue485_workaround(int grpid, int varid, char* attname): # check to see if attribute already exists # and is NC_CHAR, if so delete it and re-create it # (workaround for issue #485). Fixed in C library # with commit 473259b7728120bb281c52359b1af50cca2fcb72, # which was included in 4.4.0-RC5. cdef nc_type att_type cdef size_t att_len if not _needsworkaround_issue485: return with nogil: ierr = nc_inq_att(grpid, varid, attname, &att_type, &att_len) if ierr == NC_NOERR and att_type == NC_CHAR: with nogil: ierr = nc_del_att(grpid, varid, attname) _ensure_nc_success(ierr) cdef _set_att(grp, int varid, name, value,\ nc_type xtype=-99, force_ncstring=False): # Private function to set an attribute name/value pair cdef int ierr, lenarr, N, grpid cdef char *attname cdef char *datstring cdef char **string_ptrs cdef ndarray value_arr bytestr = _strencode(name) attname = bytestr grpid = grp._grpid # put attribute value into a numpy array. value_arr = numpy.array(value) if value_arr.ndim > 1: # issue #841 if __version__ > "1.4.2": raise ValueError('multi-dimensional array attributes not supported') else: msg = """ Multi-dimensional array attributes are now deprecated. Instead of silently flattening the array, an error will be raised in the next release.""" warnings.warn(msg,FutureWarning) # if array is 64 bit integers or # if 64-bit datatype not supported, cast to 32 bit integers. 
fmt = _get_format(grpid) is_netcdf3 = fmt.startswith('NETCDF3') or fmt == 'NETCDF4_CLASSIC' if value_arr.dtype.str[1:] == 'i8' and ('i8' not in _supportedtypes or\ (is_netcdf3 and fmt != 'NETCDF3_64BIT_DATA')): value_arr = value_arr.astype('i4') # if array contains ascii strings, write a text attribute (stored as bytes). # if array contains unicode strings, and data model is NETCDF4, # write as a string. if value_arr.dtype.char in ['S','U']: # force array of strings if array has multiple elements (issue #770) N = value_arr.size if N > 1: force_ncstring=True if not is_netcdf3 and force_ncstring and N > 1: string_ptrs = PyMem_Malloc(N * sizeof(char*)) if not string_ptrs: raise MemoryError() try: strings = [_strencode(s) for s in value_arr.flat] for j in range(N): if len(strings[j]) == 0: strings[j] = _strencode('\x00') string_ptrs[j] = strings[j] issue485_workaround(grpid, varid, attname) with nogil: ierr = nc_put_att_string(grpid, varid, attname, N, string_ptrs) finally: PyMem_Free(string_ptrs) else: # don't allow string array attributes in NETCDF3 files. if is_netcdf3 and N > 1: msg='array string attributes can only be written with NETCDF4' raise OSError(msg) if not value_arr.shape: dats = _strencode(value_arr.item()) else: value_arr1 = value_arr.ravel() dats = _strencode(''.join(value_arr1.tolist())) lenarr = len(dats) datstring = dats if lenarr == 0: # write null byte lenarr=1; datstring = '\x00' if (force_ncstring or value_arr.dtype.char == 'U') and not is_netcdf3: # try to convert to ascii string, write as NC_CHAR # else it's a unicode string, write as NC_STRING (if NETCDF4) try: if force_ncstring: raise UnicodeError dats_ascii = _to_ascii(dats) # try to encode bytes as ascii string with nogil: ierr = nc_put_att_text(grpid, varid, attname, lenarr, datstring) except UnicodeError: issue485_workaround(grpid, varid, attname) with nogil: ierr = nc_put_att_string(grpid, varid, attname, 1, &datstring) else: with nogil: ierr = nc_put_att_text(grpid, varid, attname, lenarr, datstring) _ensure_nc_success(ierr, err_cls=AttributeError) # a 'regular' array type ('f4','i4','f8' etc) else: if value_arr.dtype.kind == 'V': # compound attribute. xtype = _find_cmptype(grp,value_arr.dtype) elif value_arr.dtype.str[1:] not in _supportedtypes: raise TypeError, 'illegal data type for attribute %r, must be one of %s, got %s' % (attname, _supportedtypes, value_arr.dtype.str[1:]) elif xtype == -99: # if xtype is not passed in as kwarg. xtype = _nptonctype[value_arr.dtype.str[1:]] lenarr = PyArray_SIZE(value_arr) with nogil: ierr = nc_put_att(grpid, varid, attname, xtype, lenarr, PyArray_DATA(value_arr)) _ensure_nc_success(ierr, err_cls=AttributeError) cdef _get_types(group): # Private function to create `CompoundType`, # `VLType` or `EnumType` instances for all the # compound, VLEN or Enum types in a `Group` or `Dataset`. cdef int ierr, ntypes, classp, n, _grpid cdef nc_type xtype cdef nc_type *typeids cdef char namstring[NC_MAX_NAME+1] _grpid = group._grpid # get the number of user defined types in this group. with nogil: ierr = nc_inq_typeids(_grpid, &ntypes, NULL) _ensure_nc_success(ierr) if ntypes > 0: typeids = malloc(sizeof(nc_type) * ntypes) with nogil: ierr = nc_inq_typeids(_grpid, &ntypes, typeids) _ensure_nc_success(ierr) # create empty dictionary for CompoundType instances. 
cmptypes = dict() vltypes = dict() enumtypes = dict() if ntypes > 0: for n in range(ntypes): xtype = typeids[n] with nogil: ierr = nc_inq_user_type(_grpid, xtype, namstring, NULL,NULL,NULL,&classp) _ensure_nc_success(ierr) if classp == NC_COMPOUND: # a compound name = namstring.decode('utf-8') # read the compound type info from the file, # create a CompoundType instance from it. try: cmptype = _read_compound(group, xtype) except KeyError: msg='WARNING: unsupported Compound type, skipping...' warnings.warn(msg) continue cmptypes[name] = cmptype elif classp == NC_VLEN: # a vlen name = namstring.decode('utf-8') # read the VLEN type info from the file, # create a VLType instance from it. try: vltype = _read_vlen(group, xtype) except KeyError: msg='WARNING: unsupported VLEN type, skipping...' warnings.warn(msg) continue vltypes[name] = vltype elif classp == NC_ENUM: # an enum type name = namstring.decode('utf-8') # read the Enum type info from the file, # create a EnumType instance from it. try: enumtype = _read_enum(group, xtype) except KeyError: msg='WARNING: unsupported Enum type, skipping...' warnings.warn(msg) continue enumtypes[name] = enumtype free(typeids) return cmptypes, vltypes, enumtypes cdef _get_dims(group): # Private function to create `Dimension` instances for all the # dimensions in a `Group` or Dataset cdef int ierr, numdims, n, _grpid cdef int *dimids cdef char namstring[NC_MAX_NAME+1] # get number of dimensions in this Group. _grpid = group._grpid with nogil: ierr = nc_inq_ndims(_grpid, &numdims) _ensure_nc_success(ierr) # create empty dictionary for dimensions. dimensions = dict() if numdims > 0: dimids = malloc(sizeof(int) * numdims) if group.data_model == 'NETCDF4': with nogil: ierr = nc_inq_dimids(_grpid, &numdims, dimids, 0) _ensure_nc_success(ierr) else: for n in range(numdims): dimids[n] = n for n in range(numdims): with nogil: ierr = nc_inq_dimname(_grpid, dimids[n], namstring) _ensure_nc_success(ierr) name = namstring.decode('utf-8') dimensions[name] = Dimension(group, name, id=dimids[n]) free(dimids) return dimensions cdef _get_grps(group): # Private function to create `Group` instances for all the # groups in a `Group` or Dataset cdef int ierr, numgrps, n, _grpid cdef int *grpids cdef char namstring[NC_MAX_NAME+1] # get number of groups in this Group. _grpid = group._grpid with nogil: ierr = nc_inq_grps(_grpid, &numgrps, NULL) _ensure_nc_success(ierr) # create dictionary containing `Group` instances for groups in this group groups = dict() if numgrps > 0: grpids = malloc(sizeof(int) * numgrps) with nogil: ierr = nc_inq_grps(_grpid, NULL, grpids) _ensure_nc_success(ierr) for n in range(numgrps): with nogil: ierr = nc_inq_grpname(grpids[n], namstring) _ensure_nc_success(ierr) name = namstring.decode('utf-8') groups[name] = Group(group, name, id=grpids[n]) free(grpids) return groups cdef _get_vars(group, bint auto_complex=False): # Private function to create `Variable` instances for all the # variables in a `Group` or Dataset cdef int ierr, numvars, n, nn, numdims, varid, classp, iendian, _grpid cdef int *varids cdef nc_type xtype cdef char namstring[NC_MAX_NAME+1] cdef char namstring_cmp[NC_MAX_NAME+1] cdef bint is_complex cdef nc_type complex_nc_type # get number of variables in this Group. _grpid = group._grpid with nogil: ierr = nc_inq_nvars(_grpid, &numvars) _ensure_nc_success(ierr, err_cls=AttributeError) # create empty dictionary for variables. variables = dict() if numvars > 0: # get variable ids. 
varids = malloc(sizeof(int) * numvars) if group.data_model == 'NETCDF4': with nogil: ierr = nc_inq_varids(_grpid, &numvars, varids) _ensure_nc_success(ierr) else: for n in range(numvars): varids[n] = n # loop over variables. for n in range(numvars): varid = varids[n] # get variable name. with nogil: ierr = nc_inq_varname(_grpid, varid, namstring) _ensure_nc_success(ierr) name = namstring.decode('utf-8') # get variable type. with nogil: ierr = nc_inq_vartype(_grpid, varid, &xtype) _ensure_nc_success(ierr) # get endian-ness of variable. endianness = None with nogil: ierr = nc_inq_var_endian(_grpid, varid, &iendian) if ierr == NC_NOERR: if iendian == NC_ENDIAN_LITTLE: endianness = '<' elif iendian == NC_ENDIAN_BIG: endianness = '>' # check to see if it is a supported user-defined type. try: datatype = _nctonptype[xtype] if endianness is not None: datatype = endianness + datatype except KeyError: if xtype == NC_STRING: datatype = str else: with nogil: ierr = nc_inq_user_type(_grpid, xtype, namstring_cmp, NULL, NULL, NULL, &classp) _ensure_nc_success(ierr) if classp == NC_COMPOUND: # a compound type # create CompoundType instance describing this compound type. try: datatype = _read_compound(group, xtype, endian=endianness) except KeyError: msg="WARNING: variable '%s' has unsupported compound datatype, skipping .." % name warnings.warn(msg) continue elif classp == NC_VLEN: # a compound type # create VLType instance describing this compound type. try: datatype = _read_vlen(group, xtype, endian=endianness) except KeyError: msg="WARNING: variable '%s' has unsupported VLEN datatype, skipping .." % name warnings.warn(msg) continue elif classp == NC_ENUM: # create EnumType instance describing this compound type. try: datatype = _read_enum(group, xtype, endian=endianness) except KeyError: msg="WARNING: variable '%s' has unsupported Enum datatype, skipping .." % name warnings.warn(msg) continue else: msg="WARNING: variable '%s' has unsupported datatype, skipping .." % name warnings.warn(msg) continue # get number of dimensions. dimids = _inq_vardimid(_grpid, varid, auto_complex) # loop over dimensions, retrieve names. # if not found in current group, look in parents. # QUESTION: what if grp1 has a dimension named 'foo' # and so does it's parent - can a variable in grp1 # use the 'foo' dimension from the parent? dimensions = [] for dimid in dimids: grp = group found = False while not found: for key, value in grp.dimensions.items(): if value._dimid == dimid: dimensions.append(key) found = True break grp = grp.parent # create new variable instance. dimensions = tuple(_find_dim(group, d) for d in dimensions) if auto_complex and pfnc_var_is_complex(_grpid, varid): with nogil: ierr = pfnc_inq_var_complex_base_type(_grpid, varid, &complex_nc_type) _ensure_nc_success(ierr) # TODO: proper lookup datatype = "c16" if complex_nc_type == NC_DOUBLE else "c8" endian = _dtype_endian_lookup[endianness] or "native" variables[name] = Variable(group, name, datatype, dimensions, id=varid, endian=endian) free(varids) # free pointer holding variable ids. return variables def _ensure_nc_success(ierr, err_cls=RuntimeError, filename=None, extra_msg=None): # print netcdf error message, raise error. 
if ierr == NC_NOERR: return err_str = (nc_strerror(ierr)).decode('ascii') if issubclass(err_cls, OSError): if isinstance(filename, bytes): filename = filename.decode() raise err_cls(ierr, err_str, filename) if extra_msg: if isinstance(extra_msg, bytes): extra_msg = extra_msg.decode() err_str = f"{err_str}: {extra_msg}" raise err_cls(err_str) def dtype_is_complex(dtype): """Return True if dtype is a complex number""" return dtype in ("c8", "c16") cdef int _inq_varndims(int ncid, int varid, bint auto_complex): """Wrapper around `nc_inq_varndims`/`pfnc_inq_varndims` for complex numbers""" cdef int ierr = NC_NOERR cdef int ndims if auto_complex: with nogil: ierr = pfnc_inq_varndims(ncid, varid, &ndims) else: with nogil: ierr = nc_inq_varndims(ncid, varid, &ndims) _ensure_nc_success(ierr) return ndims cdef _inq_vardimid(int ncid, int varid, bint auto_complex): """Wrapper around `nc_inq_vardimid`/`pfnc_inq_vardimid` for complex numbers""" cdef int ierr = NC_NOERR cdef int ndims = _inq_varndims(ncid, varid, auto_complex) cdef int* dimids = malloc(sizeof(int) * ndims) if auto_complex: with nogil: ierr = pfnc_inq_vardimid(ncid, varid, dimids) else: with nogil: ierr = nc_inq_vardimid(ncid, varid, dimids) _ensure_nc_success(ierr) result = [dimids[n] for n in range(ndims)] free(dimids) return result # these are class attributes that # only exist at the python level (not in the netCDF file). _private_atts = \ ('_grpid','_grp','_varid','groups','dimensions','variables','dtype','data_model','disk_format', '_nunlimdim','path','parent','ndim','mask','scale','cmptypes','vltypes','enumtypes','_isprimitive', 'file_format','_isvlen','_isenum','_iscompound','_cmptype','_vltype','_enumtype','name', '__orthogoral_indexing__','keepweakref','_has_lsd','always_mask', '_buffer','chartostring','_use_get_vars','_ncstring_attrs__', 'auto_complex' ) cdef class Dataset: """ A netCDF `Dataset` is a collection of dimensions, groups, variables and attributes. Together they describe the meaning of data and relations among data fields stored in a netCDF file. See `Dataset.__init__` for more details. A list of attribute names corresponding to global netCDF attributes defined for the `Dataset` can be obtained with the `Dataset.ncattrs` method. These attributes can be created by assigning to an attribute of the `Dataset` instance. A dictionary containing all the netCDF attribute name/value pairs is provided by the `__dict__` attribute of a `Dataset` instance. The following class variables are read-only and should not be modified by the user. **`dimensions`**: The `dimensions` dictionary maps the names of dimensions defined for the `Group` or `Dataset` to instances of the `Dimension` class. **`variables`**: The `variables` dictionary maps the names of variables defined for this `Dataset` or `Group` to instances of the `Variable` class. **`groups`**: The groups dictionary maps the names of groups created for this `Dataset` or `Group` to instances of the `Group` class (the `Dataset` class is simply a special case of the `Group` class which describes the root group in the netCDF4 file). **`cmptypes`**: The `cmptypes` dictionary maps the names of compound types defined for the `Group` or `Dataset` to instances of the `CompoundType` class. **`vltypes`**: The `vltypes` dictionary maps the names of variable-length types defined for the `Group` or `Dataset` to instances of the `VLType` class. **`enumtypes`**: The `enumtypes` dictionary maps the names of Enum types defined for the `Group` or `Dataset` to instances of the `EnumType` class. 
**`data_model`**: `data_model` describes the netCDF
data model version, one of `NETCDF3_CLASSIC`, `NETCDF4`,
`NETCDF4_CLASSIC`, `NETCDF3_64BIT_OFFSET` or `NETCDF3_64BIT_DATA`.

**`file_format`**: same as `data_model`, retained for backwards compatibility.

**`disk_format`**: `disk_format` describes the underlying
file format, one of `NETCDF3`, `HDF5`, `HDF4`,
`PNETCDF`, `DAP2`, `DAP4` or `UNDEFINED`. Only available if using
netcdf C library version >= 4.3.1, otherwise will always return
`UNDEFINED`.

**`parent`**: `parent` is a reference to the parent
`Group` instance. `None` for the root group or `Dataset`
instance.

**`path`**: `path` shows the location of the `Group` in
the `Dataset` in a unix directory format (the names of groups in the
hierarchy separated by forward slashes). A `Dataset` instance is the root
group, so the path is simply `'/'`.

**`keepweakref`**: If `True`, child Dimension and Variables objects only keep weak
references to the parent Dataset or Group.

**`_ncstring_attrs__`**: If `True`, all text attributes will be written as variable-length
strings.
    """
    cdef object __weakref__, _inmemory
    cdef public int _grpid
    cdef public int _isopen
    cdef Py_buffer _buffer
    cdef public groups, dimensions, variables, disk_format, path, parent,\
    file_format, data_model, cmptypes, vltypes, enumtypes,  __orthogonal_indexing__, \
    keepweakref, _ncstring_attrs__, auto_complex

    def __init__(self, filename, mode='r', clobber=True, format='NETCDF4',
                 diskless=False, persist=False, keepweakref=False,
                 memory=None, encoding=None, parallel=False,
                 comm=None, info=None, auto_complex=False, **kwargs):
        """
**`__init__(self, filename, mode="r", clobber=True, diskless=False,
persist=False, keepweakref=False, memory=None, encoding=None,
parallel=False, comm=None, info=None, format='NETCDF4')`**

`Dataset` constructor.

**`filename`**: Name of netCDF file to hold dataset. Can also
be a python 3 pathlib instance or the URL of an OpenDAP dataset. When memory is
set this is just used to set the `filepath()`.

**`mode`**: access mode. `r` means read-only; no data can be
modified. `w` means write; a new file is created, an existing file with
the same name is deleted. `x` means write, but fail if an existing
file with the same name already exists. `a` and `r+` mean append;
an existing file is opened for reading and writing, and if
the file does not exist already, one is created.
Appending `s` to modes `r`, `w`, `r+` or `a` will enable unbuffered shared
access to `NETCDF3_CLASSIC`, `NETCDF3_64BIT_OFFSET` or
`NETCDF3_64BIT_DATA` formatted files.
Unbuffered access may be useful even if you don't need shared
access, since it may be faster for programs that don't access data
sequentially. This option is ignored for `NETCDF4` and `NETCDF4_CLASSIC`
formatted files.

**`clobber`**: if `True` (default), opening a file with `mode='w'`
will clobber an existing file with the same name.  if `False`, an
exception will be raised if a file with the same name already exists.
mode=`x` is identical to mode=`w` with clobber=False.

**`format`**: underlying file format (one of `'NETCDF4'`,
`'NETCDF4_CLASSIC'`, `'NETCDF3_CLASSIC'`, `'NETCDF3_64BIT_OFFSET'` or
`'NETCDF3_64BIT_DATA'`.
Only relevant if `mode = 'w'` (if `mode = 'r','a'` or `'r+'` the file format
is automatically detected). Default `'NETCDF4'`, which means the data is
stored in an HDF5 file, using netCDF 4 API features.  Setting
`format='NETCDF4_CLASSIC'` will create an HDF5 file, using only netCDF 3
compatible API features.
netCDF 3 clients must be recompiled and linked against the netCDF 4
library to read files in `NETCDF4_CLASSIC` format.
`'NETCDF3_CLASSIC'` is the classic netCDF 3 file format that does not
handle 2+ Gb files. `'NETCDF3_64BIT_OFFSET'` is the 64-bit offset
version of the netCDF 3 file format, which fully supports 2+ GB files, but
is only compatible with clients linked against netCDF version 3.6.0 or
later. `'NETCDF3_64BIT_DATA'` is the 64-bit data version of the netCDF 3
file format, which supports 64-bit dimension sizes plus unsigned and
64 bit integer data types, but is only compatible with clients linked against
netCDF version 4.4.0 or later.

**`diskless`**: If `True`, create diskless (in-core) file.
This is a feature added to the C library after the
netcdf-4.2 release. If you need to access the memory buffer directly,
use the in-memory feature instead (see `memory` kwarg).

**`persist`**: if `diskless=True`, persist file to disk when closed
(default `False`).

**`keepweakref`**: if `True`, child Dimension and Variable instances will keep weak
references to the parent Dataset or Group object.  Default is `False`, which
means strong references will be kept.  Having Dimension and Variable instances
keep a strong reference to the parent Dataset instance, which in turn keeps a
reference to child Dimension and Variable instances, creates circular references.
Circular references complicate garbage collection, which may mean increased
memory usage for programs that create many Dataset instances with lots of
Variables. It also will result in the Dataset object never being deleted, which
means it may keep open files alive as well. Setting `keepweakref=True` allows
Dataset instances to be garbage collected as soon as they go out of scope, potentially
reducing memory usage and open file handles.  However, in many cases this is not
desirable, since the associated Variable instances may still be needed, but are
rendered unusable when the parent Dataset instance is garbage collected.

**`memory`**: if not `None`, create or open an in-memory Dataset.
If mode = `r`, the memory kwarg must contain a memory buffer object
(an object that supports the python buffer interface).
The Dataset will then be created with contents taken from this block of memory.
If mode = `w`, the memory kwarg should contain the anticipated size
of the Dataset in bytes (used only for NETCDF3 files).  A memory
buffer containing a copy of the Dataset is returned by the
`Dataset.close` method. Requires netcdf-c version 4.4.1 for mode=`r`,
netcdf-c 4.6.2 for mode=`w`. To persist the file to disk, the raw
bytes from the returned buffer can be written into a binary file.
The Dataset can also be re-opened using this memory buffer.

**`encoding`**: encoding used to encode filename string into bytes.
Default is None (`sys.getfilesystemencoding()` is used).

**`parallel`**: open for parallel access using MPI (requires mpi4py and
parallel-enabled netcdf-c and hdf5 libraries).  Default is `False`. If
`True`, `comm` and `info` kwargs may also be specified.

**`comm`**: MPI_Comm object for parallel access. Default `None`, which
means MPI_COMM_WORLD will be used.  Ignored if `parallel=False`.

**`info`**: MPI_Info object for parallel access. Default `None`, which
means MPI_INFO_NULL will be used.  Ignored if `parallel=False`.
**`auto_complex`**: if `True`, then automatically convert complex number types
        """
        cdef int grpid, ierr, numgrps, numdims, numvars
        cdef size_t initialsize
        cdef char *path
        cdef char namstring[NC_MAX_NAME+1]
        cdef int cmode, parmode
        cdef MPI_Comm mpicomm
        cdef MPI_Info mpiinfo

        memset(&self._buffer, 0, sizeof(self._buffer))

        # flag to indicate that Variables in this Dataset support orthogonal indexing.
        self.__orthogonal_indexing__ = True
        if diskless and __netcdf4libversion__ < '4.2.1':
            #diskless = False # don't raise error, instead silently ignore
            raise ValueError('diskless mode requires netcdf lib >= 4.2.1, you have %s' % __netcdf4libversion__)
        # convert filename into string (from os.path object for example),
        # encode into bytes.
        if encoding is None:
            encoding = sys.getfilesystemencoding()
        bytestr = _strencode(_tostr(filename), encoding=encoding)
        path = bytestr

        if memory is not None and mode not in ('r', 'w'):
            raise ValueError("if memory kwarg specified, mode must be 'r' or 'w'")

        if parallel:
            if not __has_parallel_support__:
                raise ValueError("parallel mode requires MPI enabled netcdf-c")
            if format not in _parallel_formats:
                raise ValueError(
                    f"parallel mode only works with the following formats: {' '.join(_parallel_formats)}"
                )

            mpicomm = (<Comm>comm).ob_mpi if comm is not None else MPI_COMM_WORLD
            mpiinfo = (<Info>info).ob_mpi if info is not None else MPI_INFO_NULL
            parmode = NC_MPIIO | _cmode_dict[format]

        self._inmemory = False
        self.auto_complex = auto_complex

        # mode='x' is the same as mode='w' with clobber=False
        if mode == "x":
            mode = "w"
            clobber = False

        # r+ is synonym for append
        if "r+" in mode:
            mode = mode.replace("r+", "a")

        # If appending and the file doesn't exist, we need to create it
        if mode in ("a", "as") and not os.path.exists(filename):
            mode = mode.replace("a", "w")

        read_mode = mode in ("r", "rs")
        write_mode = mode in ("w", "ws")
        append_mode = mode in ("a", "as")
        if not (read_mode or write_mode or append_mode):
            raise ValueError(f"mode must be 'w', 'x', 'r', 'a' or 'r+', got '{mode}'")

        # Initial value for cmode
        if write_mode:
            cmode = NC_CLOBBER if clobber else NC_NOCLOBBER
        else:
            cmode = NC_WRITE if append_mode else NC_NOWRITE

        if mode.endswith("s") and not parallel:
            cmode |= NC_SHARE
        if diskless:
            cmode |= NC_DISKLESS
            if write_mode and persist:
                cmode |= NC_WRITE | NC_PERSIST

        if write_mode:
            _set_default_format(format=format)

            if memory is not None:
                if not __has_nc_create_mem__:
                    raise NetCDF4MissingFeatureException("nc_create_mem", "4.6.2")

                # if memory is not None and mode='w', memory
                # kwarg is interpreted as advisory size.
initialsize = memory with nogil: ierr = nc_create_mem(path, 0, initialsize, &grpid) self._inmemory = True # checked in close method else: if parallel: with nogil: ierr = nc_create_par(path, cmode | parmode, mpicomm, mpiinfo, &grpid) else: with nogil: ierr = nc_create(path, cmode, &grpid) elif read_mode and memory is not None: if not __has_nc_open_mem__: raise NetCDF4MissingFeatureException("nc_open_mem", "4.4.1") # Store reference to memory result = PyObject_GetBuffer( memory, &self._buffer, PyBUF_SIMPLE | PyBUF_ANY_CONTIGUOUS ) if result != 0: raise ValueError(f"Unable to retrieve Buffer from {memory}") with nogil: ierr = nc_open_mem( path, 0, self._buffer.len, self._buffer.buf, &grpid ) else: # Read or append mode, flags already all set in cmode if parallel: with nogil: ierr = nc_open_par(path, cmode | NC_MPIIO, mpicomm, mpiinfo, &grpid) else: with nogil: ierr = nc_open(path, cmode, &grpid) _ensure_nc_success(ierr, err_cls=OSError, filename=path) # data model and file format attributes self.data_model = _get_format(grpid) # data_model attribute used to be file_format (versions < 1.0.8), retain # file_format for backwards compatibility. self.file_format = self.data_model self.disk_format = _get_full_format(grpid) self._grpid = grpid self._isopen = 1 self.path = '/' self.parent = None self.keepweakref = keepweakref self._ncstring_attrs__ = False # get compound, vlen and enum types in the root Group. self.cmptypes, self.vltypes, self.enumtypes = _get_types(self) # get dimensions in the root group. self.dimensions = _get_dims(self) # get variables in the root Group. self.variables = _get_vars(self, self.auto_complex) # get groups in the root Group. if self.data_model == 'NETCDF4': self.groups = _get_grps(self) else: self.groups = dict() # these allow Dataset objects to be used via a "with" statement. def __enter__(self): return self def __exit__(self,atype,value,traceback): self.close() def __getitem__(self, elem): # return variable or group defined in relative path. # split out group names in unix path. elem = posixpath.normpath(elem) # last name in path, could be a variable or group dirname, lastname = posixpath.split(elem) nestedgroups = dirname.split('/') group = self # iterate over groups in path. for g in nestedgroups: if g: group = group.groups[g] # return last one, either a group or a variable. if lastname in group.groups: return group.groups[lastname] elif lastname in group.variables: return group.variables[lastname] else: raise IndexError('%s not found in %s' % (lastname,group.path)) def __iter__(self): raise TypeError( "Dataset is not iterable. Consider iterating on Dataset.variables." ) def __contains__(self, key): raise TypeError( "Dataset does not support membership operations. Perhaps try 'varname in" " dataset.variables' or 'dimname in dataset.dimensions'." ) def filepath(self,encoding=None): """**`filepath(self,encoding=None)`** Get the file system path (or the opendap URL) which was used to open/create the Dataset. Requires netcdf >= 4.1.2. The path is decoded into a string using `sys.getfilesystemencoding()` by default, this can be changed using the `encoding` kwarg. 
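For example, a short sketch (the file name is an arbitrary placeholder):

```python
from netCDF4 import Dataset

nc = Dataset("example.nc", "w")
print(nc.filepath())  # -> example.nc
nc.close()
```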
""" if not __has_nc_inq_path__: raise NetCDF4MissingFeatureException("filepath method", "4.1.2") cdef int ierr cdef size_t pathlen cdef char *c_path if encoding is None: encoding = sys.getfilesystemencoding() with nogil: ierr = nc_inq_path(self._grpid, &pathlen, NULL) _ensure_nc_success(ierr) c_path = malloc(sizeof(char) * (pathlen + 1)) if not c_path: raise MemoryError() try: with nogil: ierr = nc_inq_path(self._grpid, &pathlen, c_path) _ensure_nc_success(ierr) py_path = c_path[:pathlen] # makes a copy of pathlen bytes from c_string finally: free(c_path) return py_path.decode(encoding) def __repr__(self): return self.__str__() def __str__(self): ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(_tostr(dimname)+'(%s)'%len(self.dimensions[dimname])\ for dimname in self.dimensions.keys()) varnames = tuple(\ [_tostr(self.variables[varname].dtype)+' '+_tostr(varname)+ ((_tostr(self.variables[varname].dimensions)).replace(",)",")")).replace("'","") for varname in self.variables.keys()]) grpnames = tuple(_tostr(grpname) for grpname in self.groups.keys()) if self.path == '/': ncdump.append('root group (%s data model, file format %s):' % (self.data_model, self.disk_format)) else: ncdump.append('group %s:' % self.path) for name in self.ncattrs(): ncdump.append(' %s: %s' % (name, self.getncattr(name))) ncdump.append(' dimensions(sizes): %s' % ', '.join(dimnames)) ncdump.append(' variables(dimensions): %s' % ', '.join(varnames)) ncdump.append(' groups: %s' % ', '.join(grpnames)) return '\n'.join(ncdump) def _close(self, check_err): cdef int ierr with nogil: ierr = nc_close(self._grpid) if check_err: _ensure_nc_success(ierr) self._isopen = 0 # indicates file already closed, checked by __dealloc__ # Only release buffer if close succeeded # per impl of PyBuffer_Release: https://github.com/python/cpython/blob/master/Objects/abstract.c#L667 # view.obj is checked, ref on obj is decremented and obj will be null'd out PyBuffer_Release(&self._buffer) def _close_mem(self, check_err): cdef int ierr cdef NC_memio memio with nogil: ierr = nc_close_memio(self._grpid, &memio) if check_err: _ensure_nc_success(ierr) self._isopen = 0 PyBuffer_Release(&self._buffer) # membuf_fromptr from membuf.pyx - creates a python memoryview # from a raw pointer without making a copy. return memview_fromptr(memio.memory, memio.size) def close(self): """**`close(self)`** Close the Dataset. """ if __has_nc_create_mem__ and self._inmemory: return self._close_mem(True) self._close(True) def isopen(self): """ **`isopen(self)`** Is the Dataset open or closed? """ return bool(self._isopen) def __dealloc__(self): # close file when there are no references to object left and clear the cache. if self.get_variables_by_attributes: self.get_variables_by_attributes.cache_clear() if self._isopen: self._close(False) def __reduce__(self): # raise error is user tries to pickle a Dataset object. raise NotImplementedError('Dataset is not picklable') def sync(self): """ **`sync(self)`** Writes all buffered data in the `Dataset` to the disk file.""" cdef int ierr with nogil: ierr = nc_sync(self._grpid) _ensure_nc_success(ierr) def _redef(self): cdef int ierr with nogil: ierr = nc_redef(self._grpid) def _enddef(self): cdef int ierr with nogil: ierr = nc_enddef(self._grpid) def set_fill_on(self): """ **`set_fill_on(self)`** Sets the fill mode for a `Dataset` open for writing to `on`. This causes data to be pre-filled with fill values. 
The fill values can be controlled by the variable's `_FillValue` attribute, but it is usually sufficient to use the netCDF default `_FillValue` (defined separately for each variable type). The default behavior of the netCDF library corresponds to `set_fill_on`. Data which are equal to the `_FillValue` indicate that the variable was created, but never written to.""" cdef int oldmode, ierr with nogil: ierr = nc_set_fill(self._grpid, NC_FILL, &oldmode) _ensure_nc_success(ierr) def set_fill_off(self): """ **`set_fill_off(self)`** Sets the fill mode for a `Dataset` open for writing to `off`. This will prevent the data from being pre-filled with fill values, which may result in some performance improvements. However, you must then make sure the data is actually written before being read.""" cdef int oldmode, ierr with nogil: ierr = nc_set_fill(self._grpid, NC_NOFILL, &oldmode) _ensure_nc_success(ierr) def createDimension(self, dimname, size=None): """ **`createDimension(self, dimname, size=None)`** Creates a new dimension with the given `dimname` and `size`. `size` must be a positive integer or `None`, which stands for "unlimited" (default is `None`). Specifying a size of 0 also results in an unlimited dimension. The return value is the `Dimension` class instance describing the new dimension. To determine the current maximum size of the dimension, use the `len` function on the `Dimension` instance. To determine if a dimension is 'unlimited', use the `Dimension.isunlimited` method of the `Dimension` instance.""" self.dimensions[dimname] = Dimension(self, dimname, size=size) return self.dimensions[dimname] def renameDimension(self, oldname, newname): """ **`renameDimension(self, oldname, newname)`** rename a `Dimension` named `oldname` to `newname`.""" cdef char *namstring cdef Dimension dim bytestr = _strencode(newname) namstring = bytestr if self.data_model != 'NETCDF4': self._redef() try: dim = self.dimensions[oldname] except KeyError: raise KeyError('%s not a valid dimension name' % oldname) with nogil: ierr = nc_rename_dim(self._grpid, dim._dimid, namstring) if self.data_model != 'NETCDF4': self._enddef() _ensure_nc_success(ierr) # remove old key from dimensions dict. self.dimensions.pop(oldname) # add new key. self.dimensions[newname] = dim # Variable.dimensions is determined by a method that # looks in the file, so no need to manually update. def createCompoundType(self, datatype, datatype_name): """ **`createCompoundType(self, datatype, datatype_name)`** Creates a new compound data type named `datatype_name` from the numpy dtype object `datatype`. ***Note***: If the new compound data type contains other compound data types (i.e. it is a 'nested' compound type, where not all of the elements are homogeneous numeric data types), then the 'inner' compound types **must** be created first. The return value is the `CompoundType` class instance describing the new datatype.""" self.cmptypes[datatype_name] = CompoundType(self, datatype,\ datatype_name) return self.cmptypes[datatype_name] def createVLType(self, datatype, datatype_name): """ **`createVLType(self, datatype, datatype_name)`** Creates a new VLEN data type named `datatype_name` from a numpy dtype object `datatype`.
The return value is the `VLType` class instance describing the new datatype.""" self.vltypes[datatype_name] = VLType(self, datatype, datatype_name) return self.vltypes[datatype_name] def createEnumType(self, datatype, datatype_name, enum_dict): """ **`createEnumType(self, datatype, datatype_name, enum_dict)`** Creates a new Enum data type named `datatype_name` from a numpy integer dtype object `datatype`, and a python dictionary defining the enum fields and values. The return value is the `EnumType` class instance describing the new datatype.""" self.enumtypes[datatype_name] = EnumType(self, datatype, datatype_name, enum_dict) return self.enumtypes[datatype_name] def createVariable(self, varname, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, szip_coding='nn',szip_pixels_per_block=8, blosc_shuffle=1,fletcher32=False, contiguous=False, chunksizes=None, endian='native', least_significant_digit=None, significant_digits=None,quantize_mode='BitGroom',fill_value=None, chunk_cache=None): """ **`createVariable(self, varname, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None, szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1, endian='native', least_significant_digit=None, significant_digits=None, quantize_mode='BitGroom', fill_value=None, chunk_cache=None)`** Creates a new variable with the given `varname`, `datatype`, and `dimensions`. If dimensions are not given, the variable is assumed to be a scalar. If `varname` is specified as a path, using forward slashes as in unix to separate components, then intermediate groups will be created as necessary. For example, `createVariable('/GroupA/GroupB/VarC', float, ('x','y'))` will create groups `GroupA` and `GroupA/GroupB`, plus the variable `GroupA/GroupB/VarC`, if the preceding groups don't already exist. The `datatype` can be a numpy datatype object, or a string that describes a numpy dtype object (like the `dtype.str` attribute of a numpy array). Supported specifiers include: `'S1' or 'c' (NC_CHAR), 'i1' or 'b' or 'B' (NC_BYTE), 'u1' (NC_UBYTE), 'i2' or 'h' or 's' (NC_SHORT), 'u2' (NC_USHORT), 'i4' or 'i' or 'l' (NC_INT), 'u4' (NC_UINT), 'i8' (NC_INT64), 'u8' (NC_UINT64), 'f4' or 'f' (NC_FLOAT), 'f8' or 'd' (NC_DOUBLE)`. `datatype` can also be a `CompoundType` instance (for a structured, or compound array), a `VLType` instance (for a variable-length array), or the python `str` builtin (for a variable-length string array). Numpy string and unicode datatypes with length greater than one are aliases for `str`. Data from netCDF variables is presented to python as numpy arrays with the corresponding data type. `dimensions` must be a tuple containing `Dimension` instances and/or dimension names (strings) that have been defined previously using `Dataset.createDimension`. The default value is an empty tuple, which means the variable is a scalar. If the optional keyword argument `compression` is set, the data will be compressed in the netCDF file using the specified compression algorithm. Currently `zlib`,`szip`,`zstd`,`bzip2`,`blosc_lz`,`blosc_lz4`,`blosc_lz4hc`, `blosc_zlib` and `blosc_zstd` are supported. Default is `None` (no compression). All of the compressors except `zlib` and `szip` use the HDF5 plugin architecture. If the optional keyword `zlib` is `True`, the data will be compressed in the netCDF file using zlib compression (default `False`). The use of this option is deprecated in favor of `compression='zlib'`.
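As an illustrative sketch of the compression kwargs described above (dimension and variable names are placeholders):

```python
from netCDF4 import Dataset

nc = Dataset("compressed.nc", "w")
nc.createDimension("time", None)   # unlimited
nc.createDimension("lat", 180)
nc.createDimension("lon", 360)
# zlib compression at level 4 with the HDF5 shuffle filter
var = nc.createVariable(
    "temp", "f4", ("time", "lat", "lon"),
    compression="zlib", complevel=4, shuffle=True,
)
nc.close()
```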
The optional keyword `complevel` is an integer between 0 and 9 describing the level of compression desired (default 4). Ignored if `compression=None`. A value of zero disables compression. If the optional keyword `shuffle` is `True`, the HDF5 shuffle filter will be applied before compressing the data with zlib (default `True`). This significantly improves compression. Ignored if `zlib=False`. The optional kwarg `blosc_shuffle` is ignored unless the blosc compressor is used. `blosc_shuffle` can be 0 (no shuffle), 1 (byte-wise shuffle) or 2 (bit-wise shuffle). Default is 1. The optional kwargs `szip_coding` and `szip_pixels_per_block` are ignored unless the szip compressor is used. `szip_coding` can be `ec` (entropy coding) or `nn` (nearest neighbor coding). Default is `nn`. `szip_pixels_per_block` can be 4, 8, 16 or 32 (default 8). If the optional keyword `fletcher32` is `True`, the Fletcher32 HDF5 checksum algorithm is activated to detect errors. Default `False`. If the optional keyword `contiguous` is `True`, the variable data is stored contiguously on disk. Default `False`. Setting to `True` for a variable with an unlimited dimension will trigger an error. Fixed size variables (with no unlimited dimension) with no compression filters are contiguous by default. The optional keyword `chunksizes` can be used to manually specify the HDF5 chunksizes for each dimension of the variable. A detailed discussion of HDF chunking and I/O performance is available [here](https://support.hdfgroup.org/HDF5/doc/Advanced/Chunking). The default chunking scheme in the netcdf-c library is discussed [here](https://www.unidata.ucar.edu/software/netcdf/documentation/NUG/netcdf_perf_chunking.html). Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file. `chunksizes` cannot be set if `contiguous=True`. The optional keyword `endian` can be used to control whether the data is stored in little or big endian format on disk. Possible values are `little, big` or `native` (default). The library will automatically handle endian conversions when the data is read, but if the data is always going to be read on a computer with the opposite format as the one used to create the file, there may be some performance advantage to be gained by setting the endian-ness. The optional keyword `fill_value` can be used to override the default netCDF `_FillValue` (the value that the variable gets filled with before any data is written to it, defaults given in the dict `netCDF4.default_fillvals`). If fill_value is set to `False`, then the variable is not pre-filled. If the optional keyword parameters `least_significant_digit` or `significant_digits` are specified, variable data will be truncated (quantized). In conjunction with `compression='zlib'` this produces 'lossy', but significantly more efficient compression. For example, if `least_significant_digit=1`, data will be quantized using `numpy.around(scale*data)/scale`, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). From the [PSL metadata conventions](http://www.esrl.noaa.gov/psl/data/gridded/conventions/cdc_netcdf_standard.shtml): "least_significant_digit -- power of ten of the smallest decimal place in unpacked data that is a reliable value." Default is `None`, or no quantization, or 'lossless' compression.
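For example, a short sketch of lossy-plus-zlib compression via `least_significant_digit` (all names are placeholders):

```python
import numpy as np
from netCDF4 import Dataset

nc = Dataset("quantized.nc", "w")
nc.createDimension("x", 100)
# retain a precision of roughly 0.1; combined with zlib this compresses much better
v = nc.createVariable("data", "f4", ("x",),
                      compression="zlib", least_significant_digit=1)
v[:] = np.random.uniform(size=100)
nc.close()
```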
If `significant_digits=3` then the data will be quantized so that three significant digits are retained, independent of the floating point exponent. The keyword argument `quantize_mode` controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. This `significant_digits` kwarg is only available with netcdf-c >= 4.9.0, and only works with `NETCDF4` or `NETCDF4_CLASSIC` formatted files. When creating variables in a `NETCDF4` or `NETCDF4_CLASSIC` formatted file, HDF5 creates something called a 'chunk cache' for each variable. The default size of the chunk cache may be large enough to completely fill available memory when creating thousands of variables. The optional keyword `chunk_cache` allows you to reduce (or increase) the size of the default chunk cache when creating a variable. The setting only persists as long as the Dataset is open - you can use the set_var_chunk_cache method to change it the next time the Dataset is opened. Warning - messing with this parameter can seriously degrade performance. The return value is the `Variable` class instance describing the new variable. A list of names corresponding to netCDF variable attributes can be obtained with the `Variable` method `Variable.ncattrs`. A dictionary containing all the netCDF attribute name/value pairs is provided by the `__dict__` attribute of a `Variable` instance. `Variable` instances behave much like array objects. Data can be assigned to or retrieved from a variable with indexing and slicing operations on the `Variable` instance. A `Variable` instance has six standard attributes: `dimensions, dtype, shape, ndim, name` and `least_significant_digit`. Application programs should never modify these attributes. The `dimensions` attribute is a tuple containing the names of the dimensions associated with this variable. The `dtype` attribute is a string describing the variable's data type (`i4`, `f8`, `S1`, etc). The `shape` attribute is a tuple describing the current sizes of all the variable's dimensions. The `name` attribute is a string containing the name of the Variable instance. The `least_significant_digit` attribute describes the power of ten of the smallest decimal place in the data that contains a reliable value. Data is truncated to this decimal place when it is assigned to the `Variable` instance. The `ndim` attribute is the number of variable dimensions.""" # if varname specified as a path, split out group names. varname = posixpath.normpath(varname) dirname, varname = posixpath.split(varname) # varname is last. # create parent groups (like mkdir -p). if not dirname: group = self else: group = self.createGroup(dirname) # if dimensions is a single string or Dimension instance, # convert to a tuple. # This prevents a common error that occurs when # dimensions = 'lat' instead of ('lat',) if isinstance(dimensions, (str, bytes, Dimension)): dimensions = dimensions, # convert elements of dimensions tuple to Dimension # instances if they are strings. # _find_dim looks for dimension in this group, and if not # found there, looks in parent (and its parent, etc, back to root). dimensions =\ tuple(_find_dim(group,d) if isinstance(d,(str,bytes)) else d for d in dimensions) # create variable.
group.variables[varname] = Variable(group, varname, datatype, dimensions=dimensions, compression=compression, zlib=zlib, complevel=complevel, shuffle=shuffle, szip_coding=szip_coding, szip_pixels_per_block=szip_pixels_per_block, blosc_shuffle=blosc_shuffle, fletcher32=fletcher32, contiguous=contiguous, chunksizes=chunksizes, endian=endian, least_significant_digit=least_significant_digit, significant_digits=significant_digits,quantize_mode=quantize_mode,fill_value=fill_value, chunk_cache=chunk_cache) return group.variables[varname] def renameVariable(self, oldname, newname): """ **`renameVariable(self, oldname, newname)`** rename a `Variable` named `oldname` to `newname`""" cdef char *namstring cdef Variable var try: var = self.variables[oldname] except KeyError: raise KeyError('%s not a valid variable name' % oldname) bytestr = _strencode(newname) namstring = bytestr if self.data_model != 'NETCDF4': self._redef() with nogil: ierr = nc_rename_var(self._grpid, var._varid, namstring) if self.data_model != 'NETCDF4': self._enddef() _ensure_nc_success(ierr) # remove old key from variables dict. self.variables.pop(oldname) # add new key. self.variables[newname] = var def createGroup(self, groupname): """ **`createGroup(self, groupname)`** Creates a new `Group` with the given `groupname`. If `groupname` is specified as a path, using forward slashes as in unix to separate components, then intermediate groups will be created as necessary (analogous to `mkdir -p` in unix). For example, `createGroup('/GroupA/GroupB/GroupC')` will create `GroupA`, `GroupA/GroupB`, and `GroupA/GroupB/GroupC`, if they don't already exist. If the specified path describes a group that already exists, no error is raised. The return value is a `Group` class instance.""" # if group specified as a path, split out group names groupname = posixpath.normpath(groupname) nestedgroups = groupname.split('/') group = self # loop over group names, create parent groups if they do not already # exist. for g in nestedgroups: if not g: continue if g not in group.groups: group.groups[g] = Group(group, g) group = group.groups[g] # if group already exists, just return the group # (prior to 1.1.8, this would have raised an error) return group def ncattrs(self): """ **`ncattrs(self)`** return netCDF global attribute names for this `Dataset` or `Group` in a list.""" return _get_att_names(self._grpid, NC_GLOBAL) def setncattr(self,name,value): """ **`setncattr(self,name,value)`** set a netCDF dataset or group attribute using name,value pair. Use if you need to set a netCDF attribute with the same name as one of the reserved python attributes.""" cdef nc_type xtype xtype=-99 if self.data_model != 'NETCDF4': self._redef() _set_att(self, NC_GLOBAL, name, value, xtype=xtype, force_ncstring=self._ncstring_attrs__) if self.data_model != 'NETCDF4': self._enddef() def setncattr_string(self,name,value): """ **`setncattr_string(self,name,value)`** set a netCDF dataset or group string attribute using name,value pair. Use if you need to ensure that a netCDF attribute is created with type `NC_STRING` if the file format is `NETCDF4`.""" cdef nc_type xtype xtype=-99 if self.data_model != 'NETCDF4': msg='file format does not support NC_STRING attributes' raise OSError(msg) _set_att(self, NC_GLOBAL, name, value, xtype=xtype, force_ncstring=True) def setncatts(self,attdict): """ **`setncatts(self,attdict)`** set a bunch of netCDF dataset or group attributes at once using a python dictionary.
This may be faster when setting a lot of attributes for a `NETCDF3` formatted file, since nc_redef/nc_enddef is not called in between setting each attribute""" if self.data_model != 'NETCDF4': self._redef() for name, value in attdict.items(): _set_att(self, NC_GLOBAL, name, value) if self.data_model != 'NETCDF4': self._enddef() def getncattr(self,name,encoding='utf-8'): """ **`getncattr(self,name)`** retrieve a netCDF dataset or group attribute. Use if you need to get a netCDF attribute with the same name as one of the reserved python attributes. optional kwarg `encoding` can be used to specify the character encoding of a string attribute (default is `utf-8`).""" return _get_att(self, NC_GLOBAL, name, encoding=encoding) def __delattr__(self,name): # if it's a netCDF attribute, remove it if name not in _private_atts: self.delncattr(name) else: raise AttributeError( "'%s' is one of the reserved attributes %s, cannot delete. Use delncattr instead." % (name, tuple(_private_atts))) def delncattr(self, name): """ **`delncattr(self,name)`** delete a netCDF dataset or group attribute. Use if you need to delete a netCDF attribute with the same name as one of the reserved python attributes.""" cdef char *attname cdef int ierr bytestr = _strencode(name) attname = bytestr if self.data_model != 'NETCDF4': self._redef() with nogil: ierr = nc_del_att(self._grpid, NC_GLOBAL, attname) if self.data_model != 'NETCDF4': self._enddef() _ensure_nc_success(ierr) def __setattr__(self,name,value): # if name in _private_atts, it is stored at the python # level and not in the netCDF file. if name not in _private_atts: self.setncattr(name, value) elif not name.endswith('__'): if hasattr(self,name): raise AttributeError( "'%s' is one of the reserved attributes %s, cannot rebind. Use setncattr instead." % (name, tuple(_private_atts))) else: self.__dict__[name]=value def __getattr__(self,name): # if name in _private_atts, it is stored at the python # level and not in the netCDF file. if name.startswith('__') and name.endswith('__'): # if __dict__ requested, return a dict with netCDF attributes. if name == '__dict__': names = self.ncattrs() values = [] for name in names: values.append(_get_att(self, NC_GLOBAL, name)) return dict(zip(names, values)) else: raise AttributeError elif name in _private_atts: return self.__dict__[name] else: return self.getncattr(name) def renameAttribute(self, oldname, newname): """ **`renameAttribute(self, oldname, newname)`** rename a `Dataset` or `Group` attribute named `oldname` to `newname`.""" cdef char *oldnamec cdef char *newnamec cdef int ierr bytestr = _strencode(oldname) oldnamec = bytestr bytestr = _strencode(newname) newnamec = bytestr with nogil: ierr = nc_rename_att(self._grpid, NC_GLOBAL, oldnamec, newnamec) _ensure_nc_success(ierr) def renameGroup(self, oldname, newname): """ **`renameGroup(self, oldname, newname)`** rename a `Group` named `oldname` to `newname` (requires netcdf >= 4.3.1).""" cdef char *newnamec cdef int grpid cdef int ierr if not __has_rename_grp__: raise ValueError( "renameGroup method not enabled. To enable, install Cython, make sure you have " "version 4.3.1 or higher of the netcdf C lib, and rebuild netcdf4-python." ) bytestr = _strencode(newname) newnamec = bytestr try: grp = self.groups[oldname] grpid = grp._grpid except KeyError: raise KeyError('%s not a valid group name' % oldname) with nogil: ierr = nc_rename_grp(grpid, newnamec) _ensure_nc_success(ierr) # remove old key from groups dict. self.groups.pop(oldname) # add new key.
self.groups[newname] = grp def set_auto_chartostring(self, value): """ **`set_auto_chartostring(self, True_or_False)`** Call `Variable.set_auto_chartostring` for all variables contained in this `Dataset` or `Group`, as well as for all variables in all its subgroups. **`True_or_False`**: Boolean determining if automatic conversion of all character arrays <--> string arrays should be performed for character variables (variables of type `NC_CHAR` or `S1`) with the `_Encoding` attribute set. ***Note***: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour. """ # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_auto_chartostring(value) for groups in _walk_grps(self): for group in groups: for var in group.variables.values(): var.set_auto_chartostring(value) def set_auto_maskandscale(self, value): """ **`set_auto_maskandscale(self, True_or_False)`** Call `Variable.set_auto_maskandscale` for all variables contained in this `Dataset` or `Group`, as well as for all variables in all its subgroups. **`True_or_False`**: Boolean determining if automatic conversion to masked arrays and variable scaling shall be applied for all variables. ***Note***: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour. """ # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_auto_maskandscale(value) for groups in _walk_grps(self): for group in groups: for var in group.variables.values(): var.set_auto_maskandscale(value) def set_auto_mask(self, value): """ **`set_auto_mask(self, True_or_False)`** Call `Variable.set_auto_mask` for all variables contained in this `Dataset` or `Group`, as well as for all variables in all its subgroups. Only affects Variables with primitive or enum types (not compound or vlen Variables). **`True_or_False`**: Boolean determining if automatic conversion to masked arrays shall be applied for all variables. ***Note***: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour. """ # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_auto_mask(value) for groups in _walk_grps(self): for group in groups: for var in group.variables.values(): var.set_auto_mask(value) def set_auto_scale(self, value): """ **`set_auto_scale(self, True_or_False)`** Call `Variable.set_auto_scale` for all variables contained in this `Dataset` or `Group`, as well as for all variables in all its subgroups. **`True_or_False`**: Boolean determining if automatic variable scaling shall be applied for all variables. ***Note***: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour. 
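For example, a minimal sketch that turns off automatic scaling for every existing variable in a file (the file and variable names are placeholders):

```python
from netCDF4 import Dataset

nc = Dataset("example.nc", "r")
nc.set_auto_scale(False)       # scale_factor/add_offset no longer applied
raw = nc.variables["temp"][:]  # assumes a variable named 'temp' exists
nc.close()
```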
""" # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_auto_scale(value) for groups in _walk_grps(self): for group in groups: for var in group.variables.values(): var.set_auto_scale(value) def set_always_mask(self, value): """ **`set_always_mask(self, True_or_False)`** Call `Variable.set_always_mask` for all variables contained in this `Dataset` or `Group`, as well as for all variables in all its subgroups. **`True_or_False`**: Boolean determining if automatic conversion of masked arrays with no missing values to regular numpy arrays shall be applied for all variables. Default True. Set to False to restore the default behaviour in versions prior to 1.4.1 (numpy array returned unless missing values are present, otherwise masked array returned). ***Note***: Calling this function only affects existing variables. Variables created after calling this function will follow the default behaviour. """ # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_always_mask(value) for groups in _walk_grps(self): for group in groups: for var in group.variables.values(): var.set_always_mask(value) def set_ncstring_attrs(self, value): """ **`set_ncstring_attrs(self, True_or_False)`** Call `Variable.set_ncstring_attrs` for all variables contained in this `Dataset` or `Group`, as well as for all its subgroups and their variables. **`True_or_False`**: Boolean determining if all string attributes are created as variable-length NC_STRINGs, (if True), or if ascii text attributes are stored as NC_CHARs (if False; default) ***Note***: Calling this function only affects newly created attributes of existing (sub-) groups and their variables. """ self._ncstring_attrs__ = bool(value) # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for var in _vars.values(): var.set_ncstring_attrs(value) for groups in _walk_grps(self): for group in groups: group.set_ncstring_attrs(value) # recurse into subgroups... @functools.lru_cache(maxsize=128) def get_variables_by_attributes(self, **kwargs): """ **`get_variables_by_attributes(self, **kwargs)`** Returns a list of variables that match specific conditions. Can pass in key=value parameters and variables are returned that contain all of the matches. For example, ```python >>> # Get variables with x-axis attribute. >>> vs = nc.get_variables_by_attributes(axis='X') >>> # Get variables with matching "standard_name" attribute >>> vs = nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity') ``` Can pass in key=callable parameter and variables are returned if the callable returns True. The callable should accept a single parameter, the attribute value. None is given as the attribute value when the attribute does not exist on the variable. 
For example, ```python >>> # Get Axis variables >>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T']) >>> # Get variables that don't have an "axis" attribute >>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None) >>> # Get variables that have a "grid_mapping" attribute >>> vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None) ``` """ vs = [] has_value_flag = False # this is a hack to make inheritance work in MFDataset # (which stores variables in _vars) _vars = self.variables if _vars is None: _vars = self._vars for vname in _vars: var = _vars[vname] for k, v in kwargs.items(): if callable(v): has_value_flag = v(getattr(var, k, None)) if has_value_flag is False: break elif hasattr(var, k) and getattr(var, k) == v: has_value_flag = True else: has_value_flag = False break if has_value_flag is True: vs.append(_vars[vname]) return vs def _getname(self): # private method to get name associated with instance. cdef int ierr cdef char namstring[NC_MAX_NAME+1] with nogil: ierr = nc_inq_grpname(self._grpid, namstring) _ensure_nc_success(ierr) return namstring.decode('utf-8') property name: """string name of Group instance""" def __get__(self): return self._getname() def __set__(self,value): raise AttributeError("name cannot be altered") @staticmethod def fromcdl(cdlfilename,ncfilename=None,mode='a',format='NETCDF4'): """ **`fromcdl(cdlfilename, ncfilename=None, mode='a',format='NETCDF4')`** call [ncgen][ncgen] via subprocess to create Dataset from [CDL][cdl] text representation. Requires [ncgen][ncgen] to be installed and in `$PATH`. **`cdlfilename`**: CDL file. **`ncfilename`**: netCDF file to create. If not given, CDL filename with suffix replaced by `.nc` is used. **`mode`**: Access mode to open Dataset (Default `'a'`). **`format`**: underlying file format to use (one of `'NETCDF4'`, `'NETCDF4_CLASSIC'`, `'NETCDF3_CLASSIC'`, `'NETCDF3_64BIT_OFFSET'` or `'NETCDF3_64BIT_DATA'`). Default `'NETCDF4'`. Dataset instance for `ncfilename` is returned. [ncgen]: https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_utilities_guide.html#ncgen_guide [cdl]: https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_utilities_guide.html#cdl_guide """ filepath = pathlib.Path(cdlfilename) if ncfilename is None: ncfilename = filepath.with_suffix('.nc') else: ncfilename = pathlib.Path(ncfilename) formatcodes = {'NETCDF4': 4, 'NETCDF4_CLASSIC': 7, 'NETCDF3_CLASSIC': 3, 'NETCDF3_64BIT': 6, # legacy 'NETCDF3_64BIT_OFFSET': 6, 'NETCDF3_64BIT_DATA': 5} if format not in formatcodes: raise ValueError('illegal format requested') if not filepath.exists(): raise FileNotFoundError(filepath) if ncfilename.exists(): raise FileExistsError(ncfilename) ncgenargs="-knc%s" % formatcodes[format] subprocess.run(["ncgen", ncgenargs, "-o", str(ncfilename), str(filepath)], check=True) return Dataset(ncfilename, mode=mode) def tocdl(self,coordvars=False,data=False,outfile=None): """ **`tocdl(self, coordvars=False, data=False, outfile=None)`** call [ncdump][ncdump] via subprocess to create [CDL][cdl] text representation of Dataset. Requires [ncdump][ncdump] to be installed and in `$PATH`. **`coordvars`**: include coordinate variable data (via `ncdump -c`). Default False **`data`**: if True, write out variable data (Default False). **`outfile`**: If not None, file to output ncdump to. Default is to return a string.
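For example, a round-trip sketch using `tocdl` and `fromcdl` together (file names are placeholders; `ncdump` and `ncgen` must be on `$PATH`):

```python
from netCDF4 import Dataset

nc = Dataset("example.nc", "w")
nc.createDimension("x", 2)
nc.title = "demo"
cdl = nc.tocdl()   # header-only CDL as a string (data=False is the default)
nc.close()

with open("example.cdl", "w") as f:
    f.write(cdl)
nc = Dataset.fromcdl("example.cdl", ncfilename="copy.nc")
```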
[ncdump]: https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_utilities_guide.html#ncdump_guide [cdl]: https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_utilities_guide.html#cdl_guide """ self.sync() if coordvars: ncdumpargs = "-cs" else: ncdumpargs = "-s" if not data: ncdumpargs += "h" result=subprocess.run(["ncdump", ncdumpargs, self.filepath()], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') if outfile is None: return result.stdout else: f = open(outfile,'w') f.write(result.stdout) f.close() def has_blosc_filter(self): """**`has_blosc_filter(self)`** returns True if blosc compression filter is available """ #if __has_blosc_support__: # return True #else: # return False cdef int ierr with nogil: ierr = nc_inq_filter_avail(self._grpid, H5Z_FILTER_BLOSC) return ierr == 0 def has_zstd_filter(self): """**`has_zstd_filter(self)`** returns True if zstd compression filter is available """ #if __has_zstandard_support__: # return True #else: # return False cdef int ierr with nogil: ierr = nc_inq_filter_avail(self._grpid, H5Z_FILTER_ZSTD) return ierr == 0 def has_bzip2_filter(self): """**`has_bzip2_filter(self)`** returns True if bzip2 compression filter is available """ #if __has_bzip2_support__: # return True #else: # return False cdef int ierr with nogil: ierr = nc_inq_filter_avail(self._grpid, H5Z_FILTER_BZIP2) return ierr == 0 def has_szip_filter(self): """**`has_szip_filter(self)`** returns True if szip compression filter is available """ #if not __has_ncfilter__: # return __has_szip_support__ #if __has_szip_support__: # return True #else: # return False cdef int ierr with nogil: ierr = nc_inq_filter_avail(self._grpid, H5Z_FILTER_SZIP) return ierr == 0 cdef class Group(Dataset): """ Groups define a hierarchical namespace within a netCDF file. They are analogous to directories in a unix filesystem. Each `Group` behaves like a `Dataset` within a Dataset, and can contain its own variables, dimensions and attributes (and other Groups). See `Group.__init__` for more details. `Group` inherits from `Dataset`, so all the `Dataset` class methods and variables are available to a `Group` instance (except the `close` method). Additional read-only class variables: **`name`**: String describing the group name. """ def __init__(self, parent, name, **kwargs): """ **`__init__(self, parent, name)`** `Group` constructor. **`parent`**: `Group` instance for the parent group. If being created in the root group, use a `Dataset` instance. **`name`**: Name of the group. ***Note***: `Group` instances should be created using the `Dataset.createGroup` method of a `Dataset` instance, or another `Group` instance, not using this class directly. """ cdef char *groupname cdef int ierr, grpid # flag to indicate that Variables in this Group support orthogonal indexing. self.__orthogonal_indexing__ = True # set data_model and file_format attributes. self.data_model = parent.data_model self.file_format = parent.file_format # full path to Group. self.path = posixpath.join(parent.path, name) # parent group. self.parent = parent # propagate weak reference setting from parent. self.keepweakref = parent.keepweakref # propagate _ncstring_attrs__ setting from parent. self._ncstring_attrs__ = parent._ncstring_attrs__ self.auto_complex = parent.auto_complex if 'id' in kwargs: self._grpid = kwargs['id'] # get compound, vlen and enum types in this Group. self.cmptypes, self.vltypes, self.enumtypes = _get_types(self) # get dimensions in this Group.
self.dimensions = _get_dims(self) # get variables in this Group. self.variables = _get_vars(self, self.auto_complex) # get groups in this Group. self.groups = _get_grps(self) else: bytestr = _strencode(name) groupname = bytestr grpid = parent._grpid with nogil: ierr = nc_def_grp(grpid, groupname, &self._grpid) _ensure_nc_success(ierr) self.cmptypes = dict() self.vltypes = dict() self.enumtypes = dict() self.dimensions = dict() self.variables = dict() self.groups = dict() def close(self): """ **`close(self)`** overrides `Dataset` close method which does not apply to `Group` instances, raises OSError.""" raise OSError('cannot close a `Group` (only applies to Dataset)') cdef class Dimension: """ A netCDF `Dimension` is used to describe the coordinates of a `Variable`. See `Dimension.__init__` for more details. The current maximum size of a `Dimension` instance can be obtained by calling the python `len` function on the `Dimension` instance. The `Dimension.isunlimited` method of a `Dimension` instance can be used to determine if the dimension is unlimited. Read-only class variables: **`name`**: String name, used when creating a `Variable` with `Dataset.createVariable`. **`size`**: Current `Dimension` size (same as `len(d)`, where `d` is a `Dimension` instance). """ cdef public int _dimid, _grpid cdef public _data_model, _name, _grp def __init__(self, grp, name, size=None, **kwargs): """ **`__init__(self, group, name, size=None)`** `Dimension` constructor. **`group`**: `Group` instance to associate with dimension. **`name`**: Name of the dimension. **`size`**: Size of the dimension. `None` or 0 means unlimited. (Default `None`). ***Note***: `Dimension` instances should be created using the `Dataset.createDimension` method of a `Group` or `Dataset` instance, not using `Dimension.__init__` directly. """ cdef int ierr cdef char *dimname cdef size_t lendim self._grpid = grp._grpid # make a weakref to group to avoid circular ref (issue 218) # keep strong reference the default behaviour (issue 251) if grp.keepweakref: self._grp = weakref.proxy(grp) else: self._grp = grp self._data_model = grp.data_model self._name = name if 'id' in kwargs: self._dimid = kwargs['id'] else: bytestr = _strencode(name) dimname = bytestr if size is not None: lendim = size else: lendim = NC_UNLIMITED if grp.data_model != 'NETCDF4': grp._redef() with nogil: ierr = nc_def_dim(self._grpid, dimname, lendim, &self._dimid) if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr) def _getname(self): # private method to get name associated with instance. 
cdef int ierr, _grpid cdef char namstring[NC_MAX_NAME+1] _grpid = self._grp._grpid with nogil: ierr = nc_inq_dimname(_grpid, self._dimid, namstring) _ensure_nc_success(ierr) return namstring.decode('utf-8') property name: """string name of Dimension instance""" def __get__(self): return self._getname() def __set__(self,value): raise AttributeError("name cannot be altered") property size: """current size of Dimension (calls `len` on Dimension instance)""" def __get__(self): return len(self) def __set__(self,value): raise AttributeError("size cannot be altered") def __repr__(self): return self.__str__() def __str__(self): if not dir(self._grp): return 'Dimension object no longer valid' typ = repr(type(self)).replace("._netCDF4", "") if self.isunlimited(): return "%r (unlimited): name = '%s', size = %s" %\ (typ, self._name, len(self)) else: return "%r: name = '%s', size = %s" %\ (typ, self._name, len(self)) def __len__(self): # len(`Dimension` instance) returns current size of dimension cdef int ierr cdef size_t lengthp with nogil: ierr = nc_inq_dimlen(self._grpid, self._dimid, &lengthp) _ensure_nc_success(ierr) return lengthp def group(self): """ **`group(self)`** return the group that this `Dimension` is a member of.""" return self._grp def isunlimited(self): """ **`isunlimited(self)`** returns `True` if the `Dimension` instance is unlimited, `False` otherwise.""" cdef int ierr, n, numunlimdims, ndims, nvars, ngatts, xdimid cdef int *unlimdimids if self._data_model == 'NETCDF4': with nogil: ierr = nc_inq_unlimdims(self._grpid, &numunlimdims, NULL) _ensure_nc_success(ierr) if numunlimdims == 0: return False else: unlimdimids = <int *>malloc(sizeof(int) * numunlimdims) dimid = self._dimid with nogil: ierr = nc_inq_unlimdims(self._grpid, &numunlimdims, unlimdimids) _ensure_nc_success(ierr) unlimdim_ids = [] for n in range(numunlimdims): unlimdim_ids.append(unlimdimids[n]) free(unlimdimids) if dimid in unlimdim_ids: return True else: return False else: # if not NETCDF4, there is only one unlimited dimension. # nc_inq_unlimdims only works for NETCDF4. with nogil: ierr = nc_inq(self._grpid, &ndims, &nvars, &ngatts, &xdimid) if self._dimid == xdimid: return True else: return False cdef class Variable: """ A netCDF `Variable` is used to read and write netCDF data. They are analogous to numpy array objects. See `Variable.__init__` for more details. A list of attribute names corresponding to netCDF attributes defined for the variable can be obtained with the `Variable.ncattrs` method. These attributes can be created by assigning to an attribute of the `Variable` instance. A dictionary containing all the netCDF attribute name/value pairs is provided by the `__dict__` attribute of a `Variable` instance. The following class variables are read-only: **`dimensions`**: A tuple containing the names of the dimensions associated with this variable. **`dtype`**: A numpy dtype object describing the variable's data type. **`ndim`**: The number of variable dimensions. **`shape`**: A tuple with the current shape (length of all dimensions). **`scale`**: If True, `scale_factor` and `add_offset` are applied, and signed integer data is automatically converted to unsigned integer data if the `_Unsigned` attribute is set to "true" or "True". Default is `True`, can be reset using `Variable.set_auto_scale` and `Variable.set_auto_maskandscale` methods. **`mask`**: If True, data is automatically converted to/from masked arrays when missing values or fill values are present.
Default is `True`, can be reset using `Variable.set_auto_mask` and `Variable.set_auto_maskandscale` methods. Only relevant for Variables with primitive or enum types (ignored for compound and vlen Variables). **`chartostring`**: If True, data is automatically converted to/from character arrays to string arrays when the `_Encoding` variable attribute is set. Default is `True`, can be reset using `Variable.set_auto_chartostring` method. **`least_significant_digit`**: Describes the power of ten of the smallest decimal place in the data that contains a reliable value. Data is truncated to this decimal place when it is assigned to the `Variable` instance. If `None`, the data is not truncated. **`significant_digits`**: New in version 1.6.0. Describes the number of significant digits in the data that contains a reliable value. Data is truncated to retain this number of significant digits when it is assigned to the `Variable` instance. If `None`, the data is not truncated. Only available with netcdf-c >= 4.9.0, and only works with `NETCDF4` or `NETCDF4_CLASSIC` formatted files. The number of significant digits used in the quantization of variable data can be obtained using the `Variable.significant_digits` method. Default `None` - no quantization done. **`quantize_mode`**: New in version 1.6.0. Controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. Ignored if `significant_digits` not specified. If 'BitRound' is used, then `significant_digits` is interpreted as binary (not decimal) digits. **`__orthogonal_indexing__`**: Always `True`. Indicates to client code that the object supports 'orthogonal indexing', which means that slices that are 1d arrays or lists slice along each dimension independently. This behavior is similar to Fortran or Matlab, but different from numpy. **`datatype`**: numpy data type (for primitive data types) or VLType/CompoundType instance (for compound or vlen data types). **`name`**: String name. **`size`**: The number of stored elements. """ cdef public int _varid, _grpid, _nunlimdim cdef public _name, ndim, dtype, mask, scale, always_mask, chartostring, _isprimitive, \ _iscompound, _isvlen, _isenum, _grp, _cmptype, _vltype, _enumtype,\ __orthogonal_indexing__, _has_lsd, _use_get_vars, _ncstring_attrs__, auto_complex def __init__(self, grp, name, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1, fletcher32=False, contiguous=False, chunksizes=None, endian='native', least_significant_digit=None, significant_digits=None,quantize_mode='BitGroom',fill_value=None, chunk_cache=None, **kwargs): """ **`__init__(self, group, name, datatype, dimensions=(), compression=None, zlib=False, complevel=4, shuffle=True, szip_coding='nn', szip_pixels_per_block=8, blosc_shuffle=1, fletcher32=False, contiguous=False, chunksizes=None, endian='native', least_significant_digit=None,fill_value=None,chunk_cache=None)`** `Variable` constructor. **`group`**: `Group` or `Dataset` instance to associate with variable. **`name`**: Name of the variable. **`datatype`**: `Variable` data type. Can be specified by providing a numpy dtype object, or a string that describes a numpy dtype object.
Supported values, corresponding to `str` attribute of numpy dtype objects, include `'f4'` (32-bit floating point), `'f8'` (64-bit floating point), `'i4'` (32-bit signed integer), `'i2'` (16-bit signed integer), `'i8'` (64-bit signed integer), `'i1'` (8-bit signed integer), `'u1'` (8-bit unsigned integer), `'u2'` (16-bit unsigned integer), `'u4'` (32-bit unsigned integer), `'u8'` (64-bit unsigned integer), or `'S1'` (single-character string). For compatibility with Scientific.IO.NetCDF, the old Numeric single character typecodes can also be used (`'f'` instead of `'f4'`, `'d'` instead of `'f8'`, `'h'` or `'s'` instead of `'i2'`, `'b'` or `'B'` instead of `'i1'`, `'c'` instead of `'S1'`, and `'i'` or `'l'` instead of `'i4'`). `datatype` can also be a `CompoundType` instance (for a structured, or compound array), a `VLType` instance (for a variable-length array), or the python `str` builtin (for a variable-length string array). Numpy string and unicode datatypes with length greater than one are aliases for `str`. **`dimensions`**: a tuple containing the variable's Dimension instances (defined previously with `createDimension`). Default is an empty tuple which means the variable is a scalar (and therefore has no dimensions). **`compression`**: compression algorithm to use. Currently `zlib`,`szip`,`zstd`,`bzip2`,`blosc_lz`,`blosc_lz4`,`blosc_lz4hc`, `blosc_zlib` and `blosc_zstd` are supported. Default is `None` (no compression). All of the compressors except `zlib` and `szip` use the HDF5 plugin architecture. **`zlib`**: if `True`, data assigned to the `Variable` instance is compressed on disk. Default `False`. Deprecated - use `compression='zlib'` instead. **`complevel`**: the level of compression to use (1 is the fastest, but poorest compression, 9 is the slowest but best compression). Default 4. Ignored if `compression=None` or `szip`. A value of 0 disables compression. **`shuffle`**: if `True`, the HDF5 shuffle filter is applied to improve zlib compression. Default `True`. Ignored unless `compression = 'zlib'`. **`blosc_shuffle`**: shuffle filter inside blosc compressor (only relevant if compression kwarg set to one of the blosc compressors). Can be 0 (no blosc shuffle), 1 (bytewise shuffle) or 2 (bitwise shuffle). Default is 1. Ignored if blosc compressor not used. **`szip_coding`**: szip coding method. Can be `ec` (entropy coding) or `nn` (nearest neighbor coding). Default is `nn`. Ignored if szip compressor not used. **`szip_pixels_per_block`**: Can be 4,8,16 or 32 (Default 8). Ignored if szip compressor not used. **`fletcher32`**: if `True` (default `False`), the Fletcher32 checksum algorithm is used for error detection. **`contiguous`**: if `True` (default `False`), the variable data is stored contiguously on disk. Default `False`. Setting to `True` for a variable with an unlimited dimension will trigger an error. Fixed size variables (with no unlimited dimension) with no compression filters are contiguous by default. **`chunksizes`**: Can be used to specify the HDF5 chunksizes for each dimension of the variable. A detailed discussion of HDF chunking and I/O performance is available [here](https://support.hdfgroup.org/HDF5/doc/Advanced/Chunking). The default chunking scheme in the netcdf-c library is discussed [here](https://www.unidata.ucar.edu/software/netcdf/documentation/NUG/netcdf_perf_chunking.html). Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file.
`chunksizes` cannot be set if `contiguous=True`. **`endian`**: Can be used to control whether the data is stored in little or big endian format on disk. Possible values are `little, big` or `native` (default). The library will automatically handle endian conversions when the data is read, but if the data is always going to be read on a computer with the opposite format as the one used to create the file, there may be some performance advantage to be gained by setting the endian-ness. For netCDF 3 files (that don't use HDF5), only `endian='native'` is allowed. The `compression, zlib, complevel, shuffle, fletcher32, contiguous` and `chunksizes` keywords are silently ignored for netCDF 3 files that do not use HDF5. **`least_significant_digit`**: If this or `significant_digits` are specified, variable data will be truncated (quantized). In conjunction with `compression='zlib'` this produces 'lossy', but significantly more efficient compression. For example, if `least_significant_digit=1`, data will be quantized using around(scale*data)/scale, where scale = 2**bits, and bits is determined so that a precision of 0.1 is retained (in this case bits=4). Default is `None`, or no quantization. **`significant_digits`**: New in version 1.6.0. As described for `least_significant_digit` except the number of significant digits retained is prescribed independent of the floating point exponent. Default `None` - no quantization done. **`quantize_mode`**: New in version 1.6.0. Controls the quantization algorithm (default 'BitGroom', 'BitRound' and 'GranularBitRound' also available). The 'GranularBitRound' algorithm may result in better compression for typical geophysical datasets. Ignored if `significant_digits` not specified. If 'BitRound' is used, then `significant_digits` is interpreted as binary (not decimal) digits. **`fill_value`**: If specified, the default netCDF fill value (the value that the variable gets filled with before any data is written to it) is replaced with this value, and the `_FillValue` attribute is set. If fill_value is set to `False`, then the variable is not pre-filled. The default netCDF fill values can be found in the dictionary `netCDF4.default_fillvals`. If not set, the default fill value will be used but no `_FillValue` attribute will be created (this is the default behavior of the netcdf-c library). If you want to use the default fill value, but have the `_FillValue` attribute set, use `fill_value='default'` (note - this only works for primitive data types). `Variable.get_fill_value` can be used to retrieve the fill value, even if the `_FillValue` attribute is not set. **`chunk_cache`**: If specified, sets the chunk cache size for this variable. Persists as long as Dataset is open. Use `set_var_chunk_cache` to change it when Dataset is re-opened. ***Note***: `Variable` instances should be created using the `Dataset.createVariable` method of a `Dataset` or `Group` instance, not using this class directly.
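As an illustrative sketch pulling several of these keywords together (all names and sizes are placeholders):

```python
import numpy as np
from netCDF4 import Dataset

nc = Dataset("variables.nc", "w")
nc.createDimension("time", None)   # unlimited
nc.createDimension("lat", 73)
nc.createDimension("lon", 144)
v = nc.createVariable(
    "temp", "f4", ("time", "lat", "lon"),
    compression="zlib", complevel=4,
    chunksizes=(1, 73, 144),       # one chunk per time step
    fill_value=-9999.0,            # sets the _FillValue attribute
)
v[0] = np.zeros((73, 144), dtype="f4")
nc.close()
```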
""" cdef int ierr, ndims, icontiguous, icomplevel, numdims, _grpid, nsd, cdef unsigned int iblosc_complevel,iblosc_blocksize,iblosc_compressor,iblosc_shuffle cdef int iszip_coding, iszip_pixels_per_block cdef char namstring[NC_MAX_NAME+1] cdef char *varname cdef nc_type xtype cdef int *dimids = NULL cdef size_t sizep, nelemsp cdef size_t *chunksizesp cdef float preemptionp cdef int nc_complex_typeid, complex_base_type_id, complex_dim_id cdef int _nc_endian # Extra information for more helpful error messages error_info = f"(variable '{name}', group '{grp.name}')" # flag to indicate that orthogonal indexing is supported self.__orthogonal_indexing__ = True # For backwards compatibility, deprecated zlib kwarg takes # precedence if compression kwarg not set. if zlib and not compression: compression = 'zlib' # if complevel is set to zero, turn off compression if not complevel: compression = None zlib = False szip = False zstd = False bzip2 = False blosc_lz = False blosc_lz4 = False blosc_lz4hc = False #blosc_snappy = False blosc_zlib = False blosc_zstd = False if compression == 'zlib': zlib = True elif compression == 'szip': szip = True elif compression == 'zstd': zstd = True elif compression == 'bzip2': bzip2 = True elif compression == 'blosc_lz': blosc_lz = True elif compression == 'blosc_lz4': blosc_lz4 = True elif compression == 'blosc_lz4hc': blosc_lz4hc = True #elif compression == 'blosc_snappy': # blosc_snappy = True elif compression == 'blosc_zlib': blosc_zlib = True elif compression == 'blosc_zstd': blosc_zstd = True elif not compression: compression = None # if compression evaluates to False, set to None. pass else: raise ValueError(f"Unsupported value for compression kwarg {error_info}") if grp.data_model.startswith("NETCDF3") and endian != 'native': raise RuntimeError( f"only endian='native' allowed for NETCDF3 files, got '{endian}' {error_info}" ) if endian not in ("little", "big", "native"): raise ValueError( f"'endian' keyword argument must be 'little','big' or 'native', got '{endian}' " f"{error_info}" ) self._grpid = grp._grpid # make a weakref to group to avoid circular ref (issue 218) # keep strong reference the default behaviour (issue 251) if grp.keepweakref: self._grp = weakref.proxy(grp) else: self._grp = grp self._iscompound = isinstance(datatype, CompoundType) self._isvlen = isinstance(datatype, VLType) or datatype==str self._isenum = isinstance(datatype, EnumType) user_type = self._iscompound or self._isvlen or self._isenum # convert to a real numpy datatype object if necessary. if not user_type and type(datatype) != numpy.dtype: datatype = numpy.dtype(datatype) # convert numpy string dtype with length > 1 # or any numpy unicode dtype into str if (isinstance(datatype, numpy.dtype) and ((datatype.kind == 'S' and datatype.itemsize > 1) or datatype.kind == 'U')): datatype = str user_type = True self._isvlen = True # If datatype is complex, convert to compoundtype is_complex = dtype_is_complex(datatype) if is_complex and not self._grp.auto_complex: raise ValueError( f"complex datatypes ({datatype}) are only supported with `auto_complex=True`" ) # check if endian keyword consistent with datatype specification. dtype_endian = _dtype_endian_lookup[getattr(datatype, "byteorder", None)] if dtype_endian is not None and dtype_endian != endian: if not (dtype_endian == 'native' and endian == sys.byteorder): warnings.warn('endian-ness of dtype and endian kwarg do not match, using endian kwarg') # check validity of datatype. 
self._isprimitive = not user_type if user_type: if self._iscompound: self._cmptype = datatype if self._isvlen: self._vltype = datatype if self._isenum: self._enumtype = datatype if datatype==str: if grp.data_model != 'NETCDF4': raise ValueError( 'Variable length strings are only supported for the ' 'NETCDF4 format. For other formats, consider using ' 'netCDF4.stringtochar to convert string arrays into ' 'character arrays with an additional dimension.' f' {error_info}') datatype = VLType(self._grp, str, None) self._vltype = datatype xtype = datatype._nc_type # make sure this is a valid user defined datatype defined in this Group with nogil: ierr = nc_inq_type(self._grpid, xtype, namstring, NULL) _ensure_nc_success(ierr, extra_msg=error_info) # dtype variable attribute is a numpy datatype object. self.dtype = datatype.dtype elif datatype.str[1:] in _supportedtypes: # find netCDF primitive data type corresponding to # specified numpy data type. xtype = _nptonctype[datatype.str[1:]] # dtype variable attribute is a numpy datatype object. self.dtype = datatype elif is_complex: xtype = _complex_types[datatype.str[1:]] self.dtype = datatype else: raise TypeError(f'Illegal primitive data type, must be one of {_supportedtypes}, got {datatype} {error_info}') if 'id' in kwargs: self._varid = kwargs['id'] else: bytestr = _strencode(name) varname = bytestr ndims = len(dimensions) # find dimension ids. if ndims: dimids = <int *>malloc(sizeof(int) * ndims) for n in range(ndims): dimids[n] = dimensions[n]._dimid # go into define mode if it's a netCDF 3 compatible # file format. Be careful to exit define mode before # any exceptions are raised. if grp.data_model != 'NETCDF4': grp._redef() # define variable. with nogil: ierr = pfnc_def_var(self._grpid, varname, xtype, ndims, dimids, &self._varid) if ndims: free(dimids) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) # set chunk cache size if desired # default is 1mb per var, can cause problems when many (1000's) # of vars are created. This change only lasts as long as file is # open. if grp.data_model.startswith('NETCDF4') and chunk_cache is not None: with nogil: ierr = nc_get_var_chunk_cache(self._grpid, self._varid, &sizep, &nelemsp, &preemptionp) _ensure_nc_success(ierr, extra_msg=error_info) # reset chunk cache size, leave other parameters unchanged. sizep = chunk_cache with nogil: ierr = nc_set_var_chunk_cache(self._grpid, self._varid, sizep, nelemsp, preemptionp) _ensure_nc_success(ierr, extra_msg=error_info) # set compression, shuffle, chunking, fletcher32 and endian # variable settings. # don't bother for NETCDF3* formats. # for NETCDF3* formats, the compression,zlib,shuffle,chunking, # and fletcher32 flags are silently ignored. Only # endian='native' allowed for NETCDF3. if grp.data_model in ['NETCDF4','NETCDF4_CLASSIC']: # set compression and shuffle parameters.
if compression is not None and ndims: # don't bother for scalar variable if zlib: icomplevel = complevel if shuffle: with nogil: ierr = nc_def_var_deflate(self._grpid, self._varid, 1, 1, icomplevel) else: with nogil: ierr = nc_def_var_deflate(self._grpid, self._varid, 0, 1, icomplevel) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) if szip: if not __has_szip_support__: raise ValueError("compression='szip' only works if linked version of hdf5 has szip functionality enabled") try: iszip_coding = _szip_dict[szip_coding] except KeyError: raise ValueError("unknown szip coding ('ec' or 'nn' supported)") iszip_pixels_per_block = szip_pixels_per_block with nogil: ierr = nc_def_var_szip(self._grpid, self._varid, iszip_coding, iszip_pixels_per_block) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) if zstd: if not __has_zstandard_support__: raise NetCDF4MissingFeatureException("compression='zstd'", "4.9.0") icomplevel = complevel with nogil: ierr = nc_def_var_zstandard(self._grpid, self._varid, icomplevel) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) if bzip2: if not __has_bzip2_support__: raise NetCDF4MissingFeatureException("compression='bzip2'", "4.9.0") icomplevel = complevel with nogil: ierr = nc_def_var_bzip2(self._grpid, self._varid, icomplevel) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) if blosc_zstd or blosc_lz or blosc_lz4 or blosc_lz4hc or blosc_zlib: if not __has_blosc_support__: raise NetCDF4MissingFeatureException("compression='blosc_*'", "4.9.0") iblosc_compressor = _blosc_dict[compression] iblosc_shuffle = blosc_shuffle iblosc_blocksize = 0 # not currently used by c lib iblosc_complevel = complevel with nogil: ierr = nc_def_var_blosc(self._grpid, self._varid, iblosc_compressor, iblosc_complevel,iblosc_blocksize, iblosc_shuffle) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) # set checksum. if fletcher32 and ndims: # don't bother for scalar variable with nogil: ierr = nc_def_var_fletcher32(self._grpid, self._varid, 1) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) # set chunking stuff. if ndims: # don't bother for scalar variable. 
if contiguous: icontiguous = NC_CONTIGUOUS if chunksizes is not None: raise ValueError('cannot specify chunksizes for a contiguous dataset') else: icontiguous = NC_CHUNKED if chunksizes is None: chunksizesp = NULL else: if len(chunksizes) != len(dimensions): if grp.data_model != 'NETCDF4': grp._enddef() raise ValueError('chunksizes must be a sequence with the same length as dimensions') chunksizesp = <size_t *>malloc(sizeof(size_t) * ndims) for n in range(ndims): if not dimensions[n].isunlimited() and \ chunksizes[n] > dimensions[n].size: msg = 'chunksize cannot exceed dimension size' raise ValueError(msg) chunksizesp[n] = chunksizes[n] if chunksizes is not None or contiguous: with nogil: ierr = nc_def_var_chunking(self._grpid, self._varid, icontiguous, chunksizesp) free(chunksizesp) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) # set endian-ness of variable if endian != 'native': _nc_endian = NC_ENDIAN_LITTLE if endian == "little" else NC_ENDIAN_BIG with nogil: ierr = nc_def_var_endian(self._grpid, self._varid, _nc_endian) _ensure_nc_success(ierr, extra_msg=error_info) # set quantization if significant_digits is not None: if not __has_quantization_support__: raise ValueError( "significant_digits kwarg only works with netcdf-c >= 4.9.0. " "To enable, install Cython, make sure you have version 4.9.0 " "or higher netcdf-c, and rebuild netcdf4-python. Otherwise, " f"use least_significant_digit kwarg for quantization. {error_info}" ) nsd = significant_digits if quantize_mode == 'BitGroom': with nogil: ierr = nc_def_var_quantize(self._grpid, self._varid, NC_QUANTIZE_BITGROOM, nsd) elif quantize_mode == 'GranularBitRound': with nogil: ierr = nc_def_var_quantize(self._grpid, self._varid, NC_QUANTIZE_GRANULARBR, nsd) elif quantize_mode == 'BitRound': with nogil: ierr = nc_def_var_quantize(self._grpid, self._varid, NC_QUANTIZE_BITROUND, nsd) else: raise ValueError("'quantize_mode' keyword argument must be 'BitGroom','GranularBitRound' or 'BitRound', got '%s'" % quantize_mode) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) # set a fill value for this variable if fill_value keyword # given. This avoids the HDF5 overhead of deleting and # recreating the dataset if it is set later (after the enddef). if fill_value is not None: if fill_value is False: # no filling for this variable if fill_value==False.
if self._isprimitive: with nogil: ierr = nc_def_var_fill(self._grpid, self._varid, 1, NULL) if ierr != NC_NOERR: if grp.data_model != 'NETCDF4': grp._enddef() _ensure_nc_success(ierr, extra_msg=error_info) elif fill_value == 'default': if self._isprimitive: fillval = numpy.array(default_fillvals[self.dtype.str[1:]]) if not fillval.dtype.isnative: fillval.byteswap(True) _set_att(self._grp, self._varid, '_FillValue',\ fillval, xtype=xtype) else: msg = """ WARNING: there is no default fill value for this data type, so fill_value='default' does not do anything.""" warnings.warn(msg) else: if self._isprimitive or self._isenum or \ (self._isvlen and self.dtype == str): if self._isvlen and self.dtype == str: _set_att(self._grp, self._varid, '_FillValue',\ _tostr(fill_value), xtype=xtype, force_ncstring=True) else: fillval = numpy.array(fill_value, self.dtype) if not fillval.dtype.isnative: fillval.byteswap(True) _set_att(self._grp, self._varid, '_FillValue',\ fillval, xtype=xtype) else: raise AttributeError("cannot set _FillValue attribute for VLEN or compound variable") if least_significant_digit is not None: self.least_significant_digit = least_significant_digit # leave define mode if not a NETCDF4 format file. if grp.data_model != 'NETCDF4': grp._enddef() # If the variable is a complex number, we need to check if # it was created using a compound type or a complex # dimension, and then make the equivalent class in Python if is_complex: self._fix_complex_numbers() # count how many unlimited dimensions there are. self._nunlimdim = 0 for dim in dimensions: if dim.isunlimited(): self._nunlimdim = self._nunlimdim + 1 # set ndim attribute (number of dimensions). self.ndim = _inq_varndims(self._grpid, self._varid, self._grp.auto_complex) self._name = name # default for automatically applying scale_factor and # add_offset, and converting to/from masked arrays is True. self.scale = True self.mask = True # issue 809: default for converting arrays with no missing values to # regular numpy arrays self.always_mask = True # default is to automatically convert to/from character # to string arrays when _Encoding variable attribute is set. self.chartostring = True # propagate _ncstring_attrs__ setting from parent group. self._ncstring_attrs__ = grp._ncstring_attrs__ if 'least_significant_digit' in self.ncattrs(): self._has_lsd = True # avoid calling nc_get_vars for strided slices by default. # a fix for strided slice access using HDF5 was added # in 4.6.2. # always use nc_get_vars for strided access with OpenDAP (issue #838). 
if __netcdf4libversion__ >= "4.6.2" or\ self._grp.filepath().startswith('http'): self._use_get_vars = True else: self._use_get_vars = False def _fix_complex_numbers(self): cdef char name[NC_MAX_NAME + 1] cdef int complex_typeid, complex_dim_id error_info = f"(variable '{name}', group '{self._grp.name}')" if pfnc_var_is_complex_type(self._grpid, self._varid): self._isprimitive = False self._iscompound = True with nogil: ierr = pfnc_inq_var_complex_base_type(self._grpid, self._varid, &complex_typeid) _ensure_nc_success(ierr, extra_msg=error_info) np_complex_type = _nctonptype[complex_typeid] compound_complex_type = f"{np_complex_type}, {np_complex_type}" self._cmptype = CompoundType( self._grp, compound_complex_type, "complex", typeid=complex_typeid ) else: with nogil: ierr = pfnc_get_complex_dim(self._grpid, &complex_dim_id) _ensure_nc_success(ierr, extra_msg=error_info) with nogil: ierr = nc_inq_dimname(self._grpid, complex_dim_id, name) _ensure_nc_success(ierr, extra_msg=error_info) self._grp.dimensions[name.decode("utf-8")] = Dimension( self._grp, name, size=2, id=complex_dim_id ) def __array__(self): # numpy special method that returns a numpy array. # allows numpy ufuncs to work faster on Variable objects # (issue 216). return self[...] def __repr__(self): return self.__str__() def __str__(self): cdef int ierr, no_fill if not dir(self._grp): return 'Variable object no longer valid' ncdump = [repr(type(self)).replace("._netCDF4", "")] show_more_dtype = True if self._iscompound: kind = 'compound' elif self._isvlen: kind = 'vlen' elif self._isenum: kind = 'enum' else: show_more_dtype = False kind = str(self.dtype) dimnames = tuple(_tostr(dimname) for dimname in self.dimensions) ncdump.append('%s %s(%s)' %\ (kind, self._name, ', '.join(dimnames))) for name in self.ncattrs(): ncdump.append(' %s: %s' % (name, self.getncattr(name))) if show_more_dtype: ncdump.append('%s data type: %s' % (kind, self.dtype)) unlimdims = [] for dimname in self.dimensions: dim = _find_dim(self._grp, dimname) if dim.isunlimited(): unlimdims.append(dimname) if (self._grp.path != '/'): ncdump.append('path = %s' % self._grp.path) ncdump.append('unlimited dimensions: %s' % ', '.join(unlimdims)) ncdump.append('current shape = %r' % (self.shape,)) if __netcdf4libversion__ < '4.5.1' and\ self._grp.file_format.startswith('NETCDF3'): # issue #908: no_fill not correct for NETCDF3 files before 4.5.1 # before 4.5.1 there was no way to turn off filling on a # per-variable basis for classic files. no_fill=0 else: with nogil: ierr = nc_inq_var_fill(self._grpid,self._varid,&no_fill,NULL) _ensure_nc_success(ierr) if self._isprimitive: if no_fill != 1: try: fillval = self._FillValue msg = 'filling on' except AttributeError: fillval = default_fillvals[self.dtype.str[1:]] if self.dtype.str[1:] in ['u1','i1']: msg = 'filling on, default _FillValue of %s ignored' % fillval else: msg = 'filling on, default _FillValue of %s used' % fillval ncdump.append(msg) else: ncdump.append('filling off') return '\n'.join(ncdump) def _getdims(self): # Private method to get variables's dimension names cdef int ierr, dimid cdef char namstring[NC_MAX_NAME+1] dimids = _inq_vardimid(self._grpid, self._varid, self._grp.auto_complex) # loop over dimensions, retrieve names. 
dimensions = () for dimid in dimids: with nogil: ierr = nc_inq_dimname(self._grpid, dimid, namstring) _ensure_nc_success(ierr) name = namstring.decode('utf-8') dimensions = dimensions + (name,) return dimensions def _getname(self): # Private method to get name associated with instance cdef int err, _grpid cdef char namstring[NC_MAX_NAME+1] _grpid = self._grp._grpid with nogil: ierr = nc_inq_varname(_grpid, self._varid, namstring) _ensure_nc_success(ierr) return namstring.decode('utf-8') property name: """string name of Variable instance""" def __get__(self): return self._getname() def __set__(self,value): raise AttributeError("name cannot be altered") property datatype: """numpy data type (for primitive data types) or VLType/CompoundType/EnumType instance (for compound, vlen or enum data types)""" def __get__(self): if self._iscompound: return self._cmptype elif self._isvlen: return self._vltype elif self._isenum: return self._enumtype elif self._isprimitive: return self.dtype property shape: """find current sizes of all variable dimensions""" def __get__(self): shape = () for dimname in self._getdims(): # look in current group, and parents for dim. dim = _find_dim(self._grp,dimname) shape = shape + (len(dim),) return shape def __set__(self,value): raise AttributeError("shape cannot be altered") property size: """Return the number of stored elements.""" def __get__(self): # issue #957: add int since prod(())=1.0 return int(numpy.prod(self.shape)) property dimensions: """get variables's dimension names""" def __get__(self): return self._getdims() def __set__(self,value): raise AttributeError("dimensions cannot be altered") def group(self): """ **`group(self)`** return the group that this `Variable` is a member of.""" return self._grp def get_fill_value(self): """ **`get_fill_value(self)`** return the fill value associated with this `Variable` (returns `None` if data is not pre-filled). Works even if default fill value was used, and `_FillValue` attribute does not exist.""" cdef int ierr, no_fill with nogil: ierr = nc_inq_var_fill(self._grpid,self._varid,&no_fill,NULL) _ensure_nc_success(ierr) if no_fill == 1: # no filling for this variable return None else: try: fillval = self._FillValue return fillval except AttributeError: # _FillValue attribute not set, see if we can retrieve _FillValue. # for primitive data types. if self._isprimitive: #return numpy.array(default_fillvals[self.dtype.str[1:]],self.dtype) fillval = numpy.empty((),self.dtype) ierr=nc_inq_var_fill(self._grpid,self._varid,&no_fill,PyArray_DATA(fillval)) _ensure_nc_success(ierr) return fillval else: # no default filling for non-primitive data types. return None def ncattrs(self): """ **`ncattrs(self)`** return netCDF attribute names for this `Variable` in a list.""" return _get_att_names(self._grpid, self._varid) def setncattr(self,name,value): """ **`setncattr(self,name,value)`** set a netCDF variable attribute using name,value pair. Use if you need to set a netCDF attribute with the same name as one of the reserved python attributes.""" cdef nc_type xtype xtype=-99 # issue #959 - trying to set _FillValue results in mysterious # error when close method is called so catch it here. It is # already caught in __setattr__. 
if name == '_FillValue': msg='_FillValue attribute must be set when variable is '+\ 'created (using fill_value keyword to createVariable)' raise AttributeError(msg) if self._grp.data_model != 'NETCDF4': self._grp._redef() _set_att(self._grp, self._varid, name, value, xtype=xtype, force_ncstring=self._ncstring_attrs__) if self._grp.data_model != 'NETCDF4': self._grp._enddef() def setncattr_string(self,name,value): """ **`setncattr_string(self,name,value)`** set a netCDF variable string attribute using name,value pair. Use if you need to ensure that a netCDF attribute is created with type `NC_STRING` if the file format is `NETCDF4`. Use if you need to set an attribute to an array of variable-length strings.""" cdef nc_type xtype xtype=-99 if self._grp.data_model != 'NETCDF4': msg='file format does not support NC_STRING attributes' raise OSError(msg) _set_att(self._grp, self._varid, name, value, xtype=xtype, force_ncstring=True) def setncatts(self,attdict): """ **`setncatts(self,attdict)`** set a bunch of netCDF variable attributes at once using a python dictionary. This may be faster when setting a lot of attributes for a `NETCDF3` formatted file, since nc_redef/nc_enddef is not called in between setting each attribute.""" if self._grp.data_model != 'NETCDF4': self._grp._redef() for name, value in attdict.items(): _set_att(self._grp, self._varid, name, value) if self._grp.data_model != 'NETCDF4': self._grp._enddef() def getncattr(self,name,encoding='utf-8'): """ **`getncattr(self,name)`** retrieve a netCDF variable attribute. Use if you need to retrieve a netCDF attribute with the same name as one of the reserved python attributes. The optional kwarg `encoding` can be used to specify the character encoding of a string attribute (default is `utf-8`).""" return _get_att(self._grp, self._varid, name, encoding=encoding) def delncattr(self, name): """ **`delncattr(self,name)`** delete a netCDF variable attribute.
Use if you need to delete a netCDF attribute with the same name as one of the reserved python attributes.""" cdef char *attname bytestr = _strencode(name) attname = bytestr if self._grp.data_model != 'NETCDF4': self._grp._redef() with nogil: ierr = nc_del_att(self._grpid, self._varid, attname) if self._grp.data_model != 'NETCDF4': self._grp._enddef() _ensure_nc_success(ierr) def filters(self): """ **`filters(self)`** return dictionary containing HDF5 filter parameters.""" cdef int ierr,ideflate,ishuffle,icomplevel,ifletcher32 cdef int izstd=0 cdef int ibzip2=0 cdef int iblosc=0 cdef int iszip=0 cdef int iszip_coding=0 cdef int iszip_pixels_per_block=0 cdef int icomplevel_zstd=0 cdef int icomplevel_bzip2=0 cdef unsigned int iblosc_shuffle=0 cdef unsigned int iblosc_compressor=0 cdef unsigned int iblosc_blocksize=0 cdef unsigned int iblosc_complevel=0 filtdict = {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} if self._grp.data_model not in ['NETCDF4_CLASSIC','NETCDF4']: return with nogil: ierr = nc_inq_var_deflate(self._grpid, self._varid, &ishuffle, &ideflate, &icomplevel) _ensure_nc_success(ierr) with nogil: ierr = nc_inq_var_fletcher32(self._grpid, self._varid, &ifletcher32) _ensure_nc_success(ierr) if __has_zstandard_support__: with nogil: ierr = nc_inq_var_zstandard(self._grpid, self._varid, &izstd,\ &icomplevel_zstd) if ierr != 0: izstd=0 # _ensure_nc_success(ierr) if __has_bzip2_support__: with nogil: ierr = nc_inq_var_bzip2(self._grpid, self._varid, &ibzip2,\ &icomplevel_bzip2) if ierr != 0: ibzip2=0 #_ensure_nc_success(ierr) if __has_blosc_support__: with nogil: ierr = nc_inq_var_blosc(self._grpid, self._varid, &iblosc,\ &iblosc_compressor,&iblosc_complevel,&iblosc_blocksize,&iblosc_shuffle) if ierr != 0: iblosc=0 #_ensure_nc_success(ierr) if __has_szip_support__: with nogil: ierr = nc_inq_var_szip(self._grpid, self._varid, &iszip_coding,\ &iszip_pixels_per_block) if ierr != 0: iszip=0 else: if iszip_coding: iszip=1 else: iszip=0 #_ensure_nc_success(ierr) if ideflate: filtdict['zlib']=True filtdict['complevel']=icomplevel if izstd: filtdict['zstd']=True filtdict['complevel']=icomplevel_zstd if ibzip2: filtdict['bzip2']=True filtdict['complevel']=icomplevel_bzip2 if iblosc: blosc_compressor = iblosc_compressor filtdict['blosc']={'compressor':_blosc_dict_inv[blosc_compressor],'shuffle':iblosc_shuffle} filtdict['complevel']=iblosc_complevel if iszip: szip_coding = iszip_coding filtdict['szip']={'coding':_szip_dict_inv[szip_coding],'pixels_per_block':iszip_pixels_per_block} if ishuffle: filtdict['shuffle']=True if ifletcher32: filtdict['fletcher32']=True return filtdict def quantization(self): """ **`quantization(self)`** return number of significant digits and the algorithm used in quantization. Returns None if quantization not active. 
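As a sketch (assuming a variable `v` created with `significant_digits=3` and the default `quantize_mode='BitGroom'`):

```python
v.quantization()  # -> (3, 'BitGroom'); returns None if quantization is not active
```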
""" if not __has_quantization_support__: return None cdef int ierr, nsd, quantize_mode if self._grp.data_model not in ['NETCDF4_CLASSIC','NETCDF4']: return None with nogil: ierr = nc_inq_var_quantize(self._grpid, self._varid, &quantize_mode, &nsd) _ensure_nc_success(ierr) if quantize_mode == NC_NOQUANTIZE: return None if quantize_mode == NC_QUANTIZE_GRANULARBR: sig_digits = nsd quant_mode = 'GranularBitRound' elif quantize_mode == NC_QUANTIZE_BITROUND: sig_digits = nsd # interpreted as bits, not decimal quant_mode = 'BitRound' else: sig_digits = nsd quant_mode = 'BitGroom' return sig_digits, quant_mode def endian(self): """ **`endian(self)`** return endian-ness (`little,big,native`) of variable (as stored in HDF5 file).""" cdef int ierr, iendian if self._grp.data_model not in ['NETCDF4_CLASSIC','NETCDF4']: return 'native' with nogil: ierr = nc_inq_var_endian(self._grpid, self._varid, &iendian) _ensure_nc_success(ierr) if iendian == NC_ENDIAN_LITTLE: return 'little' elif iendian == NC_ENDIAN_BIG: return 'big' else: return 'native' def chunking(self): """ **`chunking(self)`** return variable chunking information. If the dataset is defined to be contiguous (and hence there is no chunking) the word 'contiguous' is returned. Otherwise, a sequence with the chunksize for each dimension is returned.""" cdef int ierr, icontiguous, ndims cdef size_t *chunksizesp if self._grp.data_model not in ['NETCDF4_CLASSIC','NETCDF4']: return None ndims = self.ndim chunksizesp = malloc(sizeof(size_t) * ndims) with nogil: ierr = nc_inq_var_chunking(self._grpid, self._varid, &icontiguous, chunksizesp) _ensure_nc_success(ierr) chunksizes=[] for n in range(ndims): chunksizes.append(chunksizesp[n]) free(chunksizesp) if icontiguous: return 'contiguous' else: return chunksizes def get_var_chunk_cache(self): """ **`get_var_chunk_cache(self)`** return variable chunk cache information in a tuple (size,nelems,preemption). See netcdf C library documentation for `nc_get_var_chunk_cache` for details.""" cdef int ierr cdef size_t sizep, nelemsp cdef float preemptionp with nogil: ierr = nc_get_var_chunk_cache(self._grpid, self._varid, &sizep, &nelemsp, &preemptionp) _ensure_nc_success(ierr) size = sizep; nelems = nelemsp; preemption = preemptionp return (size,nelems,preemption) def set_var_chunk_cache(self,size=None,nelems=None,preemption=None): """ **`set_var_chunk_cache(self,size=None,nelems=None,preemption=None)`** change variable chunk cache settings. See netcdf C library documentation for `nc_set_var_chunk_cache` for details.""" cdef int ierr cdef size_t sizep, nelemsp cdef float preemptionp # reset chunk cache size, leave other parameters unchanged. size_orig, nelems_orig, preemption_orig = self.get_var_chunk_cache() if size is not None: sizep = size else: sizep = size_orig if nelems is not None: nelemsp = nelems else: nelemsp = nelems_orig if preemption is not None: preemptionp = preemption else: preemptionp = preemption_orig with nogil: ierr = nc_set_var_chunk_cache(self._grpid, self._varid, sizep, nelemsp, preemptionp) _ensure_nc_success(ierr) def __delattr__(self,name): # if it's a netCDF attribute, remove it if name not in _private_atts: self.delncattr(name) else: raise AttributeError( "'%s' is one of the reserved attributes %s, cannot delete. Use delncattr instead." % (name, tuple(_private_atts))) def __setattr__(self,name,value): # if name in _private_atts, it is stored at the python # level and not in the netCDF file. 
if name not in _private_atts: # if setting _FillValue or missing_value, make sure value # has same type and byte order as variable. if name == '_FillValue': msg='_FillValue attribute must be set when variable is '+\ 'created (using fill_value keyword to createVariable)' raise AttributeError(msg) #if self._isprimitive: # value = numpy.array(value, self.dtype) #else: # msg="cannot set _FillValue attribute for "+\ # "VLEN or compound variable" # raise AttributeError(msg) elif name in ['valid_min','valid_max','valid_range','missing_value'] and self._isprimitive: # make sure these attributes written in same data type as variable. # also make sure it is written in native byte order # (the same as the data) valuea = numpy.array(value, self.dtype) # check to see if array cast is safe if _safecast(numpy.array(value),valuea): value = valuea if not value.dtype.isnative: value.byteswap(True) else: # otherwise don't do it, but issue a warning msg="WARNING: %s cannot be safely cast to variable dtype" \ % name warnings.warn(msg) self.setncattr(name, value) elif not name.endswith('__'): if hasattr(self,name): raise AttributeError( "'%s' is one of the reserved attributes %s, cannot rebind. Use setncattr instead." % (name, tuple(_private_atts))) else: self.__dict__[name]=value def __getattr__(self,name): # if name in _private_atts, it is stored at the python # level and not in the netCDF file. if name.startswith('__') and name.endswith('__'): # if __dict__ requested, return a dict with netCDF attributes. if name == '__dict__': names = self.ncattrs() values = [] for name in names: values.append(_get_att(self._grp, self._varid, name)) return dict(zip(names, values)) else: raise AttributeError elif name in _private_atts: return self.__dict__[name] else: return self.getncattr(name) def renameAttribute(self, oldname, newname): """ **`renameAttribute(self, oldname, newname)`** rename a `Variable` attribute named `oldname` to `newname`.""" cdef int ierr cdef char *oldnamec cdef char *newnamec bytestr = _strencode(oldname) oldnamec = bytestr bytestr = _strencode(newname) newnamec = bytestr with nogil: ierr = nc_rename_att(self._grpid, self._varid, oldnamec, newnamec) _ensure_nc_success(ierr) def __getitem__(self, elem): # This special method is used to index the netCDF variable # using the "extended slice syntax". The extended slice syntax # is a perfect match for the "start", "count" and "stride" # arguments to the nc_get_var() function, and is much more easy # to use. start, count, stride, put_ind =\ _StartCountStride(elem,self.shape,dimensions=self.dimensions,grp=self._grp,use_get_vars=self._use_get_vars) datashape = _out_array_shape(count) if self._isvlen: data = numpy.empty(datashape, dtype='O') else: data = numpy.empty(datashape, dtype=self.dtype) # Determine which dimensions need to be # squeezed (those for which elem is an integer scalar). # The convention used is that for those cases, # put_ind for this dimension is set to -1 by _StartCountStride. squeeze = data.ndim * [slice(None),] for i,n in enumerate(put_ind.shape[:-1]): if n == 1 and put_ind.size > 0 and put_ind[...,i].ravel()[0] == -1: squeeze[i] = 0 # Reshape the arrays so we can iterate over them. start = start.reshape((-1, self.ndim or 1)) count = count.reshape((-1, self.ndim or 1)) stride = stride.reshape((-1, self.ndim or 1)) put_ind = put_ind.reshape((-1, self.ndim or 1)) # Fill output array with data chunks. 
for (a,b,c,i) in zip(start, count, stride, put_ind): datout = self._get(a,b,c) if not hasattr(datout,'shape') or data.shape == datout.shape: data = datout else: shape = getattr(data[tuple(i)], 'shape', ()) if self._isvlen and not len(self.dimensions): # special case of scalar VLEN data[0] = datout else: if self._isvlen and not shape: # issue #1306 - convert length 1 object array to string data[tuple(i)] = datout.item() else: data[tuple(i)] = datout.reshape(shape) # Remove extra singleton dimensions. if hasattr(data,'shape'): data = data[tuple(squeeze)] if hasattr(data,'ndim') and self.ndim == 0: # Make sure a numpy scalar array is returned instead of a 1-d array of # length 1. if data.ndim != 0: data = numpy.asarray(data[0]) # if auto_scale mode set to True, (through # a call to set_auto_scale or set_auto_maskandscale), # perform automatic unpacking using scale_factor/add_offset. # if auto_mask mode is set to True (through a call to # set_auto_mask or set_auto_maskandscale), perform # automatic conversion to masked array using # missing_value/_Fill_Value. # applied for primitive and (non-string) vlen, # ignored for compound and enum datatypes. try: # check to see if scale_factor and add_offset is valid (issue 176). if hasattr(self,'scale_factor'): float(self.scale_factor) if hasattr(self,'add_offset'): float(self.add_offset) valid_scaleoffset = True except: valid_scaleoffset = False if self.scale: msg = 'invalid scale_factor or add_offset attribute, no unpacking done...' warnings.warn(msg) if self.mask and (self._isprimitive or self._isenum):\ data = self._toma(data) else: # if attribute _Unsigned is "true", and variable has signed integer # dtype, return view with corresponding unsigned dtype (issue #656) if self.scale: # only do this if autoscale option is on. is_unsigned = getattr(self, '_Unsigned', False) in ["true","True"] if is_unsigned and data.dtype.kind == 'i': data=data.view('%su%s'%(data.dtype.byteorder,data.dtype.itemsize)) if self.scale and\ (self._isprimitive or (self._isvlen and self.dtype != str)) and\ valid_scaleoffset: # if variable has scale_factor and add_offset attributes, apply # them. if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'): if self.add_offset != 0.0 or self.scale_factor != 1.0: data = data*self.scale_factor + self.add_offset else: data = data.astype(self.scale_factor.dtype) # issue 913 # else if variable has only scale_factor attribute, rescale. elif hasattr(self, 'scale_factor') and self.scale_factor != 1.0: data = data*self.scale_factor # else if variable has only add_offset attribute, add offset. elif hasattr(self, 'add_offset') and self.add_offset != 0.0: data = data + self.add_offset # if _Encoding is specified for a character variable, return # a numpy array of strings with one less dimension. if self.chartostring and getattr(self.dtype,'kind',None) == 'S' and\ getattr(self.dtype,'itemsize',None) == 1: encoding = getattr(self,'_Encoding',None) # should this only be done if self.scale = True? # should there be some other way to disable this? 
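# The decode below is only attempted when the sliced data still spans the variable's full rightmost (string-length) dimension and the slice along that dimension is complete; otherwise the raw 'S1' bytes are returned, since partial strings cannot be decoded.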
if encoding is not None: # only try to return a string array if rightmost dimension of # sliced data matches rightmost dimension of char variable if len(data.shape) > 0 and data.shape[-1] == self.shape[-1]: # also make sure slice is along last dimension matchdim = True for cnt in count: if cnt[-1] != self.shape[-1]: matchdim = False break if matchdim: data = chartostring(data, encoding=encoding) # if structure array contains char arrays, return view as strings # if _Encoding att set (issue #773) if self._iscompound and \ self._cmptype.dtype != self._cmptype.dtype_view and \ self.chartostring: # self.chartostring and getattr(self,'_Encoding',None) is not None: data = data.view(self._cmptype.dtype_view) return data def _toma(self,data): cdef int ierr, no_fill # if attribute _Unsigned is "true", and variable has signed integer # dtype, return view with corresponding unsigned dtype (issues #656, # #794) # _Unsigned attribute must be "true" or "True" (string). Issue #1232. is_unsigned = getattr(self, '_Unsigned', False) in ["True","true"] is_unsigned_int = is_unsigned and data.dtype.kind == 'i' if self.scale and is_unsigned_int: # only do this if autoscale option is on. dtype_unsigned_int='%su%s' % (data.dtype.byteorder,data.dtype.itemsize) data = data.view(dtype_unsigned_int) # private function for creating a masked array, masking missing_values # and/or _FillValues. totalmask = numpy.zeros(data.shape, numpy.bool_) fill_value = None safe_missval = self._check_safecast('missing_value') if safe_missval: mval = numpy.array(self.missing_value, self.dtype) if self.scale and is_unsigned_int: mval = mval.view(dtype_unsigned_int) # create mask from missing values. mvalmask = numpy.zeros(data.shape, numpy.bool_) if mval.shape == (): # mval a scalar. mval = [mval] # make into iterable. for m in mval: # is scalar missing value a NaN? try: mvalisnan = numpy.isnan(m) except TypeError: # isnan fails on some dtypes (issue 206) mvalisnan = False if mvalisnan: mvalmask += numpy.isnan(data) else: mvalmask += data==m if mvalmask.any(): # set fill_value for masked array # to missing_value (or 1st element # if missing_value is a vector). fill_value = mval[0] totalmask += mvalmask # set mask=True for data == fill value safe_fillval = self._check_safecast('_FillValue') if safe_fillval: fval = numpy.array(self._FillValue, self.dtype) if self.scale and is_unsigned_int: fval = fval.view(dtype_unsigned_int) # is _FillValue a NaN? try: fvalisnan = numpy.isnan(fval) except: # isnan fails on some dtypes (issue 202) fvalisnan = False if fvalisnan: mask = numpy.isnan(data) elif (data == fval).any(): mask = data==fval else: mask = None if mask is not None: if fill_value is None: fill_value = fval totalmask += mask # issue 209: don't return masked array if variable filling # is disabled. else: if __netcdf4libversion__ < '4.5.1' and\ self._grp.file_format.startswith('NETCDF3'): # issue #908: no_fill not correct for NETCDF3 files before 4.5.1 # before 4.5.1 there was no way to turn off filling on a # per-variable basis for classic files. no_fill=0 else: with nogil: ierr = nc_inq_var_fill(self._grpid,self._varid,&no_fill,NULL) _ensure_nc_success(ierr) # if no_fill is not 1, and not a byte variable, then use default fill value. 
# from http://www.unidata.ucar.edu/software/netcdf/docs/netcdf-c/Fill-Values.html#Fill-Values # "If you need a fill value for a byte variable, it is recommended # that you explicitly define an appropriate _FillValue attribute, as # generic utilities such as ncdump will not assume a default fill # value for byte variables." # Explained here too: # http://www.unidata.ucar.edu/software/netcdf/docs/known_problems.html#ncdump_ubyte_fill # "There should be no default fill values when reading any byte # type, signed or unsigned, because the byte ranges are too # small to assume one of the values should appear as a missing # value unless a _FillValue attribute is set explicitly." # (do this only for non-vlens, since vlens don't have a default _FillValue) if not self._isvlen and (no_fill != 1 or self.dtype.str[1:] not in ['u1','i1']): fillval = numpy.array(default_fillvals[self.dtype.str[1:]],self.dtype) has_fillval = data == fillval # if data is an array scalar, has_fillval will be a boolean. # in that case convert to an array. if type(has_fillval) == bool: has_fillval=numpy.asarray(has_fillval) if has_fillval.any(): if fill_value is None: fill_value = fillval mask=data==fillval totalmask += mask # set mask=True for data outside valid_min,valid_max. # (issue #576) validmin = None; validmax = None # if valid_range exists use that, otherwise # look for valid_min, valid_max. No special # treatment of byte data as described at # http://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html). safe_validrange = self._check_safecast('valid_range') safe_validmin = self._check_safecast('valid_min') safe_validmax = self._check_safecast('valid_max') if safe_validrange and self.valid_range.size == 2: validmin = numpy.array(self.valid_range[0], self.dtype) validmax = numpy.array(self.valid_range[1], self.dtype) else: if safe_validmin: validmin = numpy.array(self.valid_min, self.dtype) if safe_validmax: validmax = numpy.array(self.valid_max, self.dtype) if validmin is not None and self.scale and is_unsigned_int: validmin = validmin.view(dtype_unsigned_int) if validmax is not None and self.scale and is_unsigned_int: validmax = validmax.view(dtype_unsigned_int) # http://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html). # "If the data type is byte and _FillValue # is not explicitly defined, # then the valid range should include all possible values. # Otherwise, the valid range should exclude the _FillValue # (whether defined explicitly or by default) as follows. # If the _FillValue is positive then it defines a valid maximum, # otherwise it defines a valid minimum." byte_type = self.dtype.str[1:] in ['u1','i1'] if safe_fillval: fval = numpy.array(self._FillValue, self.dtype) else: fval = numpy.array(default_fillvals[self.dtype.str[1:]],self.dtype) if byte_type: fval = None if self.dtype.kind != 'S': # don't set mask for character data # issues #761 and #748: setting valid_min/valid_max to the # _FillVaue is too surprising for many users (despite the # netcdf docs attribute best practices suggesting clients # should do this). #if validmin is None and (fval is not None and fval <= 0): # validmin = fval #if validmax is None and (fval is not None and fval > 0): # validmax = fval if validmin is not None: totalmask += data < validmin if validmax is not None: totalmask += data > validmax if fill_value is None and fval is not None: fill_value = fval # if all else fails, use default _FillValue as fill_value # for masked array. 
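# Recap of the masking precedence above: missing_value matches are masked first, then _FillValue matches, then (when prefilling applies and the dtype is not byte) the dtype's default fill value, and finally values outside valid_min/valid_max (or valid_range); the masked array's fill_value is taken from the first of these that applies.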
if fill_value is None: fill_value = default_fillvals[self.dtype.str[1:]] # create masked array with computed mask masked_values = bool(totalmask.any()) if masked_values: data = ma.masked_array(data,mask=totalmask,fill_value=fill_value) else: # issue #785: always return masked array, if no values masked data = ma.masked_array(data) # issue 515 scalar array with mask=True should be converted # to numpy.ma.MaskedConstant to be consistent with slicing # behavior of masked arrays. if data.shape == () and data.mask.all(): # return a scalar numpy masked constant not a 0-d masked array, # so that data == numpy.ma.masked. data = data[()] # changed from [...] (issue #662) elif not self.always_mask and not masked_values: # issue #809: return a regular numpy array if requested # and there are no missing values data = numpy.array(data, copy=False) return data def _pack(self,data): # pack non-masked values using scale_factor and add_offset if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'): data = (data - self.add_offset)/self.scale_factor if self.dtype.kind in 'iu': data = numpy.around(data) elif hasattr(self, 'scale_factor'): data = data/self.scale_factor if self.dtype.kind in 'iu': data = numpy.around(data) elif hasattr(self, 'add_offset'): data = data - self.add_offset if self.dtype.kind in 'iu': data = numpy.around(data) if self.dtype != data.dtype: data = data.astype(self.dtype) # cast data to var type, if necessary. if ma.isMA(data): # if underlying data in masked regions of masked array # corresponds to missing values, don't fill masked array - # just use underlying data instead if hasattr(self, 'missing_value') and \ numpy.all(numpy.isin(data.data[data.mask],self.missing_value)): data = data.data else: if hasattr(self, 'missing_value'): # if missing value is a scalar, use it as fill_value. # if missing value is a vector, raise an exception # since we then don't know how to fill in masked values. if numpy.array(self.missing_value).shape == (): fillval = self.missing_value else: msg="cannot assign fill_value for masked array when missing_value attribute is not a scalar" raise RuntimeError(msg) if numpy.array(fillval).shape != (): fillval = fillval[0] elif hasattr(self, '_FillValue'): fillval = self._FillValue else: fillval = default_fillvals[self.dtype.str[1:]] # some versions of numpy have trouble handling # MaskedConstants when filling - this is is # a workaround (issue #850) if data.shape == (1,) and data.mask.all(): data = numpy.array([fillval],self.dtype) else: data = data.filled(fill_value=fillval) return data def _assign_vlen(self, elem, data): """private method to assign data to a single item in a VLEN variable""" cdef size_t *startp cdef size_t *countp cdef int ndims, n cdef nc_vlen_t *vldata cdef char **strdata cdef ndarray data2 if not self._isvlen: raise TypeError('_assign_vlen method only for use with VLEN variables') ndims = self.ndim msg="data can only be assigned to VLEN variables using integer indices" # check to see that elem is a tuple of integers. # handle negative integers. 
if _is_int(elem): if ndims > 1: raise IndexError(msg) if elem < 0: if self.shape[0]+elem >= 0: elem = self.shape[0]+elem else: raise IndexError("Illegal index") elif isinstance(elem, tuple): if len(elem) != ndims: raise IndexError("Illegal index") elemnew = [] for n,e in enumerate(elem): if not _is_int(e): raise IndexError(msg) elif e < 0: enew = self.shape[n]+e if enew < 0: raise IndexError("Illegal index") else: elemnew.append(enew) else: elemnew.append(e) elem = tuple(elemnew) else: raise IndexError(msg) # set start, count if isinstance(elem, tuple): start = list(elem) else: start = [elem] count = [1]*ndims startp = <size_t *>malloc(sizeof(size_t) * ndims) countp = <size_t *>malloc(sizeof(size_t) * ndims) for n in range(ndims): startp[n] = start[n] countp[n] = count[n] if self.dtype == str: # VLEN string strdata = <char **>malloc(sizeof(char *)) # use _Encoding attribute to specify string encoding - if # not given, use 'utf-8'. encoding = getattr(self,'_Encoding','utf-8') bytestr = _strencode(data,encoding=encoding) strdata[0] = bytestr with nogil: ierr = nc_put_vara(self._grpid, self._varid, startp, countp, strdata) _ensure_nc_success(ierr) free(strdata) else: # regular VLEN if data.dtype != self.dtype: raise TypeError("wrong data type: should be %s, got %s" % (self.dtype,data.dtype)) data2 = data vldata = <nc_vlen_t *>malloc(sizeof(nc_vlen_t)) vldata[0].len = PyArray_SIZE(data2) vldata[0].p = PyArray_DATA(data2) with nogil: ierr = nc_put_vara(self._grpid, self._varid, startp, countp, vldata) _ensure_nc_success(ierr) free(vldata) free(startp) free(countp) def _check_safecast(self, attname): # check to see that variable attribute exists and # can be safely cast to variable data type. msg="""WARNING: %s not used since it cannot be safely cast to variable data type""" % attname if hasattr(self, attname): att = numpy.array(self.getncattr(attname)) else: return False try: atta = numpy.array(att, self.dtype) except ValueError: is_safe = False warnings.warn(msg) return is_safe is_safe = _safecast(att,atta) if not is_safe: warnings.warn(msg) return is_safe def __setitem__(self, elem, data): # This special method is used to assign to the netCDF variable # using "extended slice syntax". The extended slice syntax # is a perfect match for the "start", "count" and "stride" # arguments to the nc_put_var() function, and is much more easy # to use. # if _Encoding is specified for a character variable, convert # numpy array of strings to a numpy array of characters with one more # dimension. if self.chartostring and getattr(self.dtype,'kind',None) == 'S' and\ getattr(self.dtype,'itemsize',None) == 1: # NC_CHAR variable encoding = getattr(self,'_Encoding',None) if encoding is not None: # _Encoding attribute is set # if data is a string or a bytes object, convert to a numpy string array # whose length is equal to the rightmost dimension of the # variable. if type(data) in [str,bytes]: if encoding == 'ascii': data = numpy.asarray(data,dtype='S'+repr(self.shape[-1])) else: data = numpy.asarray(data,dtype='U'+repr(self.shape[-1])) if data.dtype.kind in ['S','U'] and data.dtype.itemsize > 1: # if data is a numpy string array, convert it to an array # of characters with one more dimension.
data = stringtochar(data, encoding=encoding,n_strlen=self.shape[-1]) # if structured data has strings (and _Encoding att set), create view as char arrays # (issue #773) if self._iscompound and \ self._cmptype.dtype != self._cmptype.dtype_view and \ _set_viewdtype(data.dtype) == self._cmptype.dtype_view and \ self.chartostring: # self.chartostring and getattr(self,'_Encoding',None) is not None: # may need to cast input data to aligned type data = data.astype(self._cmptype.dtype_view).view(self._cmptype.dtype) if self._isvlen: # if vlen, should be object array (don't try casting) if self.dtype == str: # for string vars, if data is not an array # assume it is a python string and raise an error # if it is an array, but not an object array. if not isinstance(data, numpy.ndarray): # issue 458, allow Ellipsis to be used for scalar var if type(elem) == type(Ellipsis) and not\ len(self.dimensions): elem = 0 self._assign_vlen(elem, data) return elif data.dtype.kind in ['S', 'U']: if ma.isMA(data): msg='masked arrays cannot be assigned by VLEN str slices' raise TypeError(msg) data = data.astype(object) elif data.dtype.kind != 'O': msg = ('only numpy string, unicode or object arrays can ' 'be assigned to VLEN str var slices') raise TypeError(msg) else: # for non-string vlen arrays, if data is not multi-dim, or # not an object array, assume it represents a single element # of the vlen var. if not isinstance(data, numpy.ndarray) or data.dtype.kind != 'O': # issue 458, allow Ellipsis to be used for scalar var if type(elem) == type(Ellipsis) and not\ len(self.dimensions): elem = 0 # pack as integers if desired. if self.scale: data = self._pack(data) self._assign_vlen(elem, data) return # A numpy or masked array (or an object supporting the buffer interface) is needed. # Convert if necessary. if not ma.isMA(data) and not (hasattr(data,'data') and isinstance(data.data,memoryview)): # if auto scaling is to be done, don't cast to an integer yet. if self.scale and self.dtype.kind in 'iu' and \ hasattr(self, 'scale_factor') or hasattr(self, 'add_offset'): data = numpy.array(data,numpy.float64) else: data = numpy.array(data,self.dtype) # for Enum variable, make sure data is valid. if self._isenum: test = numpy.zeros(data.shape,numpy.bool_) if ma.isMA(data): # fix for new behaviour in numpy.ma in 1.13 (issue #662) for val in self.datatype.enum_dict.values(): test += data.filled() == val else: for val in self.datatype.enum_dict.values(): test += data == val if not numpy.all(test): msg="trying to assign illegal value to Enum variable" raise ValueError(msg) start, count, stride, put_ind =\ _StartCountStride(elem,self.shape,self.dimensions,self._grp,datashape=data.shape,put=True,use_get_vars=self._use_get_vars) datashape = _out_array_shape(count) # if a numpy scalar, create an array of the right size # and fill with scalar values. if data.shape == (): data = numpy.tile(data,datashape) # reshape data array if needed to conform with start,count,stride. if data.ndim != len(datashape) or\ (data.shape != datashape and data.ndim > 1): # issue #1083 # create a view so shape in caller is not modified (issue 90) try: # if extra singleton dims, just reshape data = data.view() data.shape = tuple(datashape) except ValueError: # otherwise broadcast data = numpy.broadcast_to(data, datashape) # Reshape these arrays so we can iterate over them. 
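# Each row of start/count/stride below describes one hyperslab to be written with a single nc_put_var* call; the matching row of put_ind selects the block of the input array that fills that hyperslab.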
start = start.reshape((-1, self.ndim or 1)) count = count.reshape((-1, self.ndim or 1)) stride = stride.reshape((-1, self.ndim or 1)) put_ind = put_ind.reshape((-1, self.ndim or 1)) # quantize data if least_significant_digit attribute # exists (improves compression). if self._has_lsd: data = _quantize(data,self.least_significant_digit) if self.scale and self._isprimitive: # pack non-masked values using scale_factor and add_offset data = self._pack(data) # Fill output array with data chunks. for (a,b,c,i) in zip(start, count, stride, put_ind): dataput = data[tuple(i)] if dataput.size == 0: continue # nothing to write # convert array scalar to regular array with one element. if dataput.shape == (): if self._isvlen: dataput=numpy.array(dataput,'O') else: dataput=numpy.array(dataput,dataput.dtype) self._put(dataput,a,b,c) def __len__(self): if not self.shape: raise TypeError('len() of unsized object') else: return self.shape[0] def assignValue(self,val): """ **`assignValue(self, val)`** assign a value to a scalar variable. Provided for compatibility with Scientific.IO.NetCDF, can also be done by assigning to an Ellipsis slice ([...]).""" if len(self.dimensions): raise IndexError('to assign values to a non-scalar variable, use a slice') self[:]=val def getValue(self): """ **`getValue(self)`** get the value of a scalar variable. Provided for compatibility with Scientific.IO.NetCDF, can also be done by slicing with an Ellipsis ([...]).""" if len(self.dimensions): raise IndexError('to retrieve values from a non-scalar variable, use slicing') return self[slice(None)] def set_auto_chartostring(self,chartostring): """ **`set_auto_chartostring(self,chartostring)`** turn on or off automatic conversion of character variable data to and from numpy fixed length string arrays when the `_Encoding` variable attribute is set. If `chartostring` is set to `True`, when data is read from a character variable (dtype = `S1`) that has an `_Encoding` attribute, it is converted to a numpy fixed length unicode string array (dtype = `UN`, where `N` is the length of the rightmost dimension of the variable). The value of `_Encoding` is the unicode encoding that is used to decode the bytes into strings. When numpy string data is written to a variable it is converted back to individual bytes, with the number of bytes in each string equalling the rightmost dimension of the variable. The default value of `chartostring` is `True` (automatic conversions are performed). """ self.chartostring = bool(chartostring) def use_nc_get_vars(self,use_nc_get_vars): """ **`use_nc_get_vars(self,use_nc_get_vars)`** enable the use of netcdf library routine `nc_get_vars` to retrieve strided variable slices. By default, `nc_get_vars` may not be used (depending on the version of the netcdf-c library being used), since it may be slower than multiple calls to the unstrided read routine `nc_get_vara`. """ self._use_get_vars = bool(use_nc_get_vars) def set_auto_maskandscale(self,maskandscale): """ **`set_auto_maskandscale(self,maskandscale)`** turn on or off automatic conversion of variable data to and from masked arrays, automatic packing/unpacking of variable data using `scale_factor` and `add_offset` attributes and automatic conversion of signed integer data to unsigned integer data if the `_Unsigned` attribute exists and is set to "true" (or "True").
If `maskandscale` is set to `True`, when data is read from a variable it is converted to a masked array if any of the values are exactly equal to either the netCDF _FillValue or the value specified by the missing_value variable attribute. The fill_value of the masked array is set to the missing_value attribute (if it exists), otherwise the netCDF _FillValue attribute (which has a default value for each data type). If the variable has no missing_value attribute, the _FillValue is used instead. If the variable has valid_min/valid_max and missing_value attributes, data outside the specified range will be masked. When data is written to a variable, the masked array is converted back to a regular numpy array by replacing all the masked values by the missing_value attribute of the variable (if it exists). If the variable has no missing_value attribute, the _FillValue is used instead. If `maskandscale` is set to `True`, and the variable has a `scale_factor` or an `add_offset` attribute, then data read from that variable is unpacked using:: data = self.scale_factor*data + self.add_offset When data is written to a variable it is packed using:: data = (data - self.add_offset)/self.scale_factor If scale_factor is present but add_offset is missing, add_offset is assumed zero. If add_offset is present but scale_factor is missing, scale_factor is assumed to be one. For more information on how `scale_factor` and `add_offset` can be used to provide simple compression, see the [PSL metadata conventions](http://www.esrl.noaa.gov/psl/data/gridded/conventions/cdc_netcdf_standard.shtml). In addition, if `maskandscale` is set to `True`, and if the variable has an attribute `_Unsigned` set to "true", and the variable has a signed integer data type, a view to the data is returned with the corresponding unsigned integer data type. This convention is used by the netcdf-java library to save unsigned integer data in `NETCDF3` or `NETCDF4_CLASSIC` files (since the `NETCDF3` data model does not have unsigned integer data types). The default value of `maskandscale` is `True` (automatic conversions are performed). """ self.scale = self.mask = bool(maskandscale) def set_auto_scale(self,scale): """ **`set_auto_scale(self,scale)`** turn on or off automatic packing/unpacking of variable data using `scale_factor` and `add_offset` attributes. Also turns on and off automatic conversion of signed integer data to unsigned integer data if the variable has an `_Unsigned` attribute set to "true" or "True". If `scale` is set to `True`, and the variable has a `scale_factor` or an `add_offset` attribute, then data read from that variable is unpacked using:: data = self.scale_factor*data + self.add_offset When data is written to a variable it is packed using:: data = (data - self.add_offset)/self.scale_factor If scale_factor is present but add_offset is missing, add_offset is assumed zero. If add_offset is present but scale_factor is missing, scale_factor is assumed to be one. For more information on how `scale_factor` and `add_offset` can be used to provide simple compression, see the [PSL metadata conventions](http://www.esrl.noaa.gov/psl/data/gridded/conventions/cdc_netcdf_standard.shtml). In addition, if `scale` is set to `True`, and if the variable has an attribute `_Unsigned` set to "true", and the variable has a signed integer data type, a view to the data is returned with the corresponding unsigned integer datatype.
This convention is used by the netcdf-java library to save unsigned integer data in `NETCDF3` or `NETCDF4_CLASSIC` files (since the `NETCDF3` data model does not have unsigned integer data types). The default value of `scale` is `True` (automatic conversions are performed). """ self.scale = bool(scale) def set_auto_mask(self,mask): """ **`set_auto_mask(self,mask)`** turn on or off automatic conversion of variable data to and from masked arrays. If `mask` is set to `True`, when data is read from a variable it is converted to a masked array if any of the values are exactly equal to either the netCDF _FillValue or the value specified by the missing_value variable attribute. The fill_value of the masked array is set to the missing_value attribute (if it exists), otherwise the netCDF _FillValue attribute (which has a default value for each data type). If the variable has no missing_value attribute, the _FillValue is used instead. If the variable has valid_min/valid_max and missing_value attributes, data outside the specified range will be masked. When data is written to a variable, the masked array is converted back to a regular numpy array by replacing all the masked values by the missing_value attribute of the variable (if it exists). If the variable has no missing_value attribute, the _FillValue is used instead. The default value of `mask` is `True` (automatic conversions are performed). """ self.mask = bool(mask) def set_always_mask(self,always_mask): """ **`set_always_mask(self,always_mask)`** turn on or off conversion of data without missing values to regular numpy arrays. `always_mask` is a Boolean determining if automatic conversion of masked arrays with no missing values to regular numpy arrays shall be applied. Default is True. Set to False to restore the default behaviour in versions prior to 1.4.1 (numpy array returned unless missing values are present, otherwise masked array returned). """ self.always_mask = bool(always_mask) def set_ncstring_attrs(self,ncstring_attrs): """ **`set_ncstring_attrs(self,ncstring_attrs)`** turn on or off creating NC_STRING string attributes. If `ncstring_attrs` is set to `True` then text attributes will be variable-length NC_STRINGs. The default value of `ncstring_attrs` is `False` (writing ascii text attributes as NC_CHAR). """ self._ncstring_attrs__ = bool(ncstring_attrs) def _put(self,ndarray data,start,count,stride): """Private method to put data into a netCDF variable""" cdef int ierr, ndims cdef npy_intp totelem, dataelem, i cdef size_t *startp cdef size_t *countp cdef ptrdiff_t *stridep cdef char **strdata cdef void* elptr cdef char* databuff cdef ndarray dataarr cdef nc_vlen_t *vldata # rank of variable. ndims = len(self.dimensions) # make sure data is contiguous. # if not, make a local copy. if not PyArray_ISCONTIGUOUS(data): data = data.copy() # fill up startp,countp,stridep.
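# Negative strides are handled by shifting the start, writing with the positive stride, and reversing the data along those axes first (the netcdf-c put routines expect positive strides).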
totelem = 1 negstride = 0 sl = [] startp = malloc(sizeof(size_t) * ndims) countp = malloc(sizeof(size_t) * ndims) stridep = malloc(sizeof(ptrdiff_t) * ndims) for n in range(ndims): count[n] = abs(count[n]) # make -1 into +1 countp[n] = count[n] # for neg strides, reverse order (then flip that axis after data read in) if stride[n] < 0: negstride = 1 stridep[n] = -stride[n] startp[n] = start[n]+stride[n]*(count[n]-1) stride[n] = -stride[n] sl.append(slice(None, None, -1)) # this slice will reverse the data else: startp[n] = start[n] stridep[n] = stride[n] sl.append(slice(None,None, 1)) totelem = totelem*countp[n] # check to see that size of data array is what is expected # for slice given. dataelem = PyArray_SIZE(data) if totelem != dataelem: raise IndexError('size of data array does not conform to slice') if negstride: # reverse data along axes with negative strides. data = data[tuple(sl)].copy() # make sure a copy is made. if self._isprimitive or self._iscompound or self._isenum: # primitive, enum or compound data type. # if data type of array doesn't match variable, # try to cast the data. if self.dtype != data.dtype: data = data.astype(self.dtype) # cast data, if necessary. # byte-swap data in numpy array so that is has native # endian byte order (this is what netcdf-c expects - # issue #554, pull request #555) if not data.dtype.isnative: data = data.byteswap() # strides all 1 or scalar variable, use put_vara (faster) if self._grp.auto_complex: with nogil: ierr = pfnc_put_vars(self._grpid, self._varid, startp, countp, stridep, PyArray_DATA(data)) elif sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_put_vara(self._grpid, self._varid, startp, countp, PyArray_DATA(data)) else: with nogil: ierr = nc_put_vars(self._grpid, self._varid, startp, countp, stridep, PyArray_DATA(data)) _ensure_nc_success(ierr) elif self._isvlen: if data.dtype.char !='O': raise TypeError('data to put in string variable must be an object array containing Python strings') # flatten data array. data = data.flatten() if self.dtype == str: # convert all elements from strings to bytes # use _Encoding attribute to specify string encoding - if # not given, use 'utf-8'. encoding = getattr(self,'_Encoding','utf-8') for n in range(data.shape[0]): data[n] = _strencode(data[n],encoding=encoding) # vlen string (NC_STRING) # loop over elements of object array, put data buffer for # each element in struct. # allocate struct array to hold vlen data. strdata = malloc(sizeof(char *)*totelem) for i in range(totelem): strdata[i] = data[i] # strides all 1 or scalar variable, use put_vara (faster) if sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_put_vara(self._grpid, self._varid, startp, countp, strdata) else: with nogil: ierr = nc_put_vars(self._grpid, self._varid, startp, countp, stridep, strdata) _ensure_nc_success(ierr) free(strdata) else: # regular vlen. # loop over elements of object array, put data buffer for # each element in struct. databuff = PyArray_BYTES(data) # allocate struct array to hold vlen data. vldata = malloc(totelem*sizeof(nc_vlen_t)) for i in range(totelem): elptr = (databuff)[0] dataarr = elptr if self.dtype != dataarr.dtype.str[1:]: #dataarr = dataarr.astype(self.dtype) # cast data, if necessary. # casting doesn't work ?? 
just raise TypeError raise TypeError("wrong data type in object array: should be %s, got %s" % (self.dtype,dataarr.dtype)) vldata[i].len = PyArray_SIZE(dataarr) vldata[i].p = PyArray_DATA(dataarr) databuff = databuff + PyArray_STRIDES(data)[0] # strides all 1 or scalar variable, use put_vara (faster) if sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_put_vara(self._grpid, self._varid, startp, countp, vldata) else: with nogil: ierr = nc_put_vars(self._grpid, self._varid, startp, countp, stridep, vldata) _ensure_nc_success(ierr) # free the pointer array. free(vldata) free(startp) free(countp) free(stridep) def _get(self,start,count,stride): """Private method to retrieve data from a netCDF variable""" cdef int ierr, ndims cdef npy_intp totelem, i cdef size_t *startp cdef size_t *countp cdef ptrdiff_t *stridep cdef ndarray data, dataarr cdef void *elptr cdef char **strdata cdef nc_vlen_t *vldata # if one of the counts is negative, then it is an index # and not a slice so the resulting array # should be 'squeezed' to remove the singleton dimension. shapeout = () squeeze_out = False for lendim in count: if lendim == -1: shapeout = shapeout + (1,) squeeze_out = True else: shapeout = shapeout + (lendim,) # rank of variable. ndims = len(self.dimensions) # fill up startp,countp,stridep. negstride = 0 sl = [] startp = malloc(sizeof(size_t) * ndims) countp = malloc(sizeof(size_t) * ndims) stridep = malloc(sizeof(ptrdiff_t) * ndims) for n in range(ndims): count[n] = abs(count[n]) # make -1 into +1 countp[n] = count[n] # for neg strides, reverse order (then flip that axis after data read in) if stride[n] < 0: negstride = 1 stridep[n] = -stride[n] startp[n] = start[n]+stride[n]*(count[n]-1) stride[n] = -stride[n] sl.append(slice(None, None, -1)) # this slice will reverse the data else: startp[n] = start[n] stridep[n] = stride[n] sl.append(slice(None,None, 1)) if self._isprimitive or self._iscompound or self._isenum: data = numpy.empty(shapeout, self.dtype) # strides all 1 or scalar variable, use get_vara (faster) # if count contains a zero element, no data is being read if 0 not in count: if self._grp.auto_complex: with nogil: ierr = pfnc_get_vars(self._grpid, self._varid, startp, countp, stridep, PyArray_DATA(data)) elif sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_get_vara(self._grpid, self._varid, startp, countp, PyArray_DATA(data)) else: with nogil: ierr = nc_get_vars(self._grpid, self._varid, startp, countp, stridep, PyArray_DATA(data)) else: ierr = 0 if ierr == NC_EINVALCOORDS: raise IndexError('index exceeds dimension bounds') elif ierr != NC_NOERR: _ensure_nc_success(ierr) elif self._isvlen: # allocate array of correct primitive type. data = numpy.empty(shapeout, 'O') # flatten data array. data = data.flatten() totelem = PyArray_SIZE(data) if self.dtype == str: # vlen string (NC_STRING) # allocate pointer array to hold string data. strdata = malloc(sizeof(char *) * totelem) # strides all 1 or scalar variable, use get_vara (faster) if sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_get_vara(self._grpid, self._varid, startp, countp, strdata) else: with nogil: ierr = nc_get_vars(self._grpid, self._varid, startp, countp, stridep, strdata) if ierr == NC_EINVALCOORDS: raise IndexError elif ierr != NC_NOERR: _ensure_nc_success(ierr) # loop over elements of object array, fill array with # contents of strdata. # use _Encoding attribute to decode string to bytes - if # not given, use 'utf-8'. 
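# Sketch (hypothetical attribute value): setting v._Encoding = 'latin-1' on a
# string variable makes this read path, and the matching write path in _put,
# use that codec in place of the 'utf-8' default.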
encoding = getattr(self,'_Encoding','utf-8') for i in range(totelem): if strdata[i]: data[i] = strdata[i].decode(encoding) else: data[i] = "" # issue 915 # reshape the output array data = numpy.reshape(data, shapeout) # free string data internally allocated in netcdf C lib with nogil: ierr = nc_free_string(totelem, strdata) # free the pointer array free(strdata) else: # regular vlen # allocate struct array to hold vlen data. vldata = malloc(totelem*sizeof(nc_vlen_t)) for i in range(totelem): vldata[i].len = 0 vldata[i].p = 0 # strides all 1 or scalar variable, use get_vara (faster) if sum(stride) == ndims or ndims == 0: with nogil: ierr = nc_get_vara(self._grpid, self._varid, startp, countp, vldata) else: with nogil: ierr = nc_get_vars(self._grpid, self._varid, startp, countp, stridep, vldata) if ierr == NC_EINVALCOORDS: raise IndexError elif ierr != NC_NOERR: _ensure_nc_success(ierr) # loop over elements of object array, fill array with # contents of vlarray struct, put array in object array. for i in range(totelem): arrlen = vldata[i].len dataarr = numpy.empty(arrlen, self.dtype) #dataarr.data = vldata[i].p memcpy(PyArray_DATA(dataarr), vldata[i].p, dataarr.nbytes) data[i] = dataarr # reshape the output array data = numpy.reshape(data, shapeout) # free vlen data internally allocated in netcdf C lib with nogil: ierr = nc_free_vlens(totelem, vldata) # free the pointer array free(vldata) free(startp) free(countp) free(stridep) if negstride: # reverse data along axes with negative strides. data = data[tuple(sl)].copy() # make a copy so data is contiguous. # netcdf-c always returns data in native byte order, # regardless of variable endian-ness. Here we swap the # bytes if the variable dtype is not native endian, so the # dtype of the returned numpy array matches the variable dtype. # (pull request #555, issue #554). if not data.dtype.isnative: data.byteswap(True) # in-place byteswap if not self.dimensions: return data[0] # a scalar elif squeeze_out: return numpy.squeeze(data) else: return data def set_collective(self, value): """**`set_collective(self,True_or_False)`** turn on or off collective parallel IO access. Ignored if file is not open for parallel access. """ if not __has_parallel_support__: return mode = NC_COLLECTIVE if value else NC_INDEPENDENT with nogil: ierr = nc_var_par_access(self._grpid, self._varid, mode) _ensure_nc_success(ierr) def get_dims(self): """ **`get_dims(self)`** return a tuple of `Dimension` instances associated with this `Variable`. """ return tuple(_find_dim(self._grp, dim) for dim in self.dimensions) def __reduce__(self): # raise error is user tries to pickle a Variable object. raise NotImplementedError('Variable is not picklable') # Compound datatype support. cdef class CompoundType: """ A `CompoundType` instance is used to describe a compound data type, and can be passed to the the `Dataset.createVariable` method of a `Dataset` or `Group` instance. Compound data types map to numpy structured arrays. See `CompoundType.__init__` for more details. The instance variables `dtype` and `name` should not be modified by the user. """ cdef public nc_type _nc_type cdef public dtype, dtype_view, name def __init__(self, grp, object dt, object dtype_name, **kwargs): """ ***`__init__(group, datatype, datatype_name)`*** CompoundType constructor. **`grp`**: `Group` instance to associate with the compound datatype. **`dt`**: A numpy dtype object describing a structured (a.k.a record) array. 
Can be composed of homogeneous numeric or character data types, or other structured array data types. **`dtype_name`**: a Python string containing a description of the compound data type. ***Note 1***: When creating nested compound data types, the inner compound data types must already be associated with CompoundType instances (so create CompoundType instances for the innermost structures first). ***Note 2***: `CompoundType` instances should be created using the `Dataset.createCompoundType` method of a `Dataset` or `Group` instance, not using this class directly. """ cdef nc_type xtype # convert dt to a numpy datatype object # and make sure the isalignedstruct flag is set to True # (so padding is added to the fields to match what a # C compiler would output for a similar C-struct). # This is needed because nc_get_vara is # apparently expecting the data buffer to include # padding to match what a C struct would have. # (this may or may not be still true, but empirical # evidence suggests that segfaults occur if this # alignment step is skipped - see issue #705). # numpy string subdtypes (i.e. 'S80') are # automatically converted to character array # subtypes (i.e. ('S1',80)). If '_Encoding' # variable attribute is set, data will be converted # to and from the string array representation with views. dt = _set_alignment(numpy.dtype(dt)) # create a view datatype for converting char arrays to/from strings dtview = _set_viewdtype(numpy.dtype(dt)) if 'typeid' in kwargs: xtype = kwargs['typeid'] else: xtype = _def_compound(grp, dt, dtype_name) self._nc_type = xtype self.dtype = dt self.dtype_view = dtview self.name = dtype_name def __repr__(self): return self.__str__() def __str__(self): typ = repr(type(self)).replace("._netCDF4", "") return "%s: name = '%s', numpy dtype = %s" %\ (typ, self.name, self.dtype) def __reduce__(self): # raise error is user tries to pickle a CompoundType object. raise NotImplementedError('CompoundType is not picklable') def _set_alignment(dt): # recursively set alignment flag in nested structured data type names = dt.names; formats = [] for name in names: fmt = dt.fields[name][0] if fmt.kind == 'V': if fmt.shape == (): dtx = _set_alignment(dt.fields[name][0]) else: if fmt.subdtype[0].kind == 'V': # structured dtype raise TypeError('nested structured dtype arrays not supported') else: dtx = dt.fields[name][0] else: # convert character string elements to char arrays if fmt.kind == 'S' and fmt.itemsize != 1: dtx = numpy.dtype('(%s,)S1' % fmt.itemsize) else: # primitive data type dtx = dt.fields[name][0] formats.append(dtx) # leave out offsets, they will be re-computed to preserve alignment. 
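# Numeric sketch of the align=True effect (hypothetical field spec): numpy
# inserts the same padding a C compiler would, so the itemsize can grow:
#
#   spec = {'names': ['a', 'b'], 'formats': ['S1', 'f8']}
#   numpy.dtype(spec).itemsize              # 9  (packed)
#   numpy.dtype(spec, align=True).itemsize  # 16 (7 pad bytes after 'a')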
dtype_dict = {'names':names,'formats':formats} return numpy.dtype(dtype_dict, align=True) def _set_viewdtype(dt): # recursively change character array dtypes to string dtypes names = dt.names; formats = [] for name in names: fmt = dt.fields[name][0] if fmt.kind == 'V': if fmt.shape == (): dtx = _set_viewdtype(dt.fields[name][0]) else: if fmt.subdtype[0].kind == 'V': # structured dtype raise TypeError('nested structured dtype arrays not supported') elif fmt.subdtype[0].kind == 'S' and len(dt.fields[name][0].shape) == 1: lenchar = dt.fields[name][0].shape[0] dtx = numpy.dtype('S%s' % lenchar) else: dtx = dt.fields[name][0] else: # primitive data type dtx = dt.fields[name][0] formats.append(dtx) dtype_dict = {'names':names,'formats':formats} return numpy.dtype(dtype_dict, align=True) cdef _def_compound(grp, object dt, object dtype_name): # private function used to construct a netcdf compound data type # from a numpy dtype object by CompoundType.__init__. cdef nc_type xtype, xtype_tmp cdef int ierr, ndims, grpid cdef size_t offset, size cdef char *namstring cdef char *nested_namstring cdef int *dim_sizes bytestr = _strencode(dtype_name) namstring = bytestr size = dt.itemsize grpid = grp._grpid with nogil: ierr = nc_def_compound(grpid, size, namstring, &xtype) _ensure_nc_success(ierr) names = list(dt.fields.keys()) formats = [v[0] for v in dt.fields.values()] offsets = [v[1] for v in dt.fields.values()] # make sure entries in lists sorted by offset. # (don't know why this is necessary, but it is for version 4.0.1) names = _sortbylist(names, offsets) formats = _sortbylist(formats, offsets) offsets.sort() for name, format, offset in zip(names, formats, offsets): bytestr = _strencode(name) namstring = bytestr if format.kind != 'V': # scalar primitive type try: xtype_tmp = _nptonctype[format.str[1:]] except KeyError: raise ValueError('Unsupported compound type element') with nogil: ierr = nc_insert_compound(grpid, xtype, namstring, offset, xtype_tmp) _ensure_nc_success(ierr) else: if format.shape == (): # nested scalar compound type # find this compound type in this group or it's parents. xtype_tmp = _find_cmptype(grp, format) bytestr = _strencode(name) nested_namstring = bytestr with nogil: ierr = nc_insert_compound(grpid, xtype,\ nested_namstring,\ offset, xtype_tmp) _ensure_nc_success(ierr) else: # nested array compound element ndims = len(format.shape) dim_sizes = malloc(sizeof(int) * ndims) for n in range(ndims): dim_sizes[n] = format.shape[n] if format.subdtype[0].kind != 'V': # primitive type. try: xtype_tmp = _nptonctype[format.subdtype[0].str[1:]] except KeyError: raise ValueError('Unsupported compound type element') with nogil: ierr = nc_insert_array_compound(grpid,xtype,namstring, offset,xtype_tmp,ndims,dim_sizes) _ensure_nc_success(ierr) else: # nested array compound type. raise TypeError('nested structured dtype arrays not supported') # this code is untested and probably does not work, disable # for now... # # find this compound type in this group or it's parents. # xtype_tmp = _find_cmptype(grp, format.subdtype[0]) # bytestr = _strencode(name) # nested_namstring = bytestr # with nogil: # ierr = nc_insert_array_compound(grpid,xtype,\ # nested_namstring,\ # offset,xtype_tmp,\ # ndims,dim_sizes) # _ensure_nc_success(ierr) free(dim_sizes) return xtype cdef _find_cmptype(grp, dtype): # look for data type in this group and it's parents. # return datatype id when found, if not found, raise exception. 
cdef nc_type xtype match = False for cmpname, cmpdt in grp.cmptypes.items(): xtype = cmpdt._nc_type names1 = dtype.fields.keys() names2 = cmpdt.dtype.fields.keys() formats1 = [v[0] for v in dtype.fields.values()] formats2 = [v[0] for v in cmpdt.dtype.fields.values()] formats2v = [v[0] for v in cmpdt.dtype_view.fields.values()] # match names, formats, but not offsets (they may be changed # by netcdf lib). if (names1 == names2 and formats1 == formats2) or formats1 == formats2v: match = True break if not match: try: parent_grp = grp.parent except AttributeError: raise ValueError("cannot find compound type in this group or parent groups") if parent_grp is None: raise ValueError("cannot find compound type in this group or parent groups") else: xtype = _find_cmptype(parent_grp,dtype) return xtype cdef _read_compound(group, nc_type xtype, endian=None): # read a compound data type id from an existing file, # construct a corresponding numpy dtype instance, # then use that to create a CompoundType instance. # called by _get_vars, _get_types and _get_att. # Calls itself recursively for nested compound types. cdef int ierr, nf, numdims, ndim, classp, _grpid cdef size_t nfields, offset cdef nc_type field_typeid cdef int *dim_sizes cdef char field_namstring[NC_MAX_NAME+1] cdef char cmp_namstring[NC_MAX_NAME+1] # get name and number of fields. _grpid = group._grpid with nogil: ierr = nc_inq_compound(_grpid, xtype, cmp_namstring, NULL, &nfields) _ensure_nc_success(ierr) name = cmp_namstring.decode('utf-8') # loop over fields. names = [] formats = [] offsets = [] for nf in range(nfields): with nogil: ierr = nc_inq_compound_field(_grpid, xtype, nf, field_namstring, &offset, &field_typeid, &numdims, NULL) _ensure_nc_success(ierr) dim_sizes = <int *>malloc(sizeof(int) * numdims) with nogil: ierr = nc_inq_compound_field(_grpid, xtype, nf, field_namstring, &offset, &field_typeid, &numdims, dim_sizes) _ensure_nc_success(ierr) field_name = field_namstring.decode('utf-8') names.append(field_name) offsets.append(offset) # if numdims=0, not an array. field_shape = () if numdims != 0: for ndim in range(numdims): field_shape = field_shape + (dim_sizes[ndim],) free(dim_sizes) # check to see if this field is a nested compound type. try: field_type = _nctonptype[field_typeid] if endian is not None: # prepend the endian character to the numpy type string # (as in _read_vlen and _read_enum). field_type = endian + field_type except KeyError: with nogil: ierr = nc_inq_user_type(_grpid, field_typeid,NULL,NULL,NULL,NULL,&classp) if classp == NC_COMPOUND: # a compound type # recursively call this function. field_type = _read_compound(group, field_typeid, endian=endian) else: raise KeyError('compound field of an unsupported data type') if field_shape != (): formats.append((field_type,field_shape)) else: formats.append(field_type) # make sure entries in lists sorted by offset. names = _sortbylist(names, offsets) formats = _sortbylist(formats, offsets) offsets.sort() # create a dict that can be converted into a numpy dtype. dtype_dict = {'names':names,'formats':formats,'offsets':offsets} return CompoundType(group, dtype_dict, name, typeid=xtype) # VLEN datatype support. cdef class VLType: """ A `VLType` instance is used to describe a variable length (VLEN) data type, and can be passed to the `Dataset.createVariable` method of a `Dataset` or `Group` instance. See `VLType.__init__` for more details. The instance variables `dtype` and `name` should not be modified by the user.
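Example usage (a sketch with hypothetical file and variable names; `VLType` instances are normally created with `Dataset.createVLType`):

```python
>>> import numpy as np
>>> f = Dataset("vlen_demo.nc", "w")
>>> vlen_t = f.createVLType(np.int32, "phony_vlen")
>>> x = f.createDimension("x", 3)
>>> ragged = f.createVariable("ragged", vlen_t, ("x",))
>>> ragged[0] = np.array([1, 2], "i4")     # rows may differ in length
>>> ragged[1] = np.array([1, 2, 3], "i4")
```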
""" cdef public nc_type _nc_type cdef public dtype, name def __init__(self, grp, object dt, object dtype_name, **kwargs): """ **`__init__(group, datatype, datatype_name)`** VLType constructor. **`group`**: `Group` instance to associate with the VLEN datatype. **`datatype`**: An numpy dtype object describing the component type for the variable length array. **`datatype_name`**: a Python string containing a description of the VLEN data type. ***`Note`***: `VLType` instances should be created using the `Dataset.createVLType` method of a `Dataset` or `Group` instance, not using this class directly. """ cdef nc_type xtype if 'typeid' in kwargs: xtype = kwargs['typeid'] else: xtype, dt = _def_vlen(grp, dt, dtype_name) self._nc_type = xtype self.dtype = dt if dt == str: self.name = None else: self.name = dtype_name def __repr__(self): return self.__str__() def __str__(self): typ = repr(type(self)).replace("._netCDF4", "") if self.dtype == str: return '%r: string type' % (typ,) else: return "%r: name = '%s', numpy dtype = %s" %\ (typ, self.name, self.dtype) def __reduce__(self): # raise error is user tries to pickle a VLType object. raise NotImplementedError('VLType is not picklable') cdef _def_vlen(grp, object dt, object dtype_name): # private function used to construct a netcdf VLEN data type # from a numpy dtype object or python str object by VLType.__init__. cdef nc_type xtype, xtype_tmp cdef int ierr, ndims, grpid cdef size_t offset, size cdef char *namstring cdef char *nested_namstring grpid = grp._grpid if dt == str: # python string, use NC_STRING xtype = NC_STRING # dtype_name ignored else: # numpy datatype bytestr = _strencode(dtype_name) namstring = bytestr dt = numpy.dtype(dt) # convert to numpy datatype. if dt.str[1:] in _supportedtypes: # find netCDF primitive data type corresponding to # specified numpy data type. xtype_tmp = _nptonctype[dt.str[1:]] with nogil: ierr = nc_def_vlen(grpid, namstring, xtype_tmp, &xtype); _ensure_nc_success(ierr) else: raise KeyError("unsupported datatype specified for VLEN") return xtype, dt cdef _read_vlen(group, nc_type xtype, endian=None): # read a VLEN data type id from an existing file, # construct a corresponding numpy dtype instance, # then use that to create a VLType instance. # called by _get_types, _get_vars. cdef int ierr, grpid cdef size_t vlsize cdef nc_type base_xtype cdef char vl_namstring[NC_MAX_NAME+1] grpid = group._grpid if xtype == NC_STRING: dt = str name = None else: with nogil: ierr = nc_inq_vlen(grpid, xtype, vl_namstring, &vlsize, &base_xtype) _ensure_nc_success(ierr) name = vl_namstring.decode('utf-8') try: datatype = _nctonptype[base_xtype] if endian is not None: datatype = endian + datatype dt = numpy.dtype(datatype) # see if it is a primitive type except KeyError: raise KeyError("unsupported component type for VLEN") return VLType(group, dt, name, typeid=xtype) # Enum datatype support. cdef class EnumType: """ A `EnumType` instance is used to describe an Enum data type, and can be passed to the the `Dataset.createVariable` method of a `Dataset` or `Group` instance. See `EnumType.__init__` for more details. The instance variables `dtype`, `name` and `enum_dict` should not be modified by the user. """ cdef public nc_type _nc_type cdef public dtype, name, enum_dict def __init__(self, grp, object dt, object dtype_name, object enum_dict, **kwargs): """ **`__init__(group, datatype, datatype_name, enum_dict)`** EnumType constructor. **`group`**: `Group` instance to associate with the VLEN datatype. 
**`datatype`**: A numpy integer dtype object describing the base type for the Enum. **`datatype_name`**: a Python string containing a description of the Enum data type. **`enum_dict`**: a Python dictionary containing the Enum field/value pairs. ***`Note`***: `EnumType` instances should be created using the `Dataset.createEnumType` method of a `Dataset` or `Group` instance, not using this class directly. """ cdef nc_type xtype if 'typeid' in kwargs: xtype = kwargs['typeid'] else: xtype, dt = _def_enum(grp, dt, dtype_name, enum_dict) self._nc_type = xtype self.dtype = dt self.name = dtype_name self.enum_dict = enum_dict def __repr__(self): return self.__str__() def __str__(self): typ = repr(type(self)).replace("._netCDF4", "") return "%r: name = '%s', numpy dtype = %s, fields/values =%s" %\ (typ, self.name, self.dtype, self.enum_dict) def __reduce__(self): # raise error if user tries to pickle an EnumType object. raise NotImplementedError('EnumType is not picklable') cdef _def_enum(grp, object dt, object dtype_name, object enum_dict): # private function used to construct a netCDF Enum data type # from a numpy dtype object or python str object by EnumType.__init__. cdef nc_type xtype, xtype_tmp cdef int ierr, grpid cdef char *namstring cdef ndarray value_arr bytestr = _strencode(dtype_name) namstring = bytestr grpid = grp._grpid dt = numpy.dtype(dt) # convert to numpy datatype. if dt.str[1:] in _intnptonctype.keys(): # find netCDF primitive data type corresponding to # specified numpy data type. xtype_tmp = _intnptonctype[dt.str[1:]] with nogil: ierr = nc_def_enum(grpid, xtype_tmp, namstring, &xtype) _ensure_nc_success(ierr) else: msg="unsupported datatype specified for ENUM (must be integer)" raise KeyError(msg) # insert named members into enum type. for field in enum_dict: value_arr = numpy.array(enum_dict[field],dt) bytestr = _strencode(field) namstring = bytestr with nogil: ierr = nc_insert_enum(grpid, xtype, namstring, PyArray_DATA(value_arr)) _ensure_nc_success(ierr) return xtype, dt cdef _read_enum(group, nc_type xtype, endian=None): # read an Enum data type id from an existing file, # construct a corresponding numpy dtype instance, # then use that to create an EnumType instance. # called by _get_types, _get_vars. cdef int ierr, grpid, nmem cdef ndarray enum_val cdef nc_type base_xtype cdef char enum_namstring[NC_MAX_NAME+1] cdef size_t nmembers grpid = group._grpid # get name, datatype, and number of members. with nogil: ierr = nc_inq_enum(grpid, xtype, enum_namstring, &base_xtype, NULL,\ &nmembers) _ensure_nc_success(ierr) enum_name = enum_namstring.decode('utf-8') try: datatype = _nctonptype[base_xtype] if endian is not None: datatype = endian + datatype dt = numpy.dtype(datatype) # see if it is a primitive type except KeyError: raise KeyError("unsupported component type for ENUM") # loop over members, build dict. enum_dict = {} enum_val = numpy.empty(1,dt) for nmem in range(nmembers): with nogil: ierr = nc_inq_enum_member(grpid, xtype, nmem, \ enum_namstring,PyArray_DATA(enum_val)) _ensure_nc_success(ierr) name = enum_namstring.decode('utf-8') enum_dict[name] = enum_val.item() return EnumType(group, dt, enum_name, enum_dict, typeid=xtype) cdef _strencode(pystr,encoding=None): # encode a string into bytes. If already bytes, do nothing. # uses 'utf-8' for default encoding. if encoding is None: encoding = 'utf-8' try: return pystr.encode(encoding) except (AttributeError, UnicodeDecodeError): return pystr # already bytes or unicode?
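# Usage sketch for the Enum machinery above (hypothetical names; normally
# reached through Dataset.createEnumType rather than EnumType directly):
#
#   nc = Dataset('enum_demo.nc', 'w')
#   cloud_t = nc.createEnumType(numpy.uint8, 'cloud_t',
#                               {'clear': 0, 'cumulus': 1, 'stratus': 2})
#   cloud = nc.createVariable('cloud', cloud_t, ())
#   cloud[...] = 1   # stored as the uint8 value defined for 'cumulus'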
def _to_ascii(bytestr): # encode a byte string to an ascii encoded string. return str(bytestr,encoding='ascii') def stringtoarr(string,NUMCHARS,dtype='S'): """ **`stringtoarr(a, NUMCHARS,dtype='S')`** convert a string to a character array of length `NUMCHARS` **`a`**: Input python string. **`NUMCHARS`**: number of characters used to represent string (if len(a) < `NUMCHARS`, it will be padded on the right with null characters). **`dtype`**: type of numpy array to return. Default is `'S'`, which means an array of dtype `'S1'` will be returned. If dtype=`'U'`, a unicode array (dtype = `'U1'`) will be returned. returns a rank 1 numpy character array of length NUMCHARS with datatype `'S1'` (default) or `'U1'` (if dtype=`'U'`)""" if dtype not in ["S","U"]: raise ValueError("dtype must be string or unicode ('S' or 'U')") arr = numpy.zeros(NUMCHARS,dtype+'1') arr[0:len(string)] = tuple(string) return arr def stringtochar(a,encoding='utf-8',n_strlen=None): """ **`stringtochar(a,encoding='utf-8',n_strlen=None)`** convert a string array to a character array with one extra dimension **`a`**: Input numpy string array with numpy datatype `'SN'` or `'UN'`, where N is the number of characters in each string. Will be converted to an array of characters (datatype `'S1'` or `'U1'`) of shape `a.shape + (N,)`. optional kwarg `encoding` can be used to specify character encoding (default `utf-8`). If `encoding` is 'none' or 'bytes', the input array is treated as an array of raw byte strings (`numpy.string_`). optional kwarg `n_strlen` is the number of characters in each string. Default is None, which means `n_strlen` will be set to a.itemsize (the number of bytes used to represent each string in the input array). returns a numpy character array with datatype `'S1'` or `'U1'` and shape `a.shape + (N,)`, where N is the length of each string in a.""" dtype = a.dtype.kind if n_strlen is None: n_strlen = a.dtype.itemsize if dtype not in ["S","U"]: raise ValueError("type must be string or unicode ('S' or 'U')") if encoding in ['none','None','bytes']: b = numpy.array(tuple(a.tobytes()),'S1') elif encoding == 'ascii': b = numpy.array(tuple(a.tobytes().decode(encoding)),dtype+'1') b.shape = a.shape + (n_strlen,) else: if not a.ndim: a = numpy.array([a]) bbytes = [text.encode(encoding) for text in a] pad = b'\0' * n_strlen bbytes = [(x + pad)[:n_strlen] for x in bbytes] b = numpy.array([[bb[i:i+1] for i in range(n_strlen)] for bb in bbytes]) return b def chartostring(b,encoding='utf-8'): """ **`chartostring(b,encoding='utf-8')`** convert a character array to a string array with one less dimension. **`b`**: Input character array (numpy datatype `'S1'` or `'U1'`). Will be converted to an array of strings, where each string has a fixed length of `b.shape[-1]` characters. optional kwarg `encoding` can be used to specify character encoding (default `utf-8`). If `encoding` is 'none' or 'bytes', a `numpy.string_` byte array is returned.
returns a numpy string array with datatype `'UN'` (or `'SN'`) and shape `b.shape[:-1]` where where `N=b.shape[-1]`.""" dtype = b.dtype.kind if dtype not in ["S","U"]: raise ValueError("type must be string or unicode ('S' or 'U')") bs = b.tobytes() slen = int(b.shape[-1]) if encoding in ['none','None','bytes']: a = numpy.array([bs[n1:n1+slen] for n1 in range(0,len(bs),slen)],'S'+repr(slen)) else: a = numpy.array([bs[n1:n1+slen].decode(encoding) for n1 in range(0,len(bs),slen)],'U'+repr(slen)) a.shape = b.shape[:-1] return a class MFDataset(Dataset): """ Class for reading multi-file netCDF Datasets, making variables spanning multiple files appear as if they were in one file. Datasets must be in `NETCDF4_CLASSIC`, `NETCDF3_CLASSIC`, `NETCDF3_64BIT_OFFSET` or `NETCDF3_64BIT_DATA` format (`NETCDF4` Datasets won't work). Adapted from [pycdf](http://pysclint.sourceforge.net/pycdf) by Andre Gosselin. Example usage (See `MFDataset.__init__` for more details): ```python >>> import numpy as np >>> # create a series of netCDF files with a variable sharing >>> # the same unlimited dimension. >>> for nf in range(10): ... with Dataset("mftest%s.nc" % nf, "w", format='NETCDF4_CLASSIC') as f: ... f.createDimension("x",None) ... x = f.createVariable("x","i",("x",)) ... x[0:10] = np.arange(nf*10,10*(nf+1)) >>> # now read all those files in at once, in one Dataset. >>> f = MFDataset("mftest*nc") >>> print(f.variables["x"][:]) [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99] ``` """ def __init__(self, files, check=False, aggdim=None, exclude=[], master_file=None): """ **`__init__(self, files, check=False, aggdim=None, exclude=[], master_file=None)`** Open a Dataset spanning multiple files, making it look as if it was a single file. Variables in the list of files that share the same dimension (specified with the keyword `aggdim`) are aggregated. If `aggdim` is not specified, the unlimited is aggregated. Currently, `aggdim` must be the leftmost (slowest varying) dimension of each of the variables to be aggregated. **`files`**: either a sequence of netCDF files or a string with a wildcard (converted to a sorted list of files using glob) If the `master_file` kwarg is not specified, the first file in the list will become the "master" file, defining all the variables with an aggregation dimension which may span subsequent files. Attribute access returns attributes only from "master" file. The files are always opened in read-only mode. **`check`**: True if you want to do consistency checking to ensure the correct variables structure for all of the netcdf files. Checking makes the initialization of the MFDataset instance much slower. Default is False. **`aggdim`**: The name of the dimension to aggregate over (must be the leftmost dimension of each of the variables to be aggregated). If None (default), aggregate over the unlimited dimension. **`exclude`**: A list of variable names to exclude from aggregation. Default is an empty list. **`master_file`**: file to use as "master file", defining all the variables with an aggregation dimension and all global attributes. """ # Open the master file in the base class, so that the CDFMF instance # can be used like a CDF instance. 
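# Sketch of the wildcard handling just below (hypothetical file names):
#
#   MFDataset("mftest*nc")  ->  files = sorted(glob("mftest*nc"))
#                           ->  ['mftest0.nc', 'mftest1.nc', ...]
#   # with no master_file given, files[0] becomes the master.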
if isinstance(files, str): if files.startswith('http'): msg='cannot use file globbing for remote (OPeNDAP) datasets' raise ValueError(msg) else: files = sorted(glob(files)) if not files: msg='no files specified (file list is empty)' raise OSError(msg) if master_file is not None: if master_file not in files: raise ValueError('master_file not in files list') else: master = master_file else: master = files[0] # Open the master again, this time as a classic CDF instance. This will avoid # calling methods of the CDFMF subclass when querying the master file. cdfm = Dataset(master) # copy attributes from master. for name, value in cdfm.__dict__.items(): self.__dict__[name] = value # Make sure the master defines a dim with name aggdim, # or an unlimited dimension. aggDimId = None for dimname,dim in cdfm.dimensions.items(): if aggdim is None: if dim.isunlimited(): aggDimId = dim aggDimName = dimname else: if dimname == aggdim: aggDimId = dim aggDimName = dimname if aggDimId is None: raise OSError("master dataset %s does not have an aggregation dimension" % master) # Get info on all aggregation variables defined in the master. # Make sure the master defines at least one aggregation variable. masterRecVar = {} for vName,v in cdfm.variables.items(): # skip variables specified in exclude list. if vName in exclude: continue dims = v.dimensions shape = v.shape dtype = v.dtype # Be careful: we may deal with a scalar (dimensionless) variable. # Unlimited dimension always occupies index 0. if (len(dims) > 0 and aggDimName == dims[0]): masterRecVar[vName] = (dims, shape, dtype) if len(masterRecVar) == 0: raise OSError("master dataset %s does not have any variables to aggregate" % master) # Create the following: # cdf list of Dataset instances # cdfVLen list unlimited dimension lengths in each CDF instance # cdfRecVar dictionary indexed by the aggregation var names; each key holds # a list of the corresponding Variable instance, one for each # cdf file of the file set cdf = [] self._cdf = cdf # Store this now, because dim() method needs it cdfVLen = [] cdfRecVar = {} # Open each remaining file in read-only mode. # Make sure each file defines the same aggregation variables as the master # and that the variables are defined in the same way (name, shape and type) for f in files: if f == master: part = cdfm else: part = Dataset(f) if cdfRecVar == {}: empty_cdfRecVar = True else: empty_cdfRecVar = False varInfo = part.variables for v in masterRecVar.keys(): if check: # Make sure master rec var is also defined here. if v not in varInfo.keys(): raise OSError("aggregation variable %s not defined in %s" % (v, f)) #if not vInst.dimensions[0] != aggDimName: masterDims, masterShape, masterType = masterRecVar[v][:3] extDims = varInfo[v].dimensions extShape = varInfo[v].shape extType = varInfo[v].dtype # Check that dimension names are identical. if masterDims != extDims: raise OSError("variable %s : dimensions mismatch between " "master %s (%s) and extension %s (%s)" % (v, master, masterDims, f, extDims)) # Check that the ranks are identical, and the dimension lengths are # identical (except for that of the unlimited dimension, which of # course may vary).
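# Example of the rank/shape checks below (sketch, hypothetical shapes): a
# master variable of shape (10, 64, 128) accepts an extension of shape
# (7, 64, 128) -- only the aggregation dimension may differ -- while
# (7, 32, 128) or (64, 128) raises OSError.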
if len(masterShape) != len(extShape): raise OSError("variable %s : rank mismatch between " "master %s (%s) and extension %s (%s)" % (v, master, len(masterShape), f, len(extShape))) if masterShape[1:] != extShape[1:]: raise OSError("variable %s : shape mismatch between " "master %s (%s) and extension %s (%s)" % (v, master, masterShape, f, extShape)) # Check that the data types are identical. if masterType != extType: raise OSError("variable %s : data type mismatch between " "master %s (%s) and extension %s (%s)" % (v, master, masterType, f, extType)) # Everything ok. if empty_cdfRecVar: cdfRecVar[v] = [part.variables[v]] else: cdfRecVar[v].append(part.variables[v]) else: # No making sure of anything -- assume this is ok.. if empty_cdfRecVar: cdfRecVar[v] = [part.variables[v]] else: cdfRecVar[v].append(part.variables[v]) cdf.append(part) cdfVLen.append(len(part.dimensions[aggDimName])) # Attach attributes to the MFDataset instance. # A local __setattr__() method is required for them. self._files = files # list of cdf file names in the set self._cdfVLen = cdfVLen # list of unlimited lengths self._cdfTLen = sum(cdfVLen) # total length self._cdfRecVar = cdfRecVar # dictionary of Variable instances for all # the aggregation variables self._dims = cdfm.dimensions self._grps = cdfm.groups for dimname, dim in self._dims.items(): if dimname == aggDimName: self._dims[dimname] = _Dimension(dimname, dim, self._cdfVLen, self._cdfTLen) self._vars = cdfm.variables for varname,var in self._vars.items(): if varname in self._cdfRecVar.keys(): self._vars[varname] = _Variable(self, varname, var, aggDimName) self._file_format = [] self._data_model = [] self._disk_format = [] for dset in self._cdf: if dset.file_format == 'NETCDF4' or dset.data_model == 'NETCDF4': raise ValueError('MFNetCDF4 only works with NETCDF3_* and NETCDF4_CLASSIC formatted files, not NETCDF4') self._file_format.append(dset.file_format) self._data_model.append(dset.data_model) self._disk_format.append(dset.disk_format) self._path = '/' def __setattr__(self, name, value): """override base class attribute creation""" self.__dict__[name] = value def __getattribute__(self, name): if name in ['variables','dimensions','file_format','groups',\ 'data_model','disk_format','path']: if name == 'dimensions': return self._dims if name == 'variables': return self._vars if name == 'file_format': return self._file_format if name == 'data_model': return self._data_model if name == 'disk_format': return self._disk_format if name == 'path': return self._path if name == 'groups': return self._grps else: return Dataset.__getattribute__(self, name) def ncattrs(self): """ **`ncattrs(self)`** return the netcdf attribute names from the master file. """ return list(self._cdf[0].__dict__) def close(self): """ **`close(self)`** close all the open files. """ for dset in self._cdf: dset.close() def isopen(self): """ **`isopen(self)`** True if all files are open, False otherwise. 
""" return all(map(lambda dset: dset.isopen(), self._cdf)) def __repr__(self): ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(str(dimname) for dimname in self.dimensions.keys()) varnames = tuple(str(varname) for varname in self.variables.keys()) grpnames = () if self.path == '/': ncdump.append('root group (%s data model, file format %s):' % (self.data_model[0], self.disk_format[0])) else: ncdump.append('group %s:' % self.path) for name in self.ncattrs(): ncdump.append(' %s: %s' % (name, self.__dict__[name])) ncdump.append(' dimensions = %s' % str(dimnames)) ncdump.append(' variables = %s' % str(varnames)) ncdump.append(' groups = %s' % str(grpnames)) return '\n'.join(ncdump) def __reduce__(self): # raise error is user tries to pickle a MFDataset object. raise NotImplementedError('MFDataset is not picklable') class _Dimension: def __init__(self, dimname, dim, dimlens, dimtotlen): self.dimlens = dimlens self.dimtotlen = dimtotlen self._name = dimname def __len__(self): return self.dimtotlen def isunlimited(self): return True def __repr__(self): typ = repr(type(self)).replace("._netCDF4", "") if self.isunlimited(): return "%r (unlimited): name = '%s', size = %s" %\ (typ, self._name, len(self)) else: return "%r: name = '%s', size = %s" %\ (typ, self._name, len(self)) class _Variable: def __init__(self, dset, varname, var, recdimname): self.dimensions = var.dimensions self._dset = dset self._grp = dset self._mastervar = var self._recVar = dset._cdfRecVar[varname] self._recdimname = recdimname self._recLen = dset._cdfVLen self.dtype = var.dtype self._name = var._name # copy attributes from master. for name, value in var.__dict__.items(): self.__dict__[name] = value def typecode(self): return self.dtype def ncattrs(self): return list(self._mastervar.__dict__.keys()) def __getattr__(self,name): if name == 'shape': return self._shape() if name == 'ndim': return len(self._shape()) if name == 'name': return self._name try: return self.__dict__[name] except: raise AttributeError(name) def __repr__(self): ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(str(dimname) for dimname in self.dimensions) ncdump.append('%s %s%s' % (self.dtype, self._name, dimnames)) for name in self.ncattrs(): ncdump.append(' %s: %s' % (name, self.__dict__[name])) unlimdims = [] for dimname in self.dimensions: dim = _find_dim(self._grp, dimname) if dim.isunlimited(): unlimdims.append(str(dimname)) ncdump.append('unlimited dimensions = %r' % (tuple(unlimdims),)) ncdump.append('current size = %r' % (self.shape,)) return '\n'.join(ncdump) def __len__(self): if not self._shape: raise TypeError('len() of unsized object') else: return self._shape()[0] def _shape(self): recdimlen = len(self._dset.dimensions[self._recdimname]) return (recdimlen,) + self._mastervar.shape[1:] def set_auto_chartostring(self,val): for v in self._recVar: v.set_auto_chartostring(val) def set_auto_maskandscale(self,val): for v in self._recVar: v.set_auto_maskandscale(val) def set_auto_mask(self,val): for v in self._recVar: v.set_auto_mask(val) def set_auto_scale(self,val): for v in self._recVar: v.set_auto_scale(val) def set_always_mask(self,val): for v in self._recVar: v.set_always_mask(val) def __getitem__(self, elem): """Get records from a concatenated set of variables.""" # This special method is used to index the netCDF variable # using the "extended slice syntax". 
The extended slice syntax # is a perfect match for the "start", "count" and "stride" # arguments to the nc_get_var() function, and is much more easy # to use. start, count, stride, put_ind =\ _StartCountStride(elem, self.shape) datashape = _out_array_shape(count) data = ma.empty(datashape, dtype=self.dtype) # Determine which dimensions need to be squeezed # (those for which elem is an integer scalar). # The convention used is that for those cases, # put_ind for this dimension is set to -1 by _StartCountStride. squeeze = data.ndim * [slice(None),] for i,n in enumerate(put_ind.shape[:-1]): if n == 1 and put_ind[...,i].ravel()[0] == -1: squeeze[i] = 0 # Reshape the arrays so we can iterate over them. strt = start.reshape((-1, self.ndim or 1)) cnt = count.reshape((-1, self.ndim or 1)) strd = stride.reshape((-1, self.ndim or 1)) put_ind = put_ind.reshape((-1, self.ndim or 1)) # Fill output array with data chunks. # Number of variables making up the MFVariable.Variable. nv = len(self._recLen) for (start,count,stride,ind) in zip(strt, cnt, strd, put_ind): # make sure count=-1 becomes count=1 count = [abs(cnt) for cnt in count] if (numpy.array(stride) < 0).any(): raise IndexError('negative strides not allowed when slicing MFVariable Variable instance') # Start, stop and step along 1st dimension, eg the unlimited # dimension. sta = start[0] step = stride[0] stop = sta + count[0] * step # Build a list representing the concatenated list of all records in # the MFVariable variable set. The list is composed of 2-elem lists # each holding: # the record index inside the variables, from 0 to n # the index of the Variable instance to which each record belongs idx = [] # list of record indices vid = [] # list of Variable indices for n in range(nv): k = self._recLen[n] # number of records in this variable idx.extend(range(k)) vid.extend([n] * k) # Merge the two lists to get a list of 2-elem lists. # Slice this list along the first dimension. lst = list(zip(idx, vid)).__getitem__(slice(sta, stop, step)) # Rebuild the slicing expression for dimensions 1 and ssq. newSlice = [slice(None, None, None)] for n in range(1, len(start)): # skip dimension 0 s = slice(start[n],start[n] + count[n] * stride[n], stride[n]) newSlice.append(s) # Apply the slicing expression to each var in turn, extracting records # in a list of arrays. lstArr = [] ismasked = False for n in range(nv): # Get the list of indices for variable 'n'. idx = [i for i,numv in lst if numv == n] if idx: # Rebuild slicing expression for dimension 0. newSlice[0] = slice(idx[0], idx[-1] + 1, step) # Extract records from the var, and append them to a list # of arrays. dat = Variable.__getitem__(self._recVar[n],tuple(newSlice)) if ma.isMA(dat) and not ismasked: ismasked=True fill_value = dat.fill_value lstArr.append(dat) if ismasked: lstArr = ma.concatenate(lstArr) else: lstArr = numpy.concatenate(lstArr) if lstArr.dtype != data.dtype: data = data.astype(lstArr.dtype) # sometimes there are legitimate singleton dimensions, in which # case the array shapes won't conform. If so, a ValueError will # result, and no squeeze will be done. try: data[tuple(ind)] = lstArr.squeeze() except ValueError: data[tuple(ind)] = lstArr # Remove extra singleton dimensions. data = data[tuple(squeeze)] # if no masked elements, return numpy array. if ma.isMA(data) and not data.mask.any(): data = data.filled() return data class MFTime(_Variable): """ Class providing an interface to a MFDataset time Variable by imposing a unique common time unit and/or calendar to all files. 
Example usage (See `MFTime.__init__` for more details): ```python >>> import numpy as np >>> f1 = Dataset("mftest_1.nc","w", format="NETCDF4_CLASSIC") >>> f2 = Dataset("mftest_2.nc","w", format="NETCDF4_CLASSIC") >>> f1.createDimension("time",None) >>> f2.createDimension("time",None) >>> t1 = f1.createVariable("time","i",("time",)) >>> t2 = f2.createVariable("time","i",("time",)) >>> t1.units = "days since 2000-01-01" >>> t2.units = "days since 2000-02-01" >>> t1.calendar = "standard" >>> t2.calendar = "standard" >>> t1[:] = np.arange(31) >>> t2[:] = np.arange(30) >>> f1.close() >>> f2.close() >>> # Read the two files in at once, in one Dataset. >>> f = MFDataset("mftest_*nc") >>> t = f.variables["time"] >>> print(t.units) days since 2000-01-01 >>> print(t[32]) # The value written in the file, inconsistent with the MF time units. 1 >>> T = MFTime(t) >>> print(T[32]) 32 ``` """ def __init__(self, time, units=None, calendar=None): """ **`__init__(self, time, units=None, calendar=None)`** Create a time Variable with units consistent across a multifile dataset. **`time`**: Time variable from a `MFDataset`. **`units`**: Time units, for example, `'days since 1979-01-01'`. If `None`, use the units from the master variable. **`calendar`**: Calendar overload to use across all files, for example, `'standard'` or `'gregorian'`. If `None`, check that the calendar attribute is present on each variable and values are unique across files raising a `ValueError` otherwise. """ import datetime self.__time = time # copy attributes from master time variable. for name, value in time.__dict__.items(): self.__dict__[name] = value # Make sure calendar attribute present in all files if no default calendar # is provided. Also assert this value is the same across files. if calendar is None: calendars = [None] * len(self._recVar) for idx, t in enumerate(self._recVar): if not hasattr(t, 'calendar'): msg = 'MFTime requires that the time variable in all files ' \ 'have a calendar attribute if no default calendar is provided.' raise ValueError(msg) else: calendars[idx] = t.calendar calendars = set(calendars) if len(calendars) > 1: msg = 'MFTime requires that the same time calendar is ' \ 'used by all files if no default calendar is provided.' raise ValueError(msg) else: calendar = list(calendars)[0] # Set calendar using the default or the unique calendar value across all files. self.calendar = calendar # Override units if units is specified. self.units = units or time.units # Reference date to compute the difference between different time units. ref_date = datetime.datetime(1900,1,1) ref_num = date2num(ref_date, self.units, self.calendar) # Create delta vector: delta = ref_num(ref_date) - num(ref_date) # So that ref_num(date) = num(date) + delta self.__delta = numpy.empty(len(self), time.dtype) i0 = 0; i1 = 0 for i,v in enumerate(self._recVar): n = self._recLen[i] # Length of time vector. 
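# Numeric sketch (values from the class example above, standard calendar):
# with self.units = 'days since 2000-01-01' and v.units = 'days since 2000-02-01',
#
#   ref_num = date2num(1900-01-01, 'days since 2000-01-01')  # -36524
#   num     = date2num(1900-01-01, 'days since 2000-02-01')  # -36555
#   # delta = ref_num - num = 31, shifting the second file's
#   # records forward by 31 days into the master units.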
num = date2num(ref_date, v.units, self.calendar) i1 += n self.__delta[i0:i1] = ref_num - num i0 += n def __getitem__(self, elem): return self.__time[elem] + self.__delta[elem] netcdf4-python-1.7.4rel/src/netCDF4/plugins/000077500000000000000000000000001512661643000205375ustar00rootroot00000000000000netcdf4-python-1.7.4rel/src/netCDF4/plugins/empty.txt000066400000000000000000000000001512661643000224240ustar00rootroot00000000000000netcdf4-python-1.7.4rel/src/netCDF4/py.typed000066400000000000000000000000001512661643000205430ustar00rootroot00000000000000netcdf4-python-1.7.4rel/src/netCDF4/utils.py000066400000000000000000001110671512661643000205760ustar00rootroot00000000000000import sys import numpy as np from numpy import ma from numpy.lib.stride_tricks import as_strided import warnings import getopt import os try: bytes except NameError: # no bytes type in python < 2.6 bytes = str def _safecast(a,b): # check to see if array a can be safely cast # to array b. A little less picky than numpy.can_cast. try: is_safe = ((a == b) | (np.isnan(a) & np.isnan(b))).all() #is_safe = np.allclose(a, b, equal_nan=True) # numpy 1.10.0 except: try: is_safe = (a == b).all() # string arrays. except: is_safe = False return is_safe def _sortbylist(A,B): # sort one list (A) using the values from another list (B) return [A[i] for i in sorted(range(len(A)), key=B.__getitem__)] def _find_dim(grp, dimname): # find Dimension instance given group and name. # look in current group, and parents. group = grp dim = None while 1: try: dim = group.dimensions[dimname] break except: try: group = group.parent except: raise ValueError("cannot find dimension %s in this group or parent groups" % dimname) if dim is None: raise KeyError("dimension %s not defined in group %s or any group in its family tree" % (dimname, grp.path)) else: return dim def _walk_grps(topgrp): """Iterate through all (sub-) groups of topgrp, similar to os.walktree. """ yield topgrp.groups.values() for grp in topgrp.groups.values(): yield from _walk_grps(grp) def _quantize(data,least_significant_digit): """ quantize data to improve compression. data is quantized using around(scale*data)/scale, where scale is 2**bits, and bits is determined from the least_significant_digit. For example, if least_significant_digit=1, bits will be 4. """ precision = pow(10.,-least_significant_digit) exp = np.log10(precision) if exp < 0: exp = int(np.floor(exp)) else: exp = int(np.ceil(exp)) bits = np.ceil(np.log2(pow(10.,-exp))) scale = pow(2.,bits) datout = np.around(scale*data)/scale if ma.isMA(datout): datout.set_fill_value(data.fill_value) return datout else: return datout def _StartCountStride(elem, shape, dimensions=None, grp=None, datashape=None,\ put=False, use_get_vars = True): """Return start, count, stride and indices needed to store/extract data into/from a netCDF variable. This function is used to convert a slicing expression into a form that is compatible with the nc_get_vars function. Specifically, it needs to interpret integers, slices, Ellipses, and 1-d sequences of integers and booleans. Numpy uses "broadcasting indexing" to handle array-valued indices. "Broadcasting indexing" (a.k.a "fancy indexing") treats all multi-valued indices together to allow arbitrary points to be extracted. The index arrays can be multidimensional, and more than one can be specified in a slice, as long as they can be "broadcast" against each other. This style of indexing can be very powerful, but it is very hard to understand, explain, and implement (and can lead to hard to find bugs). 
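For example, for a 3x3 numpy array `a`, the fancy index `a[[0, 2], [0, 2]]` returns the 1-d pair `(a[0, 0], a[2, 2])`, whereas the orthogonal indexing described next returns the full 2x2 block of rows {0, 2} and columns {0, 2}.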
Most other python packages and array processing languages (such as netcdf4-python, xray, biggus, matlab and fortran) use "orthogonal indexing" which only allows for 1-d index arrays and treats these arrays of indices independently along each dimension. The implementation of "orthogonal indexing" used here requires that index arrays be 1-d boolean or integer. If integer arrays are used, the index values must be sorted and contain no duplicates. In summary, slicing netcdf4-python variable objects with 1-d integer or boolean arrays is allowed, but may give a different result than slicing a numpy array. Numpy also supports slicing an array with a boolean array of the same shape. For example x[x>0] returns a 1-d array with all the positive values of x. This is also not supported in netcdf4-python, if x.ndim > 1. Orthogonal indexing can be used to select netcdf variable slices using the dimension variables. For example, you can use v[lat>60,lon<180] to fetch the elements of v obeying conditions on latitude and longitude. Allowing for this sort of simple variable subsetting is the reason we decided to deviate from numpy's slicing rules. This function is used both by the __setitem__ and __getitem__ method of the Variable class. Parameters ---------- elem : tuple of integer, slice, ellipsis or 1-d boolean or integer sequences used to slice the netCDF Variable (Variable[elem]). shape : tuple containing the current shape of the netCDF variable. dimensions : sequence The names of the dimensions. Only needed by __setitem__. grp : netCDF Group The netCDF group to which the variable being set belongs. datashape : sequence The shape of the data that is being stored. Only needed by __setitem__. put : True|False (default False). If called from __setitem__, put is True. Returns ------- start : ndarray (..., n) A starting indices array of dimension n+1. The first n dimensions identify different independent data chunks. The last dimension can be read as the starting indices. count : ndarray (..., n) An array of dimension (n+1) storing the number of elements to get. stride : ndarray (..., n) An array of dimension (n+1) storing the steps between each datum. indices : ndarray (..., n) An array storing the indices describing the location of the data chunk in the target/source array (__getitem__/__setitem__). Notes: netCDF data is accessed via the function: nc_get_vars(grpid, varid, start, count, stride, data) Assume that the variable has n dimensions; then start is an n-tuple that contains the indices at the beginning of the data chunk, count is an n-tuple that contains the number of elements to be accessed, and stride is an n-tuple that contains the step length between each element. """ # Adapted from pycdf (http://pysclint.sourceforge.net/pycdf) # by Andre Gosselin. # Modified by David Huard to handle efficiently fancy indexing with # sequences of integers or booleans. nDims = len(shape) if nDims == 0: nDims = 1 shape = (1,) # is there an unlimited dimension? (only defined for __setitem__) if put: hasunlim = False unlimd={} if dimensions: for i in range(nDims): dimname = dimensions[i] # is this dimension unlimited? # look in current group, and parents for dim. dim = _find_dim(grp, dimname) unlimd[dimname]=dim.isunlimited() if unlimd[dimname]: hasunlim = True else: hasunlim = False # When a single array or (non-tuple) sequence of integers is given # as a slice, assume it applies to the first dimension, # and use ellipsis for remaining dimensions.
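# Sketch of the rule stated above (hypothetical 3-d variable v): v[[1, 3]] is
# handled as v[[1, 3], :, :] -- the integer sequence applies to the first
# dimension and the remaining axes get full slices.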
if np.iterable(elem): if type(elem) == np.ndarray or (type(elem) != tuple and \ np.array([_is_int(e) for e in elem]).all()): elem = [elem] for n in range(len(elem)+1,nDims+1): elem.append(slice(None,None,None)) else: # Convert single index to sequence elem = [elem] # ensure there is at most 1 ellipse # we cannot use elem.count(Ellipsis), as with fancy indexing would occur # np.array() == Ellipsis which gives ValueError: The truth value of an # array with more than one element is ambiguous. Use a.any() or a.all() if sum(1 for e in elem if e is Ellipsis) > 1: raise IndexError("At most one ellipsis allowed in a slicing expression") # replace boolean arrays with sequences of integers. newElem = [] IndexErrorMsg=\ "only integers, slices (`:`), ellipsis (`...`), and 1-d integer or boolean arrays are valid indices" i=0 for e in elem: # string-like object try to cast to int # needs to be done first, since strings are iterable and # hard to distinguish from something castable to an iterable numpy array. if type(e) in [str, bytes]: try: e = int(e) except: raise IndexError(IndexErrorMsg) ea = np.asarray(e) # Raise error if multidimensional indexing is used. if ea.ndim > 1: raise IndexError("Index cannot be multidimensional") # set unlim to True if dimension is unlimited and put==True # (called from __setitem__) if hasunlim and put and dimensions: try: dimname = dimensions[i] unlim = unlimd[dimname] except IndexError: # more slices than dimensions (issue 371) unlim = False else: unlim = False # convert boolean index to integer array. if np.iterable(ea) and ea.dtype.kind =='b': # check that boolean array not too long if not unlim and shape[i] != len(ea): msg=""" Boolean array must have the same shape as the data along this dimension.""" raise IndexError(msg) ea = np.flatnonzero(ea) # an iterable (non-scalar) integer array. if np.iterable(ea) and ea.dtype.kind == 'i': # convert negative indices in 1d array to positive ones. ea = np.where(ea < 0, ea + shape[i], ea) if np.any(ea < 0): raise IndexError("integer index out of range") # if unlim, let integer index be longer than current dimension # length. if ea.shape != (0,): elen = shape[i] if unlim: elen = max(ea.max()+1,elen) if ea.max()+1 > elen: msg="integer index exceeds dimension size" raise IndexError(msg) newElem.append(ea) # integer scalar elif ea.dtype.kind == 'i': newElem.append(e) # slice or ellipsis object elif type(e) == slice or type(e) == type(Ellipsis): if not use_get_vars and type(e) == slice and e.step not in [None,-1,1] and\ dimensions is not None and grp is not None: # convert strided slice to integer sequence if possible # (this will avoid nc_get_vars, which is slow - issue #680). start = e.start if e.start is not None else 0 step = e.step if e.stop is None and dimensions is not None and grp is not None: stop = len(_find_dim(grp, dimensions[i])) else: stop = e.stop if stop < 0: stop = len(_find_dim(grp, dimensions[i])) + stop try: ee = np.arange(start,stop,e.step) if len(ee) > 0: e = ee except: pass newElem.append(e) else: # castable to a scalar int, otherwise invalid try: e = int(e) newElem.append(e) except: raise IndexError(IndexErrorMsg) if type(e)==type(Ellipsis): i+=1+nDims-len(elem) else: i+=1 elem = newElem # replace Ellipsis and integer arrays with slice objects, if possible. newElem = [] for e in elem: ea = np.asarray(e) # Replace ellipsis with slices. if type(e) == type(Ellipsis): # The ellipsis stands for the missing dimensions. 
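# Sketch of the ellipsis expansion below (hypothetical 4-d variable): with
# nDims = 4 and elem = [0, Ellipsis, 2], the ellipsis stands for
# nDims - len(elem) + 1 = 2 axes, yielding [0, slice(None), slice(None), 2].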
            newElem.extend((slice(None, None, None),) * (nDims - len(elem) + 1))
        # Replace sequence of indices with slice object if possible.
        elif np.iterable(e) and len(e) > 1:
            start = e[0]
            stop = e[-1] + 1
            step = e[1] - e[0]
            try:
                ee = range(start, stop, step)
            except ValueError:  # start, stop or step is not valid for a range
                ee = False
            if ee and len(e) == len(ee) and (e == np.arange(start, stop, step)).all():
                # don't convert to slice unless abs(stride) == 1
                # (nc_get_vars is very slow, issue #680)
                if not use_get_vars and step not in [1, -1]:
                    newElem.append(e)
                else:
                    newElem.append(slice(start, stop, step))
            else:
                newElem.append(e)
        elif np.iterable(e) and len(e) == 1:
            newElem.append(slice(e[0], e[0] + 1, 1))
        else:
            newElem.append(e)
    elem = newElem

    # If slice doesn't cover all dims, assume ellipsis for rest of dims.
    if len(elem) < nDims:
        for n in range(len(elem)+1, nDims+1):
            elem.append(slice(None, None, None))

    # make sure there are not too many dimensions in slice.
    if len(elem) > nDims:
        raise ValueError("slicing expression exceeds the number of dimensions of the variable")

    # Compute the dimensions of the start, count, stride and indices arrays.
    # The number of elements in the first n dimensions corresponds to the
    # number of times the _get method will be called.
    sdim = []
    for i, e in enumerate(elem):
        # at this stage e is a slice, a scalar integer, or a 1d integer array.
        # integer array: _get call for each True value
        if np.iterable(e):
            sdim.append(len(e))
        # Scalar int or slice, just a single _get call
        else:
            sdim.append(1)

    # broadcast data shape when assigned to full variable (issue #919)
    try:
        fullslice = elem.count(slice(None, None, None)) == len(elem)
    except:  # fails if elem contains a numpy array.
        fullslice = False
    if fullslice and datashape and put and not hasunlim:
        datashape = broadcasted_shape(shape, datashape)

    # pad datashape with zeros for dimensions not being sliced (issue #906)
    # only used when data covers slice over subset of dimensions
    if datashape and len(datashape) != len(elem) and\
       len(datashape) == sum(1 for e in elem if type(e) == slice):
        datashapenew = (); i = 0
        for e in elem:
            if type(e) != slice and not np.iterable(e):  # scalar integer slice
                datashapenew = datashapenew + (0,)
            else:  # slice object
                datashapenew = datashapenew + (datashape[i],)
                i += 1
        datashape = datashapenew

    # Create the start, count, stride and indices arrays.

    sdim.append(max(nDims, 1))
    start = np.empty(sdim, dtype=np.intp)
    count = np.empty(sdim, dtype=np.intp)
    stride = np.empty(sdim, dtype=np.intp)
    indices = np.empty(sdim, dtype=object)

    for i, e in enumerate(elem):
        ea = np.asarray(e)

        # set unlim to True if dimension is unlimited and put==True
        # (called from __setitem__). Note: grp and dimensions must be set.
        if hasunlim and put and dimensions:
            dimname = dimensions[i]
            unlim = unlimd[dimname]
        else:
            unlim = False

        #    SLICE    #
        if type(e) == slice:
            # determine length parameter for slice.indices.
            # shape[i] can be zero for unlim dim that hasn't been written to
            # yet.
            # length of slice may be longer than current shape
            # if dimension is unlimited (and we are writing, not reading).
            if unlim and e.stop is not None and e.stop > shape[i]:
                length = e.stop
            elif unlim and e.stop is None and datashape != ():
                try:
                    if e.start is None:
                        length = datashape[i]
                    else:
                        length = e.start + datashape[i]
                except IndexError:
                    raise IndexError("shape of data does not conform to slice")
            else:
                if unlim and datashape == () and len(dim) == 0:
                    # writing scalar along unlimited dimension using slicing
                    # syntax (var[:] = 1, when var.shape = ())
                    length = 1
                else:
                    length = shape[i]

            beg, end, inc = e.indices(length)
            n = len(range(beg, end, inc))
            start[..., i] = beg
            count[..., i] = n
            stride[..., i] = inc
            indices[..., i] = slice(None)

        #    ITERABLE    #
        elif np.iterable(e) and np.array(e).dtype.kind in 'i':
            # Sequence of integers
            if start[..., i].size:
                start[..., i] = np.apply_along_axis(lambda x: e*x, i, np.ones(sdim[:-1]))
                indices[..., i] = np.apply_along_axis(lambda x: np.arange(sdim[i])*x, i, np.ones(sdim[:-1], int))
            count[..., i] = 1
            stride[..., i] = 1

        #    all that's left is SCALAR INTEGER    #
        else:
            if e >= 0:
                start[..., i] = e
            elif e < 0 and (-e <= shape[i]):
                start[..., i] = e + shape[i]
            else:
                raise IndexError("Index out of range")

            count[..., i] = 1
            stride[..., i] = 1
            indices[..., i] = -1    # Use -1 instead of 0 to indicate that
                                    # this dimension shall be squeezed.

    return start, count, stride, indices


def _out_array_shape(count):
    """Return the output array shape given the count array created by
    _StartCountStride."""

    s = list(count.shape[:-1])
    out = []

    for i, n in enumerate(s):
        if n == 1 and count.size > 0:
            # All elements should be identical.
            c = count[..., i].ravel()[0]
            out.append(c)
        else:
            out.append(n)
    return out


def _is_container(a):
    # is object container-like? (can test for
    # membership with "in", but not a string)
    try:
        1 in a
    except:
        return False
    # strings are iterable but must not be treated as containers
    # (the original py2-era check compared against basestring).
    if isinstance(a, (str, bytes)):
        return False
    return True


def _is_int(a):
    try:
        return int(a) == a
    except:
        return False


def _tostr(s):
    try:
        ss = str(s)
    except:
        ss = s
    return ss


def _getgrp(g, p):
    grps = p.split("/")
    for gname in grps:
        if gname == "":
            continue
        g = g.groups[gname]
    return g


def ncinfo():
    from netCDF4 import Dataset

    usage = """
 Print summary information about a netCDF file.

 usage: %s [-h/--help] [-g grp or --group=grp] [-v var or --variable=var] [-d dim or --dimension=dim] filename

 -h/--help -- Print usage message.
 -g or --group= -- Print info for this group
      (default is root group). Nested groups specified
      using posix paths ("group1/group2/group3").
 -v or --variable= -- Print info for this variable.
 -d or --dimension= -- Print info for this dimension.

 netcdf filename must be last argument.
\n""" % os.path.basename(sys.argv[0])

    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'hv:g:d:',
                                    ['group=', 'variable=', 'dimension='])
    except:
        (type, value, traceback) = sys.exc_info()
        sys.stdout.write("Error parsing the options. The error was: %s\n" % value)
        sys.stderr.write(usage)
        sys.exit(0)

    # Get the options
    group = None; var = None; dim = None
    for option in opts:
        if option[0] == '-h' or option[0] == '--help':
            sys.stderr.write(usage)
            sys.exit(0)
        elif option[0] == '--group' or option[0] == '-g':
            group = option[1]
        elif option[0] == '--variable' or option[0] == '-v':
            var = option[1]
        elif option[0] == '--dimension' or option[0] == '-d':
            dim = option[1]
        else:
            sys.stdout.write("%s: Unrecognized option\n" % option[0])
            sys.stderr.write(usage)
            sys.exit(0)

    # filename passed as last argument
    try:
        filename = pargs[-1]
    except IndexError:
        sys.stdout.write("You need to pass a netcdf filename!\n")
        sys.stderr.write(usage)
        sys.exit(0)

    f = Dataset(filename)
    if group is None:
        if var is None and dim is None:
            print(f)
        else:
            if var is not None:
                print(f.variables[var])
            if dim is not None:
                print(f.dimensions[dim])
    else:
        if var is None and dim is None:
            print(_getgrp(f, group))
        else:
            g = _getgrp(f, group)
            if var is not None:
                print(g.variables[var])
            if dim is not None:
                # look up the dimension by its own name (this previously
                # indexed g.dimensions with the variable name by mistake).
                print(g.dimensions[dim])
    f.close()


def _nc4tonc3(filename4, filename3, clobber=False, nchunk=10, quiet=False,
              format='NETCDF3_64BIT'):
    """convert a netcdf 4 file (filename4) in NETCDF4_CLASSIC format
    to a netcdf 3 file (filename3) in NETCDF3_64BIT format."""
    from netCDF4 import Dataset
    ncfile4 = Dataset(filename4, 'r')
    if ncfile4.file_format != 'NETCDF4_CLASSIC':
        raise OSError('input file must be in NETCDF4_CLASSIC format')
    ncfile3 = Dataset(filename3, 'w', clobber=clobber, format=format)
    # create dimensions. Check for unlimited dim.
    unlimdimname = False
    unlimdim = None
    # create global attributes.
    if not quiet: sys.stdout.write('copying global attributes ..\n')
    ncfile3.setncatts(ncfile4.__dict__)
    if not quiet: sys.stdout.write('copying dimensions ..\n')
    for dimname, dim in ncfile4.dimensions.items():
        if dim.isunlimited():
            unlimdimname = dimname
            unlimdim = dim
            ncfile3.createDimension(dimname, None)
        else:
            ncfile3.createDimension(dimname, len(dim))
    # create variables.
    for varname, ncvar in ncfile4.variables.items():
        if not quiet: sys.stdout.write('copying variable %s\n' % varname)
        # is there an unlimited dimension?
        if unlimdimname and unlimdimname in ncvar.dimensions:
            hasunlimdim = True
        else:
            hasunlimdim = False
        if hasattr(ncvar, '_FillValue'):
            FillValue = ncvar._FillValue
        else:
            FillValue = None
        var = ncfile3.createVariable(varname, ncvar.dtype, ncvar.dimensions,
                                     fill_value=FillValue)
        # fill variable attributes (_FillValue was already set by
        # createVariable, so it must not be copied again).
        attdict = ncvar.__dict__
        if '_FillValue' in attdict: del attdict['_FillValue']
        var.setncatts(attdict)
        # fill variables with data.
        if hasunlimdim:
            # has an unlim dim, loop over unlim dim index.
            # range to copy
            if nchunk:
                start = 0; stop = len(unlimdim); step = nchunk
                if step < 1: step = 1
                for n in range(start, stop, step):
                    nmax = n + nchunk
                    if nmax > len(unlimdim): nmax = len(unlimdim)
                    var[n:nmax] = ncvar[n:nmax]
            else:
                var[0:len(unlimdim)] = ncvar[:]
        else:
            # no unlim dim or 1-d variable, just copy all data at once.
            var[:] = ncvar[:]
        ncfile3.sync()  # flush data to disk
    # close files.
    ncfile3.close()
    ncfile4.close()


def nc4tonc3():
    usage = """
 Convert a netCDF 4 file (in NETCDF4_CLASSIC format) to netCDF 3 format.

 usage: %s [-h/--help] [-o] [--chunk] netcdf4filename netcdf3filename

 -h/--help -- Print usage message.
 -o -- Overwrite destination file
       (default is to raise an error if output file already exists).
 --quiet=(0|1) -- if 1, don't print diagnostic information.
 --format -- netcdf3 format to use (NETCDF3_64BIT by default, can be set
       to NETCDF3_CLASSIC).
 --chunk=(integer) -- number of records along unlimited dimension to
       write at once. Default 1000. Ignored if there is no unlimited
       dimension. chunk=0 means write all the data at once.
\n""" % os.path.basename(sys.argv[0])

    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'ho',
                                    ['format=', 'chunk=', 'quiet='])
    except:
        (type, value, traceback) = sys.exc_info()
        sys.stdout.write("Error parsing the options. The error was: %s\n" % value)
        sys.stderr.write(usage)
        sys.exit(0)

    # default options
    quiet = 0
    chunk = 1000
    format = 'NETCDF3_64BIT'
    overwritefile = 0

    # Get the options
    for option in opts:
        if option[0] == '-h' or option[0] == '--help':
            sys.stderr.write(usage)
            sys.exit(0)
        elif option[0] == '-o':
            overwritefile = 1
        elif option[0] == '--quiet':
            quiet = int(option[1])
        elif option[0] == '--format':
            format = option[1]
        elif option[0] == '--chunk':
            chunk = int(option[1])
        else:
            # report the offending option (this previously referenced an
            # undefined name, 'options').
            sys.stdout.write("%s: Unrecognized option\n" % option[0])
            sys.stderr.write(usage)
            sys.exit(0)

    # if we pass a number of files different from 2, abort
    if len(pargs) < 2 or len(pargs) > 2:
        sys.stdout.write("You need to pass both source and destination!\n")
        sys.stderr.write(usage)
        sys.exit(0)

    # Catch the files passed as the last arguments
    filename4 = pargs[0]
    filename3 = pargs[1]

    # copy the data from filename4 to filename3.
    _nc4tonc3(filename4, filename3, clobber=overwritefile, quiet=quiet,
              format=format)


def _nc3tonc4(filename3, filename4, unpackshort=True,
              zlib=True, complevel=6, shuffle=True, fletcher32=False,
              clobber=False, lsd_dict=None, nchunk=10, quiet=False, classic=0,
              vars=None, istart=0, istop=-1):
    """convert a netcdf 3 file (filename3) to a netcdf 4 file.
    The default format is 'NETCDF4', but can be set to NETCDF4_CLASSIC
    if classic=1.
    If unpackshort=True, variables stored as short integers with a scale
    and offset are unpacked to floats in the netcdf 4 file.
    If the lsd_dict is not None, variable names corresponding to the keys
    of the dict will be truncated to the decimal place specified by the
    values of the dict. This improves compression by making it 'lossy'.
    If vars is not None, only variable names in the list will be copied
    (plus all the dimension variables).
    The zlib, complevel and shuffle keywords control how the compression
    is done."""
    from netCDF4 import Dataset
    ncfile3 = Dataset(filename3, 'r')
    if classic:
        ncfile4 = Dataset(filename4, 'w', clobber=clobber, format='NETCDF4_CLASSIC')
    else:
        ncfile4 = Dataset(filename4, 'w', clobber=clobber, format='NETCDF4')
    mval = 1.e30  # missing value if unpackshort=True
    # create dimensions. Check for unlimited dim.
    unlimdimname = False
    unlimdim = None
    # create global attributes.
    if not quiet: sys.stdout.write('copying global attributes ..\n')
    ncfile4.setncatts(ncfile3.__dict__)
    if not quiet: sys.stdout.write('copying dimensions ..\n')
    for dimname, dim in ncfile3.dimensions.items():
        if dim.isunlimited():
            unlimdimname = dimname
            unlimdim = dim
            ncfile4.createDimension(dimname, None)
            if istop == -1: istop = len(unlimdim)
        else:
            ncfile4.createDimension(dimname, len(dim))
    # create variables.
    if vars is None:
        varnames = ncfile3.variables.keys()
    else:
        # variables to copy specified
        varnames = vars
        # add dimension variables
        for dimname in ncfile3.dimensions.keys():
            if dimname in ncfile3.variables.keys() and\
               dimname not in varnames:
                varnames.append(dimname)
    for varname in varnames:
        ncvar = ncfile3.variables[varname]
        if not quiet: sys.stdout.write('copying variable %s\n' % varname)
        # quantize data?
        if lsd_dict is not None and varname in lsd_dict:
            lsd = lsd_dict[varname]
            if not quiet: sys.stdout.write('truncating to least_significant_digit = %d\n' % lsd)
        else:
            lsd = None  # no quantization.
        # unpack short integers to floats?
        if unpackshort and hasattr(ncvar, 'scale_factor') and hasattr(ncvar, 'add_offset'):
            dounpackshort = True
            datatype = 'f4'
        else:
            dounpackshort = False
            datatype = ncvar.dtype
        # is there an unlimited dimension?
        if unlimdimname and unlimdimname in ncvar.dimensions:
            hasunlimdim = True
        else:
            hasunlimdim = False
        if dounpackshort:
            if not quiet: sys.stdout.write('unpacking short integers to floats ...\n')
        # is there a missing value?
        if hasattr(ncvar, '_FillValue'):
            fillvalue3 = ncvar._FillValue
        elif hasattr(ncvar, 'missing_value'):
            fillvalue3 = ncvar.missing_value
        else:
            fillvalue3 = None
        if fillvalue3 is not None:
            fillvalue4 = fillvalue3 if not dounpackshort else mval
        else:
            fillvalue4 = None
        var = ncfile4.createVariable(varname, datatype, ncvar.dimensions,
                                     fill_value=fillvalue4,
                                     least_significant_digit=lsd,
                                     zlib=zlib, complevel=complevel,
                                     shuffle=shuffle, fletcher32=fletcher32)
        # fill variable attributes.
        attdict = ncvar.__dict__
        if '_FillValue' in attdict: del attdict['_FillValue']
        if dounpackshort and 'add_offset' in attdict:
            del attdict['add_offset']
        if dounpackshort and 'scale_factor' in attdict:
            del attdict['scale_factor']
        if dounpackshort and 'missing_value' in attdict:
            attdict['missing_value'] = fillvalue4
        var.setncatts(attdict)
        # fill variables with data.
        if hasunlimdim:
            # has an unlim dim, loop over unlim dim index.
            # range to copy
            if nchunk:
                start = istart; stop = istop; step = nchunk
                if step < 1: step = 1
                for n in range(start, stop, step):
                    nmax = n + nchunk
                    if nmax > istop: nmax = istop
                    var[n-istart:nmax-istart] = ncvar[n:nmax]
            else:
                var[0:len(unlimdim)] = ncvar[:]
        else:
            # no unlim dim or 1-d variable, just copy all data at once.
            var[:] = ncvar[:]
        ncfile4.sync()  # flush data to disk
    # close files.
    ncfile3.close()
    ncfile4.close()


def nc3tonc4():
    usage = """
 Convert a netCDF 3 file to netCDF 4 format, optionally
 unpacking variables packed as short integers (with scale_factor and
 add_offset) to floats, and adding zlib compression (with the HDF5 shuffle
 filter and fletcher32 checksum). Data may also be quantized (truncated) to
 a specified precision to improve compression.

 usage: %s [-h/--help] [-o] [--vars=var1,var2,..] [--zlib=(0|1)] [--complevel=(1-9)] [--shuffle=(0|1)] [--fletcher32=(0|1)] [--unpackshort=(0|1)] [--quantize=var1=n1,var2=n2,..] netcdf3filename netcdf4filename

 -h/--help -- Print usage message.
 -o -- Overwrite destination file
       (default is to raise an error if output file already exists).
 --vars -- comma separated list of variable names to copy (default is to
       copy all variables)
 --classic=(0|1) -- use NETCDF4_CLASSIC format instead of NETCDF4
       (default 1)
 --zlib=(0|1) -- Activate (or disable) zlib compression (default is activate).
 --complevel=(1-9) -- Set zlib compression level (6 is default).
 --shuffle=(0|1) -- Activate (or disable) the shuffle filter
       (active by default).
 --fletcher32=(0|1) -- Activate (or disable) the fletcher32 checksum
       (not active by default).
 --unpackshort=(0|1) -- Unpack short integer variables to float variables
       using scale_factor and add_offset netCDF variable attributes
       (active by default).
 --quantize=(comma separated list of "variable name=integer" pairs) --
       Truncate the data in the specified variables to a given decimal
       precision. For example, 'speed=2, height=-2, temp=0' will cause the
       variable 'speed' to be truncated to a precision of 0.01, 'height' to
       a precision of 100 and 'temp' to 1. This can significantly improve
       compression. The default is not to quantize any of the variables.
 --quiet=(0|1) -- if 1, don't print diagnostic information.
 --chunk=(integer) -- number of records along unlimited dimension to
       write at once. Default 1000. Ignored if there is no unlimited
       dimension. chunk=0 means write all the data at once.
 --istart=(integer) -- number of record to start at along unlimited
       dimension. Default 0. Ignored if there is no unlimited dimension.
 --istop=(integer) -- number of record to stop at along unlimited
       dimension. Default -1. Ignored if there is no unlimited dimension.
\n""" % os.path.basename(sys.argv[0])

    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'ho',
                                    ['classic=', 'vars=', 'zlib=', 'quiet=',
                                     'complevel=', 'shuffle=', 'fletcher32=',
                                     'unpackshort=', 'quantize=', 'chunk=',
                                     'istart=', 'istop='])
    except:
        (type, value, traceback) = sys.exc_info()
        sys.stdout.write("Error parsing the options. The error was: %s\n" % value)
        sys.stderr.write(usage)
        sys.exit(0)

    # default options
    overwritefile = 0
    complevel = 6
    classic = 1
    zlib = 1
    shuffle = 1
    fletcher32 = 0
    unpackshort = 1
    vars = None
    quantize = None
    quiet = 0
    chunk = 1000
    istart = 0
    istop = -1

    # Get the options
    for option in opts:
        if option[0] == '-h' or option[0] == '--help':
            sys.stderr.write(usage)
            sys.exit(0)
        elif option[0] == '-o':
            overwritefile = 1
        elif option[0] == '--classic':
            classic = int(option[1])
        elif option[0] == '--zlib':
            zlib = int(option[1])
        elif option[0] == '--quiet':
            quiet = int(option[1])
        elif option[0] == '--complevel':
            complevel = int(option[1])
        elif option[0] == '--shuffle':
            shuffle = int(option[1])
        elif option[0] == '--fletcher32':
            fletcher32 = int(option[1])
        elif option[0] == '--unpackshort':
            unpackshort = int(option[1])
        elif option[0] == '--chunk':
            chunk = int(option[1])
        elif option[0] == '--vars':
            vars = option[1]
        elif option[0] == '--quantize':
            quantize = option[1]
        elif option[0] == '--istart':
            istart = int(option[1])
        elif option[0] == '--istop':
            istop = int(option[1])
        else:
            sys.stdout.write("%s: Unrecognized option\n" % option[0])
            sys.stderr.write(usage)
            sys.exit(0)

    # if we pass a number of files different from 2, abort
    if len(pargs) < 2 or len(pargs) > 2:
        sys.stdout.write("You need to pass both source and destination!\n")
        sys.stderr.write(usage)
        sys.exit(0)

    # Catch the files passed as the last arguments
    filename3 = pargs[0]
    filename4 = pargs[1]

    # Parse the quantize option, create a dictionary from key/value pairs.
    if quantize is not None:
        lsd_dict = {}
        for p in quantize.split(','):
            kv = p.split('=')
            lsd_dict[kv[0]] = int(kv[1])
    else:
        lsd_dict = None

    # Parse the vars option, create a list of variable names.
    if vars is not None:
        vars = vars.split(',')

    # copy the data from filename3 to filename4.
    _nc3tonc4(filename3, filename4, unpackshort=unpackshort,
              zlib=zlib, complevel=complevel, shuffle=shuffle,
              fletcher32=fletcher32, clobber=overwritefile, lsd_dict=lsd_dict,
              nchunk=chunk, quiet=quiet, vars=vars, classic=classic,
              istart=istart, istop=istop)


def broadcasted_shape(shp1, shp2):
    # determine shape of arrays of shp1 and shp2 broadcast against one another.
    x = np.array([1])
    # trick to define arrays with certain shapes that don't allocate all the
    # memory.
    a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
    b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
    return np.broadcast(a, b).shape
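

# A small usage sketch for broadcasted_shape (illustration only; the helper
# name _example_broadcasted_shape is hypothetical and not part of the public
# API). The zero-stride views built above let the broadcast shape be computed
# without allocating arrays of either full shape.
def _example_broadcasted_shape():
    # (5, 1, 4) broadcast against (3, 4) gives (5, 3, 4), per numpy's rules.
    shp = broadcasted_shape((5, 1, 4), (3, 4))
    assert shp == (5, 3, 4)
    return shp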
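

# A minimal sketch of the "orthogonal indexing" rules described in the
# _StartCountStride docstring (illustration only; the helper name is
# hypothetical). numpy's np.ix_ emulates how netcdf4-python treats two 1-d
# integer index arrays independently along each dimension, in contrast to
# numpy's elementwise "fancy" indexing.
def _example_orthogonal_indexing():
    x = np.arange(12).reshape(3, 4)
    rows, cols = np.array([0, 2]), np.array([1, 3])
    fancy = x[rows, cols]          # numpy pairs the indices -> shape (2,)
    ortho = x[np.ix_(rows, cols)]  # independent (orthogonal) selection -> shape (2, 2)
    assert fancy.shape == (2,) and ortho.shape == (2, 2)
    return fancy, ortho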
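

# A hedged sketch of the start/count/stride convention from the
# _StartCountStride docstring (illustration only; the helper name is
# hypothetical): along a single dimension, nc_get_vars(start, count, stride)
# addresses the same elements as the python slice
# start : start + count*stride : stride.
def _example_start_count_stride():
    data = np.arange(10)
    start, count, stride = 1, 4, 2
    chunk = data[start:start + count * stride:stride]
    assert chunk.tolist() == [1, 3, 5, 7]  # count elements, stepping by stride
    return chunk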
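

# A sketch of the truncation requested by nc3tonc4's --quantize option
# (illustration only; the helper name is hypothetical, and the exact scaling
# netCDF4 uses internally may differ). Rounding to a fixed decimal precision
# of this form makes the data much more compressible by zlib.
def _example_quantize(data, least_significant_digit):
    scale = 10.0 ** least_significant_digit
    return np.around(scale * np.asarray(data)) / scale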
gKR\Hj/ oR0 3su<7̼QjMm ή\J8-׎AJZ K_SpĸF?c}GzO@`N"5acjcK $!هlxz/< xdnR^ xtda7B|bp8=p/7JlO.tA ^0Fz#K$E2p\T~HŽ5>OrgFedIK^`Bl؟H "H+p9$U _=rLeVpOiQ)[p$K/) \ˇ4R+3Ȥxҥ%w6W/X_PzrW- + ).ޕJq}^ WAv\OUf4NEwYKMO!1?Aj\[<;^XgG8+XZ#e-V`*O21[Ӷ;"t~-|y%PQh+E^ڭ ˪4yAVqovPMbK钫}:Ex[l#ٜ ckdZ.", dvA#$DR4I0E h^X  7 `a)U:`o[ΊG-_Z vmq.Gc-Yn$i?`'<]k4nEnq'p,>0t0IwS4"}tYCSEH^W{Z}]QSqJI"N4NiaIXQpqu;d;r,._/J[IԮ;_?!3#&^^_vP<:<ȫt:M$ϐN|6O[Ss^4)V.z^N@Fقdu;sF~[b4BI>{ o=7X6Yhj}Mٝ^H/`T"3eP")-)[L@Z;ț( 5O M̘azN>觺hV+Q1<ϒf9\ՊqMA$E)bi O*R:kHHlChӀMx| 1K6r*KD݉$u'4&MY8- GCUs XvlsU9< Rdg1.O ls>:oԜ: C֝_[AGɖҖh;E4lySTf6nGF ψO̩KAo2?ſ-<=İ{-7_!$> .XH{Oն36JQSy[udg"m<3V% ʹW"K3PmjjcOB~^ƾ'7A϶fW=^3O.:ޙ~7 7G)M*O\ZU*>زuJ](͌jFviJ']}SC^3 ]+6@.z߭XZJ~ܺYѭ^@]RI렎)P1cGo+W.7HwqicS8FHn=E=M-zb0 f\hwQh,71#c O~v MG/ʍx*(|ݻdkl)"6]|ʈ¶sw6?5_g5 #`q2$|>D#XQj9Q2"p(^E :Z8gw2雇7<(;̣  6'cCg{5K3(,#,[$d5aQX֎ ol[ dOgV>Ĝ[_f) /7bWkNj0)(O7.)_(P[ػ(=#.gƋ Y /T4BtJ*9<\UeD% ExZ[7ѿ3S-dgs {9z uU蟖&ѿwSw:K%hp/_"%> -R^I2Iz&o!Q$%֞X Jvd4[(u.yϳ5L5|;XY. A핛iF|O|D=%< aw(;!T YDpGMFb"|Y{uhJмM|:x@Rl D|#d3%. QLbGDOkͮ`4]-]mhf ij5> ۵!HR^'Pr%%%%y$Ef,}cJ_VHsuwerMS$8e@Hle1HTA0@7FWw>.b)Q3>F[֜?i_ؠ׌}+HIA!,/\/Ni%9K]#߻ d'tyb#U=̥D];R 1aZuÊqf#k=D4>c6$rm۴ {b:TE/zƫN m[f U\ѭ|l,QQ˚adgD5f5v֏lkheE&3XOeØa޵ێLԩ׃02k(ZDgOn"o \z6w[ Ϋh z5n?6N,e}f&|mBOr͑K%y /Io s8=<=$E!{cEBJpd<v_UqbƔf { ߶Y1?*PҍP,7FSؠjԁn@{[cEMl?"֌BR\~(kwh77a}`~mjI _167Pט[sxQK $iKCj*ud_l|e~e[p{bJ|ճL3tm=fi'As-cۇr6[[Y[6Wqš9? dϼ6KP%{mlY*SöM–gQ*V [bgg%?a`-)יXz< ĢLD$<~* Э7 U_ׄkPidA=z맃WLjӛg"C4|.*>Q5jc4)̃aoLl,ggњ5T} ׬1weL76+~ŋU9ϳ}-}[Yxu׫HdgV2SMaO<7B 0THU 1d,f -qM"{$ǽ?p1x5tq^֐9`SG ,]O\sjjۭ{rADY (1 C<*h0D8I։Lt{5Z2uImA2 sv|5LۨŹ'@p)8ɯׁ06#o:t\W›|R&w[8w,c~yǾW&dF&_6MрP,Sd ۥCe G*;)-f5\7ғlDj%c&ݨ~~[;<=C-]- =k|FROSI)IOڕo/oW~£c`.8ULy|t|VY=8LS=Y(~`ז\%aC0e'rsr41Er8v]A (-4L"pB[=7wM3qϟA`]V;y/E!"?@%r; kΓ2}ı^b`dWe h}5U?5?[!s?pb6񏀊*@p/o|IBktR?Psp`l8$?zʏVŶLi.z \>-^K^u+.1'2 WJ4| D!6e#[-^ϤMͶdnǎ8[4u;MgMֆ&X]^Zix ~ TAjH%x-*WO^-Q>&[& .ȴ"8NlZ˰ H>;`w/!df4;qf>G@LHD.їP|0|v}|Z WRK Š[JƳI.Oc/L}n@%d  ~.NO&߱r|Y 3f|nCԉ5F:Y{B޼T@=S;YNVdyW3yR/QҩCE}y|;eOFji5p9([cIPDi`9LEY+7#9 GX, *.Tj]\}fx3qu^h''E9[gt|M(M{_4rA"x{lpꅟŹ-mm2P2@s,G!5}ku݀Uz_#Ϙ˼ v5YTc=&)$U4^{(yۨnKݞ{Yو.0hYǸ]ќSx6z,d{wz7+W5i"2 VLDq;vkS5mךtqpQlxo70-sKfd;JT)S)m6Zz!">.{kS_4/u}!"%mi|9fi{TG!`5/Tc;~"r IvE;Wp}en/ݺ2s/t}3;;'k0pQ)/r]G*kjzWha tPM= ee>ݼCo]͘Z>ű']m*jکQA*)uH|l<^'|wC/k{Q0A+x9`d/a;\!7TZC3yA8cHXƔ)YN!ȕqOw[.#Q(oE;O*e7~ }jlco|u@6q4%Жh7?~ P?401ww輕o+j WE}bOeZ9pG|2i?&t0|Ӟz٦DqiBeswp?@;3ڸ \|NvA&tk&S^YI 8✏-i ?9{I=gc@c,bςyѥaf?T@pm2uF.*a h0T1mJ&D 2Ţr/KGhE)2ۅ7IzܒKb^t< ^St/)fX6ѡ2CW r,&ohcyqZP&ض${%Yya}tP_0CDž˜C ʋ׵/|y ~Av GUΗz"{4-T S e`YxwyX ^E , :,/xo =i N ȩ&噜%+RIIrhYexݹdEK7"S^RHs;/uOd3&{3cƩ*`6kKcB}`lmyuB8,2[>>QЦp=uB߭)))soTU/,/W~T.Ht.9Pk̠Z%ƺmh{h녏l}laf5@K_-͊Nե_'S>\6i\v :e{RYς˞O, Ze0zb'ӛU/z D>(OpFXb~mG;ezLŒa(?'f N߭sUuJ:[-iFi+?tK-ɊEl ҭTAXeҠ5hc bG?{x{eʹf !n:l_56nyv٧?j-嵍#"id%:{ rZW*aR(iاS= CC]7[P ^[{|.t7JѠG+Q8ԯ\Sc!ƃdbbt>1'suጌzBd@L0:u>Jd;;#lFWBEPC|XЅUiwqʎ\4FXQ| GrQsgqinƭgE'Jux).o1ѫɐ@HrNO0!'X?gD'>T`7D2cź )&~sLM$PW k_-~nfmt340[VbE aR{ZX'+]\ƽftlOZnYez9٦:qq6 !_8 7!0M&d7V6Ʊ &G / 8.ZW 0nR~iq׊z=׭n&';/-y@o%t g'g`?h34h`Z");L{ڌY=Rn.%WOLKA wgPѨP_^_5nmeKDv-fiմL*wxG`xEF u6;km?jƇⶲ+Ɠ?òcL(ODxpDȏcL a$>d"]<|C)wfs0;4\v0_՝׏ҭǯ飹d+cR!MuYEhîqc7i= Tslt+a˴E)C';WRWh2=zbN"b@BiBRz\k?U.]J~y#J4 .-RM7fKh\Axߥү+3ޅ9vލ/)bMN(fs\tJcVv 7NI*lЙ. 
*dMWvArènzda_=M5Qvk0!7 5v vzu_Ղٽg,{:䰝`ڳ?ю0OfVypl5LOmgĺID1CbCDlVF?:V;k!#mR[OkKcy%}< ٦o}$$ToQqqY&T)S/?X6|.*Rz[ ;@#+~wsl4T8.UʓAMwb6-pbkt0=c/2}o", Op"b+RA= jH=3g/H1>o;pm5Ѿ60m%VvzXMuNmxvBE(ɣC+j(wgt%sGe=ƇqRUȷf7S*{{2=^[ZukU\{?@M{Mj-kNJ[>h" q!֮F>w[lzHpo 3+B\G$0SR z.H2~"}o@,X͟&x}I'\p9p%Fy/s v6Hv%fD v}L v2q !fqg3#Ȟ~@Q,%7>BM&|6D>9LjL`ylJl)=i1XZÄefcҬW M\c_N9WY9@eQl4 _ 3lJl$AW9:c_IUꪪ~lcckW!Fc{zhq;()_ jxW_-+:sV@jj@ѴcuL[ ;~U\1>%K:^Q>tr i}ف'W2LxT[lS0Ua %g!\z*|* |pF/xJx q 'E#DͺJNIiK*{npE΋Ft OZ8GU7] Yʻh/2~hc=VIL  5&@ 2,%)ci2؆-@MN*%c[v6mt,`+D!Z lUf 62gꞩvƶXmYՀK Zݨ{k\j{A [M ].YzL+o LF6P{ZԺA(X?5.K[)u`/ v?],PO(ʂL夕iεݽ;#O8G[`v4fᬕуlB!aJIk 6{LaA0<}n6S6z>ǮMә$jQDzɲFs=6%=d(9] 2 ,xޅ(aZ5Ө]Kj-FD4u2хlïsz)~},u}IR +m K7RVC$&suxzt# G\SGoGWf|𞨜Iujz?DxPreqmc u7m$Ļci &vBWZ^ $OeK$UB8C2V)8]X+;;<yx4ڊ_jQFH&'yN H{k%b^H%LL,*Ft5SW_3SuC~{ViIFAcw$deKMO<]JbL+UV aJGpc iUU%ϡAݵ*8!HJO]vN/b ];Bg?{^`_΂05at>uVv .}Bv ^j znʯazm/=Tc|dhuO"Ǵw/mgY ՞@%6fPpUcGX5؇vgz+|diN6@^~ dOby}0X{a+Л_Y3 ]Ǐ\*7|H(vNCr@,{e=cSX0yk1ԗ+3R^:AKlh濤/l:H~7ż +kUVҮt8v*u]55{a<+p ,=thK `JD&>_Y@'K0^?}uDFш̐^>nܮ}E ş=o(} k$fm-?bnVP|!a"ށjS<4dyt*&B"Zɴ!~!Y/ Z`imm`[{T˥V>_ 75l>t;LF>xaj2AA*W7GC哥.GыK%QyZRk%u21L(ckkUCz*\~;Q6Kk?bdUY{"rm+PQM` u:>!,Q'CWrO+߿0O]55QZPXFNcDg7H8 'Rn[>`B@;ׅa өӭg^ u%c'* [l-Ē;E pNi+wF~7=o)&XDm]Rw2YH&cqO^j n$qn&=Ƙ KTH5""j -xZ&Ʃ_/{1~3CBtВd֩:2iR|7)сnLMÝ>UϪWnao1KxmB m#]VO?MoAɵ Ge,<+ `=k AMs]l+LtUF!Q/<{Խ6=CPLz =3ꫤ9ӦPLu5M*|ݎ+$; ^A_g|c ?::rQN CYw='m=K~!hxj~wGiPO[0kh됙V=ҕRJ'IJpG2SPqW'^Hwsb 2цKc9<XQL:O; )k16u2ڋk4qV\|3:&?_'#{SI"$=}Bg-S6tt2~PPr@-Uq8Ⱦ㵡@y~PVszurOT]wu2Y,,:Cs{W3!Nx/Q?\WTE`{*J6_x,NMmwYB7 }b:t3ET>L? 1w7FDn/*. n </;=")ߞ!#!"5hc0S 3 +  6 IR/Cbr9+\7=pIά'ݜH "i̹8ymkmFCĒq-1L#bLٰS܅kjVhEe² xbMY=/=buE=ڛ*pt1xigc;s"Cjj \cb[peUުRzߎ__,yb{*ssژgS/dk2z5k 7q{[Ph6鮩K3u?R*MݜM5DgŔfg_Pnqe?ӂW,{%Vh/<A)w DnjY57ozC} ihCo ;h_]+sgNl;NHzPY 5gi#ـ|LFtEgl‡Rgw VdBl6DI')"md"?[~S}zȟn̥ gϹj#ۓyE;J%ք<^ +3g=e4ѺF'g<[rKA%zL}zəPYR5yoV ;gk$.ɝP[^ŧn C X7ho ?DclJ 3Y?*tnFOnDYC;(-w{,dm 6%|X2$q~s'ŏ}z XQy 7v`j@WXȷ="{ ݷzQ !W7^cb~2ZKLn5+FoɆeCwa\,ӝ"euzb;׃KНى` Xf!I 9f{mn+,jlmmlj76y!&e=# U.P@;WC /Xv%qrqdWpY$2?@։5 Gʒ o{fBG$)f `%d-3w%Vޓ=ynw;0a;|s {_ XxJA tV0$.r{ 2)PWQ gZzUu[ +ёJxҼ{Uͻ {k{(.3P vȹEv23+ȫ ꭏ, ~wEh^tq3UYg[k+U%"9)h GD/ 3GC.u\i[۝ 嚠#IVN=y3rzO : OźI'Q^/*i .Ev(zgkr[4W:QzC٢2]adzˆuŭI54-^x,-9}-Kk'[OxqeKVtO{ K,Xnx\:]_fI쏱\rR^޴31T{v=CYXOP_B_<(g3&ǥM ;E~sAA AD#Yk~7ra'ENc<'-^BQLOb:zGGkQFgI4+^CGL _p.fs!]"^ξ*lkWr$b 1i:Cu1gs91;23י{%U]*\g3tRDR1:\>!> vhy5o=d5Yג)N|QݖPW+LM-K'(ҌI,U*n/]6J+R%! %PLc3i@f\UůZfsFh9Jh4^eUk`nXoyĮJQ)S,1m[562 A9%0Ai69JAbp""C=xk RA؉؇GΈ:f* );mV 0>y~źQV/mi~,+vӥȊ-_4|! 
dnez7+Rc~;̣3ƭt#VWC*QN(-\w]D- nRxھEU\;˄6UVIm Cj7}k)AldL Z> CcӖB`)Tn5Xg0ԁ ^=%_E:i =f$k*hU*CV}i˸N/ESڨu]#ڠ"ʷ؞5a5+nXeSהUTVX v2n4ޱcw|+Lhr&9_`2de596ɵϖG9,p܄+ks VlC`,.ox&+Wˇ xOhM;AP{xNwl"V`" lGZ']t$#^Q.,7.p3<W_{-[͓{$r˶/n _6F5OڟOۖ)H2PS f1Z31!<Ɂ9^H<,Ff"t qGqU=)# XF?hO֖Z\ZվDQ;h k T evbߚJUUW*U99/F;FdZo4czUmvo弚hFu7n_rG {#p2`ZVR8詩Oe'1V[gײՆܸ!k2.pՓJ'co;GXm՜^hekKpCKb2`ϣf|n%`_6/2/gNZ)2tjC ~>e´-^s~pv W{)&/ufliUePE:qF/dHG= c)!`y&a -_ěD["7CъMhVܫ,[=Yz"*B}hju$p%cu?绕hb io5}L/v֫ vXBtB o:`x}^ye{\kLSvu~ږOT&XUUda#\*}=adv#xr)]NXך#E} U܉>1V(s Wmb_"Kzh~q,\tsg\NJ[ A?VoX<ݍD;r cבW0 5JU<A>b*Nׄ׎л۸.%VUk|V*g`* v1py/TibHQ7rm8.(Sk[hg0Xm7~UN0Ax#BMeUrŵ_X)o(nLT>D80 xQgH;(-ko~4լ+_vk<=&7C lI(mrKmpm'~üMqLqbMϨBƴbFU'/NV`KIKr[ 9"4`-`ۃG:bFELIR8ÜaUUP eYO~CjTl!{c yz/K)wu׳+[;Wz}3i`Owԫ <=}Ѹ}M~E鎟kB%09|Oӯ#kM`PQZB sjf-z<_19zy7E܆߭Z݆8cN6 kȷ9 ,Cl5i;3-5,l~g 部XT8 er >$FLepS8͋e%bL"=ula3J j H%\^Z8$ǩeg&&H0]6UR:]YSH3I6BvM|CFSŪ@duSAfj`I?.KeHXaW೭2[xe*n\L,ܾcc7o谿jxN_msyyQP-}I9u߳g}hZxsLq N> %TO-"!65;K@ PWhe܄_$[c6lZ&7tNLΛizc#%k>lxh3} mwu1EHuTP3ƱL>#mمF_`le~-гYINCyS8;E6LU^5DnInjEc%SI *E/mNAF~* =Sܕ`*73V\fxЫ&c4g;θ(x;Ƅě5b(C~H/$!waNu'*si3u=:g Pbk0wyN[&͟wc_ tod)5ZkNp@[#{GS@5‰$q0nYՔ47%15{BNregE{4B뢺 ]hp4cjb}?HDwNGwv0,7)7[(F7F#&H+0/pa9Yo{(K_ j<Gh= 6IU@BAl,K!Xs$ 2!P|Imn7\YeNujc˪I5V2zk}`:p+T%6 $64bH@{Hir4`# bE/yUӨn{$İQ)IZ;[~JMa>ڣs j9XOm?aN AsHT~K|] --q 1j I[DtuԔznܢg$۸aLq4 'dLߙ%ùSv!{ "1=y4PZm*C{`xOt\#C7=xCg+aq,XN3QBB x9Gv9"ڡk|&SF^/*͝՘.Q!$)?vgS/vW!uamCYl!ske۪Lea%esǸo2JP~+h蝶B燮_7ӞPnz{L}1;vZxV,\~Iz[Z8cVCӇ,kioq7MXۇcxr9)bLp-b\'e{-,|w6(^AŁz)kP7M(ҕ;H_*ʆb;/ E%9Z߃d(4b</Q'~f+|xu8ȃٌS ݖ!r|2@WȗhND_ɻh5 ^Op4NchXկւ⊺>IKS$m)=%.kV]jqpj6ZWf zL*VO~U?̝WO~(x"j2=QF8|Ya~wEbqOo^}9۷E722!K5QW;;ߤkm?집2d'n,G\G- ޅjU -3sp lq O(oL-jL!{+]vkt'j㢱lGbW Ab Ș/NF%2Ɂ| m:~h|M7WO#!Qh#$|beoN 'A_S"!)4x7PmyZիVu{ëțD:[:y!گoYT:$Qro3bTMe[ 8L7Xnt-b[02s$O [r&~Ɽ\ړuXaw"i;dB:Vf.c M52M;s+ClHs^vF޵fjrkO#n}dAڷrws|_sǺ~׿ޟkQ6]ܶښ2@Џ0#7%24ɶH Acңfjzuge!ly<u,s)5rQ ۬cp2`> >dňZ~ 5VM\#S7=uS yyeRQmse;|)xwB9\#HvOKWu庹Ih.'M̪|SI߯}aN7 9TZ  ZqkrE &>(J]=!&k* um۱ϲ;lhlbDl#voѪuȰT[kztoF6qQJA]}8}YJu"?=S]'B`1nᤄdIXx 6hxP"&9Dyao֜4ּٕbf̾bl$GD!Ȍï$vxj;alM{ۤ1{;{# 8 V64&h:AB`1.GΒA+s)g2kXq"N]`:L~0V."TQ #G>VN40g{[CBY3 aF5  ,LpI1&Sz0([q{GnhjuZ*~ m-*a蜐PJ;ݒVNZ.uIo|d*PxNeJ|Eps=񂥫Y @"h̘6eg<=aҞI l8z䎜4P}uc6|-BXiR6WϿqo0b)GQqXER[\_YTu\_\Օy ϏP{Vy Fv#a.eŠ,axP Yw| kz?fka_FQTb\1[Ge&PߜOg[Zw=7z8T357Lu7aY+{zDX=I$2k4ͥ9-"LGl"y# urf{Z8fgs~NygQWLYX,UN(7F+.LaU?A_'DmMkb`ek ]QrFz||%O73vǖz({YFYױ{,)R>{,u؜s +KEWc:ՓN?D{E:m>oF^锃z>B=0RKvI {1Ycf4{E~F@E #p-X#$6cD$ &r Ild=²3_ݔ<uQuIONRF[F /Z+D_Uf{q`=A |CU tZ^͋*ysȍgٽoɞ./ޗr4wTmeWu]PtsLkbLHDɖǷߏ}&$IxUwfqu>>75+ґ 1iUMiiy [Kc4)B #m,摝qv#B5x7X ]34RDIFz Z1m}iB|r9_HfTMq_㗷̏1 /K@ggj5 p="i6j ًxJĂXH#X jcPyD5 ڿO,7 6f +d=a[x܎}ћ=́s,&>[[ڍMi]uбcA.Ptiv*aqp@# 6-_-:*;rJ3Q%;M&"]T{ :\Y'䪧\?ƶ[S_!(aVҲW ]^Wې ܫ~*د^Zٕ6)E&:Z?dUW@ˎf{Ėu``g{3sG1 [ioZTG%&q̧ٕeg`r+|cu&|ڎF*vqqo#D>hè P-M916Ųdw6r|b~:YH3 I@/.s6Ǻ):y5H%.L9Ĺ& SwW+ DQ"`$g7BbvsPj/e&TWZqJ~ Lֳ!LXFnW>kF's"l[b"Iى&[ݡLUmi"<^\ý͋aTr~!]'L"xCAVf *y. 
0aX&j?1k+Bme=HC&秛D orۀny6FFk/1xI@̒|-?jGQ!;O6cYx$d8l9f2RDtR g+y ﵶӅ^η#~ڡ.U^aC ,ů.tVt,J'dK%r]TYZq֓ΪCY7"tZ^D2Jk2Ӻ@Bxz_/b~˄-G kܸRC[~|5?b^BEREQˑqv3΄#B`?oǨ{{ '!|k &CmHMAͰif_=i"܉u4af O[h}dK$v3%:io%ݹ/GThf+3?RΓ F>ќJMU;4L,U y<:raN=Ԛf+O"z}Smn%bul4tUrHA-/\eN@ }r>J?)σ+Y]1w¹n!:خr)9emI->ۆ:WUG]q:]Ȩ̘/F=aM 6/AfM÷oA_-^41I23w9W7$X\RDe%m?$|`Qq /D/m쓜5lFzKȶd2CfCG>i&F3A {a1f\; _`*-T/TݱBr0>31^Yd]9ب$ BR/w8!^+'ՆKńÓ3>/(89a(F+V BJAǹ= L']PWMsEFUxKLҮ7.̇_~}/ḿi=c9WB X,Ӷ7?e.h>:SwCp94$Ub=?&^n!fD;K[cˬ .vS Jx?pDZ}#!-;\h3Cx%ruSVwewaFTȎʍs 7qXʵfM:i%0 :NvirTQǒ)Y''!7_k.Bb-GUZwX}°59ն iO?0VhMe_pv){ƶQk/ [K[%[jUB.;w+pe3G(RxT6C8COL7EiOLaQ%dF~bgmoqL LZMP[͊>) r+^H:Kb3@_)Su -tNrL)" 1KMD>c@DNV4O(R^}9ĘWy>ܶMC'/[Y]1}jRXr>g=R5[lҴKn1.InO*'YqŻ s0elD4\kA@=cz[y̭ӱ`G 3VݎxoN0~f|oTb}{=R\}\l+2j Fl)1 p%zà(*TZFA, -uH/iwhJ< OIl*\6kb}G\ͼ7ȷc2zJj:lv-SJ tU&;K. WHFD%y 2,TF5SYJyZP6X(+x}89t7ymf:Vz{E=fSɢʨHa`dm׸&nர)`Ki (w*YмLS>,GKŘkĊMlh=z&559"Hg`9":=>g\pxdܬ>5t;9V$D v+eR9j6z[YO9g ٶ5ԡV˛OyәS{s|?ۗm;~BVh&Xֵqq$uD`t|p~ZNgGAxBuG^q^l8ehmo;okd6*pԑYoOp#" 2]TdqA+ Un o- i~bhN(慂:` ZQ>Y/Q_R}JFEUd-!-9s{X]뇡dQ1cc} U_j{hظ_E*n$fx*29*A 2+5Y|꒟^4,؛5U2Q/_}Me PL#%D|375w<tAtG(2n}}}2"fx?BT!lAR! ]%@^1_4T dhrF΀".o'ċNky_eoZ26fI [ŚJ} d .?t}TT[߷- vwba %b`9}Lϙ3gzk߾{{o}ſttuefN>[XVGgt9Uh*:eKֹ@ͺ [6GgMemTŻ>yb ׭6;/bNI7kq =Tv(`γeU T/#ԏVX;p|0J U2njf,^p;HώZnA?.hn;^kh-ezwBB`Sm1;bJȇ׵Xu[QaeYs70pyyǺ\G\'\`QEHZ|J nvơΆo0osmr7 0 s8@.%;KXi֪vi.A~Or1T"Xǂ.J}@)3g> SնMwYo_H\.z^U)S'3sU nRfNg_em^*-o-Ԣڅ?-⋫)LwoGS->Y_jTőrt8ڛŠr09Rz?Һwc2lXw?n?.f\Iˏň'Cvv%XZ7dIAzRk f8r] #p"ͬA]gGvϡXTaKPZa?˻Wǭq\Fp'Jzebcn:izfqEznrZ<4r$;XvPVrh#0 i g@kgFLczZ'm:r 7_? Ol$\uxD7ti_ר)g(;~HM=_V+oT&KgrV0szlRV64PZ;}ɞS]wCZjuᮚoPnw[3L=jV8ߑ(7:#?733,-n:i87w7}W_QB嘵kjjN\86!UkwӬt(7Oqn#f*]/dL3I:z;P<]^mPJ+c RQ{0`oU9X7n}d=鴁 6S\ '7r|} 64XV$"s)CU3"e8fvu^+(ɩzɗ{ ͬ_7Vzi*N(6vo3s}%kUOq":ɳe'{밢zTEW:nFUC/C.x;O o Xכ9czrGv϶]ꄅBm;'I!eK v]eݰ r 4ʞGZ'4n 1$#c{)w1)Jbcm/d4 |L#R`n37Jd99Dk+D:IdvMedg,qM>.Dk 2_)SU K./(j%ڙa]~%ʞb6.O-++! 庅y|.QN1v61'hpk/bjA{;f}4ۏn ˩?-o̴Gˋθv7Ch,*pԌSϋR}]a~M"rkbu+X.RYXǒ {M.F!eXs*;pL5/6}|O.w#սasm\GsĈՀ3R F9  pYԠMҰz,Gfsm줥m#`nkIwf42x(mh  ƄYKLV[Y?_ZGtRT8,H|nQAAIC7Vy?d`f󘱤jIZ-xv}YәtSy9ۘb&@[OX6YzD- ' )؀)u6فL-~uI^n❮mYQQ18OEӒX [-'{ˬi~qsCUKSk.wVQUQ l%6֧D%K6{U | |Z€2 `ws?53/t OCz_Ąʶ51YSwNk^]i7rt>E;3OאSt ֊0/)ghU3J]W! 
8Tv4{ѵS%""QSB_r׀ynbFǘ.tsFK;ːEa*ܕ!5p=]!cP3] `?;F}]]?8(-obi~SrvB/o#B/=Z;8Bi+MElN51#_ƧԞ3b!Ͼa d݄ӮIdS )&7;kK :  W l*}}_Gvr; t1,Y4K nim0G1' ibe{6ӆWwX>EERw*Xh5[[fDa7lXQCSܶ*6c3qDsQQdw^@'B"^uƱX$´+6 sHځ`h/Uoei{XEO﵏88Lm⬬FjpCX,>2n#)],P+WbnnkY_#S/5< f;Ra gs7s!F5T|ɚ^=%}ub}tlK $&"s[ O^]:9#V 9GePݹH'9llA^d`}#I|>>n w|Ӝ}' ]h䱌4^dW豖q[z>4$tna4A V?j)խf&窇hOj.u*7#5L俨_f T#q 5c[>/OF y%5=ٺ`w\&h>=ja)К?[Nsuy/A/>9k\!hOL>^[M킇 n-Ԧ(ȓږښ|yZ4XyBzdBh |#Ot ;sO}FDO#5hx |!5$S?G$eA( +C8F+& ͦ~'1M _p sH{Ć2A9B>@skJ$3a޺?+sY s 8yyQ58*broPk;i/,Y0;OUi[Ry|"S8кlmٺuUs5w^naWmoky"җ͘tpjFV13a#sp=652Vr+ƙ\PIҡvr2]=XK+XRߎ ww둭bl| *n0H6HS~Y$ 5cjmnGhByDhUC#`/192{LA> 9t i`jfb<B r܍}o^u/e-D͢Px!t\6 hUL8qPÈ縁O2=2 KzudDb^M)h{*[kqJmW[ezal *4ld4>E%+p){KAQ1aP?bԲO3-Z*s]n{{)=?(;1h~pNJOcrBc,Nig6ٔ0uuX\[(qٝP&9yvu( 3//TO1hB)Ri2y5gqg"U#fxpesp2($`oe1ޅ9`Dbc[ѹdvG[$1+'z 9MV5P:d%~4ʑx6XU_V}ܶ FXbmaa4{I?GȚ" -N rj!B`_P{%"07`L\u*ģVƞ[Z㊄%!c֘6^h>ە{X;,FMOh w^#Q+vɉ&5qk}`iq5 >fIyaY vў1юI ]G3{g&ܑ4~zbC< ՜!bt;S^@ -6L6?"WTOrn5E=]H<˚C/2ѸLmO'@e/[&3S~AJSgc>`|Ԕ=Ek9|VEBa9H?N~ju_oYxȧLYCT=ךS']fXD~P}J`fX&%S_,ޡTUSV+Fz2َY#QG6 bX8@G`ி~DLS.殎Nr+|kB=o%2DZe'SM+PRQVDpY֙Ɓh+ӄjB7zb߽\eo{nWI__?5WtzOxFU'B9NYX+(tZǨmk[!ca-,+_ggsxRh/EJMp.ݨOxsύfꙡ]6&ŕ ky7ayCq;Lp)r) 7=_s{&$9Ko}9KLu y,Kzs豴"ISBV0F-y^żhf-VFQ%f?zYXT%/ޙm@&sI淚TRQݠ8PLTfN1}ͽaH%̇ƬT-OpSe (OA  H2Uo7Rm)U_t?uڏHQJgrF}OKL۾rr'R|[ZO{g*vnsdGyxgsB{={9ў $,I?P+rVǜ/%0NrEC/TsQPMYK/0ؒھR< W=H-XLźO=U=HMc-]Q"]m@?LYY\:UyXfj;;ؿܚ s8+|qjQd=9~գl<Qˬ\jp8kr]FսYcCZG>jsj٠]7C~X\)ҍyڴQ}yZBV [s?v6'08|[>?狜ڻ 2ef/7U#n.Bvox> T,k m3,;ZǮy,<GS*q"ʄ'ջTM]Edp 848?%ƽK''T,ȈW t+^3:9Yeʻjf-BÑDtIDrYn \8#tAJwj:y<¨}p&:SwoGK89gރe,O3o:iyO_w<\tVwȇSӸI3!H+1Wuw\;N)j K5}yb)tliQ=;XM/Mbn%]|n[¸VwRBV}Q%tbfڋͷNtnŢQ(uC71babmN % t2V]|5{t:mm zбSm=sNds3tY7wT/>#O$1(wA3SywuYWvQ+eA"JƫyM@&4sU:sy);qWt׵3 7%l4CYH\4ЀƨLR7AN+K@~n%Ĩ^5Yϖ[Ɇ~[f2oѾ=W3-'ܻCWxs_z%Ҍ7‰Gu+]{pҔX&,!IԦiP"$vrn,mͱjE&ܫUPf&zCK9S &~ŇEۧWֺ0n䕤$ N 6#l6TE kt|v dCaxwh,^j1l32(WKWpd>ZkvlGB@`C)ܐ OW())z꫙PR,r9d*U}Q,reZnEK+;X׺q%J1 bsP{e:/.{68p#_c*E ITףM=:E7q?-@fdt&fW/m&xʇ>dz@z/{XCmX>e @V2O4L/ae@]_&9.00-j/OޙjY5M઱h2,6 g7nݐVyٙid-/y[u ӈ-sX:rt<߀LB*C_T5ꟲ˪pف_Ip"˧.Ssϼ$<P++pt󄙆|:zSDz=ѷ?݈fۙck(sR>! Hu]|_KȞCx{KvF^\x'[=s2zeSTQGƾr㧒AO1vw0˞> c=&M#.se ^=uC/I%w.\+  [cCg$Ҿ5D^UV[{ՔGPbbt@t֎Ln\jlͿY&7 O]Egcs_L|.O"ts.k? % ~3.~:Zk&a3|2զl$[F?ҹDq't0s\a먘R*8x+5 hjRz;HBO?IryCJ{lM+ӄ>L_,Sۀ3=@tԋTG4~ Pt^\?XCL&pf01^Mke+ћlakKNS %CQm7<&vSSeMWHs@O$+1s Սl6|m}R LECRmb~CnN4|-)6ԭ_xrqٴ[uӝb#u&]:gOXܰ*U7.u(xĶaAnh,R=7k"جLwR%KC!4U :R4$&]o3Vml{jt^}nOG%&T}ʹ=_0)ɘQ_YZpHU2 /P|x9ڀƩRSW~*JHߧy*^Ε-zeVM pr,%t?aSv'Sj."oG53`|Q0n'AJغt _ 4ʙc@qjlAE6:e}AZYl u$M38 gpwxzZItJjYJTp[>vZO#fH;0ȥHVSڢbkN]ޕY lX?lt+ҫ\%3jeiNVeL罫 ^ {8H,t$dysR'c9ؖTseKr$HCN}'Ξv<\/8dfft)NE.\P9ޢA{sʖZ5[m#l px?7[ey-i:ݸcTՇ 8t:1a* }J\j1xh_B_zVc`Zx ]{"```bk ~j 85.fI QPתڸFgj͝a.4|FOYM+w abʱ^2'B!U|ŷO2Zb(r}IߒhEc^ʑDjlC Pӓ`(gYKk笌=:'—ᥜ-\w݆]vKf%v:,+u>.W.a9۸]*RT"0 8B':3/SDeX w N)?~f뼞=f21z&7FL uhbl!SU '~[x6X]|a.GPma0';23`uT&6`h D{cbM?aZbxS.0bޖ=ƴ [x}W}J}MՃ8Gp|'ٌ=,8!GT C-'<Ϥkw0b&jvIcmv ~LB2u| P#1NMA8j`!=BSp9D34qsE9^m(T> MI 8s{zaoR ,ޟ-~q69Z=\^Y3tPiy#Z-@ҏŭ#!~$pwSkf!sN&qYGȧn*n=g{g 7mu;Ļj۪5Z37S2~!nP:FE>|`؟3GG$o >Fj35uX04cxM݊e-tiWKV;yG+6ї\:%g[zhjjjcNZlZxbT o>]oN[G ƶ]} ܅ّ1!3_XKTag{e.f{,͖y6Z,2'j޲koa Qˑ'zlmUn끡u02]j"}&UGwu-[ .++So 5mշ0Njyk{5bWl+ci`߾V~6k zZ=9̽n@6sQPh []R J`'ř>a^p5quvE&}B|96XI<ԤՂkQo=4 mH&}$z[#9ȸO{r$|޸zo{{{6ُ:R~){ 4ko oyݧPoRy4@#k vL6 X{u-c&eRMnA_?U8 a MԸWYb"mvn;+;/L3鷘m?+ΑB 7Iz}a-r#,)g[Q1zV*15rG=(zPdYJs;+r, ?~C /O I&j #΂Id[̠>,.zAn$-t$3hMr%l K$qSupZg6l‚fӌL-9Ʈ8S%Tސպl,KgP<©|=pG''Nhl՘D{IkK$S's}%82OJWC:kIv0?2$я4]B4[|/앦dw7k5)Q8ec?Vi: xk뫺xO+#G=c-|3|?~_k_ȿ_ͤy$0OZJ#v5v))=(\OX<48nQ7gGVq-|U!0eȲhvHKϽ0:'n|L1{2. 
uTxg;Ee%d.< A#rYO2V6ٛIuY'72YU*P /4/?3Af6[S7eO -UT/O,Dbgi\޲=h:Ίj=)MXpv{K^a}a]:_ºC[4^g~u]?Ao0[},~_ϾA3}?z׳gX+VL$s\EfFTʗ,ӎ (j oBV1AWb*۹6Y ƶ`zQ ~n0KЎ5KA?E7wIuED ED~=C`>Sd;P34D帏mtZ/7m2]pUl(55L~ѕ/=b{5ݥs&ٗZԶh)O"'G''˚HylqS]l#]!δr |孭ScSV#0[,w-O [퉣ip_:}ik/ W:[ߕj)t?oj,HtFKs,&W{٦!TvN'-6K4Vua7z1=REM8׫,0|lڶN' =U൥He惦#nE[X%s0!aeǸ3b 'xN;k!CA3y *GqMAL??4c(qHZ#tNO G;Y&Y@*`4Vhgf}.=p +7\澯ui7?{6yRebwm"L a{&㥷{vʼn6z[Xw HIk%gͩ9dIo/`fQO{9(a󷳣~ݮu=at 6@~;= ?+lO7ZZ;pcxW3 !zںa  bڛ-I7|9n7뗇̴hOx-rXW΢ ~YlSR`A IDʯ,TgCi] ܒvg;uǖ/ 5CT:5 ^vƂdڎn»տ_]~pEҼRk5I? >zj 4(J,ԄmnQ*ܗPO-i+L_oO{hC_S_o?7A<M**2:V[-k~RyHo{`gyeYs]Dsvq_?;;[yvFG/'gGxb {̏ͿaKi_U^h>d3QQQoն:/"-?!yKv dimxK<\̳Z-2763hQW=MmYockOniD(,v+[ztҍer qʞZE^2ۣ<]{V6.57M4]\]_B+Nvrt֐͝s5Ѿmtڞ>vq`E..0_vg#h;{ vT;\b\\]\#}gGb@aW׷Cs=J2 O5RȪ^2VJO>9[k$ Q K xV*.`nvX*> ==8+F4gvjB@ԠE5!R) ó㈉gq㡗B5MqS]6]{vX7 VcL]fеuC%EIpchll=on1Z6Z:+?Xvr ϩ2hm5~xkz (4#Crb*Bs8N<=G}Gm~sE23HS Aطc}}poo;:Ǻjk<_\g<]>^zy68Ruև M0P鉽oPo9n67"^ێv}_3ٴB@ rvkUVޢR_ܠ09jw7d;0(ܓ17תD>+*`^lf+ F!8Șoen`+l42<>Ziwzj{pyn }=kn z*pz6Ck OTuQQ][X"*( h@JD"1%QA`>0;s{Sb 5cd$w~Zs3m1}S$0!!0$~@ OS6 ! ݖPlsN-&Ao7Ɨ({C?:{0)gIEZGN+ʋJ@$/ZTYq'@JnS@"VpWX8$vnoTT|Tt='|a9`LյZ \øl6{v K%:7|so"wYcHɄ-G!N "oҭ^A?TaZeA_QK jĞEEI~˛MMEEJI2WyQD Es,G_0ُ4֛CY_!UFxDj&2~LGgp9c`ʸ{$11Nhi6K;t^[zbrܹ y:qJ3b*BK6qd~Q'\ξv{$l"Aw  h| ఓyd%@_Ӽr@oeO*tݥ>s>tis8I fp}AO 3ˉ~YpAQu.࢘jo.[wl./M}SlW{P.EԩE귪E $odG ef5t+J҄M+f*JnRXS ʓ0OKKPUN_$ %t[h}̨i Mptt]bx`#%(a[;ljjoFu]k8i<(E$J\ c|G7tSm gq$эD /zXG`OۿyvZX\/*qmS72ܕN n Ì[i+37hRj Z]Wa~} =D[FU)SNf)g_hO돫c5dtFi@d8ʩ(3ѿ58Znm 0[`. Ehqk!тY⡂aMݍSMRC,YެdTn}c)_7KO ZlRE֫ԡO75Rwm+H@<8['`wݏŸqs.mn˝a]5^g=?Xj4{)`(Pv(rKN>Rc*~ONCǟ*Wy%Ӭpr9 ƕ#4!Z]?ft;U;^s9"{l4@%qi׶'|HpǬYT6U Kt7+T WWE^Q*?iM' VB7RKE FNC0Ғ^CNiIߩ6h(rx_untk%+28&#V{5vVQw_bci0L'4q!5+% DxCL}gm) kdmk&o-А!)N8.b&,M>@_^(lBSƛ81Lm)0]O*hJo## j `&*f*H.&wZF̏i_|X>{fLLUDs+l8S6WRc&kl}NsΓ 26t;n 06&NDaq#qg ?n=^]~A|̽Ƅ2]qy;MEVO C';8G5k(<0Ϛ [ua\^WkBj)&EoqE25JÛ QWY^NN6w(| ~2Qc$d!xƒ5.X"kl3X.|@jd&"ح5s g ujTbh"ti^˦. No.[{lkQo~bުo["^oUD(V"`"ՅFtI|ܘZhGK)S֒^AZbQR׋HdӮ͹iMvz Y`Y]v0E-|s5ZxUkʌ6C.BkT{4Wˌ~U Ej2 _' ;ۃF%H"}XLbW.>zRi}um ڦ_u+0P5>bK0ù7|6?v_y?.ǀ3:J'9a Z5.% T_[Vl,ލ,< ?3ުnvltt٢6:>V>_D_EAM̊6L <({D y[+ =6W6k΍ZGדkpf1'xMZaE0b>W^m⊓?VվͨJl̫9̘۫HRT$!Mh/.Q*ŬXY藢uW_cCACtJZ!2iOW+ͺ.Y% .m9&YU,h * h5c[4<-m6Mm5'6o]QX_1̢ ;xc7 k \x&co6P0_"3r;:Wml_k`ҍ/Huq$'EZ>bCP*!I`pXG;׿?˶ɹh?8hg稨q<hw͔t{Vb̬c?ߜ~n8O]?RGyw~ދ]uOۖw~rd=Ays~T_?:}Q[}M&~WG1Y<3lS;A2'8P*0w \+?VY'?6kGXcI}}b.W_3_uܔaSW1_/&ETA1~#i.-QhDboCqng 1FYUp{Ыn8~?#08\x_!B:Ƥ ,{*ݺhe;dr(Iڌ WmzkSZ-}4= ¥6aGuд#0pdJ%{ᠻU?zuM'RNW-=IJnץo۞'[L јrWt)XUXl@VI$Ӄh"(uL! H]D$!yKm}УuؓxC(jlyq[$L&%N=_ii.AVbNj+Y]M{Wzmқv!Nzp.{IUu$ҡЖThhJ>鴓V$OIk1`f֤ V=7 4B@3od=؅BgC/CrGa{paPXmX&vڅ Q99G4TNeš2b7*JYP6}n G<H<ȅ>{I?\ΰHd/h{Np:rH  D݃5]uf9\(yE^&uEUwxiFcyYf9Hnմ0zFQW'bOлB<$10$NcjF->87g @k` 1h '4(Q##l2U@I0#W7~ iJCӫÍ~ecJ6 66cld_IL\DzAL$"Eh!AdИGEҊR !e;KnDnE ]"zcy4ո'E<Dz/MVf€<9W^fV}x =JeY3C=?OjM3IAl[BVz@ilw݉ߑנHbs_4: %vJ!Rd]'ӖN22,vV8ϴFltkWYW{؎ײU405!k|h 7:"i[ԠrL !s~ 1)o͊ T)6vٴzCuLGm;,To\j'jB%ǡ-4$ eHT9Xx %lI<]fZ;n#}{i-#urGTcBI\|MWդ}:MhãiY|2wa@l"| }_鍺2⭠ eNM–$EsO]mi)oh*$*W^_`)~,u8/tn ʱE\j=\!뾣[I^A3wZa4U@Ay(%䷊JQQn$ykO+(e8ѱ;t>rglZ ⇕DgDW} *& B yQ=о 6;|]aYeWXZw VWH^y>2 /D>?9%l1b_ 2܇f^[3Cz~wensAVsw-h*'&`}[邪.?.G)<ܼq94=9D)׆^o;ǝuƿj@gFKBbFJ1b O =aj+q\c`:55,OP "71C%l /kB3QY뜷È DIYs6%(6n7#fľ@t$/ݥ"4`\/>)JoS[P G6dʳsH%CJTJ|hY~BjN τtƺ[kC3tXy ʣ#t+Fѓ/v>Ej'ls$Bf{d+l׌/̥ь,C|QUWq^8;<gQYQS^*w!.PV9gswtqv3,@2ci)% %ms> .#Z}ofI6WxOD 5V3";TP!pՎ8~ r%b ڔuË/Ǫ?`4_^(qݸ;r7ni^~#T kkb?虂Ѯ&}jAZװxc]q,Q"fѡp["ɖw|pMǪxٽ_ OƏG< ׎P:&'g`֔a&Y C+Fu[c\'yk5 gCEBc)d`\ҰxpYʕŏhem?qf|MšqoΌD9@B(6lj="u{khq93퓃= WbL. 
m Uo3۬_ ҍr Wma[2 lҤ| 6z7mCr(^Gh gzCۻwi'yaks!}#Q7 3a~\oZ [)̒S lzfU[áOE&a "}(5/ld/ۈAr=6{ثL 6z]NdfRvz;:~:/2B_f/QEPNUt|Uj'VKiߺOtC:'r[ˬ''ƥ2w:`sG&YZG..9Eۧ⮅?i{euZpS=0%{%'J? 6)0|wJsuRMyhAGX|jUyG;%} Z0+:wTs+Ã=I$A4[!ѩ4x3_uQqH1Ph6k )®<2 +gaTvgC.WCV3ΝxYqXK8XHA'Uca(/KBۢk{ _LAew5q_/O%| >dCF`9RR;ye0}WNJLز]Guz9 ':Fr̪3\Zktkno/ (e*2Uܶ_ dy}BG dWo90;X9=MC9'y(@)(0i 3#| P?+O#΃5 2"PNK"-3Ր4$9ۙjiQaZAS.>C$%XV7a1f8كGl vU>`S%46#Jڅ;wVL;r_y$ats>˨SLd 1'|m=Q %`𣏕:U%43J~glyX7XuJ cT*3J{r e7OC5'ާ9$ꩾ5 fj#N&Qjַ6yC%0EVftj_* =֧y'-9|ZvAPUuZ𲳶{+iD ɻy>Z4އvJr&zHmhX~EjuufmyDO azht2- ͤ 'Y@qnSKBGiSC@tDtM$ 'P ,)/8ғJ#8|ջHL|WIЃWM/ڦS'ίƣv;KݪiC0g| &gź$D.ؗmJ]HΏ5ncp}Sy[oyV*l4$jAt ?ڳvʾ%ӼXសl꨿xu9mY-KV48SHDJM0 %O)[1npjAJ 8wі 5,FcڍWہV )&nz>,]@Z.%Ա6;Ol%[ΐS*Šr'ߝ T_i2ؙ_PH_8=;pdi:v?Q\NJs1T [$P6}ѳ*u?K8+zJ<6coSƶM*^= ^TV ^MTU-XVb0Qנmf@0Q6wj="'@rm.zGr(?,=R=L gu '#,+zHKӑPZbmNyC24 |Fsq]c£2CI [z zBť׶2!kV-"H]k-kU@}45Y?E:zXK{VuLA=KjYJp=bQExJT2̱(ïp,`ٚss^s|B)(f#s h|h>ܨf\7^sv*B8MBðd:-QEZ%z[a&H_R Tj_nm+!_,׎@s~ h@+ui-X#-ṷ7ix{NC*R0g=!"59 +ՠM61X1QmvAgkW'S+Dy^alFpIT@ߔǞ뤹6yN3܎:szjՓ$̬ yP@x=g\6뭓IF`pXqjq5M6E,A ^s8ϟ:*T&eDoWeh2_NQ}1~SWF<%{]^?sJZYFSY:'2}Kb \x8/h*Sh>}ʵ.Z$] kI/]eK^WVO|g{gUg\"a2A}5P$b*ȉ Į9?0_i!`4!1QL|BkyoB՝&g7-eq'~@feb ~uIJsE]a,ǾYՙw(X!Cj=Lдd#^X.o[&euٍ4 _vp//(qBpS=+wԙ,B:nG?{cCEOtmr2(.m>F1 2شAkwip5tm'1G&gP3:{ՁF.${`>ANjo(1G~:S 1+1:B䳞0 2:NtqiEXiğf>'_eG 9Gmed`AiLKf?6*(zHpD}pn/./dRc;at+26(yrfJ$tg8E`sX]1Gg9s~ɾ8rkzZʫUkjOoZscP97mQvǯSzP *{o^Ə\%`Sc'4Ue8[s|gN!]ëK礂d HDDO>Up!#an"L%q?Eƅ[`hoY2H}_Vebk]~eMkԾյWDWtySTXhs8ǟ<oLdւ\f"AVG,?iOqk(kNzTy5vh`R[yK .ˑyzJmt Gzv[4DKFr[q^Sv<zk:eRF9%'/jf1,NMQc|%^\t>N^G$V!95,@CY[5kTuN=Փb%o켣876?wiPmy!])۷A4w T vZy}Y<ݢp2~=ڹ>B SnTz\Jڻ^5c=ܹiyswB=whl'=~6 L}@tU{'!B/I1h\ӈC49QŮT9mI?peOoJ#k`([ }|}b?;yXo(!2ZK~8dwb!ccjzيni#͞&=OaT  (9J4pA|:1fO¹vWꞍx\ɍX6띀ֺol[s,һY0Eܭ4kw$nyKUZO跢sña?:j8]y%1c:|U~EwE604%hµ :3Y fwN I>܆"cDT T\&֦巨!}BRBLw|~諩u?JՓ~ꁣ tZ7&M*8}VJDPe}m eq⬾gia35GܠEf &vwO`nB뎋na@UAx٢6 E B&B:W<&'@k! .`VF yx1­|gVɶh'ngRj+WU7CWv0&GP똉\gf ( ֒?e~ ]wb h];qQW5YefcVv:`yម|̺\0uߴnĆuXFp=a=I 35dqgcנ0ŊmyLw$yMC"ڃb-Eo~H’stvն2, X]'+r,E _˒9䨣&0TʝZݡC[\weZoEo⥽" hO[ S_t4\ד]qRzE$n~|2A!ӲZ 6-1>VͦU[),{Hwm:q'OCY`A=)+2_=0WUn*GyY[IpV&J7VWLYwq=N[Ky@^e[M +jT< k)$D-,αzs|}s9P9C{i.*ҰxJ h7ۡ~ěwf"`ԽBA0/O|j J&챱^ġ#BǑ0[&>{+;$9\pkHiXI2/(ED~{=+70PK(^VgWQ+e` ov)hp "t&ȟzH@Њ/o倷 knA:NڍvÏH)=8%޳0 >⦝]Ęs3D:oXgSؗ R^wǵIЮAO[RURK~څwS4p.'#@gj"/'Ghf8u+P-"|;?м:I^DVhCFޚK.v`z_byWn"Gd!/ېhE# lu١_;l JlvÕxs6`S?f~h!jj7V{V61Bl%/%VV檊w:G|D2T^?)m_\]lr>GDGqrD^|wsMζrCn Qs}pb:ǴR5$|шT0XJL> 9mQ+ ْS=#l)WQúSPv wSKd$y"W_ Swx4cJ'Fst26#Z߻"@Ll /ackzUM<β`C!n}F1EDUuҙ. 
7eUdd4SwJ|9rmTT薞^0*+59\}=d'ȣ5x+6T-KcF/FjxC#iN# bj1SC5XK *ot( wMG >X\|5nE8]Sep庖~T n\tQLoGe&x!@͎>ێU$s$➳u*\>7#YoD8)+:k6?R)<$|U9H@n \XkxRfgrm86k#{eFbRyY A*m:d G] y0@WLe%^3!K썳yz?`޹u=ȉأiPǯW *ɻkȗlRPW`9^kJmV'Q\tJF9RNI|LO5 &HgDp_PQfB1`o3n2* ]:mKc%GY/MݱKK~=<EVR1%)Sg֊iOɻ%yVT~&Qbku<NrN؞U;jGz"?p#ˈ&Eü[>oz\Px8ߙNE$Vh!$382C 4?Z %lVs詖l#Ïr,Qӽoqgu=8BlѤFhzw>P38LaK>8 }\0!i#: 9~QXBu9bxny!${Nz#븭 [H<hlPY j8k_A9 y**XW*=njTr 9)A~7nwGL*Zeͧ`bI~m@$Z\F+/%t1r$~ꞣwVpS4aI,%nqcs=kS {7ͻJ66<7+7ROvrdmƥܦ_ٶZO-]\IĞU:J^g-{8ynk6v\EeN~uI:R2Y"zV=۸K{WGZ7kkj{75y_?h{_!vК:#襀U/"ނ9L%>EP`I6z>$SdplئPl9|[pߐ ղZ}xZi 56dRvT 9mrL"s쏟\l4Ղ9-3QLܴ;(Qs551d;M[O ))ʑC(%NEKDM#X߈˧©|~joZOਫ਼`}k?~8&vpN~B=A'*'Ƭ dM]AnO4LO~0-v?7uĚdž&lۇQ#r_8!*JӐdrыhmv-urj13FtZp'|3SkJT po|w0UxE+ZrT]$/H $  fOsBAfJ*ʙSǫ,gM6MOmɋŌuJ,<0`jNfzvP}K;p(@DB!K{d^pZ`߬ riG6iF6ê6w=ep5K)wE[m|Nin]|㊏2[X3MgfJ <LHjw#E/#:Rۯ 7ETN\|>_YOAHbQR7글sZ/EXD'=$l~1< נ׆L 10I `\M2Jm= hN Na48iUS2ukw+p+>,3P.cx+K/'a7YOsaVjǧ[ȳվڏR1)߸л(dܥBTLF; L F@&'B^2,B7+<: x/RA_]+e >i|g7yBTLF +A]+|BF@; : :FHDB6CLASSDIMENSION_SCALENAMEy _Netcdf4Dimid 0REFERENCE_LIST6datasetdimension Ea scale_factor  ̒ add_offset  ?t>unitsradaxisY long_nameGOES Projection y-Coordinatestandard_nameprojection_y_coordinateFRHPYq (wtFSHDPx(50wBTLF; L F@&'B^2,B7+<: x/RA_]+e >i|g7yOHDR-vYvYvYvY ?@4 4*G5b d long_nameEScan start and end times in seconds since epoch (2000-01-01 12:00:00)  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+FHDBprCLASSDIMENSION_SCALENAMEx _Netcdf4Dimid 0REFERENCE_LIST6datasetdimension Ea scale_factor  ̒9 add_offset  ,eunitsradaxisX long_nameGOES Projection x-Coordinatestandard_nameprojection_x_coordinateOCHK l0REFERENCE_LIST6datasetdimension s=qFRHP.C| ("FSHDDPx(a%%CBTHD d(""_G>eS.=&BTLF +A]+|BF@; : :  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~BTLF#  M< ! @MS4 &~?zT9fR<}N=tF+ }(F=& (F]TrHnZ=c5S{2Qńck<X@SK'0-$ a)!/Pͽ+.pK5.K /8u]g(~f`BTLF 5K'rHX!).K5+ $ K # =& ckP2fR@}u]]/8g(?=+S4<yL4OHDR-vYvYvYvY   * ../FRHP]" FSHDPx ,,1BTLF(~}eF,l̷K%dT OQ!6[Do"+" 2 &]'g**&h(}'=3(+,(6' *1(U;)` G9U%\.! _n +-Hiv4H4p,oE+kA@1dYR3hx)*RHM@,Rsl!i!qN3) JKBTLF=Rh())  )  .! Oi!11+[1@,l!++( +6']'946K%p,% ,!3" 2 &(FSHDqPx(0BTLFh'j ?L mLKX I`A[Wמ`XY5$IU4úR@ߠP~ BTLF ?U4RI$ImLWXh'CFHDB.=a) long_name GOES-R ABI fixed grid projectiongrid_mapping_name geostationaryperspective_point_height ?@4 48iAsemi_major_axis ?@4 4@TXAsemi_minor_axis ?@4 4?XAinverse_flattening ?@4 4r@latitude_of_projection_origin ?@4 4longitude_of_projection_origin ?@4 4`Vsweep_angle_axisxFHDBˇyxHTEDQFanumber_of_time_bounds.number_of_image_bounds0t|2 time_boundss goes_imager_projection y_image, y_image_bounds x_imageWx_image_boundsĝnominal_satellite_subpoint_latnominal_satellite_subpoint_lon-FHDB{-Inaming_authoritygov.nesdis.noaa ConventionsCF-1.7Metadata_ConventionsUnidata Dataset Discovery v1.0standard_name_vocabulary*CF Standard Name Table (v25, 05 July 2013) institutionDOC/NOAA/NESDIS > U.S. 
Department of Commerce, National Oceanic and Atmospheric Administration, National Environmental Satellite, Data, and Information ServicesprojectGOESproduction_siteNSOFproduction_environmentOEspatial_resolution 10km at nadir orbital_slot GOES-Test platform_IDG16instrument_type&GOES R Series Advanced Baseline Imager scene_idCONUSinstrument_IDFM1 dataset_nameIOR_ABI-L2-ACHAC-M3_G16_s20172982052203_e20172982054576_c20172982056226.nctitleABI L2 Cloud Top HeightOHDR-vYvYvYvY:   *|1Q d long_nameEGOES-R fixed grid projection y-coordinate north/south extent of image LDIMENSION_LIST pOHDR-vYvYvYvYQ   *|| X long_name9GOES-R fixed grid projection x-coordinate center of image :standard_nameprojection_x_coordinate unitsrad axisXx OHDR-vYvYvYvY8   *|Q b long_nameCGOES-R fixed grid projection x-coordinate west/east extent of image LDIMENSION_LISTOCHK |0REFERENCE_LIST6datasetdimension ĝ'SOHDR-vYvYvYvYo   *y? V long_name7nominal satellite subpoint latitude (platform latitude) +standard_namelatitude @ _FillValue  y (units degrees_northkLiOHDR-vYvYvYvYq   *y X long_name9nominal satellite subpoint longitude (platform longitude) ,standard_name longitude @ _FillValue  y 'units degrees_eastIUOHDR-vYvYvYvY   *y b long_nameCnominal satellite height above GRS 80 ellipsoid (platform altitude).FSSE]6CJOCHK Cstandard_name height_above_reference_ellipsoid @ _FillValue  y unitskm^FHDBˇkUnominal_satellite_heightgeospatial_lat_lon_extentoutlier_pixelsominimum_cloud_top_heightFmaximum_cloud_top_heightYmean_cloud_top_heightlstd_dev_cloud_top_heightynumber_of_LZA_boundsnumber_of_SZA_bounds&algorithm_dynamic_input_data_container!processing_parm_version_containerHFHIBˇ FRHP (F@VFSSEHA4uBTHD  d(5FSHDPx(4%%QfFRHP$(FفBTHDd(V+7BTHD  d(V~"{FSHD$Px(h4BTLF7L @&'6$,,B^2,M#HO Cx/RG? o  +e >+ i8 ļL _}p9=L\cpBTLF 6L\+7 CM#p9B@+ G? 
L 8  ,R FRHP(.q?BTLFJL 5$szC4/ B< KJ#HO.x/RbSfDj +e >i> y۴ 8 ļkGL_}m7=u^yg@G(MK\cEf~@nG-RBTLF 5K\+J.J#m7L 8 Dj < / > (Mu^Efskbg6SSOHDR-vYvYvYvY   *| 4qXFSHDPx(zŎBTLFKL aT57 o)6 {\/KlBGJO]TxWp TӸxKC)JNMT$IBTLF KaTT T]TKKGJJ7 6 l+OHDR-vYvYvYvY   *- o long_namePnumber of cloud top height pixels whose value is outside valid measurement range 8 _FillValue  unitscount X coordinates7local_zenith_angle solar_zenith_angle t y_image x_image 8 grid_mappinggoes_imager_projection  cell_methodslocal_zenith_angle: sum solar_zenith_angle: sum t: sum area: sum (interval: 0.000280 rad comment: good quality pixels whose values are outside valid measurement range only) where cloud6FHDBT long_name,geospatial latitude and longitude referencesgeospatial_westbound_longitude  ĝ geospatial_northbound_latitude  "SBgeospatial_eastbound_longitude  Dgeospatial_southbound_latitude  `Ageospatial_lat_center  ZAgeospatial_lon_center  ϶geospatial_lat_nadir  geospatial_lon_nadir  geospatial_lat_units degrees_northgeospatial_lon_units degrees_eastFHDBVvqflag_meanings good_quality_qf invalid_due_to_not_geolocated_qf invalid_due_to_LZA_threshold_exceeded_qf invalid_due_to_bad_or_missing_brightness_temp_data_qf invalid_due_to_clear_or_probably_clear_sky_qf invalid_due_to_unknown_cloud_type_qf invalid_due_to_nonconvergent_retrieval_qf1percent_invalid_due_to_LZA_threshold_exceeded_qf  \;>percent_invalid_due_to_bad_or_missing_brightness_temp_data_qf  6percent_invalid_due_to_clear_or_probably_clear_sky_qf  ^>-percent_invalid_due_to_unknown_cloud_type_qf  2percent_invalid_due_to_nonconvergent_retrieval_qf  #_7FHDBV _FillValueDIMENSION_LIST long_name+ABI L2+ Cloud Top Height data quality flags _Netcdf4Dimid standard_name status_flag _Unsignedtrue valid_rangeunits1 coordinates+local_zenith_angle solar_zenith_angle t y x grid_mappinggoes_imager_projection cell_methodsHlocal_zenith_angle: point solar_zenith_angle: point t: point area: point flag_values number_of_qf_valuespercent_good_quality_qf  ?)percent_invalid_due_to_not_geolocated_qf  FHDB&KI _FillValueDIMENSION_LIST _Netcdf4Dimid  long_nameABI L2+ Cloud Top Heightstandard_name geopotential_height_at_cloud_top _Unsignedtrue valid_range scale_factor  C> add_offset  unitsm resolutiony: 0.000280 rad x: 0.000280 rad coordinates+local_zenith_angle solar_zenith_angle t y x grid_mappinggoes_imager_projection cell_methodslocal_zenith_angle: point (good quality pixel produced) solar_zenith_angle: point (good quality pixel produced) t: point area: pointancillary_variablesDQFOHDR-vYvYvYvY   *y- 7 long_nameminimum cloud top height Cstandard_name geopotential_height_at_cloud_top @ _FillValue  y E valid_range  @F unitsm X coordinates7local_zenith_angle solar_zenith_angle t y_image x_image 8 grid_mappinggoes_imager_projection  cell_methodslocal_zenith_angle: sum solar_zenith_angle: sum t: sum area: minimum (interval: 0.000280 rad comment: good quality pixels only) where cloud3OHDR-vYvYvYvY   *y- 7 long_namemaximum cloud top height Cstandard_name geopotential_height_at_cloud_top @ _FillValue  y E valid_range  @F unitsm X coordinates7local_zenith_angle solar_zenith_angle t y_image x_image 8 grid_mappinggoes_imager_projection  cell_methodslocal_zenith_angle: sum solar_zenith_angle: sum t: sum area: maximum (interval: 0.000280 rad comment: good quality pixels only) where cloudS!OHDR-vYvYvYvY   *y00 4 long_namemean cloud top height Cstandard_name geopotential_height_at_cloud_top @ _FillValue  y E valid_range  @F unitsm X coordinates7local_zenith_angle solar_zenith_angle t y_image x_image 8 
grid_mappinggoes_imager_projection  cell_methodslocal_zenith_angle: sum solar_zenith_angle: sum t: sum area: mean (interval: 0.000280 rad comment: good quality pixels only) where cloudNOHDR-vYvYvYvY   *y40 L long_name-standard deviation of cloud top height values Cstandard_name geopotential_height_at_cloud_top @ _FillValue  y unitsm X coordinates7local_zenith_angle solar_zenith_angle t y_image x_image 8 grid_mappinggoes_imager_projection  cell_methodslocal_zenith_angle: sum solar_zenith_angle: sum t: sum area: standard_deviation (interval: 0.000280 rad comment: good quality pixels only) where cloudFSSEC|=OHDR-vYvYvYvY!  Q +CLASSDIMENSION_SCALE ZNAME@This is a netCDF dimension but not a netCDF variable. 2lS4OHDR-vYvYvYvY!  XQ +CLASSDIMENSION_SCALE ZNAME@This is a netCDF dimension but not a netCDF variable. 2m&OHDR-vYvYvYvY   *{NFRHP (n4BTHDd(8BTHD  d(VǁFSHD Px(//ɜBTLF` u  V.K mW Q l|TY VL  Nc+vh -ScGe KHi NyIm H7NmSh 0dFh k e!`LulLt^in Qʫq )iNP܍ XՖK䐝`i0OBTLF Vl|qSK!``` vh h Fh m mmi^iiKN Qi N X Ve K N Q5FSSE9O nJ/OHDR-vYvYvYvY7   * K long_name,container for processing parameter filenames zL2_processing_parm_versionJOR_ABI-L2-PARM-ACH_G16_v01r00.zip OR_ABI-L2-PARM-SEMISTATIC_G16_v01r00.zipOHDR-vYvYvYvY   * [ long_name<container for algorithm package filename and product version[QFSSE]qs2OCHK ualgorithm_versionNOR-ALG-COMMON_v01r00.zip, OR-ALG-CLOUDCOMMON_v01r00.zip, OR-ALG-ACH_v01r00.zip +product_versionv01r00H1ZOHDR-vYvYvYvY   *|80  long_namethreshold angle between the line of sight to the satellite and the local zenith at the observation target for good quality cloud top temperature, pressure and height data production 8standard_nameplatform_zenith_angle !unitsdegree 5boundslocal_zenith_angle_bounds<^UFHDBˇ2n#algorithm_product_version_containerlocal_zenith_anglelocal_zenith_angle_boundssolar_zenith_angle  solar_zenith_angle_boundsW! 
percent_uncorrectable_GRB_errorsh"percent_uncorrectable_L0_errors # cloud_pixels4 FHDBPinput_ABI_L2_intermediate_product_CRTM_cloudy_sky_radiance_band_16_profile_datanull2input_dynamic_ancillary_NWP_surface_pressure_datanull5input_dynamic_ancillary_NWP_surface_temperature_datanull8input_dynamic_ancillary_NWP_tropopause_temperature_datanull5input_dynamic_ancillary_NWP_temperature_profile_datanull?input_dynamic_ancillary_NWP_temperature_inversion_profile_datanull=input_dynamic_ancillary_NWP_geopotential_height_profile_datanull2input_dynamic_ancillary_NWP_pressure_profile_datanull5input_dynamic_ancillary_NWP_surface_level_index_datanull8input_dynamic_ancillary_NWP_tropopause_level_index_datanullFHDB^kGinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_16_datanullOinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_14_profile_datanullOinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_15_profile_datanullOinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_16_profile_datanullTinput_ABI_L2_intermediate_product_CRTM_clear_sky_transmittance_band_14_profile_datanullTinput_ABI_L2_intermediate_product_CRTM_clear_sky_transmittance_band_15_profile_datanullTinput_ABI_L2_intermediate_product_CRTM_clear_sky_transmittance_band_16_profile_datanullPinput_ABI_L2_intermediate_product_CRTM_cloudy_sky_radiance_band_14_profile_datanullPinput_ABI_L2_intermediate_product_CRTM_cloudy_sky_radiance_band_15_profile_datanullFHIBKzFHDBJ long_name7container for filenames of dynamic algorithm input data(input_ABI_L1b_radiance_band_14_2km_data?OR_ABI-L1b-RADC-M3C14_G16_s20172982052203_e20172982054576_c*.nc5input_ABI_L2_brightness_temperature_band_14_2km_data?OR_ABI-L2-CMIPC-M3C14_G16_s20172982052203_e20172982054576_c*.nc5input_ABI_L2_brightness_temperature_band_15_2km_data?OR_ABI-L2-CMIPC-M3C15_G16_s20172982052203_e20172982054576_c*.nc5input_ABI_L2_brightness_temperature_band_16_2km_data?OR_ABI-L2-CMIPC-M3C16_G16_s20172982052203_e20172982054576_c*.nc:input_ABI_L2_intermediate_product_4_level_cloud_mask_datanull2input_ABI_L2_intermediate_product_cloud_type_datanullGinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_14_datanullGinput_ABI_L2_intermediate_product_CRTM_clear_sky_radiance_band_15_datanullFHDB{7time_coverage_end2017-10-25T20:54:57.6Zproduction_data_sourceRealtimeid$b59c6a09-7962-4149-81f7-7002b8d9ee50FHDB{ 3Biso_series_metadata_id$4571d650-b00c-11e1-afa6-0800200c9a66summary5The Cloud Top Height product consists of the height at the top of clouds. The product is derived using a physical retrieval composed of a radiative transfer model that calculates clear sky radiances, which is then used to compute the air temperature at cloud top. Product data is generated both day and night. keywords"ATMOSPHERE > CLOUDS > CLOUD HEIGHTkeywords_vocabularyTNASA Global Change Master Directory (GCMD) Earth Science Keywords, Version 7.0.0.0.0license@Unclassified data. 
Access is restricted to approved users only.processing_level7National Aeronautics and Space Administration (NASA) L2 date_created2017-10-25T20:56:22.6Zcdm_data_typeImagetime_coverage_start2017-10-25T20:52:20.3Z timeline_id ABI Mode 3FHIB{OCHKy + _Netcdf4Dimid 6FOCHK + _Netcdf4Dimid عdOCHK7 + _Netcdf4Dimid .OHDR-vYvYvYvYc   *|Q  long_namenlocal zenith angle degree range where good quality cloud top temperature, pressure and height data is produced LDIMENSION_LISTr[OOCHK l0REFERENCE_LIST6datasetdimension #LxOHDR-vYvYvYvY   *|l  long_namethreshold angle between the line of sight to the sun and the local zenith at the observation target for good quality cloud top temperature, pressure and height data production 5standard_namesolar_zenith_angle !unitsdegree 5boundssolar_zenith_angle_bounds]OCHKd + _Netcdf4Dimid GSOHDR-vYvYvYvYc   *|.  long_namensolar zenith angle degree range where good quality cloud top temperature, pressure and height data is produced LDIMENSION_LIST}:OCHK l0REFERENCE_LIST6datasetdimension WKOHDR-vYvYvYvYK   *yp P long_name1percent data lost due to uncorrectable GRB errors @ _FillValue  y E valid_range  ? "unitspercent 2 coordinatest y_image x_image 8 grid_mappinggoes_imager_projection R cell_methods0t: sum area: sum (uncorrectable GRB errors only) OHDR-vYvYvYvYI   *y. O long_name0percent data lost due to uncorrectable L0 errors @ _FillValue  y E valid_range  ? "unitspercent 2 coordinatest y_image x_image 8 grid_mappinggoes_imager_projection Q cell_methods/t: sum area: sum (uncorrectable L0 errors only)TOHDR-vYvYvYvY   *P I long_name*number of cloudy or probably cloudy pixels 8 _FillValue  gWBTIN@,P oh())  )  .! Oi!11+[1@,l!++( +6']'946K%p,% ,!3" 2 &(BTLFl!++( +6']'946K%p,% ,!3" 2#RSRl!++( +6']'946K%p,% ,!3" 2 &(OCHK. unitscount 2 coordinatest y_image x_image 8 grid_mappinggoes_imager_projection  cell_methodst: sum area: sum (interval: 0.000056 rad comment: based on temporally coincident intermediate 4-level cloud mask produced by clear sky mask algorithm) where cloudPLnetcdf4-python-1.7.4rel/test/CRM032_test1.nc000066400000000000000000005251041512661643000203760ustar00rootroot00000000000000CDF  longitudelatitude=rgrid*gtime  ConventionsCF1.0sourceARPEGE experimentDA9  longitude units degrees_eastlatitude units degrees_northlrgrid compresslatitude longitude standard_name(atmosphere_cloud_condensed_water_contentWtime unitsdays since 1961-01-01MSLP  standard_name)atmosphere_cloud_condensed_water_content long_name MSLP PressureunitshPa cell_method time: mean grid_mappingrotated_stretched_latitude_longitude rotated_north_pole_latitude: 40. rotated_north_pole_longitude: 12. 
stretching_coefficient: 3 coordinateslongitude latitude||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
netcdf4-python-1.7.4rel/test/__init__.py  (empty file)
netcdf4-python-1.7.4rel/test/filter_availability.py:
from tempfile import NamedTemporaryFile
from netCDF4 import (
    Dataset,
    __has_zstandard_support__,
    __has_bzip2_support__,
    __has_blosc_support__,
    __has_szip_support__,
)
import os

# True if plugins have been disabled
no_plugins = os.getenv("NO_PLUGINS")

# Probe a scratch file: a filter is only usable if the library was built
# with support for it AND the corresponding HDF5 plugin can be loaded at
# runtime.
with NamedTemporaryFile(suffix=".nc", delete=False) as tf:
    with Dataset(tf.name, "w") as nc:
        has_zstd_filter = __has_zstandard_support__ and nc.has_zstd_filter()
        has_bzip2_filter = __has_bzip2_support__ and nc.has_bzip2_filter()
        has_blosc_filter = __has_blosc_support__ and nc.has_blosc_filter()
        has_szip_filter = __has_szip_support__ and nc.has_szip_filter()
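The flags above combine a compile-time check (the __has_*_support__ constants) with a runtime probe on a scratch dataset, since a filter can be compiled in yet still be unusable when its HDF5 plugin is not on the plugin path. A minimal sketch of how a test module might consume these flags to skip tests — the module name sketch_filter_skip.py and the test names are illustrative assumptions, not members of this archive:

# sketch_filter_skip.py -- hypothetical usage example, not part of the archive
import unittest

from filter_availability import has_zstd_filter, no_plugins


@unittest.skipUnless(has_zstd_filter and not no_plugins,
                     "zstd filter not available")
class ZstdFilterTestCase(unittest.TestCase):
    def test_flag_is_truthy(self):
        # Reaching this test means both the compile-time support constant
        # and the runtime plugin probe succeeded.
        self.assertTrue(has_zstd_filter)


if __name__ == "__main__":
    unittest.main()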
netcdf4-python-1.7.4rel/test/issue1152.nc  [binary netCDF-4/HDF5 regression-test data; payload not reproducible as text]
netcdf4-python-1.7.4rel/test/issue671.nc   [binary netCDF-4/HDF5 regression-test data: an ncks-extracted soil_moisture subset of a EUMETSAT MetOp-A ASCAT Level 2 soil-moisture swath product; payload not reproducible as text]
netcdf4-python-1.7.4rel/test/issue672.nc   [binary netCDF-4/HDF5 regression-test data: an ncks-extracted azi_angle_trip subset of a EUMETSAT MetOp-B ASCAT Level 1B sigma0 product; payload not reproducible as text]
_(/`@i1NM1 v] e1䍃8PR y C)I!8Ћ@/W,"'Zl=$:`.3b 1 6R{C'X@Ƃ fƉ9TFSQ `sD?b上`3bbl }d,u1)%0y+,}*(L!cx1%v|2%v\2%vqRydJs86IE/XU jbXI,`1C`i1C\Y1 A C>!(y"bJo6nXz% ϙ5~: VRBK/V䓏 |/O\9}%SӥM2E[7_raw;mټi8sZzujլQJ *QHB?R'3eH.Mɓ%I|"䘿Wsg͘ڠ^ V\ц 'u2}7G|hH1[]3s֮D"r&lq2q )3\\e]64aKmd1"bOYM"Ŭ.NԐK#Q3)WsgO:qm3Mذ>'R􃰥6ꤍ;UK%bP(..Y$.NЗe˔*1NRq(tiR$cCRU|1'o$asmIwj(cG< %Ƭ.#"+҄͵;iN9x(I4.bjjX'8HS C9ܶ⋙\8nlcH m̟q##SHI1DH 8ōTGL¶@Jc49qH%8bnU Ǭ.rtH6Ն$qS*3eGLD9fQM$I..JoHI#p"5l$iRdI :)aI%`.YN@3>[IZd#M(K޽ZDդmu{̳q̒DO <ժx,YB"G88!(fCNzHInL1@O۴HY{~ޗ>ܣ[N@J2p(1w68@I&q=H1whT$'TɩSgT H1wܶMq0).> p24hCyR*vҹ#;`{ Dv3I_>ua,O(3%K:[7$!^&=);vh:qTuPHb'ۓ4!ȑ #gx~ %1['MLf5hc;)?k۞ǭ:=R* 5hA?]6?^qB Mf2j=yvfgmZwhHD: bW^ mV"- F3۴nٱUM H9^X?Cۇy`U5n4[֘F8摃~`:M*K\Bt![oy؜KzHvB|fՋaǼBйw #&X<[\xr2v[7u vtK͕YϺФq<$#Й (] a3<`hȦsa&rЎ<ؑp&vDkSWJsU>&(Q[nhLĞVY)ڲe+b-}qxCs@hJWS@私O7mܢ寚5 -EC2۴TT$5[Qi- zeP57kj7j}PiРaFd=k4o:הI׽@4P֫_~² Jh2I1Lpk,XCfN]|L ,k֬U6cY>4,_ {b`QOJX1|Z `1-VjիT'UjO8iB53s*UVczyǯnޙBVre,qoc}3SBUp׊+U%ruk#^J}0j| 2>8۷ZEQ\96VqīXB@Y2e˖+PP k#*T[ڧ볒J!_0?>jV >roLUVxq0JTzFXŊP2=2`@I"EP Ws*d *a X`^k` ~Lj *5i~w|T@(C pK_*Y6 XFB 5T-)V"1CF ΓA*m!U8P 6thGL@Z̐/S,GYC70~VPC` 3 "0-{ 3ܢE (1mĈ#?e~dra!RØ%aVjdCe)qF=:SL P ,X6 n12ZP ц{@ژ/ =7DO0T6 Linدҥ'fT:By&mzMS܆iƍ*Mt>fA6>7 37xx Fbd,Fr}Ȇ-Tt h;"GB )ru88…/yd"F.^xoe4*Jn#7)d vp\tY7KL `@.[<|H29r?0 +V@$ W9(@$X* i*DAc  1;HZzK@dN0UHk_0$20x+n|=FJ'&1d57S"9Cx_/ dBrғ6 6n|p| ֏ӦkUq{ iHht[|ᥗ:Z71 ƭ;wîN\:$ܾcwv_y0TGT. .ׁCc`KL |//`. wʵ'R$_t?/5,.\TCCCg_|͟`3C1a>{R$ I 9T] !/nSb^;~`q{.(=~섉pd'EV={M&V7N9w⥝?xg\wRꢉHۖB:xC 2N,&fM" .]uǮd2i\ &є׶lP9Ӻ($jU._v} PK}.ژ$zݦő hiY}1n\~[ Ije.hO"oo^qMF[Zdc^Uʮ-۹8FA2''}.rY4f_ P]{+d9"EֶĬIDUIDaNu.rYIZ~UPH'_4NB@+] 3'w${o(N MbxV!'s/r[] H D8T颉c?{N^T'j#.nŬI|ӣKZ!qz.b$" [qDAsES[g's7C]dpYb/bsN&yNzQ]\DY71@'IA<z#AYG99' 4 E\I3};IsTgH'Eg)䴙s4I#Q9gMئ..RY˔@'eNE@6쯥.E.ˍ^k4/2'qXپ.:ER%rs&'MwVIH\HG]$pY|89$NR$E:C,nRnqo/T~$Ku; 3 [%MV:]1IJӲ fI҄H$GE7)'Y隷`AHXgE܉H6}VMOiF6D$A_DtI&]&-6,I/li> &3B").fll08 liK|pbfw4G]԰Ԇ%IaSPجM#Mԅ]LYhC64i_"%Ew2gfw/liGOȯ<'Çt1[0fW\ETkGmw/Ls9ln/ ڠ8硣.{?Sl9k-զINmcɥ^4ҡ%Ōp1קuPm:6lڲ]єDQŔbl9sWUv '5Ihy֬۰i`ؘ66\otBJ,I z0'RJMb|u1G<+!Fa˴0"I( ֠E:tb.B>XS 0ҏMոA[u1.~'kԮ 0'-Q6_j/Y6.f'?.W bHČQ$":I !f%8Jsu=)>.\*dƮM$k}r.n.4tŢ%j<ɹmhP"ÆKs~Bӷ$HutЀ8].+nf~0L"4_vCA[1.ԝDuD1DNm "E5SRHuX6OrI$ÉaCAo] b4ҡ3bO|aK$+~R1R]q\4.)ac[BJs 1r]H:\rM4;w|AM]X| ذiÆJŖ3f1ںt2\4.VȯalKmDƥ4^5ߺ4H]H\τ&0qk06hRb*y .^bU:mɥ1}ȟ&ẖ<@:yUssUji@ӂcMR*hMbu1.*ZT 碰Uӂ#TsiPl@Ӡy&X# چ#}xkGMb9P\*.V.]ʧ\!F ( GN>ȕ%Unc+ 1t&h/F:uD%V䶡FD"U( ً1A˨鈋u3g'JSې~HO( ۤQ@wvL6 3HѸR/xAw&dȚ3o"%K>H$iܔh :X]sV?ԈHq) }\XAS]lVeH-܇ +hXQ46FDMHc`Ёakfϒ~cJ$++ ,.nra)MQ)Js⌤QEC.ց-Sh%5ΥDIu.6i:]0$5x~ HGX:b$T?ԈȁCITC&>|ڱ.$j@ؼgM$I i>bN@.,5k٦}> hm:lP5\lF֎/h I .vH4UzJ$v4(idxf1~p{IH?6ҠklڡY.&[}֡sDӈ~hDBR!Mb4ut[iשkODBR.6į5 hEӠqVm;t;>'08ϔ܇ِڱ6bthݮc1A?Ԉ68lPI>{Ac*MA㺰[m߹[qI?҈8jiؐiDy#hօӢg-vԈH4ԇ+`V|$i"F[R7سoX5HS:-bC> *kG1t.p#^"iDM) [4v|AC.$R7\)zЈH$EG?p߹QŔ9Fu!鰋N2477 ՅݳOoE^l"Y4Rl$HJ#iGX1Sn"#nH- )64v441SR7]d)5"'S:k4}MڡJ$F_]nlvXhDI$RC[t0\xюMGԅMoٗH8S&Y>D;ڠE!ѐN7wDbFƥ4܇1W{HI4납EnF{M$Ҹ) =ņ|X豨ahz#h@y(ȜyeXԳb|HFj4齠ITtws#cQ" ҠF>$Ԉ4vlԁ#܁`ɇMPY>~I#74h3? x%'fdBJCCw 4B;:.7$DkgPgcC>$P B;tN!F S{EJ$>3>q[.IiD B;2с#|@~1aCņ|H!hAhS16n&ua錟5hckuΏHq) 4r⠩ʝ/ϢȄ4,rTg-!n޾[Hlj :/Z+M\g AB,7 Y&wQ=:6IX(M8k*H<*M2h-CBnId<8b5CDiJ|=]\B"ýIKOV[NJڡkj%2- !11ؒFtu U:Ę6tIىҨ57WnĉGPRE>FMռ}uY2x&,$4f1`?<|R@iY>HܗF4L`ж.µ;~}@>b DN#·rAn+?cPp$L$Z*F~ָBGP օ៾x㇇иWC⋡o4ZxA{bf5N^f) BZl3HM4S/h_]6o;qmq)  M? 
R$ii ڈQv;{=hZ.[l gXLE44jжYң/`sigb| "AY|-Fݎ.w>ucpiL,$IMjDvL܁,FS{8bY>iB^l5b+UNЁg1,0mi4`|1$` ߚ4vI.?AiUyV"ARl,idp"ˈ>K>"$f%o4vʚy潇X4~PC.6ӧHڐhC ZHs4hԇRl$yW&U:-Hڡ)a*-wKci@CJY>$HH|.aF0A߼3u C)64+nJ>S|wѓ;>9.gARPi ĸV~>?mϡ44Ф؆ARӠ %:tߡ\ٍki[g:pƏ&arSlI3w*F - ӌ{o 浈Qb%Hf- Ac4xL?sAGh|HhלO|@/|!u9wy4~4oD l4h쌒F^ؠIP W Ki"C}ȝMAB܆24;U{].mBҼ:+?G SӀ9I;wȿQ7mIk9A>$HIY8}K6 \ꭵ4hb+Q WjXտРG=Ai~Ljbg#aAuh͎Df1:a- >"PSgȖ`QL$IM^4~\!6p?|kXϥRx*viVAą6GnIYuz!ƃG._ҼT.)M^3`h̍Ĉ!q[w,]cK>y1Ndy$IMe\"9y;~ZB@>|Tl̊OJ I҆֘4B@~Cϣ.?>ZҖYQ83HF8avME{'P]po4ԇ\lO$H85F^Tz7>ňe5r" 7ĩixTi^bz1ϔ;n$g614~zū7nHqf| Qߤhk4MF㏏~y6!3Âlܲ}W4 Dَ4k&{kn\]#}ņ|0} HxhCjUJ]_Vaޅ'_1./6c>gY-6ClԂm[܂~T :bD]^ ?KC}hMqqLHxCG4vh4jл8k\W#Ld5-6'Obx܆޶^O1,9Sޜ<{Ѫ@}HŎÂĩiBc/ZaWI"Ƈ.[f.\a Mc<6ɇII3x8cP c;4~>DG IM[w956OR__xf7;IU>$IMC=eiF (_4k62Ŷ +TЭ/ɳ,ooa6b|3RT(FM6Q`F+ݨ1Ơ4+7Prg$f_FA߾+uk ,R$ނڐF񋶾o!g6GY 5 242WtAs]bԹC)6fG 76Q1cR/Ď?lًчAG`?~QA~CL׎/h#%n!c&͢>bߺaAbN=iTLĿ KڠY{I>u= R$^m𙻄.)YKQq<+ގF4o6Q)K5{(AsQDɿ1p3:5oE YLDqʺX;Hþ>=v*͊Q iZ1q90^ԁ$Ƹ 7ɳbcVܥ=^4"qj.c'Zh.?۽_.GR9ukoA85钜MGK%7 1vފlt|*6W1}^A4a@.c8JҾ Jؑu,9>ba ii %&Jr\I2As3|am!L#lyi3~sRmhy72{[v|H S7 ڐ|F7ޑGYsУ?{CL$^\:<*ذ9ݚʥ;_lt eܮ{C>$ UEc "-(F;6$ȨqH \a ZiΝ{# ~'$8%"AFS<! 3 {]M 4Zdvݸ4+0}X$qjR?ŨrVFF ھw[S.6:sBϖPIjϺ!>MM#kF%7'a4$%5nә=kG Hazv=p(FŬ4sWneOu)U{ ]iνhI69zM/Tl\1+ $I<ay{}1%4wF&ܨK8| !Y 1* qq\}K s?r9_zfac] Ϝ56OQ2AÜg(7AV?Ӵ-9KaAF4MSkc3 ɳ;+X se!*wj5ΦY^,7i  Fs+M/ه Ym$j6jթQ4+6a IhШ8gH<7v͆g64KRsFA*ةiU@|6l< !qцE{!l/7i :?*u1+H>,H85 ڰ$F4g. svvs_ݗ9tmHGę-4 e(0tGŚZw3h4ɇ٥[85M:n熹-4v(hoNɅK7Tfm+|ڠU^Gf' R$mX]CHڑydhˡD>6 ASC}F 3gk6h] Ej󬘾`,{hil!WFN¤4[?g>&cSӴm ]f :]j׵dMڒ|0}C4hCJ,-k|0C?>S7 ܂F;3Y$qjFÇF.~O6BJ7v8hŗy[Ii#<۱$^4Έi4ᠣ咿Pʞ| n.bx>4rHqE;9tCI.XR"̳mA߸JQqcskn`HkdaĠVx$K' ;kn ![h8jy(bԁ8/_| 1,=8a4Y0|*~!qtյZ_Ӭ&W1$SD` ?ޤ-Hڡ"kfJn}.cÂ$SӼ;Q̹h5h!DKVQ =,>>wU ^I$;8i"3*P| lqI#Z}&C^c >Ƭ|00!74G@7gF iKNI9 4QccЀlFSrwAüU+Y?*FWߎQ㣢1gIge=zSyz|EA>Ѕ |6ojK)3(v h󽤾AL5o$ރ H8mYA@x5YG%?WZw9t-='KOTעC$KNW"F{O2U6eًVHoRMdn42}XӾ $NM6/O2U#~bqF49c$vI$qnaܤ>[!:443_tͷx?ɧIn HH\|bc&K^_w%]!AN_j5?Sc'I.}1G8P $ M+uAaANr#5 6^(/^fI1[:MּVoЪS!_Nk4 ՔFy " cV|$TsRZ1!Ih4*Rg㒘;o2Ȥ1.^!%ë9 $ARc[4 x J u+iզrB>uH&B[R;>r,ߗ􁰒y/heFƬ֗f-B2O%̳FNķIs1 %'@A;8V^]m/$/^ $NMs;3k#f"@;hMJ}#,O!~HhᯯGM=7m@r|"Ae$]{\w !"HjUh3Wa?Izۜ{+4Ej$| biN_o| nHo"pd ;puzS&/14hG/aЀ$|m,7- VYg19h̔y4'/7`aǠ<>7Ly=`:mھPGlvjß^%.$ DDS&vM{sJ!vLd;H|Ҝ;q"D8/+NFMgvCֽ|A/͖ I|%pߍ$ V3w0AS.[^]ŀ|X 34hÛh$I!W,dFAXOFO:K$Ȯskb'NIG_EH0FZvDx䅨Sf< ;cXf#8_{XdwVUC4iEo'dQ(g q>{> Dipg:lqJy|tݸ2$AbrnkoL:KnC\ 4l=jI(AħG:Q Sgoiz5h@+ގ8 %>eiW0Reσ:u9igEt[(6}+F/#$>y.+?J"u`>*{}1yAs:ޛΚhIPĨq8[ЮmdyF2lICmx _oQ'27;O٢G( VgAt)QKa>>#Z0Xkeo}[^Pj Zu|Nh> 9Y6Zg)ovdAMozZ8,Ȫ[v;'46F #b "ҩF:۵,8h@~A3LHx!8iWNZB42Zw6nWފI,A='ͻ S"*-QTо!Y"$kNUШߍ16xJlq!zu˯f+V9p! X&Q_dYOJУiIGSe[$ѓ@cL""1Y6Vz• Eaw0jw!b:>0!͟9%0Gzڻ䇟M3re_wbb#&rӚenh$hY(* 2fn4 pg7qw}Kƭ;>+z+oN~΂8mt>1FjA^NV0SZ6YJVA.Y m xWWz! 2$NM6\ngSi2A;0kmYU?x @╨iІn :~ΣߊA| ,f1y~ I$4qbTx#bgyǛ8c`R'LM^ 1slڮ {_JĿ]$j4,X>~-r,q41SL' Qi[evK0 Bs)}Gс6I"q4`zH;:Z7d#|*WTi_MΣWމ/SM3`y+u͇/fLf,0qr~~XIIs1*Q 8^-9heT\,=}ԙn>|1b H\m8g&~YDHrÔ4f*׬ft87 N˾މqJ) mxib6"kZ!DF0"8x#7|)IDOW^ԁrQ⧂4?kk=2~G>[D:V>~={)-{I^yk oi#A 2|SBz7r}LJ|44 ߛ|XGށ45pc/Zu+wi'#bU!zkI {!|ٮ>9wgܤ46ٺ׆%4z[1fS"ٛ_z+&x25QcO :qΤaƔr.aޛ oLs|\zC46Fqz-;4ͩm-O]37QZ4MhlU ^&P]TZ>ƷO.^{ij(Y>; ƢoRUKɀu//+SiCcCDM15ym٥df{h뢱GLmCCTֵ<)ȅ;@hb렱clum߱DJ7{G"M)Q|#F^eHO}1zBN1wc`ṩDmvxfon=p$ M6lڡqb@ӀSg;|FMXh\hC4fb:˳OE,v.{b :yތ8=!{W3q }ɡ¥&mŒ4w uiz xtц>9h*YA>n;p:=ZB2DZ{v:Ayt6gf_$M6ߺµ[ofA)a&[y,䂴iޮC$qjaV]`}LMBVR4:0]vٛxTe,XeɫtC٣wU/Zeۺ̍^~;a<ԽӇ?vxį60AB>lZt1[8ڰBi6Q/iRծVaRm&SӠ n nJ/C@K@di9bxL7{N\mXV}Or^M5!b BTTVGn4 iT6aΊoz'W ]h2{ͻri0j[;zGK64*z?gŦ4%by՜߰L'PSf2|zHM6^F3,9".`0g-r)^@SO^CZzD=,@-- ~c$0K6vH֣W}/aT|q6s!2+QV^$IWD/Y?/em{ ́B{ 21*-j>l*H×цYaTdCp"HB葖 Z? 
1֗(t MvdHۋ"o.׊ޞ;Knjk[i 4 0SVgaq2a8M ~scZTҭyaYTR-[q+_x3fҌ1*00r8tN(;^JӦ,FZ]8x#H:- = !R5BJ\OVk}]#~-Bt4v [t?ju8>b:M̳ ASn|pݎh؟q6jµfez2̬ۖki~}=jt9 at|k8{Ȕ>_!4kQqٚM;~>b5${uZt3{oMxMoAj7J]̧M;1ijsVNQFL\yNe +DEpJhҡIVoO6> q$ZL7"FUZNMej7n{y'8 lU *{sЌe6"G%5j{؄hvjM"D.SK66Dcc`@0g% i]Kڳ}wOWSo@!bd󕨊9dĵ/5 _A{OLSy?ؙҠm!fؼ5JėFk66ܲAa8i3)V~Af/}*]h#D1jd6Ө`"5 0B$rTuA_ZS*2A{~1GUo1+b4u'] 0|c7 8Yo!h>#s]N]3Ms_^(} " M"ʈ?z'!] @.Z0bHH'tBB@j1  1t CE@jCC@j1 C"] I tNb0 .K @@j3яtĠ1@ z>A#1W'b?&f:b#1MĠ/bWb|b"z,|ZbLSNb O @b?#C>P,b1 M " >']Ⓚ*g>D$xg袑@j =DKR{^.1H9%H\ @j. @j. @j. @j. @j. @j. u@j)i.J|J =ħB@jh.ާE@j3).?F@j\G. &:.HH?Btx9'!],|b瑘#.\y$F>-"x01LL!a!@sB @>- >Đ+1tD C>P1TA G߈aАO >']ⓀaCA>'#>-g!TD$p9#> E#Ӊ9>b_Ct`9:1FtrĜх_A ._@ #]< KO s9G@/D! 1$:Y"> o&>@!>|Đ 'CE:ubg&눡!fbg60x^ty\=^)QHJ(dhBE*4(EPРA R ( *sRThPQ%)I߽uW}kks?|_;6c%ӗn?OȮhcr2A]N_u qp |_QmhqJ!Z'M-lozD'.njYRO=L7oW;{&)%cn<~W^xbN_Vnڮxࢭ Q`13 $&9Cg(= NJ"dU蟽t?z'B_fi62wanӈDKvxޣp\G]qFVNyUmsvH)_v?$uEן l+6l8y᚛_h ޾J\nz&Jf֎O/3Էt 3e5|_#UNąk|oNEmZZQ}]<od?,_e|Cx`gepH?}{78شgz=(d^'%~H\Vi}; Q >C޷v6eE Z\<ҋ_VRqS  ʨh^F>L|UT;ٴ s$chLJ6(ü%wEk>1oCz&7}c3)D.Q ΃!1w &ةpHy+]?^)5X Q5v";ysبM;s*18*U1Y= [ƙv5BČψf 1ٓ o!tiQ}4OXعc1dTÓr{\#&s+Oc2>AtĠ%5 bإv4dmi;wrWH9KVR9{ʼn ~hdp7Y(pϊlz㐁.@$1[~Dvhtz^\ćp {GMٱu1퀩Z'C/,EW"ޥv 3ɘpuZF7<ӊ޵ x܁64G|gGI%ͥ6H:zABbηi9 FhqtH8Ii@r/s os ɀf`9̜ AL ['Vٱqe(!q"qt:$+6J+%F@q Q=GB<֘.[M6olnKȘW ߼uݝGiEuCt @ dYSs1aiSqpф#]'C)A|@ \$f 㨌MAz`O/~骍R?H.x To!9cfjP`@v\ g"&(4,)bI3;jTb%9exXgp֮ayU- >JS?af{; "Fo aఖ6 HHPZݼwy o?gItQՇN*GO@F|z >!ٕz~I@Yp Ɣk+M_ Jzp{ R16S@|!ј\E}[mzg`^og2 @*F?wo:,md ISq! EVyӷ_ۭw*p>xdס?sةM뜾`T\I{d6~aX%[~v*u7ԦAvͤB;% >Ed Oӈ&V XBz 2LRz/o?L/hLl Aĸ_g-6 dd'qF1<J*L Bk]/>{dx #(S葝ͭ Cl|w1S9hpZ Ԧ8@F%e#K&H:0GckWg x7دs"4zp0LlAܴG@(=2 葌 3V7}Òk'ظ]Y q_ė@%6mnK20t-$.du@=, !u *i49˞Ԧ]q/(:Z5f& zxp\<ӈCraӆfW=!S )M'N]vobrmF" 9ECsX'SNOs$T"n?t # 8?󸅶*hZ8W@1xOy+7HQ< u?8&%wJ=V aJh4ħge G jD%Zl WAv3 Q$ .k;tAG m?rS,ZViXJ$(#ewdc^V f) HdYB6 ruµ Ns/8Fۏ3ʚFr;|?*O Dtmp@G^fU2g6Rrèo Ok0 :q-t&vjxF%gՁH%!Ou|o6q xVlN}Tf>!$}I3Z>H-2´d%DA fD͟M |R6Et&$߳ΟLkeObE[ؖqMS@(9ڴRKO?bh\X1̸x^QrQyMO 6uf/RP2ORr4t|b+{.jI{5N[>K-icmd Oʗm:46#쎅F˨j̝6 VػO!rߦu/<@ 鈄ץumrSir|AtC'/;%5w) è2(M Z+ h&Nɫl2]LR{Յ7_Pvx/4Ȝ؃ַ#R¦a1 ;5]?JHQۻSN#ox$ "ڭS&@Tց0nQN1&6Z\io bz ^ ?gsD4~.߀JenT/@%CJRޯ{e޻a&N-Zg b9ls L/!~Z[ aXLozjmuÐ΁ִ3-ܢMls v "fn Lʁ]EwTD@Jy_3l:(& 3[dH`\**խ7 "[@$"Ns+n:9M#|ȸi䋱%9[X …h *;*}"b%4.l[P )Lv|IڛeN@FfT|d!>vͻYFUj| (q9RV p)tw2p{^o_L+"~X+,3s# ǵZ>qD/O} ;UO\vH+w!" >4ݺ"rbʳNb7OW?n"sD+3"NCnepH݀Ï_cNZEGԒ+EjL!M,٣fTP( S rޓ$X81-G\mٍ{a)%=XE%Q70w\XjtΘCl V\f)k[u/8x&7tT2p O)z A ~[! ǖt HFJG`5>O uݬEiK/.i|#(+"VzO }MW}A2J!qI"H@cg-/={ӗu_Xx6VF4#*clfq]|cmHi[OkO ڸ =r'y_F)D 7YȜshr>}iRSɸLPtd7ŧ{CDf  q̫7ͩ_+@$B\<=O h%-߼[ql_7M!"i 5ʖ̒0/Y%] \Q܊jZ[V]%qoQ?m(6R2ZkwXJ>cdCl- 'F&/۰Kԕ[_dWw&FCdX, ނQE|ʩZ{ZPp9hmbq T5t}]sbTD9hӹ5";5O74%rq̀UlN 1M %sy'.Gr#.:Uyo\VɒuN`7%(M9_vĶـJ!VBCd6e]¦tz bCn9t/9EfU!FNOߚdܧv4&rL8mlb_EY7? zA" ]-%>mOǐʚ.^w%9{i̓)ψ|c]N 6q g\f4K+kŹ~)v4w'9G`\(S'?a:"4MlR>)D ZNfU !UjAa|<{_ M)aɺOrt,s#8;FA A;_.[*(GUĊ6՟тq w?OԈ=FSt@\ : lnM/.u#AMk5ftb5jKhs uN4pMu|-jw&5D WPy+DB*ohBJ7z)YԍhK%nXiOAye* `6n1yS P,4ѿK\'9xvSVO {!gpbmXJ:nItXi+~)5"AW_6NN! brZ&!Iōo,O=JRHfZ- vmNZ 􁓈[76$hhJɇI$qI/F̣<"V8L/Zq[UNXz3/AI-~f3Yڦp) EK./r뻦'بM(Z-(PxNGKjl8`o)O -sn}" ӄgqmإnhC?|+HS*=tK6jO!&"@PBRJs]%ve#5mAx{CH氱 "zbƥP%B´ hVZ UL$lF4D$u]~Y{YӈAI%dӞ#ƶ?pP:gmv4_ʁubѺgS ❠OC7=jbwYFeOfY1ٴ鋩%Aa_CugU՝9˅egm58X3=ޥH^w n?dt F,k1oN<|]60ʳaЋ9Db"|[( }W4Qy L5_@Gg5 AHQ>`6Ӏv1$dNL%HO5! Uc}R*$MG~nźz6ao;i@LhaWԽRL1D&Uz.hfʏ]#Wm=p#, JGhڛ#VJ`;ɱS/KhD6#}<-*4DU@ _%AQ8c!qq ^\hէh{lfY)b`u74r 3,} @K7d5~ÁvG=GiE'ݐvhx3;ZWHFbT|\\qF["mOc燡'A1 q&?hZ%>ۣȴbP tMY؛W`!klpw¤7zǑ:3G3Myĕ-oITCvB. 
(Aߵ1;wCDCY{`Qh63-Sr >DCmRay]s))D[k'ApʮU^ob>φ8R0m{jh,{y qY[:΍{[HH@"e%ܼ[SgGsmګuQCڬ2rکE S6d>K$O#FcXyø&eVP3 аAOY:\)Dр8kś^}HTnPG 1j،)wt2aE@,'~1yHSyˮ %?W@rPK5Ao:,ě&SE>ۓ,+%9:h M̫i%QKNJV0r#EB{n&QMn(]ik-&2JFׂbI\/o;~& q!A|[$A*a)+!u*Rj; S [[cvaSǀ R(P~P##Y҆o):zLҶхke""u> oUu»qTEd"(yҿ,sN㋛[Uwīw]K69*y4_s8aBN_]x>q~Z2@pבΏ Ώ^VY3 ]C7&|O&`;*gQ#ou1ׂ5 }"1BJᅩ̦hq2?/ ?fsN4D28] P;R~<+ǣhDY {єBdGD5IHH8\XO-Q!?a;RiLB5`Y]c qn ;8a_ avSwWPJM9b*rXVIk:>h]t}rs@i+i!}B xԐhF4zXЈkgV5iDnU&VYICڣyꟉ1z@q|hxf q!]KE[ ˨6ɹ $ _ݷ3N- ͛"ھ|[H(k_ C3kEOXy_& LhcrX E;{w}LR7Z ˪홵\\+"q}AhZLpMoHeR=@Cܧkq7U]ʩk"so` 8J[Yhi{GpJ#eJB| Š'mE*腛OR ×l5]Pu,L>JMsM _& b9 (\)4pnҪAi#IjYAtB$((/;!%dp_l@>fō\򋙛,ƽPe sF.]KEO!*[:H'B'WZ=A$JQ 8h\i4RQӶ~q%m!{xDdحzΑ6WսBt~8GaK%D!RすKޑMGp K$];*qЊ&8EO"H:Q*W} ;1?,Ci1m$Q4ys &RD1FveqYxa!n7,iDnZi0^ h.T@KsDT"_%u|rFTfZy>_ȏ}ۨAh܂f7b*lj62ec3A|~S`JU7 Pa~L+n5@M6@7rlVXm֊ng}g^㨙[VJSDqwThn0xq% 9-BĀʮY`e!Y,kvi^ae ` J%#-#?g{XEN""#B_5 ܭm~7"iUPFWdyD16I:3*-S_릟1m73au{u-=#??Eq^J\ Z74!A\r>\(︕wLQbHp#iڵ"ˈ㋸@Rjf@!ߡyND_O\[It:KN~3*A/k;**عg j^A6 gN@QXu?cLW>M(؊ |tt`s bfv;g.msHZ$U"It˥LoeѶxnPbaCB~ y-wDfްq3b%fYHGi(Dzʿ0@:qx:Y=Qad 5 !^-wP[QjA|;ZW^Tb"6ȟ - WK)rsDZiS|nq`}*>/h́kԫ*2$߭~ i3Vѻ`P̄m|V;0IUsW҆;p10'nVe]>ᩥ8J`q*"d 19PZXfB$ ! ά'{h,$s(q?:9iu@#Sx@}_V|};lځ#x@04v г`?ʺ_bzO;dO^H2] 4hu'q쁖? GXCC{bvZA* 1nT2rRKO8m:oDP'} aRv? ffۼSPVȢFddƗ2FR ,t  "Lo C`fOva.!?ruFYb鴃w3 0r qln14 jZ==mB}^SO2F07kh_qiY~/F-@Tj#BXv/e-"f5Z,zVx^˟jݟ5s4q jiF #; Zz{ v Oawܛf@4iCy[_YRr[_"?roQ# b\/q #Vl;jSIB{#=CK>- ]/UlIc+B zaSBݨNږʿ1ԾG` Xy]d$w<_TtiǠsIi\)ȿ똵o۞ku3^x:eBe!v*4PʹNjr 1e[{ŖwY5 4\ 6cP/XMWnLyܵ{Q{pN A:B)$)L gꍥy5 *Ĕu!SHNmW'ퟷ^ o偗N[ J~c+D"SHn_VI5 6(9gG^A%9Xpϐ[)C/(&( 94ur!*Ʈat%\ M-kBZFC;ʇV} 1,oՀE"ϹGnײbZf`_C4{Ɲ@?Y \?ӯ9~QF/`/dl@~Q'MFy@ʺM)os*3ѷ j[*y2P{ɩ,j'SH#8J19M`zl}y1Ac'8νUDozCǠWM8D/kśd7@Z랷s( gnF~aZԍ_һSZ&A&d4TO%ϸF}- w)DHDbQ K\&S P4mFgExvj. lԸ[m6deZEO^1yYv4xYԢ\L!Ft1aͰ{Y"۴`!:]Ѐ]FW?7#ᅈhU"lYJ::dIU؂UR?OxZb-d]3oyBJƷ":W=alP38~P4L%^40O {Ly/VnR9w;l:QA,M,Z]" =Ţ׍Gx7Кl!MnT/جj_5&joe\kJkh-&uI4=cch"yQ}%wC%T p'Xp z ۢ W+JcXutK)nsB:#G|ɣVқƗе}Ċ Ec\RG,|nX#@tgc6vSnS|U.xܢe0CPN=ǮH(sL5XF`OOxHoe. z%ڪs-u SNaLrDpd];mz_5D95q3ĺ_ˤu~F5Rh yf+O\ᘙM*y#eh%Տq81{{tE<~_1pqeHсP( Bă|u1+z %}#(z MEɺn.}č4m{fKkBқ~sK\ Hr'%36:TEzV}aިz7ibӮ} E5,|S~/ߡg$nnys@\a%3< XdjGUn"?p) n:dqʁEG2?c!*TDq#$*lM[ӃA'=kQ?jy;?lչưF̭Hи*x\:2o%U!9({W7cmK`Aܬf A@ڑe 䝴ldIo=8lt,\F>ʲYJVƵxdN:wJk_}I+<>%R@ְz*} |~6W?Y9 };RQ~Y,|v:anqa9s7?{EՏ%[f2 z^-,cn ڡmC;bts^GP~ &0d APgB/̠BH_,ZK2}=;p 7PΣ A=B*a(}j{N|^޻@D~Q*0At6|VB2tO;zi7V/nV qHi]}yǹ\,0ĽYVȴE:5u/в ./zqgu1;otYN戩_4ߥN:ld4DqYŵlC{٥tJz$`HF 5o9x״s8њ~)K*Vy]va9Fplu+Y(~qv;Z/U3w{\mc 3`>=&D\ ^6dKd'#+},/ tyXp ̱ホ/izN"Bc7U r4S=|i?<" -`e!~jiLtđn]|vR+eNwnBo^3?nw +[ݫcfrCC0O++wms{nb*ީ&(Jj4ry;!u |nO?0_5xKw$Az%zwWzAK~^hfT:E~t؀rF.&Xc%@avw0 |ɲYrAf3̚o41/h˶FJDK@\$}V\ݟJ>3o>|i!].MB(F'IG?);},ﮠsoZ>.]-_rӲ +!v}~vIC%,019hF}wH ,MJ1-=vw{\P)w2aLmhU> /Lygn6 "N;IԬ8O֎əz%7M (w/vks|Q韵tޣ~$|a.#OL4 .18)_V썇,fR2B|%TAߥ! r9%>m)s+eQʟ<&^͌Ԭi9u}`gX;,Jzg\zgi;FUT0}"iZJ']Nˉ-(3ObV1|1!ǠȨ*z?]CϹq&YLn]EǾ"Oġrp2Lkcqͯxu'>|szwBӫCv6&ݭG< q1d:n'O}3L2Y-WoQ!Zr1i4R2Bc8}rط|Am htWpx`2ge?n|wQ-В>yS6hS4+XIOA? 
Jإ 1/ml?٫^˷$D*0 [̟ y3|J 獽3>܈Ż;Sp166s!Fr_fPd?-uW9hR>Bb@n"iD}E;;v S/5Y⫷ԫSՇFL;lYwmDVA{MJEBoFvbAcv|!+9u5"bL@zd-/6i9xO/t6jl~G=O4r)t5usbB-,6"7:w_H2&"ꯐKbFޯP}Gܲm?u;,!hiL,kźzDށ[VΦ<¦ZVOnqMpown*BP+A?x.eY(%frA|q3-A+;HK(JnV2~[Z"|*6iGm8bnBP&l{AC;aq@[>I9zBa){)|L@>a qSL݄AʆB|ʰVD^ξ^;KGV2Ky>.xZx(~oNlm#}'Se~oq <{7Q)I&MI86\t L*k-*PzxZ?o`zAڣ7:6{R>s%OM`am黥m4Rj EgH2 ؓZG6Γ6}T>AvzeaSj.S|6߮x4q/oc~M+'=_P8WwvepoukE 'և:@|픉ϳ7ms_h&]fA |ٺp繇%#kԀznINNfVU/879FfUa&?tqI՘Ol{.rA:[i_,B'7sKi[*{> oˡqEi`r[E8M*eų,M|3;&->9,A]{V%yExlQc ΅J'uzkrVXe.0JqIj_ RPƋbzZTN|L*NZ,(rJö>s*\ifh`6 zy=>hbS>>MCX؞RyFʶq(R'V$e uNlcePMX屮AH9a ewkXWڽVqHL>S&Y[: Sr%g6&x=mD*%E;&v^a)HI.+cJSjA% ziR#Vg}sEd7 fփ 4% O0UG^:޻Ѝv}u9}˕ª&ֿUed>VǤL],PO;$w-ވ6Nܨw!{ZnּsOɸo>#tng@N1}Q쟍m7*cNr󿟞:8 h{ ~PI/eXʹ[zqCni߸䠸ݨ5wх;+GLm雖LfJJ?bX#ngv^ y(i|8ж`1Kt9r_E=ǽ5o蔑9sHH߯aۅ1mJ휊^5}(q'wj)4gч AuMz|5 j.Vy>u׌$/?/{9n;['d@d##Fdm,WwZU0 q*W&sqq, Ą>!ýyo03n`SO]!5|vӧ5L[L97ȯj}c>]J}!/ͬs2ow<Ko쵊n^$g$)Aw6[ ˬ隄iWUtg &^|޶L=oLi=>N=|7zx,YrnV@3o):7ռWθJޣw=bv>vJ: xuJౝч'$ldMUY2GģZVEعA@]QKl:ŷ-އ&UrؙaH ZL.ZC~wlųdm:ּTNᄔY9ccKH;ZoܢJx!?h&SmpփzurK]zu5>]2:)Ӡ:杗38˸B>Q9}mվ@$M=Bт6o>U 1캚2,D{޸ihm7C.i=+D+\!UZqu O4GIЬ%[[E)T^KYY[T@GSu<}AhXD~Mxړ7-[zl^g-o珋gkmµ/nEԒԕmCKnU1 j7j;#_-b6I r)睯g},|~V`#]FFyۨھ(S0cɧ<~l]'^\,0 'cxiR|A]g~4i{ϠLz6hٵth mN~4+'Q̓~JZ$ ]p&G~%>eR&UtNㄒfDijMy'bkլ]v'wfo48um#[8鞵\ ~7 R?nHWIk~u0f@:cBFl[,ns\w57 kͧ*Cgkm33*72gD᭶9,PWy=.a}=uPT`|k/P[ (JK-k hqBԭ_; xq lYeڹ0}~^Lm{VNJlgxul%`W܈pajqݮlG wo6{C"qXlX e)vKUTNRߙB&Ԋi&Wn5nW!i{ v]O ƭb)jԉvIKw ?pYPYٔʈR٣P2JU YYٔn(}cs]|x?_}_q߯+vLId蓴G*/zNo0!`WQ]1p8[)5).=TqUnW^%wؔ,$ w9kىnq_b nٮR0s> 'Xy>H/ڋst|qTF1f0k\mF1y-_͊-ۿ!h:,i; K%mg %XC^7g}/ Q7'B[d[NӰov5J<o&!u#z ?(jwߐGTW>s!Z{+)Q}=PJA9z!#zFٓ@, > ~]}Ҙlw\CeCA^9&j<2D %煬SǀOVRyarօ͏̮4Ot _ںa-6Eǟ[f.JUj ֠5߸5|UuG+OBi'շ# ȵ0Y[ '3_H繚2k#aH+׵_ iFv@w}R j]]7aVA>\SfDKݫJ۲1ݐC;XAfkv7pUMu>c˱:VQ~t|A>`3酴+J6FȄ<#8ض2>+^!NLW Htfd 1U&}_r*"a;9e2U]I!ܽ&b2}6&MIoiwꄽ}ҹSëb ]KT<(pIL% x}8XR\T)$c2%h;9?O{C"> >!J"!|LwƽEd:3^mLf?jyEsʷX1d-Sϱ5 Oo>QN)~n#p%kE}vЎ}լ?En hhG+ hz!];v25UoH5x*[v yN$m7e ^O!/CvX>w&G+ix̑. z/N)wr ~?Z~v܍Wqsyƙ:Ij!BnMtRʸS~h9@f)3dI  B L <_H$|$7{W3XWYP;Eإ0!|lYxN`#\ }:SҌ| |ӗCJ26qX`Gt*r\ȩrV cGE?C xO|.1v=gq =~2!2!AؽsU1уvEJ^u;ԑ(5OLЮ>>j C%H?؞[)F,h&jmN>|FoǶT0EڔCOw+U~cWCYC~J%=_LcYh]EoYBy.}v%\Gl%OIyUNUJm[y yеٲd^<,{ʒJǰ(_ވ;Э ~,`5qgNgn>=*><TiǃveB_NvZ提Fz`•'/A^I 2)6"Mi޺4{CGN;ZIhs2"t!t7!%l:8w6ku#ߨ'Q^nr /W)2V!F6E lvrt {q@>J-s,mF$i8_5PƎnY!钊GBa4L,q )4GkfS靇cϗ۵pUE}.u^λZ?[Ɂg!MBKBBZ}u`~^ⰴ4dYnQn ?X ;dg p{` hC.CɹFGR(z$"jY1X|&K-.\+>sqU_8tL<{f*1 p<ςuT1< |bTnGV{~iЖ`hO+>#PLu_&װ֦աf%#-B _ 6EJve||OZ'[L޲ sGyJtꅂZd{oh+pLf~qhG>86T tNQͫXym+v%mPGݠۡ}tNUXi(tۂc|!DVI7:Df529ƒu$na| 7[X ЖqNV`wpgљtؽ$wg:UrW}L#1-(yz-Bvm^x,C3=ǫ$S.=xD Q6΋y] q,S&!;Vt{پDclc˒GHk-FT hcJv`}c[nVw[SҲ'U+%{rX8n&a}>Ҳ H@ Nnm',q;|4)}G(-?Â' :9fB(YIɅZ Dc>$m/m%qrM?uI55M}~A 9,j?BEZMܜ_wm)NHۀ{ 20n 0<>5s2>)%^vbJN]4>O!I'd[|ɘgfx+,4^N >k30tpM-S3X-ly=r8/k8т͉!OG.!ɀb4L58| RqT <|LK򆃳ӈĴwuE-i8].ZOYZRҟ&EMKq isv Nxuۚ6fvVFO%~ LGM2ځ;}?~L!cU>~k@ȸ7Սm=&g޹w8yx^Vc'O2F畗FK|{9ʪhjsߓj0mv{h&~N?/=QRr[p^Z8kvj?{Gwʧtvkj7g`mEw?A! 
#ju_jexqPHxt\bJ CJ \[Eq |udN6mWkg,hHɫ;mp9zk빗x;'k]|r _5o0nNA13^ye.ewi?gviOSsqӚ#M髌n/Z$mM){_&T/CC'Ϣ!NQYEmckgDZel|B픻iX d}^%P.=cIp"tI/o{3vO_eoLX% &#E_@PD\JVQ54?aMͲd |KƩ$/x%5L;g+1e;b JO349Q[onqqۥ}Y~}7Hĥ|%y?A 'u L.5vw},?i>E'L @ؕ/{ĽPV8yFG gbLjX?gTt/9r8y3gut LΝ`ze+W-]ޢ G}$κOf|^9W`hp3m𵧮㖽;Nw]\ݻG~?yʶTMi[hh[y_.v1QO~2%%5---===##ի̬윜ׯ )ōnin-$s$xkSPV90| a`ϱ##ܹ<L)= pH<;))))wر};)0H ۈWG.[+族nI0$srtMIqAAk# r22Rb"B:$ pf/W 9k0uZwuiσ}\.>~TQVJA[v7oX]fyKfM/?gbldhh}ӧ8VT%{j.˰4)lԤ[fdž:*_JKNL},~}x{z߿|mGGt;|l 㖺v2"_xTtc|tu45TU}SZ\X2)EB|\LH~/-ҏ,~ kG2VXMPpa//L}2wݝm͍ u5Uvxo [HV.=?EQ˱j$ͼu)?摃EAvZRY)qA\,L t4T;)vm#"ZZwON|ٟ|#MD k5!s|Lpdwef][Mi>y\]UIQNZRLOqscfbCK{%3ط2d*e5 /+`M𹡧xx.1yDh8Us &:gNhU9 '#%!&"1|fu4TfL }].+*{pc^[}tTg&ϴWv鸜/>F䤄x[&|MIa^NfzJҋ8Վ"tD4.ϟWAS|uoWP-UUѷEizKnX^:op1i1<,MAFgy~vjrζ憺Gw̍.Rpk&Gz:}mWq,i-5Byޱ03iUE C\YPAJc{~kIv͇]'~)V|ܒGe[CR}`qDOk]0_7_4>qTI^Z9X2R$ܴ6ɦXrY~q{žw!zL2n1NU۝'#ݺϩ/z;k+9eBLDh^((uq)6O278 }{iq`Cm*Ʊ~w_29(%|d떅m5R 3_U^L?ו87Qkk5W-W\"l){yr)IOmM{iY$N.'DxgоL|]Eƛ '\kQڃ85:XYjGdqcIF0kny3nUمZLIbWϼtv 䶞B<,X_߿k{[26<ᆥi U`deK+hL.l?.Q-517_s̃O:qewE*B{(q~~_(;wvE\Tas|2?5扏 dgM05:\]:#rbW.Mk}|v܏Kl#rm/I Q`M}ixlkqN[CQ3B )̫/^P.81r4A#&0owx6A060#Cř/"Itq%IA}{(.LUs>[޲ݢouu;qzZ O\;wZE>j-3Z* 2{4xWH5N6a|H4K(OGn ,-cQ:T-#q߷W&=t.+@ua}OۼǞN?*'xY,7g034Zӽdlr3a7^ Ks578qDgn|B)j{>IfI/8SK[<n)8GڌEt*hǤq?tVD_59*}pis_kuI@L8c6߫gjɛ!MyЁ./I 7xw\9j3j;V: "|h'?6Wg%HRy`Hԙtg._~-TGj\[ѥ`w[3mq]WeSʒ8-_ٮQ!+Ɵ&#U9`UnL_#H+\$ d~h{깿ͅʢ ;43!=0EXiw,Mi;rSwʜ*k ;#lj]dKP|h(Jx|ܩ#"\:MNRdmz'LT߮OXM\r%{B042]cS%x*W%KO0]ඥ aNs}_F?py(y0 2!Ksϔ-m<[@X{Y[ImYqPؐG쯙~Ɣ-[;v,Z2s^<=b_v.uQ$r2>.@w俌si{L+N N-W9?L\Oaұ0˩&4KyDݾTeIu4Ԑ9sw]QZLkϪr+rق'C\gB-&xx(5$Qq>]9ryxO0{߼xFYkٖ#U/<,ϝQbp$Lw/37i.QtT޾P](`k"l;q 4|sqYV*/IvDIQ"zgGsz>\{_lO(4wv|z8 0#30="Ls<`4fxwU%?"I}:o/#/4w:4yC2IXGYjs2| c=vY'j_$AH9^x 3 ar}>,m[γ sx直p[ɱ'KҢ0=*ϼpab0=<Ȁ#uZn'':GYf뫴%NJwtL[g>Wd?r0f99`BVe2#JP[|<a (='/]8_{*3},qn+΍KCeӾRpwJLSO/i.ĝI7Uxs6e AO}g߅=[|PCq;`sYfOǫƧxwYT|,崯zOu*bu)kݤsnKUeEzۚLw5n&'Xv|p_f'`-B}{2X#c!)u3Do~G_eF cu1]j^{OH}zp}^^OTRLhIΥD1==xBrSkqRu5QSCM%t!6jů-osN-ƨ΄*L< U95cv?m}oSa!E5T[GZ2X2z2]z)Z8/Nb%jn/Mk6E|kgn7GxX+ 08[:,;1niۻ4!5"6Q`FS~]ki[c軃v3/Oyr<^S&`)3?&sfۥ]o3sw{I|%^90{[!Ȑ[ƞ9gev-n)|璖,PSaSNbĝi}f6zXEqK.?^[*qU&m7n/Oe!FU{ #T_E 0ZkK}%ӒS@G!L䅟yuafEQn*J|c<"' ;IZ{T~qQL>D#kgoW˧ɍ5.`z@L,ml苼sȓ4;#.Z7QP /oѓ\̚뺊ջWΟb;o)I }Eo \*ew{޿#댱'n$MW/|wH{%?桝0x3Xp> `Myn;pUp&Ћ뢸kʼF1EsD1pZpZcROq [':K܅=6- j~#Iz\wvt{9ҎK sKʾkaXA9ᙹ8IomueAf;N/+㙇00:3"}/duVJ)K\0DmbϯKKr8KCÃ/<-J:&9q}>hozBzߘ8tG]ɶ }IvGvQ3ýq]ssJo&90i(WV+ڊL$JӞA O2g.AuOY.L:O/hq,4D}`#I1?T*Yy^jܯ]oRCݭ Dvߒkry8]ue2DqW_&Lu~Tl(ˠ$ =K#=luVϭ ?]@}Buֶ]=txc2NW>[x3鏑Ƽ8+ʬ79Ś`x-QejL_>;m셟~pA,I~r} oj0ve!Cn ED7yQDQDZGDQ7E%ua_QDXDT@q#Q_"""q#DdV@ϋHKDQ("r/"(/&"nӢHA""K?%"("R Uen&}ExEd _čைH/"AD}(_D?_# E?_AɚOn.b#eQč%Lb$uča@Lņ9Q~р'EE\DUD bÊA?7Re,bc"RWDPD6`DD4"EQE("j7؀VO,a7 qsр" /1~IUFWEtTDE " 5$"AfsaM(⟈(JYQ nQQFD%"WD/0 z );e~]D ZQ<#6" qaD."UDE _AQ3ay_1(;"f1D/EWDsӈ)n"7"<{b&"<[⦠eG q3/A uqb؀ND {"4H_1bL%#pSc+1!"f5){" )؀BD&"р.LD:b#D* HD _1DT/53T'D Ay"alXlcqVl(({"S u"Zp06UlB+k"& i]5F\PGP"6a b.b )ADvbMFd",/B?(č S VD Tl b @lZ1I\ќ&LD، 'nWnTln  iD.c3kU= >͘/pfLAbF 5*i͘ b/uSwA"b͘"S '76H@8uFalD%n(#0/ELM&#SSؼ*nyfEa+rp3LUm-<)TDr[07"`\{W#BjFrڪ)zcp`fDD(؏&BX3,؏"|U_7=pho(F;FD\+6"䬬HōT\+)ؿbЈi+"X44"2ꭈhrp"lDTZ q\ a"D."Z"$"P3pW"+0&dEBDtX֌y## XW+FZ l@8{p- '"ĕ hnWxއ蠵FUp2Akc5bZk ͙ڶy8b%+yPO30cz'@Z0CԀ6p5 rkdF=H<@AGUUx6 )88lf&  KA:hmp1ZmBt:hm|wQttLNNLL|*NO}mrCW6fPG?fgu 6 Q]fe0( NNl6`E,_1t5: v` Bk(Z \7U` {ՠh-@-&,:QDmA &RZ7 b谵A\!7h]:jel#!1:ЈP\ 1W:āR\ FƄ17`$c™ 0jH$Ąhȕ&"Hz@'Ձ"$$P"0f=\D ƄH W%ac@"rAq-FrD!ndq-$n6&fKLrn` 1V:x앐(ɟ c3s앐HUql"T*†%W+ollDn` Q*(…\9ƵJ]a\= %ڰ*aEDq3uʁ l@@AYAnTIVŵ(&@Ȧ m7 N6h#R1*Ԅ0Pp"A4M >Dlrl(,܌҄GI^ lzٰ!PlddK 
ziUD(yS&rtMDliAf"AI$d@gHɖyP1:;60(qYZk]kJ^91oxNqWΦ=*]&ڷ6+|~zth/"OZQ2FUiPܐFuVa')ϴѲobjQd]6OGuutm2F4j̞[ ϻcZ$ ㏭6T0\}ISK$F2ʟ݋orhR4SFu*޷PSv~d.ҔsFX&3aU3T( Vn6a߬M#e"LW9|pP]?=ʠhw/u%_m[2שX)PӺ]ī_ŝrY:HU[cҫsk\g(2r逸mutrt(9b!Ӣ6Wh.V .,N~UsLZo6}毴ӛn0*Ι3? q%vᰝ9 I~zq:]Z}zfdP yIװr˅mrC'tdI*Xcء~AQnK_0wV2bo60>`txb/ScC\m.8LQ =3kSl*}{bG+~N 6SUSsQŸۡGV0}SnMy)2u`Iݿ$Z+|{b(^LT=:mgѐo~ ؼ +Ɵ?fMy_9kvvuo#5hˤ56O5%޴\I8MXEvTlI~OQs!$shǺycJ7umbygu~?Y}~\rY0P8Z.թv23_;k|.mRcBZ4I\ZA3pڭ]\9\zX>iҟw7RGIcސ̸*f@õKox,kWt}n$_8haةI=\ؽHۻG-mFL͢q:Jp=MTx}+=B}IrVsym\|fLV/5$ o0]Z}Okk>igWN{:'&a2eqGVɾeiҥS{|zi!no`ۅ&Z 6AOyTޝ~}qGWɒތ?=𳙨ZsvQ]Pzƞqd6o RMeëo|r3KǷ Rr~z߹nCwUU~*ϼqzH W)mсVئU>Sum'{riN7X|v^ǟGM5?WF:?fh/9^,u֝p}8~vyb ٗݖ}N>@O쏒W|vi&ARi[SLq\ݤI}z\i Xa(1 MGN~,{ڗI^Ny(ʏAv*湣NJx.[qԦ=2;жu5i!fbٺ8fZFÎX9AnZ4~pvJcCZ8q:G⾟dlX8g}N2CϏt+:0Ѷ2#ۗOˋWnZaȉ:`a3;H4wc፣&)9i(6*؂!.?kx^LmY1cDOц须Җx}nd;55&z=;Կ}z ΟmտMͣc \phZ/_vqbJRM }0]ic>qHOikbgi%NXORkȋ\mܵ;bdoYWwϋ>j=5ڟ[ѣ+{#w? ' Ey *u+ |i}A[׳ClȿqzJ֥wG.sZ3@]u-ꌁp?K&wjᵳR9 n~%N|mi6w{- Î9XLS|~9#5|Ƚ*mLG7)T:/ԓx}ɷwgH >wtţs)\=lݒ }xvkg+=LBۥSkT%£;te*΀'jX3NۯXU/Ž;25PkU[MWFJmRUn+F*fGX3ǯ׏0mJYqr_2=MӦ,]VrCoStyjFj޽e@(PPe:Rw/G3|N7--,;SaB0S1'I>Km۩2-}X ;Swz>C hs LSmpkfacU bNؘ(K rZ4LӨVS$= caSu8Hּx㲁*޽|zxُ2I'BX^=eC\3?kA+3=-H9bJ ' a^h7~lo'[|~nliu׽ѓ F)EǕӇ/{r]fcv8-(}{yВQ/`P'߾ew/OQ|Q{+Mͳ,3{ri1Im@rhm^B׎n2i! eI!{WѪ޾f󉃻*-ܑ P{}7 XOj[,R<{]/O"[HJ?hm:@*֊)¤H=P|k먴d,G7N߮]]am_u7"u/yJ?(l^LS޽K޻,Wj]Y8:e T}vئR:/׋~;zttO%WّPK7gS~V 1:|L ;vBj5g]VQo&)mDM)%/53Qd &G{v+g ȧJuϢ$92%>ډ}%_p_7IKcZ3uxz;SJ~-NQ,f@~$`\-w) Ӵ;|ʈ ٚߤܿyA]0U!T %UܶV8u\uv1:UeE5Xi|.O;/ӛ^>Mў4kOwـ1;OrQ!vɽLI ( Ȩg\VӐH ?l3{~ `l[1M_}]iF,Ƚvxͫ[a:\ 1>f]Aˌh|ɾ~rkg}|n5E)nWO^.l[0L?ϫ{+m{ӻ-úugDZh+,xӍy=fݛLSRmMҿCevf*KC-M`Uhxp[9{D_#|6`Y|ܱXطc{'_>HkM_%mSÏl4of)(mv¥'!F|;Gto,gՄ#T"!e'1VLң"8F-2^[O /O)NG$gtqHտiN-0v@o]Vv̻]V`c@ ׂ+*{xakrbN0ճK6׫VXlon?OAGCk &ݳĨ Hخ=q,IT>߷y 9w#0WI]~VV"- [[=ǯy]/{Z?gR#%op{$>g2:'Z%ӻVcƕ[͆wk|tzϢ{!+=]֭-L ԥɌ =nΟifʰ>[W e1ԧ'W-mÛ SaA%m_[e+0z'Y5ZUL9w̙Rȋ}|G}v=[E_4Rh/iɧ"mY,kc/ٿ ekD/5B8cIv}v.١:V`__eqX7rho7#71(U( g+kvzl1^K:7俿J1M%zHxozΘA udN_N^֠.k& ޷a߯ 2wo;U/X%I%=YfϚ):[AI8f9]W}F4[H?-M;_.xݼb(Yv:Zo:sKel}Ʋc5*-ۭm5-H=;(J#癌4~.L 9b̔!]7m 69 3JmJvqj mgܯ3&صWi7C9oX0^L9O];}+? Dֳ6I?fhtvwY3u¯G+;|MeY# lb ]Q'tɊ!bصyzه%zˌg( #U>uza϶bwF[n9Jw215X7f Cd>?Wn_<.YWᘍEzz-*:'ƏnO5*RY8sâJ}af#{I3J}M]? Gk ob.ӍYw"=֙M*qF&Ѓ["1etNzj=(䒦eXwLr+n]`Ds8 ?O7 *?!=kRy!Z%uh?3JT9~!0)͓ކ__g݋ Dw$*sIߎuEO찘rފiz (f9N[UÁ#nYd{sT*췡;|a Vғ`.{JGmlK7| wז?[~x|$voh6v&^y#aԗTY4o]CǷCN/M: LQeԇa'g.tgmb0[ͷ7O_=#Y<{Oeҏ۵G~~8!&nXO^b_^eމhGi{61~${\mCvFW*OS;Ǎ ^jBP~=+0T#pBX cpz[:mD??n1!ϰm2Gi5? 
sGj|/IaNo@s=ZGEۜQ-k˞wOAB~r4fZr2n㷸PychX4}O1/o[=oXuIhd3Xy }-aYHCCt#bzqz=n2{,>Q/>rt Vs\8c%NwmGwDE:s~U'ݐ0ā=yʯV\At:BsdML;L4os t)w1)Q7,{prm+Q?{pvCXa[{i ] dKۯ%X֝|fUgqD;fڮERQ8t͌=i%;W/[4ـӇvZ-jO9G4+JӫSkq^k3A~;ૂk~V|o_k6wyV\01IAڙW"ti~ʥ'9%6 tCsb{llF-+MOq,X:=́-K&VRyt׸S89;7@+;G9kӲ "=_6/rvV#>t?*ed!2=/?O `Kbxj/Sbuڰ`!]lҺ7,c0yG'954:,{r+>g7Њr;42(&{,ˈ= /hMG]B)Т,hE40}m]5dpNUřw";e4GvnQfXgt{vX@?KcԈ*R??ąyxDf7^z`_ SP:b~ƌMM;Lfnڷozӭ!R^D߲~R#'YRl>< uRuYj$GپVT%jPjDh)cth]f:D]Y72>Hc~u%oPna:O.%] :(IT߾w`;[bۺw֖:كۀҋ]:sD_qފ"vjJ2 !wэ{X`jb5s|{EWVv+j#uD/|zj<'uj^gݽx؉֭ݭřد&ufgmY-kcX${t}+{%|20 }J\R$HWMOnu\3Vq~_udERNK"QYR㮛-fUo.qD2JG +ua7Ɨ=6dӑ:=dP(m\7* $; "f*V$#Ӻi*Kqt !{'vY^_ajػ=\tʍ)\xX`Ykgzu;Ga즤~}Lvj{@Hz)$OhYcxXkw_t&;A聓pDPAweO@aC4{G1Ob &ZYYRvKٮ7@u:thKMQEQej|n՜8Vl3vg]],4D7O/@x%J33n_cGKLGh n_uO=".mi6;/?nȦA TiΟ0T>VF>vr߶u &TC GC}y>2ܢ8QO% 9|u̙4|.ʼs+gRtNӮk/*w}Jf[<9vHs~Qq(ѱ;1 , `E\r/*4ҧnpwGA53*t{o EvbAR>Ht̕be( tHnU|T"mj= ?(JGǥ@~]IɺW(UJb(rui#tKCOD炏 {ÞjЯhF+R8 %]Ghܺjx}L/"ϰ#y6Ez, gE>M>W[?2"Nr޸l 5,q%NBdq ]ߦk 3"'aLaJ9%k$:<ӏ{G騈Ƴ튙ƃT;6VAWmb!$~T'fƇ$|O-`PR;OrcI4H(O{`3-,1&,v;~<}r|bG/%v?=Nt,J_E ݺzM _e!jb:܀J:M/t:5Vg n{j)KWނ:m\:XcBQb{EGhuik>7F|&qs=y#ּa,Dd[ Ô_k􈄎Lgt^)̝`ϯkD3LG^ɨ獔cy~\ #4y;6,a9ZG!1a*'Ju^/V5&{bl]`T)&N|ЋFv9u%uY8?@(RV,ռyfAM+fMF6g@ Pq!^t",FENj'ETy*rhͧj*!kLI0u-JuYɓ\oo'ߺl^5RN5:4-ib{:QF5vY_ .Kյ=6;yf Cly+bixf\)] 1 3گ[$@K}1qQ{W}G)SvkkU|)yxAgqwnۊ*k©@&>5'Q> 4LJwvw!4{ 6eWsN}1V;Ӛ=l&ٯEeyw׭OxHVvoZjʏ*q~!g4^CIz)q0oM},4甇SfBDّ,n~ E+b(@E.2bloSќn <6!gWjzHc%tyO31Wv ^1xr!gоR+K& ? /HScAzjzrn4Q$&EFTxA9I^DVX]P?@+Y1k M=SP]0Z]T}|+eV#,"{Vwx8٬42fc ;" cj+4 {ŋ;Q'ܶ_"{S9ƒm\p˚n4Ao:bXhB7}_q/],sᔇտ΢TNsֵB"J=q>0 s$#hV U X ,l`a/2_wȩnBD#\x.FΓF]h^&ߎIq1{K$J5cB=츜˸IGT) +ua"oR⯜;d{5';g{Պ}-KceLYfw^>EԦCQփKu=ڈq)ߦ}a8ȂqD Lg'8{)@Y;fT$fƯ兙7.1t,Y)nQrm?>K@v<(k@SdQG iaw$͸w-ۼjѺ~+B0lFʘ}z ˦sI^<&*/u^P&넿ߡ!B-ځRc*x'wxoF*-V[Q0,D9BV# kvnoSBbh@Oy5H$mTv~Uy(2%Ѡil8}-8Q=//VDbGyꝨ ,[I]eZ}(~0i6I Kkr%s9` m%?CDe&:ΑnX<1y"vXJ?иaJ Fc)9)iaEd1aB+Jy8æ}}'\PK.h+Ksd;9xfNm~|D,8TIb- ~ؘȲt!}v0:> Ծ( +H#E!#t)c?hq ˟7<\hj2Tl۟Hi]["ڬߪtBewDs& ե_.v'"۬Mjsyp`醻W9naN E!/rK:LΎlbd#A%opF21HCY [C ӬhР^ew9'jaXL@(rOgҾgy{Kz]( ͫhVF ӕ{GӨhB\̓oz FVdJGnËܼfь=AO-"zS%I[a3h'ނ2OT`n_f%RΛ,1nvbdglሼ[Q֦F lY0\rPV+Mw}󈁽~ p迩18ʥFAN]OLB)& d Td8+r #鮘ۢs ieЏO lH܇v#˂@ϯR"W̟j|j94@].n2T[G,(+~Q0"55`;|CM]C3"j>fV7(cà ;3Yj3_(J}㣬qcu'7@̲MSΙѸ$;rNusظr!XϒQWu H0Ww}ƕPI][/ŖKv?pئ%-n^ GьvpJ哫ri&ia _f>9bͪAJsSxr)Gd1!՜R \@4[>Ȅw;nZfwn1tiϓl zC4jBܦKf)A}qw(g)@UڄȦen)qa L__ 2"ϝ:\WD~ _]}!Ւ}ʼn<QWjWTd W8VMypHnH僠'(2<ɹ/Bd#e>}5t}EVS{=,$_G7'jrP NU<2 iY&A;K, T$-y%^B,݌ȧf&'\GZӠA Lh)o"S޸0\^#!ΛuʼnTX_3"c4/nlQ4&a 6O3SAMwc-8lR «8Kiw3G@ox hF׎ȬѮc)[")IVg%)nf [Ezވ7/$7ʀEH?g܏\WrބAX#vm@V$,(M1|#g_-B:!xӃBљ {xb0*Vdݧ< ?  &'rGgU,nÛMAIE$u&uhj|w0B92abTX7IsSeQɰޤߏl03; Evj۰Zن 9R1ǗoG?R4G7bBe`2|5!tnSi^}氎gr*VKg=woXaf:IӫL7fD(_Ma-#n;l֠ +Ouμ'sM6xh(cá"e-ϛjOh"ąnP,0@nޟ޿wŬȞ͝bס mE^ҴIWW1a@3nE\e|\%'"+Y4ۡh`YX[+am(qR{Dj>/䁈:vy \Nn"%][e='6!6W31rMšȳ9m7MNMcjH"T}2"WعyRFd&`ybspkX gb` Db*(1'>JuYZb,/r֤T{'>@ՖH4}1e&O[/cE6 4!NΜAh ^FDg,ml^IHXijHPXC HlBT"l=<_v$FDo`[]i}1t5vrx|OOl} E!\ &J=: T^8#P_QEjjBl@F% 0vrHy1 '<\qDF8ň%ͬ7@  M:FJK76j*+M_B;4M*9q +Q[e5}.{^[ESпḬ9ޠ}Yg<ydOe/D0Dk }ۧ7|٘lP @-+~,A4`;ge<ǁz3u omzI;I BH ۱l QJ>h"7 fLA2jz4\G QC `8kLl!F4Y# NC-Ὤ7LjL^1vZE|bu"_hnbm 9{/r4>DCQE,2dMY:D^P@@DaVEH}SPaiD YH'/#&rb=D~-ck@4UeY#@𮲒D+Ȩ[%#!r3$zHrv+pF@\IHadlf'$,QQ,M%zց Ұag|?-͒h"q Bolׁ@PʋPؖT2'-Ջ-M+Jf s]loe6=-{wbafD"(|pM>pӚr~?nB2.bn͈TfㄉD}–c9;hxM@(m| 3 rv0>5a\^7Ƣ(~6EvW.g`Xmw*R?(J{@uFgScQ$j.aD?MQvb/[R"|>"k_"'xSdih1&4@]-<dSϞ2f.aI쉙F{u$Y QW2:4*cvoߴv(Rh|W InFd/-·RGf[pIiESE2mIEp zѲ"s[ٲ0 7ϟ>~$QQ">-zD`D Dѷ(j<9 /۩Ȯ#/{ їUbd[e#ђaLY=@Gy"'!!sDLPѲE WEk 8$Y'޺v)X@\! &)_6W Ed<Łg"xɡ1;>"Ap ;~U4 Qd!ɗ&T+s.VR ɔ@? 
&|M?ut6TFwБI0xvu|l0Ro™&xPdn#zd}&CdI"D0ʄl01BsX.KtI@Ekh=|:L pW"ջ@pFɾPؕ!Dɬ7Myh\TS}aSث| kAD}l^HրZA҂B=Jߐa=}"rVF.-[.އ E^Rvb̋2 (` QP@űA*Bo^$AhTiŎDp, ;̿_D[(Z6&{ap_Y_ޗ`KnGf6cb"t4!ٲq 8]"U:KSoE_]9rD-)ʩ̴hXMBc(<ѱ$r5%@Ѳi4hC>HC.T$ӺI80'|Ӫ> /y2 E.'<sN؝~W#Lo\͖=Yk7 Py+ȃwcV8!2 |9´#T0qWšH"O=<7Y{q͈b_B.DOӄLbcEy~_x3=8njF'81l CE3ۯX+rBH&=;{EXdl&܆6O" D[8M>vofD(VL@oxt7s@ܵEl`VҲG0J2+l+= 1> x(̗=FbHѦ%!h?Ct9zӅDyw'IJ"~,dMhz!mB.͗d!:3YVdRBC-#xzF LB &Wli%Yo nVdU $s ca,Pɠ VdW0'~>8b-F!A0ilncqTڏFt"xxh)y,| f>':-4j;Wa(٠3&1}l_6Y $F~rHQ(ZMI"/zqYO iFġ20Cl NuG~ /5Ap{voZoa>?A̿۱"Ajt@iO2iXGnY$ڳ"2AG<~u"DH*kN $U  2x9"8DHFq݊sO4ބX_SEG< E}V+V(#Otą3~ˎ׮X_D-F0c1M5άHXЗ Hwl^|\Sڢfp.2miA{(Nbs*DX8xpl9k+-(I y MFpp5b [ I9yMoQ.*;K![n0 + M8ߖ#GM4n*)QHxV=Df4@y;sbMg[WMEc TM&0C2V͈ze(}˾KBSY)+TVy"[vmB [v}f~G"G][EV`)|AVG\tľ`D%bl!0v;̪\F#2̌p!^5!AA%"5Xk{ j'QS#Al"^łb6itML4rgsk3gfA s~`CDgV3<E,z.qUetf"wC ~;ƌ;*=w.d^ fKU^;4i]@>DN7S.Otp}AL3<\vd/<{X"]-qFhc(}=̠X}mooom) PD>p>}{M շ8^xMa,1j'#i/#.fVsO:(7TFehDKchw <r:Di@ܑsE"e`DGcգGvډ\bΙX@S&76y`|LT\1RDVFBIE֦ۼ .'/";`&jqDsɞ'G6\}%˨t 3YA{)kMND}tsOYYF$rJcj9؟Hn3-/mLۺB$Nc`5:|=hj:l(1,d `?pk4V㛮ۑs%в9npnc8rd$X$ngDeXKr+#F45VcDp ! wׯZ>e= jY!ƒyʰ(Gx+ VxB: )w!ev|xkX=qĝ0C}gGM)1y,m?+tH~a1N'VOO\7U -ec7/Y is6vCE "ʘoeDzÉ{FE`EZc9[nҭV\Xw'Q tR??QQMoIo nc% &*%EV^cBHadH$P|Na> :I#&C{K;ZvkKnO :5A d{CcJCȔMb)^}A^.3-ưtd0X?GUfQHyN$"[c 0}p7}476\Er!A<1 VKx&c>Atuӕla7P RKEjG(D >PЊs#$eQ#s؛">݉D}fH[ڌ.[q_};ժQ:Bu= {r y12AȀՓ#rǍۋbsJyC:}7˗>tđ}SKg^>|1Ap5(PxSw^Xb;n`ii'!ÑC+H_,!rw4mda 9hrZ _)Hdr⸧/b̟Χ'+qp1Kx(!)&<݉0奱GIi 8>qlK_nm!X-a9^%SP⍤ˈ+ls61{bqgu@\^eۋlAúZ̓8OLp-MB>T7&YE_|&|1d q܉ {đltH&"p1S.,D]ɻ̉DƜ931G8?.ԔI)$r$u_j?(#?}}jn&),J\5xEPޖm8C0u_sZڨDܕNo}Tm6"iW_fŘj#/#MhOEsPUaF!/M߃Qf cW^Fo5Ɔ>י, }1A(Q`771VGߒywLjԟ9rŲ/n6'(gptӋHM~PH914FsD.=j)hݗ0;F|3VD0zyH .~a3;k xmV8Kof#yLn0G~t_dvF_ಷsH)GZU뾈1mc՚ql?QnH,nȺZ=@c5K1`hۤ&Ϊ:fC61Ju!qhp iF=tc lϗ6&;`OiHF#[mю/{+SiJBC̎q c'!#αW;Ejh]Avl91bbWyjak(ّ6>X5RK=:يԍ\Zl#W*%5?HF@6hݘo-Qڨƨyd}Kaɽ=s n[{nZ4v#ȿoۂ"5ݻ9`01GX7>h՘ ? 
VZղPa#R>ta}A{0:X=?e7K?p27ZE1m vʍn_ ˦7zQ ߬l-h~ۈ` ,صZnMֲ tD07[)NKv1S>ZʖHRN؀H[F+M7l&A&`Q7=nG.Mo,,SEAo#V3β7#skLsX MGqkz !2[#0FdEKEn_I,%<2"GnFްH11l-Z nb9|c'0@V$nOk+sEKaf7`1B|~^rNAp ~ݞ]xsQ,Y8'Ixsδ[|H*[Cb>"IoXA IyEkdl$pLc@M{4,"^fxG5/7Xɫ^w"D{x|ϊN"A0up-Z w`8*aԢ%4Vkx@TE:YDb9R7[ w`8*آ9ȷC%g~"X =zZTZ$ҍ{Ө|ެOM +H̹9G+Υ`hc`5H`E" ;>Oj.rx:Yof̝i|~K ^O$z"Ą; 1 avDA <PyXr9hB Ql_dPllDOywCDLAeC&VED%Jt#eEf'&"g&ʮ Ho_dV_hs|Y^ș^8b5D!ƙEEf$z Z9+ KDF)"" :$ }Ar"}xbLbRC@W&Dtl%1D,hdBv&Bt K@CCLY bY1Udq4A #"%"/+TE}abmAD8?;х,=q~v=y8b,2щ,=q~f3it#TWWrbNdۀBxuE)!2B+ECL!Fab]*1$kl=1Y.4d ZKLY2E#H'qAkHobcIEv(YbVcEf!l1Nd&Vc0>'z ˑJt,1Y bYb8Ytdu#DEL-dĆD:bJOlh1YdI DbZELYbbC "Ć2##MBEv$YvdH݉,D?& ;h.\qjĚ5D!"5k>\C;5\CL?OOC@]3x^wѐّE2J$%EHi'-R$d$d̦2KDsz^|T?=3\\ժTPlR%)vUW̟7WKf⴩Rԉc>=ھe^t9O4a/|~}>gnڿJ+K][XWȟ'WY2e8M K~ıqn\fW/7{_;aC ߷OzCśʖ)}5ŋ^U,G,3Ks%;y:m7YӧN8nCiO>A Œ%]]AFЙ/I6u R$oG~vܶcsfN儱{ di\xQt6)NoG~vضsfS'O?qk*?E֥PA#?x`wlEm|'8{An..&l3G0Rڱnnh\0w q>tOHԥ@>VZӨ1&冓k䒅TFJv"4jaXjVFNXx$qT.7.&hhigMI)7zd,[W.5ӨxН(9 =+ @H# RS[,/aqXoDt.J]4hhjDYu9rø5 \ԺhЎQ Pĭ4H%2rQAC;L8'׮^)qLe  Ƞ2hjGӈ°fͧ47^Ι?7Ɋ}t&M0(oNbN8{I%#Hh@ƸhfSS( \vaI)E`^) M-Q*q#H"!rD84#i [6)Üűq3d.G c}MbjՎ9ftȩAٹHHP8bdݽGC"rl] Zx@6NjqֺTFȱ9cclD1?Ah.Kg".zuMكձ?%nJQ#d1FwuR qHh23vL!ϢG4 s糉[Sis DY\3\+HR5 u1Aж0}IJf!tSb6.<"Z6#7!% D.A5IE.Yp>ȧM<*YQ^1P-Yx!9gUM]Da]丗J\͙3⹘ h9?!W,C291,Тq1W-Qk]#U+Z1,EJ'7 HW>(s1ŨA_S:%rh,e,ں-xLǷKr-n.Y,1EJA?}=~7eK4e."4/hGT3nM-Wy,ES T)/5gm/ 0>d]E>q0DX&(+OndT)LPPsku1DTL!u.2hyɩ燇2@tIt1+xh#dz Dy]C q8Q S<Z· d(<{zqQdVAfpHGO?+Ѡ7,/"&]p";4h+eԌڪ؉)i2<]+2[T75vBjdߜD;Qnc5~*FUg={hK CEaX$NEĠcJXGzO{H#}0\R>R4LJ?:{.jxzRKa"jv{'9C _yEA &B%6MaibY SikRܮϭ}d/,HLq2 *}EWiyivS MaBqug"@%v:J*ڈ-ЏyE?N] 2 T<,YH;R-, U4n SR-DEEl2Ey%pTۇD.[ȫm2EGIO Vlq0hG?AF.LPfg?ƺB\\8 MSK#s<ʓt PƇk4KW^akyZq 00O*\5ɠ8 9E/(YZ㦄N&Ji3]wu%@I%62iĶ 'R܌a?:`.UJ$@\kW!^ X#KYcJ*]YMQ2_n&[Ԡ,O&xC2&5T?-OD>wq DAI%7|8$\m#-L(0LQEoQR_(UJܓJ!(1g+G8*&5|z:U@⬛n":*Tb#7 n:(+%Q]Y&2J` ['0p$cX)Nx cI8uN@Ls a9͞p_z$pkذѽg6<~Mp=w|ih'D%0;@^&3}aiY8cixpFMᬃMq5}htߦu kq8 5:uoS Nxpʣ{ݩMQ388pfnu&My7x !=R~sd&Y8n\=j|3_xp{k*<>䄉j_Ƙ~mB;%3>SY GchMy{qƋMH`u;&{q<>̳'4ƌ9 O 9l1Í3ycFlZ??j4a1=:!s^9 6bc Q1<`|p^⋑#X`Ir#F|!ToN~|6m_>|Bj&`Wڽ:tذab=T1+^k?dP Uhqs%Aį־C"R 6F4q0*I^}o|`TulcK"~:9h 5*5KzΕ8Hot|>#3 b=C!FQĒ7:OUj,4yފ)|w TG>(m a6Qce(wvG|Ujpӵ~Q^|ao2f8B}0L طg8ffM}ѳW/L=~o=ǧa-v{{)Q:.N6p{ݔ˥4!Ne}޵{ޏ 1Y@m̧~ য়}>˻]{/&tMgSqg? 
`aŸtIa7KÑ7Cw&vvޣ?C njc.@eGM6Z7cZeG&ㅈ8$n 7;4fMbln8w1MsT 8fDfLqtnbI/3logL)Q&Upk|3E..J^2~⤩:vy_0KN;)r-lHL!Ed&vsI:}Vyd +āQΜ|]:p$Θ=W"H i+~f̚3_D!m!GH㢔Ĭ<.$Ι=E)C.%8 s,rOHhb38kKZ% -E- uΜy />?,EEub`$(e뻨™hɲOҺHhY՘gh+~?τDT)Kb1Jg{E9~Y4慈yπThQhźy׫iT$;ں+GYtWYhLZ2tE!qRDkm"D TJ]<r\YXzM[yHcE|uI]"-ZxeeٸyCUP`NJbOm_9~YݲD.f*lڼmMW 34vruhe]MlUw3(ݢeٶD6AЉqqG^ubR׻U:2\]쥔NΉڃ06o|ʺ (AMaƻm|>LrbVqApFDw/\SM"9xԦ0oiV/x}$o69 )- 4/׾/lأwE{yߘ~tE< `L4˂2lmdd6wԮ0=$Ԉ4~Sdfmʊ)[jc¦}?ub,.7xD-)G~Fl{h剹[\rh07҈٨i*g߉$*&"I'6.ydC?5[F֔a(5{pϫ["I i&rߡ0(؄D=;|' OivwwS9e pъǔzö~n-:m6Qhc~&, SV<(5;[7a)M0qP/\D#v2QTe+TQl?|Ӆ-lpdAe(OxRͻrԺ1M90-.;BEq^)G8KnwP2$l<`HmD?L3 e p#Ń9!Fx%5lP hEqAbT%IT9R<,oRMx\aۨ~d}klBݍE{X2MeoFD"9q^j28,| zShi ӂ1  rpb[aAhY21ҰIPg̤Y Yj ى m#6-"Ym߳/ΪkɄai!~!J[H{x栫/ʐݭߓmK#2iI&E<͕R/{ؽV!j[e p#'愖yi2t*{Ŷ۰GY(k2z 29b`N,XnJ63om7i7GO9(e pʱwgWkY\%`ނ7&l$#m͖)ǏP<EfõH2YI}6si"qaaP ֿQJj:o]]Ga~]0p1zhS#3uΒURj`f FQ1a"q(\$Di{pv,x0'6R_4|ꇉ3Jzס 2OrHv:KXIrah"7m|/ʐ T5h(qlA<+PGكp5K:wa~H 2dσA&l 9 ijΔ:{B(nv+ ;ҏ$/%$Mش3/ԭ[6=ҧ$j[Î&)R_riBů/s-i_G9tLJ]$M9U7n°_}tֵBPSDNJ\f>c:Y*cˋ\w_G<F81}YR)2aޢi<93_av#ں!ZWpN(vGrn\\aH#2rÇnRF8e^j/lx,Q7yMAhy\1FD"1(+Ƕnʖcڍ9.{GdrKmy]oQ)%WwZFY 멋2%o홊Dـ~ЈL:@V MsFe tu7I{ZĬ"Xر N9xRΔ@R=]41nE#J"ˍaVf5-#1y WTؗ #mort^Y hk4 I#'K]I_kJ ۴͇%8\} 9nI\-J]Y|rkr!Fd"0.T E62'C|6{Jwd-!JR8-,x.fi3as h=e"'ϜÕ5=y@[8/_s"'|u5QY [oq@?lD s%Olkrx0'P 5t]B:Qֳe=+v>FD*k9z 2"pQŃ9QL[:".[J5;ŐaLBD[ 28c/œ5O7Tu_ܑ;@D"1 }уf;xԤY}",u [v796l`&R?'-3dِGDuaȲKo2VPmZ|1#'/`Nk8okݱ8h^>9+Ԩуғ-3{ xЊ-7hJ.ۑE+֨wσ-m&7x9K@x0'hу -s)no3P-|WEE?2zZb \1(؄l < X:—H.l sw0=S1aS32"py~W}Vm–h;}&2"bA[O͖/Ζv%򂓆Fީ{ m=g7[~gW96~闹 6mY\ڋ"O>D JO``P쳑_^sg<,Xr,k۲4kQf \K7m J>Vmg#'Ix٪i Dj ۬6z[L|h dǃFфh ޕ'% [\襄fH9@z磧]Mx*r,{vZSwߺ6o]O߮Sw)hk4a8c!nfQΰB.l.Fh3?K[nw;ˁ=Fe oumai"1kG yK9v{|vhYʰE+D?rKi ˀ!rPlNL2k_{烆,L؇y6fYw!!4#;à`[Q Uj7h w\95"U7\7ę'2x{(Ǯ*\OEKV~ # _aѳ'lknnfO{׳w0.l6 D%k؄l ӱ͛ѻMx1!fFZ:!>R9(Є2.Tw4t|{_$]؛q[V>aFϾӢ 2<}2doT>a}׵zxxz 5>q"-CCr[^j#o E. ˈJlݎhk4!ZFޣIa3c–Te3 py8gzhB ޹)Ivv{h%բ@w_`oU+nIcVB,c6D 5Dnwҷ384p$mZh?j7}w5xf:LVvw߇Զ {bϨA="g.0֕nkɗ g·]>cÞDz%V'AIŶ\-8< >[+[*E0݆~ p8(Uje #Jݦ6$ʅUz+'q5\N4O.KZW&/D[I6ںZ{[/ "Y]pehk6K߇ Χu&lcD>adwB[ 2O^$ى~r"Ff|`M?~<2cn5-3WjXޮ?l$<|îN̔ҕфh^*!b,,!b< &#ْ < lI 4!(po"$" b@ *1)91 =1L*O$bTJ 3#l2ȿ Lȿ Ȥ#F" +CˉA3bȿ1Q俟201ߏ'yvg 1T?Ͻ*{k}GSl=[(3EJ뷚>y'IBNJϕ}3g,QTٲo?JxciM ~sqoRem&֧<ߍxSU[BڻO_Z{&YyŤWlhBʾpi\Jfޗo]ƙE)(oc`{O=M')kv;$Yѐ ލ ?Ygɬپ׭Ȥϵ1=|s-S\C|7QK?o6~1YX Q뺞ŧfֶtaZ^*ҍg/RK#:{ݸ[hBo6ݙ~S>4|zS\< 9%P,?3gBHoCʖ vq9;ۻ\ &5SY$\e k"ݾ{Q7 Y#"f~l훘)/p8{ܯz)Ta\ U4|@m gp,1SU?3ɯٴQW;7}LzS78kڍnFĢl@,j59Kx u] x"/0 ) J_6]DqjO_UVau[$Ċ,_f|luwm.Y TjZn=٠*?1GrC ' [A7HcT쪵Tzܙ(GG EĚ)ι NeVaM & Vna0 zW-2r?Q"ȣcsu[>q%{Ew/[QI>v 2rH)jn5v "|ސBJ,/I>}"d!"bqm #7m6N^)4J) eCN/i}"!&0\%-0 DnoYDI({m~V04@u 3[~a1 wB74JŲ7`']i{ { h w  yZ^Ys9қW`l-R !Ob3TL#A_꿏0_Zg :M~yK$~қOHQYuT  A! dT6? 
FO%O&eˬR nIie"QbNu|LFSӽ)@JHP.۟tvg.!"X!j c.P(*!odY"Bkrjh=ׂ"^ft8h7b/( h"U)Xq=舯)Dk6 Ike ,> _Dq6@T3~٫w?FYM98zCQrxo;0{ϡocg*(H^}"ߗ4tzHQ"ntTlZצ[[;MP(QV5M( {qb0ܽ=oFL2{{N(I *{zK?ӈ;1SXb%أ׹e}[v[coȄD0lȄaMPXosxma!Hd$*|"hϬ9L8_{[g+ l .'64=*EټHa?r?s(߶4lm\ǪA&^M7@;BP"(js֙F<1w*-33(2>Uw ࣽ zCh%UՂM9n!$\)%fv.`o A̳Up#ޖ'6^lfL#+ EGXVnbIz$J<2+A//Vqpecl+Tla.{IWҶEX ]chqjpDb!Ԇ |(B%a坋eLRR>2K7<fFk-\H/{#H` f "L g s%V߼ zat"Pga&<(#(x;=&F]~/2d "ݸ}% Z50ʃe#[sA@:"̔ADTmyT5p|fV .v\D7"WiĀp.W {a\ƧNb6>Q*(#`D0 ({:Zo?z񶰶kMPbײU0LH+m9)e%[u`Ek؀=>,>.,9ŎũCq>O{Jߥ~MM/+4b"bz; YARsQ=%%PmnFC ,Zw =J3X f=e:(f䠕M6& ~ĕ`(xF$ V^>)I}V6 AD+5ٝSwkrP2 H+{g;tv٫ϒ>QLqϗD]%A-=#TY B QtÆW@rZ~1-u@\uI\jvaʶ?,Iv(߾ndJnyB&T?{7ĺ>߲C%Wi^Tj^E+_(J~L/oq "Wi8vzH|0 fE U\@ zofD0?X6m+m#w^mxع1i`,_; h\F)o[,l}?Dy՝#l Te7%lzhuhNYgb?t /Q2/iMlD5WJy.aq`:ōA/JKT`v{t}ҫ߃o#!Ph*PP F a<[\> 7>t="2y 80EIP".qi-&5~i@i0.~ F1Of v(( .WT")FD\kdxqB֗Ir7&.nǦ+l}ҺͻqZ}pl"IDqaw.7ų` "\¤7F`<²gD7h`[N!DtCMxNxҦ_ bbOco Y S\;FVJC?zΰW63-\4M}`(j8 %QM?l2J[yt<5=cx!0"CYE Z'gB1BWAJE&fa-q?ı949upbE^畀fl7:v) ;\\M-! a"5l:] z[6"(J'ql;$#2;g$(?̀e+,_ " "~˕g)yìBoyf'x`}yz;E!CDu}S۳WFWv Kֽ C⦺(Pnxk VO([DB^*Rm?SHh}SE.e#  l޵(JXN 1MiD؆rksklܥzzm},+PÇO^ }/)DhcY"{FN!қ=`^7@v@ْ+h2&#:-_z7j'-5FzmsUTb߲)5Dj 05º?nuy gtX9P7e/XZ,XEČI.0 nQ&gBohv['=*]c=n+iD4@& `,ne]rꭉHCdVkm5J^D- 0vBE_ WP㳾|cW Nl|q?, Lea 5[DGQ#{㮰oqXvwm@,F<(!Ī Z[_gL~荦$vl-*]a"2, qNs[GwXvcI,,_[Y(oku YNkBIPxVvE mUHJe^]@ ߬|5e$5 ʞY6,MpAԓW9lҪM "9 Mвʖ,eᄗƊ!'I9#sCP@_ ; PA20HNKNjw"s+;Ggl1&,G7+uw#%M63ͦv㥀yUcDeշ` %D]2AuRӔqyrjbb^c@ n7\uitF"y1A,5om?Li29Af6GTQ~: Ƴ.+A5?&k8(B|<T\FyVCl)kP4U:"_-b%b|Y,+Ho\ (JHHDz&Qi^3xwpTڧI+vZa_7Dŝ7:HIP",CqmxrA`to N+h>,4RD!}͗^ J hsd x68,  Ale[?.QXkO@ .'j#̺ y0` ]c[yR)=::B#_e~, Ye0 Qm\_L7Û؅}֧h]R1@nѫn@J`›Up 3>{a/2~\gj?B)X~fқ}CbR]oq4DGBRs`9>H҆!F+ԁNpb\9KU7ڻݸOCDyEXZm1j9,Q!H궾 v*:֧#:-]U2:g隭ap /ǛT^`~?Ml_XJE_Ľ^d͕Q߶D xK!Ym$ٌma4 IA7"Zv#,>k0Zp8܏w}I {iaPbo`|2 .YvA»"r:Va(*uNI=ʵzFM*iƁLDV@|_>:KtN ߩ7?CUXJz#PQgW"_AzS b,xG ڻдc)D~~_֝% *cH\xqBk),لL;B܌Iy?&y);D$"&̑@J&iEƝq/عtȴo,sd4v:}^,D7 \"RttAiUZ/4 o>7 Ε]ghub4XYh# ,H!l"r;\O[$>xa s}dŔ@%4!vwY } A DMrJnًuV~#vqw6I`^,h-)'{ ܠ݀exhFpU:bpے+7۟_E JIFbs HZ7{P01k[\%DlSظtk,] "aAιKVsCEo غz gO#q) 60EMeh 16qy)RGSt_ǸdzB6zݯ=O$ иPڻm\o$$MIw]4D8[JFz}gH" <0F)xW 5ŻSRk["l +)._g/?b(HI)ouӈQZ}!;;zv0ٿ) Uw a"Vu3,]oHE+`{I.cX$Vc Ǽ΃vMR%[gNdBHҵ]}#HPܡl,bf0zcl, aR#a\)8E`Eݸm1N8_NT:~zbX$$ng$EblTui9,yV[{2'##ߗm}TOi08mQmЌQ!3(Z -77hi L/n3q3"& 9=vY1 @s2sH_؛O>7 ͜RN$"rEe\kg`L,FFC@b1>t#y~9dQ.qbnm/^Aۺ#JX1v("/LUao\o|j`Yed{7-},":{^0w:fDzsƣ_,s>~il*_DL^"(e$q7GaQ_'Γԙjl">7p.#bDcSʦ^zv2%DǶ}vlkƚLBց4|.jXgy8oFCH-IIYԃtp0JlD56OAw /aD#]z -,HDNL -&ih*W6_a+aDY0ZAn/˭ll|Q}"|M槮>xufrju090GJIs7BAy`>jЪ?}`&_?@Qo㵇I@!&d^aoof{<Hl=pä8;kڞ5;qQzѵ]CM/FXoث߆8lr| \1XRDdbrf19vnjE "s.R5rI(mMHBZ܁sve7=^q+ bQ8) EL`䆺nmS)&;2vAU̘+kvswbhd'ݧY.QovM ,e ٕ=Lsuh#\j;<X`Lqq$5N K;&fKy謊n9uLڜ@az I!2*9*$[i_y[20 F%*ii)y$ځ1D=E[eGBmaۑo˾3iETi"TEw!O՝#Ӌ$)2Nꍛp FM\e|{2_ub-1 Fv &3l*Yߧ]SZtD%kw]i'LHEyA5ܭ[.%xIzY|mQM'ߗ6SYTT$Iwl-S$nyJ[#bA< b>p{`1$Zw+q9z*mץ?fz(q19 0bh>)n1*(vh*%a[jݞc`\%]L"[D @l z@hFSIAvVc~,\e0Nƹ_~龎BbTIa'E2UFeW5_cjCnwbA7G֫tvYa (%Z=' g}⪆B_OY- /b~3@J״t YSw)+@sa[^⧶?B+މ6q-- -CKNjOShy'Fy5Ӡ󔀅 [&7t xQRq{)`a88|qΑV3%b"_C?|b$xj%O4sp GL^xw"p5sNIESk'݂ͥ?ĭ1:B\H/*eڽ/?߄]m-a[PBad.3=L[$)9ONcVZQ #|2:g݃: iH(nԊ}%+1;~wi?P eQ'>Dbhy9PW9uW3Yet32vgK!:w7sWHL4 ɔ}J )>F~ oa=S3DsO9Fh!HH谿>SC3HQ!n)Mo~qHhsJ7uB;Nr8s -QcIh`n:,qMOE_LoFKfJL-L94eEHJe9*s2:k^O;/?x]ͺPm7彳$4#ZI*G|T<KA7K3W~BhWﰻ0'ƧnD'ɥ=VN Uoh^*j,jgĢFQ%#RK{9yQDM.IL>f薯^{ERiw{ Z@*-݃@Sm\U|щSb挏W>86[D8!B,wuԲ^<3\I d#Pt6s!<s]ǮEd#Þzu!ue0mxԥgk'yulEbߌ|W7'[`Ba|GFd׎P:K 񿑴kCc FǯEd ,66y <ɪa+)Dm%"z>S7J]\"k T+A*Zfgc ZDv_ Ϭ!^aiߛmebH$x0nCxޤY d`{Y '?sWma r R3dzf_и8h–ÞnE@[9MŠ!$x&0KX ƻzId,44 vң!园qhEPR'n6 ~Uڊɹu;,Fd}m60j1c+0 ^x^=LI $d% 
Az҂E$޵mTm.#ydty&uRv!\BXY\!PP1ҍC_"bfXlƿ sȽy쨀<, ˵t{_{97<Zޏg֍?BgB$LQ9L2z{Y:Lݩ x=YK*陵D^J/bF :)3ÐCnu$f>N)A?ks |Y]Rk]@&Y eE2~MҘ5Nٸ}}:kM㋺Xk'㖡'֒0-C]|WtM,RZq}Ƹ@m_E%qlO"/aշG3L)AAo2=4 ka]IQ'oXak :8Z@R~W='+N lP<}oU>}{Qy MN$MYT$ uT(̴$2 ._c8vTPڳSu ݷ)=;78g7b?A-UVh<<ѳfoۤ'iCǼ嚆T%-C$ՃpTCZEh6sW9ގͱT'(B~=S0z=*];ҀAg$}@k_r.ӵm򮓾/gIo 0!Y8`SCx6lI:IL-sL)aգBvWc.AdH-l##AY#}kЈ&OZB^W (8v3w:"p9X6K1@As~T}qݘcףf3sR>8{7bB-LdvB}$F :C+_n=jwf|b>v-HgSgf ͥ}/6>mDYW;%}r[E# (72F6$ڶNWC?#6Xz>ȨSFjd^FKuUL?-Zo, rG"^+:qIGwZg,ErC] ipTz7LFt'I)v6r%p=:"k}iN3AicIGii'&*5 l7;'M\n<;LOD^n9}Qd"`m:."+8 &QbN?0ɠ7g_S="U&粒m,:߷̠#1<av!HRTK1}.e[ .""53->t1"&dc2L#1K<!sUЉ93ңo 5-<e71.Ҷ5ߩPctIx! NUP~ k;t1 .m s21kQ'.n%a(`ȑ.Rn- I\|XF<}!{)/<`bBpֹ`P :K6Xz=jnzug߹j 1NWC`NaЏ3hYi:sд98& *v LV{6$D@k;z0m<ȃвu"Ƨ ON(5U6$ݴ? 4x!"N!1I|, c1i=_dgx/{7c~r/78PϿjs뚉suEI㐖1~iEQA| ->\wjTY2[o)t L=_ *gEfCV]k*&ȵ;N$WwCQ{7R^Ԣfǃ)1BYs 1ѵ=s;9% 8~Ca+ 6>o^LfuMU39{/f\de r1%_b9AđJNvh'8dn}]w^csOC+~C?-wldsD/ 2#kcrNz ʼ'i>ޕ$UdH݌/f]xy@dgT,LZ (+C+4yQ/w3,`}-x@HBD/|B_P7];\}#@KDsu24K~ǩ? |T1=\5*\abxZK*C/(9AK˞93eOiy9O` A=M#?"@;MØ vs-78X6,j"Q#?V6q+)0[ !$"ߢ~TZ'sUFg70Jl@ט{=-ɧ pYR8%Dze hӗJ H S-xͻaN颿D`T.hE=sǀ1u޷&V[wRda/2l`=?tqqM{ʮ Ǧy$VFiŭ$Kpŗ~~=.!X0ˍ(K RX՚6D1UAHf HX[݅^lNNWhċv R9GН>EjLQl%cװV6m'l>Z$"Mwqn8erI)6DhsJثaNNp z(TgF;ߤ) cw^7lwD-A#h9Z:V|}%m~~i[ė-вP1!/aRܟ>#YMzd(7=ێ'WOiW24w1Ńs5,}[m{+IzSȻNr@RL?x9 E-ylԠ}WOPma7k:D|HNeIxQJZ O zPh9?xL=ѰIr^v'OPM1%g10UGl;tYƪ=afpMvsHV;=nk^yQGBu:fj)ɊB%5vz |](J@6ٝgtV9"Ypo261 U2]:&x >KNN1 S:_2sUKhťh:غ?7hP02*{,0 Np>2TO1i)V\b`F *s=/Ŕ.`. 6 | FwOm B As/VVŭ4vٱ8g4aOB;Z8 Vr=.y:(KAf~sBE]S$wfY0VVr&(['AriXew<)nqGEdq%~ pUqd{WApٺݶUdUymn$V3,z*]jqK9Yo"2d\6\l~;$㣘%g^j-x4 C+t|МšGnr5@:_ =%oaTvr%D`G2'dOs5o4,yXlӉ5l޴rRCsQyMן />_Kb68 wz[6>I3W_y0sŵo'.,@`g_בò &ds~/Pby/2m+^bXݱN>5ɍ3.'Բ0PǦ`~%~;HYb&_HdRAC?,FxU(mSHVj,}g{=dWͲbF)0XwGHE>uM!zE/ƥl_$kuR=]c*^NlLꔏ0h_|Z61{z@n5>XG4p /]L1ne {/E=f"aq*sJh3ۯf]:!ÂN럷T!.%\_,Ypf=C# V?.]znarsz\[zڙF}Q~-Yjƃ [-PiL50k:ȭ[oZ;ZN>([#.fW1.xQv϶A#$XPV'u7o-}3: /6<[3MzzӃ' tꇮg7Z\T2#jV3> ݟW= d~Oߔt_{DvQB}G# \~e5ȥf6*VKG.Xڟ`U3= sb}}sA;:>3aWsJ@3nHk8tu:thn\*ntX5yŭ"<`h_󴈡;F0͕5w>Sҋj&P]l y/9T2:=2%'KXi3at&z|,l)X%+g|#'FFm=]1cv>^j.x9B! .aZڜ.VdooCC&GK]HhZc{7wXrĖ'KԷYLN'譴ʽCiFۀ-9 IogȣyQ #?BpK֯yZQeM٤NȯS2F^qlL>g7ӻ=8tڟXD`Krb sD7?˟e{.%6h؇|61GLT,uSx]zt=C.>h% ̋ g:ߥ̦|}WB3cDɶvQ9M+Ko^ZvcpK˰Z6EiB:'OCT-[Rfe{|C-? V>mtQY>KzQϡj;~*iLUŌ?N~ħ%U 801WKwzM(h³NOQ=' 7һsiXʬd}JߴoEp:Ƶh!RrF9˯ I캐؂WΪb{pje@r;=})~OOnEڼCkHﲻXx4$_l˳27|sfwJ;y2>Ș=E;Ga.MB8QIW}RZ`yu|a\i;fYҦG?@$ L/a>5pnL+̮u otzRƼP@}レ'i|3{x^⣬Ww.>Aѣp h; npy,Uѥ&>? /&wx&u~1gO ;Wh'{1h??dIɛ]Miy<,аgV!=׸o\_ {y퀐4goŵ<]x%IHlKd|3{ 7q\w*kSTFy۽ѿUTPGS /mpv;@r \$7[ԓr6U/3maQ;cMs^|GxK qjQ <*#JrΩ2fA,s`Rª~Yf$VJ`^ZJx,ͩt:YНa{޻'kH"j>I`4ih|rHB^Lpcufj뀜AݗS:E[ZnJYl|3g|̓rnǥsN8Wѿ`%F BOCC4WZ{i K\ﷃJf?E/FwG4v .~`0*%YaXgb5#L'D }>L*jYC _ l8ܳEᩫ&d9{2VUǕZg^_y$F`qţϚH 3ޥqi59m|چiz c]&|J?w%Q54Cxĭ|WR{\fVX Z ~hܒC(KY݃ܓR"ɛy"8_6^h5qȹbQ̜pnm/h&B?%}DqϿnV,#WrRmb1'?gI+|B L*mT -eگ*6pJNw(بn(qEzన3l19rf^)]5&5"~a8s){4F-(eZ^S&K`+Ż>ѹc$oOz|&u)~; uw]Nyinˬ{xF7o!?p J.뾟5#x90cAh/CԚ!CB[Tqm $^^Y>N~Ȭ &~T98Va!c>ieY⼜GE:F7I[Wzp)A+㲗>3W5YzW K(mga>봵G=IsNOt&7jZZB W=^/J|#f_O\Ie򉎹QS#zcdsqc Nky2qC`/ZCM$D8BXxLȈS叔:CfͫUӷX^ 3_J7?JZC$^M/fHN1&3$="bB~XύsJeFˎ~Bۘ5;wrf6M&P |)Cɠ4Q9rTknJ+VOJ"\jFɿs,[rtھmb>oh"i9pS`7O>B^IsEKk䟵oND0ƼVԽqtq#5(ii)! 
ID oGgWxڼ2R;fA9рcA3*i*Bu*$tOeHK?}jaK\#XAŧuEګQ 6^&~1 zP{$)Xw`Qاүߢ\`p XӹvEI&Y.u^Nym`O&Xi9ƭ ΰarIEދuzy&NO)̨FY~QyG6^n<4aJnTRe*y58w ?huvjzg(q_, BY7IHq+EJ6 d_˩FI)!4 dGx]5_-˓PE|XTk,hɭ|D3^L#M"7bY'|a`O"1, }̀=G'۔mu9/UT >RP}"ɜfJKP Z_Py <0BjA?~jhzɶG5W lFj`ۋg`ׂԂm)H}K~ ҏ' i/q8٩%^u`5M>`Y;eXAX5 Z`pG_ >_إDZ؉xm lZk/:+'MuRS@Ը`>c(zZ>JšPg +;JnA k'DqEOuMwPˮrqs譛U<*iۗ0]c 7c9vya"ņO硞eVw\a-j`yX1$r.E/{~a)%ImwH2y;T"nMR6Pxw8Wyج l%@֑U8mdp;}]o=w|f?P:^f1nG.;tͷ y`GrG5.>0V3#P$aI?=w{$ҦK%W(n[}$&.5J6c:h*ZU=[L|zqwjlf1lsn~6q*cuVmopd+d4xۓ`OK=@ҼK7xZAlA&>u&U1l~^l錄^yb"HiZBؔ=}`>3B73#lΔdJ+~0]jAuz}’GN9oas|=cnXBnXsc{ GO]pH9+IMEM.l |"GO3 v_nD v͔jJ%br5^2V/?Ne!3wsD2ȟ]{}_G'v ss N?TrS]MtqlsRNod^ x pOC}}S>yNt]^Mgtj ]HV|7GfdRdñ,2zW%'f~m¸c7~Q#Jg.\stO.181OH|^Apۅ9"̾[Gkּ*˹ik`[ QP9clniLOwi%GϜLwdVЭ@+漯3@,#&"6!cnaYU}KW菹Y8Oj]up3.)C,?SiQzް/xM~ϬgMnwQIٟJ*6w.4Xw 3rcM1٧Tjmܶ[Gl}DF \0bm{o@ț)sk{'fS0fgz/Q۸^MFI׾=?Y]^"Ok_y_@(1>d!׵t~YCDN[VTko;X;XИ}paܨd:'5Ɵi2>r򬎡ɥk6.1G&!wdQ2gʤ2$|qq"$S_޾z'u:"2 - vqh M]#S ;n}^DĞ )ީ2Lz#/5wRzGU6ZU\N,XXRQgfoLȲs 6x 9 _bמI=bͅ陖=-QP踄䴏9>~kjYXKHFج{xE%ds)E=҇p(O"7d͗oLUax>a=gGO}B߼}D^ ߀דSQSW `ѿj^:a}KtzV԰ԩZ&fZrprqzUHۘwIЋJ+kZ^ed5}ڵIrp[^^nj/ %NO?Q\/$"&qX戼 Փ5 ϛ[\i; {ǫh &s-}&q &U9,bfڭ`'pRP5 㛸.Vϲ*^J&u3eV­afWͅF704mĤK {yO| 13ϤFw(Q\mQYpO,E(+'SAQqiͭݽ#c'&~#͖1"J3XM2erХ\pgDаȨ I)32s?~.)khji40({v Z+ǰ9폢Vmnݺ}ន}ׇ=?yW!{ ^QԶr P5L!c]{_\G {n)?|BUMƩgj704:W\捛vP!c/4B,jt1L=33s9ݷ_@`!aaу:,-#++w(H䈚Uh{v!Kno\eAg?q  IHHI6lظr&***jj͛ihh|Sv!(sM2^9v`afb۲J!=;n"];=U_>m;wf=5yj޸)3#6lDIq)1?g'' vw5%D=vbqTdѺ0?>=>N]ES?ƿhkij(+)*HKI|zLbn&jɾurSkVyēP74#KyYIqQ᧼̌1Qa^>};7J\lΫIݲ]TGw'ol;?G ~cG.NܲzX_铪夯ڹyp]N͹CL51;G{tN]{g⎳v7m]lnzH_G)u GeExbgaL099"ߖT߫,s<4Kyr^-}mЧ}쩓j'+?(,ϻwNVmxϹFWbH2p Aki.pճ*v8Dxrr0m"'#^f~ǷO)l#z芪 ͷ[j;3+l,jKdWa00-&!X8(#mMu%yYic"B_=t굇򡲨T]6z' dI%lΜf`ct;6:m U%a^f& 7g/jj(Iٽ(U5ꡂ +yd#ƒI)F_PT_c y۶ׯ^21=wJU騴[6,N}h:{hϖ.a._+VwEI9sNP<*#)&$ùa Fsc]-%)oC^<{|AWGY"@9!S|N6rC.;[ñfS߇z;j+>}Ly6,'+]啙|]g]"7VE78('GsS_>uj {f۪>Fx':B^DmS`0=:_b!;@cVHomuIѡϽ\nY"pLВ-~i(zޞoI7ͧX>:,?W~bdWkv6}d)ltk'Z*?}~q}2_ 8z$3pgjPhyT˙ɱ`MWCIZ}%HWw{Mk'3+n|F^t@ vG,[B6cX>m Eiޝ Tz+R]W?"bͷחgݓ^~Z߃`SG/ 7YgTua}IN V}|vT@8^>,@Uxs'rS2X?j ݢ73h:7j$`![39QWzY%I m9C:YQ~}'܀Cv`τjm 9PpQγeb8庁) saͶtnKnkϟ٭U"5ք+ ?ڛ͑՞Iq{6L^\DzW\o[hʊpA ӳ[eF+ }PN%.ga6z8ߵ<FAu3dׂpog+C5)^Fxk4 R/Vm’6Imԅ5Y|>oyi7l'kGw-/h*I#l*novNAdcׄ}U}&^os`x+{joDk,H{`ua]6..!왳4?+@]˂-[<v4Ԑ; 6Z~l.ݿc׼,uYzJqGΖgk[(a|ukPnG,͙gܿi^L|,yq/X) nI"(xp䓃;SZC#'FTXP)=u财8b`*eƽ{Eӆپ\?.N9әHUs$U3p7G[`Yw_{{*ѥi-RY{ܳ6Ꜹ9z+<4:M8{і9&Qgnɺ)ܺp!}Upken"p19sLs+dWUvK'8i~dE>KyhM]t0ͅvOdR Sz%7D.[ #庉ކ⌸ {F'pl!̊yjc*w˺_3%o 'o ٘ua_*%]w~SJ zl}oȄ<5InzoM\hh4o!Ps` P hsc5KY=^Ny\E\?FD[qR CH;Jxckm=>Ǧߜ"&yE{jRZ+/G gĀ"ptgD=<üLdSi۝W9b*IٓۚNn*vХ)i~~ld2Y0[65w9RyLOHfh}qQo{}qxu$K7 Ut6Yulw?DK\WEZc+@lk;_9wh>[2%U^* ÌSf&J{SrM'hFi ,JlS /ǧ:Cr$2p&=92V"Ü؆ ڂ7@j4}=RMt#-~D^6!Tv|>QZf Eف:?)y 9Av굣)a^/F8֐{T`{;Y*?mjsk-pҤp5b'4\i1VwS%&J # ӬZ`q Ʃ WK-9-Yb?4t8a{u$--B9[:r`'-x{(zJԋՙOϟA=D`>:}K1`9PyD-ohmJRde!' ~Ipm%k4bOMM&|I ͼ3co~. 8 3wmM4d1Mv0P:B!'ԎG7}}1c?hΎ,%By5zS okdz`!f<9ygڻFIJbwpqyCJ2 )EݴF󃫕~foyQO [",Uh\I_̰?8t/TL7IY|/Jyd"xk/{u=)AKwQcꍳ *`h 2`3bxK8V`(L]10oΎs񷆼h畅+^_,:gug})J#ν#6ΜOuC `cIz &Qj(x wQYjri#(}z;pN.87D4K lo)y|o-i} Dc¶{֞,}q7$=Onm bJ*y{]m/eㇸHX* ST|u{oۺ/)W$6օ\]_L}uy.e**</&X1kc|U}}{O A7=hC~=s T3텱>,#iUzoJU"8N|1bM.s2>u4T`^3R!Lć\Z"r" ~"zru">yR#ՊIL&D Њ )"CBboX6{"W>Z|^LA'?rU"^d $I\"Qă\g"Q.${1L"CL"}i!"Uk _Hb1" x҆+EE|UxՊID*$щXE\o8=$" qD%'$"dO"~䊈yIx\كeCIxD#AVMݐXDE\dRڨ;EA"A&yC‹6$D#v!oH4"f w4lv QҞF6tAK{j O!O 1HQD4(*ي3HDيO".}(iËMET>H"FDIk&'4[Z卜6$NF!q$E6[0Ii pA"lI mi{%H *b3HAѠbs A DAy#웥)PD Df.H "EĦ%)oa6#\5?F D W":tYlBݔKA¥ND6!\'&&CHtA‹ȥ Ͳ# '67 p$BA1 H ыh YN{|֣!W^ rIj}}=:r9_r|e@ad7|p"-+崑D,l RKiѫuuQDU.h b«_ѓHAB"4p_HX`V+ 6ȯ+C1彔6ɭE F.罔63I lMM 6r%Ⱦo߾2 $ˬ7ђ ! 
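The first recoverable member after the binary data is test/test_alignment.py, which exercises netCDF4's control over HDF5 object alignment. As a minimal sketch of the API under test (not an archive member; it assumes a netCDF4 build against libnetcdf >= 4.9, where set_alignment is available, and uses an illustrative 1024-byte threshold with 4096-byte alignment):

# Minimal sketch (assumes libnetcdf >= 4.9; on older libraries
# set_alignment()/get_alignment() raise RuntimeError).
import tempfile
import numpy as np
from netCDF4 import Dataset, set_alignment, get_alignment

set_alignment(1024, 4096)   # objects >= 1024 bytes start on 4096-byte boundaries
assert get_alignment() == (1024, 4096)

path = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
with Dataset(path, 'w') as ds:   # alignment must be set before the file is created
    ds.createDimension('x', 4096)
    var = ds.createVariable('data', np.float64, ('x',))
    var[:] = 0.0

set_alignment(1, 1)         # restore the library defaults afterwards

The test file below verifies the same behaviour by parsing h5ls output for each variable's storage address.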
,"&<7#\UdBa"PDT F..%D"ʪ\&[Z`ywJpk`TpaB$,1LE"/7VDw9D%Bp( OwVFlC! qA+^]FM0L_ oBd(" DBL q+li 30pA+ nPl.f.he" DwIQ\(50DG%" Q:\@#"ϰ Z"(,,,( t(,O0ȣu.Hܜli8+Z~eeVF!((\ Z %.'pA+n|e/#=-4<1rPxe3p= @ + A a\@~q0 pA+[XV_<(Ze lr()Sq((V\@J)-<(_ LA|!zaR`ŃSeqcAXt[B T\^bCAiB ڄRVDAPa!bC ‡^DƠ+!h%o)@"4>PD BDQLG䌗@EҖ !>ĥE ADDHUD.y bhDLZ/$d)iQ8PD "xPT!r0qDr"q==ADzOVT4 6=?P+!B;ADՖ9HD (DE)_"\Ow7&>DHďE9ĥъh=@q)id[1H0~(ƒH!(C𺻱Bqy]]{%D ~nB4 r]NTZxE uA=f9ȳ3Vfj";Ƌ!ns!J*$Oes~,Nj[tDHG;+cyٷcڜ~g%vPl/Nxx"n Uk*p=]>;rnc} *;$e[PN%?zN˚gwX)b|/C?=^h& 9Cw8@ۓrCWiaOϋՄ ˾ƈI/=:cfFlACMê/(>?0w?2aTOy><~zgrM%JdU'wPql l,|eo,cgXOmZ!4̻Fk#lo~<~]e Uqs6?ۻ&xΎ nwu8֨{'QF_Vb}p晍Ժ{K5FZU:sTxѷҴfҼI7 tϵ~CgSo|wF핫:hUB>GZ|V.T0SKu養8-a?jWϨqP9DO5UZ;Ǐ\4KgĿ 8GnH<^湿 dweFId(FP8b-i0eYFN=K ќZۆ2PM&ݻrV #ُ۶G2~ˉv0<»q =>*죅gD(ez\UX{cz㡾u Սd2; [ԃWa=z{P_f7TGQXY<ˏ"TzRoz\酽Fx%(3%|m*Iu=*mt_Mvq] 3y]=W{Qog]QΔ[z|ĵю C VUZJP6=Lpg@;hOcȈq:&U$jjih)8TF^Q圪tTdz#Mcx)ace:^^xp87٦栞(KD[f'eb} ;+'>?qm䥣[fzLd&J^y Mkc 1Zuy o\0@kkh[qc%9l-u0qCҘp] 'UGh@շ5KTr?:֛ǤXNT}f#τ*qWc۩[ ewZɲm-~lu?^hG9,ωQG_6PBGzf&!l^;JrlFh$Ki D՝m*FӓqXhMK_7z(TS,M;;q:Zt„XE)m_Q8Q]U,Έi6k#/߰5RN3[sۚ=sJb`##,?JB.a.eOTe/5>ʧ;Ht||nw5DL7->ݥIZodIKWyitvxb~Psij}gsce)a^vFr8 /P!b+rlתx7=ݎ~rTbQxN3_e}O|r7 uQ^&o^).'yeoy#U[̣O/)nxf>^b-y I5-/]w, 2ʎ  ^=ѕg$y~D~/RqS_xLT -7}E/;K_!P7+ˈs֗>sG[lL ~KH_r`aU/=pk;RPw1hM{HGႀIa>W&ıQƄ D_k}DŽnR珑I39r~uE͋YWEoo;FcyϮ4xWԾń 8fLt5;.1^MGyѲJShz옪pPĎ|O뷊$vn1I_jFaBAfҡOY^6v{dĹ-}rb. pt>eQ}>:/EߟsJ_aF"luDM3s]ΊƄ,d#J朗O:}`W5yI[LeHR7>v5¯ On_:ji"PQh+2<%WR~~h#:)^&43ͺzMRM 9h>WvXӼ7V;^{J2|+F.]W-H xδ;ƻ)f0;d{{[ƒFʣBe!I ׶6d ۭGpl]B+j$Ȅ=9AJ\_{D0¶4yV7&x P&2X,=p"Tf[;R2m%Bcq!كLH,BzIX 37C|cSð'ڱ֢@W3u15yPa]G}ޛ`δ<:7Q}O7&HKiSK&J"3c/x+ge }e٧^>HY|978jS۫~}&@dw6rS1.;hW6$ܚNv>}QpWW?EBrAĭ,嶣b{Fq"UhO։.hw@}vF3tte6<Y3h?8)4YpTgUơ+4}3WD;ki֡GOܴ5Ri, w}}v:i mV'B$YGݗN%Y-#?ȿun\4VRp:c$uEc2yϻ`c|k{siwfX+#H2ݿ }~'=k'>Du ; n)rؓv"CM,828S˲Mվi"AM4(gzX sΊE>&:˒u 9hS.0g}Fo>u]luhlJC0G7TvYlm"hVE³$ "{ l#\~|48^fҏ =>u<PX sL}~wn:*tǵ՗Àڃ2;cuGc utE辗E}+p\7pM0z^-KՏv1Z6 ѓPW}"7YwNua @S'T_Bңw-<'yCh_׽C4SeV{v/i6 u0;as $F//Q}Tjf4_ja[{c~\53uqBهw-) ] $L&3?C$& KѓZkX۠0>po3WٹWW W3Ղp7i6 W0Eِku{Ë*:G1!r96<&a_eZ +Ek[+^e6ԟ5rlIV&zd²3BXBzy!6ʟؤ585gR1n'-L7!& u!> =39-Y܌0ۯ3mClKH:D}qJf;Iw^~f8/S-t745vDT/N/u3Xiڜ}mTmůc+4%/L?1.kƪʠ3.~caOK&ULt 4) or ( libversion[0] == 4 and (libversion[1] >= 9) ) try: has_h5ls = subprocess.check_call(['h5ls', '--version'], stdout=subprocess.PIPE) == 0 except Exception: has_h5ls = False file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name class AlignmentTestCase(unittest.TestCase): def setUp(self): self.file = file_name # This is a global variable in netcdf4, it must be set before File # creation if has_alignment: set_alignment(1024, 4096) assert get_alignment() == (1024, 4096) f = Dataset(self.file, 'w') f.createDimension('x', 4096) # Create many datasets so that we decrease the chance of # the dataset being randomly aligned for i in range(10): f.createVariable(f'data{i:02d}', np.float64, ('x',)) v = f.variables[f'data{i:02d}'] v[...] 
= 0 f.close() if has_alignment: # ensure to reset the alignment to 1 (default values) so as not to # disrupt other tests set_alignment(1, 1) assert get_alignment() == (1, 1) def test_version_settings(self): if has_alignment: # One should always be able to set the alignment to 1, 1 set_alignment(1, 1) assert get_alignment() == (1, 1) else: with self.assertRaises(RuntimeError): set_alignment(1, 1) with self.assertRaises(RuntimeError): get_alignment() def test_reports_alignment_capabilities(self): # Assert that the library reports that it supports alignment correctly assert has_alignment == __has_set_alignment__ # if we have no support for alignment, we have no guarantees on # how the data can be aligned @unittest.skipIf( not has_h5ls, "h5ls not found." ) @unittest.skipIf( not has_alignment, "No support for set_alignment in libnetcdf." ) def test_setting_alignment(self): # We choose to use h5ls instead of h5py since h5ls is very likely # to be installed alongside the rest of the tooling required to build # netcdf4-python # Output from h5ls is expected to look like: """ Opened "/tmp/tmpqexgozg1.nc" with sec2 driver. data00 Dataset {4096/4096} Attribute: DIMENSION_LIST {1} Type: variable length of object reference Attribute: _Netcdf4Coordinates {1} Type: 32-bit little-endian integer Location: 1:563 Links: 1 Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization Type: IEEE 64-bit little-endian float Address: 8192 data01 Dataset {4096/4096} Attribute: DIMENSION_LIST {1} Type: variable length of object reference Attribute: _Netcdf4Coordinates {1} Type: 32-bit little-endian integer Location: 1:1087 Links: 1 Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization Type: IEEE 64-bit little-endian float Address: 40960 [...] x Dataset {4096/4096} Attribute: CLASS scalar Type: 16-byte null-terminated ASCII string Attribute: NAME scalar Type: 64-byte null-terminated ASCII string Attribute: REFERENCE_LIST {10} Type: struct { "dataset" +0 object reference "dimension" +8 32-bit little-endian unsigned integer } 16 bytes Attribute: _Netcdf4Dimid scalar Type: 32-bit little-endian integer Location: 1:239 Links: 1 Storage: 16384 logical bytes, 0 allocated bytes Type: IEEE 32-bit big-endian float Address: 18446744073709551615 """ h5ls_results = subprocess.check_output( ["h5ls", "--verbose", "--address", "--simple", self.file] ).decode() addresses = { f'data{i:02d}': -1 for i in range(10) } data_variable = None for line in h5ls_results.split('\n'): if not line.startswith(' '): data_variable = line.split(' ')[0] # only process the data variables we care to inspect if data_variable not in addresses: continue line = line.strip() if line.startswith('Address:'): address = int(line.split(':')[1].strip()) addresses[data_variable] = address for key, address in addresses.items(): is_aligned = (address % 4096) == 0 assert is_aligned, f"{key} is not aligned. 
Address = 0x{address:x}" # Alternative implementation in h5py # import h5py # with h5py.File(self.file, 'r') as h5file: # for i in range(10): # v = h5file[f'data{i:02d}'] # assert (dataset.id.get_offset() % 4096) == 0 def tearDown(self): # Remove the temporary files os.remove(self.file) if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_atts.py000066400000000000000000000232321512661643000204450ustar00rootroot00000000000000import math import subprocess import sys import unittest import os import tempfile import warnings import pathlib import numpy as np from collections import OrderedDict from numpy.random.mtrand import uniform import netCDF4 # test attribute creation. #FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME = 'tst_atts.nc' VAR_NAME="dummy_var" GROUP_NAME = "dummy_group" DIM1_NAME="x" DIM1_LEN=2 DIM2_NAME="y" DIM2_LEN=3 DIM3_NAME="z" DIM3_LEN=25 STRATT = 'string attribute' EMPTYSTRATT = '' INTATT = 1 FLOATATT = math.pi SEQATT = np.arange(10) STRINGSEQATT = ['mary ','','had ','a ','little ','lamb',] #ATTDICT = {'stratt':STRATT,'floatatt':FLOATATT,'seqatt':SEQATT, # 'stringseqatt':''.join(STRINGSEQATT), # changed in issue #770 # 'emptystratt':EMPTYSTRATT,'intatt':INTATT} ATTDICT = {'stratt':STRATT,'floatatt':FLOATATT,'seqatt':SEQATT, 'stringseqatt':STRINGSEQATT, 'emptystratt':EMPTYSTRATT,'intatt':INTATT} class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME with netCDF4.Dataset(self.file,'w') as f: # try to set a dataset attribute with one of the reserved names. f.setncattr('file_format','netcdf4_format') # test attribute renaming f.stratt_tmp = STRATT f.renameAttribute('stratt_tmp','stratt') f.emptystratt = EMPTYSTRATT f.intatt = INTATT f.floatatt = FLOATATT f.seqatt = SEQATT # sequences of strings converted to a single string. f.stringseqatt = STRINGSEQATT f.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING g = f.createGroup(GROUP_NAME) f.createDimension(DIM1_NAME, DIM1_LEN) f.createDimension(DIM2_NAME, DIM2_LEN) f.createDimension(DIM3_NAME, DIM3_LEN) g.createDimension(DIM1_NAME, DIM1_LEN) g.createDimension(DIM2_NAME, DIM2_LEN) g.createDimension(DIM3_NAME, DIM3_LEN) g.stratt_tmp = STRATT g.renameAttribute('stratt_tmp','stratt') g.emptystratt = EMPTYSTRATT g.intatt = INTATT g.floatatt = FLOATATT g.seqatt = SEQATT g.stringseqatt = STRINGSEQATT if netCDF4.__version__ > "1.4.2": with self.assertRaises(ValueError): g.arrayatt = [[1, 2], [3, 4]] # issue #841 g.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING v = f.createVariable(VAR_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME)) # try to set a variable attribute with one of the reserved names. v.setncattr('ndim','three') v.setncatts({'foo': 1}) v.setncatts(OrderedDict(bar=2)) v.stratt_tmp = STRATT v.renameAttribute('stratt_tmp','stratt') v.emptystratt = EMPTYSTRATT v.intatt = INTATT v.floatatt = FLOATATT v.seqatt = SEQATT v.stringseqatt = STRINGSEQATT v.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING v1 = g.createVariable(VAR_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME)) v1.stratt = STRATT v1.emptystratt = EMPTYSTRATT v1.intatt = INTATT v1.floatatt = FLOATATT v1.seqatt = SEQATT v1.stringseqatt = STRINGSEQATT v1.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING # issue #959: should not be able to set _FillValue after var creation try: v1._FillValue(-999.) 
except AttributeError: pass else: raise ValueError('This test should have failed.') try: v1.setncattr('_FillValue',-999.) except AttributeError: pass else: raise ValueError('This test should have failed.') # issue #485 (triggers segfault in C lib # with version 1.2.1 without pull request #486) f.foo = np.array('bar','S') f.foo = np.array('bar','U') # issue #529 write string attribute as NC_CHAR unless # it can't be decoded to ascii. Add setncattr_string # method to force NC_STRING. f.charatt = 'foo' # will be written as NC_CHAR f.setncattr_string('stringatt','bar') # NC_STRING f.cafe = 'caf\xe9' # NC_STRING f.batt = 'caf\xe9'.encode() #NC_CHAR v.setncattr_string('stringatt','bar') # NC_STRING # issue #882 - provide an option to always string attribute # as NC_STRINGs. Testing various approaches to setting text attributes... f.set_ncstring_attrs(True) f.stringatt_ncstr = 'foo' # will now be written as NC_STRING f.setncattr_string('stringatt_ncstr','bar') # NC_STRING anyway f.caf_ncstr = 'caf\xe9' # NC_STRING anyway f.bat_ncstr = 'caf\xe9'.encode() # now NC_STRING g.stratt_ncstr = STRATT # now NC_STRING #g.renameAttribute('stratt_tmp','stratt_ncstr') v.setncattr_string('stringatt_ncstr','bar') # NC_STRING anyway v.stratt_ncstr = STRATT v1.emptystratt_ncstr = EMPTYSTRATT def tearDown(self): # Remove the temporary files #pass os.remove(self.file) def runTest(self): """testing attributes""" with netCDF4.Dataset(self.file, 'r') as f: v = f.variables[VAR_NAME] g = f.groups[GROUP_NAME] v1 = g.variables[VAR_NAME] # check attributes in root group. # global attributes. # check __dict__ method for accessing all netCDF attributes. for key,val in ATTDICT.items(): if type(val) == np.ndarray: assert f.__dict__[key].tolist() == val.tolist() else: assert f.__dict__[key] == val # check accessing individual attributes. assert f.intatt == INTATT assert f.floatatt == FLOATATT assert f.stratt == STRATT assert f.emptystratt == EMPTYSTRATT assert f.seqatt.tolist() == SEQATT.tolist() #assert f.stringseqatt == ''.join(STRINGSEQATT) # issue 770 assert f.stringseqatt == STRINGSEQATT assert f.stringseqatt_array == STRINGSEQATT assert f.getncattr('file_format') == 'netcdf4_format' # variable attributes. # check __dict__ method for accessing all netCDF attributes. for key,val in ATTDICT.items(): if type(val) == np.ndarray: assert v.__dict__[key].tolist() == val.tolist() else: assert v.__dict__[key] == val # check accessing individual attributes. assert v.intatt == INTATT assert v.floatatt == FLOATATT assert v.stratt == STRATT assert v.seqatt.tolist() == SEQATT.tolist() #assert v.stringseqatt == ''.join(STRINGSEQATT) # issue 770 assert v.stringseqatt == STRINGSEQATT assert v.stringseqatt_array == STRINGSEQATT assert v.getncattr('ndim') == 'three' assert v.getncattr('foo') == 1 assert v.getncattr('bar') == 2 # check type of attributes using ncdump (issue #529) if not os.getenv('NO_CDL'): ncdump_output = f.tocdl() for line in ncdump_output: line = line.strip('\t\n\r') line = line.strip()# Must be done another time for group variables if "stringatt" in line: assert line.startswith('string') if "charatt" in line: assert line.startswith(':') if "cafe" in line: assert line.startswith('string') if "batt" in line: assert line.startswith(':') if "_ncstr" in line: assert line.startswith('string') # check attributes in subgroup. # global attributes. 
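# A minimal standalone sketch (not part of the original test) of the attribute
# API these assertions exercise; the file name "attr_demo.nc" is hypothetical.
import netCDF4
ds = netCDF4.Dataset("attr_demo.nc", "w")
ds.title = "demo"                           # attribute-style assignment
ds.setncattr("history", "created")          # equivalent explicit setter
ds.setncattr_string("notes", "forced")      # write as NC_STRING rather than NC_CHAR
assert ds.getncattr("title") == ds.title    # getncattr mirrors attribute access
assert {"title", "history", "notes"} <= set(ds.ncattrs())
ds.close()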
for key,val in ATTDICT.items(): if type(val) == np.ndarray: assert g.__dict__[key].tolist() == val.tolist() else: assert g.__dict__[key] == val assert g.intatt == INTATT assert g.floatatt == FLOATATT assert g.stratt == STRATT assert g.emptystratt == EMPTYSTRATT assert g.seqatt.tolist() == SEQATT.tolist() #assert g.stringseqatt == ''.join(STRINGSEQATT) # issue 770 assert g.stringseqatt == STRINGSEQATT assert g.stringseqatt_array == STRINGSEQATT for key,val in ATTDICT.items(): if type(val) == np.ndarray: assert v1.__dict__[key].tolist() == val.tolist() else: assert v1.__dict__[key] == val assert v1.intatt == INTATT assert v1.floatatt == FLOATATT assert v1.stratt == STRATT assert v1.emptystratt == EMPTYSTRATT assert v1.seqatt.tolist() == SEQATT.tolist() #assert v1.stringseqatt == ''.join(STRINGSEQATT) # issue 770 assert v1.stringseqatt == STRINGSEQATT assert v1.stringseqatt_array == STRINGSEQATT assert getattr(v1,'nonexistantatt',None) == None # issue 915 empty string attribute (ncdump reports 'NIL') with netCDF4.Dataset(pathlib.Path(__file__).parent / "test_gold.nc") as f: assert f['RADIANCE'].VAR_NOTES == "" if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_cdf5.py000066400000000000000000000026771512661643000203250ustar00rootroot00000000000000from netCDF4 import Dataset, __has_cdf5_format__ import numpy as np import sys, os, unittest, tempfile import struct from numpy.testing import assert_array_equal FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name dimsize = np.iinfo(np.int32).max*2 # only allowed in CDF5 ndim = 100 arrdata = np.random.randint(np.iinfo(np.uint8).min,np.iinfo(np.uint8).max,size=ndim) @unittest.skipIf(not __has_cdf5_format__ or struct.calcsize("P") < 8, "no CDF5 support") class test_cdf5(unittest.TestCase): def setUp(self): self.netcdf_file = FILE_NAME nc = Dataset(self.netcdf_file,'w',format='NETCDF3_64BIT_DATA') # create a 64-bit dimension d = nc.createDimension('dim',dimsize) # 64-bit dimension # create an 8-bit unsigned integer variable v = nc.createVariable('var',np.uint8,'dim') v[:ndim] = arrdata # create a 64-bit integer attribute (issue #878) nc.setncattr('int64_attr', np.int64(-9223372036854775806)) nc.close() def tearDown(self): # Remove the temporary files os.remove(self.netcdf_file) def runTest(self): """testing NETCDF3_64BIT_DATA format (CDF-5)""" f = Dataset(self.netcdf_file, 'r') assert f.dimensions['dim'].size == dimsize assert_array_equal(arrdata, f.variables['var'][:ndim]) assert (type(f.int64_attr) == np.int64) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_cdl.py000066400000000000000000000044271512661643000202410ustar00rootroot00000000000000import unittest import netCDF4 import os import pathlib test_ncdump="""netcdf ubyte { dimensions: d = 2 ; variables: byte ub(d) ; ub:_Unsigned = "true" ; byte sb(d) ; byte sb2(d) ; sb2:_Unsigned = "false" ; // global attributes: :_Format = "classic" ; } """ test_ncdump2="""netcdf ubyte { dimensions: d = 2 ; variables: byte ub(d) ; ub:_Unsigned = "true" ; byte sb(d) ; byte sb2(d) ; sb2:_Unsigned = "false" ; // global attributes: :_Format = "classic" ; data: ub = 0, -1 ; sb = -128, 127 ; sb2 = -127, -127 ; } """ ubyte_filename = pathlib.Path(__file__).parent / "ubyte.nc" @unittest.skipIf(os.getenv("NO_CDL"), "CDL test disabled") class Test_CDL(unittest.TestCase): """ Test import/export of CDL """ def setUp(self): with netCDF4.Dataset(ubyte_filename) as f: f.tocdl(outfile="ubyte.cdl", data=True) def test_tocdl(self): # treated as 
unsigned integers. with netCDF4.Dataset(ubyte_filename) as f: assert f.tocdl() == test_ncdump assert f.tocdl(data=True) == test_ncdump2 def test_fromcdl(self): with netCDF4.Dataset.fromcdl("ubyte.cdl", ncfilename="ubyte2.nc") as f1: with netCDF4.Dataset(ubyte_filename) as f2: assert f1.variables.keys() == f2.variables.keys() assert f1.filepath() == "ubyte2.nc" assert f1.dimensions.keys() == f2.dimensions.keys() assert len(f1.dimensions["d"]) == len(f2.dimensions["d"]) assert (f1["ub"][:] == f2["ub"][:]).all() assert (f1["sb"][:] == f2["sb"][:]).all() # test if os.PathLike works with netCDF4.Dataset.fromcdl(pathlib.Path("ubyte.cdl"), ncfilename=pathlib.Path("ubyte3.nc")) as f3: assert f1.variables.keys() == f3.variables.keys() # check if correct errors are raised self.assertRaises(FileNotFoundError, netCDF4.Dataset.fromcdl, "doesnotexist.cdl") self.assertRaises(FileExistsError, netCDF4.Dataset.fromcdl, "ubyte.cdl", ncfilename="ubyte2.nc") # cleanup os.remove("ubyte2.nc") os.remove("ubyte3.nc") def tearDown(self): # Remove the temporary files os.remove('ubyte.cdl') if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_chunk_cache.py000066400000000000000000000030421512661643000217220ustar00rootroot00000000000000import unittest, netCDF4, tempfile, os file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name cache_size = 10000 cache_nelems = 100 cache_preempt = 0.5 cache_size2 = 20000 cache_nelems2 = 200 cache_preempt2 = 1.0 class RefCountTestCase(unittest.TestCase): def setUp(self): nc = netCDF4.Dataset(file_name, mode='w', format='NETCDF4') d = nc.createDimension('fred', 2000) # can only change cache size in createVariable (not nelems or preemption) # this change lasts only as long as file is open. v = nc.createVariable('frank','f',('fred',),chunk_cache=15000) size, nelems, preempt = v.get_var_chunk_cache() assert size==15000 self.file=file_name nc.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing methods for accessing and changing chunk cache""" # change cache parameters before opening file. netCDF4.set_chunk_cache(cache_size, cache_nelems, cache_preempt) nc = netCDF4.Dataset(self.file, mode='r') # check to see that chunk cache parameters were changed.
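# A minimal standalone sketch (not part of the original test) of the chunk-cache
# API used in this file; the cache sizes below are arbitrary illustration values.
import netCDF4
netCDF4.set_chunk_cache(10000, 100, 0.5)   # library-wide: size (bytes), nelems, preemption
assert netCDF4.get_chunk_cache() == (10000, 100, 0.5)
# A per-variable cache can also be set at creation time (chunk_cache=...) or
# changed later with Variable.set_var_chunk_cache, as the assertions below show.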
assert netCDF4.get_chunk_cache() == (cache_size, cache_nelems, cache_preempt) # change cache parameters for variable, check nc['frank'].set_var_chunk_cache(cache_size2, cache_nelems2, cache_preempt2) assert nc['frank'].get_var_chunk_cache() == (cache_size2, cache_nelems2, cache_preempt2) nc.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_complex.py000066400000000000000000000066001512661643000211410ustar00rootroot00000000000000import netCDF4 import numpy as np import pathlib import tempfile import unittest complex_array = np.array([0 + 0j, 1 + 0j, 0 + 1j, 1 + 1j, 0.25 + 0.75j], dtype="c16") np_dt = np.dtype([("r", np.float64), ("i", np.float64)]) complex_struct_array = np.array( [(r, i) for r, i in zip(complex_array.real, complex_array.imag)], dtype=np_dt, ) class ComplexNumbersTestCase(unittest.TestCase): def setUp(self): self.tmp_path = pathlib.Path(tempfile.mkdtemp()) def test_read_dim(self): filename = self.tmp_path / "test_read_dim.nc" with netCDF4.Dataset(filename, "w") as f: f.createDimension("x", size=len(complex_array)) f.createDimension("ri", size=2) c_ri = f.createVariable("data_dim", np.float64, ("x", "ri")) as_dim_array = np.vstack((complex_array.real, complex_array.imag)).T c_ri[:] = as_dim_array with netCDF4.Dataset(filename, "r", auto_complex=True) as f: assert "data_dim" in f.variables data_dim = f["data_dim"] assert data_dim.shape == complex_array.shape data = data_dim[:] assert np.array_equal(data, complex_array) def test_read_struct(self): filename = self.tmp_path / "test_read_struct.nc" with netCDF4.Dataset(filename, "w") as f: f.createDimension("x", size=len(complex_array)) nc_dt = f.createCompoundType(np_dt, "nc_complex") c_struct = f.createVariable("data_struct", nc_dt, ("x",)) c_struct[:] = complex_struct_array with netCDF4.Dataset(filename, "r", auto_complex=True) as f: assert "data_struct" in f.variables data = f["data_struct"][:] assert np.array_equal(data, complex_array) def test_write(self): filename = self.tmp_path / "test_write.nc" with netCDF4.Dataset(filename, "w", auto_complex=True) as f: f.createDimension("x", size=len(complex_array)) complex_var = f.createVariable("complex_data", "c16", ("x",)) complex_var[:] = complex_array with netCDF4.Dataset(filename, "r") as f: assert "complex_data" in f.variables assert np.array_equal(f["complex_data"], complex_struct_array) def test_write_with_np_complex128(self): filename = self.tmp_path / "test_write_with_np_complex128.nc" with netCDF4.Dataset(filename, "w", auto_complex=True) as f: f.createDimension("x", size=len(complex_array)) complex_var = f.createVariable("complex_data", np.complex128, ("x",)) complex_var[:] = complex_array with netCDF4.Dataset(filename, "r") as f: assert "complex_data" in f.variables assert np.array_equal(f["complex_data"], complex_struct_array) def test_write_netcdf3(self): filename = self.tmp_path / "test_write_netcdf3.nc" with netCDF4.Dataset( filename, "w", format="NETCDF3_CLASSIC", auto_complex=True ) as f: f.createDimension("x", size=len(complex_array)) complex_var = f.createVariable("complex_data", "c16", ("x",)) complex_var[:] = complex_array with netCDF4.Dataset(filename, "r", auto_complex=True) as f: assert "complex_data" in f.variables assert np.array_equal(f["complex_data"][:], complex_array) if __name__ == "__main__": unittest.main() netcdf4-python-1.7.4rel/test/test_compound_alignment.py000066400000000000000000001122271512661643000233570ustar00rootroot00000000000000""" This illustrates a bug when a structured array is extracted from a 
netCDF4.Variable using the slicing operation. Bug is observed with EPD 7.3-1 and 7.3-2 (64-bit) """ import netCDF4, numpy, tempfile, sys, os, unittest from numpy.testing import assert_array_equal, assert_array_almost_equal def string_to_bytes(xstring, size=-1, pad="\0"): nbytes = len(xstring) if (size >= 0): xsize = size else: xsize = nbytes xbytes = numpy.empty(xsize, dtype=numpy.uint8) xbytes[:] = ord(pad) if (nbytes > xsize): nbytes = xsize for i in range(nbytes): xbytes[i] = ord(xstring[i]) return xbytes cells = numpy.array([ (387, 289, 65.64321899414062, -167.90093994140625, 3555, -10158, 8934, -16608, 19, 34199, 2, 0, 218, 619, 534, 314, 234, 65528, 39, 1524, 2429, 3137, 2795, 3092, 6431, 12949, 6780, 18099, 8248, 9331, 972, 553, 721, 2874, 2488, 3087, 3072, 2537, 3295, 334, 334, 9888, 10552, 7175, 6981, 7250, 8133, 14349, 16565, 17097, 20945, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 10, 11, 15, 7, 14, 4, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12210, 16433, 45, 241, 243, 71, 131, [87, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (387, 290, 65.64067077636719, -167.93258666992188, 3546, -10161, 8934, -16611, 13, 34165, 1, 0, 215, 582, 534, 317, 204, 65528, 34, 1533, 2428, 3161, 2803, 3107, 6336, 12721, 6670, 17775, 7973, 8770, 933, 554, 714, 2904, 2480, 3102, 3087, 2560, 3323, 359, 359, 9934, 10585, 7235, 7007, 7315, 8209, 14421, 16538, 17046, 20924, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 11, 15, 6, 15, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12235, 16433, 45, 241, 243, 71, 131, [-43, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (388, 287, 65.65902709960938, -167.84213256835938, 3574, -10167, 8936, -16602, 15, 34269, 1, 0, 213, 626, 521, 313, 230, 64, 35, 1519, 2391, 3091, 2719, 3011, 6313, 12685, 6657, 17785, 8169, 9420, 960, 541, 705, 2881, 2488, 3084, 3065, 2500, 3328, 357, 357, 10023, 10578, 7250, 6986, 7285, 8149, 14469, 16671, 17188, 20849, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 6, 15, 4, 4, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12241, 16432, 25, 241, 243, 71, 131, [-41, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (388, 288, 65.65646362304688, -167.8740692138672, 3565, -10171, 8936, -16605, 17, 34234, 1, 0, 214, 618, 523, 310, 226, 70, 36, 1528, 2408, 3107, 2751, 3026, 6320, 12708, 6673, 17824, 8138, 9309, 960, 541, 712, 2881, 2496, 3084, 3079, 2477, 3259, 349, 349, 10023, 10528, 7281, 7011, 7285, 8149, 14416, 16503, 17057, 20928, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 6, 13, 4, 4, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12239, 16433, 45, 241, 243, 71, 131, [-43, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (388, 289, 65.65390014648438, -167.9058380126953, 3555, -10174, 8935, -16608, 15, 34200, 2, 0, 212, 582, 526, 307, 208, 60, 40, 1519, 2408, 3107, 2751, 3042, 6226, 12504, 6548, 17477, 7880, 8732, 929, 541, 689, 2911, 2496, 3129, 3094, 2500, 3300, 342, 342, 10001, 10595, 7413, 7086, 7396, 8292, 14486, 16601, 16949, 21066, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12272, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 
0, 0]), (388, 290, 65.6513442993164, -167.9374542236328, 3546, -10177, 8935, -16611, 6, 34166, 2, 0, 213, 568, 531, 315, 198, 64, 34, 1537, 2424, 3147, 2782, 3081, 6242, 12534, 6571, 17524, 7833, 8550, 921, 541, 689, 2926, 2496, 3144, 3102, 2546, 3341, 358, 358, 10045, 10629, 7421, 7078, 7448, 8326, 14485, 16572, 16984, 21085, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12307, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (388, 291, 65.6487808227539, -167.96910095214844, 3536, -10180, 8934, -16614, 5, 34131, 1, 0, 218, 586, 538, 321, 211, 74, 40, 1546, 2424, 3171, 2806, 3113, 6368, 12821, 6704, 17895, 8029, 8835, 937, 549, 705, 2926, 2496, 3152, 3117, 2476, 3286, 350, 350, 9978, 10612, 7468, 7128, 7474, 8360, 14547, 16572, 17019, 20766, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 3, 7, 0, 0, 11, 12, 15, 5, 13, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (389, 287, 65.66973876953125, -167.84698486328125, 3574, -10183, 8937, -16603, 8, 34270, 2, 0, 211, 598, 526, 304, 206, 65528, 35, 1516, 2378, 3069, 2697, 2984, 6168, 12394, 6515, 17382, 7931, 9011, 935, 530, 694, 2923, 2495, 3147, 3106, 2530, 3413, 334, 334, 9999, 10723, 7479, 7160, 7494, 8378, 14631, 16670, 17111, 21141, 12, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 12325, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (389, 288, 65.66716003417969, -167.87890625, 3565, -10186, 8937, -16606, 9, 34235, 2, 0, 212, 602, 528, 309, 218, 65528, 38, 1525, 2387, 3101, 2736, 3016, 6240, 12542, 6585, 17587, 7994, 9050, 943, 530, 701, 2938, 2503, 3170, 3128, 2552, 3371, 333, 333, 9930, 10706, 7533, 7176, 7546, 8412, 14595, 16697, 17010, 20876, 12, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 13, 15, 6, 10, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 12360, 16433, 45, 241, 243, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (389, 289, 65.66458892822266, -167.91065979003906, 3555, -10190, 8936, -16609, 5, 34201, 2, 0, 212, 561, 527, 311, 202, 65528, 34, 1524, 2412, 3117, 2744, 3032, 6137, 12342, 6461, 17241, 7721, 8408, 897, 530, 678, 2967, 2495, 3185, 3158, 2552, 3344, 335, 335, 9953, 10757, 7586, 7219, 7598, 8474, 14622, 16711, 17085, 20855, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 12, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (389, 290, 65.6620101928711, -167.94241333007812, 3546, -10193, 8936, -16611, 5, 34166, 2, 0, 213, 558, 533, 315, 190, 65528, 35, 1533, 2420, 3141, 2767, 3071, 6168, 12424, 6500, 17312, 7721, 8360, 905, 530, 678, 2952, 2495, 3177, 3128, 2507, 3371, 334, 334, 9975, 10689, 7517, 7176, 7546, 8426, 14577, 16559, 17109, 21037, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 12, 13, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 
20, 1], [0, 0, 0, 0, 0]), (389, 291, 65.65943145751953, -167.97409057617188, 3536, -10196, 8935, -16614, 5, 34132, 0, 0, 217, 578, 536, 324, 206, 65528, 36, 1542, 2420, 3165, 2799, 3095, 6303, 12683, 6640, 17713, 7924, 8654, 920, 546, 694, 2938, 2495, 3170, 3143, 2530, 3358, 327, 327, 9952, 10672, 7517, 7184, 7539, 8419, 14550, 16627, 17046, 20934, 12, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 11, 12, 15, 6, 11, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (389, 292, 65.65685272216797, -168.0056915283203, 3527, -10199, 8934, -16617, 5, 34097, 1, 0, 226, 625, 545, 329, 232, 65528, 56, 1542, 2428, 3189, 2845, 3165, 6580, 13244, 6943, 18555, 8375, 9328, 973, 569, 732, 2952, 2503, 3155, 3106, 2507, 3289, 341, 341, 9861, 10552, 7494, 7176, 7513, 8405, 14460, 16489, 16983, 20873, 11, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 6, 3, 7, 0, 0, 10, 11, 15, 6, 10, 3, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 3, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (390, 287, 65.66167449951172, -167.85145568847656, 3573, -10045, 8937, -16603, 14, 34267, 1, 0, 213, 624, 529, 315, 216, 68, 44, 1533, 2414, 3105, 2719, 3022, 6294, 12637, 6630, 17704, 8134, 9315, 969, 542, 712, 2888, 2500, 3097, 3042, 2456, 3268, 334, 334, 10122, 10624, 7274, 7110, 7307, 8181, 14498, 16617, 17137, 21090, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12243, 16433, 45, 241, 243, 71, 131, [-41, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (390, 288, 65.65919494628906, -167.88340759277344, 3564, -10048, 8936, -16606, 15, 34233, 2, 0, 215, 610, 526, 316, 213, 72, 38, 1533, 2414, 3105, 2735, 3038, 6278, 12621, 6607, 17641, 8055, 9125, 954, 542, 704, 2910, 2506, 3113, 3071, 2501, 3254, 342, 342, 10032, 10624, 7358, 7181, 7353, 8243, 14453, 16522, 17075, 20905, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 8, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12255, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (390, 289, 65.65672302246094, -167.91519165039062, 3554, -10050, 8935, -16608, 10, 34198, 2, 0, 211, 570, 533, 318, 196, 64, 34, 1524, 2414, 3128, 2751, 3038, 6177, 12399, 6491, 17304, 7774, 8523, 914, 542, 688, 2940, 2500, 3143, 3086, 2523, 3310, 335, 335, 10054, 10691, 7456, 7199, 7470, 8347, 14560, 16656, 17000, 20986, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12318, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (390, 290, 65.65425109863281, -167.9468994140625, 3545, -10053, 8935, -16611, 5, 34164, 2, 0, 213, 572, 538, 319, 196, 64, 41, 1533, 2438, 3160, 2789, 3077, 6231, 12534, 6561, 17477, 7829, 8538, 922, 542, 696, 2933, 2500, 3151, 3086, 2456, 3324, 343, 343, 10054, 10674, 7441, 7199, 7470, 8340, 14533, 16562, 16987, 20985, 13, 7, 6, 15, 15, 15, 15, 0, 9, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, 
-128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (390, 291, 65.65176391601562, -167.97862243652344, 3535, -10056, 8934, -16614, 5, 34129, 2, 0, 220, 600, 544, 324, 209, 78, 52, 1532, 2446, 3175, 2821, 3124, 6426, 12898, 6754, 18017, 8110, 8951, 961, 557, 712, 2948, 2500, 3143, 3094, 2456, 3268, 342, 342, 10054, 10624, 7433, 7181, 7490, 8361, 14524, 16615, 17011, 21005, 13, 6, 6, 15, 15, 15, 15, 0, 9, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 9, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 286, 65.67485809326172, -167.82440185546875, 3583, -10058, 8938, -16600, 13, 34302, 2, 0, 209, 603, 516, 306, 206, 69, 42, 1500, 2373, 3048, 2663, 2961, 6145, 12346, 6479, 17279, 7924, 9049, 930, 526, 697, 2902, 2500, 3127, 3090, 2513, 3361, 338, 338, 10063, 10809, 7433, 7131, 7427, 8311, 14635, 16809, 17275, 20874, 13, 5, 7, 15, 15, 15, 15, 0, 10, 6, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12294, 16433, 45, 241, 243, 72, 131, [85, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 287, 65.67237854003906, -167.8563232421875, 3573, -10061, 8938, -16603, 7, 34268, 2, 0, 212, 608, 525, 309, 220, 66, 42, 1517, 2389, 3080, 2701, 2985, 6200, 12440, 6557, 17474, 7995, 9121, 946, 534, 697, 2932, 2500, 3165, 3113, 2490, 3320, 329, 329, 10085, 10776, 7527, 7186, 7538, 8414, 14652, 16698, 17108, 20833, 13, 5, 7, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12350, 16433, 45, 241, 243, 71, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 288, 65.66989135742188, -167.8882598876953, 3564, -10064, 8937, -16606, 6, 34233, 2, 0, 213, 598, 528, 311, 225, 77, 39, 1534, 2405, 3111, 2724, 3016, 6239, 12527, 6572, 17545, 7971, 8954, 946, 542, 704, 2955, 2507, 3172, 3128, 2467, 3320, 353, 353, 9952, 10725, 7534, 7241, 7571, 8441, 14617, 16615, 17081, 20772, 13, 5, 7, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 6, 11, 3, 2, 5, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 12369, 16433, 45, 241, 243, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 289, 65.66740417480469, -167.9201202392578, 3554, -10066, 8936, -16609, 5, 34198, 2, 0, 209, 552, 534, 314, 188, 62, 40, 1525, 2413, 3119, 2740, 3032, 6114, 12267, 6409, 17084, 7635, 8240, 891, 534, 681, 2940, 2500, 3180, 3135, 2536, 3333, 330, 330, 10018, 10775, 7597, 7214, 7610, 8476, 14660, 16683, 17158, 20892, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 6, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 290, 65.66490936279297, -167.951904296875, 3545, -10069, 8936, -16612, 5, 34164, 2, 0, 212, 560, 530, 319, 192, 61, 38, 1525, 2421, 3143, 2763, 3055, 6184, 12425, 6502, 17326, 7729, 8391, 907, 542, 697, 2962, 2500, 3157, 3135, 2490, 3306, 330, 330, 9952, 10758, 7542, 7214, 7564, 8441, 14582, 16669, 17132, 20831, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 
0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 291, 65.66241455078125, -167.98362731933594, 3535, -10072, 8935, -16615, 5, 34129, 0, 0, 219, 586, 545, 320, 207, 70, 47, 1543, 2438, 3166, 2802, 3110, 6357, 12780, 6697, 17880, 8011, 8788, 938, 557, 704, 2962, 2500, 3172, 3128, 2490, 3319, 353, 353, 10018, 10691, 7542, 7186, 7557, 8441, 14590, 16614, 17131, 20971, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 12, 3, 2, 5, 3, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (391, 292, 65.65991973876953, -168.01527404785156, 3526, -10075, 8935, -16618, 5, 34095, 0, 0, 228, 635, 548, 330, 234, 91, 63, 1542, 2446, 3198, 2848, 3165, 6639, 13364, 6985, 18685, 8480, 9494, 993, 573, 744, 2947, 2500, 3165, 3113, 2467, 3250, 353, 353, 9929, 10606, 7518, 7204, 7531, 8407, 14461, 16475, 16938, 20849, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 10, 11, 15, 6, 12, 3, 2, 5, 4, 15, 15, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 286, 65.68557739257812, -167.8292694091797, 3583, -10074, 8939, -16601, 6, 34303, 2, 0, 205, 586, 519, 302, 195, 65528, 36, 1511, 2365, 3033, 2638, 2931, 6076, 12187, 6396, 17037, 7800, 8827, 916, 521, 675, 2950, 2505, 3202, 3155, 2456, 3417, 343, 343, 9789, 10958, 7655, 7207, 7655, 8550, 14842, 16863, 17290, 20901, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 11, 2, 2, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12393, 16433, 45, 241, 243, 72, 131, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 287, 65.6830825805664, -167.8612823486328, 3573, -10077, 8939, -16604, 5, 34268, 2, 0, 211, 609, 527, 308, 217, 65528, 38, 1519, 2399, 3073, 2700, 2994, 6226, 12518, 6568, 17499, 8037, 9190, 948, 529, 699, 2957, 2497, 3195, 3140, 2525, 3362, 342, 342, 9765, 10857, 7609, 7207, 7635, 8508, 14686, 16766, 17165, 20820, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 288, 65.68058013916016, -167.8932342529297, 3564, -10079, 8938, -16606, 5, 34233, 2, 0, 209, 575, 527, 312, 206, 65528, 29, 1528, 2407, 3105, 2716, 3002, 6163, 12369, 6474, 17264, 7808, 8653, 908, 537, 691, 2950, 2497, 3187, 3132, 2502, 3361, 351, 351, 9601, 10724, 7571, 7188, 7596, 8460, 14572, 16655, 17127, 20820, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 289, 65.6780776977539, -167.92510986328125, 3554, -10082, 8937, -16609, 5, 34198, 2, 0, 208, 538, 531, 314, 182, 65528, 37, 1519, 2415, 3105, 2732, 3017, 6061, 12148, 6357, 16927, 7539, 8101, 885, 529, 675, 2957, 2505, 3195, 3147, 2502, 3334, 352, 352, 9624, 10740, 7609, 7151, 7609, 8487, 14624, 16737, 17089, 20859, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 11, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 
0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 290, 65.67557525634766, -167.9569091796875, 3545, -10085, 8937, -16612, 5, 34163, 0, 0, 212, 550, 530, 320, 185, 65528, 43, 1528, 2423, 3136, 2755, 3056, 6147, 12345, 6459, 17225, 7673, 8283, 885, 529, 683, 2965, 2497, 3195, 3147, 2479, 3334, 344, 344, 9600, 10774, 7608, 7179, 7628, 8508, 14659, 16737, 17113, 20818, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 291, 65.67306518554688, -167.9886474609375, 3535, -10088, 8936, -16615, 5, 34129, 0, 0, 214, 574, 533, 322, 199, 65528, 49, 1527, 2423, 3160, 2786, 3088, 6297, 12668, 6623, 17664, 7910, 8630, 924, 545, 699, 2972, 2505, 3187, 3139, 2525, 3334, 351, 351, 9647, 10757, 7601, 7169, 7622, 8494, 14633, 16709, 17113, 21018, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 11, 12, 15, 5, 11, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (392, 292, 65.67056274414062, -168.02029418945312, 3526, -10091, 8936, -16618, 5, 34095, 2, 0, 225, 627, 539, 327, 220, 65528, 56, 1527, 2431, 3176, 2833, 3150, 6604, 13291, 6951, 18566, 8416, 9411, 979, 568, 738, 2965, 2497, 3187, 3139, 2479, 3292, 350, 350, 9624, 10723, 7585, 7197, 7615, 8487, 14606, 16571, 17037, 20836, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 10, 11, 15, 5, 12, 2, 3, 6, 3, 15, 15, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 286, 65.6962890625, -167.834228515625, 3583, -10089, 8940, -16601, 5, 34303, 1, 0, 203, 586, 515, 301, 208, 62, 39, 1504, 2362, 3033, 2647, 2933, 6076, 12190, 6396, 17048, 7825, 8887, 931, 518, 673, 2985, 2506, 3242, 3177, 2499, 3353, 341, 341, 10146, 11031, 7764, 7255, 7766, 8653, 14901, 16912, 17203, 21074, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 9, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 287, 65.69377899169922, -167.8662567138672, 3573, -10092, 8939, -16604, 5, 34268, 0, 0, 205, 592, 521, 303, 211, 61, 38, 1503, 2387, 3057, 2671, 2956, 6139, 12323, 6467, 17236, 7873, 8958, 931, 525, 681, 3000, 2499, 3235, 3185, 2476, 3339, 333, 333, 10101, 11014, 7749, 7264, 7779, 8674, 14910, 16840, 17229, 20975, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 288, 65.6912612915039, -167.89822387695312, 3564, -10095, 8939, -16607, 5, 34233, 0, 0, 204, 542, 529, 304, 189, 59, 35, 1512, 2403, 3081, 2702, 2987, 6013, 12080, 6318, 16828, 7540, 8173, 892, 518, 666, 2978, 2499, 3227, 3163, 2476, 3353, 350, 350, 9966, 10849, 7679, 7236, 7707, 8585, 14724, 16670, 17097, 21013, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, 
-128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 289, 65.68875122070312, -167.93011474609375, 3554, -10098, 8938, -16610, 5, 34198, 2, 0, 203, 531, 529, 305, 169, 58, 35, 1521, 2403, 3097, 2726, 3003, 6013, 12064, 6310, 16812, 7460, 7990, 876, 525, 658, 2985, 2499, 3227, 3155, 2499, 3339, 343, 343, 9988, 10799, 7664, 7199, 7680, 8564, 14671, 16726, 17136, 21032, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 290, 65.68623352050781, -167.9619140625, 3545, -10101, 8938, -16613, 5, 34163, 2, 0, 206, 546, 529, 311, 188, 64, 43, 1520, 2411, 3120, 2741, 3050, 6115, 12300, 6436, 17158, 7643, 8228, 900, 533, 673, 2985, 2506, 3227, 3148, 2453, 3339, 342, 342, 10011, 10832, 7679, 7255, 7680, 8585, 14724, 16698, 17162, 20953, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 291, 65.6837158203125, -167.99365234375, 3535, -10104, 8937, -16616, 5, 34129, 2, 0, 211, 567, 536, 318, 199, 70, 45, 1520, 2420, 3128, 2773, 3074, 6265, 12590, 6585, 17559, 7850, 8530, 923, 549, 689, 3000, 2499, 3242, 3140, 2499, 3325, 342, 342, 9988, 10782, 7664, 7217, 7674, 8557, 14635, 16655, 17083, 20893, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 10, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (393, 292, 65.68119812011719, -168.0253143310547, 3526, -10107, 8937, -16619, 5, 34095, 2, 0, 221, 628, 535, 324, 219, 78, 51, 1529, 2419, 3152, 2804, 3128, 6573, 13234, 6930, 18526, 8430, 9490, 986, 557, 728, 3007, 2491, 3242, 3170, 2453, 3339, 357, 357, 9988, 10832, 7702, 7245, 7713, 8605, 14688, 16711, 17161, 20951, 13, 5, 6, 15, 15, 15, 15, 0, 10, 5, 7, 7, 4, 5, 4, 7, 0, 0, 10, 13, 15, 5, 12, 2, 3, 6, 3, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 6, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 286, 65.70700073242188, -167.83920288085938, 3583, -10105, 8941, -16601, 5, 34303, 0, 0, 200, 581, 516, 295, 202, 55, 28, 1489, 2346, 3018, 2636, 2906, 6020, 12097, 6352, 16932, 7760, 8872, 929, 517, 685, 3003, 2495, 3255, 3202, 2549, 3444, 330, 330, 10008, 11099, 7876, 7303, 7860, 8761, 14994, 16898, 17317, 20986, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 14, 15, 5, 11, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 287, 65.70447540283203, -167.87124633789062, 3573, -10108, 8940, -16604, 5, 34268, 2, 0, 200, 568, 515, 297, 200, 59, 31, 1497, 2354, 3042, 2651, 2929, 6012, 12065, 6336, 16885, 7681, 8659, 921, 525, 677, 3010, 2503, 3277, 3217, 2548, 3444, 330, 330, 10054, 11082, 7883, 7321, 7873, 8774, 14967, 16911, 17264, 20995, 14, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 
20, 1], [0, 0, 0, 0, 0]), (394, 288, 65.70195007324219, -167.90321350097656, 3564, -10111, 8940, -16607, 5, 34233, 2, 0, 197, 514, 520, 297, 172, 49, 32, 1506, 2370, 3058, 2667, 2937, 5869, 11783, 6161, 16411, 7286, 7798, 866, 517, 646, 3003, 2503, 3247, 3188, 2525, 3389, 339, 339, 9985, 10951, 7791, 7257, 7801, 8692, 14845, 16843, 17250, 20935, 14, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 289, 65.69942474365234, -167.9351043701172, 3554, -10114, 8939, -16610, 5, 34198, 2, 0, 197, 513, 527, 301, 166, 51, 31, 1506, 2387, 3090, 2683, 2969, 5909, 11854, 6201, 16514, 7310, 7790, 866, 517, 654, 2980, 2495, 3232, 3180, 2525, 3389, 339, 339, 9962, 10918, 7738, 7239, 7748, 8623, 14792, 16720, 17093, 20954, 14, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 14, 15, 5, 11, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 290, 65.6968994140625, -167.96693420410156, 3545, -10117, 8939, -16613, 5, 34164, 2, 0, 204, 538, 529, 306, 185, 61, 37, 1514, 2395, 3105, 2730, 3008, 6067, 12183, 6376, 16988, 7563, 8146, 889, 525, 670, 3003, 2503, 3255, 3202, 2502, 3389, 339, 339, 9939, 10934, 7791, 7266, 7761, 8664, 14792, 16760, 17171, 21051, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 291, 65.69436645507812, -167.99867248535156, 3535, -10120, 8938, -16616, 5, 34129, 2, 0, 209, 569, 526, 314, 201, 67, 48, 1514, 2395, 3129, 2761, 3055, 6266, 12591, 6582, 17565, 7879, 8635, 921, 540, 693, 3003, 2488, 3255, 3180, 2502, 3375, 338, 338, 9962, 10835, 7745, 7248, 7748, 8643, 14739, 16637, 17145, 20923, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 11, 13, 15, 5, 14, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [81, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (394, 292, 65.69184112548828, -168.0303497314453, 3526, -10123, 8938, -16619, 6, 34095, 2, 0, 220, 651, 526, 318, 234, 85, 48, 1505, 2386, 3121, 2785, 3094, 6590, 13265, 6963, 18615, 8567, 9875, 1015, 564, 740, 3010, 2503, 3255, 3188, 2456, 3361, 345, 345, 9870, 10868, 7783, 7302, 7761, 8650, 14739, 16678, 17144, 20990, 13, 4, 6, 15, 15, 15, 15, 0, 10, 5, 8, 7, 4, 5, 4, 7, 0, 0, 10, 12, 15, 5, 9, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 15, 28, 28, 6, 6, 6, 3, 6, 0, 12383, 49, 43, 250, 248, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (395, 287, 65.71517181396484, -167.87623596191406, 3573, -10124, 8941, -16605, 5, 34268, 0, 0, 198, 542, 513, 300, 183, 60, 29, 1496, 2343, 3027, 2635, 2904, 5886, 11824, 6182, 16478, 7447, 8243, 888, 511, 654, 3031, 2498, 3290, 3230, 2519, 3464, 340, 340, 10093, 11046, 7932, 7323, 7919, 8816, 14994, 16941, 17302, 21015, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 10, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 3, 0, 0, 0, 255, 255, [23, -97, -114, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 
0, 0, 0]), (395, 288, 65.71263885498047, -167.908203125, 3564, -10127, 8941, -16608, 5, 34233, 2, 0, 194, 500, 516, 295, 158, 47, 25, 1496, 2360, 3035, 2643, 2912, 5775, 11590, 6048, 16115, 7146, 7586, 841, 511, 638, 3001, 2498, 3260, 3186, 2519, 3409, 342, 342, 9957, 10878, 7800, 7279, 7815, 8713, 14916, 16818, 17263, 20857, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 13, 15, 5, 10, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (395, 289, 65.71009826660156, -167.94012451171875, 3554, -10130, 8940, -16611, 6, 34198, 1, 0, 198, 505, 516, 299, 159, 57, 29, 1505, 2368, 3051, 2659, 2936, 5847, 11746, 6127, 16305, 7226, 7673, 849, 511, 646, 2994, 2490, 3229, 3186, 2519, 3422, 349, 349, 9957, 10861, 7762, 7252, 7743, 8645, 14838, 16831, 17237, 20876, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 13, 15, 5, 12, 3, 3, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (395, 290, 65.70755767822266, -167.97195434570312, 3545, -10133, 8940, -16614, 6, 34164, 2, 0, 202, 530, 516, 305, 170, 59, 33, 1487, 2359, 3051, 2690, 2967, 5974, 12003, 6285, 16740, 7471, 8037, 888, 534, 661, 3024, 2498, 3275, 3223, 2474, 3422, 356, 356, 9957, 10928, 7893, 7332, 7847, 8740, 14855, 16776, 17249, 21150, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 12, 15, 5, 10, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 12421, 49, 43, 250, 248, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (395, 291, 65.70501708984375, -168.0037078857422, 3535, -10136, 8939, -16617, 6, 34129, 1, 0, 211, 587, 521, 310, 202, 76, 50, 1495, 2367, 3067, 2738, 3014, 6259, 12580, 6585, 17570, 7971, 8892, 935, 550, 701, 3061, 2505, 3297, 3223, 2541, 3409, 340, 340, 9979, 10894, 7916, 7332, 7873, 8761, 14838, 16775, 17160, 20953, 13, 6, 6, 15, 15, 15, 15, 0, 10, 5, 7, 8, 4, 5, 4, 7, 0, 0, 11, 12, 15, 4, 8, 3, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 3, 6, 0, 0, 2, 0, 0, 0, 255, 255, [-47, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (396, 288, 65.72332000732422, -167.9132080078125, 3564, -10143, 8942, -16608, 5, 34233, 2, 0, 193, 492, 506, 293, 149, 65528, 23, 1499, 2334, 3021, 2614, 2881, 5710, 11442, 5980, 15879, 7044, 7474, 823, 504, 637, 3017, 2504, 3268, 3212, 2535, 3451, 347, 347, 10063, 11038, 7836, 7335, 7844, 8742, 14915, 16850, 17220, 20894, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 14, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0]), (396, 289, 65.72077178955078, -167.94512939453125, 3554, -10146, 8941, -16611, 6, 34198, 2, 0, 195, 501, 510, 300, 157, 65528, 28, 1481, 2334, 3013, 2621, 2904, 5781, 11557, 6051, 16093, 7147, 7624, 847, 504, 637, 3017, 2504, 3253, 3205, 2490, 3410, 332, 332, 10017, 10938, 7821, 7291, 7805, 8707, 14853, 16850, 17207, 21072, 13, 8, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 13, 14, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 12431, 49, 44, 245, 245, 71, 131, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 
0]), (396, 290, 65.71821594238281, -167.9770050048828, 3545, -10149, 8941, -16614, 9, 34164, 1, 0, 200, 526, 511, 301, 170, 65528, 35, 1480, 2350, 3029, 2645, 2928, 5907, 11842, 6208, 16528, 7384, 7988, 870, 527, 661, 3054, 2504, 3291, 3235, 2490, 3424, 354, 354, 10039, 10988, 7958, 7395, 7902, 8811, 14853, 16836, 17231, 20852, 13, 7, 6, 15, 15, 15, 15, 0, 10, 5, 8, 8, 4, 5, 4, 7, 0, 0, 12, 13, 15, 5, 12, 2, 2, 6, 3, 15, 15, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 28, 6, 6, 6, 0, 6, 0, 0, 2, 0, 0, 0, 255, 255, [83, -97, 14, -111, 0, 0], [13, -128, -114, 4, 0, 0, 11, 3, 20, 1], [0, 0, 0, 0, 0])], dtype=[('mxd03_granule_row', ' size_save # check chunksizes f.close() f = Dataset(self.files[6]) checkarray2 = _quantize(array2,lsd) assert_almost_equal(checkarray2,f.variables['data2'][:]) assert f.variables['data2'].filters() ==\ {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':6,'fletcher32':True} assert f.variables['data2'].chunking() == [chunk1,chunk2] f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_compression_blosc.py000066400000000000000000000103151512661643000232130ustar00rootroot00000000000000from typing import TYPE_CHECKING, Any, Literal from numpy.random.mtrand import uniform from netCDF4 import Dataset from numpy.testing import assert_almost_equal import os, tempfile, unittest, sys, pytest from filter_availability import no_plugins, has_blosc_filter if TYPE_CHECKING: from netCDF4 import CompressionLevel else: CompressionLevel = Any ndim = 100000 iblosc_shuffle=2 iblosc_complevel=4 filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name datarr = uniform(size=(ndim,)) def write_netcdf(filename, dtype='f8', blosc_shuffle: Literal[0, 1, 2] = 1, complevel: CompressionLevel = 6): nc = Dataset(filename,'w') nc.createDimension('n', ndim) foo = nc.createVariable('data',\ dtype,('n'),compression=None) foo_lz = nc.createVariable('data_lz',\ dtype,('n'),compression='blosc_lz',blosc_shuffle=blosc_shuffle,complevel=complevel) foo_lz4 = nc.createVariable('data_lz4',\ dtype,('n'),compression='blosc_lz4',blosc_shuffle=blosc_shuffle,complevel=complevel) foo_lz4hc = nc.createVariable('data_lz4hc',\ dtype,('n'),compression='blosc_lz4hc',blosc_shuffle=blosc_shuffle,complevel=complevel) foo_zlib = nc.createVariable('data_zlib',\ dtype,('n'),compression='blosc_zlib',blosc_shuffle=blosc_shuffle,complevel=complevel) foo_zstd = nc.createVariable('data_zstd',\ dtype,('n'),compression='blosc_zstd',blosc_shuffle=blosc_shuffle,complevel=complevel) foo_lz[:] = datarr foo_lz4[:] = datarr foo_lz4hc[:] = datarr foo_zlib[:] = datarr foo_zstd[:] = datarr nc.close() @unittest.skipIf(no_plugins or not has_blosc_filter, "blosc filter not available") # allow failures for this test for now (it fails in Windows wheel workflow) @pytest.mark.xfail class CompressionTestCase(unittest.TestCase): def setUp(self): self.filename = filename write_netcdf(self.filename,complevel=iblosc_complevel,blosc_shuffle=iblosc_shuffle) # type: ignore def tearDown(self): # Remove the temporary files os.remove(self.filename) def runTest(self): f = Dataset(self.filename) assert_almost_equal(datarr,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} assert_almost_equal(datarr,f.variables['data_lz'][:]) dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': {'compressor': 'blosc_lz', 'shuffle': 
iblosc_shuffle}, 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} assert f.variables['data_lz'].filters() == dtest assert_almost_equal(datarr,f.variables['data_lz4'][:]) dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': {'compressor': 'blosc_lz4', 'shuffle': iblosc_shuffle}, 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} assert f.variables['data_lz4'].filters() == dtest assert_almost_equal(datarr,f.variables['data_lz4hc'][:]) dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': {'compressor': 'blosc_lz4hc', 'shuffle': iblosc_shuffle}, 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} assert f.variables['data_lz4hc'].filters() == dtest assert_almost_equal(datarr,f.variables['data_zlib'][:]) dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': {'compressor': 'blosc_zlib', 'shuffle': iblosc_shuffle}, 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} assert f.variables['data_zlib'].filters() == dtest assert_almost_equal(datarr,f.variables['data_zstd'][:]) dtest = {'zlib': False, 'szip':False, 'zstd': False, 'bzip2': False, 'blosc': {'compressor': 'blosc_zstd', 'shuffle': iblosc_shuffle}, 'shuffle': False, 'complevel': iblosc_complevel, 'fletcher32': False} assert f.variables['data_zstd'].filters() == dtest f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_compression_bzip2.py000066400000000000000000000043161512661643000231430ustar00rootroot00000000000000from typing import TYPE_CHECKING, Any from numpy.random.mtrand import uniform from netCDF4 import Dataset from numpy.testing import assert_almost_equal import os, tempfile, unittest, sys from filter_availability import no_plugins, has_bzip2_filter if TYPE_CHECKING: from netCDF4 import CompressionLevel else: CompressionLevel = Any ndim = 100000 filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name array = uniform(size=(ndim,)) def write_netcdf(filename,dtype='f8',complevel: CompressionLevel = 6): nc = Dataset(filename,'w') nc.createDimension('n', ndim) foo = nc.createVariable('data',\ dtype,('n'),compression='bzip2',complevel=complevel) foo[:] = array nc.close() @unittest.skipIf(no_plugins or not has_bzip2_filter, "bzip2 filter not available") class CompressionTestCase(unittest.TestCase): def setUp(self): self.filename1 = filename1 self.filename2 = filename2 write_netcdf(self.filename1,complevel=0) # no compression write_netcdf(self.filename2,complevel=4) # with compression def tearDown(self): # Remove the temporary files os.remove(self.filename1) os.remove(self.filename2) def runTest(self): uncompressed_size = os.stat(self.filename1).st_size # check uncompressed data f = Dataset(self.filename1) size = os.stat(self.filename1).st_size assert_almost_equal(array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} assert_almost_equal(size,uncompressed_size) f.close() # check compressed data. 
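# A minimal standalone sketch (not part of the original test): writing a
# bzip2-compressed variable and reading back its filter settings. The file name
# "bzip2_demo.nc" is hypothetical, and this assumes the bzip2 filter plugin is
# available in the underlying netcdf-c/HDF5 build.
from netCDF4 import Dataset
nc = Dataset("bzip2_demo.nc", "w")
nc.createDimension("n", 10)
v = nc.createVariable("data", "f8", ("n",), compression="bzip2", complevel=4)
v[:] = range(10)
print(v.filters())   # expect {'bzip2': True, 'complevel': 4, ...} with the other filters False
nc.close()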
f = Dataset(self.filename2) size = os.stat(self.filename2).st_size assert_almost_equal(array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':False,'bzip2':True,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False} assert size < 0.96*uncompressed_size f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_compression_quant.py000066400000000000000000000124271512661643000232470ustar00rootroot00000000000000from typing import TYPE_CHECKING, Any from numpy.random.mtrand import uniform from netCDF4 import Dataset, __has_quantization_support__ from numpy.testing import assert_almost_equal import numpy as np import os, tempfile, unittest if TYPE_CHECKING: from netCDF4 import CompressionLevel, QuantizeMode else: CompressionLevel = Any QuantizeMode = Any ndim = 100000 nfiles = 7 files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)] data_array = uniform(size=(ndim,)) nsd = 3 nsb = 10 # for BitRound, use significant bits (~3.32 sig digits) complevel = 6 def write_netcdf(filename,zlib,significant_digits,data,dtype='f8',shuffle=False,\ complevel: CompressionLevel = 6, quantize_mode: QuantizeMode = "BitGroom"): file = Dataset(filename,'w') file.createDimension('n', ndim) foo = file.createVariable('data',\ dtype,('n'),zlib=zlib,significant_digits=significant_digits,\ shuffle=shuffle,complevel=complevel,quantize_mode=quantize_mode) foo[:] = data file.close() file = Dataset(filename) data = file.variables['data'][:] file.close() @unittest.skipIf(not __has_quantization_support__, "missing quantisation support") class CompressionTestCase(unittest.TestCase): def setUp(self): self.files = files # no compression write_netcdf(self.files[0],False,None,data_array) # compressed, lossless, no shuffle. write_netcdf(self.files[1],True,None,data_array) # compressed, lossless, with shuffle. write_netcdf(self.files[2],True,None,data_array,shuffle=True) # compressed, lossy, no shuffle. write_netcdf(self.files[3],True,nsd,data_array) # compressed, lossy, with shuffle. write_netcdf(self.files[4],True,nsd,data_array,shuffle=True) # compressed, lossy, with shuffle, and alternate quantization. write_netcdf(self.files[5],True,nsd,data_array,quantize_mode='GranularBitRound',shuffle=True) # compressed, lossy, with shuffle, and alternate quantization. write_netcdf(self.files[6],True,nsb,data_array,quantize_mode='BitRound',shuffle=True) def tearDown(self): # Remove the temporary files for file in self.files: os.remove(file) def runTest(self): """testing zlib and shuffle compression filters""" uncompressed_size = os.stat(self.files[0]).st_size #print('uncompressed size = ',uncompressed_size) # check compressed data. 
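# A minimal standalone sketch (not part of the original test) of lossy
# quantization via createVariable; "quant_demo.nc" is a hypothetical name and
# this requires a netcdf-c build with quantization support.
import numpy as np
from netCDF4 import Dataset
nc = Dataset("quant_demo.nc", "w")
nc.createDimension("n", 1000)
v = nc.createVariable("data", "f8", ("n",), zlib=True, complevel=6, shuffle=True,
                      significant_digits=3, quantize_mode="BitGroom")
v[:] = np.random.uniform(size=1000)          # stored values keep ~3 significant digits
assert v.quantization() == (3, "BitGroom")   # settings round-trip through the file
nc.close()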
f = Dataset(self.files[1]) size = os.stat(self.files[1]).st_size #print('compressed lossless no shuffle = ',size) assert_almost_equal(data_array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':complevel,'fletcher32':False} assert size < 0.95*uncompressed_size f.close() # check compression with shuffle f = Dataset(self.files[2]) size = os.stat(self.files[2]).st_size #print('compressed lossless with shuffle ',size) assert_almost_equal(data_array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':True,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':True,'complevel':complevel,'fletcher32':False} assert size < 0.85*uncompressed_size f.close() # check lossy compression without shuffle f = Dataset(self.files[3]) size = os.stat(self.files[3]).st_size errmax = (np.abs(data_array-f.variables['data'][:])).max() #print('compressed lossy no shuffle = ',size,' max err = ',errmax) assert f.variables['data'].quantization() == (nsd,'BitGroom') assert errmax < 1.e-3 assert size < 0.35*uncompressed_size f.close() # check lossy compression with shuffle f = Dataset(self.files[4]) size = os.stat(self.files[4]).st_size errmax = (np.abs(data_array-f.variables['data'][:])).max() print('compressed lossy with shuffle and standard quantization = ',size,' max err = ',errmax) assert f.variables['data'].quantization() == (nsd,'BitGroom') assert errmax < 1.e-3 assert size < 0.24*uncompressed_size f.close() # check lossy compression with shuffle and alternate quantization f = Dataset(self.files[5]) size = os.stat(self.files[5]).st_size errmax = (np.abs(data_array-f.variables['data'][:])).max() print('compressed lossy with shuffle and alternate quantization = ',size,' max err = ',errmax) assert f.variables['data'].quantization() == (nsd,'GranularBitRound') assert errmax < 1.e-3 assert size < 0.24*uncompressed_size f.close() # check lossy compression with shuffle and BitRound quantization f = Dataset(self.files[6]) size = os.stat(self.files[6]).st_size errmax = (np.abs(data_array-f.variables['data'][:])).max() print('compressed lossy with shuffle and BitRound quantization = ',size,' max err = ',errmax) assert f.variables['data'].quantization() == (nsb,'BitRound') assert errmax < 1.e-3 assert size < 0.24*uncompressed_size f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_compression_szip.py000066400000000000000000000031731512661643000231020ustar00rootroot00000000000000from numpy.random.mtrand import uniform from netCDF4 import Dataset from numpy.testing import assert_almost_equal import os, tempfile, unittest, sys from filter_availability import has_szip_filter ndim = 100000 filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name datarr = uniform(size=(ndim,)) def write_netcdf(filename,dtype='f8'): nc = Dataset(filename,'w') nc.createDimension('n', ndim) foo = nc.createVariable('data',\ dtype,('n'),compression=None) foo_szip = nc.createVariable('data_szip',\ dtype,('n'),compression='szip',szip_coding='ec',szip_pixels_per_block=32) foo[:] = datarr foo_szip[:] = datarr nc.close() @unittest.skipIf(not has_szip_filter, "szip filter not available") class CompressionTestCase(unittest.TestCase): def setUp(self): self.filename = filename write_netcdf(self.filename) def tearDown(self): # Remove the temporary files os.remove(self.filename) def runTest(self): f = Dataset(self.filename)
assert_almost_equal(datarr,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} assert_almost_equal(datarr,f.variables['data_szip'][:]) dtest = {'zlib': False, 'szip': {'coding': 'ec', 'pixels_per_block': 32}, 'zstd': False, 'bzip2': False, 'blosc': False, 'shuffle': False, 'complevel': 0, 'fletcher32': False} assert f.variables['data_szip'].filters() == dtest f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_compression_zstd.py000066400000000000000000000043111512661643000230740ustar00rootroot00000000000000from typing import TYPE_CHECKING, Any from numpy.random.mtrand import uniform from netCDF4 import Dataset from numpy.testing import assert_almost_equal import os, tempfile, unittest, sys from filter_availability import no_plugins, has_zstd_filter if TYPE_CHECKING: from netCDF4 import CompressionLevel else: CompressionLevel = Any ndim = 100000 filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name array = uniform(size=(ndim,)) def write_netcdf(filename,dtype='f8',complevel: CompressionLevel = 6): nc = Dataset(filename,'w') nc.createDimension('n', ndim) foo = nc.createVariable('data',\ dtype,('n'),compression='zstd',complevel=complevel) foo[:] = array nc.close() @unittest.skipIf(no_plugins or not has_zstd_filter, "zstd filter not available") class CompressionTestCase(unittest.TestCase): def setUp(self): self.filename1 = filename1 self.filename2 = filename2 write_netcdf(self.filename1,complevel=0) # no compression write_netcdf(self.filename2,complevel=4) # with compression def tearDown(self): # Remove the temporary files os.remove(self.filename1) os.remove(self.filename2) def runTest(self): uncompressed_size = os.stat(self.filename1).st_size # check uncompressed data f = Dataset(self.filename1) size = os.stat(self.filename1).st_size assert_almost_equal(array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False} assert_almost_equal(size,uncompressed_size) f.close() # check compressed data. f = Dataset(self.filename2) size = os.stat(self.filename2).st_size assert_almost_equal(array,f.variables['data'][:]) assert f.variables['data'].filters() ==\ {'zlib':False,'szip':False,'zstd':True,'bzip2':False,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False} assert size < 0.96*uncompressed_size f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_create_mem.py000066400000000000000000000017541512661643000216000ustar00rootroot00000000000000import unittest import netCDF4 import numpy as np from numpy.testing import assert_array_equal @unittest.skipIf(not netCDF4.__has_nc_create_mem__, "missing `nc_create_mem`") class TestCreateMem(unittest.TestCase): def test_mem_create(self): def check_inmemory(format): # memory is 'advisory size' - not needed for NETCDF4/HDF5 # but is used for NETCDF3. 
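            # Dataset.close() on an in-memory file opened for writing returns the
            # file's buffer (a memoryview), which can be handed straight back to
            # Dataset(..., memory=buf) to reopen the same bytes read-only.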
nc = netCDF4.Dataset('test.nc','w',memory=1028,format=format) d = nc.createDimension('x',None) v = nc.createVariable('v',np.int32,'x') data = np.arange(5) v[0:5] = data # retrieve memory buffer b = nc.close() # open a new file using this memory buffer nc2 = netCDF4.Dataset('test2.nc','r',memory=b) assert_array_equal(nc2['v'][:],data) nc2.close() check_inmemory('NETCDF3_CLASSIC') check_inmemory('NETCDF4_CLASSIC') if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_dap.py000066400000000000000000000024451512661643000202410ustar00rootroot00000000000000import unittest import netCDF4 import numpy as np from datetime import datetime, timedelta from numpy.testing import assert_array_almost_equal import os # test accessing data over http with opendap. yesterday = datetime.now() - timedelta(days=1) URL = f'http://nomads.ncep.noaa.gov/dods/gfs_1p00/gfs{yesterday:%Y%m%d}/gfs_1p00_00z' URL_https = 'https://www.neracoos.org/erddap/griddap/WW3_EastCoast_latest' varname = 'hgtsfc' data_min = -40; data_max = 5900 varshape = (181, 360) @unittest.skipIf(os.getenv("NO_NET"), "network tests disabled") class DapTestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def runTest(self): """testing access of data over http using opendap""" ncfile = netCDF4.Dataset(URL) assert varname in ncfile.variables.keys() var = ncfile.variables[varname] data = var[0,...] assert data.shape == varshape assert np.abs(data.min()-data_min) < 10 assert np.abs(data.max()-data_max) < 100 ncfile.close() # test https support (linked curl lib must built with openssl support) ncfile = netCDF4.Dataset(URL_https) assert ncfile['hs'].long_name=='Significant Wave Height' ncfile.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_dims.py000066400000000000000000000136411512661643000204310ustar00rootroot00000000000000import sys import unittest import os import tempfile from numpy.random.mtrand import uniform import netCDF4 FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name LAT_NAME="lat" LAT_LEN = 25 LAT_LENG = 50 LON_NAME="lon" LON_LEN = 50 LON_LENG = 100 LEVEL_NAME="level" LEVEL_LEN = None LEVEL_LENG = None TIME_NAME="time" TIME_LEN = None TIME_LENG = None GROUP_NAME='forecasts' VAR_NAME1='temp1' VAR_NAME2='temp2' VAR_NAME3='temp3' VAR_NAME4='temp4' VAR_NAME5='temp5' VAR_TYPE='f8' class DimensionsTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = netCDF4.Dataset(self.file, 'w') lat_dim=f.createDimension(LAT_NAME,LAT_LEN) lon_dim=f.createDimension(LON_NAME,LON_LEN) lev_dim=f.createDimension(LEVEL_NAME,LEVEL_LEN) time_dim=f.createDimension(TIME_NAME,TIME_LEN) # specify dimensions with names fv1 = f.createVariable(VAR_NAME1,VAR_TYPE,(LEVEL_NAME, LAT_NAME, LON_NAME, TIME_NAME)) # specify dimensions with instances fv2 = f.createVariable(VAR_NAME2,VAR_TYPE,(lev_dim,lat_dim,lon_dim,time_dim)) # specify dimensions using a mix of names and instances fv3 = f.createVariable(VAR_NAME3,VAR_TYPE,(lev_dim, LAT_NAME, lon_dim, TIME_NAME)) # single dim instance for name (not in a tuple) fv4 = f.createVariable(VAR_NAME4,VAR_TYPE,time_dim) fv5 = f.createVariable(VAR_NAME5,VAR_TYPE,TIME_NAME) g = f.createGroup(GROUP_NAME) g.createDimension(LAT_NAME,LAT_LENG) g.createDimension(LON_NAME,LON_LENG) # should get dimensions from parent group. 
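        # (createVariable resolves dimension names by searching upward through
        # parent groups, so 'level' and 'time' come from the root group here)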
# (did not work prior to alpha 18) #g.createDimension(LEVEL_NAME,LEVEL_LENG) #g.createDimension(TIME_NAME,TIME_LENG) gv = g.createVariable(VAR_NAME1,VAR_TYPE,(LEVEL_NAME, LAT_NAME, LON_NAME, TIME_NAME)) f.close() def tearDown(self): # Remove the temporary file os.remove(self.file) def runTest(self): """testing dimensions""" # check dimensions in root group. f = netCDF4.Dataset(self.file, 'r+') v1 = f.variables[VAR_NAME1] v2 = f.variables[VAR_NAME2] v3 = f.variables[VAR_NAME3] v4 = f.variables[VAR_NAME4] v5 = f.variables[VAR_NAME5] isunlim = [dim.isunlimited() for dim in f.dimensions.values()] dimlens = [len(dim) for dim in f.dimensions.values()] names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] lens_check = [LAT_LEN, LON_LEN, LEVEL_LEN, TIME_LEN] isunlim = [dimlen == None for dimlen in lens_check] for n,dimlen in enumerate(lens_check): if dimlen is None: lens_check[n] = 0 lensdict = dict(zip(names_check,lens_check)) unlimdict = dict(zip(names_check,isunlim)) # check that dimension names are correct. for name in f.dimensions.keys(): self.assertTrue(name in names_check) for name in v1.dimensions: self.assertTrue(name in names_check) for name in v2.dimensions: self.assertTrue(name in names_check) for name in v3.dimensions: self.assertTrue(name in names_check) self.assertTrue(v4.dimensions[0] == TIME_NAME) self.assertTrue(v5.dimensions[0] == TIME_NAME) # check that dimension lengths are correct. # check that dimension lengths are correct. for name,dim in f.dimensions.items(): self.assertTrue(len(dim) == lensdict[name]) # check that isunlimited() method works. for name,dim in f.dimensions.items(): self.assertTrue(dim.isunlimited() == unlimdict[name]) # add some data to variable along unlimited dims, # make sure length of dimensions change correctly. nadd1 = 2 nadd2 = 4 v1[0:nadd1,:,:,0:nadd2] = uniform(size=(nadd1,LAT_LEN,LON_LEN,nadd2)) lensdict[LEVEL_NAME]=nadd1 lensdict[TIME_NAME]=nadd2 # check that dimension lengths are correct. for name,dim in f.dimensions.items(): self.assertTrue(len(dim) == lensdict[name]) # check dimensions in subgroup. g = f.groups[GROUP_NAME] vg = g.variables[VAR_NAME1] isunlim = [dim.isunlimited() for dim in g.dimensions.values()] dimlens = [len(dim) for dim in g.dimensions.values()] names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] lens_check = [LAT_LENG, LON_LENG, LEVEL_LENG, TIME_LENG] isunlim = [dimlen == None for dimlen in lens_check] for n,dimlen in enumerate(lens_check): if dimlen is None: lens_check[n] = 0 lensdict = dict(zip(names_check,lens_check)) unlimdict = dict(zip(names_check,isunlim)) # check that dimension names are correct. for name in g.dimensions.keys(): self.assertTrue(name in names_check) # check that dimension lengths are correct. for name,dim in g.dimensions.items(): self.assertTrue(len(dim) == lensdict[name]) # check get_dims variable method dim_tuple = vg.get_dims() # some dimensions from parent group dim_tup1 = (f.dimensions['level'],g.dimensions['lat'],\ g.dimensions['lon'],f.dimensions['time']) dim_tup2 = vg.get_dims() assert dim_tup1 == dim_tup2 # check that isunlimited() method works. for name,dim in g.dimensions.items(): self.assertTrue(dim.isunlimited() == unlimdict[name]) # add some data to variable along unlimited dims, # make sure length of dimensions change correctly. 
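        # writing vg[0:nadd1,:,:,0:nadd2] below grows the unlimited 'level' and
        # 'time' dimensions inherited from the root group.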
nadd1 = 8 nadd2 = 4 vg[0:nadd1,:,:,0:nadd2] = uniform(size=(nadd1,LAT_LENG,LON_LENG,nadd2)) lensdict[LEVEL_NAME]=nadd1 lensdict[TIME_NAME]=nadd2 for name,dim in g.dimensions.items(): self.assertTrue(len(dim) == lensdict[name]) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_diskless.py000066400000000000000000000061551512661643000213200ustar00rootroot00000000000000import unittest, os, tempfile import numpy as np from numpy.random.mtrand import uniform from numpy.testing import assert_array_equal, assert_array_almost_equal import netCDF4 # rudimentary test of diskless file capability. # create an n1dim by n2dim by n3dim random array n1dim = 10 n2dim = 73 n3dim = 144 ranarr = 100.*uniform(size=(n1dim,n2dim,n3dim)) ranarr2 = 100.*uniform(size=(n1dim,n2dim,n3dim)) FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=True).name FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name @unittest.skipIf( netCDF4.__netcdf4libversion__ < "4.2.1" or netCDF4.__has_parallel4_support__ or netCDF4.__has_pnetcdf_support__, "no diskless support", ) class DisklessTestCase(unittest.TestCase): def setUp(self): # in memory file, does not exist on disk (closing it # makes data disappear from memory) self.file = FILE_NAME f = netCDF4.Dataset(self.file,'w',diskless=True, persist=False) self.f = f # foo has a single unlimited dimension f.createDimension('n1', n1dim) f.createDimension('n2', n2dim) f.createDimension('n3', n3dim) foo = f.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3')) # write some data to it. foo[0:n1dim-1] = ranarr[:-1,:,:] foo[n1dim-1] = ranarr[-1,:,:] # bar has 2 unlimited dimensions f.createDimension('n4', None) # write some data to it. bar = f.createVariable('data2', ranarr.dtype.str[1:], ('n1','n2','n4')) bar[0:n1dim,:, 0:n3dim] = ranarr2 # in memory file, that is persisted to disk when close method called. self.file2 = FILE_NAME2 f2 = netCDF4.Dataset(self.file2,'w',diskless=True, persist=True) f2.createDimension('n1', n1dim) f2.createDimension('n2', n2dim) f2.createDimension('n3', n3dim) foo = f2.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3')) # write some data to it. foo[0:n1dim-1] = ranarr[:-1,:,:] foo[n1dim-1] = ranarr[-1,:,:] f2.close() def tearDown(self): # Remove the temporary files os.remove(self.file2) self.f.close() def runTest(self): """testing diskless file capability""" foo = self.f.variables['data1'] bar = self.f.variables['data2'] # check shape. self.assertTrue(foo.shape == (n1dim,n2dim,n3dim)) self.assertTrue(bar.shape == (n1dim,n2dim,n3dim)) # check data. assert_array_almost_equal(foo[:], ranarr) assert_array_almost_equal(bar[:], ranarr2) # file does not actually exist on disk assert os.path.isfile(self.file)==False # open persisted file. # first, check that file does actually exist on disk assert os.path.isfile(self.file2)==True f = netCDF4.Dataset(self.file2) foo = f.variables['data1'] # check shape. self.assertTrue(foo.shape == (n1dim,n2dim,n3dim)) # check data. 
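        # everything written before close() must have been flushed to disk,
        # since the second file was opened with persist=True.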
        assert_array_almost_equal(foo[:], ranarr)
        f.close()

if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_endian.py000066400000000000000000000137561512661643000207420ustar00rootroot00000000000000import netCDF4
import numpy as np
import unittest, os, tempfile
from numpy.testing import assert_array_equal, assert_array_almost_equal

data = np.arange(12,dtype='f4').reshape(3,4)
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name

def create_file(file,format,data):
    import warnings
    dataset = netCDF4.Dataset(file,'w',format=format)
    dataset.createDimension('time', None)
    dataset.createDimension('space', 4)
    dims = ('time', 'space')
    little = data.astype('<f4')
    big = data.astype('>f4')
    warnings.simplefilter('ignore') # ignore UserWarnings generated below
    ll = dataset.createVariable('little-little', '<f4', dims)
    lb = dataset.createVariable('little-big', '>f4', dims)
    bl = dataset.createVariable('big-little', '<f4', dims)
    bb = dataset.createVariable('big-big', '>f4', dims)
    ll[:] = little
    lb[:] = big
    bl[:] = little
    bb[:] = big
    dataset.close()

def check_byteswap(file, data):
    # byteswapping is done internally to native endian format
    # when numpy array has non-native byte order. The byteswap was
    # initially done in place, which caused the numpy array to
    # be modified in the calling program. Pull request #555
    # changed the byteswap to a copy, and this test checks
    # to make sure the input numpy array is not modified.
    dataset = netCDF4.Dataset(file,'w')
    dataset.createDimension('time', None)
    dataset.createDimension('space', 4)
    dims = ('time', 'space')
    bl = dataset.createVariable('big-little', np.float32, dims, endian='big')
    data2 = data.copy()
    bl[:] = data
    dataset.close()
    f = netCDF4.Dataset(file)
    bl = f.variables['big-little'][:]
    # check data.
    assert_array_almost_equal(data, data2)
    assert_array_almost_equal(bl, data)
    f.close()

def check_data(file, data):
    f = netCDF4.Dataset(file)
    ll = f.variables['little-little'][:]
    lb = f.variables['little-big'][:]
    bb = f.variables['big-big'][:]
    bl = f.variables['big-little'][:]
    # check data.
    assert_array_almost_equal(ll, data)
    assert_array_almost_equal(lb, data)
    assert_array_almost_equal(bl, data)
    assert_array_almost_equal(bb, data)
    f.close()

def issue310(file):
    mval = 999.; fval = -999
    nc = netCDF4.Dataset(file, "w")
    nc.createDimension('obs', 10)
    if netCDF4.is_native_little:
        endian='big'
    elif netCDF4.is_native_big:
        endian='little'
    else:
        raise ValueError('cannot determine native endianness')
    var_big_endian = nc.createVariable(
        'obs_big_endian', '>f8', ('obs', ),
        endian=endian,
        fill_value=fval,  # type: ignore  # mypy is bad at narrowing endian
    )
    # use default _FillValue
    var_big_endian2 = nc.createVariable(
        'obs_big_endian2', '>f8', ('obs', ),
        endian=endian,  # type: ignore  # mypy is bad at narrowing endian
    )
    # NOTE: missing_value must be written in same byte order
    # as variable, or masked array won't be masked correctly
    # when data is read in.
    var_big_endian.missing_value = mval
    var_big_endian[0]=np.pi
    var_big_endian[1]=mval
    var_big_endian2.missing_value = mval
    var_big_endian2[0]=np.pi
    var_big_endian2[1]=mval
    var_native_endian = nc.createVariable(\
    'obs_native_endian', '<f8', ('obs', ),
    # [... missing in the archive dump: the rest of this call, the remainder of
    # test_endian.py, and the start of the slicing tests ...]
        if np.__version__ > '1.9.0': # fails for old numpy versions
            assert_equal(d1[m], ())
            # Check that no assignment is made
            d1[m] = 0
            assert_equal(d1[:], self.data1)
        # boolean slices, only single items returned.
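        # (netCDF4 applies each boolean array to its own dimension -- orthogonal
        # indexing -- rather than numpy-style pointwise fancy indexing, so iby
        # and ibz below each pick out one index along their axis)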
iby = np.array([True, False, False, False, False, False, False, False,\ False, False]) ibz = np.array([False, True, False, False, False, False, False, False,\ False,False,False]) assert_array_equal(v[:,iby,ibz],self.data[:,0:1,1:2]) # check slicing with unsorted integer sequences # and integer sequences with duplicate elements. v1 = v[:,[1],:]; v2 = v[:,[3],:]; v3 = v[:,[2],:] vcheck = np.concatenate((v1,v2,v3),axis=1) assert_array_equal(vcheck,v[:,[1,3,2],:]) vcheck = np.concatenate((v1,v3,v3),axis=1) assert_array_equal(vcheck,v[:,[1,2,2],:]) # Ellipse assert_array_equal(v[...,::2],self.data[..., ::2]) assert_array_equal(v[...,::-2],self.data[..., ::-2]) assert_array_equal(v[[1,2],...],self.data[[1,2],...]) assert_array_equal(v[0], self.data[0]) # slicing with all False booleans (PR #1197) iby[:] = False data = v[ibx,iby,ibz] assert data.size == 0 f.close() def test_set(self): f = Dataset(self.file, 'a') data = np.arange(xdim*ydim*zdim).reshape((xdim,ydim,zdim)).astype('i4') vu = f.variables['data'] vu[0,:,:] = data[0,:,:] assert_array_equal(vu[0,:,:], data[0,:,:]) vu[1:,:,:] = data[:] assert_array_equal(vu[1:, :, :], data) f.close() def test2unlim(self): """Test with a variable that has two unlimited dimensions.""" f = Dataset(self.file, 'a') f.createDimension('time',None) v = f.createVariable('u2data', 'i2', ('time', 'x', 'y')) xdim = len(f.dimensions['x']) data = np.arange(3*xdim*ydim).reshape((3, xdim, ydim)) v[:] = data assert_equal(v.shape, data.shape) v[3:6, 0:xdim, 0:ydim] = data try: assert_equal(v.shape, (6, xdim, ydim)) except AssertionError: import warnings warnings.warn(""" There seems to be a bug in the netCDF4 or HDF5 library that is installed on your computer. Please upgrade to the latest version to avoid being affected. This only matters if you use more than 1 unlimited dimension.""") raise AssertionError f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_filepath.py000066400000000000000000000022721512661643000212670ustar00rootroot00000000000000import os, sys, shutil import tempfile import unittest import netCDF4 import pathlib @unittest.skipIf(not netCDF4.__has_nc_inq_path__, "missing `nc_inq_path`") class test_filepath(unittest.TestCase): def setUp(self): self.netcdf_file = pathlib.Path(__file__).parent / "netcdf_dummy_file.nc" self.nc = netCDF4.Dataset(self.netcdf_file) def tearDown(self): self.nc.close() def test_filepath(self): assert self.nc.filepath() == str(self.netcdf_file) def test_filepath_with_non_ascii_characters(self): # create nc-file in a filepath using a cp1252 string tmpdir = tempfile.mkdtemp() filepath = os.path.join(tmpdir,b'Pl\xc3\xb6n.nc'.decode('cp1252')) nc = netCDF4.Dataset(filepath,'w',encoding='cp1252') filepatho = nc.filepath(encoding='cp1252') assert filepath == filepatho assert filepath.encode('cp1252') == filepatho.encode('cp1252') nc.close() shutil.rmtree(tmpdir) def test_no_such_file_raises(self): fname = 'not_a_nc_file.nc' with self.assertRaisesRegex(OSError, fname): netCDF4.Dataset(fname, 'r') if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_get_fill_value.py000066400000000000000000000031021512661643000224450ustar00rootroot00000000000000import unittest, os, tempfile import netCDF4 from numpy.testing import assert_array_equal import numpy as np fill_val = np.array(9.9e31) # test Variable.get_fill_value class TestGetFillValue(unittest.TestCase): def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name f = netCDF4.Dataset(self.testfile, 'w') 
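        # create one variable per dtype in netCDF4.default_fillvals so each
        # default fill can be checked on read; 'c'-prefixed entries (presumably
        # the complex types) are skipped since they get no plain default fill here.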
        dim = f.createDimension('x',10)
        for dt in netCDF4.default_fillvals.keys():
            if not dt.startswith('c'):
                v = f.createVariable(dt+'_var',dt,dim)
        v = f.createVariable('float_var',np.float64,dim,fill_value=fill_val)
        # test fill_value='default' option (issue #1374)
        v2 = f.createVariable('float_var2',np.float64,dim,fill_value='default')
        f.close()

    def tearDown(self):
        os.remove(self.testfile)

    def runTest(self):
        f = netCDF4.Dataset(self.testfile, "r")
        # no _FillValue set, test that default fill value returned
        for dt in netCDF4.default_fillvals.keys():
            if not dt.startswith('c'):
                fillval = np.array(netCDF4.default_fillvals[dt])
                if dt == 'S1': fillval = fillval.astype(dt)
                v = f[dt+'_var']
                assert_array_equal(fillval, v.get_fill_value())
        # _FillValue attribute is set.
        v = f['float_var']
        assert_array_equal(fill_val, v.get_fill_value())
        v = f['float_var2']
        assert_array_equal(np.array(netCDF4.default_fillvals['f8']), v._FillValue)
        f.close()

if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_get_variables_by_attributes.py000066400000000000000000000033531512661643000252430ustar00rootroot00000000000000import os
import unittest
import netCDF4

class VariablesByAttributesTests(unittest.TestCase):
    def setUp(self):
        netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc")
        self.nc = netCDF4.Dataset(netcdf_file)

    def test_find_variables_by_single_attribute(self):
        vs = self.nc.get_variables_by_attributes(axis='Z')
        self.assertEqual(len(vs), 1)
        vs = self.nc.get_variables_by_attributes(units='m/s')
        self.assertEqual(len(vs), 4)

    def test_find_variables_by_multiple_attribute(self):
        vs = self.nc.get_variables_by_attributes(axis='Z', units='m')
        self.assertEqual(len(vs), 1)

    def test_find_variables_by_single_lambda(self):
        vs = self.nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])
        self.assertEqual(len(vs), 1)
        vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)
        self.assertEqual(len(vs), 12)

    def test_find_variables_by_multiple_lambdas(self):
        vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None,
                                                 long_name=lambda v: v is not None and 'Upward (w) velocity' in v)
        self.assertEqual(len(vs), 1)

    def test_find_variables_by_attribute_and_lambda(self):
        vs = self.nc.get_variables_by_attributes(units='m/s',
                                                 grid_mapping=lambda v: v is not None)
        self.assertEqual(len(vs), 4)
        vs = self.nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None,
                                                 long_name='Upward (w) velocity')
        self.assertEqual(len(vs), 1)

if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_gold.nc000066400000000000000000006630331512661643000204000ustar00rootroot00000000000000
[binary netCDF4/HDF5 test fixture omitted (~224 KB): a sample NASA GOLD L1C
day-disk radiance file (GOLD_L1C_CHA_DAY_2019_001_07_52_v01_r01_c01) used as
read-only test input; its raw bytes are not reproducible as text]
netcdf4-python-1.7.4rel/test/test_grps.py000066400000000000000000000047271512661643000204550ustar00rootroot00000000000000import sys
import unittest
import os
import tempfile
import netCDF4

# test group creation.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name class Groups2TestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = netCDF4.Dataset(self.file,'w') x = f.createDimension('x',10) # create groups in path if they don't already exist v = f.createVariable('/grouped/data/v',float,('x',)) g = f.groups['grouped'] # create groups underneath 'grouped' v2 = g.createVariable('./data/data2/v2',float,('x',)) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing implicit group and creation and Dataset.__getitem__""" f = netCDF4.Dataset(self.file, 'r') v1 = f['/grouped/data/v'] v2 = ((f.groups['grouped']).groups['data']).variables['v'] g = f['/grouped/data'] v3 = g['data2/v2'] assert v1 == v2 assert g == f.groups['grouped'].groups['data'] assert v3.name == 'v2' f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_issue908.py000066400000000000000000000006321512661643000210620ustar00rootroot00000000000000import netCDF4, unittest import numpy as np import pathlib class Issue908TestCase(unittest.TestCase): def setUp(self): self.nc = netCDF4.Dataset(pathlib.Path(__file__).parent / "CRM032_test1.nc") def tearDown(self): self.nc.close() def runTest(self): data = self.nc['rgrid'][:] assert data.all() is np.ma.masked if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_masked.py000066400000000000000000000155341512661643000207440ustar00rootroot00000000000000import sys import unittest import os import tempfile import numpy as np from numpy import ma from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.random.mtrand import uniform import netCDF4 from numpy.ma import masked_all import pathlib # test automatic conversion of masked arrays, and # packing/unpacking of short ints. # create an n1dim by n2dim random ranarr. FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name ndim = 10 ranarr = 100.*uniform(size=(ndim)) ranarr2 = 100.*uniform(size=(ndim)) # used for checking vector missing_values arr3 = np.linspace(0,9,ndim) mask = np.zeros(ndim,np.bool_); mask[-1]=True; mask[-2]=True marr3 = np.ma.array(arr3, mask=mask, dtype=np.int32) packeddata = 10.*uniform(size=(ndim)) missing_value = -9999. missing_value2 = np.nan missing_value3 = [8,9] ranarr[::2] = missing_value ranarr2[::2] = missing_value2 np.seterr(invalid='ignore') # silence warnings from ma.masked_values maskedarr = ma.masked_values(ranarr,missing_value) #maskedarr2 = ma.masked_values(ranarr2,missing_value2) maskedarr2 = ma.masked_invalid(ranarr2) scale_factor = (packeddata.max()-packeddata.min())/(2.*32766.) 
add_offset = 0.5*(packeddata.max()+packeddata.min()) packeddata2 = np.around((packeddata-add_offset)/scale_factor).astype('i2') class PrimitiveTypesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME self.file2 = FILE_NAME2 self.file3 = FILE_NAME3 file = netCDF4.Dataset(self.file,'w') file.createDimension('n', ndim) foo = file.createVariable('maskeddata', 'f8', ('n',)) foo2 = file.createVariable('maskeddata2', 'f8', ('n',)) foo3 = file.createVariable('maskeddata3', 'i4', ('n',)) foo.missing_value = missing_value foo.set_auto_maskandscale(True) foo2.missing_value = missing_value2 foo2.set_auto_maskandscale(True) foo3.missing_value = missing_value3 foo3.set_auto_maskandscale(True) bar = file.createVariable('packeddata', 'i2', ('n',)) bar.set_auto_maskandscale(True) bar.scale_factor = scale_factor bar.add_offset = add_offset foo[:] = maskedarr foo2[:] = maskedarr2 foo3[:] = arr3 bar[:] = packeddata # added to test fix to issue 46 doh = file.createVariable('packeddata2','i2','n') doh.scale_factor = 0.1 doh.add_offset = 0. doh[0] = 1.1 # added to test fix to issue 381 doh2 = file.createVariable('packeddata3','i2','n') doh2.add_offset = 1. doh2[0] = 1. # added test for issue 515 file.createDimension('x',1) v = file.createVariable('v',np.float64,'x',fill_value=-9999) file.close() # issue #972: when auto_fill off byte arrays (u1,i1) should # not be masked, but other datatypes should. dataset = netCDF4.Dataset(self.file2, "w") dataset.set_fill_off() dim = dataset.createDimension("dim", 10) var1 = dataset.createVariable("var1", "f8", (dim.name,)) var1[:] = masked_all((10,), "f8") var2 = dataset.createVariable("var2", "u1", (dim.name,)) var2[:] = masked_all((10,), "u1") dataset.close() # issue #1152: if missing_value is a string that can't # be cast to the variable type, issue a warning instead # of raising an exception when auto-converted slice to a # masked array with netCDF4.Dataset(pathlib.Path(__file__).parent / "issue1152.nc") as dataset: with self.assertWarns(UserWarning): data = dataset['v'][:] # issue #1271 (mask is ignored when assigning bool array to uint8 var) ds = netCDF4.Dataset(self.file3, "w") dim = ds.createDimension('time', 48) var = ds.createVariable('blaat', np.uint8, ('time',), zlib=True, complevel=4, shuffle=True, fletcher32=True, fill_value=240) mask = np.full((48,), False) for x in range(30): mask[x] = True mama = ma.array(np.full((48,), True), mask=mask) var[:] = mama ds.close() def tearDown(self): # Remove the temporary files os.remove(self.file) os.remove(self.file2) def runTest(self): """testing auto-conversion of masked arrays and packed integers""" file = netCDF4.Dataset(self.file) datamasked = file.variables['maskeddata'] datamasked2 = file.variables['maskeddata2'] datamasked3 = file.variables['maskeddata3'] datapacked = file.variables['packeddata'] datapacked2 = file.variables['packeddata2'] datapacked3 = file.variables['packeddata3'] # check missing_value, scale_factor and add_offset attributes. assert datamasked.missing_value == missing_value assert datapacked.scale_factor == scale_factor assert datapacked.add_offset == add_offset # no auto-conversion. 
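        # with maskandscale disabled, reads return the raw stored values:
        # packed shorts for 'packeddata', unmasked doubles for the masked variables.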
datamasked.set_auto_maskandscale(False) datamasked2.set_auto_maskandscale(False) datapacked.set_auto_maskandscale(False) assert_array_equal(datapacked[:],packeddata2) assert_array_equal(datamasked3[:],marr3) assert_array_almost_equal(datamasked[:],ranarr) assert_array_almost_equal(datamasked2[:],ranarr2) # auto-conversion datamasked.set_auto_maskandscale(True) datamasked2.set_auto_maskandscale(True) datapacked.set_auto_maskandscale(True) datapacked2.set_auto_maskandscale(False) assert_array_almost_equal(datamasked[:].filled(),ranarr) assert_array_almost_equal(datamasked2[:].filled(),ranarr2) assert_array_almost_equal(datapacked[:],packeddata,decimal=4) assert datapacked3[:].dtype == np.float64 # added to test fix to issue 46 (result before r865 was 10) assert_array_equal(datapacked2[0],11) # added test for issue 515 assert file['v'][0] is np.ma.masked file.close() # issue 766 np.seterr(invalid='raise') f = netCDF4.Dataset(self.file, 'w') f.createDimension('dimension', 2) f.createVariable('variable', np.float32, dimensions=('dimension',)) f['variable'][:] = np.nan data = f['variable'][:] # should not raise an error f.close() # issue #972 dataset = netCDF4.Dataset(self.file2, "r") var1 = dataset.variables["var1"] var2 = dataset.variables["var2"] assert var1[:].mask.all() assert var2[:].mask.any() == False dataset.close() # issue #1271 ds = netCDF4.Dataset(self.file3,"r") var = ds['blaat'] assert np.count_nonzero(var[:].mask) == 30 ds.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_masked2.py000077500000000000000000000101371512661643000210230ustar00rootroot00000000000000import sys import unittest import os import tempfile import numpy as np from numpy import ma, seterr from numpy.testing import assert_array_equal, assert_array_almost_equal from netCDF4 import Dataset, default_fillvals seterr(over='ignore') # don't print warning for overflow errors # test automatic conversion of masked arrays, and # packing/unpacking of short ints. FILE_NAME1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name FILE_NAME3 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name datacheck1 =\ ma.array([0,5000.0,4000.0,0],dtype=np.float64,mask=[True,False,False,True]) datacheck2 =\ ma.array([3000.0,5000.0,4000.0,0],dtype=np.float64,mask=[False,False,False,True]) datacheck3 =\ ma.array([3000.0,5000.0,0,2000.0],dtype=np.float64,mask=[False,False,True,False]) mask = [False,True,False,False] datacheck4 = ma.array([1.5625,0,3.75,4.125],mask=mask,dtype=np.float32) fillval = default_fillvals[datacheck4.dtype.str[1:]] datacheck5 = np.array([1.5625,fillval,3.75,4.125],dtype=np.float32) class PrimitiveTypesTestCase(unittest.TestCase): def setUp(self): self.files = [FILE_NAME1] f = Dataset(FILE_NAME1,'w') x = f.createDimension('x',None) v = f.createVariable('v',np.int16,'x') v.scale_factor = np.array(1,np.float32) v.add_offset = np.array(32066,np.float32) v.missing_value = np.array(-9999,v.dtype) #v[0] not set, will be equal to _FillValue v[1]=5000 v[2]=4000 v[3]=v.missing_value f.close() self.files.append(FILE_NAME2) f = Dataset(FILE_NAME1,'r') # create a new file, copy data, but change missing value and # scale factor offset. 
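        # (the data are unpacked on read from file 1 and repacked on write
        # using file 2's new scale/offset and missing value, so the unpacked
        # values must survive the round trip)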
f2 = Dataset(FILE_NAME2,'w') a = f2.createDimension('a',None) b = f2.createVariable('b',np.int16,'a') b.scale_factor = np.array(10.,np.float32) b.add_offset = np.array(0,np.float32) b.missing_value = np.array(9999,v.dtype) b[:] = f.variables['v'][:] f.close() f2.close() self.files.append(FILE_NAME3) f = Dataset(FILE_NAME3,'w') x = f.createDimension('x',None) # create variable with lossy compression v = f.createVariable('v',np.float32,'x',zlib=True,least_significant_digit=1) # assign masked array to that variable with one missing value. data =\ ma.MaskedArray([1.5678,99.99,3.75145,4.127654],mask=np.array([False,True,False,False],np.bool_)) v[:] = data f.close() def tearDown(self): # Remove the temporary files for f in self.files: os.remove(f) def runTest(self): """testing auto-conversion of masked arrays and packed integers""" f = Dataset(self.files[0]) data = f.variables['v'][:] assert_array_almost_equal(data,datacheck1) f.close() f = Dataset(self.files[1]) data = f.variables['b'][:] assert_array_almost_equal(data,datacheck1) f.close() f = Dataset(self.files[0],'a') # change first element from _FillValue to actual data. v = f.variables['v'] v[0]=3000 f.close() f = Dataset(self.files[0],'r') # read data back in, check. data = f.variables['v'][:] assert_array_almost_equal(data,datacheck2) f.close() f = Dataset(self.files[0],'a') # change 3rd element to missing, 4 element to valid data. v = f.variables['v'] data = v[:] v[2]=-9999 v[3]=2000 f.close() f = Dataset(self.files[0],'r') # read data back in, check. data = f.variables['v'][:] assert_array_almost_equal(data,datacheck3) f.close() # check that masked arrays are handled correctly when lossy compression # is used. f = Dataset(self.files[2],'r') data = f.variables['v'][:] assert_array_almost_equal(data,datacheck4) assert_array_almost_equal(data.filled(),datacheck5) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_masked3.py000077500000000000000000000126661512661643000210350ustar00rootroot00000000000000import unittest import os import tempfile import numpy as np from numpy import ma from numpy.testing import assert_array_almost_equal from netCDF4 import Dataset, default_fillvals # Test automatic conversion of masked arrays (set_auto_mask()) class SetAutoMaskTestBase(unittest.TestCase): """Base object for tests checking the functionality of set_auto_mask()""" def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name self.fillval = default_fillvals["i2"] self.v = np.array([self.fillval, 5, 4, -9999], dtype = "i2") self.v_ma = ma.MaskedArray([self.fillval, 5, 4, -9999], dtype = "i2", mask = [True, False, False, True]) self.scale_factor = 10. self.add_offset = 5. 
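        # expected results after CF-style unpacking: raw*scale_factor + add_offset,
        # with and without the mask applied.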
        self.v_scaled = self.v * self.scale_factor + self.add_offset
        self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset

        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        v = f.createVariable('v', "i2", 'x')
        v.missing_value = np.array(-9999, v.dtype)
        # v[0] not set, will be equal to _FillValue
        v[1] = self.v[1]
        v[2] = self.v[2]
        v[3] = v.missing_value
        f.close()

    def tearDown(self):
        os.remove(self.testfile)


class SetAutoMaskFalse(SetAutoMaskTestBase):

    def test_unscaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(False)"""
        f = Dataset(self.testfile, "r")
        f.variables["v"].set_auto_mask(False)
        v = f.variables["v"][:]
        self.assertEqual(v.dtype, "i2")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(not isinstance(v, ma.masked_array))
        assert_array_almost_equal(v, self.v)
        f.close()

    def test_scaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(False) with scaling"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.close()

        # Note: Scaling variables is default if scale_factor and/or add_offset are present
        f = Dataset(self.testfile, "r")
        f.variables["v"].set_auto_mask(False)
        v = f.variables["v"][:]
        self.assertEqual(v.dtype, "f8")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(not isinstance(v, ma.masked_array))
        assert_array_almost_equal(v, self.v_scaled)
        f.close()


class SetAutoMaskTrue(SetAutoMaskTestBase):

    def test_unscaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(True)"""
        f = Dataset(self.testfile)
        f.variables["v"].set_auto_mask(True)  # The default anyway...
        v_ma = f.variables['v'][:]
        self.assertEqual(v_ma.dtype, "i2")
        self.assertTrue(isinstance(v_ma, np.ndarray))
        self.assertTrue(isinstance(v_ma, ma.masked_array))
        assert_array_almost_equal(v_ma, self.v_ma)
        f.close()

    def test_scaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(True) with scaling"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.close()

        # Note: Scaling variables is default if scale_factor and/or add_offset are present
        f = Dataset(self.testfile)
        f.variables["v"].set_auto_mask(True)  # The default anyway...
        v_ma = f.variables['v'][:]
        self.assertEqual(v_ma.dtype, "f8")
        self.assertTrue(isinstance(v_ma, np.ndarray))
        self.assertTrue(isinstance(v_ma, ma.masked_array))
        assert_array_almost_equal(v_ma, self.v_ma_scaled)
        f.close()


class GlobalSetAutoMaskTest(unittest.TestCase):

    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name

        f = Dataset(self.testfile, 'w')
        grp1 = f.createGroup('Group1')
        grp2 = f.createGroup('Group2')
        f.createGroup('Group3')  # empty group
        f.createVariable('var0', "i2", ())
        grp1.createVariable('var1', 'f8', ())
        grp2.createVariable('var2', 'f4', ())
        f.close()

    def tearDown(self):
        os.remove(self.testfile)

    def runTest(self):
        # Note: The default behaviour is to have both auto-masking and auto-scaling activated.
        # This is already tested in test_scaled.py, so no need to repeat here. Instead,
        # disable auto-masking and auto-scaling altogether.
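        # (Dataset.set_auto_maskandscale propagates to every variable in the
        # dataset and in all of its groups, which is what the per-variable
        # scale/mask assertions below rely on.)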
        f = Dataset(self.testfile, "r")

        # Neither scaling nor masking enabled
        f.set_auto_maskandscale(False)
        v0 = f.variables['var0']
        v1 = f.groups['Group1'].variables['var1']
        v2 = f.groups['Group2'].variables['var2']
        self.assertFalse(v0.scale)
        self.assertFalse(v0.mask)
        self.assertFalse(v1.scale)
        self.assertFalse(v1.mask)
        self.assertFalse(v2.scale)
        self.assertFalse(v2.mask)

        # No auto-masking, but auto-scaling
        f.set_auto_maskandscale(True)
        f.set_auto_mask(False)
        self.assertTrue(v0.scale)
        self.assertFalse(v0.mask)
        self.assertTrue(v1.scale)
        self.assertFalse(v1.mask)
        self.assertTrue(v2.scale)
        self.assertFalse(v2.mask)
        f.close()


if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_masked4.py000077500000000000000000000101631512661643000210240ustar00rootroot00000000000000import unittest
import os
import tempfile
import pathlib
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals

# Test use of valid_min/valid_max/valid_range in generation of masked arrays

class SetValidMinMax(unittest.TestCase):

    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name

        self.valid_min = -32765
        self.valid_max = 32765
        self.valid_range = [self.valid_min,self.valid_max]
        self.v = np.array([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2")
        self.v_ma = ma.MaskedArray([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2",
                                   mask = [True, False, False, True])

        self.scale_factor = 10.
        self.add_offset = 5.

        self.v_scaled = self.v * self.scale_factor + self.add_offset
        self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset

        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        v = f.createVariable('v', "i2", 'x')
        v2 = f.createVariable('v2', "i2", 'x')
        v3 = f.createVariable('v3', "i2", 'x', fill_value=self.valid_min)

        v.missing_value = np.array(32767, v.dtype)
        v.valid_min = np.array(self.valid_min, v.dtype)
        v.valid_max = np.array(self.valid_max, v.dtype)
        v.valid_range = np.array(0, v.dtype) # issue 1013, this is wrong but should not raise an exception
        v[0] = self.valid_min-1
        v[1] = self.v[1]
        v[2] = self.v[2]
        v[3] = self.valid_max+1

        v2.missing_value = np.array(32767, v.dtype)
        v2.valid_range = np.array(self.valid_range, v.dtype)
        v2[0] = self.valid_range[0]-1
        v2[1] = self.v[1]
        v2[2] = self.v[2]
        v2[3] = self.valid_range[1]+1

        v3.missing_value = np.array(32767, v.dtype)
        v3.valid_max = np.array(self.valid_max, v.dtype)
        # _FillValue should act as valid_min
        v3[0] = v3._FillValue-1
        v3[1] = self.v[1]
        v3[2] = self.v[2]
        v3[3] = self.valid_max+1

        f.close()

    def tearDown(self):
        os.remove(self.testfile)

    def test_scaled(self):
        """Testing auto-conversion of masked arrays"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.variables["v2"].scale_factor = self.scale_factor
        f.variables["v2"].add_offset = self.add_offset
        f.close()

        f = Dataset(self.testfile, "r")
        v = f.variables["v"][:]
        v2 = f.variables["v2"][:]
        v3 = f.variables["v3"][:]
        self.assertEqual(v.dtype, "f8")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(isinstance(v, ma.masked_array))
        assert_array_almost_equal(v, self.v_scaled)
        self.assertEqual(v2.dtype, "f8")
        self.assertTrue(isinstance(v2, np.ndarray))
        self.assertTrue(isinstance(v2, ma.masked_array))
        assert_array_almost_equal(v2, self.v_scaled)
        self.assertTrue(np.all(self.v_ma.mask == v.mask))
        self.assertTrue(np.all(self.v_ma.mask == v2.mask))
        # treating _FillValue
as valid_min/valid_max was # too surprising, revert to old behaviour (issue #761) #self.assertTrue(np.all(self.v_ma.mask == v3.mask)) # check that underlying data is same as in netcdf file v = f.variables['v'] v.set_auto_scale(False) v = v[:] self.assertTrue(np.all(self.v == v.data)) f.close() # issue 672 with Dataset(pathlib.Path(__file__).parent / "issue672.nc") as f: field = 'azi_angle_trip' v = f.variables[field] data1 = v[:] v.set_auto_scale(False) data2 = v[:] v.set_auto_maskandscale(False) data3 = v[:] assert data1[(data3 < v.valid_min)].mask.sum() == 12 assert data2[(data3 < v.valid_min)].mask.sum() ==\ data1[(data3 < v.valid_min)].mask.sum() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_masked5.py000077500000000000000000000033751512661643000210340ustar00rootroot00000000000000import unittest import os import tempfile import numpy as np from numpy import ma from numpy.testing import assert_array_equal from netCDF4 import Dataset, __netcdf4libversion__ # Test use of vector of missing values. class VectorMissingValues(unittest.TestCase): def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name self.missing_values = [-999,999,0] self.v = np.array([-999,0,1,2,3,999], dtype = "i2") self.v_ma = ma.MaskedArray([-1,0,1,2,3,4], dtype = "i2", \ mask = [True, True, False, False, False, True]) f = Dataset(self.testfile, 'w') d = f.createDimension('x',6) v = f.createVariable('v', "i2", 'x') # issue 730: set fill_value for vlen str vars v2 = f.createVariable('v2', str, 'x', fill_value='') v.missing_value = self.missing_values v[:] = self.v v2[0]='first' f.close() def tearDown(self): os.remove(self.testfile) def test_scaled(self): """Testing auto-conversion of masked arrays""" f = Dataset(self.testfile) v = f.variables["v"] v2 = f.variables["v2"] self.assertTrue(isinstance(v[:], ma.masked_array)) assert_array_equal(v[:], self.v_ma) assert_array_equal(v[2],self.v[2]) # issue #624. v.set_auto_mask(False) self.assertTrue(isinstance(v[:], np.ndarray)) assert_array_equal(v[:], self.v) # issue 730 # this part fails with netcdf 4.1.3 # a bug in vlen strings? if __netcdf4libversion__ >= '4.4.0': assert v2[0] == 'first' assert v2[1] == '' f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_masked6.py000066400000000000000000000070571512661643000210330ustar00rootroot00000000000000import unittest import os import tempfile import numpy as np from numpy import ma from numpy.testing import assert_array_almost_equal from netCDF4 import Dataset # Test automatic conversion of masked arrays (set_always_mask()) class SetAlwaysMaskTestBase(unittest.TestCase): """Base object for tests checking the functionality of set_always_mask()""" def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name self.v = np.array([4, 3, 2, 1], dtype="i2") self.w = np.ma.MaskedArray([-1, -2, -3, -4], mask=[False, True, False, False], dtype="i2") f = Dataset(self.testfile, 'w') _ = f.createDimension('x', None) v = f.createVariable('v', "i2", 'x') w = f.createVariable('w', "i2", 'x') v[...] = self.v w[...] = self.w f.close() def tearDown(self): os.remove(self.testfile) class SetAlwaysMaskTrue(SetAlwaysMaskTestBase): def test_always_mask(self): """Testing auto-conversion of masked arrays with no missing values to regular arrays.""" f = Dataset(self.testfile) f.variables["v"].set_always_mask(True) # The default anyway... 
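        # (with always_mask=True, a masked array is returned even when no
        # values are actually masked, as checked for 'v' below)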
        v = f.variables['v'][:]
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(isinstance(v, ma.masked_array))
        assert_array_almost_equal(v, self.v)

        w = f.variables['w'][:]
        self.assertTrue(isinstance(w, np.ndarray))
        self.assertTrue(isinstance(w, ma.masked_array))
        assert_array_almost_equal(w, self.w)
        f.close()


class SetAlwaysMaskFalse(SetAlwaysMaskTestBase):

    def test_always_mask(self):
        """Testing auto-conversion of masked arrays with no missing values to regular arrays."""
        f = Dataset(self.testfile)
        f.variables["v"].set_always_mask(False)

        v = f.variables['v'][:]
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertFalse(isinstance(v, ma.masked_array))
        assert_array_almost_equal(v, self.v)

        w = f.variables['w'][:]
        self.assertTrue(isinstance(w, np.ndarray))
        self.assertTrue(isinstance(w, ma.masked_array))
        assert_array_almost_equal(w, self.w)
        f.close()


class GlobalSetAlwaysMaskTest(unittest.TestCase):

    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name

        f = Dataset(self.testfile, 'w')
        grp1 = f.createGroup('Group1')
        grp2 = f.createGroup('Group2')
        f.createGroup('Group3')  # empty group
        f.createVariable('var0', "i2", ())
        grp1.createVariable('var1', 'f8', ())
        grp2.createVariable('var2', 'f4', ())
        f.close()

    def tearDown(self):
        os.remove(self.testfile)

    def runTest(self):
        # Note: The default behaviour is to always return masked
        # arrays, which is already tested elsewhere.
        f = Dataset(self.testfile, "r")

        # Always return masked arrays, even when nothing is masked
        f.set_always_mask(True)
        v0 = f.variables['var0']
        v1 = f.groups['Group1'].variables['var1']
        v2 = f.groups['Group2'].variables['var2']
        self.assertTrue(v0.always_mask)
        self.assertTrue(v1.always_mask)
        self.assertTrue(v2.always_mask)

        # Return regular numpy arrays when nothing is masked
        f.set_always_mask(False)
        self.assertFalse(v0.always_mask)
        self.assertFalse(v1.always_mask)
        self.assertFalse(v2.always_mask)
        f.close()


if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_multifile.py000066400000000000000000000143201512661643000214620ustar00rootroot00000000000000from netCDF4 import Dataset, MFDataset, MFTime
import numpy as np
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal
from numpy import ma
import tempfile, unittest, os, datetime
import cftime
from packaging.version import Version

nx=100; ydim=5; zdim=10
nfiles = 10
ninc = nx/nfiles
files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)]
data = randint(0,10,size=(nx,ydim,zdim))
missval = 99
data[::10] = missval
data = ma.masked_values(data,missval)

class VariablesTestCase(unittest.TestCase):

    def setUp(self):
        self.files = files
        for nfile,file in enumerate(self.files):
            f = Dataset(file,'w',format='NETCDF4_CLASSIC')
            f.createDimension('x',None)
            f.createDimension('y',ydim)
            f.createDimension('z',zdim)
            f.history = 'created today'

            x = f.createVariable('x','i',('x',))
            x.units = 'zlotys'

            dat = f.createVariable('data','i',('x','y','z',))
            dat.long_name = 'phony data'
            dat.missing_value = missval

            nx1 = int(nfile*ninc); nx2 = int(ninc*(nfile+1))
            #x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1))
            x[:] = np.arange(nfile*ninc,ninc*(nfile+1))

            #dat[0:ninc] = data[nx1:nx2]
            dat[:] = data[nx1:nx2]
            f.close()

    def tearDown(self):
        # Remove the temporary files
        for file in self.files:
            os.remove(file)

    def runTest(self):
        """testing multi-file dataset access"""
        f = MFDataset(self.files,check=True)
        f.set_auto_maskandscale(True) # issue570
        f.set_always_mask(False)
        assert f.history == 'created today'
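        # (MFDataset exposes the individual files as one read-only dataset,
        # aggregated along the unlimited dimension -- 'x' here)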
assert_array_equal(np.arange(0,nx),f.variables['x'][:]) varin = f.variables['data'] datin = varin[:] assert isinstance(data, np.ma.masked_array) assert_array_equal(datin.mask,data.mask) varin.set_auto_maskandscale(False) data2 = data.filled() assert varin.long_name == 'phony data' assert len(varin) == nx assert varin.shape == (nx,ydim,zdim) assert varin.dimensions == ('x','y','z') assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8]) assert varin[0,0,0] == data2[0,0,0] assert_array_equal(varin[:],data2) assert getattr(varin,'nonexistantatt',None) == None f.close() # test master_file kwarg (issue #835). f = MFDataset(self.files,master_file=self.files[-1],check=True) assert_array_equal(np.arange(0,nx),f.variables['x'][:]) varin = f.variables['data'] assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8]) f.close() # testing multi-file get_variables_by_attributes f = MFDataset(self.files,check=True) assert f.get_variables_by_attributes(axis='T') == [] assert f.get_variables_by_attributes(units='zlotys')[0] == f['x'] assert f.isopen() f.close() assert not f.isopen() class NonuniformTimeTestCase(unittest.TestCase): ninc = 365 def setUp(self): self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)] for nfile,file in enumerate(self.files): f = Dataset(file,'w',format='NETCDF4_CLASSIC') f.createDimension('time',None) f.createDimension('y',ydim) f.createDimension('z',zdim) f.history = 'created today' time = f.createVariable('time', 'f', ('time', )) #time.units = 'days since {0}-01-01'.format(1979+nfile) yr = 1979+nfile time.units = 'days since %s-01-01' % yr # Do not set the calendar attribute on the created files to test calendar # overload. # time.calendar = 'standard' x = f.createVariable('x','f',('time', 'y', 'z')) x.units = 'potatoes per square mile' nx1 = self.ninc*nfile; nx2 = self.ninc*(nfile+1) time[:] = np.arange(self.ninc) x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim)) f.close() def tearDown(self): # Remove the temporary files for file in self.files: os.remove(file) def runTest(self): # The test files have no calendar attribute on the time variable. calendar = 'standard' # Get the real dates dates = [] for file in self.files: ds = Dataset(file) t = ds.variables['time'] dates.extend(cftime.num2date(t[:], t.units, calendar)) ds.close() # Compare with the MF dates ds = MFDataset(self.files,check=True) t = ds.variables['time'] T = MFTime(t, calendar=calendar) assert_equal(T.calendar, calendar) assert_equal(len(T), len(t)) assert_equal(T.shape, t.shape) assert_equal(T.dimensions, t.dimensions) assert_equal(T.typecode(), t.typecode()) # skip this until cftime pull request #55 is in a released # version (1.0.1?). Otherwise, fix for issue #808 breaks this if Version(cftime.__version__) >= Version('1.0.1'): assert_array_equal(cftime.num2date(T[:], T.units, T.calendar), dates) assert_equal(cftime.date2index(datetime.datetime(1980, 1, 2), T), 366) ds.close() # Test exception is raised when no calendar attribute is available on the # time variable. with MFDataset(self.files, check=True) as ds: with self.assertRaises(ValueError): MFTime(ds.variables['time']) # Test exception is raised when the calendar attribute is different on the # variables. First, add calendar attributes to file. Note this will modify # the files inplace. 
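        # (MFTime needs a single common calendar to map the per-file time
        # values onto shared units; with conflicting calendar attributes that
        # mapping is ill-defined, hence the expected ValueError)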
calendars = ['standard', 'gregorian'] for idx, file in enumerate(self.files): with Dataset(file, 'a') as ds: ds.variables['time'].calendar = calendars[idx] with MFDataset(self.files, check=True) as ds: with self.assertRaises(ValueError): MFTime(ds.variables['time']) if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_multifile2.py000066400000000000000000000115541512661643000215520ustar00rootroot00000000000000from netCDF4 import Dataset, MFDataset, MFTime import numpy as np from numpy.random import seed, randint from numpy.testing import assert_array_equal, assert_equal from numpy import ma import tempfile, unittest, os, datetime import cftime from packaging.version import Version nx=100; ydim=5; zdim=10 nfiles = 10 ninc = nx/nfiles files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(nfiles)] data = randint(0,10,size=(nx,ydim,zdim)) missval = 99 data[::10] = missval data = ma.masked_values(data,missval) class VariablesTestCase(unittest.TestCase): def setUp(self): self.files = files for nfile,file in enumerate(self.files): f = Dataset(file,'w',format='NETCDF4_CLASSIC') #f.createDimension('x',None) f.createDimension('x',int(ninc)) f.createDimension('y',ydim) f.createDimension('z',zdim) f.history = 'created today' x = f.createVariable('x','i',('x',)) x.units = 'zlotys' dat = f.createVariable('data','i',('x','y','z',)) dat.long_name = 'phony data' dat.missing_value = missval nx1 = int(nfile*ninc); nx2 = int(ninc*(nfile+1)) #x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1)) x[:] = np.arange(nfile*ninc,ninc*(nfile+1)) #dat[0:ninc] = data[nx1:nx2] dat[:] = data[nx1:nx2] f.close() def tearDown(self): # Remove the temporary files for file in self.files: os.remove(file) def runTest(self): """testing multi-file dataset access""" # specify the aggregation dim (not necessarily unlimited) f = MFDataset(self.files,aggdim='x',check=True) assert f.history == 'created today' assert_array_equal(np.arange(0,nx),f.variables['x'][:]) varin = f.variables['data'] datin = varin[:] assert isinstance(data, np.ma.masked_array) assert_array_equal(datin.mask,data.mask) varin.set_auto_maskandscale(False) data2 = data.filled() assert varin.long_name == 'phony data' assert len(varin) == nx assert varin.shape == (nx,ydim,zdim) assert varin.dimensions == ('x','y','z') assert_array_equal(varin[4:-4:4,3:5,2:8],data2[4:-4:4,3:5,2:8]) assert varin[0,0,0] == data2[0,0,0] assert_array_equal(varin[:],data2) assert getattr(varin,'nonexistantatt',None) == None f.close() class NonuniformTimeTestCase(unittest.TestCase): ninc = 365 def setUp(self): self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)] for nfile,file in enumerate(self.files): f = Dataset(file,'w',format='NETCDF4_CLASSIC') f.createDimension('time',None) f.createDimension('y',ydim) f.createDimension('z',zdim) f.history = 'created today' time = f.createVariable('time', 'f', ('time', )) #time.units = 'days since {0}-01-01'.format(1979+nfile) yr = 1979+nfile time.units = 'days since %s-01-01' % yr time.calendar = 'standard' x = f.createVariable('x','f',('time', 'y', 'z')) x.units = 'potatoes per square mile' nx1 = self.ninc*nfile; nx2 = self.ninc*(nfile+1) time[:] = np.arange(self.ninc) x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim)) f.close() def tearDown(self): # Remove the temporary files for file in self.files: os.remove(file) def runTest(self): # Get the real dates # skip this until cftime pull request #55 is in a released # version 
(1.0.1?). Otherwise, fix for issue #808 breaks this dates = [] if Version(cftime.__version__) >= Version('1.0.1'): for file in self.files: f = Dataset(file) t = f.variables['time'] dates.extend(cftime.num2date(t[:], t.units, t.calendar)) f.close() # Compare with the MF dates f = MFDataset(self.files,check=True) t = f.variables['time'] mfdates = cftime.num2date(t[:], t.units, t.calendar) T = MFTime(t) assert_equal(len(T), len(t)) assert_equal(T.shape, t.shape) assert_equal(T.dimensions, t.dimensions) assert_equal(T.typecode(), t.typecode()) # skip this until cftime pull request #55 is in a released # version (1.0.1?). Otherwise, fix for issue #808 breaks this if Version(cftime.__version__) >= Version('1.0.1'): assert_array_equal(cftime.num2date(T[:], T.units, T.calendar), dates) assert_equal(cftime.date2index(datetime.datetime(1980, 1, 2), T), 366) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_multiple_open_close.py000066400000000000000000000035711512661643000235370ustar00rootroot00000000000000import os import tracemalloc import unittest import netCDF4 @unittest.skipUnless( os.getenv("MEMORY_LEAK_TEST"), "computationally intensive test not enabled" ) class MultipleVariablesByAttributesCallsTests(unittest.TestCase): def test_multiple_calls(self): netcdf_file = os.path.join(os.path.dirname(__file__), "netcdf_dummy_file.nc") tracemalloc.start() snapshot = tracemalloc.take_snapshot() k_times = 10 for _k in range(k_times): nc = netCDF4.Dataset(netcdf_file) vs = nc.get_variables_by_attributes(axis='Z') self.assertEqual(len(vs), 1) vs = nc.get_variables_by_attributes(units='m/s') self.assertEqual(len(vs), 4) vs = nc.get_variables_by_attributes(axis='Z', units='m') self.assertEqual(len(vs), 1) vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T']) self.assertEqual(len(vs), 1) vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None) self.assertEqual(len(vs), 12) vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name=lambda v: v is not None and 'Upward (w) velocity' in v) self.assertEqual(len(vs), 1) vs = nc.get_variables_by_attributes(units='m/s', grid_mapping=lambda v: v is not None) self.assertEqual(len(vs), 4) vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None, long_name='Upward (w) velocity') self.assertEqual(len(vs), 1) nc.close() stats = tracemalloc.take_snapshot().compare_to(snapshot, 'filename') tracemalloc.stop() print("[ Top 10 differences ]") for stat in stats[:10]: print(stat) if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_ncrc.py000066400000000000000000000007221512661643000204160ustar00rootroot00000000000000import unittest import netCDF4 from netCDF4 import __has_nc_rc_set__ class NCRCTestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def runTest(self): """test rc_get, rc_set functions""" if __has_nc_rc_set__: netCDF4.rc_set('foo','bar') assert netCDF4.rc_get('foo') == 'bar' assert netCDF4.rc_get('bar') == None if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_no_iter_contains.py000066400000000000000000000021071512661643000230250ustar00rootroot00000000000000import os import tempfile import unittest import netCDF4 FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name class TestNoIterNoContains(unittest.TestCase): def setUp(self) -> None: self.file = FILE_NAME with netCDF4.Dataset(self.file, "w") as dataset: # just create a simple variable 
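            # (no dimensions are passed, so "var1" is a scalar variable)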
dataset.createVariable("var1", int) def tearDown(self) -> None: os.remove(self.file) def test_no_iter(self) -> None: """Verify that iteration is explicitly not supported""" with netCDF4.Dataset(self.file, "r") as dataset: with self.assertRaises(TypeError): for _ in dataset: # type: ignore # type checker catches that this doesn't work pass def test_no_contains(self) -> None: """Verify the membership operations are explicity not supported""" with netCDF4.Dataset(self.file, "r") as dataset: with self.assertRaises(TypeError): _ = "var1" in dataset if __name__ == "__main__": unittest.main(verbosity=2) netcdf4-python-1.7.4rel/test/test_open_mem.py000066400000000000000000000013251512661643000212700ustar00rootroot00000000000000import os import unittest import netCDF4 CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) class TestOpenMem(unittest.TestCase): def test_mem_open(self): fpath = os.path.join(CURRENT_DIR, "netcdf_dummy_file.nc") with open(fpath, 'rb') as f: nc_bytes = f.read() if not netCDF4.__has_nc_open_mem__: with self.assertRaises(ValueError): netCDF4.Dataset('foo_bar', memory=nc_bytes) return with netCDF4.Dataset('foo_bar', memory=nc_bytes) as nc: assert nc.filepath() == 'foo_bar' assert nc.project_summary == 'Dummy netCDF file' if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_refcount.py000066400000000000000000000017221512661643000213170ustar00rootroot00000000000000import unittest, netCDF4, tempfile, os file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name class RefCountTestCase(unittest.TestCase): def setUp(self): nc = netCDF4.Dataset(file_name, mode='w', keepweakref=True, format='NETCDF4') d = nc.createDimension('fred', 2000) v = nc.createVariable('frank','f',('fred',)) self.file = file_name self.nc = nc def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing garbage collection (issue 218)""" # this should trigger garbage collection (__dealloc__ method) del self.nc # if __dealloc__ not called to close file, then this # will fail with "Permission denied" error (since you can't # open a file 'w' that is already open for writing). nc = netCDF4.Dataset(self.file, mode='w', format='NETCDF4') if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_rename.py000066400000000000000000000120571512661643000207440ustar00rootroot00000000000000import sys import unittest import os import tempfile import netCDF4 from netCDF4 import __has_rename_grp__ # test changing dimension, variable names # and deleting attributes. 
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name LAT_NAME="lat" LON_NAME="lon" LON_NAME2 = "longitude" LEVEL_NAME="level" TIME_NAME="time" VAR_NAME='temp' VAR_NAME2='wind' GROUP_NAME='subgroup' GROUP_NAME2='subgroup2' class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = netCDF4.Dataset(self.file, 'w') f.createDimension(LAT_NAME,73) f.createDimension(LON_NAME,145) f.createDimension(LEVEL_NAME,10) f.createDimension(TIME_NAME,None) if __has_rename_grp__: g = f.createGroup(GROUP_NAME) else: g = f.createGroup(GROUP_NAME2) g.createDimension(LAT_NAME,145) g.createDimension(LON_NAME,289) g.createDimension(LEVEL_NAME,20) g.createDimension(TIME_NAME,None) f.foo = 'bar' f.goober = 2 g.foo = 'bar' g.goober = 2 f.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME)) v = f.variables[VAR_NAME] v.bar = 'foo' v.slobber = 3 g.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME)) v2 = g.variables[VAR_NAME] v2.bar = 'foo' v2.slobber = 3 f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing renaming of dimensions, variables and attribute deletion""" f = netCDF4.Dataset(self.file, 'r+') v = f.variables[VAR_NAME] names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] # check that dimension names are correct. for name in f.dimensions.keys(): self.assertTrue(name in names_check) names_check = [VAR_NAME] # check that variable names are correct. for name in f.variables.keys(): self.assertTrue(name in names_check) # rename dimension. f.renameDimension(LON_NAME,LON_NAME2) # rename variable. f.renameVariable(VAR_NAME,VAR_NAME2) # rename group. if __has_rename_grp__: f.renameGroup(GROUP_NAME,GROUP_NAME2) # check that new dimension names are correct. names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME] for name in f.dimensions.keys(): self.assertTrue(name in names_check) names_check = [VAR_NAME2] # check that new variable names are correct. for name in f.variables.keys(): self.assertTrue(name in names_check) g = f.groups[GROUP_NAME2] vg = g.variables[VAR_NAME] names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME] # check that dimension names are correct. for name in g.dimensions.keys(): self.assertTrue(name in names_check) names_check = [VAR_NAME] # check that variable names are correct. for name in g.variables.keys(): self.assertTrue(name in names_check) # check that group name is correct. self.assertTrue(GROUP_NAME not in f.groups and GROUP_NAME2 in f.groups) # rename dimension. g.renameDimension(LON_NAME,LON_NAME2) # rename variable. g.renameVariable(VAR_NAME,VAR_NAME2) # check that new dimension names are correct. names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME] for name in g.dimensions.keys(): self.assertTrue(name in names_check) names_check = [VAR_NAME2] # check that new variable names are correct. for name in g.variables.keys(): self.assertTrue(name in names_check) # delete a global attribute. atts = f.ncattrs() del f.goober atts.remove('goober') self.assertTrue(atts == f.ncattrs()) atts = g.ncattrs() del g.goober atts.remove('goober') self.assertTrue(atts == g.ncattrs()) # delete a variable attribute. atts = v.ncattrs() del v.slobber atts.remove('slobber') self.assertTrue(atts == v.ncattrs()) atts = vg.ncattrs() del vg.slobber atts.remove('slobber') self.assertTrue(atts == vg.ncattrs()) f.close() # make sure attributes cannot be deleted, or vars/dims renamed # when file is open read-only. 
f = netCDF4.Dataset(self.file) v = f.variables[VAR_NAME2] self.assertRaises(RuntimeError, delattr, v, 'bar') self.assertRaises(RuntimeError, f.renameVariable, VAR_NAME2, VAR_NAME) self.assertRaises(RuntimeError, f.renameDimension, LON_NAME2, LON_NAME) g = f.groups[GROUP_NAME2] vg = g.variables[VAR_NAME2] self.assertRaises(RuntimeError, delattr, vg, 'bar') self.assertRaises(RuntimeError, g.renameVariable, VAR_NAME2, VAR_NAME) self.assertRaises(RuntimeError, g.renameDimension, LON_NAME2, LON_NAME) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_scalarvar.py000066400000000000000000000034351512661643000214530ustar00rootroot00000000000000import sys import unittest import os import tempfile from numpy.testing import assert_almost_equal import netCDF4 import math VAR_NAME='temp' VAR_TYPE='f4' VAR_VAL=math.pi FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name GROUP_NAME = 'subgroup' # test scalar variable creation and retrieval. class ScalarVariableTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME rootgrp = netCDF4.Dataset(self.file, 'w') # scalar variable. temp = rootgrp.createVariable(VAR_NAME,VAR_TYPE) #temp[:] = VAR_VAL temp.assignValue(VAR_VAL) subgroup = rootgrp.createGroup(GROUP_NAME) tempg = subgroup.createVariable(VAR_NAME,VAR_TYPE) tempg[:] = VAR_VAL #tempg.assignValue(VAR_VAL) rootgrp.close() def tearDown(self): # Remove the temporary file os.remove(self.file) def runTest(self): """testing scalar variables""" # check dimensions in root group. f = netCDF4.Dataset(self.file, 'r+') v = f.variables[VAR_NAME] # dimensions and shape should be empty tuples self.assertTrue(v.dimensions == ()) self.assertTrue(v.shape == ()) # check result of getValue and slice assert_almost_equal(v.getValue(), VAR_VAL, decimal=6) assert_almost_equal(v[:], VAR_VAL, decimal=6) g = f.groups[GROUP_NAME] vg = g.variables[VAR_NAME] # dimensions and shape should be empty tuples self.assertTrue(vg.dimensions == ()) self.assertTrue(vg.shape == ()) # check result of getValue and slice assert_almost_equal(vg.getValue(), VAR_VAL, decimal=6) assert_almost_equal(vg[:], VAR_VAL, decimal=6) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_scaled.py000077500000000000000000000162241512661643000207330ustar00rootroot00000000000000import unittest import os import tempfile import numpy as np from numpy import ma from numpy.testing import assert_array_almost_equal from netCDF4 import Dataset, default_fillvals # Test automatic scaling of variables (set_auto_scale()) class SetAutoScaleTestBase(unittest.TestCase): """Base object for tests checking the functionality of set_auto_scale()""" def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name self.fillval = default_fillvals["i2"] self.missing_value = -9999 self.v = np.array([0, 5, 4, self.missing_value], dtype = "i2") self.v_ma = ma.MaskedArray([0, 5, 4, self.missing_value], dtype = "i2", mask = [True, False, False, True], fill_value = self.fillval) self.scale_factor = 10. self.add_offset = 5. 
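        # On write, auto-scaling applies the inverse transform before storing,
        #   packed = (unpacked - add_offset) / scale_factor,
        # and on read the stored values are unpacked again as
        #   unpacked = packed * scale_factor + add_offset.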
self.v_scaled = self.v * self.scale_factor + self.add_offset self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset f = Dataset(self.testfile, 'w') x = f.createDimension('x', None) xx = f.createDimension('xx', 10) v = f.createVariable('v', "i2", 'x') vv = f.createVariable('vv', "i2", 'xx') vv.add_offset=0; vv.scale_factor=np.float32(1.0) v[:] = self.v vv[:] = np.ones(10) # Note: Scale factors are only added after writing, so that no auto-scaling takes place! v.scale_factor = self.scale_factor v.add_offset = self.add_offset f.close() def tearDown(self): os.remove(self.testfile) class SetAutoScaleFalse(SetAutoScaleTestBase): def test_unmasked(self): """Testing (not) auto-scaling of variables for set_auto_scale(False)""" f = Dataset(self.testfile, "r") f.variables["v"].set_auto_scale(False) v = f.variables["v"][:] self.assertEqual(v.dtype, "i2") self.assertTrue(isinstance(v, np.ndarray)) # issue 785: always return masked array by default self.assertTrue(isinstance(v, ma.masked_array)) assert_array_almost_equal(v, self.v) f.close() def test_masked(self): """Testing auto-conversion of masked arrays for set_auto_mask(False) with masking""" # Update test data file f = Dataset(self.testfile, "a") f.variables["v"].missing_value = self.missing_value f.close() # Note: Converting arrays to masked arrays is default if missing_value is present f = Dataset(self.testfile, "r") f.variables["v"].set_auto_scale(False) v_ma = f.variables["v"][:] self.assertEqual(v_ma.dtype, "i2") self.assertTrue(isinstance(v_ma, np.ndarray)) self.assertTrue(isinstance(v_ma, ma.masked_array)) assert_array_almost_equal(v_ma, self.v_ma) f.close() class SetAutoScaleTrue(SetAutoScaleTestBase): def test_unmasked(self): """Testing auto-scaling of variables for set_auto_scale(True)""" f = Dataset(self.testfile) f.variables["v"].set_auto_scale(True) # The default anyway... v_scaled = f.variables['v'][:] # issue 913 vv_scaled = f.variables['vv'][:] self.assertEqual(vv_scaled.dtype,f.variables['vv'].scale_factor.dtype) assert_array_almost_equal(vv_scaled, np.ones(10)) self.assertEqual(v_scaled.dtype, "f8") self.assertTrue(isinstance(v_scaled, np.ndarray)) # issue 785: always return masked array by default self.assertTrue(isinstance(v_scaled, ma.masked_array)) assert_array_almost_equal(v_scaled, self.v_scaled) f.close() def test_masked(self): """Testing auto-scaling of variables for set_auto_scale(True) with masking""" # Update test data file f = Dataset(self.testfile, "a") f.variables["v"].missing_value = self.missing_value f.close() # Note: Converting arrays to masked arrays is default if missing_value is present f = Dataset(self.testfile) f.variables["v"].set_auto_scale(True) # The default anyway... 
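        # (values equal to missing_value are masked before the scaling is
        # applied, so the expected result is the scaled masked array)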
v_ma_scaled = f.variables['v'][:] self.assertEqual(v_ma_scaled.dtype, "f8") self.assertTrue(isinstance(v_ma_scaled, np.ndarray)) self.assertTrue(isinstance(v_ma_scaled, ma.masked_array)) assert_array_almost_equal(v_ma_scaled, self.v_ma_scaled) f.close() class WriteAutoScaleTest(SetAutoScaleTestBase): def test_auto_scale_write(self): """Testing automatic packing to all kinds of integer types""" def packparams(dmax, dmin, dtyp): kind = dtyp[0] n = int(dtyp[1]) * 8 scale_factor = (dmax - dmin) / (2**n - 1) if kind == 'i': add_offset = dmin + 2**(n-1) * scale_factor elif kind == 'u': add_offset = dmin else: raise Exception return((add_offset, scale_factor)) for dtyp in ['i1', 'i2', 'i4', 'u1', 'u2', 'u4']: np.random.seed(456) data = np.random.uniform(size=100) f = Dataset(self.testfile, 'w') f.createDimension('x') # # save auto_scaled v = f.createVariable('v', dtyp, ('x',)) v.set_auto_scale(True) # redundant v.add_offset, v.scale_factor = packparams( np.max(data), np.min(data), dtyp) v[:] = data f.close() # # read back f = Dataset(self.testfile, 'r') v = f.variables['v'] v.set_auto_mask(False) v.set_auto_scale(True) # redundant vdata = v[:] # error normalized by scale factor maxerrnorm = np.max(np.abs((vdata - data) / v.scale_factor)) # 1e-5 accounts for floating point errors assert maxerrnorm < 0.5 + 1e-5 f.close() class GlobalSetAutoScaleTest(unittest.TestCase): def setUp(self): self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name f = Dataset(self.testfile, 'w') grp1 = f.createGroup('Group1') grp2 = f.createGroup('Group2') f.createGroup('Group3') # empty group f.createVariable('var0', "i2", ()) grp1.createVariable('var1', 'f8', ()) grp2.createVariable('var2', 'f4', ()) f.close() def tearDown(self): os.remove(self.testfile) def runTest(self): f = Dataset(self.testfile, "r") # Default is both scaling and masking enabled v0 = f.variables['var0'] v1 = f.groups['Group1'].variables['var1'] v2 = f.groups['Group2'].variables['var2'] self.assertTrue(v0.scale) self.assertTrue(v0.mask) self.assertTrue(v1.scale) self.assertTrue(v1.mask) self.assertTrue(v2.scale) self.assertTrue(v2.mask) # No auto-scaling f.set_auto_scale(False) self.assertFalse(v0.scale) self.assertTrue(v0.mask) self.assertFalse(v1.scale) self.assertTrue(v1.mask) self.assertFalse(v2.scale) self.assertTrue(v2.mask) f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_shape.py000066400000000000000000000021461512661643000205730ustar00rootroot00000000000000from netCDF4 import Dataset import tempfile, unittest, os import numpy as np file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name xdim=None; ydim=121; zdim=169 datashape = (ydim,zdim) data = np.ones(datashape,dtype=np.float64) class ShapeTestCase(unittest.TestCase): def setUp(self): self.file = file_name f = Dataset(file_name,'w') f.createDimension('x',xdim) f.createDimension('y',ydim) f.createDimension('z',zdim) v = f.createVariable('data',np.float64,('x','y','z')) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """test for issue 90 (array shape should not be modified by assignment to netCDF variable)""" f = Dataset(self.file, 'a') v = f.variables['data'] v[0] = data # make sure shape of data array # is not changed by assigning it # to a netcdf var with one more dimension (issue 90) assert data.shape == datashape f.close() if __name__ == '__main__': unittest.main() 
netcdf4-python-1.7.4rel/test/test_slicing.py000066400000000000000000000236221512661643000211250ustar00rootroot00000000000000from netCDF4 import Dataset from numpy.random import seed, randint from numpy.testing import assert_array_equal, assert_equal,\ assert_array_almost_equal import tempfile, unittest, os, random, sys import numpy as np file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name xdim=9; ydim=10; zdim=11 #seed(9) # fix seed data = randint(0,10,size=(xdim,ydim,zdim)).astype('u1') datarev = data[:,::-1,:] class VariablesTestCase(unittest.TestCase): def setUp(self): self.file = file_name f = Dataset(file_name,'w') f.createDimension('x',xdim) f.createDimension('xu',None) f.createDimension('xu2',None) f.createDimension('y',ydim) f.createDimension('z',zdim) f.createDimension('zu',None) v = f.createVariable('data','u1',('x','y','z')) vu = f.createVariable('datau','u1',('xu','y','zu')) v1 = f.createVariable('data1d', 'u1', ('x',)) v2 = f.createVariable('data1dx', 'u1', ('xu2',)) # variable with no unlimited dim. # write slice in reverse order v[:,::-1,:] = data # variable with an unlimited dimension. # write slice in reverse order #vu[0:xdim,::-1,0:zdim] = data vu[:,::-1,:] = data v1[:] = data[:, 0, 0] if sys.maxsize > 2**32: v2[2**31] = 1 # issue 1112 (overflow on windows) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def test_3d(self): """testing variable slicing""" f = Dataset(self.file, 'r') v = f.variables['data'] vu = f.variables['datau'] # test return of array scalar. assert_equal(v[0,0,0].shape,()) assert_array_equal(v[:], datarev) # test reading of slices. # negative value means count back from end. assert_array_equal(v[:-1,:-2,:-3],datarev[:-1,:-2,:-3]) # every other element (positive step) assert_array_equal(v[2:-1:2,2:-2:2,2:-3:2],datarev[2:-1:2,2:-2:2,2:-3:2]) # every other element (negative step) assert_array_equal(v[-1:2:-2,-2:2:-2,-3:2:-2],datarev[-1:2:-2,-2:2:-2,-3:2:-2]) # read elements in reverse order assert_array_equal(v[:,::-1,:],data) assert_array_equal(v[::-1,:,::-1],datarev[::-1,:,::-1]) assert_array_equal(v[xdim-1::-3,:,zdim-1::-3],datarev[xdim-1::-3,:,zdim-1::-3]) # ellipsis slice. assert_array_equal(v[...,2:],datarev[...,2:]) # variable with an unlimited dimension. assert_array_equal(vu[:], data[:,::-1,:]) # read data in reverse order assert_array_equal(vu[:,::-1,:],data) # index using an integer array scalar i = np.ones(1,'i4')[0] assert_array_equal(v[i],datarev[1]) f.close() def test_1d(self): f = Dataset(self.file, 'r') v1 = f.variables['data1d'] v2 = f.variables['data1dx'] d = data[:,0,0] assert_equal(v1[:], d) if sys.maxsize > 2**32: assert_equal(v2[2**31], 1) assert_equal(v1[4:], d[4:]) # test return of array scalar. assert_equal(v1[0].shape, ()) i1 = np.array([2,3,4]) assert_equal(v1[i1], d[i1]) i2 = np.array([2,3,5]) assert_equal(v1[i2], d[i2]) assert_equal(v1[d<5], d[d<5]) assert_equal(v1[5], d[5]) f.close() def test_0d(self): f = Dataset(self.file, 'w') v = f.createVariable('data', float) v[...] 
= 10 assert_array_equal(v[...], 10) assert_equal(v.shape, v[...].shape) # issue #785: always return masked array #assert type(v[...]) == np.ndarray assert type(v[...]) == np.ma.masked_array f.set_auto_mask(False) assert type(v[...]) == np.ndarray f.close() def test_issue259(self): dset = Dataset(self.file, 'w', format='NETCDF4_CLASSIC') dset.createDimension('dim', None) a = dset.createVariable('a', 'i', ('dim',)) b = dset.createVariable('b', 'i', ('dim',)) c = dset.createVariable('c', 'i', ('dim',)) c[:] = 1 # c initially is empty, new entry created assert_array_equal(c[...], np.array([1])) b[:] = np.array([1,1]) a[:] = 1 # a should be same as b assert_array_equal(a[...], b[...]) dset.close() def test_issue371(self): dataset = Dataset(self.file, 'w') dataset.createDimension('dim', 5) var = dataset.createVariable('bar', 'i8', ('dim', )) data = [1, 2, 3, 4, 5] var[..., :] = data assert_array_equal(var[..., :], np.array(data)) dataset.close() def test_issue306(self): f = Dataset(self.file,'w') nlats = 7; lat = f.createDimension('lat',nlats) nlons = 12; lon = f.createDimension('lon',nlons) nlevs = 1; lev = f.createDimension('lev',nlevs) time = f.createDimension('time',None) var = f.createVariable('var',np.float64,('time','lev','lat','lon')) a = np.random.uniform(size=(10,nlevs,nlats,nlons)) var[0:10] = a f.close() f = Dataset(self.file) aa = f.variables['var'][4,-1,:,:] assert_array_almost_equal(a[4,-1,:,:],aa) v = f.variables['var'] try: aa = v[4,-2,:,:] # -2 when dimension is length 1 except IndexError: pass else: raise IndexError('This test should have failed.') try: aa = v[4,...,...,:] # more than one Ellipsis except IndexError: pass else: raise IndexError('This test should have failed.') try: aa = v[:,[True,True],:,:] # boolean array too long. except IndexError: pass else: raise IndexError('This test should have failed.') try: aa = v[:,[0,1],:,:] # integer index too large except IndexError: pass else: raise IndexError('This test should have failed.') f.close() def test_issue300(self): f = Dataset(self.file,'w') nlats = 11; lat = f.createDimension('lat',nlats) nlons = 20; lon = f.createDimension('lon',nlons) time = f.createDimension('time',None) var = f.createVariable('var',np.float64,('time','lat','lon')) a = np.random.uniform(size=(3,nlats,nlons)) var[[True,True,False,False,False,True]] = a var[0,2.0,"-1"] = 0 # issue 312 a[0,2,-1]=0 f.close() f = Dataset(self.file) var = f.variables['var'] aa = var[[0,1,5]] bb = var[[True,True,False,False,False,True]] lats = np.arange(nlats); lons = np.arange(nlons) cc = var[-1,lats > 2,lons < 6] assert_array_almost_equal(a,aa) assert_array_almost_equal(bb,aa) assert_array_almost_equal(cc,a[-1,3:,:6]) f.close() def test_retain_single_dims(self): f = Dataset(self.file, 'r') v = f.variables['data'] keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), (5,), (4,)) shape = (9, 1, 1) data = v[keys] assert_equal(data.shape, shape) keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), 5, 4,) shape = (9,) data = v[keys] assert_equal(data.shape, shape) f.close() def test_issue743(self): nc = Dataset(self.file,'w',format='NETCDF3_CLASSIC') td = nc.createDimension('t',None) xd = nc.createDimension('x',33) yd = nc.createDimension('y',4) v = nc.createVariable('v',np.float64,('t','x','y')) nc.close() nc = Dataset(self.file) data = np.empty(nc['v'].shape, nc['v'].dtype) data2 = nc['v'][...] 
assert_array_equal(data,data2) nc.close() def test_issue906(self): f = Dataset(self.file,'w') f.createDimension('d1',3) f.createDimension('d2',None) f.createDimension('d3',5) f.createVariable('v2',np.float64,('d1','d2','d3')) f['v2'][:] = np.zeros((3,4,5)) f['v2'][0,:,0] = np.arange(4) f['v2'][0,:,:] = np.ones((4,5)) f.close() def test_issue919(self): with Dataset(self.file,'w') as f: f.createDimension('time',2) f.createDimension('lat',10) f.createDimension('lon',9) f.createVariable('v1',np.int64,('time', 'lon','lat',)) arr = np.arange(9*10).reshape((9, 10)) f['v1'][:] = arr assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape)) arr = np.arange(10) f['v1'][:] = arr assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape)) def test_issue922(self): with Dataset(self.file,'w') as f: f.createDimension('d1',3) f.createDimension('d2',None) f.createVariable('v1',np.int64,('d2','d1',)) f['v1'][0] = np.arange(3,dtype=np.int64) f['v1'][1:3] = np.arange(3,dtype=np.int64) assert_array_equal(f['v1'][:], np.broadcast_to(np.arange(3),(3,3))) f.createVariable('v2',np.int64,('d1','d2',)) f['v2'][:,0] = np.arange(3,dtype=np.int64) f['v2'][:,1:3] = np.arange(6,dtype=np.int64).reshape(3,2) assert_array_equal(f['v2'][:,1:3],np.arange(6,dtype=np.int64).reshape(3,2)) assert_array_equal(f['v2'][:,0],np.arange(3,dtype=np.int64)) def test_issue1083(self): with Dataset(self.file, "w") as nc: nc.createDimension("test", 5) v = nc.createVariable("var", "f8", ("test", "test", "test")) v[:] = 1 # works v[:] = np.ones(()) # works v[:] = np.ones((1,)) # works v[:] = np.ones((5,)) # works v[:] = np.ones((5,5,5)) # works v[:] = np.ones((5,1,1)) # fails (before PR #1084) v[:] = np.ones((5,1,5)) # fails (before PR #1084) v[:] = np.ones((5,5,1)) # fails (before PR #1084) if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_stringarr.py000066400000000000000000000104431512661643000215050ustar00rootroot00000000000000from netCDF4 import Dataset, stringtochar, chartostring import random, numpy, string import unittest import os from numpy.testing import assert_array_equal, assert_array_almost_equal import numpy as np def generateString(length, alphabet=string.ascii_letters + string.digits + string.punctuation): return(''.join([random.choice(alphabet) for i in range(length)])) # test conversion of arrays of fixed-length strings # to arrays of characters (with an extra dimension), and vice-versa. 
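# Roughly: stringtochar turns an array of fixed-length strings of dtype
# 'S12' and shape (nrecs, n2) into a byte array of dtype 'S1' and shape
# (nrecs, n2, 12); chartostring collapses that trailing character
# dimension back into fixed-length strings.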
FILE_NAME = 'tst_stringarr.nc'
FILE_FORMAT = 'NETCDF4_CLASSIC'
n2 = 20; nchar = 12; nrecs = 4
data = numpy.empty((nrecs,n2),'S'+repr(nchar))
for nrec in range(nrecs):
    for n in range(n2):
        data[nrec,n] = generateString(nchar)
datau = data.astype('U')
datac = stringtochar(data, encoding='ascii')

nx, n_strlen = 3, 12
unicode_strings = np.array(['Münster', 'Liége', '東京'],dtype='U'+str(n_strlen))
unicode_strings2 = np.array(['Münster', 'Москва', '東京'],dtype='U'+str(n_strlen))
unicode_strings2_bytes = [b'M', b'\xc3', b'\xbc', b'n', b's', b't', b'e', b'r',
                          b'\xd0', b'\x9c', b'\xd0', b'\xbe', b'\xd1', b'\x81', b'\xd0', b'\xba',
                          b'\xd0', b'\xb2', b'\xd0', b'\xb0',
                          b'\xe6', b'\x9d', b'\xb1', b'\xe4', b'\xba', b'\xac']

class StringArrayTestCase(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT)  # type: ignore  # FILE_FORMAT
        nc.createDimension('n1',None)
        nc.createDimension('n2',n2)
        nc.createDimension('nchar',nchar)
        nc.createDimension("x", nx)
        nc.createDimension("nstr", n_strlen)
        v = nc.createVariable('strings','S1',('n1','n2','nchar'))
        v2 = nc.createVariable('strings2','S1',('n1','n2','nchar'))
        # if _Encoding set, string array should automatically be converted
        # to a char array and vice-versa
        v2._Encoding = 'ascii'
        v3 = nc.createVariable('strings3','S1',('n1','n2','nchar'))
        v3._Encoding = 'ascii'
        for nrec in range(nrecs):
            datac = stringtochar(data,encoding='ascii')
            v[nrec] = datac[nrec]
        v2[:-1] = data[:-1]
        v2[-1] = data[-1]
        v2[-1,-1] = data[-1,-1] # write single element
        v2[-1,-1] = data[-1,-1].tobytes() # write single python string
        # _Encoding should be ignored if an array of characters is specified
        v3[:] = stringtochar(data, encoding='ascii')
        # test unicode strings (issue #1440)
        v4 = nc.createVariable("strings4", "S1", dimensions=("x", "nstr",))
        v4._Encoding = "UTF-8"
        v4[:] = unicode_strings
        v4[1] = "Москва"
        nc.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing functions for converting arrays of chars to fixed-len strings"""
        nc = Dataset(FILE_NAME)
        assert nc.dimensions['n1'].isunlimited() == True
        v = nc.variables['strings']
        v2 = nc.variables['strings2']
        v3 = nc.variables['strings3']
        v4 = nc.variables['strings4']
        assert np.all(v4[:]==unicode_strings2)
        v4.set_auto_chartostring(False)
        assert (v4[:].compressed().tolist() == unicode_strings2_bytes)
        assert v.dtype.str[1:] in ['S1','U1']
        assert v.shape == (nrecs,n2,nchar)
        for nrec in range(nrecs):
            data2 = chartostring(v[nrec],encoding='ascii')
            assert_array_equal(data2,datau[nrec])
        data2 = v2[:]
        data2[0] = v2[0]
        data2[0,1] = v2[0,1]
        assert_array_equal(data2,datau)
        data3 = v3[:]
        assert_array_equal(data3,datau)
        # these slices should return a char array, not a string array
        data4 = v2[:,:,0]
        assert data4.dtype.itemsize == 1
        assert_array_equal(data4, datac[:,:,0])
        data5 = v2[0,0:nchar,0]
        assert data5.dtype.itemsize == 1
        assert_array_equal(data5, datac[0,0:nchar,0])
        # test turning auto-conversion off.
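        # (with set_auto_chartostring(False), reads return the raw 'S1' char
        # array -- itemsize 1 -- and the _Encoding attribute is not applied)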
v2.set_auto_chartostring(False) data6 = v2[:] assert data6.dtype.itemsize == 1 assert_array_equal(data6, datac) nc.set_auto_chartostring(False) data7 = v3[:] assert data7.dtype.itemsize == 1 assert_array_equal(data7, datac) nc.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_types.py000066400000000000000000000103141512661643000206330ustar00rootroot00000000000000import sys from typing import TYPE_CHECKING, Any import unittest import os import tempfile import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.random.mtrand import uniform import netCDF4 if TYPE_CHECKING: from netCDF4 import CompressionLevel else: CompressionLevel = Any # test primitive data types. # create an n1dim by n2dim random ranarr. FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name n1dim = 5 n2dim = 10 ranarr = 100.*uniform(size=(n1dim,n2dim)) zlib=False; complevel=0; shuffle=False; least_significant_digit=None datatypes = ['f8','f4','i1','i2','i4','i8','u1','u2','u4','u8','S1'] FillValue = 1.0 issue273_data = np.ma.MaskedArray(['z']*10,dtype='S1',\ mask=[False,False,False,False,False,True,False,False,False,False]) class PrimitiveTypesTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = netCDF4.Dataset(self.file,'w') f.createDimension('n1', None) f.createDimension('n2', n2dim) for typ in datatypes: foo = f.createVariable( f"data_{typ}", typ, ('n1','n2',), zlib=zlib, complevel=complevel, # type: ignore # type checkers bad at narrowing shuffle=shuffle, least_significant_digit=least_significant_digit, fill_value=FillValue, ) #foo._FillValue = FillValue # test writing of _FillValue attribute for diff types # (should be cast to type of variable silently) foo[1:n1dim] = ranarr[1:n1dim] v = f.createVariable('issue271', np.dtype('S1'), [], fill_value=b'Z') v2 = f.createVariable('issue273', np.dtype('S1'), 'n2',\ fill_value='\x00') v2[:] = issue273_data v3 = f.createVariable('issue707',np.int8,'n2') v3.setncattr('missing_value',255) v3[:]=-1 f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing primitive data type """ f = netCDF4.Dataset(self.file) for typ in datatypes: data = f.variables['data_'+typ] data.set_auto_maskandscale(False) datarr: np.ndarray = data[1:n1dim] # fill missing data with _FillValue # ('S1' array will have some missing values) if hasattr(datarr, 'mask'): assert isinstance(datarr, np.ma.masked_array) datarr = datarr.filled() datfilled = data[0] # check to see that data type is correct if typ == 'S1': self.assertTrue(data.dtype.str[1:] in ['S1','U1']) else: self.assertTrue(data.dtype.str[1:] == typ) # check data in variable. if data.dtype.str[1:] != 'S1': #assert np.allclose(datarr, ranarr[1:n1dim].astype(data.dtype)) assert_array_almost_equal(datarr,ranarr[1:n1dim].astype(data.dtype)) else: assert datarr.tobytes() == ranarr[1:n1dim].astype(data.dtype).tobytes() # check that variable elements not yet written are filled # with the specified _FillValue. 
assert_array_equal(datfilled,np.asarray(data._FillValue,datfilled.dtype)) # issue 271 (_FillValue should be a byte for character arrays on # Python 3) v = f.variables['issue271'] assert type(v._FillValue) == bytes assert v._FillValue == b'Z' # issue 273 (setting _FillValue to null byte manually) v2 = f.variables['issue273'] assert type(v2._FillValue) == bytes assert v2._FillValue == b'\x00' assert str(issue273_data) == str(v2[:]) # issue 707 (don't apply missing_value if cast to variable type is # unsafe) v3 = f.variables['issue707'] assert_array_equal(v3[:],-1*np.ones(n2dim,v3.dtype)) f.close() # issue #850 (masked scalar char variable) f = netCDF4.Dataset(self.file,'a') a = f.createVariable('a', 'c', ()) a[:] = np.ma.masked f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_unicode.py000066400000000000000000000022341512661643000211170ustar00rootroot00000000000000import netCDF4 import numpy as np import sys, unittest, os, tempfile FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name ATT1 = '\u03a0\u03a3\u03a9' ATT2 = 'x\xb0' ATT3 = ['\u03a0', '\u03a3', '\u03a9'] DIM_NAME = 'x\xb0' VAR_NAME = 'Andr\xe9' class UnicodeTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME f = netCDF4.Dataset(self.file,'w') f.attribute1 = ATT1 f.attribute2 = ATT2 f.attribute3 = ATT3 d = f.createDimension(DIM_NAME, None) v = f.createVariable(VAR_NAME, np.float64, (DIM_NAME,)) f.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing unicode""" f = netCDF4.Dataset(self.file, 'r') d = f.dimensions[DIM_NAME] v = f.variables[VAR_NAME] # check accessing individual attributes. assert f.attribute1 == ATT1 assert f.attribute2 == ATT2 #assert f.attribute3 == ''.join(ATT3) # behavior changed issue 770 assert f.attribute3 == ATT3 f.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_unicodeatt.py000066400000000000000000000025001512661643000216240ustar00rootroot00000000000000from netCDF4 import Dataset import sys, unittest, os, tempfile FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name class UnicodeAttTestCase(unittest.TestCase): def setUp(self): self.file = FILE_NAME nc = Dataset(self.file,'w') # write as a utf-8 string nc.stratt = b'\xe6\xb7\xb1\xe5\x85\xa5 Python'.decode('utf-8') # write as raw bytes (decoded string is same as above with 'big5' encoding) nc.stratt2 = b'\xb2`\xa4J Python' # same as above, but attribute forced to be of type NC_STRING nc.setncattr_string('stratt3',b'\xb2`\xa4J Python') nc.close() def tearDown(self): # Remove the temporary files os.remove(self.file) def runTest(self): """testing unicode attributes""" nc = Dataset(self.file, 'r') assert nc.stratt.encode('utf-8') == b'\xe6\xb7\xb1\xe5\x85\xa5 Python' stratt2 = nc.getncattr('stratt2',encoding='big5') # decodes using big5 stratt3 = nc.getncattr('stratt3',encoding='big5') # same as above assert stratt2.encode('big5') == b'\xb2`\xa4J Python' assert nc.stratt == stratt2 # decoded strings are the same assert nc.stratt == stratt3 # decoded strings are the same nc.close() if __name__ == '__main__': unittest.main() netcdf4-python-1.7.4rel/test/test_unlimdim.py000066400000000000000000000042421512661643000213100ustar00rootroot00000000000000import sys import unittest import os import tempfile import numpy as np from numpy.random.mtrand import uniform from numpy.testing import assert_array_equal, assert_array_almost_equal import netCDF4 # test creating variables with unlimited 
if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_utils.py000066400000000000000000000331401512661643000206310ustar00rootroot00000000000000
from numpy.testing import assert_equal
from netCDF4.utils import _StartCountStride, _out_array_shape
import unittest
import numpy as np

class TestgetStartCountStride(unittest.TestCase):

    def test_basic(self):
        # Basic usage
        elem = [0, slice(None), slice(None)]
        start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
        assert_equal(start, 0)
        assert_equal(count[..., 0], 1)
        assert_equal(count[..., 1], 4)
        assert_equal(count[..., 2], 10)
        assert_equal(stride, 1)
        assert_equal(put_ind[...,0], -1)
        assert_equal(put_ind[...,1], slice(None))
        assert_equal(put_ind[...,2], slice(None))
        assert_equal(_out_array_shape(count), (1, 4, 10))

    def test_slice(self):
        # start and stop slice
        elem = [5, slice(None), slice(5, 8, 2)]
        start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
        assert_equal(start[..., 0], 5)
        assert_equal(start[..., 1], 0)
        assert_equal(start[..., 2], 5)
        assert_equal(count[..., 0], 1)
        assert_equal(count[..., 1], 4)
        assert_equal(count[..., 2], 2)
        assert_equal(stride[..., 2], 2)
        assert_equal(_out_array_shape(count), (1, 4, 2))

    def test_fancy(self):
        # Fancy indexing
        elem = [slice(None), [1,2,3], 8]
        start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))
        assert_equal(start[..., 0], 0)
        assert_equal(start[..., 1].squeeze(), 1)
        assert_equal(start[..., 2], 8)
        assert_equal(count[...,0], 50)
        assert_equal(count[...,1], 3)
        assert_equal(count[...,2], 1)
        assert_equal(put_ind[...,1].squeeze(), slice(None, None, None))
        assert_equal(_out_array_shape(count), (50, 3, 1))

        i = np.array([2,5,7],'i4')
        elem = [slice(None, -1, 2), i, slice(None)]
        start, count, stride, put_ind = _StartCountStride(elem, (9,10,11))

        try:
            elem2 = ( np.arange(6).reshape((3,2)), slice(None), slice(None) )
            start, count, stride, put_ind = _StartCountStride(elem2, (3,4,5))
        except IndexError:
            pass

        # this one should be converted to a slice
        elem3 = [slice(None), [1,3,5], 8]
        start, count, stride, put_ind = _StartCountStride(elem3, (50, 6, 10))
        # pull request #683 now does not convert integer sequences to strided
        # slices. PR #1224 reverts this behavior.
        assert_equal(put_ind[...,1].squeeze(), slice(None,None,None))
        #assert_equal(put_ind[...,1].squeeze(), [0,1,2])

    def test_multiple_sequences(self):
        elem = [[4,6,7], [1,2,3], slice(None)]
        start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 10))

        assert_equal(_out_array_shape(count), (3, 3, 10))

        assert_equal(start[..., 0].squeeze(), [4,6,7])
        assert_equal(start[..., 1].squeeze(), [1,1,1])
        assert_equal(start[..., 2], 0)
        assert_equal(count[...,0], 1)
        assert_equal(count[...,1], 3)
        assert_equal(count[...,2], 10)

        i = [1,3,4]
        elem2 = (i, i, i)
        start, count, stride, put_ind = _StartCountStride(elem2, (50, 5, 10))
        assert_equal(_out_array_shape(count), (3,3,3))

    def test_put_indices(self):
        elem = (1, slice(None), slice(None))
        start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
        orig = np.arange(60).reshape((3,4,5))
        dest = np.empty(_out_array_shape(count))
        dest[tuple(put_ind[0,0,0])] = orig[tuple(elem)]

    def test_boolean(self):
        elem = (1, slice(None), np.array([True, True, False, False, True]))
        start, count, stride, put_ind = _StartCountStride(elem, (50, 4, 5))

        assert_equal(start[..., 2].squeeze(), [0,1,4])
        assert_equal(count[...,2], 1)

        assert_equal(_out_array_shape(count), (1, 4, 3))

        # Multiple booleans --- The behavior is different from NumPy in this case.
        elem = (np.array([True, True, False]), np.array([True, True, False, True]), slice(None))
        start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))

        assert_equal(_out_array_shape(count), (2,3,5))

        # a second multiple-boolean combination; an IndexError, if raised,
        # is tolerated
        try:
            elem = (np.array([True, True, False]),
                    np.array([True, True, True, False]),
                    slice(None))
            start, count, stride, put_ind = _StartCountStride(elem, (3,4,5))
        except IndexError:
            pass

    def test_1d(self):
        # Scalar
        elem = (0)
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start, 0)
        assert_equal(count, 1)
        assert_equal(stride, 1)
        assert_equal(put_ind, -1)

        elem = (-1)
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start, 9)
        assert_equal(count, 1)
        assert_equal(stride, 1)
        assert_equal(put_ind, -1)

        # test conversion of an integer index array to a slice
        elem = (np.array([0,1]))
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start, 0)
        assert_equal(count, 2)
        assert_equal(stride, 1)
        assert_equal(put_ind[:,0], slice(None,None,None))

        # Slice
        elem = (slice(2,5,2))
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start, 2)
        assert_equal(count, 2)
        assert_equal(stride, 2)
        assert_equal(put_ind, slice(None))

        # Integer sequence
        elem = ([2,4,7])
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start.squeeze(), [2,4,7])
        assert_equal(count, 1)
        assert_equal(stride, 1)
        assert_equal(put_ind[:,0], [0,1,2])

        # Boolean slicing
        elem = (np.array([True, True, False, True, False]),)
        start, count, stride, put_ind = _StartCountStride(elem, (5,))
        assert_equal(start.squeeze(), [0,1,3])
        assert_equal(count, 1)
        assert_equal(stride, 1)
        assert_equal(put_ind[:,0], [0,1,2])

        # Integer sequence simplification
        elem = ([2,3,4])
        start, count, stride, put_ind = _StartCountStride(elem, (10,))
        assert_equal(start, 2)
        assert_equal(count, 3)
        assert_equal(stride, 1)
        assert_equal(put_ind, slice(None))

        # Boolean indices simplification
        elem = (np.array([False, True, True, True, False]))
        start, count, stride, put_ind = _StartCountStride(elem, (5,))
        assert_equal(start, 1)
        assert_equal(count, 3)
        assert_equal(stride, 1)
        assert_equal(put_ind, slice(None))

        # All False
        elem = (np.array([False, False, False, False]))
        start, count, stride, put_ind = _StartCountStride(elem, (4,))
        assert_equal(count, 0)
        assert_equal(_out_array_shape(count), (0,))

    def test_ellipsis(self):
        elem = (Ellipsis, slice(1, 4))
        start, count, stride, put_ind = _StartCountStride(elem, (22,25,4))
        assert_equal(start[0,0,0], [0, 0, 1])
        assert_equal(count[0,0,0], (22, 25, 3))
        assert_equal(put_ind[0,0,0], (slice(None), slice(None), slice(None)))

        elem = (Ellipsis, [15,16,17,18,19], slice(None), slice(None))
        start, count, stride, put_ind = _StartCountStride(elem, (2,10,20,10,10))
        assert_equal(start[0,0,0,0,0], [0, 0, 15, 0, 0])
        assert_equal(count[0,0,0,0,0], (2, 10, 5, 10, 10))
        assert_equal(put_ind[0,0,0,0,0], (slice(None), slice(None), slice(None), slice(None), slice(None)))

        try:
            elem2 = (Ellipsis, [15,16,17,18,19], slice(None))
            start, count, stride, put_ind = _StartCountStride(elem2, (2,10,20,10,10))
            assert_equal(None, 'Should throw an exception')
        except IndexError as e:
            assert_equal(str(e), "integer index exceeds dimension size")

        try:
            elem3 = (Ellipsis, [15,16,17,18,19], Ellipsis)
            start, count, stride, put_ind = _StartCountStride(elem3, (2,10, 20,10,10))
            assert_equal(None, 'Should throw an exception')
        except IndexError as e:
            assert_equal(str(e), "At most one ellipsis allowed in a slicing expression")
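# --- Illustrative sketch (not part of the upstream test suite) -------------
# _StartCountStride translates a numpy-style index expression into the
# (start, count, stride) arrays that map onto the netCDF C library's
# vars-style get/put calls, plus put indices locating each piece in the
# output array.  A minimal reading of its output for one strided slice
# (the helper name and file-free setup here are illustrative assumptions):
def _start_count_stride_sketch():
    # elements 2, 5 and 8 of a length-10 dimension
    start, count, stride, put_ind = _StartCountStride((slice(2, 9, 3),), (10,))
    assert start.item() == 2                       # first index read
    assert count.item() == 3                       # number of elements read
    assert stride.item() == 3                      # step between elements
    assert tuple(_out_array_shape(count)) == (3,)  # shape of the result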
class TestsetStartCountStride(unittest.TestCase):

    def test_basic(self):
        grp = FakeGroup({'x':False, 'y':False, 'time':True})

        elem = (slice(None), slice(None), 1)
        start, count, stride, take_ind = _StartCountStride(elem, (22, 25, 1),
            ['x', 'y', 'time'], grp, (22,25), put=True)
        assert_equal(start[0][0][0], [0, 0, 1])
        assert_equal(count[0][0][0], (22, 25, 1))
        assert_equal(take_ind[0][0][0], (slice(None), slice(None), -1))

        elem = (slice(None), slice(None), slice(1, 4))
        start, count, stride, take_ind = _StartCountStride(elem, (22,25,1),
            ['x', 'y', 'time'], grp, (22,25,3), put=True)
        assert_equal(start[0][0][0], [0, 0, 1])
        assert_equal(count[0][0][0], (22, 25, 3))
        assert_equal(take_ind[0][0][0], (slice(None), slice(None), slice(None)))

    def test_integer(self):
        grp = FakeGroup({'x':False, 'y':False})

        elem = ([0,4,5], slice(20, None))
        start, count, stride, take_ind = _StartCountStride(elem, (22, 25),
            ['x', 'y'], grp, (3,5), put=True)
        assert_equal(start[0][0], (0, 20))
        assert_equal(start[1][0], (4, 20))
        assert_equal(start[2][0], (5, 20))
        assert_equal(count[0], np.array([[1,5],]))
        assert_equal(stride[0][0], (1, 1))
        assert_equal(take_ind[0][0], (0, slice(None)))
        assert_equal(take_ind[1][0], (1, slice(None)))
        assert_equal(take_ind[2][0], (2, slice(None)))

    def test_booleans(self):
        grp = FakeGroup({'x':False, 'y':False, 'z':False})

        elem = ([0,4,5], np.array([False, True, False, True, True]), slice(None))
        start, count, stride, take_ind = _StartCountStride(elem, (10, 5, 12),
            ['x', 'y', 'z'], grp, (3, 3, 12), put=True)
        assert_equal(start[0][0][0], (0, 1, 0))
        assert_equal(start[1][0][0], (4, 1, 0))
        assert_equal(start[2][0][0], (5, 1, 0))
        assert_equal(start[0][1][0], (0, 3, 0))
        assert_equal(count[0][0][0], (1, 1, 12))
        assert_equal(stride[0][0][0], (1, 1, 1))
        assert_equal(take_ind[0][0][0], (0, 0, slice(None)))
        assert_equal(take_ind[1][0][0], (1, 0, slice(None)))
        assert_equal(take_ind[0][1][0], (0, 1, slice(None)))

    def test_unlim(self):
        grp = FakeGroup({'time':True, 'x':False, 'y':False})

        elem = ([0,2,5], slice(None), slice(None))
        start, count, stride, take_ind = _StartCountStride(elem, (0, 6, 7),
            ['time', 'x', 'y'], grp, (3, 6, 7), put=True)
        assert_equal(start[0][0][0], (0, 0, 0))
        assert_equal(start[2][0][0], (5, 0, 0))
        assert_equal(count[2][0][0], (1, 6, 7))
        assert_equal(take_ind[0][0][0], (0, slice(None), slice(None)))
        assert_equal(take_ind[2][0][0], (2, slice(None), slice(None)))

        # pull request #683 broke this, since _StartCountStride now uses
        # Dimension.__len__.
        #elem = (slice(None, None, 2), slice(None), slice(None))
        #start, count, stride, take_ind = _StartCountStride(elem, (0, 6, 7),\
        #    ['time', 'x', 'y'], grp, (10, 6, 7), put=True)
        #assert_equal(start[0][0][0], (0,0,0))
        #assert_equal(count[0][0][0], (5, 6, 7))
        #assert_equal(stride[0][0][0], (2, 1, 1))
        #assert_equal(take_ind[0][0][0], 3*(slice(None),))

    def test_ellipsis(self):
        grp = FakeGroup({'x':False, 'y':False, 'time':True})

        elem = (Ellipsis, slice(1, 4))
        start, count, stride, take_ind = _StartCountStride(elem, (22,25,1),
            ['x', 'y', 'time'], grp, (22,25,3), put=True)
        assert_equal(start[0,0,0], [0, 0, 1])
        assert_equal(count[0,0,0], (22, 25, 3))
        assert_equal(take_ind[0,0,0], (slice(None), slice(None), slice(None)))

        grp = FakeGroup({'time':True, 'h':False, 'z':False, 'y':False, 'x':False})

        elem = (Ellipsis, [15,16,17,18,19], slice(None), slice(None))
        start, count, stride, take_ind = _StartCountStride(elem, (2,10,20,10,10),
            ['time', 'h', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
        assert_equal(start[0,0,0,0,0], [0, 0, 15, 0, 0])
        assert_equal(count[0,0,0,0,0], [2, 10, 5, 10, 10])
        assert_equal(stride[0,0,0,0,0], [1, 1, 1, 1, 1])
        assert_equal(take_ind[0,0,0,0,0], (slice(None), slice(None), slice(None), slice(None), slice(None)))

        try:
            elem2 = (Ellipsis, [15,16,17,18,19], slice(None))
            start, count, stride, take_ind = _StartCountStride(elem2, (2,10,20,10,10),
                ['time', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
            assert_equal(None, 'Should throw an exception')
        except IndexError as e:
            #assert_equal(str(e), "integer index exceeds dimension size")
            assert_equal(str(e), "list index out of range")

        try:
            elem3 = (Ellipsis, [15,16,17,18,19], Ellipsis)
            start, count, stride, take_ind = _StartCountStride(elem3, (2,10, 20,10,10),
                ['time', 'z', 'y', 'x'], grp, (2,10,5,10,10), put=True)
            assert_equal(None, 'Should throw an exception')
        except IndexError as e:
            #assert_equal(str(e), "At most one ellipsis allowed in a slicing expression")
            assert_equal(str(e), "list index out of range")
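# --- Illustrative sketch (not part of the upstream test suite) -------------
# With put=True, _StartCountStride is also given the dimension names, the
# group (so unlimited dimensions can be grown) and the shape of the data
# being assigned; take_ind then locates the piece of that data belonging to
# each (start, count, stride) triple.  FakeGroup and FakeDimension, defined
# below, stand in for a real Dataset; the forward reference is only resolved
# if the sketch is actually called.
def _put_mode_sketch():
    grp = FakeGroup({'t': True, 'x': False})   # 't' unlimited
    elem = (slice(None), slice(None))
    start, count, stride, take_ind = _StartCountStride(
        elem, (3, 4), ['t', 'x'], grp, (3, 4), put=True)
    assert_equal(start[0][0], (0, 0))
    assert_equal(count[0][0], (3, 4))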
class FakeGroup:
    """Create a fake group instance by passing a dictionary of booleans
    keyed by dimension name."""
    def __init__(self, dimensions):
        self.dimensions = {}
        for k, v in dimensions.items():
            self.dimensions[k] = FakeDimension(v)

class FakeDimension:
    def __init__(self, unlimited=False):
        self.unlimited = unlimited

    def isunlimited(self):
        return self.unlimited

if __name__=='__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_vars.py000066400000000000000000000064621512661643000204510ustar00rootroot00000000000000
import sys
import unittest
import os
import tempfile
import numpy as np
from numpy.random.mtrand import uniform
from numpy.testing import assert_array_equal, assert_array_almost_equal
import netCDF4

# test variable creation.

FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VAR_DOUBLE_NAME = "dummy_var"
VAR_SHORT_NAME = 'dummy_var_short'
VARNAMES = sorted([VAR_DOUBLE_NAME, VAR_SHORT_NAME])
GROUP_NAME = "dummy_group"
DIM1_NAME = "x"
DIM1_LEN = 2
DIM2_NAME = "y"
DIM2_LEN = 3
DIM3_NAME = "z"
DIM3_LEN = 25
randomdata = uniform(size=(DIM1_LEN,DIM2_LEN,DIM3_LEN))
class VariablesTestCase(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file, 'w')
        f.createDimension(DIM1_NAME, DIM1_LEN)
        f.createDimension(DIM2_NAME, DIM2_LEN)
        f.createDimension(DIM3_NAME, DIM3_LEN)
        v1 = f.createVariable(VAR_DOUBLE_NAME, 'f8', (DIM1_NAME,DIM2_NAME,DIM3_NAME))
        v2 = f.createVariable(VAR_SHORT_NAME, 'i2', (DIM2_NAME,DIM3_NAME))
        v1.long_name = 'dummy data root'
        g = f.createGroup(GROUP_NAME)
        g.createDimension(DIM1_NAME, DIM1_LEN)
        g.createDimension(DIM2_NAME, DIM2_LEN)
        g.createDimension(DIM3_NAME, DIM3_LEN)
        v1g = g.createVariable(VAR_DOUBLE_NAME, 'f8', (DIM1_NAME,DIM2_NAME,DIM3_NAME))
        v2g = g.createVariable(VAR_SHORT_NAME, 'i2', (DIM2_NAME,DIM3_NAME))
        v1g.long_name = 'dummy data subgroup'
        v1[:] = randomdata
        v1g[:] = randomdata
        f.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing primitive variables"""
        f = netCDF4.Dataset(self.file, 'r')
        # check variables in root group.
        varnames = sorted(f.variables.keys())
        v1 = f.variables[VAR_DOUBLE_NAME]
        v2 = f.variables[VAR_SHORT_NAME]
        assert varnames == VARNAMES
        assert v1.dtype.str[1:] == 'f8'
        assert v2.dtype.str[1:] == 'i2'
        assert v1.long_name == 'dummy data root'
        assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
        assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
        assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
        assert v2.shape == (DIM2_LEN,DIM3_LEN)
        assert v1.size == DIM1_LEN * DIM2_LEN * DIM3_LEN
        assert len(v1) == DIM1_LEN
        #assert np.allclose(v1[:],randomdata)
        assert_array_almost_equal(v1[:], randomdata)
        # check variables in sub group.
        g = f.groups[GROUP_NAME]
        varnames = sorted(g.variables.keys())
        v1 = g.variables[VAR_DOUBLE_NAME]
        # test iterating over variable (should stop when
        # it gets to the end and raises IndexError, issue 121)
        for v in v1:
            pass
        v2 = g.variables[VAR_SHORT_NAME]
        assert varnames == VARNAMES
        assert v1.dtype.str[1:] == 'f8'
        assert v2.dtype.str[1:] == 'i2'
        assert v1.long_name == 'dummy data subgroup'
        assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
        assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
        assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
        assert v2.shape == (DIM2_LEN,DIM3_LEN)
        #assert np.allclose(v1[:],randomdata)
        assert_array_almost_equal(v1[:], randomdata)
        f.close()
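# --- Illustrative sketch (not part of the upstream test suite) -------------
# runTest iterates over a Variable ("for v in v1"); iteration yields
# successive slices along the first dimension and stops with IndexError at
# the end (issue 121).  A standalone version, reusing this module's imports;
# the file name is an arbitrary assumption:
def _iteration_sketch(path='iter_demo.nc'):
    with netCDF4.Dataset(path, 'w') as ds:
        ds.createDimension('x', 3)
        ds.createDimension('y', 2)
        v = ds.createVariable('v', 'f8', ('x', 'y'))
        v[:] = np.arange(6.).reshape(3, 2)
        rows = list(v)                  # one row per index along 'x'
        assert len(rows) == 3 and rows[0].shape == (2,)
    os.remove(path)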
if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/test_vlen.py000066400000000000000000000177211512661643000204430ustar00rootroot00000000000000
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal

FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons, object)
datas = np.empty(nlats*nlons, object)
nn = 0
for n in range(nlats*nlons):
    nn = nn + 1
    data[n] = np.arange(nn, dtype=VL_BASETYPE)
    datas[n] = ''.join([chr(i) for i in range(97, 97+nn+1)])
data = np.reshape(data, (nlats,nlons))
datas = np.reshape(datas, (nlats,nlons))

class VariablesTestCase(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME, nlons)
        f.createDimension(DIM2_NAME, nlats)
        ragged = f.createVariable(VAR1_NAME, vlen_t,
                                  (DIM2_NAME, DIM1_NAME))
        strings = f.createVariable(VAR2_NAME, str,
                                   (DIM2_NAME, DIM1_NAME))
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        string_scalar = f.createVariable(VAR4_NAME, str, ())
        vlen_scalar = f.createVariable(VAR5_NAME, vlen_t, ())
        ragged[:] = data
        ragged[-1,-1] = data[-1,-1]
        strings[:] = datas
        strings[-2,-2] = datas[-2,-2]
        strings_alt[:] = datas.astype(str)
        string_scalar[...] = 'foo' #issue458
        vlen_scalar[...] = np.array([1,2,3], np.int16)
        f.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        v = f.variables[VAR1_NAME]
        vs = f.variables[VAR2_NAME]
        vs_alt = f.variables[VAR3_NAME]
        assert list(f.vltypes.keys()) == [VL_NAME]
        assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
        assert f.variables['string_scalar'][...] == 'foo'
        assert_array_equal(f.variables['vlen_scalar'][...], np.array([1,2,3], np.int16))
        data2 = v[:]
        data2s = vs[:]
        # issue #1306
        assert repr(vs[[0,2,3],0]) == "array(['ab', 'abcdefghijkl', 'abcdefghijklmnopq'], dtype=object)"
        for i in range(nlons):
            for j in range(nlats):
                assert_array_equal(data2[j,i], data[j,i])
                assert datas[j,i] == data2s[j,i]
        assert_array_equal(datas, vs_alt[:])
        # issue #1408
        data2a = data2[::2,::2]
        data2b = v[::2,::2]
        data2sa = data2s[::2,::2]
        data2sb = vs[::2,::2]
        for i in range(nlons//2):
            for j in range(nlats//2):
                assert_array_equal(data2a[j,i], data2b[j,i])
                assert_array_equal(data2sa[j,i], data2sb[j,i])
        f.close()

class TestInvalidDataType(unittest.TestCase):
    def runTest(self):
        f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
        f.createDimension('x', 1)
        # using assertRaisesRegexp as a context manager
        # only works with python >= 2.7 (issue #497)
        #with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
        #    f.createVariable('foo', str, ('x',))
        try:
            f.createVariable('foo', str, ('x',))
        except ValueError:
            pass
        f.close()
        os.remove(FILE_NAME)

class TestScalarVlenString(unittest.TestCase):
    # issue 333
    def runTest(self):
        f = Dataset(FILE_NAME, 'w', format='NETCDF4')
        teststring = f.createVariable('teststring', str)
        stringout = "yyyymmdd_hhmmss"
        teststring[()] = stringout
        f.close()
        f = Dataset(FILE_NAME)
        assert f.variables['teststring'][:] == stringout
        f.close()
        os.remove(FILE_NAME)

class TestIntegerIndex(unittest.TestCase):
    # issue 526
    def runTest(self):
        strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
        strtest.createDimension('tenstrings', 10)
        strtest.createVariable('tenstrings', str, ['tenstrings'])
        strtest['tenstrings'][np.int32(5)] = 'asdf'
        strtest['tenstrings'][6.0] = 'asdf'
        strtest.close()
        f = Dataset(FILE_NAME)
        assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
        assert f.variables['tenstrings'][6.0] == 'asdf'
        f.close()
        os.remove(FILE_NAME)

class TestObjectArrayIndexing(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME, nlons)
        f.createDimension(DIM2_NAME, nlats)
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        strings_alt[:] = datas.astype(str)
        f.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        vs_alt = f.variables[VAR3_NAME]
        unicode_strings = vs_alt[:]
        fancy_indexed = unicode_strings[0][[1,2,4]]
        assert fancy_indexed[0] == 'abc'
        assert fancy_indexed[1] == 'abcd'
        assert fancy_indexed[2] == 'abcdef'
        f.close()
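# --- Illustrative sketch (not part of the upstream test suite) -------------
# Reading a VLEN variable yields a numpy object array whose elements are
# 1-d arrays of varying length, which is why the tests above compare row by
# row.  A standalone version, reusing this module's imports; the file name
# is an arbitrary assumption:
def _vlen_sketch(path='vlen_demo.nc'):
    with Dataset(path, 'w') as ds:
        vlt = ds.createVLType(np.int16, 'vlt')
        ds.createDimension('x', 3)
        v = ds.createVariable('ragged', vlt, ('x',))
        for n in range(3):
            v[n] = np.arange(n + 1, dtype=np.int16)   # rows of length 1, 2, 3
    with Dataset(path) as ds:
        rows = ds['ragged'][:]          # object array of int16 arrays
        assert rows.dtype == object
        assert [len(r) for r in rows] == [1, 2, 3]
    os.remove(path)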
class VlenAppendTestCase(unittest.TestCase):
    def setUp(self):
        import netCDF4
        if netCDF4.__netcdf4libversion__ < "4.4.1":
            self.skip = True
            try:
                self.skipTest("This test requires NetCDF 4.4.1 or later.")
            except AttributeError:
                # workaround for Python 2.6 (skipTest(reason) is new
                # in Python 2.7)
                pass
        else:
            self.skip = False

        self.file = FILE_NAME
        f = Dataset(self.file, 'w')
        vlen_type = f.createVLType(np.float64, 'vltest')
        f.createDimension('x', None)
        v = f.createVariable('vl', vlen_type, 'x')
        w = f.createVariable('vl2', np.float64, 'x')
        f.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing appending to vlen variables (issue #527)."""
        # workaround for Python 2.6
        if self.skip:
            return

        f = Dataset(self.file, 'a')
        w = f.variables["vl2"]
        v = f.variables["vl"]
        w[0:3] = np.arange(3, dtype=np.float64)
        v[0]            # sometimes crashes
        v[0].tolist()   # sometimes crashes
        v[0].size       # BOOM!
        f.close()

class Vlen_ScaledInts(unittest.TestCase):
    def setUp(self):
        self.file = FILE_NAME
        nc = Dataset(self.file, 'w')
        vlen_type = nc.createVLType(np.uint8, 'vltest')
        nc.createDimension('x', None)
        v = nc.createVariable('vl', vlen_type, 'x')
        v.scale_factor = 1./254.
        v.missing_value = np.array(255, np.uint8)
        # random lengths between 1 and 1000
        ilen = np.random.randint(1, 1000, size=100)
        n = 0
        for nlen in ilen:
            data = np.random.uniform(low=0.0, high=1.0, size=nlen)
            v[n] = data
            if n == 99:
                self.data = data
            n += 1
        nc.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing packing float vlens as scaled integers (issue #1003)."""
        nc = Dataset(self.file)
        data = nc['vl'][-1]
        # check max error of compression
        err = np.abs(data - self.data)
        assert err.max() < nc['vl'].scale_factor
        # turn off auto-scaling
        nc.set_auto_maskandscale(False)
        data = nc['vl'][-1]
        assert data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor)
        nc.close()
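# --- Illustrative sketch (not part of the upstream test suite) -------------
# Vlen_ScaledInts relies on the usual scale_factor convention: floats are
# divided by scale_factor and rounded on write, multiplied back on read, so
# the round-trip error is bounded by the scale factor.  The bare arithmetic,
# with no file involved:
def _scaled_int_sketch():
    scale = 1./254.
    x = np.random.uniform(size=10)                  # values in [0, 1)
    packed = np.around(x / scale).astype(np.uint8)  # what gets stored
    unpacked = packed * scale                       # what comes back
    assert np.abs(unpacked - x).max() < scale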
if __name__ == '__main__':
    unittest.main()
netcdf4-python-1.7.4rel/test/ubyte.nc000066400000000000000000000003401512661643000175300ustar00rootroot00000000000000
[binary netCDF classic file (magic "CDF"): byte variables 'ub' (with _Unsigned = "true") and 'sb', 'sb2' (with _Unsigned = "false"); raw bytes omitted, as they are not representable as text]