environment:
  CONDA_INSTALL_LOCN: C:\\Miniconda36-x64
  matrix:
    - TARGET_ARCH: x64
      NPY: 1.16
      PY: 3.6
    - TARGET_ARCH: x64
      NPY: 1.16
      PY: 3.7

platform:
  - x64
install:
# If there is a newer build queued for the same PR, cancel this one.
# The AppVeyor 'rollout builds' option is supposed to serve the same
# purpose but it is problematic because it tends to cancel builds pushed
# directly to master instead of just PR builds (or the converse).
# credits: JuliaLang developers.
- ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
      https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
      Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
      throw "There are newer queued builds for this pull request, failing early." }
# Add path, activate `conda` and update conda.
- cmd: call %CONDA_INSTALL_LOCN%\Scripts\activate.bat
- cmd: conda config --set always_yes yes --set changeps1 no --set show_channel_urls true
- cmd: conda update conda
- cmd: conda config --add channels conda-forge --force
- cmd: conda config --set channel_priority strict
- cmd: set PYTHONUNBUFFERED=1
- cmd: conda install conda-build vs2008_express_vc_python_patch
- cmd: call setup_x64
- cmd: conda create --name TEST python=%PY% numpy=%NPY% cython pip pytest hdf5 libnetcdf cftime
- cmd: conda info --all
- cmd: conda activate TEST
- cmd: echo [options] > setup.cfg
- cmd: echo [directories] >> setup.cfg
- cmd: echo HDF5_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
- cmd: echo HDF5_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
- cmd: echo netCDF4_libdir = %CONDA_PREFIX%\Library\lib >> setup.cfg
- cmd: echo netCDF4_incdir = %CONDA_PREFIX%\Library\include >> setup.cfg
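For reference, the four echo lines above generate a setup.cfg of roughly the following shape (the conda prefix shown is illustrative; the real value of %CONDA_PREFIX% for the TEST environment is substituted at build time):

[options]
[directories]
HDF5_libdir = C:\Miniconda36-x64\envs\TEST\Library\lib
HDF5_incdir = C:\Miniconda36-x64\envs\TEST\Library\include
netCDF4_libdir = C:\Miniconda36-x64\envs\TEST\Library\lib
netCDF4_incdir = C:\Miniconda36-x64\envs\TEST\Library\include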
# Skip .NET project specific build phase.
build: off
test_script:
- python -m pip install . --no-deps --ignore-installed --no-cache-dir -vvv
- set NO_NET=1
- cd test && python run_all.py
language: python
dist: bionic
cache: pip
addons:
  apt:
    packages:
      - libhdf5-serial-dev
      - netcdf-bin
      - libnetcdf-dev

env:
  global:
    - DEPENDS="numpy cython"
    - NO_NET=1
  global:
    - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 cftime"
    - NO_NET=1
    - MPI=0
python:
- "2.7"
- "3.3"
- "3.4"
- "3.5"
- "3.6"
- "3.7"
- "3.8-dev"
matrix:
  allow_failures:
    - python: "3.8-dev"
    - python: 3.7
      env:
        - MPI=1
        - CC=mpicc.mpich
        - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
        - NETCDF_VERSION=GITMASTER
        - NETCDF_DIR=$HOME
        - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
  include:
    # Absolute minimum dependencies
    # Absolute minimum dependencies.
    - python: 2.7
      env:
        - DEPENDS="numpy==1.7.0 cython==0.19"
    # test without Cython installed
    - python: 2.7
      env:
        - DEPENDS="numpy==1.10.0 cython==0.21 ordereddict==1.1 setuptools==18.0 cftime"
    # test MPI with latest released version
    - python: 3.7
      env:
        - MPI=1
        - CC=mpicc.mpich
        - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
        - NETCDF_VERSION=4.6.3
        - NETCDF_DIR=$HOME
        - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
      addons:
        apt:
          packages:
            - mpich
            - libmpich-dev
            - libhdf5-mpich-dev
    # test MPI with latest released version
    - python: 3.7
      env:
        - MPI=1
        - CC=mpicc.mpich
        - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
        - NETCDF_VERSION=4.6.3
        - PNETCDF_VERSION=1.11.0
        - NETCDF_DIR=$HOME
        - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
      addons:
        apt:
          packages:
            - mpich
            - libmpich-dev
            - libhdf5-mpich-dev
    # test with netcdf-c from github master
    - python: 3.7
      env:
        - DEPENDS="numpy"
        - MPI=1
        - CC=mpicc.mpich
        - DEPENDS="numpy>=1.10.0 cython>=0.21 setuptools>=18.0 mpi4py>=1.3.1 cftime"
        - NETCDF_VERSION=GITMASTER
        - NETCDF_DIR=$HOME
        - PATH=${NETCDF_DIR}/bin:${PATH} # pick up nc-config here
      addons:
        apt:
          packages:
            - mpich
            - libmpich-dev
            - libhdf5-mpich-dev
notifications:
  email: false
before_install:
- sudo apt-get install libhdf5-serial-dev netcdf-bin libnetcdf-dev
- pip install Cython # workaround for pip bug
- pip install $DEPENDS
install:
- if [ $MPI -eq 1 ] ; then ci/travis/build-parallel-netcdf.sh; fi
- python setup.py build
- python setup.py install
script:
  - cd test
  - python run_all.py
  - |
    echo "MPI = ${MPI}"
    echo "PNETCDF_VERSION = ${PNETCDF_VERSION}"
    if [ $MPI -eq 1 ] ; then
      cd ../examples
      mpirun.mpich -np 4 python mpi_example.py
      if [ $? -ne 0 ] ; then
        echo "mpi test failed!"
        exit 1
      else
        echo "mpi test passed!"
      fi
      if [ -n "${PNETCDF_VERSION}" ] ; then
        mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA
        if [ $? -ne 0 ] ; then
          echo "PnetCDF mpi test failed!"
          exit 1
        else
          echo "PnetCDF mpi test passed!"
        fi
      fi
    fi
@@ -15,18 +15,6 @@ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
parts of pyiso8601 are included in netcdftime under the following license:
Copyright (c) 2007 Michael Twomey
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
......
recursive-include docs *
recursive-include man *
recursive-include include *
include MANIFEST.in
include README.md
include COPYING
include Changelog
include appveyor.yml
include .travis.yml
include setup.cfg
include setup.cfg.template
include examples/*py
include examples/*ipynb
include examples/README.md
include test/*py
include utils/nc3tonc4
include utils/nc4tonc3
include utils/ncinfo
include netcdftime/__init__.py
include netcdftime/_datetime.pyx
include netcdftime/netcdftime.py
include netcdftime/_datetime.c
include test/*nc
include netCDF4/__init__.py
include netCDF4/_netCDF4.pyx
include netCDF4/utils.py
include netCDF4/_netCDF4.c
include include/netCDF4.pxi
include include/mpi-compat.h
include include/membuf.pyx
Metadata-Version: 1.0
Name: netCDF4
Version: 0.5
Summary: netCDF version 4 has many features not found in earlier versions of the library, such as hierarchical groups, zlib compression, multiple unlimited dimensions, and new data types. It is implemented on top of HDF5. This module implements many of the new features, and can read netCDF files created with older versions of the library. The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module
Home-page: http://www.cdc.noaa.gov/people/jeffrey.s.whitaker/python/netCDF4.html
Author: Jeff Whitaker
Author-email: jeffrey.s.whitaker@noaa.gov
License: ['OSI Approved']
Download-URL: http://www.cdc.noaa.gov/people/jeffrey.s.whitaker/python/netCDF4-0.4.4.tar.gz
Description: UNKNOWN
Platform: any
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Archiving :: Compression
Classifier: Operating System :: OS Independent
To update web docs at http://unidata.github.io/netcdf4-python:
First install fork of pdoc from https://github.com/jswhit/pdoc (requires mako,
markdown and pygments).
markdown, pygments and future).
Then, in the netcdf4-python github clone directory (after building and
installing github master),
......
* create a release branch ('vX.Y.Zrel'). In the release branch...
* make sure version numbers in PKG-INFO, setup.py and netCDF4/_netCDF4.pyx are up to date
(in _netCDF4.pyx, change 'Version' in first line of docstring at top of file,
and __version__ variable).
* update Changelog and README.md as needed.
* commit and push all of the above changes.
* install the module (python setup.py install), then run 'sh create_docs.sh'
to update html docs. Commit and push the update to docs/netCDF4/index.html.
* create a pull request for the release branch.
* After the release branch has been merged, tag a release
git tag -a vX.Y.Zrel -m "version X.Y.Z release"
git push origin --tags
* push an empty commit to the netcdf4-python-wheels repo to trigger new builds.
(e.g. git commit --allow-empty -m "Trigger build")
You will likely want to edit the .travis.yml file at
https://github.com/MacPython/netcdf4-python-wheels to specify the BUILD_COMMIT before triggering a build.
* update the pypi entry, upload the wheels from wheels.scipy.org.
Lastly, create a source tarball using
'python setup.py sdist' and upload to pypi.
* update web docs by copying docs/netCDF4/index.html somewhere, switch
to the gh-pages branch, copy the index.html file back, commit and push
the updated index.html file (see README.gh-pages).
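Collected into a single hedged sketch, the tag-and-publish steps above look like this (version numbers are illustrative; twine is an assumption, since the notes only say to upload to pypi):

# after the release branch is merged:
git tag -a v1.5.3rel -m "version 1.5.3 release"
git push origin --tags
# trigger wheel builds in the MacPython/netcdf4-python-wheels repo:
git commit --allow-empty -m "Trigger build"
git push
# build the source tarball and upload it to pypi:
python setup.py sdist
twine upload dist/*   # assumption: any PyPI upload tool will do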
import netCDF4
import sys
import netCDF4, sys, numpy
sys.stdout.write('netcdf4-python version: %s\n'%netCDF4.__version__)
sys.stdout.write('HDF5 lib version: %s\n'%netCDF4.__hdf5libversion__)
sys.stdout.write('netcdf lib version: %s\n'%netCDF4.__netcdf4libversion__)
sys.stdout.write('numpy version %s\n' % numpy.__version__)
#!/bin/bash
set -e

pushd /tmp
if [ -n "${PNETCDF_VERSION}" ]; then
    echo "Using downloaded PnetCDF version ${PNETCDF_VERSION}"
    wget https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz
    tar -xzf pnetcdf-${PNETCDF_VERSION}.tar.gz
    pushd pnetcdf-${PNETCDF_VERSION}
    ./configure --prefix $NETCDF_DIR --enable-shared --disable-fortran --disable-cxx
    NETCDF_EXTRA_CONFIG="--enable-pnetcdf"
    make -j 2
    make install
    popd
fi

echo "Using downloaded netCDF version ${NETCDF_VERSION} with parallel capabilities enabled"
if [ ${NETCDF_VERSION} == "GITMASTER" ]; then
    git clone http://github.com/Unidata/netcdf-c netcdf-c
    pushd netcdf-c
    autoreconf -i
else
    wget ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-c-${NETCDF_VERSION}.tar.gz
    tar -xzf netcdf-c-${NETCDF_VERSION}.tar.gz
    pushd netcdf-c-${NETCDF_VERSION}
fi
# for Ubuntu xenial
export CPPFLAGS="-I/usr/include/hdf5/mpich -I${NETCDF_DIR}/include"
export LDFLAGS="-L${NETCDF_DIR}/lib"
export LIBS="-lhdf5_mpich_hl -lhdf5_mpich -lm -lz"
./configure --prefix $NETCDF_DIR --enable-netcdf-4 --enable-shared --disable-dap --enable-parallel4 $NETCDF_EXTRA_CONFIG
make -j 2
make install
popd
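The script expects the environment that .travis.yml exports; a hedged local invocation, with values lifted from the matrix entries above, might look like:

export NETCDF_DIR=$HOME            # install prefix the script configures against
export NETCDF_VERSION=4.6.3        # or GITMASTER to build netcdf-c from git
export PNETCDF_VERSION=1.11.0      # optional; enables the PnetCDF pass above
export CC=mpicc.mpich              # MPI compiler wrapper picked up by configure
ci/travis/build-parallel-netcdf.sh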
netcdf4-python (1.5.3-2) UNRELEASED; urgency=medium
* Drop Provides: ${python3:Provides}.
* Drop Name field from upstream metadata.
-- Bas Couwenberg <sebastic@debian.org> Thu, 07 Nov 2019 18:39:23 +0100
netcdf4-python (1.5.3-1) unstable; urgency=medium
* New upstream release.
* Bump Standards-Version to 4.4.1, no changes.
-- Bas Couwenberg <sebastic@debian.org> Wed, 23 Oct 2019 09:21:45 +0200
netcdf4-python (1.5.2-1) unstable; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Wed, 04 Sep 2019 05:56:15 +0200
netcdf4-python (1.5.1.2-4) unstable; urgency=medium
* Drop Python 2 support.
-- Bas Couwenberg <sebastic@debian.org> Tue, 20 Aug 2019 21:05:08 +0200
netcdf4-python (1.5.1.2-3) unstable; urgency=medium
* Drop versioned Breaks on python-xarray, see: #932509.
-- Bas Couwenberg <sebastic@debian.org> Sat, 20 Jul 2019 20:47:54 +0200
netcdf4-python (1.5.1.2-2) unstable; urgency=medium
* Bump Standards-Version to 4.4.0, no changes.
* Add versioned Breaks on python-xarray, see: #931971.
-- Bas Couwenberg <sebastic@debian.org> Sat, 13 Jul 2019 12:42:37 +0200
netcdf4-python (1.5.1.2-1) unstable; urgency=medium
* Update gbp.conf to use --source-only-changes by default.
* Move from experimental to unstable.
-- Bas Couwenberg <sebastic@debian.org> Sun, 07 Jul 2019 08:57:49 +0200
netcdf4-python (1.5.1.2-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Tue, 07 May 2019 06:24:56 +0200
netcdf4-python (1.5.1.1-1~exp1) experimental; urgency=medium
* New upstream release.
* Add Provides field for ${python{,3}:Provides} substvar.
-- Bas Couwenberg <sebastic@debian.org> Fri, 03 May 2019 06:15:44 +0200
netcdf4-python (1.5.1-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Tue, 30 Apr 2019 06:41:09 +0200
netcdf4-python (1.5.0.1-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Tue, 02 Apr 2019 07:08:19 +0200
netcdf4-python (1.5.0-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Sun, 24 Mar 2019 07:59:15 +0100
netcdf4-python (1.4.3.2-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Sat, 09 Mar 2019 08:29:04 +0100
netcdf4-python (1.4.3.1-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Thu, 07 Mar 2019 06:41:49 +0100
netcdf4-python (1.4.3-1~exp1) experimental; urgency=medium
* New upstream release.
* Bump Standards-Version to 4.3.0, no changes.
-- Bas Couwenberg <sebastic@debian.org> Tue, 05 Mar 2019 06:56:31 +0100
netcdf4-python (1.4.2-1) unstable; urgency=medium
* New upstream release.
* Update watch file to limit matches to archive path.
* Bump Standards-Version to 4.2.1, no changes.
-- Bas Couwenberg <sebastic@debian.org> Thu, 25 Oct 2018 07:11:13 +0200
netcdf4-python (1.4.1-1) unstable; urgency=medium
* New upstream release.
* Bump Standards-Version to 4.2.0, no changes.
* Drop autopkgtests to test installability & module import.
* Add lintian override for testsuite-autopkgtest-missing.
-- Bas Couwenberg <sebastic@debian.org> Wed, 15 Aug 2018 07:02:39 +0200
netcdf4-python (1.4.0-1) unstable; urgency=medium
* New upstream release.
* Update copyright-format URL to use HTTPS.
* Update Vcs-* URLs for Salsa.
* Bump Standards-Version to 4.1.4, no changes.
* Add module import tests to autopkgtest configuration.
* Add python{,3}-cftime to build dependencies.
* Drop ancient X-Python-Version field.
* Strip trailing whitespace from control & rules files.
-- Bas Couwenberg <sebastic@debian.org> Wed, 16 May 2018 06:50:56 +0200
netcdf4-python (1.3.1-1) unstable; urgency=medium
* New upstream release.
* Update watch file to handle 1.cd release tag.
* Bump Standards-Version to 4.1.1, no changes.
-- Bas Couwenberg <sebastic@debian.org> Tue, 31 Oct 2017 07:37:16 +0100
netcdf4-python (1.3.0-1) unstable; urgency=medium
* New upstream release.
* Bump Standards-Version to 4.1.0, no changes.
* Add autopkgtest to test installability.
-- Bas Couwenberg <sebastic@debian.org> Mon, 25 Sep 2017 08:16:29 +0200
netcdf4-python (1.2.9-1) unstable; urgency=medium
* New upstream release.
* Move from experimental to unstable.
-- Bas Couwenberg <sebastic@debian.org> Sun, 18 Jun 2017 10:20:12 +0200
netcdf4-python (1.2.8-1~exp1) experimental; urgency=medium
* New upstream release.
* Enable PIE hardening buildflags.
* Add cython3 to build dependencies.
-- Bas Couwenberg <sebastic@debian.org> Sun, 28 May 2017 08:47:38 +0200
netcdf4-python (1.2.7-1) unstable; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Sun, 08 Jan 2017 11:06:57 +0100
netcdf4-python (1.2.6-1) unstable; urgency=medium
* New upstream release.
* Bump X-Python-Version to 2.7.
-- Bas Couwenberg <sebastic@debian.org> Sat, 10 Dec 2016 19:22:19 +0100
netcdf4-python (1.2.5-1) unstable; urgency=medium
* New upstream release.
* Drop spelling-errors.patch, applied upstream.
* Drop license & copyright for netcdftime.py, removed upstream.
-- Bas Couwenberg <sebastic@debian.org> Tue, 29 Nov 2016 19:15:00 +0100
netcdf4-python (1.2.4-1) unstable; urgency=medium
* New upstream release.
* Drop tempfile-mktemp.patch, applied upstream.
* Bump Standards-Version to 3.9.8, no changes.
* Update dh_python & dh_numpy calls to act on specific package.
* Add patch to fix spelling errors.
-- Bas Couwenberg <sebastic@debian.org> Sat, 16 Apr 2016 17:25:36 +0200
netcdf4-python (1.2.3.1-2) unstable; urgency=medium
* Enable all hardening buildflags.
* Add build dependency on netcdf-bin for ncdump (required for tests).
* Add patch to use NamedTemporaryFile instead of deprecated mktemp.
-- Bas Couwenberg <sebastic@debian.org> Sun, 27 Mar 2016 22:35:59 +0200
netcdf4-python (1.2.3.1-1) unstable; urgency=medium
* New upstream release.
* Drop spelling-errors.patch, applied upstream.
-- Bas Couwenberg <sebastic@debian.org> Sat, 12 Mar 2016 01:37:24 +0100
netcdf4-python (1.2.3-1) unstable; urgency=medium
* New upstream release.
* Update Vcs-Git URL to use HTTPS.
* Bump Standards-Version to 3.9.7, no changes.
* Add patch to fix various spelling errors.
-- Bas Couwenberg <sebastic@debian.org> Fri, 11 Mar 2016 13:39:15 +0100
netcdf4-python (1.2.2-2) unstable; urgency=medium
* Bump minimum required libnetcdf-dev to 4.4.0.
-- Bas Couwenberg <sebastic@debian.org> Thu, 21 Jan 2016 21:09:53 +0100
netcdf4-python (1.2.2-1) unstable; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Sat, 02 Jan 2016 01:23:07 +0100
netcdf4-python (1.2.1-2) unstable; urgency=medium
* Bump minimum required libnetcdf-dev to 4.4.0~rc3.
-- Bas Couwenberg <sebastic@debian.org> Sun, 01 Nov 2015 21:25:58 +0100
netcdf4-python (1.2.1-1) unstable; urgency=medium
* New upstream release.
* Drop patches, applied upstream.
-- Bas Couwenberg <sebastic@debian.org> Sun, 18 Oct 2015 12:51:03 +0200
netcdf4-python (1.2.0-1) unstable; urgency=medium
* New upstream release.
* Update Vcs-Browser URL to use HTTPS.
* Add patch to fix 'homogeneous' typo.
-- Bas Couwenberg <sebastic@debian.org> Thu, 24 Sep 2015 08:45:12 +0200
netcdf4-python (1.1.9-2) unstable; urgency=medium
* Bump minimum required libnetcdf-dev to 4.4.0~rc2.
* Move from experimental to unstable.
-- Bas Couwenberg <sebastic@debian.org> Wed, 19 Aug 2015 19:51:28 +0200
netcdf4-python (1.1.9-2~exp2) experimental; urgency=medium
* Bump minimum required libnetcdf-dev to 4.4.0~rc2-1~exp4 for serial HDF5.
......
@@ -6,43 +6,19 @@ Section: science
Priority: optional
Build-Depends: debhelper (>= 9),
dh-python,
python-all-dev (>= 2.6.6-3~),
python-setuptools,
python-numpy,
python3-all-dev,
python3-cftime,
python3-setuptools,
python3-numpy,
cython,
cython3,
libhdf5-dev,
libnetcdf-dev (>= 1:4.4.0~rc2-1~exp4),
libnetcdf-dev (>= 1:4.4.0),
netcdf-bin,
chrpath
Standards-Version: 3.9.6
Vcs-Browser: http://anonscm.debian.org/cgit/pkg-grass/netcdf4-python.git/
Vcs-Git: git://anonscm.debian.org/pkg-grass/netcdf4-python.git -b experimental
Standards-Version: 4.4.1
Vcs-Browser: https://salsa.debian.org/debian-gis-team/netcdf4-python/
Vcs-Git: https://salsa.debian.org/debian-gis-team/netcdf4-python.git
Homepage: http://unidata.github.io/netcdf4-python/
X-Python-Version: >= 2.6
Package: python-netcdf4
Architecture: any
Section: python
Depends: ${python:Depends},
${shlibs:Depends},
${misc:Depends}
Description: Python interface to the netCDF4 (network Common Data Form) library
NetCDF version 4 has many features not found in earlier versions of the
library and is implemented on top of HDF5. This module can read and write
files in both the new netCDF 4 and the old netCDF 3 format, and can create
files that are readable by HDF5 clients. The API is modelled after
Scientific.IO.NetCDF, and should be familiar to users of that module.
.
Most new features of netCDF 4 are implemented, such as multiple unlimited
dimensions, groups and zlib data compression. All the new numeric data types
(such as 64 bit and unsigned integer types) are implemented. Compound and
variable length (vlen) data types are supported, but the enum and opaque data
types are not. Mixtures of compound and vlen data types (compound types
containing vlens, and vlens containing compound types) are not supported.
.
This package contains the netCDF 4 module for Python 2.
Package: python3-netcdf4
Architecture: any
@@ -65,4 +41,3 @@ Description: Python 3 interface to the netCDF4 (network Common Data Form) librar
containing vlens, and vlens containing compound types) are not supported.
.
This package contains the netCDF 4 module for Python 3.
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: netcdf4-python
Upstream-Contact: Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
Source: https://github.com/Unidata/netcdf4-python
@@ -7,34 +7,10 @@ Files: *
Copyright: 2008, Jeffrey Whitaker
License: ISC
Files: netcdftime/netcdftime.py
Copyright: 2007, Michael Twomey
License: Expat
Files: debian/*
Copyright: 2015, Ross Gammon <rossgammon@mail.dk>
License: ISC
License: Expat
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
License: ISC
Permission to use, copy, modify, and distribute this software and
its documentation for any purpose and without fee is hereby granted,
......
@@ -6,7 +6,7 @@ upstream-branch = upstream
# The default name for the Debian branch is "master".
# Change it if the name is different (for instance, "debian/unstable").
debian-branch = experimental
debian-branch = master
# git-import-orig uses the following names for the upstream tags.
# Change the value if you are not using git-import-orig
@@ -14,3 +14,6 @@ upstream-tag = upstream/%(version)s
# Always use pristine-tar.
pristine-tar = True
[buildpackage]
pbuilder-options = --source-only-changes
@@ -2,6 +2,9 @@
#DH_VERBOSE := 1
# Enable hardening build flags
export DEB_BUILD_MAINT_OPTIONS=hardening=+all
DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
export HDF5_INCDIR=/usr/include/hdf5/serial
@@ -11,7 +14,7 @@ export USE_NCCONFIG=1
%:
dh $@ \
--with python2,python3 \
--with python3 \
--buildsystem=pybuild \
--parallel
@@ -28,11 +31,6 @@ override_dh_auto_install:
override_dh_install:
dh_install --list-missing
override_dh_python2:
dh_python2
dh_numpy
override_dh_python3:
dh_python3
dh_numpy3
dh_python3 -ppython3-netcdf4
dh_numpy3 -ppython3-netcdf4
# Not worth the effort
testsuite-autopkgtest-missing
Bug-Database: https://github.com/Unidata/netcdf4-python/issues
Bug-Submit: https://github.com/Unidata/netcdf4-python/issues/new
Name: NetCDF4-python
Other-References: http://unidata.github.io/netcdf4-python/netCDF4-module.html
Repository: https://github.com/Unidata/netcdf4-python.git
Repository-Browse: https://github.com/Unidata/netcdf4-python
version=3
opts=\
dversionmangle=s/\+(debian|dfsg|ds|deb)\d*$//,\
uversionmangle=s/(\d)rel$/$1/g;s/_/./g;s/(\d)[_\.\-\+]?((RC|rc|pre|dev|gamma|beta|alpha|b|a)[\-\.]?(\d*))$/$1~$3$4/;s/RC/rc/,\
uversionmangle=s/(\d)rel$/$1/g;s/_/./g;s/(\d)[_\.\-\+]?((RC|rc|pre|dev|gamma|beta|alpha|b|a)[\-\.]?(\d*))$/$1~$3$4/;s/RC/rc/;s/\.cd$//,\
filenamemangle=s/(?:.*?)?[vr]?(\d[\d\.\-\w]*)\.(tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))/netcdf4-python-$1.$2/ \
https://github.com/Unidata/netcdf4-python/releases \
(?:.*/)*(?:rel|v|r|netcdf4-python|)[\-\_]?(\d[\d\-\.\w]+)\.(?:tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
(?:.*?/archive/)*(?:rel|v|r|netcdf4-python|)[\-\_]?(\d[\d\-\.\w]+)\.(?:tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
<meta http-equiv="refresh" content="0; url=./netCDF4/index.html" />
@@ -13,7 +13,7 @@ n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n' % ntrials)
array = netCDF4._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)
array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)
def write_netcdf(filename,zlib=False,shuffle=False,complevel=6):
......
@@ -12,22 +12,20 @@ n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n' % ntrials)
lsd = 4
if lsd is not None:
array = netCDF4._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)
else:
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,complevel):
def write_netcdf(filename,complevel,lsd):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),zlib=True,shuffle=True,complevel=complevel)
'f8',('n1','n2','n3','n4'),\
zlib=True,shuffle=True,complevel=complevel,\
least_significant_digit=lsd)
foo[:] = array
file.close()
@@ -36,10 +34,27 @@ def read_netcdf(filename):
data = file.variables['data'][:]
file.close()
for complevel in range(0,10):
lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
sys.stdout.write('testing compression with complevel %s...\n' % complevel)
# writing.
t = Timer("write_netcdf('test.nc',%s)" % complevel,"from __main__ import write_netcdf")
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(1,6):
sys.stdout.write('testing compression with least_significant_digit %s...\n' % lsd)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
......
from __future__ import print_function
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
@@ -6,7 +7,7 @@ from timeit import Timer
import os, sys
# use real data.
URL="http://nomad1.ncep.noaa.gov:9090/dods/reanalyses/reanalysis-2/6hr/pgb/pgb"
URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc"
nc = netCDF4.Dataset(URL)
# use real 500 hPa geopotential height data.
@@ -15,19 +16,22 @@ n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s random array ..\n'%(n1dim,n3dim,n4dim))
sys.stdout.write('(average of %s trials)\n' % ntrials)
print nc
array = nc.variables['hgtprs'][0:n1dim,5,:,:]
print array.min(), array.max(), array.shape, array.dtype
sys.stdout.write('(average of %s trials)\n\n' % ntrials)
print(nc)
print(nc.variables['hgt'])
array = nc.variables['hgt'][0:n1dim,5,:,:]
print(array.min(), array.max(), array.shape, array.dtype)
def write_netcdf(filename,complevel):
def write_netcdf(filename,complevel,lsd):
file = netCDF4.Dataset(filename,'w',format='NETCDF4')
file.createDimension('n1', None)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f4',('n1','n3','n4'),zlib=True,shuffle=True,complevel=complevel)
'f4',('n1','n3','n4'),\
zlib=True,shuffle=True,complevel=complevel,\
least_significant_digit=lsd)
foo[:] = array
file.close()
@@ -36,10 +40,29 @@ def read_netcdf(filename):
data = file.variables['data'][:]
file.close()
for complevel in range(0,10):
lsd = None
sys.stdout.write('using least_significant_digit %s\n\n' % lsd)
for complevel in range(0,10,2):
sys.stdout.write('testing compression with complevel %s...\n' % complevel)
# writing.
t = Timer("write_netcdf('test.nc',%s)" % complevel,"from __main__ import write_netcdf")
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
t = Timer("read_netcdf('test.nc')","from __main__ import read_netcdf")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# print out size of resulting files.
sys.stdout.write('size of test.nc = %s\n'%repr(os.stat('test.nc').st_size))
complevel = 4
sys.stdout.write('\nusing complevel %s\n\n' % complevel)
for lsd in range(0,6):
sys.stdout.write('testing compression with least_significant_digit %s..\n'\
% lsd)
# writing.
t = Timer("write_netcdf('test.nc',%s,%s)" % (complevel,lsd),"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
......
# to run: mpirun -np 4 python mpi_example.py
import sys
from mpi4py import MPI
import numpy as np
from netCDF4 import Dataset

if len(sys.argv) == 2:
    format = sys.argv[1]
else:
    format = 'NETCDF4_CLASSIC'

rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)
if rank == 0:
    print('Creating file with format {}'.format(format))
nc = Dataset('parallel_test.nc', 'w', parallel=True, comm=MPI.COMM_WORLD,
             info=MPI.Info(), format=format)
# below should work also - MPI_COMM_WORLD and MPI_INFO_NULL will be used.
#nc = Dataset('parallel_test.nc', 'w', parallel=True)
d = nc.createDimension('dim',4)
v = nc.createVariable('var', np.int32, 'dim')
v[rank] = rank
# switch to collective mode, rewrite the data.
v.set_collective(True)
v[rank] = rank
nc.close()

# reopen the file read-only, check the data
nc = Dataset('parallel_test.nc', parallel=True, comm=MPI.COMM_WORLD,
             info=MPI.Info())
assert rank == nc['var'][rank]
nc.close()

# reopen the file in append mode, modify the data on the last rank.
nc = Dataset('parallel_test.nc', 'a', parallel=True, comm=MPI.COMM_WORLD,
             info=MPI.Info())
if rank == 3:
    nc['var'][rank] = 2*rank  # write via the reopened dataset, not the stale 'v'
nc.close()

# reopen the file read-only again, check the data.
# leave out the comm and info kwargs to check that the defaults
# (MPI_COMM_WORLD and MPI_INFO_NULL) work.
nc = Dataset('parallel_test.nc', parallel=True)
if rank == 3:
    assert 2*rank == nc['var'][rank]
else:
    assert rank == nc['var'][rank]
nc.close()
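As the Travis script above shows, the example takes an optional file format argument; both invocations below are the ones the CI configuration runs (mpirun.mpich is the MPICH launcher as packaged on Ubuntu):

mpirun.mpich -np 4 python mpi_example.py                      # default NETCDF4_CLASSIC
mpirun.mpich -np 4 python mpi_example.py NETCDF3_64BIT_DATA   # exercises PnetCDF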
from __future__ import print_function
from netCDF4 import Dataset
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy as np
import threading
import Queue
import queue
import time
# demonstrate reading of different files from different threads.
@@ -28,7 +29,7 @@ for i in range(nfiles):
nc.close()
# Queue them up
items = Queue.Queue()
items = queue.Queue()
for data,fname in zip(datal,fnames):
items.put(fname)
@@ -51,7 +52,7 @@ start = time.time()
for i in range(nfiles):
get_data(serial=i)
end = time.time()
print 'no threads, time = ',end - start
print('no threads, time = ',end - start)
# with threading.
start = time.time()
@@ -59,4 +60,4 @@ for i in range(nfiles):
threading.Thread(target=get_data).start()
items.join()
end = time.time()
print 'with threading, time = ',end - start
print('with threading, time = ',end - start)
@@ -46,8 +46,8 @@ print(time)
# variables.
times = rootgrp.createVariable('time','f8',('time',))
levels = rootgrp.createVariable('level','i4',('level',))
latitudes = rootgrp.createVariable('latitude','f4',('lat',))
longitudes = rootgrp.createVariable('longitude','f4',('lon',))
latitudes = rootgrp.createVariable('lat','f4',('lat',))
longitudes = rootgrp.createVariable('lon','f4',('lon',))
# 2 unlimited dimensions.
#temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',))
# this makes the compression 'lossy' (preserving a precision of 1/1000)
@@ -162,18 +162,17 @@ datac2.real = datain['real']
datac2.imag = datain['imag']
print(datac.dtype,datac)
print(datac2.dtype,datac2)
# more complex compound type example.
from netCDF4 import chartostring, stringtoarr
f = Dataset('compound_example.nc','w') # create a new dataset.
# create an unlimited dimension called 'station'
f.createDimension('station',None)
# define a compound data type (can contain arrays, or nested compound types).
NUMCHARS = 80 # number of characters to use in fixed-length strings.
winddtype = numpy.dtype([('speed','f4'),('direction','i4')])
statdtype = numpy.dtype([('latitude', 'f4'), ('longitude', 'f4'),
('surface_wind',winddtype),
('temp_sounding','f4',10),('press_sounding','i4',10),
('location_name','S1',NUMCHARS)])
('location_name','S12')])
# use these data type definitions to create compound data types
# with the createCompoundType Dataset method.
# create a compound type for vector wind which will be nested inside
@@ -182,12 +181,12 @@ wind_data_t = f.createCompoundType(winddtype,'wind_data')
# now that wind_data_t is defined, create the station data type.
station_data_t = f.createCompoundType(statdtype,'station_data')
# create nested compound data types to hold the units variable attribute.
winddtype_units = numpy.dtype([('speed','S1',NUMCHARS),('direction','S1',NUMCHARS)])
statdtype_units = numpy.dtype([('latitude', 'S1',NUMCHARS), ('longitude', 'S1',NUMCHARS),
winddtype_units = numpy.dtype([('speed','S12'),('direction','S12')])
statdtype_units = numpy.dtype([('latitude', 'S12'), ('longitude', 'S12'),
('surface_wind',winddtype_units),
('temp_sounding','S1',NUMCHARS),
('location_name','S1',NUMCHARS),
('press_sounding','S1',NUMCHARS)])
('temp_sounding','S12'),
('location_name','S12'),
('press_sounding','S12')])
# create the wind_data_units type first, since it will be nested inside
# the station_data_units data type.
wind_data_units_t = f.createCompoundType(winddtype_units,'wind_data_units')
@@ -196,35 +195,33 @@ f.createCompoundType(statdtype_units,'station_data_units')
# create a variable of type 'station_data_t'
statdat = f.createVariable('station_obs', station_data_t, ('station',))
# create a numpy structured array, assign data to it.
data = numpy.empty(1,station_data_t)
data = numpy.empty(1,statdtype)
data['latitude'] = 40.
data['longitude'] = -105.
data['surface_wind']['speed'] = 12.5
data['surface_wind']['direction'] = 270
data['temp_sounding'] = (280.3,272.,270.,269.,266.,258.,254.1,250.,245.5,240.)
data['press_sounding'] = range(800,300,-50)
# variable-length string datatypes are not supported inside compound types, so
# to store strings in a compound data type, each string must be
# stored as a fixed-size array of characters (in this case 80).
data['location_name'] = stringtoarr('Boulder, Colorado, USA',NUMCHARS)
data['location_name'] = 'Boulder, CO'
# assign structured array to variable slice.
statdat[0] = data
# or just assign a tuple of values to variable slice
# (will automatically be converted to a structured array).
statdat[1] = (40.78,-73.99,(-12.5,90),
statdat[1] = numpy.array((40.78,-73.99,(-12.5,90),
(290.2,282.5,279.,277.9,276.,266.,264.1,260.,255.5,243.),
range(900,400,-50),stringtoarr('New York, New York, USA',NUMCHARS))
range(900,400,-50),'New York, NY'),data.dtype)
print(f.cmptypes)
windunits = numpy.empty(1,winddtype_units)
stationobs_units = numpy.empty(1,statdtype_units)
windunits['speed'] = stringtoarr('m/s',NUMCHARS)
windunits['direction'] = stringtoarr('degrees',NUMCHARS)
stationobs_units['latitude'] = stringtoarr('degrees north',NUMCHARS)
stationobs_units['longitude'] = stringtoarr('degrees west',NUMCHARS)
windunits['speed'] = 'm/s'
windunits['direction'] = 'degrees'
stationobs_units['latitude'] = 'degrees N'
stationobs_units['longitude'] = 'degrees W'
stationobs_units['surface_wind'] = windunits
stationobs_units['location_name'] = stringtoarr('None', NUMCHARS)
stationobs_units['temp_sounding'] = stringtoarr('Kelvin',NUMCHARS)
stationobs_units['press_sounding'] = stringtoarr('hPa',NUMCHARS)
stationobs_units['location_name'] = 'None'
stationobs_units['temp_sounding'] = 'Kelvin'
stationobs_units['press_sounding'] = 'hPa'
print(stationobs_units.dtype)
statdat.units = stationobs_units
# close and reopen the file.
f.close()
@@ -234,22 +231,7 @@ statdat = f.variables['station_obs']
print(statdat)
# print out data in variable.
print('data in a variable of compound type:')
print('----')
for data in statdat[:]:
for name in statdat.dtype.names:
if data[name].dtype.kind == 'S': # a string
# convert array of characters back to a string for display.
units = chartostring(statdat.units[name])
print(name,': value =',chartostring(data[name]),\
': units=',units)
elif data[name].dtype.kind == 'V': # a nested compound type
units_list = [chartostring(s) for s in tuple(statdat.units[name])]
print(name,data[name].dtype.names,': value=',data[name],': units=',\
units_list)
else: # a numeric type.
units = chartostring(statdat.units[name])
print(name,': value=',data[name],': units=',units)
print('----')
print(statdat[:])
f.close()
f = Dataset('tst_vlen.nc','w')
@@ -280,3 +262,97 @@ print('variable-length string variable:\n',strvar[:])
print(f)
print(f.variables['strvar'])
f.close()
# Enum type example.
f = Dataset('clouds.nc','w')
# python dict describing the allowed values and their names.
enum_dict = {u'Altocumulus': 7, u'Missing': 255, u'Stratus': 2, u'Clear': 0,
u'Nimbostratus': 6, u'Cumulus': 4, u'Altostratus': 5, u'Cumulonimbus': 1,
u'Stratocumulus': 3}
# create the Enum type called 'cloud_t'.
cloud_type = f.createEnumType(numpy.uint8,'cloud_t',enum_dict)
print(cloud_type)
time = f.createDimension('time',None)
# create a 1d variable of type 'cloud_type' called 'primary_clouds'.
# The fill_value is set to the 'Missing' named value.
cloud_var = f.createVariable('primary_cloud',cloud_type,'time',\
fill_value=enum_dict['Missing'])
# write some data to the variable.
cloud_var[:] = [enum_dict['Clear'],enum_dict['Stratus'],enum_dict['Cumulus'],\
enum_dict['Missing'],enum_dict['Cumulonimbus']]
# close file, reopen it.
f.close()
f = Dataset('clouds.nc')
cloud_var = f.variables['primary_cloud']
print(cloud_var)
print(cloud_var.datatype.enum_dict)
print(cloud_var[:])
f.close()
# dealing with strings
from netCDF4 import stringtochar
nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC')
nc.createDimension('nchars',3)
nc.createDimension('nstrings',None)
v = nc.createVariable('strings','S1',('nstrings','nchars'))
datain = numpy.array(['foo','bar'],dtype='S3')
v[:] = stringtochar(datain) # manual conversion to char array
print(v[:]) # data returned as char array
v._Encoding = 'ascii' # this enables automatic conversion
v[:] = datain # conversion to char array done internally
print(v[:]) # data returned in numpy string array
nc.close()
# strings in compound types
nc = Dataset('compoundstring_example.nc','w')
dtype = numpy.dtype([('observation', 'f4'),
('station_name','S12')])
station_data_t = nc.createCompoundType(dtype,'station_data')
nc.createDimension('station',None)
statdat = nc.createVariable('station_obs', station_data_t, ('station',))
data = numpy.empty(2,station_data_t.dtype_view)
data['observation'][:] = (123.,3.14)
data['station_name'][:] = ('Boulder','New York')
print(statdat.dtype) # strings actually stored as character arrays
statdat[:] = data # strings converted to character arrays internally
print(statdat[:]) # character arrays converted back to strings
print(statdat[:].dtype)
statdat.set_auto_chartostring(False) # turn off auto-conversion
statdat[:] = data.view(station_data_t.dtype)
print(statdat[:]) # now structured array with char array subtype is returned
nc.close()
# create a diskless (in-memory) Dataset, and persist the file
# to disk when it is closed.
nc = Dataset('diskless_example.nc','w',diskless=True,persist=True)
d = nc.createDimension('x',None)
v = nc.createVariable('v',numpy.int32,'x')
v[0:5] = numpy.arange(5)
print(nc)
print(nc['v'][:])
nc.close() # file saved to disk
# create an in-memory dataset from an existing python memory
# buffer.
# read the newly created netcdf file into a python bytes object.
f = open('diskless_example.nc', 'rb')
nc_bytes = f.read(); f.close()
# create a netCDF in-memory dataset from the bytes object.
nc = Dataset('inmemory.nc', memory=nc_bytes)
print(nc)
print(nc['v'][:])
nc.close()
# create an in-memory Dataset and retrieve memory buffer
# estimated size is 1028 bytes - this is actually only
# used if format is NETCDF3 (ignored for NETCDF4/HDF5 files).
nc = Dataset('inmemory.nc', mode='w',memory=1028)
d = nc.createDimension('x',None)
v = nc.createVariable('v',numpy.int32,'x')
v[0:5] = numpy.arange(5)
nc_buf = nc.close() # close returns memoryview
print(type(nc_buf))
# save nc_buf to disk, read it back in and check.
f = open('inmemory.nc', 'wb')
f.write(nc_buf); f.close()
nc = Dataset('inmemory.nc')
print(nc)
print(nc['v'][:])
nc.close()
DEF HAS_RENAME_GRP = 0
DEF HAS_NC_INQ_PATH = 0
DEF HAS_NC_INQ_FORMAT_EXTENDED = 0
# Creates a memoryview from a malloced C pointer,
# which will be freed when the python object is garbage collected.
# Code found here is derived from
# http://stackoverflow.com/a/28166272/428751
from cpython.buffer cimport PyBuffer_FillInfo
from libc.stdlib cimport free

# create a python memoryview object from a raw pointer.
cdef memview_fromptr(void *memory, size_t size):
    cdef _MemBuf buf = _MemBuf()
    buf.memory = memory # malloced void pointer
    buf.size = size # size of pointer in bytes
    return memoryview(buf)

# private extension type that implements buffer protocol.
cdef class _MemBuf:
    cdef const void *memory
    cdef size_t size
    def __getbuffer__(self, Py_buffer *buf, int flags):
        PyBuffer_FillInfo(buf, self, <void *>self.memory, self.size, 1, flags)
    def __releasebuffer__(self, Py_buffer *buf):
        # nothing to do per release: consumers merely drop their reference
        # here, and the malloced memory is freed once in __dealloc__.
        pass
    def __dealloc__(self):
        free(self.memory)
/* Author: Lisandro Dalcin */
/* Contact: dalcinl@gmail.com */
#ifndef MPI_COMPAT_H
#define MPI_COMPAT_H
#include <mpi.h>
#if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message)
typedef void *PyMPI_MPI_Message;
#define MPI_Message PyMPI_MPI_Message
#endif
#endif/*MPI_COMPAT_H*/
@@ -2,18 +2,12 @@
cdef extern from "stdlib.h":
ctypedef long size_t
ctypedef long ptrdiff_t
void *malloc(size_t size)
void free(void *ptr)
# hdf5 version info.
cdef extern from "H5public.h":
cdef char *H5_VERS_INFO
cdef char *H5_VERS_SUBRELEASE
cdef enum:
H5_VERS_MAJOR
H5_VERS_MINOR
H5_VERS_RELEASE
ctypedef int herr_t
int H5get_libversion( unsigned int *majnum, unsigned int *minnum, unsigned int *relnum )
cdef extern from *:
ctypedef char* const_char_ptr "const char*"
@@ -24,20 +18,6 @@ cdef extern from "netcdf.h":
    ctypedef struct nc_vlen_t:
        size_t len # Length of VL data (in base type units)
        void *p # Pointer to VL data
    # default fill values.
    # could define these in the anonymous enum, but then they
    # would be assumed to be integers.
    #define NC_FILL_BYTE ((signed char)-127)
    #define NC_FILL_CHAR ((char)0)
    #define NC_FILL_SHORT ((short)-32767)
    #define NC_FILL_INT (-2147483647L)
    #define NC_FILL_FLOAT (9.9692099683868690e+36f) /* near 15 * 2^119 */
    #define NC_FILL_DOUBLE (9.9692099683868690e+36)
    #define NC_FILL_UBYTE (255)
    #define NC_FILL_USHORT (65535)
    #define NC_FILL_UINT (4294967295U)
    #define NC_FILL_INT64 ((long long)-9223372036854775806)
    #define NC_FILL_UINT64 ((unsigned long long)18446744073709551614)
    float NC_FILL_FLOAT
    long NC_FILL_INT
    double NC_FILL_DOUBLE
@@ -63,6 +43,7 @@ cdef extern from "netcdf.h":
        NC_VLEN # used internally for vlen types
        NC_OPAQUE # used internally for opaque types
        NC_COMPOUND # used internally for compound types
        NC_ENUM # used internally for enum types.
        # Use these 'mode' flags for nc_open.
        NC_NOWRITE # default is read only
        NC_WRITE # read & write
@@ -70,12 +51,11 @@ cdef extern from "netcdf.h":
        NC_CLOBBER
        NC_NOCLOBBER # Don't destroy existing file on create
        NC_64BIT_OFFSET # Use large (64-bit) file offsets
        NC_64BIT_DATA # Use cdf-5 format
        NC_NETCDF4 # Use netCDF-4/HDF5 format
        NC_CLASSIC_MODEL # Enforce strict netcdf-3 rules.
        # Use these 'mode' flags for both nc_create and nc_open.
        NC_SHARE # Share updates, limit caching
        NC_MPIIO
        NC_MPIPOSIX
        # The following flag currently is ignored, but use in
        # nc_open() or nc_create() may someday support use of advisory
        # locking to prevent multiple writers from clobbering a file
@@ -135,6 +115,8 @@ cdef extern from "netcdf.h":
        # the nc_set_default_format function.
        NC_FORMAT_CLASSIC
        NC_FORMAT_64BIT
        NC_FORMAT_64BIT_OFFSET
        NC_FORMAT_64BIT_DATA
        NC_FORMAT_NETCDF4
        NC_FORMAT_NETCDF4_CLASSIC
        NC_FORMAT_NC3
@@ -693,12 +675,51 @@ cdef extern from "netcdf.h":
    void nc_set_log_level(int new_level)
    int nc_show_metadata(int ncid)
    int nc_free_vlen(nc_vlen_t *vl)
    int nc_free_vlens(size_t len, nc_vlen_t *vl)
    int nc_free_string(size_t len, char **data)
    int nc_set_chunk_cache(size_t size, size_t nelems, float preemption)
    int nc_get_chunk_cache(size_t *sizep, size_t *nelemsp, float *preemptionp)
    int nc_set_var_chunk_cache(int ncid, int varid, size_t size, size_t nelems, float preemption)
    int nc_get_var_chunk_cache(int ncid, int varid, size_t *sizep, size_t *nelemsp, float *preemptionp) nogil
    int nc_rename_grp(int grpid, char *name)
    int nc_def_enum(int ncid, nc_type base_typeid, char *name, nc_type *typeidp)
    int nc_insert_enum(int ncid, nc_type xtype, char *name, void *value)
    int nc_inq_enum(int ncid, nc_type xtype, char *name, nc_type *base_nc_typep,\
                    size_t *base_sizep, size_t *num_membersp) nogil
    int nc_inq_enum_member(int ncid, nc_type xtype, int idx, char *name, void *value) nogil
    int nc_inq_enum_ident(int ncid, nc_type xtype, long long value, char *identifier) nogil
IF HAS_NC_OPEN_MEM:
    cdef extern from "netcdf_mem.h":
        int nc_open_mem(const char *path, int mode, size_t size, void* memory, int *ncidp)

IF HAS_NC_CREATE_MEM:
    cdef extern from "netcdf_mem.h":
        int nc_create_mem(const char *path, int mode, size_t initialize, int *ncidp);
        ctypedef struct NC_memio:
            size_t size
            void* memory
            int flags
        int nc_close_memio(int ncid, NC_memio* info);

IF HAS_PARALLEL4_SUPPORT or HAS_PNETCDF_SUPPORT:
    cdef extern from "mpi-compat.h": pass
    cdef extern from "netcdf_par.h":
        ctypedef int MPI_Comm
        ctypedef int MPI_Info
        int nc_create_par(char *path, int cmode, MPI_Comm comm, MPI_Info info, int *ncidp);
        int nc_open_par(char *path, int mode, MPI_Comm comm, MPI_Info info, int *ncidp);
        int nc_var_par_access(int ncid, int varid, int par_access);
        cdef enum:
            NC_COLLECTIVE
            NC_INDEPENDENT
    cdef extern from "netcdf.h":
        cdef enum:
            NC_MPIIO
            NC_MPIPOSIX
            NC_PNETCDF
# taken from numpy.pxi in numpy 1.0rc2.
cdef extern from "numpy/arrayobject.h":
......
@@ -33,7 +33,7 @@ A summary of options is included below.
Shows a summary of the available options.
.TP
.B \-o
Overwite destination file (default is to raise an error if output file already exists).
Overwrite destination file (default is to raise an error if output file already exists).
.TP
.B \-\-vars
A comma separated list of variable names to copy (default is to copy all variables).
......
@@ -27,7 +27,7 @@ A summary of options is included below.
Shows a summary of the available options.
.TP