Commit 1bc993ba authored by Bas Couwenberg

Update upstream source from tag 'upstream/1.5.2'

Update to upstream version '1.5.2'
with Debian dir 265aa714cf7782f3de407679b4f6d0a092482451
parents 96f6fdf0 93a42b17
@@ -27,8 +27,8 @@ install:
- cmd: call %CONDA_INSTALL_LOCN%\Scripts\activate.bat
- cmd: conda config --set always_yes yes --set changeps1 no --set show_channel_urls true
- cmd: conda update conda
- cmd: conda config --remove channels defaults --force
- cmd: conda config --add channels conda-forge --force
- cmd: conda config --set channel_priority strict
- cmd: set PYTHONUNBUFFERED=1
- cmd: conda install conda-build vs2008_express_vc_python_patch
- cmd: call setup_x64
language: python
dist: xenial
sudo: true
cache: pip
addons:
apt:
@@ -17,6 +17,7 @@ env:
python:
- "2.7"
- "3.5"
- "3.6"
- "3.7"
- "3.8-dev"
@@ -39,7 +40,6 @@ matrix:
- DEPENDS="numpy==1.10.0 cython==0.21 ordereddict==1.1 setuptools==18.0 cftime"
# test MPI with latest released version
- python: 3.7
dist: xenial
env:
- MPI=1
- CC=mpicc.mpich
@@ -55,7 +55,6 @@ matrix:
- libhdf5-mpich-dev
# test MPI with latest released version
- python: 3.7
dist: xenial
env:
- MPI=1
- CC=mpicc.mpich
@@ -72,7 +71,6 @@ matrix:
- libhdf5-mpich-dev
# test with netcdf-c from github master
- python: 3.7
dist: xenial
env:
- MPI=1
- CC=mpicc.mpich
version 1.5.2 (not yet released)
==============================
* fix for scaling bug when _Unsigned attribute is set and byteorder of data
does not match native byteorder (issue #930).
* revise documentation for Python 3 (issue #946).
* establish support for Python 2.7, 3.5, 3.6 and 3.7 (issue #948).
* use dict built-in instead of OrderedDict for Python 3.7+
(pull request #955).
* remove underline ANSI in Dataset string representation (pull request #956).
* remove newlines from string representation (pull request #960).
* fix for issue #957 (size of scalar var is a float since numpy.prod(())=1.0).
* make sure Variable.setncattr fails to set _FillValue (issue #959).
* fix detection of parallel HDF5 support with netcdf-c 4.6.1 (issue #964).
version 1.5.1.2 (tag v1.5.1.2rel)
==================================
* fix another slicing bug introduced by the fix to issue #906 (issue #922).
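To make the first 1.5.2 entry above concrete, here is a minimal sketch (not part of the upstream diff) of the behaviour fixed for issue #930; it mirrors the issue930() test added further down in this commit. The file path and values are illustrative only.

```python
# Sketch of the issue #930 fix: a signed int16 variable stored in non-native
# byte order, flagged with _Unsigned="TRUE", should read back as unsigned.
import tempfile
import netCDF4

path = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
nc = netCDF4.Dataset(path, 'w')
nc.createDimension('x', 1)
v = nc.createVariable('v', 'i2', ('x',), endian='big')  # non-native on x86
v[0] = 255
v._Unsigned = 'TRUE'          # request the unsigned view on read
nc.close()

nc = netCDF4.Dataset(path)
print(nc['v'][0])             # 255 with the fix; previously the unsigned view
                              # could be taken before the bytes were swapped
nc.close()
```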
@@ -10,8 +10,10 @@
## News
For details on the latest updates, see the [Changelog](https://github.com/Unidata/netcdf4-python/blob/master/Changelog).
09/03/2019: Version [1.5.2](https://pypi.python.org/pypi/netCDF4/1.5.2) released. Bugfixes, no new features.
05/06/2019: Version [1.5.1.2](https://pypi.python.org/pypi/netCDF4/1.5.1.2) released. Fixes another slicing
slicing regression ([issue #922)](https://github.com/Unidata/netcdf4-python/issues/922)) introduced in the 1.5.1 release.
regression ([issue #922)](https://github.com/Unidata/netcdf4-python/issues/922)) introduced in the 1.5.1 release.
05/02/2019: Version [1.5.1.1](https://pypi.python.org/pypi/netCDF4/1.5.1.1) released. Fixes incorrect `__version__`
module variable in 1.5.1 release, plus a slicing bug ([issue #919)](https://github.com/Unidata/netcdf4-python/issues/919)).
This source diff could not be displayed because it is too large.
This diff is collapsed.
@@ -49,13 +49,14 @@ def check_ifnetcdf4(netcdf4_includedir):
return isnetcdf4
def check_api(inc_dirs):
def check_api(inc_dirs,netcdf_lib_version):
has_rename_grp = False
has_nc_inq_path = False
has_nc_inq_format_extended = False
has_cdf5_format = False
has_nc_open_mem = False
has_nc_create_mem = False
has_parallel_support = False
has_parallel4_support = False
has_pnetcdf_support = False
@@ -91,10 +92,20 @@ def check_api(inc_dirs):
for line in open(ncmetapath):
if line.startswith('#define NC_HAS_CDF5'):
has_cdf5_format = bool(int(line.split()[2]))
elif line.startswith('#define NC_HAS_PARALLEL4'):
if line.startswith('#define NC_HAS_PARALLEL'):
has_parallel_support = bool(int(line.split()[2]))
if line.startswith('#define NC_HAS_PARALLEL4'):
has_parallel4_support = bool(int(line.split()[2]))
elif line.startswith('#define NC_HAS_PNETCDF'):
if line.startswith('#define NC_HAS_PNETCDF'):
has_pnetcdf_support = bool(int(line.split()[2]))
# NC_HAS_PARALLEL4 missing in 4.6.1 (issue #964)
if not has_parallel4_support and has_parallel_support and not has_pnetcdf_support:
has_parallel4_support = True
# for 4.6.1, if NC_HAS_PARALLEL=NC_HAS_PNETCDF=1, guess that
# parallel HDF5 is enabled (must guess since there is no
# NC_HAS_PARALLEL4)
elif netcdf_lib_version == "4.6.1" and not has_parallel4_support and has_parallel_support:
has_parallel4_support = True
break
return has_rename_grp, has_nc_inq_path, has_nc_inq_format_extended, \
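The hunk above interleaves removed and added lines; the net effect is that check_api() now reads the NC_HAS_PARALLEL, NC_HAS_PARALLEL4 and NC_HAS_PNETCDF flags from netcdf_meta.h and, because netcdf-c 4.6.1 ships no NC_HAS_PARALLEL4 (issue #964), infers parallel HDF5 support from the other two. A standalone sketch of that parsing, using a made-up header snippet rather than the installed netcdf_meta.h:

```python
# Illustration only: the real check_api() iterates over the installed
# netcdf_meta.h; sample_meta stands in for it here.
sample_meta = """\
#define NC_HAS_CDF5 1
#define NC_HAS_PARALLEL 1
#define NC_HAS_PNETCDF 0
"""

has_parallel_support = has_parallel4_support = has_pnetcdf_support = False
for line in sample_meta.splitlines():
    if line.startswith('#define NC_HAS_PARALLEL'):   # also matches NC_HAS_PARALLEL4
        has_parallel_support = bool(int(line.split()[2]))
    if line.startswith('#define NC_HAS_PARALLEL4'):
        has_parallel4_support = bool(int(line.split()[2]))
    if line.startswith('#define NC_HAS_PNETCDF'):
        has_pnetcdf_support = bool(int(line.split()[2]))

# NC_HAS_PARALLEL4 is missing in 4.6.1: parallel support without pnetcdf can
# only come from parallel HDF5, so assume it (issue #964).
if has_parallel_support and not has_parallel4_support and not has_pnetcdf_support:
    has_parallel4_support = True

print(has_parallel4_support)   # True for the sample header above
```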
@@ -182,7 +193,7 @@ ncconfig = None
use_ncconfig = None
if USE_SETUPCFG and os.path.exists(setup_cfg):
sys.stdout.write('reading from setup.cfg...\n')
config = configparser.SafeConfigParser()
config = configparser.ConfigParser()
config.read(setup_cfg)
try:
HDF5_dir = config.get("directories", "HDF5_dir")
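configparser.SafeConfigParser has been a deprecated alias of ConfigParser since Python 3.2, which is why the line above switches to ConfigParser. A minimal sketch of the same read pattern; the [directories] contents below are invented for the example, and setup.py itself calls config.read(setup_cfg) on the real file instead:

```python
# Stand-in for reading setup.cfg with the non-deprecated parser class.
import configparser

cfg_text = """\
[directories]
HDF5_dir = /opt/hdf5
"""

config = configparser.ConfigParser()          # replaces SafeConfigParser
config.read_string(cfg_text)
print(config.get("directories", "HDF5_dir"))  # -> /opt/hdf5
```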
@@ -494,7 +505,8 @@ if 'sdist' not in sys.argv[1:] and 'clean' not in sys.argv[1:]:
# this determines whether renameGroup and filepath methods will work.
has_rename_grp, has_nc_inq_path, has_nc_inq_format_extended, \
has_cdf5_format, has_nc_open_mem, has_nc_create_mem, \
has_parallel4_support, has_pnetcdf_support = check_api(inc_dirs)
has_parallel4_support, has_pnetcdf_support = \
check_api(inc_dirs,netcdf_lib_version)
# for netcdf 4.4.x CDF5 format is always enabled.
if netcdf_lib_version is not None and\
(netcdf_lib_version > "4.4" and netcdf_lib_version < "4.5"):
@@ -584,7 +596,7 @@ else:
setup(name="netCDF4",
cmdclass=cmdclass,
version="1.5.1.2",
version="1.5.2",
long_description="netCDF version 4 has many features not found in earlier versions of the library, such as hierarchical groups, zlib compression, multiple unlimited dimensions, and new data types. It is implemented on top of HDF5. This module implements most of the new features, and can read and write netCDF files compatible with older versions of the library. The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module.\n\nThis project is hosted on a `GitHub repository <https://github.com/Unidata/netcdf4-python>`_ where you may access the most up-to-date source.",
author="Jeff Whitaker",
author_email="jeffrey.s.whitaker@noaa.gov",
@@ -597,12 +609,11 @@ setup(name="netCDF4",
'meteorology', 'climate'],
classifiers=["Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Intended Audience :: Science/Research",
"License :: OSI Approved",
"Topic :: Software Development :: Libraries :: Python Modules",
@@ -7,13 +7,10 @@ import tempfile
import warnings
import numpy as NP
from collections import OrderedDict
from numpy.random.mtrand import uniform
import netCDF4
try:
from collections import OrderedDict
except ImportError: # or else use drop-in substitute
from ordereddict import OrderedDict
import netCDF4
# test attribute creation.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
@@ -94,6 +91,19 @@ class VariablesTestCase(unittest.TestCase):
v1.seqatt = SEQATT
v1.stringseqatt = STRINGSEQATT
v1.setncattr_string('stringseqatt_array',STRINGSEQATT) # array of NC_STRING
# issue #959: should not be able to set _FillValue after var creation
try:
v1._FillValue(-999.)
except AttributeError:
pass
else:
raise ValueError('This test should have failed.')
try:
v1.setncattr('_FillValue',-999.)
except AttributeError:
pass
else:
raise ValueError('This test should have failed.')
# issue #485 (triggers segfault in C lib
# with version 1.2.1 without pull request #486)
f.foo = NP.array('bar','S')
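The new try/except blocks in the hunk above exercise issue #959: _FillValue may no longer be attached after a variable exists, whether set directly or via setncattr. A hedged sketch of the supported route, passing fill_value to createVariable (file path and values are illustrative):

```python
# _FillValue must be fixed at variable-creation time as of 1.5.2 (issue #959).
import tempfile
import netCDF4

path = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
nc = netCDF4.Dataset(path, 'w')
nc.createDimension('x', 3)
v = nc.createVariable('v', 'f8', ('x',), fill_value=-999.)  # allowed here
try:
    v.setncattr('_FillValue', -999.)                         # now rejected
except AttributeError:
    print('setting _FillValue after creation raises AttributeError')
nc.close()
```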
@@ -121,6 +121,27 @@ def issue346(file):
assert_array_equal(datal,xl)
nc.close()
def issue930(file):
# make sure view to unsigned data type (triggered
# by _Unsigned attribute being set) is correct when
# data byte order is non-native.
nc = netCDF4.Dataset(file,'w')
d = nc.createDimension('x',2)
v1 = nc.createVariable('v1','i2','x',endian='big')
v2 = nc.createVariable('v2','i2','x',endian='little')
v1[0] = 255; v1[1] = 1
v2[0] = 255; v2[1] = 1
v1._Unsigned="TRUE"; v1.missing_value=np.int16(1)
v2._Unsigned="TRUE"; v2.missing_value=np.int16(1)
nc.close()
nc = netCDF4.Dataset(file)
assert_array_equal(nc['v1'][:],np.ma.masked_array([255,1],mask=[False,True]))
assert_array_equal(nc['v2'][:],np.ma.masked_array([255,1],mask=[False,True]))
nc.set_auto_mask(False)
assert_array_equal(nc['v1'][:],np.array([255,1]))
assert_array_equal(nc['v2'][:],np.array([255,1]))
nc.close()
class EndianTestCase(unittest.TestCase):
def setUp(self):
@@ -141,6 +162,7 @@ class EndianTestCase(unittest.TestCase):
check_byteswap(self.file3, data)
issue310(self.file)
issue346(self.file2)
issue930(self.file2)
if __name__ == '__main__':
unittest.main()
@@ -523,7 +523,7 @@ class TestDate2index(unittest.TestCase):
:Example:
>>> t = TestTime(datetime(1989, 2, 18), 45, 6, 'hours since 1979-01-01')
>>> print num2date(t[1], t.units)
>>> print(num2date(t[1], t.units))
1989-02-18 06:00:00
"""
self.units = units