Skip to content
Commits on Source (4)
version 1.5.1 (tag v1.5.1rel)
==============================
* fix issue #908 by adding workaround for incorrect value returned
by nc_inq_var_fill for netcdf-c < 4.5.1.
* fix bug writing slice to unlimited dimension that is not the first
(leftmost). Issue #906.
* make sure data gets converted to type of scale_factor when add_offset=0
and scale_factor=1 (issue #913).
* fix for reading empty (NIL) string attributes (issue #915).
version 1.5.0.1 (tag v1.5.0.1rel)
==================================
* binary wheels for linux and macosx rebuilt against netcdf-c 4.6.3 (instead
......
Metadata-Version: 1.1
Name: netCDF4
Version: 1.5.0
Author: Jeff Whitaker
Author-email: jeffrey s whitaker at noaa gov
Home-page: https://github.com/Unidata/netcdf4-python
Summary: python/numpy interface to netCDF library (versions 3 and 4)
License: OSI Approved
Description: netCDF version 4 has many features not found in earlier versions of the library
and is implemented on
top of HDF5. This module can read and write files in both the new netCDF 4 and
the old netCDF 3
format, and can create files that are readable by HDF5 clients. The API is modelled
after
Scientific.IO.NetCDF, and should be familiar to users of that module.
Most new features of netCDF 4 are implemented, such as multiple unlimited
dimensions, groups and zlib data compression. All the new numeric data types
(such as 64 bit and unsigned integer types) are implemented. Compound,
variable length (vlen), and enumerated (enum) data types are supported, but
the opaque type is not. Mixtures of compound, vlen and/or enum data types are not supported.
This project has a `Github repository
<https://github.com/Unidata/netcdf4-python>`_ where you may access
the most
up-to-date source.
`Documentation
<http://unidata.github.io/netcdf4-python>`_
`Changelog
<https://github.com/Unidata/netcdf4-python/blob/master/Changelog>`_
Also available in the `Anaconda scientific python distribution <https://store.continuum.io/cshop/anaconda/>`_
Download source tarball and binary wheels below...
Keywords: numpy netcdf data science network oceanography meteorology climate
Platform: any
Classifier: Intended Audience :: Science/Research
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Scientific/Engineering
......@@ -10,6 +10,8 @@
## News
For details on the latest updates, see the [Changelog](https://github.com/Unidata/netcdf4-python/blob/master/Changelog).
04/30/2019: Version [1.5.1](https://pypi.python.org/pypi/netCDF4/1.5.1) released. Bugfixes, no new features.
04/02/2019: Version [1.5.0.1](https://pypi.python.org/pypi/netCDF4/1.5.0.1) released. Binary wheels for macOS
and Linux rebuilt with netcdf-c 4.6.3 (instead of 4.4.1.1). Added read-shared capability for faster reads
of NETCDF3 files (mode='rs').
......
netcdf4-python (1.5.1-1~exp1) experimental; urgency=medium
* New upstream release.
-- Bas Couwenberg <sebastic@debian.org> Tue, 30 Apr 2019 06:41:09 +0200
netcdf4-python (1.5.0.1-1~exp1) experimental; urgency=medium
* New upstream release.
......
This diff is collapsed.
"""
Version 1.5.0.1
Version 1.5.1
-------------
- - -
......@@ -1414,7 +1414,7 @@ cdef _get_att(grp, int varid, name, encoding='utf-8'):
_ensure_nc_success(ierr, err_cls=AttributeError)
try:
result = [values[j].decode(encoding,errors='replace').replace('\x00','')
for j in range(att_len)]
if values[j] else "" for j in range(att_len)]
finally:
ierr = nc_free_string(att_len, values) # free memory in netcdf C lib
finally:
......@@ -3937,6 +3937,13 @@ behavior is similar to Fortran or Matlab, but different than numpy.
if (self._grp.path != '/'): ncdump_var.append('path = %s\n' % self._grp.path)
ncdump_var.append('unlimited dimensions: %s\n' % ', '.join(unlimdims))
ncdump_var.append('current shape = %s\n' % repr(self.shape))
if __netcdf4libversion__ < '4.5.1' and\
self._grp.file_format.startswith('NETCDF3'):
# issue #908: no_fill not correct for NETCDF3 files before 4.5.1
# before 4.5.1 there was no way to turn off filling on a
# per-variable basis for classic files.
no_fill=0
else:
with nogil:
ierr = nc_inq_var_fill(self._grpid,self._varid,&no_fill,NULL)
_ensure_nc_success(ierr)
......@@ -4389,14 +4396,17 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
data = data.view('u%s' % data.dtype.itemsize)
if self.scale and self._isprimitive and valid_scaleoffset:
# if variable has scale_factor and add_offset attributes, rescale.
if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset') and\
(self.add_offset != 0.0 or self.scale_factor != 1.0):
# if variable has scale_factor and add_offset attributes, apply
# them.
if hasattr(self, 'scale_factor') and hasattr(self, 'add_offset'):
if self.add_offset != 0.0 or self.scale_factor != 1.0:
data = data*self.scale_factor + self.add_offset
# else if variable has only scale_factor attributes, rescale.
else:
data = data.astype(self.scale_factor.dtype) # issue 913
# else if variable has only scale_factor attribute, rescale.
elif hasattr(self, 'scale_factor') and self.scale_factor != 1.0:
data = data*self.scale_factor
# else if variable has only add_offset attributes, rescale.
# else if variable has only add_offset attribute, add offset.
elif hasattr(self, 'add_offset') and self.add_offset != 0.0:
data = data + self.add_offset
......@@ -4491,6 +4501,13 @@ rename a `netCDF4.Variable` attribute named `oldname` to `newname`."""
totalmask += mask
# issue 209: don't return masked array if variable filling
# is disabled.
else:
if __netcdf4libversion__ < '4.5.1' and\
self._grp.file_format.startswith('NETCDF3'):
# issue #908: no_fill not correct for NETCDF3 files before 4.5.1
# before 4.5.1 there was no way to turn off filling on a
# per-variable basis for classic files.
no_fill=0
else:
with nogil:
ierr = nc_inq_var_fill(self._grpid,self._varid,&no_fill,NULL)
......@@ -5304,7 +5321,10 @@ NC_CHAR).
# not given, use 'utf-8'.
encoding = getattr(self,'_Encoding','utf-8')
for i from 0<=i<totelem:
if strdata[i]:
data[i] = strdata[i].decode(encoding)
else:
data[i] = "" # issue 915
# reshape the output array
data = numpy.reshape(data, shapeout)
# free string data internally allocated in netcdf C lib
......
......@@ -139,7 +139,7 @@ def _StartCountStride(elem, shape, dimensions=None, grp=None, datashape=None,\
grp : netCDF Group
The netCDF group to which the variable being set belongs to.
datashape : sequence
The shape of the data that is being stored. Only needed by __setitime__
The shape of the data that is being stored. Only needed by __setitem__
put : True|False (default False). If called from __setitem__, put is True.
Returns
......@@ -342,6 +342,17 @@ Boolean array must have the same shape as the data along this dimension."""
else:
sdim.append(1)
# pad datashape with zeros for dimensions not being sliced
if datashape:
datashapenew = (); i=0
for e in elem:
if type(e) != slice:
datashapenew = datashapenew + (0,)
else:
datashapenew = datashapenew + (datashape[i],)
i+=1
datashape = datashapenew
# Create the start, count, stride and indices arrays.
sdim.append(max(nDims, 1))
......
......@@ -584,7 +584,7 @@ else:
setup(name="netCDF4",
cmdclass=cmdclass,
version="1.5.0.1",
version="1.5.1",
long_description="netCDF version 4 has many features not found in earlier versions of the library, such as hierarchical groups, zlib compression, multiple unlimited dimensions, and new data types. It is implemented on top of HDF5. This module implements most of the new features, and can read and write netCDF files compatible with older versions of the library. The API is modelled after Scientific.IO.NetCDF, and should be familiar to users of that module.\n\nThis project is hosted on a `GitHub repository <https://github.com/Unidata/netcdf4-python>`_ where you may access the most up-to-date source.",
author="Jeff Whitaker",
author_email="jeffrey.s.whitaker@noaa.gov",
......
......@@ -38,7 +38,6 @@ else:
test_files.remove('tst_dap.py')
test_files.insert(0,'tst_dap.py')
# Build the test suite from the tests found in the test files.
testsuite = unittest.TestSuite()
for f in test_files:
......
......@@ -219,6 +219,10 @@ class VariablesTestCase(unittest.TestCase):
assert v1.stringseqatt_array == STRINGSEQATT
assert getattr(v1,'nonexistantatt',None) == None
f.close()
# issue 915 empty string attribute (ncdump reports 'NIL')
f = netCDF4.Dataset('test_gold.nc')
assert f['RADIANCE'].VAR_NOTES == ""
f.close()
if __name__ == '__main__':
unittest.main()
import netCDF4, unittest
import numpy as np
class Issue908TestCase(unittest.TestCase):
def setUp(self):
nc = netCDF4.Dataset('CRM032_test1.nc')
self.nc = nc
def tearDown(self):
self.nc.close()
def runTest(self):
data = self.nc['rgrid'][:]
assert(data.all() is np.ma.masked)
if __name__ == '__main__':
unittest.main()
......@@ -33,9 +33,13 @@ class SetAutoScaleTestBase(unittest.TestCase):
f = Dataset(self.testfile, 'w')
x = f.createDimension('x', None)
xx = f.createDimension('xx', 10)
v = f.createVariable('v', "i2", 'x')
vv = f.createVariable('vv', "i2", 'xx')
vv.add_offset=0; vv.scale_factor=np.float32(1.0)
v[:] = self.v
vv[:] = np.ones(10)
# Note: Scale factors are only added after writing, so that no auto-scaling takes place!
......@@ -106,6 +110,11 @@ class SetAutoScaleTrue(SetAutoScaleTestBase):
f.variables["v"].set_auto_scale(True) # The default anyway...
v_scaled = f.variables['v'][:]
# issue 913
vv_scaled = f.variables['vv'][:]
self.assertEqual(vv_scaled.dtype,f.variables['vv'].scale_factor.dtype)
assert_array_almost_equal(vv_scaled, np.ones(10))
self.assertEqual(v_scaled.dtype, "f8")
self.assertTrue(isinstance(v_scaled, np.ndarray))
# issue 785: always return masked array by default
......
......@@ -211,5 +211,16 @@ class VariablesTestCase(unittest.TestCase):
assert_array_equal(data,data2)
nc.close()
def test_issue906(self):
f = Dataset('test.nc','w')
f.createDimension('d1',3)
f.createDimension('d2',None)
f.createDimension('d3',5)
f.createVariable('v2',np.float,('d1','d2','d3'))
f['v2'][:] = np.zeros((3,4,5))
f['v2'][0,:,0] = np.arange(4)
f['v2'][0,:,:] = np.ones((4,5))
f.close()
if __name__ == '__main__':
unittest.main()