Skip to content
Commits on Source (6)
0.3.0
- feat: add option to choose the strategy for propagating values to
the next register (#14)
- feat: add option to return the pure sum and the internal normalization
count (#14)
0.2.0
- tests: filter warnings and check with flake8
- implement unique warning classes
0.1.9
- include docs in sdist
0.1.8
......
......@@ -10,7 +10,7 @@ correlation on a linear scale such as `numpy.correlate <http://docs.scipy.org/do
Installation
------------
Multipletau supports Python 2.6+ and Python 3.3+ with a common codebase.
Multipletau supports Python 2.7 and Python 3.3+ with a common codebase.
The only requirement for ``multipletau`` is `NumPy <http://www.numpy.org/>`__ (for fast
operations on arrays). Install multipletau from the Python package index:
......@@ -56,7 +56,7 @@ You can find out what version you are using by typing (in a Python console):
>>> import multipletau
>>> multipletau.__version__
'0.1.4'
'0.3.0'
......@@ -64,7 +64,7 @@ You can find out what version you are using by typing (in a Python console):
:target: https://pypi.python.org/pypi/multipletau
.. |Tests Status| image:: http://img.shields.io/travis/FCS-analysis/multipletau.svg?label=tests
:target: https://travis-ci.org/FCS-analysis/multipletau
.. |Coverage Status| image:: https://img.shields.io/coveralls/FCS-analysis/multipletau.svg
:target: https://coveralls.io/r/FCS-analysis/multipletau
.. |Coverage Status| image:: https://img.shields.io/codecov/c/github/FCS-analysis/multipletau/master.svg
:target: https://codecov.io/gh/FCS-analysis/multipletau
.. |Docs Status| image:: https://readthedocs.org/projects/multipletau/badge/?version=latest
:target: https://readthedocs.org/projects/multipletau/builds/
python-multipletau (0.3.0+ds-1) unstable; urgency=medium
* Update Files-Excluded section, add .readthedocs.yml
* New upstream version 0.3.0+ds
* Refresh patches
* Update d/tests, run upstream tests as autopkgtests
-- Alexandre Mestiashvili <mestia@debian.org> Thu, 01 Nov 2018 15:09:16 +0000
python-multipletau (0.1.9+ds-2) unstable; urgency=medium
* Team upload.
......
......@@ -4,6 +4,7 @@ Source: https://github.com/FCS-analysis/multipletau
Files-Excluded:
.gitignore
.travis.yml
.readthedocs.yml
Files: *
Copyright: 2014 Paul Mueller <paul.mueller@biotec.tu-dresden.de>
......
......@@ -2,7 +2,7 @@ Description: Fix relative patch
From: Alex Mestiashvili <mailatgoogl@gmail.com>
--- python-multipletau.orig/docs/extensions/fancy_include.py
+++ python-multipletau/docs/extensions/fancy_include.py
@@ -92,7 +92,7 @@
@@ -97,7 +97,7 @@
def setup(app):
......
Test-Command: set -e ; for py in $(pyversions -r 2>/dev/null) ; do cd "$ADTTMP" ; echo "Testing with $py:" ; $py -c "import multipletau; print multipletau" ; done
Depends: python-all, python-multipletau
Restrictions: allow-stderr
Test-Command: set -e ; for py in $(py3versions -r 2>/dev/null) ; do cd "$ADTTMP" ; echo "Testing with $py:" ; $py -c "import multipletau; print(multipletau)" ; done
Depends: python3-all, python3-multipletau
Restrictions: allow-stderr
Test-Command: for p in $(pyversions -s) $(py3versions -s); do $p -m pytest tests/; done
Depends:
python-pytest,
python-multipletau,
python3-pytest,
python3-multipletau,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# project documentation build configuration file, created by
......@@ -15,12 +16,7 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Get version number from qpimage._version file
import mock
import os.path as op
import sys
......@@ -36,12 +32,18 @@ install_requires = ["numpy"]
for mod_name in install_requires:
sys.modules[mod_name] = mock.Mock()
# There should be a file "setup.py" that has the property "version"
from setup import author, authors, description, name, version, year
name = 'multipletau'
github_project = 'FCS-analysis/' + name
year = "2012"
author = 'Paul Müller'
authors = [author]
description = 'A multiple-tau algorithm for Python/NumPy'
projectname = name
projectdescription = description
exec(open(op.join(pdir, "multipletau/_version.py")).read())
release = version #@UndefinedVariable
# http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
# Order class attributes and functions in separate blocks
autodoc_member_order = 'bysource'
......@@ -94,7 +96,7 @@ copyright = year+", "+author
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = version
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -275,7 +277,7 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', projectname, projectname+u' Documentation',
('index', projectname, projectname+' Documentation',
author, projectname,
projectdescription,
'Numeric'),
......
......@@ -47,14 +47,19 @@ class IncludeDirective(Directive):
with io.open(full_path, "r") as myfile:
text = myfile.read()
# add reference
name = op.basename(full_path)[:-3]
rst = [".. _example_{}:".format(name),
"",
]
# add docstring
source = text.split('"""')
doc = source[1].split("\n")
doc.insert(1, "~" * len(doc[0])) # make title heading
code = source[2].split("\n")
# documentation
rst = []
for line in doc:
rst.append(line)
......
......@@ -51,7 +51,7 @@ You can find out what version you are using by typing
>>> import multipletau
>>> multipletau.__version__
'0.1.4'
'0.3.0'
Usage
......
......@@ -44,8 +44,11 @@ if True: # pragma: no cover
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
out = cmd.communicate()[0]
pop = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
out = pop.communicate()[0]
return out
# change directory
......
......@@ -41,8 +41,16 @@ import warnings
__all__ = ["autocorrelate", "correlate", "correlate_numpy"]
def autocorrelate(a, m=16, deltat=1, normalize=False,
copy=True, dtype=None):
class DtypeWarning(UserWarning):
    """Issued when input data do not have a floating-point dtype and
    are therefore cast to ``np.float_`` before correlation.
    """
    pass
class InvalidMWarning(UserWarning):
    """Issued when the register size `m` is invalid (not an even
    integer) and is replaced by the next even value.

    NOTE(review): `correlate` also passes this category to its
    dtype-cast warnings ("Input dtypes not equal ..."), which looks
    like a copy-paste slip — `DtypeWarning` is presumably the intended
    category there; confirm before filtering warnings by class.
    """
    pass
def autocorrelate(a, m=16, deltat=1, normalize=False, copy=True, dtype=None,
compress="average", ret_sum=False):
"""
Autocorrelation of a 1-dimensional sequence on a log2-scale.
......@@ -52,7 +60,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
:func:`numpy.correlate(a, a, mode="full")[len(a)-1:]`
:math:`z_k = \Sigma_n a_n a_{n+k}`
:math:`z_k = \\Sigma_n a_n a_{n+k}`
Parameters
......@@ -72,12 +80,34 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
dtype: object to be converted to a data type object
The data type of the returned array and of the accumulator
for the multiple-tau computation.
compress: str
strategy for propagating values to the next register
- `"average"` (default): average two measurements when pushing
to the next level of the correlator.
- `"first"`: use only the first value when pushing to the next
level of the correlator.
- `"second"`: use only the second value when pushing to the
next level of the correlator.
Using only the first or the second values during propagation
completely removes the systematic error at the cost of
increasing the statistical error.
See https://doi.org/10.1063/1.3491098 for a discussion on the
effect of averaging.
ret_sum: bool
return the exact sum :math:`z_k = \\Sigma_n a_n a_{n+k}`. In addition
:math:`M-k` is returned as an array of length N.
Returns
-------
autocorrelation: ndarray of shape (N,2)
the lag time (1st column) and the autocorrelation (2nd column).
count: ndarray of length N
only returned if `ret_sum` is True; the value of :math:`M-k`
for each row in `autocorrelation`.
Notes
-----
......@@ -110,6 +140,13 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
"""
assert isinstance(copy, bool)
assert isinstance(normalize, bool)
msg = "'normalize' and 'ret_sum' must not both be true"
assert not (normalize and ret_sum), msg
compress_values = ["average", "first", "second"]
assert any(compress in s for s in compress_values), \
"Unvalid string of compress. Possible values are " + \
','.join(compress_values)
if dtype is None:
dtype = np.dtype(a[0].__class__)
......@@ -127,7 +164,8 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
copy=copy,
dtype=dtype)
elif dtype.kind != "f":
warnings.warn("Input dtype is not float; casting to np.float_!")
warnings.warn("Input dtype is not float; casting to np.float_!",
DtypeWarning)
dtype = np.dtype(np.float_)
# If copy is false and dtype is the same as the input array,
......@@ -139,7 +177,7 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
mold = m
m = np.int_((m // 2 + 1) * 2)
warnings.warn("Invalid value of m={}. Using m={} instead"
.format(mold, m))
.format(mold, m), InvalidMWarning)
else:
m = np.int_(m)
......@@ -184,8 +222,13 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
# Check if len(trace) is even:
if N % 2 == 1:
N -= 1
# Add up every second element
# compress every second element
if compress == compress_values[0]:
trace = (trace[:N:2] + trace[1:N:2]) / 2
elif compress == compress_values[1]:
trace = trace[:N:2]
elif compress == compress_values[2]:
trace = trace[1:N:2]
N //= 2
# Start iteration for each m/2 values
for step in range(1, k + 1):
......@@ -226,20 +269,29 @@ def autocorrelate(a, m=16, deltat=1, normalize=False,
# Check if len(trace) is even:
if N % 2 == 1:
N -= 1
# Add up every second element
# compress every second element
if compress == compress_values[0]:
trace = (trace[:N:2] + trace[1:N:2]) / 2
elif compress == compress_values[1]:
trace = trace[:N:2]
elif compress == compress_values[2]:
trace = trace[1:N:2]
N //= 2
if normalize:
G[:, 1] /= traceavg**2 * normstat
else:
elif not ret_sum:
G[:, 1] *= N0 / normnump
if ret_sum:
return G, normstat
else:
return G
def correlate(a, v, m=16, deltat=1, normalize=False,
copy=True, dtype=None):
def correlate(a, v, m=16, deltat=1, normalize=False, copy=True, dtype=None,
compress="average", ret_sum=False):
"""
Cross-correlation of two 1-dimensional sequences
on a log2-scale.
......@@ -250,7 +302,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
:func:`numpy.correlate(a, v, mode="full")[len(a)-1:]`
:math:`z_k = \Sigma_n a_n v_{n+k}`
:math:`z_k = \\Sigma_n a_n v_{n+k}`
Note that only the correlation in the positive direction is
computed. To obtain the correlation for negative lag times
......@@ -273,12 +325,33 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
dtype: object to be converted to a data type object
The data type of the returned array and of the accumulator
for the multiple-tau computation.
compress: str
strategy for propagating values to the next register
- `"average"` (default): average two measurements when pushing
to the next level of the correlator.
- `"first"`: use only the first value when pushing to the next
level of the correlator.
- `"second"`: use only the second value when pushing to the
next level of the correlator.
Using only the first or the second values during propagation
completely removes the systematic error at the cost of
increasing the statistical error.
See https://doi.org/10.1063/1.3491098 for a discussion on the
effect of averaging.
ret_sum: bool
return the exact sum :math:`z_k = \\Sigma_n a_n v_{n+k}`. In addition
:math:`M-k` is returned as an array of length N.
Returns
-------
cross_correlation: ndarray of shape (N,2)
the lag time (column 1) and the cross-correlation (column2).
the lag time (1st column), the cross-correlation (2nd column).
count: ndarray of length N
only returned if `ret_sum` is True; the value of :math:`M-k`
for each row in `autocorrelation`.
Notes
......@@ -311,6 +384,14 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
"""
assert isinstance(copy, bool)
assert isinstance(normalize, bool)
msg = "'normalize' and 'ret_sum' must not both be true"
assert not (normalize and ret_sum), msg
compress_values = ["average", "first", "second"]
assert any(compress in s for s in compress_values), \
"Unvalid string of compress. Possible values are " + \
','.join(compress_values)
# See `autocorrelation` for better documented code.
traceavg1 = np.average(v)
traceavg2 = np.average(a)
......@@ -325,16 +406,19 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
if dtype.kind == "c" or dtype2.kind == "c":
# The user might try to combine complex64 and float128.
warnings.warn(
"Input dtypes not equal; casting to np.complex_!")
"Input dtypes not equal; casting to np.complex_!",
InvalidMWarning)
dtype = np.dtype(np.complex_)
else:
warnings.warn("Input dtypes not equal; casting to np.float_!")
warnings.warn("Input dtypes not equal; casting to np.float_!",
InvalidMWarning)
dtype = np.dtype(np.float_)
else:
dtype = np.dtype(dtype)
if dtype.kind not in ["c", "f"]:
warnings.warn("Input dtype is not float; casting to np.float_!")
warnings.warn("Input dtype is not float; casting to np.float_!",
InvalidMWarning)
dtype = np.dtype(np.float_)
trace1 = np.array(v, dtype=dtype, copy=copy)
......@@ -357,7 +441,7 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
mold = m
m = np.int_(m // 2 + 1) * 2
warnings.warn("Invalid value of m={}. Using m={} instead"
.format(mold, m))
.format(mold, m), InvalidMWarning)
else:
m = np.int_(m)
......@@ -394,9 +478,16 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
# Check if len(trace) is even:
if N % 2 == 1:
N -= 1
# Add up every second element
# compress every second element
if compress == compress_values[0]:
trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
elif compress == compress_values[1]:
trace1 = trace1[:N:2]
trace2 = trace2[:N:2]
elif compress == compress_values[2]:
trace1 = trace1[1:N:2]
trace2 = trace2[1:N:2]
N //= 2
for step in range(1, k + 1):
......@@ -420,16 +511,26 @@ def correlate(a, v, m=16, deltat=1, normalize=False,
# Check if len(trace) is even:
if N % 2 == 1:
N -= 1
# Add up every second element
# compress every second element
if compress == compress_values[0]:
trace1 = (trace1[:N:2] + trace1[1:N:2]) / 2
trace2 = (trace2[:N:2] + trace2[1:N:2]) / 2
elif compress == compress_values[1]:
trace1 = trace1[:N:2]
trace2 = trace2[:N:2]
elif compress == compress_values[2]:
trace1 = trace1[1:N:2]
trace2 = trace2[1:N:2]
N //= 2
if normalize:
G[:, 1] /= traceavg1 * traceavg2 * normstat
else:
elif not ret_sum:
G[:, 1] *= N0 / normnump
if ret_sum:
return G, normstat
else:
return G
......
......@@ -3,3 +3,7 @@ test = pytest
[bdist_wheel]
universal = 1
[metadata]
license_file = LICENSE
......@@ -7,14 +7,14 @@ import sys
author = u"Paul Müller"
authors = [author]
description = 'A multiple-tau algorithm for Python/NumPy.'
description = 'A multiple-tau algorithm for Python/NumPy'
name = 'multipletau'
year = "2013"
year = "2012"
sys.path.insert(0, realpath(dirname(__file__))+"/"+name)
from _version import version
if __name__ == "__main__":
setup(
name=name,
author=author,
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Tests correlation-autocorrelation identity
"""
"""Test correlation-autocorrelation identity"""
from __future__ import division, print_function
import numpy as np
import os
from os.path import abspath, basename, dirname, join, split, exists
import platform
import sys
import warnings
import zipfile
# Add parent directory to beginning of path variable
DIR = dirname(abspath(__file__))
sys.path = [split(DIR)[0]] + sys.path
import numpy as np
import multipletau
......@@ -52,7 +43,7 @@ def test_ac_cc_m():
dtype=np.float_)
rescc.append(r)
# test minimal length of array
_r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
multipletau.correlate(a=a[:2*m], v=a[:2*m],
m=m,
deltat=1,
normalize=False,
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Tests autocorrelation algorithm
"""
"""Tests autocorrelation algorithm"""
from __future__ import division, print_function
import numpy as np
import os
from os.path import abspath, basename, dirname, join, split, exists
import platform
import sys
import warnings
import zipfile
# Add parent directory to beginning of path variable
DIR = dirname(abspath(__file__))
sys.path = [split(DIR)[0]] + sys.path
import numpy as np
import pytest
import multipletau
......@@ -28,12 +21,12 @@ def get_reference_data(funcname, pyfile):
def get_sample_arrays():
a = [-4.3, 1, 9, -99.2, 13]
b = [9921, 281, 23.5, 5.3, 77]
l = [ 33, 92, 47, 54, 99]
ll = [33, 92, 47, 54, 99]
r = [0, 1, 12, 4, 0]
p = [1, 4, .5, 2, 3]
arrs = []
for ai, bi, li, ri, pi in zip(a,b,l,r,p):
for ai, bi, li, ri, pi in zip(a, b, ll, r, p):
x = np.linspace(ai, bi, li)
arr = (x*np.roll(x, ri))**pi
arrs.append(arr)
......@@ -78,6 +71,7 @@ def test_ac_copy():
assert not np.all(arrs == refarrs)
@pytest.mark.filterwarnings("ignore::multipletau.core.DtypeWarning")
def test_ac_dtype():
myframe = sys._getframe()
myname = myframe.f_code.co_name
......@@ -85,7 +79,6 @@ def test_ac_dtype():
a = np.round(get_sample_arrays()[0])
# integer
rf = multipletau.autocorrelate(a=a,
m=16,
......@@ -108,10 +101,14 @@ def test_ac_dtype():
copy=True,
dtype=None)
assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
assert np.all(rf == ri), "result should be the same, because input us the same"
assert np.all(rf == ri2), "result should be the same, because input us the same"
assert ri.dtype == np.dtype(
np.float_), "if wrong dtype, dtype should default to np.float_"
assert ri2.dtype == np.dtype(
np.float_), "if wrong dtype, dtype should default to np.float_"
assert np.all(
rf == ri), "result should be the same, because input us the same"
assert np.all(
rf == ri2), "result should be the same, because input us the same"
def test_ac_m():
......@@ -135,7 +132,7 @@ def test_ac_m():
res.append(r)
# test minimal length of array
_r2 = multipletau.autocorrelate(a=a[:2*m],
multipletau.autocorrelate(a=a[:2*m],
m=m,
deltat=1,
normalize=False,
......@@ -143,12 +140,14 @@ def test_ac_m():
dtype=np.float_)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-15)
@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
def test_ac_m_wrong():
myframe = sys._getframe()
myname = myframe.f_code.co_name
......@@ -215,7 +214,8 @@ def test_ac_normalize():
res.append(r)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-14)
......@@ -239,7 +239,8 @@ def test_ac_simple():
res.append(r)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-15)
......@@ -251,4 +252,3 @@ if __name__ == "__main__":
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
basic tests also available in the function docs
"""
"""basic tests also available in the function docs"""
import numpy as np
from os.path import abspath, dirname, join
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from multipletau import autocorrelate, correlate
......@@ -38,4 +32,3 @@ if __name__ == "__main__":
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
\ No newline at end of file
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""test strategies for propagating values to the next level"""
import numpy as np
from multipletau import autocorrelate, correlate
def test_ac_compress_average():
    """Autocorrelation with the default "average" propagation strategy."""
    actual = autocorrelate(range(42), m=2, dtype=np.float_,
                           compress="average")
    # Reference values: lag time (1st column), autocorrelation (2nd column).
    expected = np.array([[0.0, 23821.0],
                         [1.0, 22960.0],
                         [2.0, 22100.0],
                         [4.0, 20377.5],
                         [8.0, 15061.2]])
    assert np.allclose(expected, actual)
def test_cc_compress_average():
    """Cross-correlation with the default "average" propagation strategy."""
    actual = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                       compress="average")
    # Reference values: lag time (1st column), cross-correlation (2nd column).
    expected = np.array([[0.0, 24682.0],
                         [1.0, 23821.0],
                         [2.0, 22960.0],
                         [4.0, 21232.5],
                         [8.0, 15850.8]])
    assert np.allclose(expected, actual)
def test_ac_compress_first():
    """Autocorrelation propagating only the first of each value pair."""
    actual = autocorrelate(range(42), m=2, dtype=np.float_,
                           compress="first")
    # Reference values: lag time (1st column), autocorrelation (2nd column).
    expected = np.array([[0.0, 23821.0],
                         [1.0, 22960.0],
                         [2.0, 22100.0],
                         [4.0, 19608.0],
                         [8.0, 13171.2]])
    assert np.allclose(expected, actual)
def test_cc_compress_first():
    """Cross-correlation propagating only the first of each value pair."""
    actual = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                       compress="first")
    # Reference values: lag time (1st column), cross-correlation (2nd column).
    expected = np.array([[0.0, 24682.0],
                         [1.0, 23821.0],
                         [2.0, 22960.0],
                         [4.0, 20444.0],
                         [8.0, 13910.4]])
    assert np.allclose(expected, actual)
def test_ac_compress_second():
    """Autocorrelation propagating only the second of each value pair."""
    actual = autocorrelate(range(42), m=2, dtype=np.float_,
                           compress="second")
    # Reference values: lag time (1st column), autocorrelation (2nd column).
    expected = np.array([[0.0, 23821.0],
                         [1.0, 22960.0],
                         [2.0, 22100.0],
                         [4.0, 21166.0],
                         [8.0, 17102.4]])
    assert np.allclose(expected, actual)
def test_cc_compress_second():
    """Cross-correlation propagating only the second of each value pair."""
    actual = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                       compress="second")
    # Reference values: lag time (1st column), cross-correlation (2nd column).
    expected = np.array([[0.0, 24682.0],
                         [1.0, 23821.0],
                         [2.0, 22960.0],
                         [4.0, 22040.0],
                         [8.0, 17942.4]])
    assert np.allclose(expected, actual)
if __name__ == "__main__":
    # Execute every test function in this module when run as a script.
    namespace = locals()
    # Snapshot the names first: the loop variable is itself inserted
    # into this namespace, which would otherwise change the dict while
    # it is being iterated.
    test_names = [name for name in list(namespace)
                  if name.startswith("test_")]
    for name in test_names:
        func = namespace[name]
        if hasattr(func, "__call__"):
            func()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Tests correlation algorithm
"""
"""Tests correlation algorithm"""
from __future__ import division, print_function
import numpy as np
import os
from os.path import abspath, basename, dirname, join, split, exists
import platform
import sys
import warnings
import zipfile
# Add parent directory to beginning of path variable
DIR = dirname(abspath(__file__))
sys.path = [split(DIR)[0]] + sys.path
import numpy as np
import pytest
import multipletau
......@@ -27,12 +18,12 @@ def get_sample_arrays_cplx():
b = [9921, 281, 23.5, 5.3, 77]
c = [12, 0, 2.1, 1.3, 33]
d = [32, .1, -2, 6.3, 88]
l = [ 33, 92, 47, 54, 99]
ll = [33, 92, 47, 54, 99]
r = [0, 1, 12, 4, 0]
p = [1, 4, .5, 2, 3]
arrs = []
for ai, bi, ci, di, li, ri, pi in zip(a,b,c,d,l,r,p):
for ai, bi, ci, di, li, ri, pi in zip(a, b, c, d, ll, r, p):
x = np.linspace(ai, bi, li)
y = np.linspace(ci, di, li)
arr = (x*np.roll(x, ri))**pi + 1j*y
......@@ -78,6 +69,7 @@ def test_cc_copy():
assert not np.all(arrs == refarrs)
@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
def test_cc_dtype():
myframe = sys._getframe()
myname = myframe.f_code.co_name
......@@ -85,7 +77,6 @@ def test_cc_dtype():
a = np.round(get_sample_arrays_cplx()[0].real)
# integer
rf = multipletau.correlate(a=a,
v=a,
......@@ -111,12 +102,17 @@ def test_cc_dtype():
copy=True,
dtype=None)
assert ri.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
assert ri2.dtype == np.dtype(np.float_), "if wrong dtype, dtype should default to np.float_"
assert np.all(rf == ri), "result should be the same, because input us the same"
assert np.all(rf == ri2), "result should be the same, because input us the same"
assert ri.dtype == np.dtype(
np.float_), "if wrong dtype, dtype should default to np.float_"
assert ri2.dtype == np.dtype(
np.float_), "if wrong dtype, dtype should default to np.float_"
assert np.all(
rf == ri), "result should be the same, because input us the same"
assert np.all(
rf == ri2), "result should be the same, because input us the same"
@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
def test_cc_dtype2():
myframe = sys._getframe()
myname = myframe.f_code.co_name
......@@ -124,7 +120,6 @@ def test_cc_dtype2():
a = np.round(get_sample_arrays_cplx()[0])
print("this should issue a warning of unequal input dtypes, casting to complex")
rf = multipletau.correlate(a=a.real,
v=a,
m=16,
......@@ -133,7 +128,6 @@ def test_cc_dtype2():
copy=True)
assert np.dtype(rf.dtype) == np.dtype(np.complex_)
print("this should issue a warning of unequal input dtypes, casting to float")
rf2 = multipletau.correlate(a=a.real,
v=np.array(a.imag, dtype=np.int_),
m=16,
......@@ -165,7 +159,7 @@ def test_cc_m():
res.append(r)
# test minimal length of array
_r2 = multipletau.correlate(a=a[:2*m],
multipletau.correlate(a=a[:2*m],
v=a[:2*m],
m=m,
deltat=1,
......@@ -174,12 +168,14 @@ def test_cc_m():
dtype=np.complex_)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-15)
@pytest.mark.filterwarnings("ignore::multipletau.core.InvalidMWarning")
def test_cc_m_wrong():
myframe = sys._getframe()
myname = myframe.f_code.co_name
......@@ -247,7 +243,8 @@ def test_cc_normalize():
dtype=np.float_)
res.append(r)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-14)
......@@ -272,7 +269,8 @@ def test_cc_simple():
res.append(r)
res = np.concatenate(res)
#np.save(os.path.dirname(__file__)+"/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
# np.save(os.path.dirname(__file__)
# + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
ref = get_reference_data(myname, __file__)
assert np.allclose(res, ref, atol=0, rtol=1e-15)
......@@ -298,4 +296,3 @@ if __name__ == "__main__":
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Compare to numpy data.
"""
import numpy as np
from os.path import abspath, dirname, join
"""Compare to numpy data"""
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import numpy as np
import multipletau
......
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""test returning exact sum plus normalization factor"""
import numpy as np
from multipletau import autocorrelate, correlate
def test_ac_return_sum():
    """Autocorrelation with ``ret_sum=True`` returns the pure sum and M-k."""
    actual, actual_count = autocorrelate(range(42), m=2, dtype=np.float_,
                                         ret_sum=True)
    # Reference values: lag time (1st column), un-normalized sum (2nd column).
    expected = np.array([[0.0, 23821.0],
                         [1.0, 22960.0],
                         [2.0, 22100.0],
                         [4.0, 10188.75],
                         [8.0, 3586.0]])
    # Expected normalization counts (M - k) for each row.
    expected_count = [42.0, 41.0, 40.0, 19.0, 8.0]
    assert np.allclose(expected, actual)
    assert np.allclose(expected_count, actual_count)
def test_cc_return_sum():
    """Cross-correlation with ``ret_sum=True`` returns the pure sum and M-k.

    Renamed from ``test_cc_compress_average``: the old name was a
    copy-paste leftover from the compress test file and did not
    describe what this function exercises (the ``ret_sum`` option of
    ``correlate``, not a compress strategy). The pytest discovery
    pattern ``test_*`` still matches, so no callers break.
    """
    ist, ist_count = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                               ret_sum=True)
    # Reference values: lag time (1st column), un-normalized sum (2nd column).
    soll = np.array([[0.000000e+00, 2.468200e+04],
                     [1.000000e+00, 2.382100e+04],
                     [2.000000e+00, 2.296000e+04],
                     [4.000000e+00, 1.061625e+04],
                     [8.000000e+00, 3.774000e+03]])
    # Expected normalization counts (M - k) for each row.
    soll_count = [42., 41., 40., 19., 8.]
    assert np.allclose(soll, ist)
    assert np.allclose(soll_count, ist_count)
if __name__ == "__main__":
    # Run all tests when this file is executed directly (no pytest needed).
    loc = locals()
    # Iterate over a snapshot of the names: the loop variable `key` is
    # itself inserted into this same namespace, which would otherwise
    # mutate the dict during iteration.
    for key in list(loc.keys()):
        # Call every module-level callable whose name marks it as a test.
        if key.startswith("test_") and hasattr(loc[key], "__call__"):
            loc[key]()