Skip to content
Commits on Source (2)
0.3.2
- fix: refactor code for opening .dat files; moved from a Cython
module to a pure Python module due to errors on big-endian
systems (#17)
0.3.1
- fix: console script entry point broken (#16)
- automate distribution to PyPI
- update documentation
0.3.0
- Move from Python 2 to Python 3 (#12)
- Move to wxPython 4.0.1 (#12)
......
clone_depth: 256
build: off
notifications:
- provider: Email
on_build_success: false
on_build_failure: false
on_build_status_changed: false
environment:
matrix:
- PYTHON: "C:\\Miniconda36-x64"
PYTHON_VERSION: "3.6"
PYTHON_ARCH: "64"
init:
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
install:
# Install InnoSetup and add to path
# Copied from
# https://github.com/Phonations/Joker/blob/master/appveyor.yml
- appveyor-retry choco install -y InnoSetup
# Prepend newly installed Python to the PATH of this build (this cannot be
# done from inside the powershell script as it would require restarting
# the parent CMD process).
- "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
# CONDA installs
# Pinned versions are defined in .appveyor\pinned
- xcopy .appveyor\pinned %PYTHON%\conda-meta\ /Y
- "appveyor-retry conda install --yes --quiet matplotlib numpy pip scipy wxpython"
# Check that we have the expected version and architecture for Python
- "python --version"
- "pip install cython wheel"
- "pip install pyinstaller==3.3.1"
- "pip install -e ."
# Show the installed packages
- "pip freeze"
test_script:
- "pip install coverage"
- "pip install codecov"
- "coverage run --source=pyscanfcs ./setup.py test"
- "coverage report -m"
- "codecov || exit 0"
after_test:
# If tests are successful, create a whl package for the project.
- "python setup.py bdist_wheel"
- ps: "ls dist"
# Run pyinstaller
# This will create the "win7_innosetup.iss" file
- "pyinstaller -y --log-level=WARN .appveyor\\PyScanFCS_win7.spec"
# Create InnoSetup installers
# Set InnoSetup path here, because Cython complained about it.
- set PATH=%PATH%;"C:\\Program Files (x86)\\Inno Setup 5"
- iscc /Q win7_innosetup.iss
artifacts:
# Archive the generated wheel package in the ci.appveyor.com build report.
- path: dist\*
# InnoSetup files
- path: Output\*
deploy:
provider: GitHub
auth_token:
secure: TODO_not_set_yet
artifact: /.*\.exe/, /.*\.whl/
draft: true
prerelease: true
on:
branch: master # release from master branch only
appveyor_repo_tag: true # deploy on tag push only
......@@ -83,6 +83,7 @@ master_doc = 'index'
# General information about the project.
project = 'PyScanFCS'
github_project = 'FCS-analysis/' + project
copyright = '2012, Paul Müller'
author = 'Paul Müller'
......
......@@ -29,13 +29,13 @@ class IncludeDirective(Directive):
def run(self):
full_path = self.arguments[0]
project = self.state.document.settings.env.config.project
project = self.state.document.settings.env.config.github_project
def insert_github_link(reobj):
line = reobj.string
instr = line[reobj.start():reobj.end()]
issue = instr.strip("#()")
link = "https://github.com/RI-imaging/{}/issues/".format(project)
link = "https://github.com/{}/issues/".format(project)
rstlink = "(`#{issue} <{link}{issue}>`_)".format(issue=issue,
link=link)
return rstlink
......@@ -70,5 +70,6 @@ class IncludeDirective(Directive):
def setup(app):
    """Sphinx entry point: register the changelog directive."""
    # conf.py is expected to override 'github_project' with the
    # actual "user/repo" slug; this is only a placeholder default.
    app.add_config_value('github_project', "user/project", 'html')
    app.add_directive('include_changelog', IncludeDirective)
    # Extension metadata reported back to Sphinx
    return {'version': '0.1'}
......@@ -7,7 +7,7 @@ Installation
- Windows installers for PyScanFCS are available at the `release page <https://github.com/FCS-analysis/PyScanFCS/releases>`_.
Note: The installer is currently broken because astropy cannot be frozen, see https://github.com/astropy/astropy/issues/7052.
- On Debian-based systems, install via ``apt-get install pyscanfcs``.
- If you have Python 3.6 installed, you may install PyScanFCS via ``pip install pyscanfcs[GUI]``.
- If you have Python 3.6 installed, you may install PyScanFCS via ``pip install pyscanfcs``.
After the installation, type ``pyscanfcs`` in a command shell to start PyScanFCS.
......@@ -15,7 +15,7 @@ Documentation
-------------
The documentation is in the process of being transferred entirely to
readthedocs.org. Currently, it is scattered across several
places and it is most-likely outdated:
places and it might be outdated:
- Original LaTeX-based PDF file (outdated): https://github.com/FCS-analysis/PyScanFCS/wiki/PyScanFCS_doc.pdf
- Wiki pages: https://github.com/FCS-analysis/PyScanFCS/wiki
- Original LaTeX-based PDF file: https://github.com/FCS-analysis/PyScanFCS/wiki/PyScanFCS_doc.pdf
- Book chapter in *Methods in Molecular Biology* (2013) on perpendicular line scanning FCS: https://arxiv.org/abs/1806.00070
from . import fitting
from . import openfile
from . import sfcs_alg
from . import util
from ._version import version as __version__
......
#!/usr/bin/env python
"""
Determine package version for git repositories.
"""Determine package version for git repositories from tags
Each time this file is imported it checks if the ".git" folder is
present and if so, obtains the version from the git history using
`git describe`. This information is then stored in the file
`_version_save.py` which is not versioned by git, but distributed
along with e.g. pypi.
along e.g. on PyPI.
"""
from __future__ import print_function
......@@ -15,7 +14,7 @@ from __future__ import print_function
if True: # pragma: no cover
import imp
import os
from os.path import join, abspath, dirname
from os.path import abspath, basename, dirname, join
import subprocess
import sys
import time
......@@ -24,12 +23,12 @@ if True: # pragma: no cover
def git_describe():
"""
Returns a string describing the version returned by the
Return a string describing the version returned by the
command `git describe --tags HEAD`.
If it is not possible to determine the correct version,
then an empty string is returned.
"""
# make sure we are in a directory that belongs to the correct
# Make sure we are in a directory that belongs to the correct
# repository.
ourdir = dirname(abspath(__file__))
......@@ -67,8 +66,7 @@ if True: # pragma: no cover
return git_revision
def load_version(versionfile):
""" load version from version_save.py
"""
"""load version from version_save.py"""
longversion = ""
try:
_version_save = imp.load_source("_version_save", versionfile)
......@@ -85,8 +83,7 @@ if True: # pragma: no cover
return longversion
def save_version(version, versionfile):
""" save version to version_save.py
"""
"""save version to version_save.py"""
data = "#!/usr/bin/env python\n" \
+ "# This file was created automatically\n" \
+ "longversion = '{VERSION}'\n"
......@@ -97,7 +94,15 @@ if True: # pragma: no cover
msg = "Could not write package version to {}.".format(versionfile)
warnings.warn(msg)
versionfile = join(dirname(abspath(__file__)), "_version_save.py")
hdir = dirname(abspath(__file__))
if basename(__file__) == "conf.py" and "name" in locals():
# This script is executed in conf.py from the docs directory
versionfile = join(join(join(hdir, ".."),
name), # noqa: F821
"_version_save.py")
else:
# This script is imported as a module
versionfile = join(hdir, "_version_save.py")
# Determine the accurate version
longversion = ""
......
......@@ -3,7 +3,6 @@ import warnings
import numpy as np
# See cython documentation for following stuff
# "cimport" is used to import special compile-time information
# about the numpy module (this is stored in a file numpy.pxd which is
# currently part of the Cython distribution).
......@@ -13,23 +12,18 @@ cimport numpy as np
# type info object.
DTYPEuint32 = np.uint32
DTYPEuint16 = np.uint16
DTYPEfloat32 = np.float32
# "ctypedef" assigns a corresponding compile-time type to DTYPE_t. For
# every type in the numpy module there's a corresponding compile-time
# type with a _t-suffix.
ctypedef np.uint32_t DTYPEuint32_t
ctypedef np.uint16_t DTYPEuint16_t
ctypedef np.float32_t DTYPEfloat32_t
cimport cython
# Negative indices are checked for and handled correctly. The code is
# explicitly coded so that it doesn’t use negative indices, and it (hopefully)
# always access within bounds. We can add a decorator to disable bounds
# checking:
cimport cython
__all__ = ["bin_photon_events", "open_dat"]
@cython.cdivision(True)
@cython.boundscheck(False) # turn of bounds-checking for entire function
def bin_photon_events(np.ndarray[DTYPEuint32_t] data, double t_bin,
......@@ -146,120 +140,5 @@ def bin_photon_events(np.ndarray[DTYPEuint32_t] data, double t_bin,
TempTrace.append(phot_c)
NewFile.write(outdtype(TempTrace))
del TempTrace
NewFile.close()
return outfile
@cython.cdivision(True)
@cython.boundscheck(False)  # turn off bounds-checking for entire function
def open_dat(filename, callback=None, cb_kwargs=None):
    """Load "Flex02-12D" correlator.com files

    We open a .dat file as produced by the "Flex02-12D" correlator in photon
    history recorder mode.
    The file contains the time differences between single photon events.

    Parameters
    ----------
    filename : str
        Path to file
    callback : callable or None
        Callback function to be called throughout the algorithm. If the
        return value of `callback` is not None, the function will abort.
        Number of function calls: 3
    cb_kwargs : dict, optional
        Keyword arguments for `callback` (e.g. "pid" of process).
        Defaults to an empty dict.

    Returns
    -------
    system_clock, datData
        The system clock in MHz and the photon time event stream.
        Returns (None, None) if the progress was aborted through the
        callback function.

    Notes
    -----
    Raw data file format (taken from manual):
    1. The file records the difference in system clock ticks (1/60 us)
       between photon events.
    2. The first byte identifies the format of the file 8 : 8 bit, 16: 16 bit
    3. The second byte identifies the system clock. 60MHz.
    4. The time unit is 1/system clock.
    5. 16 bit format. Each WORD (2 bytes) represents a photon event,
       time = WORD/system clock, unless the value is 0xFFFF, in which case,
       the following four bytes represent a photon event.
    6. 8 bit format: Each BYTE represents a photon event unless the value is
       0xFF, in which case, the BYTE means 255 clock ticks passed without a
       photon event. For example 0A 0B FF 08 means there are three
       photon events. The time series are 0x0A+1, 0x0B+1, 0xFF+8+1.
    """
    cdef np.ndarray[DTYPEuint16_t] Data
    cdef np.ndarray[DTYPEuint32_t] datData
    cdef int i, N
    # Avoid a shared mutable default argument for the callback kwargs.
    if cb_kwargs is None:
        cb_kwargs = {}
    # open file
    File = open(filename, 'rb')
    # 1st byte: get file format
    # should be 16 - for 16 bit
    fformat = int(np.fromfile(File, dtype="uint8", count=1))
    # 2nd byte: read system clock
    system_clock = int(np.fromfile(File, dtype="uint8", count=1))
    if fformat == 8:
        # No 8 bit format supported
        warnings.warn('8 bit format not supported.')
        File.close()
        return system_clock, None
    elif fformat == 32:
        # (There is a utility to convert data to 32bit)
        datData = np.fromfile(File, dtype="uint32", count=-1)
        File.close()
        return system_clock, datData
    elif fformat == 16:
        pass
    else:
        warnings.warn("Unknown format: {} bit".format(fformat))
        File.close()
        return system_clock, None
    # In case of 16 bit file format (assumed), read the rest of the file in
    # 16 bit format.
    # Load bunch of Data
    Data = np.fromfile(File, dtype="uint16", count=-1)
    File.close()
    # Now we need to check if there are any 0xFFFF values which would
    # mean, that we do not yet have the true data in our array.
    # There is 32 bit data after a 0xFFFF = 65535
    if callback is not None:
        ret = callback(**cb_kwargs)
        if ret is not None:
            return None, None
    occurrences = np.where(Data == 65535)[0]
    N = len(occurrences)
    if callback is not None:
        ret = callback(**cb_kwargs)
        if ret is not None:
            return None, None
    # Make a 32 bit array; the casts prevent uint16 overflow when
    # reassembling the 32 bit values from two consecutive WORDs.
    datData = np.uint32(Data)
    datData[occurrences] = np.uint32(
        Data[occurrences + 1]) + np.uint32(Data[occurrences + 2]) * 65536
    if callback is not None:
        ret = callback(**cb_kwargs)
        if ret is not None:
            return None, None
    # Now delete the two WORDs that encoded each 32 bit value.
    # Use an integer dtype: newer numpy versions reject float index
    # arrays in np.delete.
    zeroids = np.zeros(N * 2, dtype=int)
    zeroids[::2] = occurrences + 1
    zeroids[1::2] = occurrences + 2
    datData = np.delete(datData, zeroids)
    del Data
    return system_clock, datData
......@@ -139,7 +139,8 @@ PyScanFCS) in numerous ways:
\tfamiliar with reStructuredText or LaTeX, you might be able to help
\tout with the online documentation.
5. \tPlease cite: Müller et al. Bioinformatics 30(17): 2532–2533, 2014
5. \tPlease cite: Müller et al. Methods in Molecular Biology, 1076,
\t(635–51), 2014.
If you are planning to contribute to PyScanFCS, please contact me via
the PyScanFCS issue page on GitHub such that we may coordinate a pull
......
......@@ -28,7 +28,7 @@ from wx.lib.scrolledpanel import ScrolledPanel
from .. import fitting
from .. import openfile
from .. import sfcs_alg
from .. import bin_pe
from .. import util
from . import doc
......@@ -230,22 +230,6 @@ maximum. \n The data achieved will automatically be updated within the main prog
self.redline = self.axes.vlines(
ltime, self.ymin, self.ymax, color='red')
# Tried with gaussian fit
#amplitudes = self.ampl[start:stop]
#frequencies = self.freq[start:stop]
#argmax = np.argmax(amplitudes)
# Get gaussian function and optimal parameters
## popt = [freq, ampl, sigma]
## gauss(popt, frequencies)
#popt, gauss = sfcs_alg.FitGaussian(amplitudes, frequencies, argmax)
#self.pnt.t_linescan = 1./popt[0]
#lenplot = 1000
#ids_plot = np.linspace(start,stop,lenplot, endpoint=False)
#freq_plot = np.linspace(frequencies[0],frequencies[-1],lenplot, endpoint=False)
#a = np.argmax(gauss(popt, freq_plot))
#self.pnt.t_linescan = 1./freq_plot[a]
#self.redline = self.axes.plot(ids_plot, gauss(popt, freq_plot), '-', color='red')
self.canvas.draw()
# Plot the results
# Set check box to True: "use cycle time"
......@@ -383,7 +367,7 @@ class MyFrame(wx.Frame):
print("Creating file {} ({})".format(outfile, outdtype.__name__))
sfcs_alg.bin_photon_events(Data, t_bin, binshift=eb,
bin_pe.bin_photon_events(Data, t_bin, binshift=eb,
outfile=outfile,
outdtype=outdtype,
callback=wxdlg.Iterate)
......@@ -418,7 +402,7 @@ class MyFrame(wx.Frame):
print("Creating file {} ({})".format(outfile, outdtype.__name__))
sfcs_alg.bin_photon_events(Data, t_bin, binshift=eb,
bin_pe.bin_photon_events(Data, t_bin, binshift=eb,
outfile=outfile,
outdtype=outdtype,
callback=wxdlg.Iterate)
......@@ -1287,8 +1271,8 @@ class MyFrame(wx.Frame):
wxdlg = uilayer.wxdlg(parent=self, steps=3,
title="Importing dat file...")
datData2 = sfcs_alg.open_dat(
path, callback=wxdlg.Iterate)[1]
datData2 = openfile.openDAT(
path, callback=wxdlg.Iterate)["data_stream"]
wxdlg.Finalize()
# Bin to obtain intData2
......@@ -1719,8 +1703,9 @@ class MyFrame(wx.Frame):
wxdlg = uilayer.wxdlg(parent=self, steps=3,
title="Importing dat file...")
self.system_clock, self.datData = sfcs_alg.open_dat(
filename, callback=wxdlg.Iterate)
info = openfile.openDAT(filename, callback=wxdlg.Iterate)
self.system_clock = info["system_clock"]
self.datData = info["data_stream"]
wxdlg.Finalize()
self.GetTotalTime()
......
"""filetype definitions"""
import astropy.io.fits
import numpy as np
from skimage.external import tifffile
from . import sfcs_alg
def openAny(fname, callback=None):
def openAny(path, callback=None):
"""load any supported file type"""
methods = methods_binned.copy()
methods.update(methods_stream)
for key in list(methods.keys()):
if fname.endswith(key):
return methods[key](fname, callback)
def openDAT(fname, callback=None):
system_clock, intensity_data = sfcs_alg.open_dat(fname, callback)
info = dict()
info["data_stream"] = intensity_data
info["system_clock"] = system_clock
if path.endswith(key):
return methods[key](path, callback)
def openDAT(path, callback=None, cb_kwargs=None):
    """Load "Flex02-12D" correlator.com files

    We open a .dat file as produced by the "Flex02-12D" correlator in photon
    history recorder mode.
    The file contains the time differences between single photon events.

    Parameters
    ----------
    path : str
        Path to file
    callback : callable or None
        Callback function to be called throughout the algorithm. If the
        return value of `callback` is not None, the function will abort.
        Number of function calls: 3
    cb_kwargs : dict, optional
        Keyword arguments for `callback` (e.g. "pid" of process).
        Defaults to an empty dict.

    Returns
    -------
    info: dict
        Dictionary containing the "system_clock" in MHz and the
        "data_stream" (photon arrival time event stream).
        Returns `None` if the progress was aborted through the
        callback function.

    Raises
    ------
    ValueError
        If the file uses the unsupported 8 bit format or an unknown
        format identifier.

    Notes
    -----
    Raw data file format (taken from manual):
    1. The file records the difference in system clock ticks (1/60 us)
       between photon events.
    2. The first byte identifies the format of the file 8 : 8 bit, 16: 16 bit
    3. The second byte identifies the system clock. 60MHz.
    4. The time unit is 1/system clock.
    5. 16 bit format. Each WORD (2 bytes) represents a photon event,
       time = WORD/system clock, unless the value is 0xFFFF, in which case,
       the following four bytes represent a photon event.
    6. 8 bit format: Each BYTE represents a photon event unless the value is
       0xFF, in which case, the BYTE means 255 clock ticks passed without a
       photon event. For example 0A 0B FF 08 means there are three
       photon events. The time series are 0x0A+1, 0x0B+1, 0xFF+8+1.
    """
    # Avoid a shared mutable default argument.
    if cb_kwargs is None:
        cb_kwargs = {}
    # `with` guarantees the file is closed on every path, including
    # the callback-abort returns and the ValueError raises below
    # (the previous version leaked the handle there).
    with open(path, 'rb') as filed:
        # 1st byte: get file format; should be 16 - for 16 bit
        fformat = int(np.fromfile(filed, dtype="<u1", count=1)[0])
        # 2nd byte: read system clock [MHz]
        system_clock = int(np.fromfile(filed, dtype="<u1", count=1)[0])
        if fformat == 8:
            # No 8 bit format supported
            raise ValueError("8 bit format not supported!")
        elif fformat == 32:
            # (There is a utility to convert data to 32bit)
            data = np.fromfile(filed, dtype="<u4", count=-1)
        elif fformat == 16:
            # convert 16bit to 32bit:
            # read the rest of the file in 16 bit format.
            data16 = np.fromfile(filed, dtype="<u2", count=-1)
            # Now we need to check if there are any 0xFFFF values which
            # would mean, that we do not yet have the true data in our
            # array. There is 32 bit data after a 0xFFFF = 65535.
            if callback is not None:
                if callback(**cb_kwargs) is not None:
                    return None
            # occurrences of the 0xFFFF escape markers
            occ = np.where(data16 == 65535)[0]
            num = len(occ)
            if callback is not None:
                if callback(**cb_kwargs) is not None:
                    return None
            # Make a 32 bit array. Cast explicitly BEFORE the arithmetic:
            # multiplying a uint16 array by 65536 would overflow/upcast
            # unpredictably depending on the numpy promotion rules.
            data = data16.astype(np.uint32)
            data[occ] = (data16[occ + 1].astype(np.uint32)
                         + data16[occ + 2].astype(np.uint32) * 65536)
            if callback is not None:
                if callback(**cb_kwargs) is not None:
                    return None
            # Delete the two WORDs that encoded each 32 bit value.
            zeroids = np.zeros(num * 2, dtype=int)
            zeroids[::2] = occ + 1
            zeroids[1::2] = occ + 2
            data = np.delete(data, zeroids)
        else:
            raise ValueError("Unknown format: {} bit".format(fformat))
    info = {"data_stream": data,
            "system_clock": system_clock
            }
    return info
......
......@@ -18,8 +18,8 @@ except ImportError:
"with this setup script will not work:", sys.exc_info())
extensions = []
else:
extensions = [Extension("pyscanfcs.sfcs_alg",
sources=["pyscanfcs/sfcs_alg.pyx"],
extensions = [Extension("pyscanfcs.bin_pe",
sources=["pyscanfcs/bin_pe.pyx"],
include_dirs=[np.get_include()]
)
]
......@@ -85,6 +85,6 @@ setup(
],
platforms=['ALL'],
entry_points={
"gui_scripts": ["{name:s}={name:s}:Main".format(**{"name":name})]
"gui_scripts": ["pyscanfcs=pyscanfcs.gui_wx.main:Main"]
}
)
import pathlib
import numpy as np
from pyscanfcs import sfcs_alg
def test_open_dat():
    """16 bit and 32 bit encodings must decode to the same stream."""
    data_dir = pathlib.Path(__file__).parent / "data"
    clock16, stream16 = sfcs_alg.open_dat(str(data_dir / "n2000_7.0ms_16bit.dat"))
    clock32, stream32 = sfcs_alg.open_dat(str(data_dir / "n2000_7.0ms_32bit.dat"))
    # Both files record the same measurement at a 60 MHz system clock.
    assert clock16 == 60
    assert clock32 == 60
    assert np.all(stream16 == stream32)
    expected_tail = np.array([1, 1, 21420, 21418, 21420])
    assert np.all(stream16[-5:] == expected_tail)
from pyscanfcs import bin_pe
def test_bin_photon_events():
......@@ -25,7 +11,7 @@ def test_bin_photon_events():
# blank
8, 1, 1, # 28
], dtype=np.uint32)
binf = sfcs_alg.bin_photon_events(data=data, t_bin=5.0001)
binf = bin_pe.bin_photon_events(data=data, t_bin=5.0001)
binned = np.fromfile(binf, dtype="uint16", count=-1)
assert np.all(binned == np.array([1, 3, 4, 3, 0, 3]))
......
import pathlib
import numpy as np
from pyscanfcs import openfile
def test_open_dat():
    """16 bit and 32 bit encodings must decode to the same stream."""
    data_dir = pathlib.Path(__file__).parent / "data"
    result16 = openfile.openDAT(str(data_dir / "n2000_7.0ms_16bit.dat"))
    result32 = openfile.openDAT(str(data_dir / "n2000_7.0ms_32bit.dat"))
    # Both files record the same measurement at a 60 MHz system clock.
    assert result16["system_clock"] == 60
    assert result32["system_clock"] == 60
    assert np.all(result16["data_stream"] == result32["data_stream"])
    expected_tail = np.array([1, 1, 21420, 21418, 21420])
    assert np.all(result16["data_stream"][-5:] == expected_tail)
if __name__ == "__main__":
    # Execute every test function defined at module level when the
    # file is run as a script (instead of through pytest).
    for _name, _obj in list(locals().items()):
        if _name.startswith("test_") and callable(_obj):
            _obj()