Commits on Source (9)
SHELL = /bin/bash -e
utest:
PYTHONPATH=.:${PYTHONPATH} py.test -s -v test/test_internal.py
all: build install
build:
......@@ -23,7 +26,7 @@ clean:
test: tests
check: tests
tests: cram-tests unit-tests extra-tests
tests: cram-tests py-tests extra-tests
cram-tests:
cram --xunit-file=cramtests.xml test/cram/*.t
......@@ -31,8 +34,9 @@ cram-tests:
long-tests:
cram test/cram/long_running/*.t
unit-tests:
nosetests -s -v --with-xunit test/*.py
py-tests:
#nosetests -s -v --with-xunit test/*.py
py.test -s -v --junit-xml=nosetests.xml test/*.py
extra-tests:
cram --xunit-file=cramtests-extra.xml test/cram/extra/*.t
......
......@@ -9,3 +9,7 @@ Academic Publications:
Documentation:
* [Tool documentation](http://github.com/PacificBiosciences/kineticsTools/blob/master/doc/manual.rst)
* [Methods description](http://github.com/PacificBiosciences/kineticsTools/blob/master/doc/whitepaper/kinetics.pdf)
DISCLAIMER
----------
THIS WEBSITE AND CONTENT AND ALL SITE-RELATED SERVICES, INCLUDING ANY DATA, ARE PROVIDED "AS IS," WITH ALL FAULTS, WITH NO REPRESENTATIONS OR WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY, NON-INFRINGEMENT OR FITNESS FOR A PARTICULAR PURPOSE. YOU ASSUME TOTAL RESPONSIBILITY AND RISK FOR YOUR USE OF THIS SITE, ALL SITE-RELATED SERVICES, AND ANY THIRD PARTY WEBSITES OR APPLICATIONS. NO ORAL OR WRITTEN INFORMATION OR ADVICE SHALL CREATE A WARRANTY OF ANY KIND. ANY REFERENCES TO SPECIFIC PRODUCTS OR SERVICES ON THE WEBSITES DO NOT CONSTITUTE OR IMPLY A RECOMMENDATION OR ENDORSEMENT BY PACIFIC BIOSCIENCES.
#!/bin/bash -ex
NX3PBASEURL=http://nexus/repository/unsupported/pitchfork/gcc-6.4.0
export PATH=$PWD/build/bin:/mnt/software/a/anaconda2/4.2.0/bin:$PWD/bin:$PATH
export PYTHONUSERBASE=$PWD/build
export CFLAGS="-I/mnt/software/a/anaconda2/4.2.0/include"
PIP="pip --cache-dir=$bamboo_build_working_directory/.pip"
type module >& /dev/null || . /mnt/software/Modules/current/init/bash
module load gcc
rm -rf build
mkdir -p build/bin build/lib build/include build/share
$PIP install --user \
iso8601
$PIP install --user \
$NX3PBASEURL/pythonpkgs/xmlbuilder-1.0-cp27-none-any.whl \
$NX3PBASEURL/pythonpkgs/tabulate-0.7.5-cp27-none-any.whl \
$NX3PBASEURL/pythonpkgs/pysam-0.13-cp27-cp27mu-linux_x86_64.whl \
$NX3PBASEURL/pythonpkgs/avro-1.7.7-cp27-none-any.whl
$PIP install --user -e repos/pbcommand
$PIP install --user -e repos/pbcore
$PIP install --user -r requirements-ci.txt
$PIP install --user -r requirements-dev.txt
$PIP install --user --no-index $PWD
make test
from __future__ import print_function
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
......@@ -45,13 +46,13 @@ class Sub(Process):
def run(self):
import time
print "self.arr[10] = %f, Process = %s" % (self.arr[10], current_process())
print("self.arr[10] = %f, Process = %s" % (self.arr[10], current_process()))
print self.arr.shape
print(self.arr.shape)
n = self.arr.shape[0] - 1
print "self.arr[%d] = %f, Process = %s" % (n, self.arr[n], current_process())
print("self.arr[%d] = %f, Process = %s" % (n, self.arr[n], current_process()))
time.sleep(10)
......
......@@ -30,6 +30,7 @@
#################################################################################
from __future__ import print_function
import cProfile
from pbcore.io import GffReader, Gff3Record
import os
......@@ -132,13 +133,13 @@ class IpdRatioSummaryWriter(PBToolRunner):
if field == 'sequence-header':
[internalTag, delim, externalTag] = value.strip().partition(' ')
self.seqMap[internalTag] = externalTag
print >>summaryWriter, line.strip()
print(line.strip(), file=summaryWriter)
continue
if inHeader:
# We are at the end of the header -- write the tool-specific headers
for field in headers:
print >>summaryWriter, ("##%s %s" % field)
print(("##%s %s" % field), file=summaryWriter)
inHeader = False
# Parse the line
......@@ -153,7 +154,7 @@ class IpdRatioSummaryWriter(PBToolRunner):
rec.modsfwd = strand0Hits
rec.modsrev = strand1Hits
print >>summaryWriter, str(rec)
print(str(rec), file=summaryWriter)
if __name__ == "__main__":
kt = ModificationSummary()
......
kineticstools (0.6.1+git20180425.27a1878-1) UNRELEASED; urgency=medium
* Team upload.
* Use Git mode in watch file
* debhelper 11
* Point Vcs fields to salsa.debian.org
* Standards-Version: 4.2.1
* Secure URI in copyright format
* Build-Depends: python-pytest
-- Andreas Tille <tille@debian.org> Sun, 28 Oct 2018 19:34:03 +0100
kineticstools (0.6.1+20161222-1) unstable; urgency=medium
[ Andreas Tille ]
......
Source: kineticstools
Section: science
Priority: optional
Maintainer: Debian Med Packaging Team <debian-med-packaging@lists.alioth.debian.org>
Uploaders: Afif Elghraoui <afif@debian.org>
Build-Depends:
debhelper (>= 9),
Section: science
Priority: optional
Build-Depends: debhelper (>= 11~),
dh-python,
python-all,
python-setuptools,
python-docutils,
python-pbcore (>= 1.2.8),
python-pbcommand (>= 0.3.22),
python-numpy (>= 1.6.0),
python-h5py (>= 1.3.0),
python-scipy (>= 0.9.0),
python-numpy,
python-h5py,
python-scipy,
# Test-Depends:
python-nose,
Standards-Version: 3.9.8
python-pytest
Standards-Version: 4.2.1
Vcs-Browser: https://salsa.debian.org/med-team/kineticstools
Vcs-Git: https://salsa.debian.org/med-team/kineticstools.git
Homepage: https://github.com/PacificBiosciences/kineticsTools
Vcs-Git: https://anonscm.debian.org/git/debian-med/kineticstools.git
Vcs-Browser: https://anonscm.debian.org/cgit/debian-med/kineticstools.git
Package: kineticstools
Architecture: all
Depends:
${misc:Depends},
Depends: ${misc:Depends},
${python:Depends},
python-kineticstools (>= ${source:Version}),
python-pkg-resources
......@@ -53,13 +52,12 @@ Description: detection of DNA modifications -- data files
This package contains the package data files.
Package: python-kineticstools
Section: python
Architecture: any
Depends:
${misc:Depends},
Section: python
Depends: ${misc:Depends},
${shlibs:Depends},
${python:Depends},
kineticstools-data (= ${source:Version}),
kineticstools-data (= ${source:Version})
Suggests: python-pybigwig
Description: detection of DNA modifications (Python 2 library)
Tools for detecting DNA modifications from single molecule, real-time (SMRT®)
......
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: kineticsTools
Upstream-Contact: devnet@pacificbiosciences.com
Source: https://github.com/PacificBiosciences/kineticsTools
......
......@@ -2,14 +2,14 @@ Description: Don't execute tests that require unavailable data files
Author: Afif Elghraoui <afif@debian.org>
Forwarded: not-needed
Last-Update: 2017-01-15
--- kineticstools.orig/Makefile
+++ kineticstools/Makefile
@@ -26,7 +26,7 @@
tests: cram-tests unit-tests extra-tests
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ check: tests
tests: cram-tests py-tests extra-tests
cram-tests:
- cram --verbose --xunit-file=cramtests.xml test/cram/*.t
+ cram --verbose --xunit-file=cramtests.xml `ls test/cram/*.t | grep -v detection_bam`
long-tests:
cram --verbose test/cram/long_running/*.t
cram test/cram/long_running/*.t
......@@ -39,7 +39,7 @@ Last-Update: 2015-12-09
Modification Detection
----------------------
The basic mode of kineticsTools does an independent comparison of IPDs at each position on the genome, for each strand, and emits various statistics to CSV and GFF (after applying a significance filter).
@@ -28,8 +27,11 @@
@@ -28,8 +27,11 @@ kineticsTools also has a *Modification I
* Different modifications occurring on the same base can be distinguished (for example m5C and m4C)
* The signal from one modification is combined into one statistic, improving sensitivity, removing extra peaks, and correctly centering the call
......@@ -52,7 +52,7 @@ Last-Update: 2015-12-09
Algorithm
=========
@@ -56,7 +58,6 @@
@@ -56,7 +58,6 @@ Statistical Testing
We test the hypothesis that IPDs observed at a particular locus in the sample have a longer mean than IPDs observed at the same locus in unmodified DNA. If we have generated a Whole Genome Amplified dataset, which removes DNA modifications, we use a case-control, two-sample t-test. This tool also provides a pre-calibrated 'synthetic control' model which predicts the unmodified IPD, given a 12-base sequence context. In the synthetic control case we use a one-sample t-test, with an adjustment to account for error in the synthetic control model.
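As an illustration only (not the actual kineticsTools implementation, which uses a one-sided test plus an adjustment for model error), the two testing modes described above could be sketched with SciPy roughly as follows; case_ipds, control_ipds, and predicted_mean are hypothetical inputs::

    # Sketch under assumed per-locus data; not production code.
    import numpy as np
    from scipy import stats

    case_ipds = np.array([1.9, 2.3, 2.1, 2.6, 1.8])     # native-sample IPDs at one locus (hypothetical)
    control_ipds = np.array([1.0, 1.2, 0.9, 1.1, 1.0])  # whole-genome-amplified control IPDs (hypothetical)
    predicted_mean = 1.05                                # synthetic-control model prediction (hypothetical)

    # Case-control mode: two-sample t-test against the WGA control.
    t_cc, p_cc = stats.ttest_ind(case_ipds, control_ipds)

    # Synthetic-control mode: one-sample t-test against the model prediction.
    t_sc, p_sc = stats.ttest_1samp(case_ipds, predicted_mean)

    print("case-control: t=%.2f p=%.3g" % (t_cc, p_cc))
    print("synthetic control: t=%.2f p=%.3g" % (t_sc, p_sc))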
......@@ -60,7 +60,7 @@ Last-Update: 2015-12-09
Example Usage
=============
@@ -69,7 +70,6 @@
@@ -69,7 +70,6 @@ With cmp.h5 input, methyl fraction calcu
ipdSummary aligned.cmp.h5 --reference ref.fasta --identify m6A,m4C --methylFraction --gff basemods.gff --csv kinetics.csv
......@@ -68,7 +68,7 @@ Last-Update: 2015-12-09
Inputs
======
@@ -85,7 +85,6 @@
@@ -85,7 +85,6 @@ Reference Sequence
The tool requires the reference sequence used to perform alignments. This can
be either a FASTA file or a ReferenceSet XML.
......@@ -76,7 +76,7 @@ Last-Update: 2015-12-09
Outputs
=======
@@ -176,3 +175,8 @@
@@ -224,3 +223,8 @@ coverage mean of case and
controlCoverage count of valid control IPDs at this position (see Filtering section for details)
caseCoverage count of valid case IPDs at this position (see Filtering section for details)
================ ===========
......
......@@ -2,9 +2,9 @@ Description: Fix import statement for tests
Author: Afif Elghraoui <afif@ghraoui.name>
Forwarded: no
Last-Update: 2015-10-29
--- kineticstools.orig/bin/testShared.py
+++ kineticstools/bin/testShared.py
@@ -29,7 +29,7 @@
--- a/bin/testShared.py
+++ b/bin/testShared.py
@@ -30,7 +30,7 @@ from __future__ import print_function
#################################################################################
from multiprocessing.process import Process, current_process
......
......@@ -2,21 +2,19 @@ Description: Make test execution verbose
Author: Afif Elghraoui <afif@debian.org>
Forwarded: no
Last-Update: 2017-01-15
--- kineticstools.orig/Makefile
+++ kineticstools/Makefile
@@ -26,16 +26,16 @@
tests: cram-tests unit-tests extra-tests
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ check: tests
tests: cram-tests py-tests extra-tests
cram-tests:
- cram --xunit-file=cramtests.xml test/cram/*.t
+ cram --verbose --xunit-file=cramtests.xml test/cram/*.t
long-tests:
- cram test/cram/long_running/*.t
+ cram --verbose test/cram/long_running/*.t
unit-tests:
nosetests -s -v --with-xunit test/*.py
cram test/cram/long_running/*.t
@@ -39,7 +39,7 @@ py-tests:
py.test -s -v --junit-xml=nosetests.xml test/*.py
extra-tests:
- cram --xunit-file=cramtests-extra.xml test/cram/extra/*.t
......
version=4
opts=dversionmangle=s/.*/0.No-Track/ \
https://people.debian.org/~eriberto/ FakeWatchNoUpstreamTrackingForThisPackage-(\d\S+)\.gz
opts="mode=git,pretty=0.6.1+git%cd.%h" \
https://github.com/PacificBiosciences/kineticsTools.git HEAD
# Issue asking for release tags:
# https://github.com/PacificBiosciences/kineticsTools/issues/49
\ No newline at end of file
......@@ -100,6 +100,9 @@ The following output options are available:
- ``--csv_h5 FILENAME``: compact binary equivalent of CSV in HDF5 format
- ``--bigwig FILENAME``: BigWig file (mostly only useful for SMRTView)
If you are running base modification analysis through SMRT Link or a pbsmrtpipe
pipeline, the GFF, HDF5, and BigWig outputs are automatically generated.
modifications.gff
-----------------
......@@ -125,13 +128,58 @@ phase Not applicable
attributes Extra fields relevant to base mods. IPDRatio is the traditional IPDRatio, context is the reference sequence -20bp to +20bp around the modification, and coverage level is the number of IPD observations used after Mapping QV filtering and accuracy filtering. If the row results from an identified modification, we also include an identificationQv tag with the value from the modification identification procedure. identificationQv is the phred-transformed probability of an incorrect identification, for bases that were identified as having a particular modification. frac, fracLow, fracUp are the estimated fraction of molecules carrying the modification, and the bounds of the 95% confidence interval of the estimate. The methylated fraction estimation is a beta-level feature, and should only be used for exploratory purposes.
================ ===========
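For quick scripting outside of pbcore's GffReader, the attributes column can be split by hand. This is only a sketch, assuming the standard semicolon-separated key=value layout and a hypothetical example row; any given row may carry only a subset of the tags described above::

    # Sketch: parse the attributes field of one modifications.gff row into a dict.
    def parse_attributes(attr_field):
        attrs = {}
        for pair in attr_field.strip().split(";"):
            if pair:
                key, _, value = pair.partition("=")
                attrs[key] = value
        return attrs

    # Hypothetical attributes field, for illustration only:
    example = "coverage=42;context=TAGGCATAGCGGTCATG;IPDRatio=4.1;identificationQv=33"
    print(parse_attributes(example)["IPDRatio"])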
modifications.csv
-----------------
The modifications.csv file contains one row for each (reference position, strand) pair that appeared in the dataset with coverage at least x.
x defaults to 3, but is configurable with the '--minCoverage' flag to ipdSummary.py. The reference position index is 1-based for compatibility with the gff file and the R environment. Note that this output type scales poorly and is not
recommended for large genomes; the HDF5 output should perform much better in
these cases.
these cases. We have preserved the CSV option to support legacy applications,
but it is no longer produced by the pipelines in SMRT Link/pbsmrtpipe.
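As a sketch only (the column names assume the pbsmrtpipe output described under "Output columns" below; check the header row of your own file), the CSV can be filtered by coverage with the standard library::

    # Sketch: keep only rows whose coverage clears a chosen threshold.
    import csv

    with open("modifications.csv") as f:
        for row in csv.DictReader(f):
            if int(row["coverage"]) >= 10:
                # row["tpl"] is the 1-based reference position
                process_row(row)  # process_row is a placeholder for your own handling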
modifications.h5
----------------
The HDF5 output largely mirrors the CSV output in content, but is structured
slightly differently. Each contig in the reference has its own group in the
file, keyed by FASTA ID. For each group, the columns in the CSV file are
represented as arrays::
modifications.h5
--> refName
--> tpl
--> strand
--> base
--> score
--> tMean
--> tErr
--> modelPrediction
--> ipdRatio
--> coverage
For example, the following code to iterate over the CSV file::
    import csv

    with open("modifications.csv") as f:
        for rec in csv.reader(f):
            process_record(rec)  # process_record is a placeholder for per-row handling
translates approximately to this code for reading the HDF5::
    import h5py

    COLUMNS = "refName,tpl,strand,base,score,tMean,tErr,modelPrediction,ipdRatio,coverage".split(",")

    with h5py.File(file_name, "r") as f:
        for ctg_id in sorted(f.keys()):
            values = f[ctg_id]
            for i in range(len(values["tpl"])):
                # fmt() is a placeholder for whatever per-value formatting is needed
                rec = [ctg_id] + [fmt(values[k][i]) for k in COLUMNS[1:]]
                process_record(rec)
Note that the exact columns present in both files may vary depending on how
kineticsTools was run; however, the example above is valid for the results of
the pbsmrtpipe base modification analysis pipelines.
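Since the column set can differ between runs, it may help to inspect a file before assuming a fixed schema; a minimal sketch using h5py::

    # Sketch: list the per-contig groups and the columns each one actually contains.
    import h5py

    with h5py.File("modifications.h5", "r") as f:
        for ctg_id in sorted(f.keys()):
            print(ctg_id, sorted(f[ctg_id].keys()))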
Output columns
--------------
......
from __future__ import absolute_import
# Basic LDA Enricher class
from math import sqrt
......@@ -10,8 +11,8 @@ from scipy.special import gammaln as gamln
from numpy import log, pi, log10, e, log1p, exp
import numpy as np
from MultiSiteCommon import MultiSiteCommon
from MixtureEstimationMethods import MixtureEstimationMethods
from .MultiSiteCommon import MultiSiteCommon
from .MixtureEstimationMethods import MixtureEstimationMethods
class BasicLdaEnricher(MultiSiteCommon):
......
from __future__ import print_function
from __future__ import absolute_import
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
......@@ -38,18 +40,18 @@ import numpy as np
import scipy.stats.mstats as mstats
import sys
from MixtureEstimationMethods import MixtureEstimationMethods
from MultiSiteCommon import MultiSiteCommon, canonicalBaseMap, modNames, ModificationPeakMask, FRAC, FRAClow, FRACup, log10e
from .MixtureEstimationMethods import MixtureEstimationMethods
from .MultiSiteCommon import MultiSiteCommon, canonicalBaseMap, modNames, ModificationPeakMask, FRAC, FRAClow, FRACup, log10e
from MultiSiteDetection import *
from .MultiSiteDetection import *
from MedakaLdaEnricher import MedakaLdaEnricher
from BasicLdaEnricher import BasicLdaEnricher
from PositiveControlEnricher import PositiveControlEnricher
from .MedakaLdaEnricher import MedakaLdaEnricher
from .BasicLdaEnricher import BasicLdaEnricher
from .PositiveControlEnricher import PositiveControlEnricher
from kineticsTools.ModificationDecode import ModificationDecode, ModificationPeakMask
from WorkerProcess import WorkerProcess, WorkerThread
from .WorkerProcess import WorkerProcess, WorkerThread
import pdb
import traceback
......@@ -145,13 +147,13 @@ class KineticWorker(object):
# Only convert to positive control call if we actually have enough
# coverage on the cognate base!
if siteDict.has_key(mod['tpl']):
if mod['tpl'] in siteDict:
# Copy mod identification data
siteDict[mod['tpl']]['modificationScore'] = mod['QMod']
siteDict[mod['tpl']]['modification'] = mod['modification']
if self.options.methylFraction and mod.has_key(FRAC):
if self.options.methylFraction and FRAC in mod:
siteDict[mod['tpl']][FRAC] = mod[FRAC]
siteDict[mod['tpl']][FRAClow] = mod[FRAClow]
siteDict[mod['tpl']][FRACup] = mod[FRACup]
......@@ -161,7 +163,7 @@ class KineticWorker(object):
for nk in newKeys:
siteDict[mod['tpl']][nk] = mod[nk]
if mod.has_key('Mask'):
if 'Mask' in mod:
# The decoder should supply the off-target peak mask
mask = mod['Mask']
mask.append(0) # make sure we always mask the cognate position
......@@ -173,7 +175,7 @@ class KineticWorker(object):
# Mask out neighbor peaks that may have been caused by this mod
for offset in mask:
shadowPos = mod['tpl'] + strandSign * offset
if siteDict.has_key(shadowPos):
if shadowPos in siteDict:
siteDict[shadowPos]['offTargetPeak'] = True
finalCalls.extend(siteDict.values())
......@@ -518,11 +520,11 @@ class KineticWorker(object):
return 0.1
if np.isnan(rawIpds).any():
print "got nan: %s" % str(rawIpds)
print("got nan: %s" % str(rawIpds))
if rawIpds.mean() < 0.0001:
print "small"
print "got small: %s" % str(rawIpds)
print("small")
print("got small: %s" % str(rawIpds))
capValue = min(10, np.percentile(rawIpds, 99))
capIpds = np.minimum(rawIpds, capValue)
......
from __future__ import print_function
from __future__ import absolute_import
# Try to implement method used in Morishita et al.'s Medaka fish genome paper here
from collections import defaultdict, Counter
......@@ -13,7 +15,7 @@ from scipy.special import gammaln as gamln
from numpy import log, pi, log10, e, log1p, exp
import numpy as np
from MultiSiteCommon import MultiSiteCommon
from .MultiSiteCommon import MultiSiteCommon
class MedakaLdaEnricher(MultiSiteCommon):
......@@ -32,7 +34,7 @@ class MedakaLdaEnricher(MultiSiteCommon):
def useLDAmodel(self, kinetics, pos, model, up, down ):
""" Test out LDA model """
print "From use LDA model.\n"
print("From use LDA model.\n")
res = np.zeros((up + down + 1, 6))
ind = 0
......@@ -57,7 +59,7 @@ class MedakaLdaEnricher(MultiSiteCommon):
def callLDAstrand(self, kinetics, strand, model, up, down):
print "From callLDAstrand.\n"
print("From callLDAstrand.\n")
tmp = [d for d in kinetics if d["strand"] == strand]
tmp.sort(key=lambda x: x["tpl"])
......@@ -75,18 +77,18 @@ class MedakaLdaEnricher(MultiSiteCommon):
def aggregate(self, dataset, group_by_key, sum_value_key):
print "From aggregate.\n"
print("From aggregate.\n")
emp = {}
for item in dataset:
if item.has_key( sum_value_key ):
if emp.has_key( item[group_by_key] ):
if sum_value_key in item:
if item[group_by_key] in emp:
emp[ item[group_by_key] ] += item[sum_value_key]
else:
emp[ item[group_by_key] ] = item[sum_value_key]
# Need to go back over the set again?
for item in dataset:
if item.has_key( sum_value_key ):
if sum_value_key in item:
item[ sum_value_key ] = emp[ item[group_by_key] ]
return dataset
......@@ -95,7 +97,7 @@ class MedakaLdaEnricher(MultiSiteCommon):
def callEnricherFunction(self, kinetics, up=10, down=10):
print "From callEnricher function.\n"
print("From callEnricher function.\n")
fwd = self.callLDAstrand(kinetics, 0, self.fwd_model, up, down)
rev = self.callLDAstrand(kinetics, 1, self.rev_model, up, down)
......
......@@ -184,7 +184,7 @@ class MixtureEstimationMethods(object):
# Bootstraps mix prop estimates to return estimate and simple bounds for 95% confidence interval
def bootstrap(self, pos, mu0, mu1, nSamples=500):
if not self.rawKinetics.has_key(pos):
if pos not in self.rawKinetics:
return np.array([float('nan'), float('nan'), float('nan')])
res = np.zeros(3)
......
from __future__ import absolute_import
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
......@@ -38,8 +39,8 @@ from scipy.special import gammaln as gamln
from numpy import log, pi, log10, e, log1p, exp
import numpy as np
from MultiSiteCommon import MultiSiteCommon, canonicalBaseMap, modNames, ModificationPeakMask, FRAC, FRAClow, FRACup, log10e
from MixtureEstimationMethods import MixtureEstimationMethods
from .MultiSiteCommon import MultiSiteCommon, canonicalBaseMap, modNames, ModificationPeakMask, FRAC, FRAClow, FRACup, log10e
from .MixtureEstimationMethods import MixtureEstimationMethods
class ModificationDecode(MultiSiteCommon):
......@@ -274,7 +275,7 @@ class ModificationDecode(MultiSiteCommon):
modScore = self.scoreRegion(pos - self.post, pos + self.pre, modSeq)
modScores = self.getRegionScores(pos - self.post, pos + self.pre, modSeq)
if self.methylFractionFlag and self.rawKinetics.has_key(pos):
if self.methylFractionFlag and pos in self.rawKinetics:
if self.rawKinetics[pos]["coverage"] > self.methylMinCov:
modifiedMeanVectors = self.getContextMeans(pos - self.post, pos + self.pre, modSeq)
......@@ -283,7 +284,7 @@ class ModificationDecode(MultiSiteCommon):
noModScore = self.scoreRegion(pos - self.post, pos + self.pre, modSeq)
noModScores = self.getRegionScores(pos - self.post, pos + self.pre, modSeq)
if self.methylFractionFlag and self.rawKinetics.has_key(pos):
if self.methylFractionFlag and pos in self.rawKinetics:
if self.rawKinetics[pos]["coverage"] > self.methylMinCov:
unModifiedMeanVectors = self.getContextMeans(pos - self.post, pos + self.pre, modSeq)
......@@ -306,7 +307,7 @@ class ModificationDecode(MultiSiteCommon):
# if self.rawKinetics[pos].has_key('Ca5C'):
# llr = -self.rawKinetics[pos]['Ca5C']
# qModScore = 100 * llr * log10e + 100*log1p(exp(-llr))*log10e
if self.methylFractionFlag and self.rawKinetics.has_key(pos):
if self.methylFractionFlag and pos in self.rawKinetics:
if self.rawKinetics[pos]["coverage"] > self.methylMinCov:
......@@ -333,7 +334,7 @@ class ModificationDecode(MultiSiteCommon):
sc = 0.0
for pos in xrange(start, end + 1):
ctx = sequence[(pos - self.pre):(pos + self.post + 1)].tostring()
if self.scores.has_key(pos):
if pos in self.scores:
sc += self.scores[pos][ctx]
return sc
......@@ -343,7 +344,7 @@ class ModificationDecode(MultiSiteCommon):
for pos in xrange(start, end + 1):
ctx = sequence[(pos - self.pre):(pos + self.post + 1)].tostring()
if self.scores.has_key(pos):
if pos in self.scores:
scores[pos - start] = self.scores[pos][ctx]
return scores
......@@ -358,7 +359,7 @@ class ModificationDecode(MultiSiteCommon):
# Add a neighboring peak to the mask if
# a) it has a single-site qv > 20
# b) the observed IPDs are somewhat more likely under the modified hypothesis than the unmodified hypothesis
if self.rawKinetics.has_key(i) and self.rawKinetics[i]["score"] > 20:
if i in self.rawKinetics and self.rawKinetics[i]["score"] > 20:
if modScores[i - start] - noModScores[i - start] > 1.0:
maskPos.append(i - pos)
......