steps:
- bash: conda create -n foo -q --yes -c conda-forge -c bioconda numpy scipy matplotlib==2.1.2 nose flake8 plotly==2.0.12 pysam pyBigWig py2bit deeptoolsintervals cython
- bash: conda create -n foo -q --yes -c conda-forge -c bioconda python=$(python.version) numpy scipy matplotlib==3.1.1 nose flake8 plotly pysam pyBigWig py2bit deeptoolsintervals
displayName: Installing dependencies
- bash: |
source activate foo
python setup.py install
python -m pip install . --no-deps --ignore-installed -vvv
displayName: Installing deeptools
- bash: |
source activate foo
......
name: pypi
on: [push]
jobs:
pypi:
name: upload to pypi
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Setup conda
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
run: |
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
hash -r
conda config --set always_yes yes --set changeps1 no
- name: create env
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
run: |
export PATH=$HOME/miniconda/bin:$PATH
conda create -n foo -q --yes -c conda-forge -c bioconda python=3.7 twine
- name: sdist
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
rm -f dist/*
python setup.py sdist
- name: upload
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.pypi_password }}
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
twine upload dist/*
name: Test
on: [push]
jobs:
build-linux:
name: Test on Linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Setup conda
run: |
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
hash -r
conda config --set always_yes yes --set changeps1 no
- name: create env
run: |
export PATH=$HOME/miniconda/bin:$PATH
conda create -n foo -q --yes -c conda-forge -c bioconda python=3.7 numpy scipy matplotlib==3.1.1 nose flake8 plotly pysam pyBigWig py2bit deeptoolsintervals
- name: install deeptools
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
python -m pip install . --no-deps --ignore-installed -vvv
- name: PEP8
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
flake8 . --exclude=.venv,.build,build --ignore=E501,F403,E402,F999,F405,E722,W504,W605
- name: Test deepTools
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
nosetests --with-doctest -sv deeptools
build-osx:
name: Test on OSX
runs-on: macOS-latest
steps:
- uses: actions/checkout@v1
- name: Setup conda
run: |
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
hash -r
conda config --set always_yes yes --set changeps1 no
- name: create env
run: |
export PATH=$HOME/miniconda/bin:$PATH
conda create -n foo -q --yes -c conda-forge -c bioconda python=3.7 numpy scipy matplotlib==3.1.1 nose flake8 plotly pysam pyBigWig py2bit deeptoolsintervals
- name: install deeptools
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
python -m pip install . --no-deps --ignore-installed -vvv
- name: Test deepTools
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
nosetests --with-doctest -sv deeptools
planemo:
name: First planemo chunk
runs-on: ubuntu-latest
needs: build-linux
strategy:
matrix:
chunk: [1, 2, 3]
steps:
- uses: actions/checkout@v1
- name: Setup conda
run: |
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
hash -r
conda config --set always_yes yes --set changeps1 no
- name: create env
run: |
export PATH=$HOME/miniconda/bin:$PATH
conda create -n foo -q --yes -c conda-forge -c bioconda python=3.7 numpy scipy matplotlib==3.1.1 nose flake8 plotly pysam pyBigWig py2bit deeptoolsintervals planemo samtools
- name: install deeptools
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
python -m pip install . --no-deps --ignore-installed -vvv
- name: planemo
run: |
export PATH=$HOME/miniconda/bin:$PATH
source activate foo
./.planemo.sh ${{ matrix.chunk }}
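The test jobs above run nose with --with-doctest, which collects examples embedded in docstrings in addition to the regular test modules. A hypothetical docstring of that shape (an illustration only, not an actual deepTools function):

    def fragment_midpoint(start, end):
        """Return the integer midpoint of a fragment.

        >>> fragment_midpoint(100, 200)
        150
        """
        return (start + end) // 2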
3.3.1
* Fixed `--plotNumbers` not working in `plotCorrelation`. This was issue #838.
* Fixed compatibility with matplotlib 3 and restricted the requirement to at least that version.
* The Y-axis labels should once again appear in both plotHeatmap and plotProfile (issue #844). This was related to the previous point.
* Testing is no longer performed with Python 2.7, which will reach end of life in a couple of months.
* Various documentation updates (issues #868, #867 and #851).
* Increased support for BED files with track header lines (issue #866).
3.3.0
* `plotCoverage` now has a `--BED` option, to restrict plots and output to apply to a specific set of regions given by a BED or GTF file or files (issue #829).
......
......@@ -8,8 +8,6 @@ jobs:
vmImage: 'ubuntu-16.04'
strategy:
matrix:
Python27:
python.version: '2.7'
Python37:
python.version: '3.7'
maxParallel: 4
......@@ -24,8 +22,6 @@ jobs:
vmImage: 'macOS-10.13'
strategy:
matrix:
Python27:
python.version: '2.7'
Python37:
python.version: '3.7'
maxParallel: 1
......@@ -54,7 +50,7 @@ jobs:
steps:
- bash: echo "##vso[task.prependpath]$CONDA/bin"
displayName: Add conda to PATH
- bash: conda create -n foo -q --yes -c conda-forge -c bioconda numpy scipy matplotlib==2.1.2 nose flake8 plotly==2.0.12 pysam pyBigWig py2bit deeptoolsintervals cython planemo setuptools
- bash: conda create -n foo -q --yes -c conda-forge -c bioconda python=$(python.version) numpy scipy matplotlib==3.1.1 nose flake8 plotly pysam pyBigWig py2bit deeptoolsintervals planemo samtools
displayName: Installing dependencies
- bash: |
source activate foo
......
python-deeptools (3.3.0-1) UNSTABLE; urgency=medium
python-deeptools (3.3.1-1) UNSTABLE; urgency=medium
* Initial release (Closes: #934321)
-- Steffen Moeller <moeller@debian.org> Thu, 26 Sep 2019 14:37:35 +0200
python-deeptools (3.3.0-1) UNSTABLE; urgency=medium
* Initial packaging
TODO: build the documentation; this would involve fixing or overriding the
following lintian error:
source-is-missing docs/_static/welcome_owl.carousel.min.js
......
Index: python-deeptools/deeptools/test/testskip_heatmapper_images.py
===================================================================
--- python-deeptools.orig/deeptools/test/testskip_heatmapper_images.py
+++ python-deeptools/deeptools/test/testskip_heatmapper_images.py
@@ -69,14 +69,14 @@ def test_plotHeatmap_multiple_colors_mut
os.remove(outfile.name)
-def test_plotHeatmap_multiple_colormap_no_boxes():
- outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
- args = "-m {}/master_multi.mat.gz --colorMap Reds binary terrain --boxAroundHeatmaps no " \
- "--outFileName {}".format(ROOT, outfile.name).split()
- deeptools.plotHeatmap.main(args)
- res = compare_images(ROOT + '/heatmap_master_multi_colormap_no_box.png', outfile.name, tolerance)
- assert res is None, res
- os.remove(outfile.name)
+#def test_plotHeatmap_multiple_colormap_no_boxes():
+# outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
+# args = "-m {}/master_multi.mat.gz --colorMap Reds binary terrain --boxAroundHeatmaps no " \
+# "--outFileName {}".format(ROOT, outfile.name).split()
+# deeptools.plotHeatmap.main(args)
+# res = compare_images(ROOT + '/heatmap_master_multi_colormap_no_box.png', outfile.name, tolerance)
+# assert res is None, res
+# os.remove(outfile.name)
def test_plotHeatmap_interpolation():
python2to3_for_executables.patch
deactivateFailingPatch.patch
......@@ -33,4 +33,5 @@ Registry:
Entry: OMICS_08441
- Name: bio.tools
Entry: deeptools
- Name: conda:bioconda
Entry: deeptools
......@@ -2,4 +2,4 @@
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '3.3.0'
__version__ = '3.3.1'
......@@ -30,7 +30,7 @@ def parseArguments():
general.add_argument('--numberOfProcessors', '-p',
help='Number of processors to use. Type "max/2" to '
'use half the maximum number of processors or "max" '
'to use all available processors.',
'to use all available processors. (Default: %(default)s)',
metavar="INT",
type=parserCommon.numberOfProcessors,
default=1,
......@@ -80,7 +80,7 @@ def parseArguments():
filtering.add_argument('--filterRNAstrand',
help='Selects RNA-seq reads (single-end or paired-end) in '
'the given strand.',
'the given strand. (Default: %(default)s)',
choices=['forward', 'reverse'],
default=None)
......@@ -129,7 +129,7 @@ def parseArguments():
help='The minimum fragment length needed for read/pair '
'inclusion. This option is primarily useful '
'in ATACseq experiments, for filtering mono- or '
'di-nucleosome fragments.',
'di-nucleosome fragments. (Default: %(default)s)',
metavar='INT',
default=0,
type=int,
......@@ -137,7 +137,7 @@ def parseArguments():
filtering.add_argument('--maxFragmentLength',
help='The maximum fragment length needed for read/pair '
'inclusion.',
'inclusion. A value of 0 indicates no limit. (Default: %(default)s)',
metavar='INT',
default=0,
type=int,
......
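The help-string hunks in this and the following files all apply one pattern: argparse expands "%(default)s" from the option's default when it renders --help, so the displayed default can never drift from the value in the code. A minimal sketch of the pattern, using an illustrative option rather than any one specific deepTools argument:

    import argparse

    parser = argparse.ArgumentParser()
    # argparse substitutes the option's default into "%(default)s" when the
    # help text is rendered, so --help always shows the real default value.
    parser.add_argument('--binSize', '-bs',
                        metavar='INT',
                        type=int,
                        default=10,
                        help='Length, in bases, of the non-overlapping bins. '
                             '(Default: %(default)s)')
    parser.print_help()  # the --binSize entry ends with "(Default: 10)"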
......@@ -85,7 +85,7 @@ def getOptionalArgs():
'If a method is specified, then it will be used to compensate '
'for sequencing depth differences between the samples. '
'As an alternative, this can be set to None and an option from '
'--normalizeUsing <method> can be used.',
'--normalizeUsing <method> can be used. (Default: %(default)s)',
choices=['readCount', 'SES', 'None'],
default='readCount')
......@@ -97,14 +97,14 @@ def getOptionalArgs():
'If you do not have a good sequencing depth for '
'your samples consider increasing the sampling '
'regions\' size to minimize the probability '
'that zero-coverage regions are used.',
'that zero-coverage regions are used. (Default: %(default)s)',
default=1000,
type=int)
optional.add_argument('--numberOfSamples', '-n',
help='*Only relevant when SES is chosen for the '
'scaleFactorsMethod.* Number of samplings taken '
'from the genome to compute the scaling factors.',
'from the genome to compute the scaling factors. (Default: %(default)s)',
default=1e5,
type=int)
......@@ -125,7 +125,7 @@ def getOptionalArgs():
'values are interpreted as negative fold changes. '
'Instead of performing a computation using both files, the scaled signal can '
'alternatively be output for the first or second file using '
'the \'--operation first\' or \'--operation second\'',
'the \'--operation first\' or \'--operation second\'. (Default: %(default)s)',
default='log2',
choices=['log2', 'ratio', 'subtract', 'add', 'mean',
'reciprocal_ratio', 'first', 'second'],
......@@ -137,7 +137,7 @@ def getOptionalArgs():
'You can specify different values as pseudocounts for '
'the numerator and the denominator by providing two '
'values (the first value is used as the numerator '
'pseudocount and the second the denominator pseudocount).',
'pseudocount and the second the denominator pseudocount). (Default: %(default)s)',
default=[1],
type=float,
nargs='+',
......
......@@ -67,7 +67,7 @@ def get_optional_args():
optional.add_argument('--scaleFactor',
help='The computed scaling factor (or 1, if not applicable) will '
'be multiplied by this.',
'be multiplied by this. (Default: %(default)s)',
default=1.0,
type=float,
required=False)
......
......@@ -51,7 +51,7 @@ def parse_arguments():
parser.add_argument('--numberOfProcessors', '-p',
help='Number of processors to use. The default is '
'to use 1.',
'to use 1. (Default: %(default)s)',
metavar="INT",
type=int,
default=1,
......@@ -65,10 +65,10 @@ def parse_arguments():
nargs='+')
parser.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title.',
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
parser.add_argument('--maxFragmentLength',
help='The maximum fragment length in the histogram. A value of 0 (the default) indicates to use twice the mean fragment length',
help='The maximum fragment length in the histogram. A value of 0 (the default) indicates to use twice the mean fragment length. (Default: %(default)s)',
default=0,
type=int)
parser.add_argument('--logScale',
......@@ -76,7 +76,7 @@ def parse_arguments():
action='store_true')
parser.add_argument('--binSize', '-bs',
metavar='INT',
help='Length in bases of the window used to sample the genome. (default 1000)',
help='Length in bases of the window used to sample the genome. (Default: %(default)s)',
default=1000,
type=int)
parser.add_argument('--distanceBetweenBins', '-n',
......@@ -87,7 +87,7 @@ def parse_arguments():
'for high coverage samples, while smaller values are useful for '
'lower coverage samples. Note that if you specify a value that '
'results in too few (<1000) reads sampled, the value will be '
'decreased. (default 1000000)',
'decreased. (Default: %(default)s)',
default=1000000,
type=int)
parser.add_argument('--blackListFileName', '-bl',
......
......@@ -54,7 +54,7 @@ def parse_arguments(args=None):
'You can specify different values as pseudocounts for '
'the numerator and the denominator by providing two '
'values (the first value is used as the numerator '
'pseudocount and the second the denominator pseudocount).',
'pseudocount and the second the denominator pseudocount). (Default: %(default)s)',
default=1,
nargs='+',
action=parserCommon.requiredLength(1, 2),
......@@ -73,11 +73,10 @@ def parse_arguments(args=None):
'the negative of the inverse of the ratio '
'if the ratio is less than 0. The resulting '
'values are interpreted as negative fold changes. '
'*NOTE*: Only with --operation subtract can --normalizeUsing RPGC or '
'--normalizeUsing RPKM be used. Instead of performing a '
'Instead of performing a '
'computation using both files, the scaled signal can '
'alternatively be output for the first or second file using '
'the \'--operation first\' or \'--operation second\'',
'the \'--operation first\' or \'--operation second\' (Default: %(default)s)',
default='log2',
choices=['log2', 'ratio', 'subtract', 'add', 'mean',
'reciprocal_ratio', 'first', 'second'],
......
......@@ -94,7 +94,7 @@ def getRequiredArgs():
optional.add_argument('--sampleSize',
default=5e7,
help='Number of sampling points to be considered.',
help='Number of sampling points to be considered. (Default: %(default)s)',
type=int)
optional.add_argument('--extraSampling',
......@@ -130,7 +130,7 @@ def getRequiredArgs():
'standard fragment size for Illumina machines. However, '
'if the depth of sequencing is low, a larger bin size '
'will be required, otherwise many bins will not '
'overlap with any read')
'overlap with any read (Default: %(default)s)')
return parser
......
......@@ -144,7 +144,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
default=1000,
type=int,
help='Distance in bases to which all regions will '
'be fit.')
'be fit. (Default: %(default)s)')
optional.add_argument('--startLabel',
default='TSS',
help='Label shown in the plot for the start of '
......@@ -153,38 +153,38 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'e.g. "peak start". Note that this is only '
'useful if you plan to plot the results yourself '
'and not, for example, with plotHeatmap, which '
'will override this.')
'will override this. (Default: %(default)s)')
optional.add_argument('--endLabel',
default='TES',
help='Label shown in the plot for the region '
'end. Default is TES (transcription end site). '
'See the --startLabel option for more '
'information. ')
'information. (Default: %(default)s) ')
optional.add_argument('--beforeRegionStartLength', '-b', '--upstream',
default=0,
type=int,
help='Distance upstream of the start site of '
'the regions defined in the region file. If the '
'regions are genes, this would be the distance '
'upstream of the transcription start site.')
'upstream of the transcription start site. (Default: %(default)s)')
optional.add_argument('--afterRegionStartLength', '-a', '--downstream',
default=0,
type=int,
help='Distance downstream of the end site '
'of the given regions. If the '
'regions are genes, this would be the distance '
'downstream of the transcription end site.')
'downstream of the transcription end site. (Default: %(default)s)')
optional.add_argument("--unscaled5prime",
default=0,
type=int,
help='Number of bases at the 5-prime end of the '
'region to exclude from scaling. By default, '
'each region is scaled to a given length (see the --regionBodyLength option). In some cases it is useful to look at unscaled signals around region boundaries, so this setting specifies the number of unscaled bases on the 5-prime end of each boundary.')
'each region is scaled to a given length (see the --regionBodyLength option). In some cases it is useful to look at unscaled signals around region boundaries, so this setting specifies the number of unscaled bases on the 5-prime end of each boundary. (Default: %(default)s)')
optional.add_argument("--unscaled3prime",
default=0,
type=int,
help='Like --unscaled5prime, but for the 3-prime '
'end.')
'end. (Default: %(default)s)')
elif case == 'reference-point':
optional.add_argument('--referencePoint',
......@@ -195,7 +195,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'region end (TES) or the center of the region. '
'Note that regardless of what you specify, '
'plotHeatmap/plotProfile will default to using "TSS" as the '
'label.')
'label. (Default: %(default)s)')
# set region body length to zero for reference point mode
optional.add_argument('--regionBodyLength', help=argparse.SUPPRESS,
......@@ -207,13 +207,13 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
type=int,
metavar='INT bp',
help='Distance upstream of the reference-point '
'selected.')
'selected. (Default: %(default)s)')
optional.add_argument('--afterRegionStartLength', '-a', '--downstream',
default=1500,
metavar='INT bp',
type=int,
help='Distance downstream of the '
'reference-point selected.')
'reference-point selected. (Default: %(default)s)')
optional.add_argument('--nanAfterEnd',
action='store_true',
help='If set, any values after the region end '
......@@ -225,7 +225,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
optional.add_argument('--binSize', '-bs',
help='Length, in bases, of the non-overlapping '
'bins for averaging the score over the '
'regions length.',
'regions length. (Default: %(default)s)',
type=int,
default=10)
......@@ -240,7 +240,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'the input files. If you require the output order to '
'match that of the input regions, then either specify '
'"keep" or use computeMatrixOperations to resort the '
'results file.',
'results file. (Default: %(default)s)',
choices=["descend", "ascend", "no", "keep"],
default='keep')
......@@ -249,7 +249,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'sorting. The value is computed for each row.'
'Note that the region_length option will lead '
'to a dotted line within the heatmap that indicates '
'the end of the regions.',
'the end of the regions. (Default: %(default)s)',
choices=["mean", "median", "max", "min", "sum",
"region_length"],
default='mean')
......@@ -268,7 +268,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
help='Define the type of statistic that should be '
'used over the bin size range. The '
'options are: "mean", "median", "min", "max", "sum" '
'and "std". The default is "mean".')
'and "std". The default is "mean". (Default: %(default)s)')
optional.add_argument('--missingDataAsZero',
help='If set, missing data (NAs) will be treated as zeros. '
......@@ -291,7 +291,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'will be skipped. This is useful to skip, '
'for example, genes where the read count is zero '
'for any of the bins. This could be the result of '
'unmappable areas and can bias the overall results.')
'unmappable areas and can bias the overall results. (Default: %(default)s)')
optional.add_argument('--maxThreshold',
default=None,
......@@ -301,7 +301,7 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
'will be skipped. The maxThreshold is useful to '
'skip those few regions with very high read counts '
'(e.g. micro satellites) that may bias the average '
'values.')
'values. (Default: %(default)s)')
optional.add_argument('--blackListFileName', '-bl',
help="A BED file containing regions that should be excluded from all analyses. Currently this works by rejecting genomic chunks that happen to overlap an entry. Consequently, for BAM files, if a read partially overlaps a blacklisted region or a fragment spans over it, then the read/fragment might still be considered.",
......@@ -336,13 +336,13 @@ def computeMatrixOptArgs(case=['scale-regions', 'reference-point'][0]):
optional.add_argument('--scale',
help='If set, all values are multiplied by '
'this number.',
'this number. (Default: %(default)s)',
type=float,
default=1)
optional.add_argument('--numberOfProcessors', '-p',
help='Number of processors to use. Type "max/2" to '
'use half the maximum number of processors or "max" '
'to use all available processors.',
'to use all available processors. (Default: %(default)s)',
metavar="INT",
type=numberOfProcessors,
default=1,
......
......@@ -270,7 +270,7 @@ def sortArgs():
default='transcript',
help='When a GTF file is used to provide regions, only '
'entries with this value as their feature (column 2) '
'will be processed as transcripts.')
'will be processed as transcripts. (Default: %(default)s)')
optional.add_argument('--transcript_id_designator',
default='transcript_id',
......@@ -281,7 +281,7 @@ def sortArgs():
'\'transcript_id "ACTB"\', for a key of transcript_id '
'and a value of ACTB). In some cases it can be '
'convenient to use a different identifier. To do so, set '
'this to the desired key.')
'this to the desired key. (Default: %(default)s)')
return parser
......@@ -679,6 +679,8 @@ def sortMatrix(hm, regionsFileName, transcriptID, transcript_id_designator, verb
if not labelColumn:
labelColumn = dti.getLabel(line)
line = dti.getNext(fp)
while line.startswith("track "):
line = dti.getNext(fp)
# Find the label column
subtract = 0
......
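The new loop above makes sortMatrix tolerant of BED files that start with track header lines (issue #866). A minimal sketch of the same idea, assuming a plain text-mode file object rather than the deeptoolsintervals reader used in the patched code:

    # Skip any leading "track ..." header lines so the first line handed to
    # the parser is an actual BED record.
    def first_data_line(fp):
        line = fp.readline()
        while line.startswith("track "):
            line = fp.readline()
        return line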
......@@ -299,8 +299,8 @@ class Correlation:
# a good contrast between the correlation numbers that are
# plotted on black.
if plot_numbers:
cmap = cmap.from_list(colormap + "clipped",
cmap(np.linspace(0, 0.9, 10)))
cmap = pltcolors.LinearSegmentedColormap.from_list(colormap + "clipped",
cmap(np.linspace(0, 0.9, 10)))
cmap.set_under((0., 0., 1.))
# Plot distance matrix.
......
......@@ -4,6 +4,7 @@ import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.hierarchy as sch
from matplotlib import rcParams
import matplotlib.colors as pltcolors
rcParams['pdf.fonttype'] = 42
rcParams['svg.fonttype'] = 'none'
......@@ -50,8 +51,8 @@ def plot_correlation(corr_matrix, labels, plotFileName, vmax=None,
# a good contrast between the correlation numbers that are
# plotted on black.
if plot_numbers:
cmap = cmap.from_list(colormap + "clipped",
cmap(np.linspace(0, 0.9, 10)))
cmap = pltcolors.LinearSegmentedColormap.from_list(colormap + "clipped",
cmap(np.linspace(0, 0.9, 10)))
cmap.set_under((0., 0., 1.))
# Plot distance matrix.
......
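Both hunks above apply the same fix: calling from_list through the colormap instance is not reliable under matplotlib 3 (not every colormap class provides it), so the clipped colormap used behind --plotNumbers is now built explicitly with matplotlib.colors.LinearSegmentedColormap.from_list. A standalone sketch of the call, with an example colormap name rather than deepTools' runtime choice:

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.colors as pltcolors

    cmap = plt.get_cmap("RdYlBu")  # example colormap, not deepTools' default
    # Rebuild the colormap from only the lower 90% of its range so that the
    # correlation numbers printed on the darkest cells stay readable.
    clipped = pltcolors.LinearSegmentedColormap.from_list(
        "RdYlBu" + "clipped", cmap(np.linspace(0, 0.9, 10)))
    clipped.set_under((0., 0., 1.))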