before_script:
- docker info
- cat /etc/hosts
- export PYTHONIOENCODING=utf-8
after_script:
# We need to clean up any files that Toil may have made via Docker that
# aren't deletable by the Gitlab user. If we don't do this, Gitlab will try
@@ -11,17 +12,150 @@ after_script:
stages:
- main_tests
- test
- integration
py2-job:
# Python2.7
py2_batch_systems:
stage: test
script:
- pwd
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor && PYTHONIOENCODING=utf-8 make test
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/batchSystems/batchSystemTest.py
- python -m pytest src/toil/test/mesos/MesosDataStructuresTest.py
py3-job:
py2_cwl:
stage: test
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor && PYTHONIOENCODING=utf-8 make test
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/cwl/cwlTest.py
py2_wdl:
stage: test
script:
- pwd
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/wdl/toilwdlTest.py
py2_jobstore_and_provisioning:
stage: test
script:
- pwd
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/sort/sortTest.py
- python -m pytest src/toil/test/provisioners/aws/awsProvisionerTest.py
# - python -m pytest src/toil/test/provisioners/azureProvisionerTest.py # disabled and no longer maintained
- python -m pytest src/toil/test/provisioners/clusterScalerTest.py
- python -m pytest src/toil/test/provisioners/gceProvisionerTest.py
py2_main:
stage: main_tests
script:
- pwd
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/src
- python -m pytest src/toil/test/utils
py2_integration_jobstore:
stage: integration
script:
- pwd
- apt update && DEBIAN_FRONTEND=noninteractive apt install -y tzdata && apt install -y awscli jq
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor awscli
- export TOIL_TEST_INTEGRATIVE=True
- export TOIL_AWS_KEYNAME=id_rsa
- export TOIL_AWS_ZONE=us-west-2a
- python setup_gitlab_ssh.py
- mkdir ~/.aws
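# Fetch the CI runner's AWS credentials from Secrets Manager (jq and the awscli were installed above).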
- echo -e $(aws secretsmanager get-secret-value --secret-id allspark/runner/credentials --region us-west-2 | jq -r .SecretString) > ~/.aws/credentials
- python -m pytest src/toil/test/jobStores/jobStoreTest.py
py2_integration_sort:
stage: integration
script:
- pwd
- apt update && DEBIAN_FRONTEND=noninteractive apt install -y tzdata && apt install -y awscli jq
- virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor awscli
- export TOIL_TEST_INTEGRATIVE=True
- export TOIL_AWS_KEYNAME=id_rsa
- export TOIL_AWS_ZONE=us-west-2a
- python setup_gitlab_ssh.py
- mkdir ~/.aws
- echo -e $(aws secretsmanager get-secret-value --secret-id allspark/runner/credentials --region us-west-2 | jq -r .SecretString) > ~/.aws/credentials
- python -m pytest src/toil/test/sort/sortTest.py
- python -m pytest src/toil/test/provisioners/clusterScalerTest.py
#py2_integration_provisioner:
# stage: integration
# script:
# - pwd
# - apt update && DEBIAN_FRONTEND=noninteractive apt install -y tzdata && apt install -y awscli jq
# - virtualenv -p python2.7 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor awscli
# - export TOIL_TEST_INTEGRATIVE=True
# - export TOIL_AWS_KEYNAME=id_rsa
# - export TOIL_AWS_ZONE=us-west-2a
# - python setup_gitlab_ssh.py
# - mkdir ~/.aws
# - echo -e $(aws secretsmanager get-secret-value --secret-id allspark/runner/credentials --region us-west-2 | jq -r .SecretString) > ~/.aws/credentials
# - python -m pytest src/toil/test/provisioners/aws/awsProvisionerTest.py
# Python3.6
py3_batch_systems:
stage: test
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/batchSystems/batchSystemTest.py
- python -m pytest src/toil/test/mesos/MesosDataStructuresTest.py
py3_cwl:
stage: test
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/cwl/cwlTest.py
py3_wdl:
stage: test
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/wdl/toilwdlTest.py
py3_jobstore_and_provisioning:
stage: test
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/jobStores/jobStoreTest.py
- python -m pytest src/toil/test/sort/sortTest.py
- python -m pytest src/toil/test/provisioners/aws/awsProvisionerTest.py
# - python -m pytest src/toil/test/provisioners/azureProvisionerTest.py # disabled and no longer maintained
- python -m pytest src/toil/test/provisioners/clusterScalerTest.py
- python -m pytest src/toil/test/provisioners/gceProvisionerTest.py
py3_main:
stage: main_tests
script:
- pwd
- virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor
- python -m pytest src/toil/test/src
- python -m pytest src/toil/test/utils
#py3_integration:
# stage: integration
# script:
# - pwd
# - apt update && DEBIAN_FRONTEND=noninteractive apt install -y tzdata && apt install -y awscli jq
# - virtualenv -p python3.6 venv && . venv/bin/activate && make prepare && make develop extras=[all] && pip install htcondor awscli
# - export TOIL_TEST_INTEGRATIVE=True
# - export TOIL_AWS_KEYNAME=id_rsa
# - export TOIL_AWS_ZONE=us-west-2a
# - python setup_gitlab_ssh.py
# - mkdir ~/.aws
# - echo -e $(aws secretsmanager get-secret-value --secret-id allspark/runner/credentials --region us-west-2 | jq -r .SecretString) > ~/.aws/credentials
# - python -m pytest src/toil/test/jobStores/jobStoreTest.py
@@ -59,9 +59,6 @@ test. This test does not capture terminal output.
The 'integration_test' target is the same as the previous one, except that it does capture output.
The 'test_parallel' target runs Toil's unit tests in parallel and generates an XML test report
from the results. It is designed to be used only in Jenkins.
The 'pypi' target publishes the current commit of Toil to PyPI after enforcing that the working
copy and the index are clean.
@@ -88,6 +85,10 @@ export help
help:
@printf "$$help"
# This Makefile uses bash features like printf and <()
SHELL=bash
python=python
@@ -102,14 +103,10 @@ dist_version:=$(shell $(python) version_template.py distVersion)
sdist_name:=toil-$(dist_version).tar.gz
docker_tag:=$(shell $(python) version_template.py dockerTag)
default_docker_registry:=$(shell $(python) version_template.py dockerRegistry)
default_docker_registry:=quay.io/ucsc_cgl
docker_path:=$(strip $(shell which docker))
ifdef docker_registry
export TOIL_DOCKER_REGISTRY?=$(docker_registry)
else
export TOIL_DOCKER_REGISTRY?=$(default_docker_registry)
endif
export TOIL_DOCKER_REGISTRY?=$(shell $(python) version_template.py dockerRegistry)
export TOIL_DOCKER_NAME?=$(shell $(python) version_template.py dockerName)
export TOIL_APPLIANCE_SELF:=$(TOIL_DOCKER_REGISTRY)/$(TOIL_DOCKER_NAME):$(docker_tag)
@@ -122,12 +119,14 @@ endif
develop: check_venv
$(pip) install -e .$(extras)
clean_develop: check_venv
- $(pip) uninstall -y toil
- rm -rf src/*.egg-info
- rm src/toil/version.py
sdist: dist/$(sdist_name)
dist/$(sdist_name): check_venv
@test -f dist/$(sdist_name) && mv dist/$(sdist_name) dist/$(sdist_name).old || true
$(python) setup.py sdist
@@ -137,48 +136,35 @@ dist/$(sdist_name): check_venv
&& printf "$(cyan)No significant changes to sdist, reinstating backup.$(normal)\n" \
|| rm dist/$(sdist_name).old ) \
|| true
clean_sdist:
- rm -rf dist
- rm src/toil/version.py
# We always claim to be Travis, so that local test runs will not skip Travis tests.
# Gitlab doesn't run tests via the Makefile.
# This target will skip building docker and all docker based tests
test_offline: check_venv check_build_reqs
@printf "$(cyan)All docker related tests will be skipped.$(normal)\n"
TOIL_SKIP_DOCKER=True \
TRAVIS=true \
$(python) -m pytest $(pytest_args_local) $(tests_local)
# The auto-deployment test needs the docker appliance
test: check_venv check_build_reqs docker
TOIL_APPLIANCE_SELF=$(docker_registry)/$(docker_base_name):$(docker_tag) \
TRAVIS=true \
$(python) -m pytest --cov=toil $(pytest_args_local) $(tests)
# For running integration tests locally in series (uses the -s argument for pyTest)
integration_test_local: check_venv check_build_reqs sdist push_docker
TOIL_TEST_INTEGRATIVE=True \
TRAVIS=true \
$(python) run_tests.py --local integration-test $(tests)
# These two targets are for backwards compatibility but will be removed shortly
# FIXME when they are removed add check_running_on_jenkins to the jenkins targets
test_parallel: jenkins_test_parallel
integration_test: jenkins_test_integration
# This target is designed only for use on Jenkins
jenkins_test_parallel: check_venv check_build_reqs docker
$(python) run_tests.py test $(tests)
# This target is designed only for use on Jenkins
jenkins_test_integration: check_venv check_build_reqs sdist push_docker
TOIL_TEST_INTEGRATIVE=True $(python) run_tests.py integration-test $(tests)
pypi: check_venv check_clean_working_copy check_running_on_jenkins
$(pip) install setuptools --upgrade
$(python) setup.py egg_info sdist bdist_egg upload
clean_pypi:
- rm -rf build/
test_integration: check_venv check_build_reqs docker
TRAVIS=true \
$(python) run_tests.py integration-test $(tests)
ifdef TOIL_DOCKER_REGISTRY
@@ -197,7 +183,6 @@ define tag_docker
@printf "$(green)Tagged appliance image $1 as $2.$(normal)\n"
endef
docker: docker/Dockerfile
@set -ex \
; cd docker \
@@ -230,13 +215,6 @@ clean_docker:
-rm docker/Dockerfile docker/$(sdist_name)
-docker rmi $(docker_image):$(docker_tag)
obliterate_docker: clean_docker
-@set -x \
; docker images $(docker_image) \
| tail -n +2 | awk '{print $$1 ":" $$2}' | uniq \
| xargs docker rmi
-docker images -qf dangling=true | xargs docker rmi
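# Registry pushes can fail transiently; retry each image up to 5 times, sleeping 60s between attempts.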
push_docker: docker check_docker_registry
for i in $$(seq 1 5); do docker push $(docker_image):$(docker_tag) && break || sleep 60; done
for i in $$(seq 1 5); do docker push $(grafana_image):$(docker_tag) && break || sleep 60; done
@@ -254,27 +232,23 @@ endif
docs: check_venv check_build_reqs
# Strange, but seemingly benign Sphinx warning floods stderr if not filtered:
cd docs && make html
clean_docs: check_venv
- cd docs && make clean
clean: clean_develop clean_sdist clean_pypi clean_docs
check_build_reqs:
@$(python) -c 'import mock; import pytest' \
|| ( printf "$(red)Build requirements are missing. Run 'make prepare' to install them.$(normal)\n" ; false )
prepare: check_venv
$(pip) install mock==1.0.1 pytest==4.3.1 pytest-cov==2.6.1 stubserver==1.0.1 pytest-timeout==1.3.3 cwltest
check_venv:
@$(python) -c 'import sys, os; sys.exit( int( 0 if "VIRTUAL_ENV" in os.environ else 1 ) )' \
|| ( printf "$(red)A virtualenv must be active.$(normal)\n" ; false )
check_clean_working_copy:
@printf "$(green)Checking if your working copy is clean ...$(normal)\n"
@git diff --exit-code > /dev/null \
@@ -286,33 +260,23 @@ check_clean_working_copy:
; git ls-files --other --exclude-standard --directory \
; false )
check_running_on_jenkins:
@printf "$(green)Checking if running on Jenkins ...$(normal)\n"
@test -n "$$BUILD_NUMBER" \
|| ( printf "$(red)This target should only be invoked on Jenkins.$(normal)\n" ; false )
check_docker_registry:
@test "$(default_docker_registry)" != "$(TOIL_DOCKER_REGISTRY)" || test -n "$$BUILD_NUMBER" \
|| ( printf '$(red)Please set TOIL_DOCKER_REGISTRY to a value other than \
$(default_docker_registry) and ensure that you have permissions to push \
to that registry. Only CI builds should push to $(default_docker_registry).$(normal)\n' ; false )
check_cpickle:
# fail if cPickle.dump(s) called without HIGHEST_PROTOCOL
# https://github.com/BD2KGenomics/toil/issues/1503
! find src -iname '*.py' | xargs grep 'cPickle.dump' | grep --invert-match HIGHEST_PROTOCOL
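For reference, a hedged sketch of the pickling convention this check enforces (the payload and file name are hypothetical):

    # Always pass HIGHEST_PROTOCOL when pickling with cPickle (see issue #1503).
    import cPickle

    data = {'jobStoreID': 'abc123'}  # hypothetical payload
    serialized = cPickle.dumps(data, cPickle.HIGHEST_PROTOCOL)
    with open('job.pickle', 'wb') as f:
        cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)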
.PHONY: help \
prepare \
check_cpickle \
develop clean_develop \
sdist clean_sdist \
test test_offline test_parallel integration_test \
jenkins_test_parallel jenkins_test_integration \
pypi clean_pypi \
docs clean_docs \
clean \
ATTENTION: Toil has moved from https://github.com/BD2KGenomics/toil to https://github.com/DataBiosphere/toil as of July 5th, 2018.
ATTENTION: Toil will drop Python 2.7 support on January 1, 2020, when Python 2.7 itself is scheduled to die; the last Python 2.7 compatible release of Toil will be made at that time.
Toil is a scalable, efficient, cross-platform (Linux & macOS) pipeline management system,
written entirely in Python, and designed around the principles of functional
programming.
Our next scheduled release is May 24, 2019.
* Check the `website`_ for a description of Toil and its features.
* Full documentation for the latest stable release can be found at
`Read the Docs`_.
@@ -21,3 +19,5 @@ Our next scheduled release is May 24, 2019.
.. image:: https://badges.gitter.im/bd2k-genomics-toil/Lobby.svg
:alt: Join the chat at https://gitter.im/bd2k-genomics-toil/Lobby
:target: https://gitter.im/bd2k-genomics-toil/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
Note: Toil moved from https://github.com/BD2KGenomics/toil to https://github.com/DataBiosphere/toil on July 5th, 2018.
toil (3.21.0-1) unstable; urgency=medium
* New upstream version
-- Michael R. Crusoe <michael.crusoe@gmail.com> Sun, 17 Nov 2019 15:13:56 +0100
toil (3.20.0-1) unstable; urgency=medium
[ Michael R. Crusoe ]
Author: Michael R. Crusoe <michael.crusoe@gmail.com>
Description: Adjust to newer version of cwltool
--- a/src/toil/cwl/cwltoil.py
+++ b/src/toil/cwl/cwltoil.py
@@ -41,6 +41,7 @@ import six
from schema_salad import validate
from schema_salad.schema import Names
import schema_salad.ref_resolver
+from schema_salad.schema import Names
import cwltool.errors
import cwltool.load_tool
@@ -469,6 +470,7 @@ class CWLJobWrapper(Job):
self.addChild(realjob)
return realjob.rv()
+
def _makeNestedTempDir(top, seed, levels=2):
"""
Gets a temporary directory in the hierarchy of directories under a given
@@ -615,6 +617,7 @@ class CWLJob(Job):
return output
+
def makeJob(tool, jobobj, step_inputs, runtime_context):
"""Create the correct Toil Job object for the CWL tool (workflow, job, or job
wrapper for dynamic resource requirements.)
@@ -1149,7 +1152,8 @@ def main(args=None, stdout=sys.stdout):
if options.provisioner and not options.jobStore:
raise NoSuchJobStoreException(
- 'Please specify a jobstore with the --jobStore option when specifying a provisioner.')
+ 'Please specify a jobstore with the --jobStore option when '
+ 'specifying a provisioner.')
use_container = not options.no_container
@@ -1271,7 +1275,8 @@ def main(args=None, stdout=sys.stdout):
return 33
wf1.cwljob = initialized_job_order
- if wf1 is CWLJob: # Clean up temporary directories only created with CWLJobs.
+ if wf1 is CWLJob:
+ # Clean up temporary directories only created with CWLJobs.
wf1.addFollowOnFn(cleanTempDirs, wf1)
outobj = toil.start(wf1)
--- a/src/toil/jobGraph.py
+++ b/src/toil/jobGraph.py
@@ -103,6 +103,9 @@ class JobGraph(JobNode):
def __hash__(self):
return hash(self.jobStoreID)
+ def __hash__(self):
+ return hash(self.jobStoreID)
+
def setupJobAfterFailure(self, config):
"""
Reduce the remainingRetryCount if greater than zero and set the memory
@@ -2,7 +2,7 @@ From: Michael R. Crusoe <michael.crusoe@gmail.com>
Subject: cherry pick py3 testing fixes from upstream
--- toil.orig/src/toil/test/jobStores/jobStoreTest.py
+++ toil/src/toil/test/jobStores/jobStoreTest.py
@@ -54,7 +54,6 @@
@@ -55,7 +55,6 @@
from toil.job import Job, JobNode
from toil.jobStores.abstractJobStore import (NoSuchJobException,
NoSuchFileException)
@@ -10,67 +10,3 @@ Subject: cherry pick py3 testing fixes from upstream
from toil.jobStores.fileJobStore import FileJobStore
from toil.statsAndLogging import StatsAndLogging
from toil.test import (ToilTest,
@@ -656,7 +655,7 @@
@classmethod
def makeImportExportTests(cls):
- testClasses = [FileJobStoreTest, AWSJobStoreTest, AzureJobStoreTest, GoogleJobStoreTest]
+ testClasses = [FileJobStoreTest, AWSJobStoreTest, AzureJobStoreTest]
activeTestClassesByName = {testCls.__name__: testCls
for testCls in testClasses
@@ -1151,54 +1150,6 @@
os.unlink(path)
-@needs_google
-class GoogleJobStoreTest(AbstractJobStoreTest.Test):
- projectID = os.getenv('TOIL_GOOGLE_PROJECTID')
- headers = {"x-goog-project-id": projectID}
-
- def _createJobStore(self):
- from toil.jobStores.googleJobStore import GoogleJobStore
- return GoogleJobStore(GoogleJobStoreTest.projectID + ":" + self.namePrefix)
-
- def _corruptJobStore(self):
- # The Google job store has only one resource, the bucket, so we can't corrupt it without
- # fully deleting it.
- pass
-
- def _prepareTestFile(self, bucket, size=None):
- from toil.jobStores.googleJobStore import GoogleJobStore
- fileName = 'testfile_%s' % uuid.uuid4()
- url = 'gs://%s/%s' % (bucket.name, fileName)
- if size is None:
- return url
- with open('/dev/urandom', 'r') as readable:
- contents = readable.read(size)
- GoogleJobStore._writeToUrl(StringIO(contents), urlparse.urlparse(url))
- return url, hashlib.md5(contents).hexdigest()
-
- def _hashTestFile(self, url):
- from toil.jobStores.googleJobStore import GoogleJobStore
- contents = GoogleJobStore._getBlobFromURL(urlparse.urlparse(url)).download_as_string()
- return hashlib.md5(contents).hexdigest()
-
- @googleRetry
- def _createExternalStore(self):
- from google.cloud import storage
- bucketName = ("import-export-test-" + str(uuid.uuid4()))
- storageClient = storage.Client()
- return storageClient.create_bucket(bucketName)
-
- @googleRetry
- def _cleanUpExternalStore(self, bucket):
- # this is copied from googleJobStore.destroy
- try:
- bucket.delete(force=True)
- # throws ValueError if bucket has more than 256 objects. Then we must delete manually
- except ValueError:
- bucket.delete_blobs(bucket.list_blobs)
- bucket.delete()
-
-
@needs_aws
class AWSJobStoreTest(AbstractJobStoreTest.Test):
Author: Michael R. Crusoe <michael.crusoe@gmail.com>
Description: skip galaxy-lib, not yet packaged for Debian
--- a/setup.py
+++ b/setup.py
@@ -83,8 +83,7 @@ def runSetup():
gcs]
--- toil.orig/setup.py
+++ toil/setup.py
@@ -76,8 +76,7 @@
azureStorage]
cwl_reqs = [
cwltool,
- schemaSalad,
- galaxyLib]
+ schemaSalad]
wdl_reqs = []
htcondor_reqs = [
htcondor]
encryption_reqs = [
pynacl]
google_reqs = [
Author: Michael R. Crusoe <michael.crusoe@gmail.com>
Description: pathlib2 isn't needed for python3-toil
--- toil.orig/setup.py
+++ toil/setup.py
@@ -47,7 +47,6 @@
@@ -49,7 +49,6 @@
dateutil = 'python-dateutil'
addict = 'addict<=2.2.0'
sphinx = 'sphinx==1.7.5'
@@ -8,7 +10,7 @@
core_reqs = [
dill,
@@ -58,8 +57,7 @@
@@ -60,8 +59,7 @@
dateutil,
psutil,
subprocess32,
@@ -16,5 +18,5 @@
- pathlib2]
+ sphinx]
mesos_reqs = [
addict,
aws_reqs = [
boto,
@@ -3,7 +3,6 @@ no_virtualenv_to_create_manpages.patch
setting_version.patch
no_galaxy_lib
debianize_docs
adjust_to_newer_cwltool
fix_tests
soften-mesos-deps
remove-pathlib2-dep
@@ -2,16 +2,7 @@ Author: Michael R. Crusoe <michael.crusoe@gmail.com>
Subject: Use Debian's newer python3-psutil
--- toil.orig/setup.py
+++ toil/setup.py
@@ -25,7 +25,7 @@
futures = 'futures==3.1.1'
pycryptodome = 'pycryptodome==3.5.1'
pymesos = 'pymesos==0.3.7'
- psutil = 'psutil==3.0.1'
+ psutil = 'psutil>=3.0.1'
azureCosmosdbTable = 'azure-cosmosdb-table==0.37.1'
azureAnsible = 'ansible[azure]==2.5.0a1'
azureStorage = 'azure-storage==0.35.1'
@@ -58,11 +58,11 @@
@@ -60,7 +60,6 @@
dateutil,
psutil,
subprocess32,
@@ -19,8 +10,11 @@ Subject: Use Debian's newer python3-psutil
sphinx,
pathlib2]
@@ -89,6 +88,7 @@
kubernetes]
mesos_reqs = [
+ addict,
pymesos,
+ addict,
psutil]
aws_reqs = [
wdl_reqs = []
Author: Michael R. Crusoe <michael.crusoe@gmail.com>
Description: allow the use of Debian packaged dependencies
--- a/setup.py
+++ b/setup.py
@@ -34,8 +34,8 @@ def runSetup():
--- toil.orig/setup.py
+++ toil/setup.py
@@ -34,7 +34,7 @@
gcs = 'google-cloud-storage==1.6.0'
gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
apacheLibcloud = 'apache-libcloud==2.2.1'
- cwltool = 'cwltool==1.0.20190228155703'
- schemaSalad = 'schema-salad<5,>=4.1'
+ cwltool = 'cwltool>=1.0.20181217162649'
+ schemaSalad = 'schema-salad<5,>=3.0.20181206233650'
- cwltool = 'cwltool==1.0.20190906054215'
+ cwltool = 'cwltool>=1.0.20190906054215'
schemaSalad = 'schema-salad<5,>=4.5.20190815125611'
galaxyLib = 'galaxy-lib==18.9.2'
htcondor = 'htcondor>=8.6.0'
dill = 'dill==0.2.7.1'
@@ -171,33 +171,6 @@ def importVersion():
@@ -182,33 +182,6 @@
required.
"""
import imp
@@ -34,7 +34,15 @@ dependencies = ' '.join(['libffi-dev', # For client side encryption for 'azure'
'mesos=1.0.1-2.0.94.ubuntu1604',
"nodejs", # CWL support for javascript expressions
'rsync',
'screen'])
'screen',
'build-essential', # We need a build environment to build Singularity 3.
'uuid-dev',
'libgpgme11-dev',
'libseccomp-dev',
'pkg-config',
'squashfs-tools',
'cryptsetup',
'git'])
def heredoc(s):
@@ -71,7 +79,26 @@ print(heredoc('''
RUN add-apt-repository -y ppa:jonathonf/python-3.6
RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install {dependencies} && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN apt-get -y update --fix-missing && \
DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \
tar xvf go1.13.3.linux-amd64.tar.gz && \
mv go/bin/* /usr/bin/ && \
mv go /usr/local/
RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \
cd $(go env GOPATH)/src/github.com/sylabs && \
git clone https://github.com/sylabs/singularity.git && \
cd singularity && \
git checkout v3.4.2 && \
./mconfig && \
cd ./builddir && \
make -j4 && \
make install
RUN mkdir /root/.ssh && \
chmod 700 /root/.ssh
@@ -186,8 +186,8 @@ the single_job_executor from CWLTool, providing a Toil-specific constructor for
filesystem access, and overriding the default PathMapper to use ToilPathMapper.
The ToilPathMapper keeps track of a file's symbolic identifier (the Toil
FileStore token), its local path on the host (the value returned by
readGlobalFile) and the location of the file inside the Docker container.
FileID), its local path on the host (the value returned by readGlobalFile) and
the location of the file inside the Docker container.
After executing single_job_executor from CWLTool, it gets back the output
object and status. If the underlying job failed, raise an exception. Files
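Illustratively, the record ToilPathMapper keeps per file amounts to a triple; all values below are hypothetical::

    mapping = {
        'toilfile:0123abcd': (                   # symbolic Toil FileID
            '/tmp/node-local/reference.fa',      # host path from readGlobalFile
            '/var/lib/cwl/inputs/reference.fa',  # path inside the Docker container
        ),
    }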
.. _minAwsPermissions:
Minimum AWS IAM permissions
---------------------------
Toil requires at least the following permissions in an IAM role to operate on a cluster.
These are added by default when launching a cluster. However, ensure that they are present
if creating a custom IAM role when :ref:`launching a cluster <launchAwsClusterDetails>`
with the ``--awsEc2ProfileArn`` parameter.
::
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*",
"s3:*",
"sdb:*",
"iam:PassRole"
],
"Resource": "*"
}
]
}
@@ -4,6 +4,21 @@ Environment Variables
=====================
There are several environment variables that affect the way Toil runs.
+----------------------------------+----------------------------------------------------+
| TOIL_CHECK_ENV | A flag that determines whether Toil will try to |
| | refer back to a Python virtual environment in |
| | which it is installed when composing commands that |
| | may be run on other hosts. If set to ``True``, if |
| | Toil is installed in the current virtual |
| | environment, it will use absolute paths to its own |
| | executables (and the virtual environment must thus |
| | be available at the same path on all nodes).    |
| | Otherwise, Toil internal commands such as |
| | ``_toil_worker`` will be resolved according to the |
| | ``PATH`` on the node where they are executed. This |
| | setting can be useful in a shared HPC environment, |
| | where users may have their own Toil installations |
| | in virtual environments. |
+----------------------------------+----------------------------------------------------+
| TOIL_WORKDIR | An absolute path to a directory where Toil will |
| | write its temporary files. This directory must |
@@ -28,7 +43,7 @@ There are several environment variables that affect the way Toil runs.
| TOIL_DOCKER_REGISTRY | The URL of the registry of the Toil Appliance |
| | image you wish to use. Docker will use Dockerhub |
| | by default, but the quay.io registry is also |
| | very popular and easily specifiable by settting |
| | very popular and easily specifiable by setting |
| | this option to ``quay.io``. |
+----------------------------------+----------------------------------------------------+
| TOIL_DOCKER_NAME | The name of the Toil Appliance image you |
@@ -36,6 +51,14 @@ There are several environment variables that affect the way Toil runs.
| | this option is provided to override this, |
| | since the image can be built with arbitrary names. |
+----------------------------------+----------------------------------------------------+
| TOIL_AWS_SECRET_NAME | For the Kubernetes batch system, the name of a |
| | Kubernetes secret which contains a ``credentials`` |
| | file granting access to AWS resources. Will be |
| | mounted as ``~/.aws`` inside Kubernetes-managed |
| | Toil containers. Enables the AWSJobStore to be |
| | used with the Kubernetes batch system, if the |
| | credentials allow access to S3 and SimpleDB. |
+----------------------------------+----------------------------------------------------+
| TOIL_AWS_ZONE | The EC2 zone to provision nodes in if using |
| | Toil's provisioner. |
+----------------------------------+----------------------------------------------------+
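For example, a couple of these variables can be exported before starting a workflow; the values here mirror ones used elsewhere in this repository::

    export TOIL_DOCKER_REGISTRY=quay.io/ucsc_cgl
    export TOIL_AWS_ZONE=us-west-2a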
Static Azure Clusters
*********************
.. warning::
Azure support in Toil is in an experimental state and has limited support!
This section describes how to launch Azure virtual machines to create a static
(non-autoscaling) cluster.
@@ -93,7 +93,7 @@ can be made available for local testing:
$ export TOIL_X_KEYNAME=[Your Keyname]
$ export TOIL_X_ZONE=[Desired Zone]
Where ``X`` is one of our currently supported cloud providers (``AZURE``, ``GCE``, ``AWS``).
Where ``X`` is one of our currently supported cloud providers (``AZURE (limited support)``, ``GCE``, ``AWS``).
- For example, to prepare for running Azure related integration tests in the ``westus`` region::
@@ -382,3 +382,16 @@ Pull Requests
Developers who have push access to the main Toil repository are encouraged to
make their pull requests from within the repository, to avoid this step.
* Prefer using "Squash and merge" when merging pull requests to master, especially
when the PR contains a "single unit" of work (i.e. if one were to rewrite the
PR from scratch with all the fixes included, they would have one commit for
the entire PR). This makes the commit history on master more readable
and easier to debug in case of a breakage.
When squashing a PR from multiple authors, please add
`Co-authored-by`_ to give credit to all contributing authors.
See `Issue #2816`_ for more details.
.. _Co-authored-by: https://github.blog/2018-01-29-commit-together-with-co-authors/
.. _Issue #2816: https://github.com/DataBiosphere/toil/issues/2816
@@ -63,6 +63,11 @@ workflows. For example:
.. literalinclude:: ../../src/toil/test/docs/scripts/tutorial_invokeworkflow.py
.. note::
Do not include a `.` in the name of your Python script (besides `.py` at the end).
This allows Toil to import the types and functions defined in your file when starting a new process.
Alternatively, the more powerful :class:`toil.common.Toil` class can be used to
run and resume workflows. It is used as a context manager and allows for
preliminary setup, such as staging of files into the job store on the leader
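A minimal sketch of that pattern (the job-store path and the job function are placeholders)::

    from toil.common import Toil
    from toil.job import Job

    def hello(job):
        job.log('hello from a Toil job')

    if __name__ == '__main__':
        options = Job.Runner.getDefaultOptions('./my-jobstore')  # placeholder path
        with Toil(options) as toil:
            if not options.restart:
                toil.start(Job.wrapJobFn(hello))
            else:
                toil.restart()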
@@ -323,7 +328,7 @@ multiple jobs' output values::
Just like regular promises, the return value must be determined prior to
scheduling any job that depends on the return value. In our example above, notice
how the dependant jobs were follow ons to the parent while promising jobs are
how the dependent jobs were follow ons to the parent while promising jobs are
children of the parent. This ordering ensures that all promises are
properly fulfilled.
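A hedged sketch of that ordering (function names are placeholders): the promising jobs run as children of the parent, and the job consuming their ``rv()`` promises is a follow-on::

    from toil.job import Job

    def produce(job, x):
        return x * 2

    def consume(job, a, b):
        job.log('sum of promised values: %s' % (a + b))

    root = Job()
    p1 = root.addChildJobFn(produce, 1)  # promising jobs are children
    p2 = root.addChildJobFn(produce, 2)
    # The consumer is a follow-on, so both promises resolve before it runs.
    root.addFollowOnJobFn(consume, p1.rv(), p2.rv())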
@@ -344,9 +349,10 @@ Managing files within a workflow
--------------------------------
It is frequently the case that a workflow will want to create files, both
persistent and temporary, during its run. The :class:`toil.fileStore.FileStore`
class is used by jobs to manage these files in a manner that guarantees cleanup
and resumption on failure.
persistent and temporary, during its run. The
:class:`toil.fileStores.abstractFileStore.AbstractFileStore` class is used by
jobs to manage these files in a manner that guarantees cleanup and resumption
on failure.
The :func:`toil.job.Job.run` method has a file store instance as an argument.
The following example shows how this can be used to create temporary files that
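In outline, a hedged sketch of that usage (the file name is a placeholder)::

    import os

    def run(self, fileStore):
        # Job-local scratch space; cleaned up automatically on job exit.
        scratchDir = fileStore.getLocalTempDir()
        scratchFile = os.path.join(scratchDir, 'output.txt')  # placeholder name
        with open(scratchFile, 'w') as f:
            f.write('intermediate result')
        # writeGlobalFile persists the file and returns a FileID for later jobs.
        return fileStore.writeGlobalFile(scratchFile)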
@@ -454,6 +460,12 @@ An example of a basic ``dockerCall`` is below::
workDir=job.tempDir,
parameters=['index', '/data/reference.fa'])
Note the assumption that the `reference.fa` file is located in `/data`. This is Toil's
standard convention as a mount location to reduce boilerplate when calling `dockerCall`.
Users can choose their own mount locations by supplying a `volumes` kwarg to `dockerCall`,
such as: `volumes={working_dir: {'bind': '/data', 'mode': 'rw'}}`, where `working_dir`
is an absolute path on the user's filesystem.
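A hedged sketch of such an override, mirroring the ``dockerCall`` snippet above (the tool image and host path are hypothetical)::

    working_dir = '/home/user/data'  # hypothetical absolute path on the host
    dockerCall(job=job,
               tool='quay.io/ucsc_cgl/samtools',  # hypothetical image
               workDir=job.tempDir,
               parameters=['index', '/data/reference.fa'],
               volumes={working_dir: {'bind': '/data', 'mode': 'rw'}})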
``dockerCall`` can also be added to workflows like any other job function:
.. literalinclude:: ../../src/toil/test/docs/scripts/tutorial_docker.py
.. _api-filestore:
Job.FileStore API
job.fileStore API
*****************
The FileStore is an abstraction of a Toil run's shared storage.
The AbstractFileStore is an abstraction of a Toil run's shared storage.
.. autoclass:: toil.fileStore::FileStore
.. autoclass:: toil.fileStores.abstractFileStore::AbstractFileStore
:members:
.. autoclass:: toil.fileStore::FileID
.. autoclass:: toil.fileStores::FileID
:members:
@@ -17,7 +17,7 @@ below.
Preparing Your Python Runtime Environment
-----------------------------------------
Toil currently supports only Python 2.7 and requires a virtualenv to be active to install.
Toil currently supports Python 2.7, 3.5, and 3.6, and requires a virtualenv to be active to install.
If not already present, please install the latest Python ``virtualenv`` using pip_::
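A typical invocation, mirroring the CI scripts above::

    $ pip install virtualenv
    $ virtualenv -p python3.6 venv
    $ . venv/bin/activate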